1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * kernel/sched/core.c
4 *
5 * Core kernel CPU scheduler code
6 *
7 * Copyright (C) 1991-2002 Linus Torvalds
8 * Copyright (C) 1998-2024 Ingo Molnar, Red Hat
9 */
10 #define INSTANTIATE_EXPORTED_MIGRATE_DISABLE
11 #include <linux/sched.h>
12 #include <linux/highmem.h>
13 #include <linux/hrtimer_api.h>
14 #include <linux/ktime_api.h>
15 #include <linux/sched/signal.h>
16 #include <linux/syscalls_api.h>
17 #include <linux/debug_locks.h>
18 #include <linux/prefetch.h>
19 #include <linux/capability.h>
20 #include <linux/pgtable_api.h>
21 #include <linux/wait_bit.h>
22 #include <linux/jiffies.h>
23 #include <linux/spinlock_api.h>
24 #include <linux/cpumask_api.h>
25 #include <linux/lockdep_api.h>
26 #include <linux/hardirq.h>
27 #include <linux/softirq.h>
28 #include <linux/refcount_api.h>
29 #include <linux/topology.h>
30 #include <linux/sched/clock.h>
31 #include <linux/sched/cond_resched.h>
32 #include <linux/sched/cputime.h>
33 #include <linux/sched/debug.h>
34 #include <linux/sched/hotplug.h>
35 #include <linux/sched/init.h>
36 #include <linux/sched/isolation.h>
37 #include <linux/sched/loadavg.h>
38 #include <linux/sched/mm.h>
39 #include <linux/sched/nohz.h>
40 #include <linux/sched/rseq_api.h>
41 #include <linux/sched/rt.h>
42
43 #include <linux/blkdev.h>
44 #include <linux/context_tracking.h>
45 #include <linux/cpuset.h>
46 #include <linux/delayacct.h>
47 #include <linux/init_task.h>
48 #include <linux/interrupt.h>
49 #include <linux/ioprio.h>
50 #include <linux/kallsyms.h>
51 #include <linux/kcov.h>
52 #include <linux/kprobes.h>
53 #include <linux/llist_api.h>
54 #include <linux/mmu_context.h>
55 #include <linux/mmzone.h>
56 #include <linux/mutex_api.h>
57 #include <linux/nmi.h>
58 #include <linux/nospec.h>
59 #include <linux/perf_event_api.h>
60 #include <linux/profile.h>
61 #include <linux/psi.h>
62 #include <linux/rcuwait_api.h>
63 #include <linux/rseq.h>
64 #include <linux/sched/wake_q.h>
65 #include <linux/scs.h>
66 #include <linux/slab.h>
67 #include <linux/syscalls.h>
68 #include <linux/vtime.h>
69 #include <linux/wait_api.h>
70 #include <linux/workqueue_api.h>
71 #include <linux/livepatch_sched.h>
72
73 #ifdef CONFIG_PREEMPT_DYNAMIC
74 # ifdef CONFIG_GENERIC_IRQ_ENTRY
75 # include <linux/irq-entry-common.h>
76 # endif
77 #endif
78
79 #include <uapi/linux/sched/types.h>
80
81 #include <asm/irq_regs.h>
82 #include <asm/switch_to.h>
83 #include <asm/tlb.h>
84
85 #define CREATE_TRACE_POINTS
86 #include <linux/sched/rseq_api.h>
87 #include <trace/events/sched.h>
88 #include <trace/events/ipi.h>
89 #undef CREATE_TRACE_POINTS
90
91 #include "sched.h"
92 #include "stats.h"
93
94 #include "autogroup.h"
95 #include "pelt.h"
96 #include "smp.h"
97
98 #include "../workqueue_internal.h"
99 #include "../../io_uring/io-wq.h"
100 #include "../smpboot.h"
101 #include "../locking/mutex.h"
102
103 EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
104 EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);
105
106 /*
107 * Export tracepoints that act as a bare tracehook (ie: have no trace event
108 * associated with them) to allow external modules to probe them.
109 */
110 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp);
111 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
112 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
113 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
114 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
115 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_hw_tp);
116 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp);
117 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
118 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
119 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
120 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
121 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_compute_energy_tp);
122
123 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
124 DEFINE_PER_CPU(struct rnd_state, sched_rnd_state);
125
126 #ifdef CONFIG_SCHED_PROXY_EXEC
127 DEFINE_STATIC_KEY_TRUE(__sched_proxy_exec);
setup_proxy_exec(char * str)128 static int __init setup_proxy_exec(char *str)
129 {
130 bool proxy_enable = true;
131
132 if (*str && kstrtobool(str + 1, &proxy_enable)) {
133 pr_warn("Unable to parse sched_proxy_exec=\n");
134 return 0;
135 }
136
137 if (proxy_enable) {
138 pr_info("sched_proxy_exec enabled via boot arg\n");
139 static_branch_enable(&__sched_proxy_exec);
140 } else {
141 pr_info("sched_proxy_exec disabled via boot arg\n");
142 static_branch_disable(&__sched_proxy_exec);
143 }
144 return 1;
145 }
146 #else
setup_proxy_exec(char * str)147 static int __init setup_proxy_exec(char *str)
148 {
149 pr_warn("CONFIG_SCHED_PROXY_EXEC=n, so it cannot be enabled or disabled at boot time\n");
150 return 0;
151 }
152 #endif
153 __setup("sched_proxy_exec", setup_proxy_exec);
154
155 /*
156 * Debugging: various feature bits
157 *
158 * If SCHED_DEBUG is disabled, each compilation unit has its own copy of
159 * sysctl_sched_features, defined in sched.h, to allow constants propagation
160 * at compile time and compiler optimization based on features default.
161 */
162 #define SCHED_FEAT(name, enabled) \
163 (1UL << __SCHED_FEAT_##name) * enabled |
164 __read_mostly unsigned int sysctl_sched_features =
165 #include "features.h"
166 0;
167 #undef SCHED_FEAT
168
169 /*
170 * Print a warning if need_resched is set for the given duration (if
171 * LATENCY_WARN is enabled).
172 *
173 * If sysctl_resched_latency_warn_once is set, only one warning will be shown
174 * per boot.
175 */
176 __read_mostly int sysctl_resched_latency_warn_ms = 100;
177 __read_mostly int sysctl_resched_latency_warn_once = 1;
178
179 /*
180 * Number of tasks to iterate in a single balance run.
181 * Limited because this is done with IRQs disabled.
182 */
183 __read_mostly unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;
184
185 __read_mostly int scheduler_running;
186
187 #ifdef CONFIG_SCHED_CORE
188
189 DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);
190
191 /* kernel prio, less is more */
__task_prio(const struct task_struct * p)192 static inline int __task_prio(const struct task_struct *p)
193 {
194 if (p->sched_class == &stop_sched_class) /* trumps deadline */
195 return -2;
196
197 if (p->dl_server)
198 return -1; /* deadline */
199
200 if (rt_or_dl_prio(p->prio))
201 return p->prio; /* [-1, 99] */
202
203 if (p->sched_class == &idle_sched_class)
204 return MAX_RT_PRIO + NICE_WIDTH; /* 140 */
205
206 if (task_on_scx(p))
207 return MAX_RT_PRIO + MAX_NICE + 1; /* 120, squash ext */
208
209 return MAX_RT_PRIO + MAX_NICE; /* 119, squash fair */
210 }
211
212 /*
213 * l(a,b)
214 * le(a,b) := !l(b,a)
215 * g(a,b) := l(b,a)
216 * ge(a,b) := !l(a,b)
217 */
218
219 /* real prio, less is less */
prio_less(const struct task_struct * a,const struct task_struct * b,bool in_fi)220 static inline bool prio_less(const struct task_struct *a,
221 const struct task_struct *b, bool in_fi)
222 {
223
224 int pa = __task_prio(a), pb = __task_prio(b);
225
226 if (-pa < -pb)
227 return true;
228
229 if (-pb < -pa)
230 return false;
231
232 if (pa == -1) { /* dl_prio() doesn't work because of stop_class above */
233 const struct sched_dl_entity *a_dl, *b_dl;
234
235 a_dl = &a->dl;
236 /*
237 * Since,'a' and 'b' can be CFS tasks served by DL server,
238 * __task_prio() can return -1 (for DL) even for those. In that
239 * case, get to the dl_server's DL entity.
240 */
241 if (a->dl_server)
242 a_dl = a->dl_server;
243
244 b_dl = &b->dl;
245 if (b->dl_server)
246 b_dl = b->dl_server;
247
248 return !dl_time_before(a_dl->deadline, b_dl->deadline);
249 }
250
251 if (pa == MAX_RT_PRIO + MAX_NICE) /* fair */
252 return cfs_prio_less(a, b, in_fi);
253
254 #ifdef CONFIG_SCHED_CLASS_EXT
255 if (pa == MAX_RT_PRIO + MAX_NICE + 1) /* ext */
256 return scx_prio_less(a, b, in_fi);
257 #endif
258
259 return false;
260 }
261
__sched_core_less(const struct task_struct * a,const struct task_struct * b)262 static inline bool __sched_core_less(const struct task_struct *a,
263 const struct task_struct *b)
264 {
265 if (a->core_cookie < b->core_cookie)
266 return true;
267
268 if (a->core_cookie > b->core_cookie)
269 return false;
270
271 /* flip prio, so high prio is leftmost */
272 if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count))
273 return true;
274
275 return false;
276 }
277
278 #define __node_2_sc(node) rb_entry((node), struct task_struct, core_node)
279
rb_sched_core_less(struct rb_node * a,const struct rb_node * b)280 static inline bool rb_sched_core_less(struct rb_node *a, const struct rb_node *b)
281 {
282 return __sched_core_less(__node_2_sc(a), __node_2_sc(b));
283 }
284
rb_sched_core_cmp(const void * key,const struct rb_node * node)285 static inline int rb_sched_core_cmp(const void *key, const struct rb_node *node)
286 {
287 const struct task_struct *p = __node_2_sc(node);
288 unsigned long cookie = (unsigned long)key;
289
290 if (cookie < p->core_cookie)
291 return -1;
292
293 if (cookie > p->core_cookie)
294 return 1;
295
296 return 0;
297 }
298
sched_core_enqueue(struct rq * rq,struct task_struct * p)299 void sched_core_enqueue(struct rq *rq, struct task_struct *p)
300 {
301 if (p->se.sched_delayed)
302 return;
303
304 rq->core->core_task_seq++;
305
306 if (!p->core_cookie)
307 return;
308
309 rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);
310 }
311
sched_core_dequeue(struct rq * rq,struct task_struct * p,int flags)312 void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags)
313 {
314 if (p->se.sched_delayed)
315 return;
316
317 rq->core->core_task_seq++;
318
319 if (sched_core_enqueued(p)) {
320 rb_erase(&p->core_node, &rq->core_tree);
321 RB_CLEAR_NODE(&p->core_node);
322 }
323
324 /*
325 * Migrating the last task off the cpu, with the cpu in forced idle
326 * state. Reschedule to create an accounting edge for forced idle,
327 * and re-examine whether the core is still in forced idle state.
328 */
329 if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 &&
330 rq->core->core_forceidle_count && rq->curr == rq->idle)
331 resched_curr(rq);
332 }
333
sched_task_is_throttled(struct task_struct * p,int cpu)334 static int sched_task_is_throttled(struct task_struct *p, int cpu)
335 {
336 if (p->sched_class->task_is_throttled)
337 return p->sched_class->task_is_throttled(p, cpu);
338
339 return 0;
340 }
341
sched_core_next(struct task_struct * p,unsigned long cookie)342 static struct task_struct *sched_core_next(struct task_struct *p, unsigned long cookie)
343 {
344 struct rb_node *node = &p->core_node;
345 int cpu = task_cpu(p);
346
347 do {
348 node = rb_next(node);
349 if (!node)
350 return NULL;
351
352 p = __node_2_sc(node);
353 if (p->core_cookie != cookie)
354 return NULL;
355
356 } while (sched_task_is_throttled(p, cpu));
357
358 return p;
359 }
360
361 /*
362 * Find left-most (aka, highest priority) and unthrottled task matching @cookie.
363 * If no suitable task is found, NULL will be returned.
364 */
sched_core_find(struct rq * rq,unsigned long cookie)365 static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie)
366 {
367 struct task_struct *p;
368 struct rb_node *node;
369
370 node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp);
371 if (!node)
372 return NULL;
373
374 p = __node_2_sc(node);
375 if (!sched_task_is_throttled(p, rq->cpu))
376 return p;
377
378 return sched_core_next(p, cookie);
379 }
380
381 /*
382 * Magic required such that:
383 *
384 * raw_spin_rq_lock(rq);
385 * ...
386 * raw_spin_rq_unlock(rq);
387 *
388 * ends up locking and unlocking the _same_ lock, and all CPUs
389 * always agree on what rq has what lock.
390 *
391 * XXX entirely possible to selectively enable cores, don't bother for now.
392 */
393
394 static DEFINE_MUTEX(sched_core_mutex);
395 static atomic_t sched_core_count;
396 static struct cpumask sched_core_mask;
397
sched_core_lock(int cpu,unsigned long * flags)398 static void sched_core_lock(int cpu, unsigned long *flags)
399 {
400 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
401 int t, i = 0;
402
403 local_irq_save(*flags);
404 for_each_cpu(t, smt_mask)
405 raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);
406 }
407
sched_core_unlock(int cpu,unsigned long * flags)408 static void sched_core_unlock(int cpu, unsigned long *flags)
409 {
410 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
411 int t;
412
413 for_each_cpu(t, smt_mask)
414 raw_spin_unlock(&cpu_rq(t)->__lock);
415 local_irq_restore(*flags);
416 }
417
__sched_core_flip(bool enabled)418 static void __sched_core_flip(bool enabled)
419 {
420 unsigned long flags;
421 int cpu, t;
422
423 cpus_read_lock();
424
425 /*
426 * Toggle the online cores, one by one.
427 */
428 cpumask_copy(&sched_core_mask, cpu_online_mask);
429 for_each_cpu(cpu, &sched_core_mask) {
430 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
431
432 sched_core_lock(cpu, &flags);
433
434 for_each_cpu(t, smt_mask)
435 cpu_rq(t)->core_enabled = enabled;
436
437 cpu_rq(cpu)->core->core_forceidle_start = 0;
438
439 sched_core_unlock(cpu, &flags);
440
441 cpumask_andnot(&sched_core_mask, &sched_core_mask, smt_mask);
442 }
443
444 /*
445 * Toggle the offline CPUs.
446 */
447 for_each_cpu_andnot(cpu, cpu_possible_mask, cpu_online_mask)
448 cpu_rq(cpu)->core_enabled = enabled;
449
450 cpus_read_unlock();
451 }
452
sched_core_assert_empty(void)453 static void sched_core_assert_empty(void)
454 {
455 int cpu;
456
457 for_each_possible_cpu(cpu)
458 WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree));
459 }
460
__sched_core_enable(void)461 static void __sched_core_enable(void)
462 {
463 static_branch_enable(&__sched_core_enabled);
464 /*
465 * Ensure all previous instances of raw_spin_rq_*lock() have finished
466 * and future ones will observe !sched_core_disabled().
467 */
468 synchronize_rcu();
469 __sched_core_flip(true);
470 sched_core_assert_empty();
471 }
472
__sched_core_disable(void)473 static void __sched_core_disable(void)
474 {
475 sched_core_assert_empty();
476 __sched_core_flip(false);
477 static_branch_disable(&__sched_core_enabled);
478 }
479
sched_core_get(void)480 void sched_core_get(void)
481 {
482 if (atomic_inc_not_zero(&sched_core_count))
483 return;
484
485 mutex_lock(&sched_core_mutex);
486 if (!atomic_read(&sched_core_count))
487 __sched_core_enable();
488
489 smp_mb__before_atomic();
490 atomic_inc(&sched_core_count);
491 mutex_unlock(&sched_core_mutex);
492 }
493
__sched_core_put(struct work_struct * work)494 static void __sched_core_put(struct work_struct *work)
495 {
496 if (atomic_dec_and_mutex_lock(&sched_core_count, &sched_core_mutex)) {
497 __sched_core_disable();
498 mutex_unlock(&sched_core_mutex);
499 }
500 }
501
sched_core_put(void)502 void sched_core_put(void)
503 {
504 static DECLARE_WORK(_work, __sched_core_put);
505
506 /*
507 * "There can be only one"
508 *
509 * Either this is the last one, or we don't actually need to do any
510 * 'work'. If it is the last *again*, we rely on
511 * WORK_STRUCT_PENDING_BIT.
512 */
513 if (!atomic_add_unless(&sched_core_count, -1, 1))
514 schedule_work(&_work);
515 }
516
517 #else /* !CONFIG_SCHED_CORE: */
518
sched_core_enqueue(struct rq * rq,struct task_struct * p)519 static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { }
520 static inline void
sched_core_dequeue(struct rq * rq,struct task_struct * p,int flags)521 sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { }
522
523 #endif /* !CONFIG_SCHED_CORE */
524
525 /* need a wrapper since we may need to trace from modules */
526 EXPORT_TRACEPOINT_SYMBOL(sched_set_state_tp);
527
528 /* Call via the helper macro trace_set_current_state. */
__trace_set_current_state(int state_value)529 void __trace_set_current_state(int state_value)
530 {
531 trace_sched_set_state_tp(current, state_value);
532 }
533 EXPORT_SYMBOL(__trace_set_current_state);
534
535 /*
536 * Serialization rules:
537 *
538 * Lock order:
539 *
540 * p->pi_lock
541 * rq->lock
542 * hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
543 *
544 * rq1->lock
545 * rq2->lock where: rq1 < rq2
546 *
547 * Regular state:
548 *
549 * Normal scheduling state is serialized by rq->lock. __schedule() takes the
550 * local CPU's rq->lock, it optionally removes the task from the runqueue and
551 * always looks at the local rq data structures to find the most eligible task
552 * to run next.
553 *
554 * Task enqueue is also under rq->lock, possibly taken from another CPU.
555 * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
556 * the local CPU to avoid bouncing the runqueue state around [ see
557 * ttwu_queue_wakelist() ]
558 *
559 * Task wakeup, specifically wakeups that involve migration, are horribly
560 * complicated to avoid having to take two rq->locks.
561 *
562 * Special state:
563 *
564 * System-calls and anything external will use task_rq_lock() which acquires
565 * both p->pi_lock and rq->lock. As a consequence the state they change is
566 * stable while holding either lock:
567 *
568 * - sched_setaffinity()/
569 * set_cpus_allowed_ptr(): p->cpus_ptr, p->nr_cpus_allowed
570 * - set_user_nice(): p->se.load, p->*prio
571 * - __sched_setscheduler(): p->sched_class, p->policy, p->*prio,
572 * p->se.load, p->rt_priority,
573 * p->dl.dl_{runtime, deadline, period, flags, bw, density}
574 * - sched_setnuma(): p->numa_preferred_nid
575 * - sched_move_task(): p->sched_task_group
576 * - uclamp_update_active() p->uclamp*
577 *
578 * p->state <- TASK_*:
579 *
580 * is changed locklessly using set_current_state(), __set_current_state() or
581 * set_special_state(), see their respective comments, or by
582 * try_to_wake_up(). This latter uses p->pi_lock to serialize against
583 * concurrent self.
584 *
585 * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
586 *
587 * is set by activate_task() and cleared by deactivate_task()/block_task(),
588 * under rq->lock. Non-zero indicates the task is runnable, the special
589 * ON_RQ_MIGRATING state is used for migration without holding both
590 * rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
591 *
592 * Additionally it is possible to be ->on_rq but still be considered not
593 * runnable when p->se.sched_delayed is true. These tasks are on the runqueue
594 * but will be dequeued as soon as they get picked again. See the
595 * task_is_runnable() helper.
596 *
597 * p->on_cpu <- { 0, 1 }:
598 *
599 * is set by prepare_task() and cleared by finish_task() such that it will be
600 * set before p is scheduled-in and cleared after p is scheduled-out, both
601 * under rq->lock. Non-zero indicates the task is running on its CPU.
602 *
603 * [ The astute reader will observe that it is possible for two tasks on one
604 * CPU to have ->on_cpu = 1 at the same time. ]
605 *
606 * task_cpu(p): is changed by set_task_cpu(), the rules are:
607 *
608 * - Don't call set_task_cpu() on a blocked task:
609 *
610 * We don't care what CPU we're not running on, this simplifies hotplug,
611 * the CPU assignment of blocked tasks isn't required to be valid.
612 *
613 * - for try_to_wake_up(), called under p->pi_lock:
614 *
615 * This allows try_to_wake_up() to only take one rq->lock, see its comment.
616 *
617 * - for migration called under rq->lock:
618 * [ see task_on_rq_migrating() in task_rq_lock() ]
619 *
620 * o move_queued_task()
621 * o detach_task()
622 *
623 * - for migration called under double_rq_lock():
624 *
625 * o __migrate_swap_task()
626 * o push_rt_task() / pull_rt_task()
627 * o push_dl_task() / pull_dl_task()
628 * o dl_task_offline_migration()
629 *
630 */
631
raw_spin_rq_lock_nested(struct rq * rq,int subclass)632 void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
633 {
634 raw_spinlock_t *lock;
635
636 /* Matches synchronize_rcu() in __sched_core_enable() */
637 preempt_disable();
638 if (sched_core_disabled()) {
639 raw_spin_lock_nested(&rq->__lock, subclass);
640 /* preempt_count *MUST* be > 1 */
641 preempt_enable_no_resched();
642 return;
643 }
644
645 for (;;) {
646 lock = __rq_lockp(rq);
647 raw_spin_lock_nested(lock, subclass);
648 if (likely(lock == __rq_lockp(rq))) {
649 /* preempt_count *MUST* be > 1 */
650 preempt_enable_no_resched();
651 return;
652 }
653 raw_spin_unlock(lock);
654 }
655 }
656
raw_spin_rq_trylock(struct rq * rq)657 bool raw_spin_rq_trylock(struct rq *rq)
658 {
659 raw_spinlock_t *lock;
660 bool ret;
661
662 /* Matches synchronize_rcu() in __sched_core_enable() */
663 preempt_disable();
664 if (sched_core_disabled()) {
665 ret = raw_spin_trylock(&rq->__lock);
666 preempt_enable();
667 return ret;
668 }
669
670 for (;;) {
671 lock = __rq_lockp(rq);
672 ret = raw_spin_trylock(lock);
673 if (!ret || (likely(lock == __rq_lockp(rq)))) {
674 preempt_enable();
675 return ret;
676 }
677 raw_spin_unlock(lock);
678 }
679 }
680
raw_spin_rq_unlock(struct rq * rq)681 void raw_spin_rq_unlock(struct rq *rq)
682 {
683 raw_spin_unlock(rq_lockp(rq));
684 }
685
686 /*
687 * double_rq_lock - safely lock two runqueues
688 */
double_rq_lock(struct rq * rq1,struct rq * rq2)689 void double_rq_lock(struct rq *rq1, struct rq *rq2)
690 {
691 lockdep_assert_irqs_disabled();
692
693 if (rq_order_less(rq2, rq1))
694 swap(rq1, rq2);
695
696 raw_spin_rq_lock(rq1);
697 if (__rq_lockp(rq1) != __rq_lockp(rq2))
698 raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);
699
700 double_rq_clock_clear_update(rq1, rq2);
701 }
702
703 /*
704 * __task_rq_lock - lock the rq @p resides on.
705 */
__task_rq_lock(struct task_struct * p,struct rq_flags * rf)706 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
707 __acquires(rq->lock)
708 {
709 struct rq *rq;
710
711 lockdep_assert_held(&p->pi_lock);
712
713 for (;;) {
714 rq = task_rq(p);
715 raw_spin_rq_lock(rq);
716 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
717 rq_pin_lock(rq, rf);
718 return rq;
719 }
720 raw_spin_rq_unlock(rq);
721
722 while (unlikely(task_on_rq_migrating(p)))
723 cpu_relax();
724 }
725 }
726
727 /*
728 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
729 */
task_rq_lock(struct task_struct * p,struct rq_flags * rf)730 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
731 __acquires(p->pi_lock)
732 __acquires(rq->lock)
733 {
734 struct rq *rq;
735
736 for (;;) {
737 raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
738 rq = task_rq(p);
739 raw_spin_rq_lock(rq);
740 /*
741 * move_queued_task() task_rq_lock()
742 *
743 * ACQUIRE (rq->lock)
744 * [S] ->on_rq = MIGRATING [L] rq = task_rq()
745 * WMB (__set_task_cpu()) ACQUIRE (rq->lock);
746 * [S] ->cpu = new_cpu [L] task_rq()
747 * [L] ->on_rq
748 * RELEASE (rq->lock)
749 *
750 * If we observe the old CPU in task_rq_lock(), the acquire of
751 * the old rq->lock will fully serialize against the stores.
752 *
753 * If we observe the new CPU in task_rq_lock(), the address
754 * dependency headed by '[L] rq = task_rq()' and the acquire
755 * will pair with the WMB to ensure we then also see migrating.
756 */
757 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
758 rq_pin_lock(rq, rf);
759 return rq;
760 }
761 raw_spin_rq_unlock(rq);
762 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
763
764 while (unlikely(task_on_rq_migrating(p)))
765 cpu_relax();
766 }
767 }
768
769 /*
770 * RQ-clock updating methods:
771 */
772
update_rq_clock_task(struct rq * rq,s64 delta)773 static void update_rq_clock_task(struct rq *rq, s64 delta)
774 {
775 /*
776 * In theory, the compile should just see 0 here, and optimize out the call
777 * to sched_rt_avg_update. But I don't trust it...
778 */
779 s64 __maybe_unused steal = 0, irq_delta = 0;
780
781 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
782 if (irqtime_enabled()) {
783 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
784
785 /*
786 * Since irq_time is only updated on {soft,}irq_exit, we might run into
787 * this case when a previous update_rq_clock() happened inside a
788 * {soft,}IRQ region.
789 *
790 * When this happens, we stop ->clock_task and only update the
791 * prev_irq_time stamp to account for the part that fit, so that a next
792 * update will consume the rest. This ensures ->clock_task is
793 * monotonic.
794 *
795 * It does however cause some slight miss-attribution of {soft,}IRQ
796 * time, a more accurate solution would be to update the irq_time using
797 * the current rq->clock timestamp, except that would require using
798 * atomic ops.
799 */
800 if (irq_delta > delta)
801 irq_delta = delta;
802
803 rq->prev_irq_time += irq_delta;
804 delta -= irq_delta;
805 delayacct_irq(rq->curr, irq_delta);
806 }
807 #endif
808 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
809 if (static_key_false((¶virt_steal_rq_enabled))) {
810 u64 prev_steal;
811
812 steal = prev_steal = paravirt_steal_clock(cpu_of(rq));
813 steal -= rq->prev_steal_time_rq;
814
815 if (unlikely(steal > delta))
816 steal = delta;
817
818 rq->prev_steal_time_rq = prev_steal;
819 delta -= steal;
820 }
821 #endif
822
823 rq->clock_task += delta;
824
825 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
826 if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
827 update_irq_load_avg(rq, irq_delta + steal);
828 #endif
829 update_rq_clock_pelt(rq, delta);
830 }
831
update_rq_clock(struct rq * rq)832 void update_rq_clock(struct rq *rq)
833 {
834 s64 delta;
835 u64 clock;
836
837 lockdep_assert_rq_held(rq);
838
839 if (rq->clock_update_flags & RQCF_ACT_SKIP)
840 return;
841
842 if (sched_feat(WARN_DOUBLE_CLOCK))
843 WARN_ON_ONCE(rq->clock_update_flags & RQCF_UPDATED);
844 rq->clock_update_flags |= RQCF_UPDATED;
845
846 clock = sched_clock_cpu(cpu_of(rq));
847 scx_rq_clock_update(rq, clock);
848
849 delta = clock - rq->clock;
850 if (delta < 0)
851 return;
852 rq->clock += delta;
853
854 update_rq_clock_task(rq, delta);
855 }
856
857 #ifdef CONFIG_SCHED_HRTICK
858 /*
859 * Use HR-timers to deliver accurate preemption points.
860 */
861
hrtick_clear(struct rq * rq)862 static void hrtick_clear(struct rq *rq)
863 {
864 if (hrtimer_active(&rq->hrtick_timer))
865 hrtimer_cancel(&rq->hrtick_timer);
866 }
867
868 /*
869 * High-resolution timer tick.
870 * Runs from hardirq context with interrupts disabled.
871 */
hrtick(struct hrtimer * timer)872 static enum hrtimer_restart hrtick(struct hrtimer *timer)
873 {
874 struct rq *rq = container_of(timer, struct rq, hrtick_timer);
875 struct rq_flags rf;
876
877 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
878
879 rq_lock(rq, &rf);
880 update_rq_clock(rq);
881 rq->donor->sched_class->task_tick(rq, rq->curr, 1);
882 rq_unlock(rq, &rf);
883
884 return HRTIMER_NORESTART;
885 }
886
__hrtick_restart(struct rq * rq)887 static void __hrtick_restart(struct rq *rq)
888 {
889 struct hrtimer *timer = &rq->hrtick_timer;
890 ktime_t time = rq->hrtick_time;
891
892 hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
893 }
894
895 /*
896 * called from hardirq (IPI) context
897 */
__hrtick_start(void * arg)898 static void __hrtick_start(void *arg)
899 {
900 struct rq *rq = arg;
901 struct rq_flags rf;
902
903 rq_lock(rq, &rf);
904 __hrtick_restart(rq);
905 rq_unlock(rq, &rf);
906 }
907
908 /*
909 * Called to set the hrtick timer state.
910 *
911 * called with rq->lock held and IRQs disabled
912 */
hrtick_start(struct rq * rq,u64 delay)913 void hrtick_start(struct rq *rq, u64 delay)
914 {
915 struct hrtimer *timer = &rq->hrtick_timer;
916 s64 delta;
917
918 /*
919 * Don't schedule slices shorter than 10000ns, that just
920 * doesn't make sense and can cause timer DoS.
921 */
922 delta = max_t(s64, delay, 10000LL);
923 rq->hrtick_time = ktime_add_ns(hrtimer_cb_get_time(timer), delta);
924
925 if (rq == this_rq())
926 __hrtick_restart(rq);
927 else
928 smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
929 }
930
hrtick_rq_init(struct rq * rq)931 static void hrtick_rq_init(struct rq *rq)
932 {
933 INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
934 hrtimer_setup(&rq->hrtick_timer, hrtick, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
935 }
936 #else /* !CONFIG_SCHED_HRTICK: */
hrtick_clear(struct rq * rq)937 static inline void hrtick_clear(struct rq *rq)
938 {
939 }
940
hrtick_rq_init(struct rq * rq)941 static inline void hrtick_rq_init(struct rq *rq)
942 {
943 }
944 #endif /* !CONFIG_SCHED_HRTICK */
945
946 /*
947 * try_cmpxchg based fetch_or() macro so it works for different integer types:
948 */
949 #define fetch_or(ptr, mask) \
950 ({ \
951 typeof(ptr) _ptr = (ptr); \
952 typeof(mask) _mask = (mask); \
953 typeof(*_ptr) _val = *_ptr; \
954 \
955 do { \
956 } while (!try_cmpxchg(_ptr, &_val, _val | _mask)); \
957 _val; \
958 })
959
960 #ifdef TIF_POLLING_NRFLAG
961 /*
962 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
963 * this avoids any races wrt polling state changes and thereby avoids
964 * spurious IPIs.
965 */
set_nr_and_not_polling(struct thread_info * ti,int tif)966 static inline bool set_nr_and_not_polling(struct thread_info *ti, int tif)
967 {
968 return !(fetch_or(&ti->flags, 1 << tif) & _TIF_POLLING_NRFLAG);
969 }
970
971 /*
972 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
973 *
974 * If this returns true, then the idle task promises to call
975 * sched_ttwu_pending() and reschedule soon.
976 */
set_nr_if_polling(struct task_struct * p)977 static bool set_nr_if_polling(struct task_struct *p)
978 {
979 struct thread_info *ti = task_thread_info(p);
980 typeof(ti->flags) val = READ_ONCE(ti->flags);
981
982 do {
983 if (!(val & _TIF_POLLING_NRFLAG))
984 return false;
985 if (val & _TIF_NEED_RESCHED)
986 return true;
987 } while (!try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED));
988
989 return true;
990 }
991
992 #else
set_nr_and_not_polling(struct thread_info * ti,int tif)993 static inline bool set_nr_and_not_polling(struct thread_info *ti, int tif)
994 {
995 set_ti_thread_flag(ti, tif);
996 return true;
997 }
998
set_nr_if_polling(struct task_struct * p)999 static inline bool set_nr_if_polling(struct task_struct *p)
1000 {
1001 return false;
1002 }
1003 #endif
1004
__wake_q_add(struct wake_q_head * head,struct task_struct * task)1005 static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
1006 {
1007 struct wake_q_node *node = &task->wake_q;
1008
1009 /*
1010 * Atomically grab the task, if ->wake_q is !nil already it means
1011 * it's already queued (either by us or someone else) and will get the
1012 * wakeup due to that.
1013 *
1014 * In order to ensure that a pending wakeup will observe our pending
1015 * state, even in the failed case, an explicit smp_mb() must be used.
1016 */
1017 smp_mb__before_atomic();
1018 if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
1019 return false;
1020
1021 /*
1022 * The head is context local, there can be no concurrency.
1023 */
1024 *head->lastp = node;
1025 head->lastp = &node->next;
1026 return true;
1027 }
1028
1029 /**
1030 * wake_q_add() - queue a wakeup for 'later' waking.
1031 * @head: the wake_q_head to add @task to
1032 * @task: the task to queue for 'later' wakeup
1033 *
1034 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
1035 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
1036 * instantly.
1037 *
1038 * This function must be used as-if it were wake_up_process(); IOW the task
1039 * must be ready to be woken at this location.
1040 */
wake_q_add(struct wake_q_head * head,struct task_struct * task)1041 void wake_q_add(struct wake_q_head *head, struct task_struct *task)
1042 {
1043 if (__wake_q_add(head, task))
1044 get_task_struct(task);
1045 }
1046
1047 /**
1048 * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
1049 * @head: the wake_q_head to add @task to
1050 * @task: the task to queue for 'later' wakeup
1051 *
1052 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
1053 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
1054 * instantly.
1055 *
1056 * This function must be used as-if it were wake_up_process(); IOW the task
1057 * must be ready to be woken at this location.
1058 *
1059 * This function is essentially a task-safe equivalent to wake_q_add(). Callers
1060 * that already hold reference to @task can call the 'safe' version and trust
1061 * wake_q to do the right thing depending whether or not the @task is already
1062 * queued for wakeup.
1063 */
wake_q_add_safe(struct wake_q_head * head,struct task_struct * task)1064 void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
1065 {
1066 if (!__wake_q_add(head, task))
1067 put_task_struct(task);
1068 }
1069
wake_up_q(struct wake_q_head * head)1070 void wake_up_q(struct wake_q_head *head)
1071 {
1072 struct wake_q_node *node = head->first;
1073
1074 while (node != WAKE_Q_TAIL) {
1075 struct task_struct *task;
1076
1077 task = container_of(node, struct task_struct, wake_q);
1078 node = node->next;
1079 /* pairs with cmpxchg_relaxed() in __wake_q_add() */
1080 WRITE_ONCE(task->wake_q.next, NULL);
1081 /* Task can safely be re-inserted now. */
1082
1083 /*
1084 * wake_up_process() executes a full barrier, which pairs with
1085 * the queueing in wake_q_add() so as not to miss wakeups.
1086 */
1087 wake_up_process(task);
1088 put_task_struct(task);
1089 }
1090 }
1091
1092 /*
1093 * resched_curr - mark rq's current task 'to be rescheduled now'.
1094 *
1095 * On UP this means the setting of the need_resched flag, on SMP it
1096 * might also involve a cross-CPU call to trigger the scheduler on
1097 * the target CPU.
1098 */
__resched_curr(struct rq * rq,int tif)1099 static void __resched_curr(struct rq *rq, int tif)
1100 {
1101 struct task_struct *curr = rq->curr;
1102 struct thread_info *cti = task_thread_info(curr);
1103 int cpu;
1104
1105 lockdep_assert_rq_held(rq);
1106
1107 /*
1108 * Always immediately preempt the idle task; no point in delaying doing
1109 * actual work.
1110 */
1111 if (is_idle_task(curr) && tif == TIF_NEED_RESCHED_LAZY)
1112 tif = TIF_NEED_RESCHED;
1113
1114 if (cti->flags & ((1 << tif) | _TIF_NEED_RESCHED))
1115 return;
1116
1117 cpu = cpu_of(rq);
1118
1119 trace_sched_set_need_resched_tp(curr, cpu, tif);
1120 if (cpu == smp_processor_id()) {
1121 set_ti_thread_flag(cti, tif);
1122 if (tif == TIF_NEED_RESCHED)
1123 set_preempt_need_resched();
1124 return;
1125 }
1126
1127 if (set_nr_and_not_polling(cti, tif)) {
1128 if (tif == TIF_NEED_RESCHED)
1129 smp_send_reschedule(cpu);
1130 } else {
1131 trace_sched_wake_idle_without_ipi(cpu);
1132 }
1133 }
1134
__trace_set_need_resched(struct task_struct * curr,int tif)1135 void __trace_set_need_resched(struct task_struct *curr, int tif)
1136 {
1137 trace_sched_set_need_resched_tp(curr, smp_processor_id(), tif);
1138 }
1139
resched_curr(struct rq * rq)1140 void resched_curr(struct rq *rq)
1141 {
1142 __resched_curr(rq, TIF_NEED_RESCHED);
1143 }
1144
1145 #ifdef CONFIG_PREEMPT_DYNAMIC
1146 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_preempt_lazy);
dynamic_preempt_lazy(void)1147 static __always_inline bool dynamic_preempt_lazy(void)
1148 {
1149 return static_branch_unlikely(&sk_dynamic_preempt_lazy);
1150 }
1151 #else
dynamic_preempt_lazy(void)1152 static __always_inline bool dynamic_preempt_lazy(void)
1153 {
1154 return IS_ENABLED(CONFIG_PREEMPT_LAZY);
1155 }
1156 #endif
1157
get_lazy_tif_bit(void)1158 static __always_inline int get_lazy_tif_bit(void)
1159 {
1160 if (dynamic_preempt_lazy())
1161 return TIF_NEED_RESCHED_LAZY;
1162
1163 return TIF_NEED_RESCHED;
1164 }
1165
resched_curr_lazy(struct rq * rq)1166 void resched_curr_lazy(struct rq *rq)
1167 {
1168 __resched_curr(rq, get_lazy_tif_bit());
1169 }
1170
resched_cpu(int cpu)1171 void resched_cpu(int cpu)
1172 {
1173 struct rq *rq = cpu_rq(cpu);
1174 unsigned long flags;
1175
1176 raw_spin_rq_lock_irqsave(rq, flags);
1177 if (cpu_online(cpu) || cpu == smp_processor_id())
1178 resched_curr(rq);
1179 raw_spin_rq_unlock_irqrestore(rq, flags);
1180 }
1181
1182 #ifdef CONFIG_NO_HZ_COMMON
1183 /*
1184 * In the semi idle case, use the nearest busy CPU for migrating timers
1185 * from an idle CPU. This is good for power-savings.
1186 *
1187 * We don't do similar optimization for completely idle system, as
1188 * selecting an idle CPU will add more delays to the timers than intended
1189 * (as that CPU's timer base may not be up to date wrt jiffies etc).
1190 */
get_nohz_timer_target(void)1191 int get_nohz_timer_target(void)
1192 {
1193 int i, cpu = smp_processor_id(), default_cpu = -1;
1194 struct sched_domain *sd;
1195 const struct cpumask *hk_mask;
1196
1197 if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE)) {
1198 if (!idle_cpu(cpu))
1199 return cpu;
1200 default_cpu = cpu;
1201 }
1202
1203 hk_mask = housekeeping_cpumask(HK_TYPE_KERNEL_NOISE);
1204
1205 guard(rcu)();
1206
1207 for_each_domain(cpu, sd) {
1208 for_each_cpu_and(i, sched_domain_span(sd), hk_mask) {
1209 if (cpu == i)
1210 continue;
1211
1212 if (!idle_cpu(i))
1213 return i;
1214 }
1215 }
1216
1217 if (default_cpu == -1)
1218 default_cpu = housekeeping_any_cpu(HK_TYPE_KERNEL_NOISE);
1219
1220 return default_cpu;
1221 }
1222
1223 /*
1224 * When add_timer_on() enqueues a timer into the timer wheel of an
1225 * idle CPU then this timer might expire before the next timer event
1226 * which is scheduled to wake up that CPU. In case of a completely
1227 * idle system the next event might even be infinite time into the
1228 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
1229 * leaves the inner idle loop so the newly added timer is taken into
1230 * account when the CPU goes back to idle and evaluates the timer
1231 * wheel for the next timer event.
1232 */
wake_up_idle_cpu(int cpu)1233 static void wake_up_idle_cpu(int cpu)
1234 {
1235 struct rq *rq = cpu_rq(cpu);
1236
1237 if (cpu == smp_processor_id())
1238 return;
1239
1240 /*
1241 * Set TIF_NEED_RESCHED and send an IPI if in the non-polling
1242 * part of the idle loop. This forces an exit from the idle loop
1243 * and a round trip to schedule(). Now this could be optimized
1244 * because a simple new idle loop iteration is enough to
1245 * re-evaluate the next tick. Provided some re-ordering of tick
1246 * nohz functions that would need to follow TIF_NR_POLLING
1247 * clearing:
1248 *
1249 * - On most architectures, a simple fetch_or on ti::flags with a
1250 * "0" value would be enough to know if an IPI needs to be sent.
1251 *
1252 * - x86 needs to perform a last need_resched() check between
1253 * monitor and mwait which doesn't take timers into account.
1254 * There a dedicated TIF_TIMER flag would be required to
1255 * fetch_or here and be checked along with TIF_NEED_RESCHED
1256 * before mwait().
1257 *
1258 * However, remote timer enqueue is not such a frequent event
1259 * and testing of the above solutions didn't appear to report
1260 * much benefits.
1261 */
1262 if (set_nr_and_not_polling(task_thread_info(rq->idle), TIF_NEED_RESCHED))
1263 smp_send_reschedule(cpu);
1264 else
1265 trace_sched_wake_idle_without_ipi(cpu);
1266 }
1267
wake_up_full_nohz_cpu(int cpu)1268 static bool wake_up_full_nohz_cpu(int cpu)
1269 {
1270 /*
1271 * We just need the target to call irq_exit() and re-evaluate
1272 * the next tick. The nohz full kick at least implies that.
1273 * If needed we can still optimize that later with an
1274 * empty IRQ.
1275 */
1276 if (cpu_is_offline(cpu))
1277 return true; /* Don't try to wake offline CPUs. */
1278 if (tick_nohz_full_cpu(cpu)) {
1279 if (cpu != smp_processor_id() ||
1280 tick_nohz_tick_stopped())
1281 tick_nohz_full_kick_cpu(cpu);
1282 return true;
1283 }
1284
1285 return false;
1286 }
1287
1288 /*
1289 * Wake up the specified CPU. If the CPU is going offline, it is the
1290 * caller's responsibility to deal with the lost wakeup, for example,
1291 * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
1292 */
wake_up_nohz_cpu(int cpu)1293 void wake_up_nohz_cpu(int cpu)
1294 {
1295 if (!wake_up_full_nohz_cpu(cpu))
1296 wake_up_idle_cpu(cpu);
1297 }
1298
nohz_csd_func(void * info)1299 static void nohz_csd_func(void *info)
1300 {
1301 struct rq *rq = info;
1302 int cpu = cpu_of(rq);
1303 unsigned int flags;
1304
1305 /*
1306 * Release the rq::nohz_csd.
1307 */
1308 flags = atomic_fetch_andnot(NOHZ_KICK_MASK | NOHZ_NEWILB_KICK, nohz_flags(cpu));
1309 WARN_ON(!(flags & NOHZ_KICK_MASK));
1310
1311 rq->idle_balance = idle_cpu(cpu);
1312 if (rq->idle_balance) {
1313 rq->nohz_idle_balance = flags;
1314 __raise_softirq_irqoff(SCHED_SOFTIRQ);
1315 }
1316 }
1317
1318 #endif /* CONFIG_NO_HZ_COMMON */
1319
1320 #ifdef CONFIG_NO_HZ_FULL
__need_bw_check(struct rq * rq,struct task_struct * p)1321 static inline bool __need_bw_check(struct rq *rq, struct task_struct *p)
1322 {
1323 if (rq->nr_running != 1)
1324 return false;
1325
1326 if (p->sched_class != &fair_sched_class)
1327 return false;
1328
1329 if (!task_on_rq_queued(p))
1330 return false;
1331
1332 return true;
1333 }
1334
sched_can_stop_tick(struct rq * rq)1335 bool sched_can_stop_tick(struct rq *rq)
1336 {
1337 int fifo_nr_running;
1338
1339 /* Deadline tasks, even if single, need the tick */
1340 if (rq->dl.dl_nr_running)
1341 return false;
1342
1343 /*
1344 * If there are more than one RR tasks, we need the tick to affect the
1345 * actual RR behaviour.
1346 */
1347 if (rq->rt.rr_nr_running) {
1348 if (rq->rt.rr_nr_running == 1)
1349 return true;
1350 else
1351 return false;
1352 }
1353
1354 /*
1355 * If there's no RR tasks, but FIFO tasks, we can skip the tick, no
1356 * forced preemption between FIFO tasks.
1357 */
1358 fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
1359 if (fifo_nr_running)
1360 return true;
1361
1362 /*
1363 * If there are no DL,RR/FIFO tasks, there must only be CFS or SCX tasks
1364 * left. For CFS, if there's more than one we need the tick for
1365 * involuntary preemption. For SCX, ask.
1366 */
1367 if (scx_enabled() && !scx_can_stop_tick(rq))
1368 return false;
1369
1370 if (rq->cfs.h_nr_queued > 1)
1371 return false;
1372
1373 /*
1374 * If there is one task and it has CFS runtime bandwidth constraints
1375 * and it's on the cpu now we don't want to stop the tick.
1376 * This check prevents clearing the bit if a newly enqueued task here is
1377 * dequeued by migrating while the constrained task continues to run.
1378 * E.g. going from 2->1 without going through pick_next_task().
1379 */
1380 if (__need_bw_check(rq, rq->curr)) {
1381 if (cfs_task_bw_constrained(rq->curr))
1382 return false;
1383 }
1384
1385 return true;
1386 }
1387 #endif /* CONFIG_NO_HZ_FULL */
1388
1389 #if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_FAIR_GROUP_SCHED)
1390 /*
1391 * Iterate task_group tree rooted at *from, calling @down when first entering a
1392 * node and @up when leaving it for the final time.
1393 *
1394 * Caller must hold rcu_lock or sufficient equivalent.
1395 */
walk_tg_tree_from(struct task_group * from,tg_visitor down,tg_visitor up,void * data)1396 int walk_tg_tree_from(struct task_group *from,
1397 tg_visitor down, tg_visitor up, void *data)
1398 {
1399 struct task_group *parent, *child;
1400 int ret;
1401
1402 parent = from;
1403
1404 down:
1405 ret = (*down)(parent, data);
1406 if (ret)
1407 goto out;
1408 list_for_each_entry_rcu(child, &parent->children, siblings) {
1409 parent = child;
1410 goto down;
1411
1412 up:
1413 continue;
1414 }
1415 ret = (*up)(parent, data);
1416 if (ret || parent == from)
1417 goto out;
1418
1419 child = parent;
1420 parent = parent->parent;
1421 if (parent)
1422 goto up;
1423 out:
1424 return ret;
1425 }
1426
tg_nop(struct task_group * tg,void * data)1427 int tg_nop(struct task_group *tg, void *data)
1428 {
1429 return 0;
1430 }
1431 #endif
1432
set_load_weight(struct task_struct * p,bool update_load)1433 void set_load_weight(struct task_struct *p, bool update_load)
1434 {
1435 int prio = p->static_prio - MAX_RT_PRIO;
1436 struct load_weight lw;
1437
1438 if (task_has_idle_policy(p)) {
1439 lw.weight = scale_load(WEIGHT_IDLEPRIO);
1440 lw.inv_weight = WMULT_IDLEPRIO;
1441 } else {
1442 lw.weight = scale_load(sched_prio_to_weight[prio]);
1443 lw.inv_weight = sched_prio_to_wmult[prio];
1444 }
1445
1446 /*
1447 * SCHED_OTHER tasks have to update their load when changing their
1448 * weight
1449 */
1450 if (update_load && p->sched_class->reweight_task)
1451 p->sched_class->reweight_task(task_rq(p), p, &lw);
1452 else
1453 p->se.load = lw;
1454 }
1455
1456 #ifdef CONFIG_UCLAMP_TASK
1457 /*
1458 * Serializes updates of utilization clamp values
1459 *
1460 * The (slow-path) user-space triggers utilization clamp value updates which
1461 * can require updates on (fast-path) scheduler's data structures used to
1462 * support enqueue/dequeue operations.
1463 * While the per-CPU rq lock protects fast-path update operations, user-space
1464 * requests are serialized using a mutex to reduce the risk of conflicting
1465 * updates or API abuses.
1466 */
1467 static __maybe_unused DEFINE_MUTEX(uclamp_mutex);
1468
1469 /* Max allowed minimum utilization */
1470 static unsigned int __maybe_unused sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;
1471
1472 /* Max allowed maximum utilization */
1473 static unsigned int __maybe_unused sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;
1474
1475 /*
1476 * By default RT tasks run at the maximum performance point/capacity of the
1477 * system. Uclamp enforces this by always setting UCLAMP_MIN of RT tasks to
1478 * SCHED_CAPACITY_SCALE.
1479 *
1480 * This knob allows admins to change the default behavior when uclamp is being
1481 * used. In battery powered devices, particularly, running at the maximum
1482 * capacity and frequency will increase energy consumption and shorten the
1483 * battery life.
1484 *
1485 * This knob only affects RT tasks that their uclamp_se->user_defined == false.
1486 *
1487 * This knob will not override the system default sched_util_clamp_min defined
1488 * above.
1489 */
1490 unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE;
1491
1492 /* All clamps are required to be less or equal than these values */
1493 static struct uclamp_se uclamp_default[UCLAMP_CNT];
1494
1495 /*
1496 * This static key is used to reduce the uclamp overhead in the fast path. It
1497 * primarily disables the call to uclamp_rq_{inc, dec}() in
1498 * enqueue/dequeue_task().
1499 *
1500 * This allows users to continue to enable uclamp in their kernel config with
1501 * minimum uclamp overhead in the fast path.
1502 *
1503 * As soon as userspace modifies any of the uclamp knobs, the static key is
1504 * enabled, since we have an actual users that make use of uclamp
1505 * functionality.
1506 *
1507 * The knobs that would enable this static key are:
1508 *
1509 * * A task modifying its uclamp value with sched_setattr().
1510 * * An admin modifying the sysctl_sched_uclamp_{min, max} via procfs.
1511 * * An admin modifying the cgroup cpu.uclamp.{min, max}
1512 */
1513 DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);
1514
1515 static inline unsigned int
uclamp_idle_value(struct rq * rq,enum uclamp_id clamp_id,unsigned int clamp_value)1516 uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
1517 unsigned int clamp_value)
1518 {
1519 /*
1520 * Avoid blocked utilization pushing up the frequency when we go
1521 * idle (which drops the max-clamp) by retaining the last known
1522 * max-clamp.
1523 */
1524 if (clamp_id == UCLAMP_MAX) {
1525 rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
1526 return clamp_value;
1527 }
1528
1529 return uclamp_none(UCLAMP_MIN);
1530 }
1531
uclamp_idle_reset(struct rq * rq,enum uclamp_id clamp_id,unsigned int clamp_value)1532 static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
1533 unsigned int clamp_value)
1534 {
1535 /* Reset max-clamp retention only on idle exit */
1536 if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
1537 return;
1538
1539 uclamp_rq_set(rq, clamp_id, clamp_value);
1540 }
1541
1542 static inline
uclamp_rq_max_value(struct rq * rq,enum uclamp_id clamp_id,unsigned int clamp_value)1543 unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
1544 unsigned int clamp_value)
1545 {
1546 struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
1547 int bucket_id = UCLAMP_BUCKETS - 1;
1548
1549 /*
1550 * Since both min and max clamps are max aggregated, find the
1551 * top most bucket with tasks in.
1552 */
1553 for ( ; bucket_id >= 0; bucket_id--) {
1554 if (!bucket[bucket_id].tasks)
1555 continue;
1556 return bucket[bucket_id].value;
1557 }
1558
1559 /* No tasks -- default clamp values */
1560 return uclamp_idle_value(rq, clamp_id, clamp_value);
1561 }
1562
__uclamp_update_util_min_rt_default(struct task_struct * p)1563 static void __uclamp_update_util_min_rt_default(struct task_struct *p)
1564 {
1565 unsigned int default_util_min;
1566 struct uclamp_se *uc_se;
1567
1568 lockdep_assert_held(&p->pi_lock);
1569
1570 uc_se = &p->uclamp_req[UCLAMP_MIN];
1571
1572 /* Only sync if user didn't override the default */
1573 if (uc_se->user_defined)
1574 return;
1575
1576 default_util_min = sysctl_sched_uclamp_util_min_rt_default;
1577 uclamp_se_set(uc_se, default_util_min, false);
1578 }
1579
uclamp_update_util_min_rt_default(struct task_struct * p)1580 static void uclamp_update_util_min_rt_default(struct task_struct *p)
1581 {
1582 if (!rt_task(p))
1583 return;
1584
1585 /* Protect updates to p->uclamp_* */
1586 guard(task_rq_lock)(p);
1587 __uclamp_update_util_min_rt_default(p);
1588 }
1589
1590 static inline struct uclamp_se
uclamp_tg_restrict(struct task_struct * p,enum uclamp_id clamp_id)1591 uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
1592 {
1593 /* Copy by value as we could modify it */
1594 struct uclamp_se uc_req = p->uclamp_req[clamp_id];
1595 #ifdef CONFIG_UCLAMP_TASK_GROUP
1596 unsigned int tg_min, tg_max, value;
1597
1598 /*
1599 * Tasks in autogroups or root task group will be
1600 * restricted by system defaults.
1601 */
1602 if (task_group_is_autogroup(task_group(p)))
1603 return uc_req;
1604 if (task_group(p) == &root_task_group)
1605 return uc_req;
1606
1607 tg_min = task_group(p)->uclamp[UCLAMP_MIN].value;
1608 tg_max = task_group(p)->uclamp[UCLAMP_MAX].value;
1609 value = uc_req.value;
1610 value = clamp(value, tg_min, tg_max);
1611 uclamp_se_set(&uc_req, value, false);
1612 #endif
1613
1614 return uc_req;
1615 }
1616
1617 /*
1618 * The effective clamp bucket index of a task depends on, by increasing
1619 * priority:
1620 * - the task specific clamp value, when explicitly requested from userspace
1621 * - the task group effective clamp value, for tasks not either in the root
1622 * group or in an autogroup
1623 * - the system default clamp value, defined by the sysadmin
1624 */
1625 static inline struct uclamp_se
uclamp_eff_get(struct task_struct * p,enum uclamp_id clamp_id)1626 uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
1627 {
1628 struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id);
1629 struct uclamp_se uc_max = uclamp_default[clamp_id];
1630
1631 /* System default restrictions always apply */
1632 if (unlikely(uc_req.value > uc_max.value))
1633 return uc_max;
1634
1635 return uc_req;
1636 }
1637
uclamp_eff_value(struct task_struct * p,enum uclamp_id clamp_id)1638 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
1639 {
1640 struct uclamp_se uc_eff;
1641
1642 /* Task currently refcounted: use back-annotated (effective) value */
1643 if (p->uclamp[clamp_id].active)
1644 return (unsigned long)p->uclamp[clamp_id].value;
1645
1646 uc_eff = uclamp_eff_get(p, clamp_id);
1647
1648 return (unsigned long)uc_eff.value;
1649 }
1650
1651 /*
1652 * When a task is enqueued on a rq, the clamp bucket currently defined by the
1653 * task's uclamp::bucket_id is refcounted on that rq. This also immediately
1654 * updates the rq's clamp value if required.
1655 *
1656 * Tasks can have a task-specific value requested from user-space, track
1657 * within each bucket the maximum value for tasks refcounted in it.
1658 * This "local max aggregation" allows to track the exact "requested" value
1659 * for each bucket when all its RUNNABLE tasks require the same clamp.
1660 */
uclamp_rq_inc_id(struct rq * rq,struct task_struct * p,enum uclamp_id clamp_id)1661 static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
1662 enum uclamp_id clamp_id)
1663 {
1664 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1665 struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1666 struct uclamp_bucket *bucket;
1667
1668 lockdep_assert_rq_held(rq);
1669
1670 /* Update task effective clamp */
1671 p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);
1672
1673 bucket = &uc_rq->bucket[uc_se->bucket_id];
1674 bucket->tasks++;
1675 uc_se->active = true;
1676
1677 uclamp_idle_reset(rq, clamp_id, uc_se->value);
1678
1679 /*
1680 * Local max aggregation: rq buckets always track the max
1681 * "requested" clamp value of its RUNNABLE tasks.
1682 */
1683 if (bucket->tasks == 1 || uc_se->value > bucket->value)
1684 bucket->value = uc_se->value;
1685
1686 if (uc_se->value > uclamp_rq_get(rq, clamp_id))
1687 uclamp_rq_set(rq, clamp_id, uc_se->value);
1688 }
1689
1690 /*
1691 * When a task is dequeued from a rq, the clamp bucket refcounted by the task
1692 * is released. If this is the last task reference counting the rq's max
1693 * active clamp value, then the rq's clamp value is updated.
1694 *
1695 * Both refcounted tasks and rq's cached clamp values are expected to be
1696 * always valid. If it's detected they are not, as defensive programming,
1697 * enforce the expected state and warn.
1698 */
uclamp_rq_dec_id(struct rq * rq,struct task_struct * p,enum uclamp_id clamp_id)1699 static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
1700 enum uclamp_id clamp_id)
1701 {
1702 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1703 struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1704 struct uclamp_bucket *bucket;
1705 unsigned int bkt_clamp;
1706 unsigned int rq_clamp;
1707
1708 lockdep_assert_rq_held(rq);
1709
1710 /*
1711 * If sched_uclamp_used was enabled after task @p was enqueued,
1712 * we could end up with unbalanced call to uclamp_rq_dec_id().
1713 *
1714 * In this case the uc_se->active flag should be false since no uclamp
1715 * accounting was performed at enqueue time and we can just return
1716 * here.
1717 *
1718 * Need to be careful of the following enqueue/dequeue ordering
1719 * problem too
1720 *
1721 * enqueue(taskA)
1722 * // sched_uclamp_used gets enabled
1723 * enqueue(taskB)
1724 * dequeue(taskA)
1725 * // Must not decrement bucket->tasks here
1726 * dequeue(taskB)
1727 *
1728 * where we could end up with stale data in uc_se and
1729 * bucket[uc_se->bucket_id].
1730 *
1731 * The following check here eliminates the possibility of such race.
1732 */
1733 if (unlikely(!uc_se->active))
1734 return;
1735
1736 bucket = &uc_rq->bucket[uc_se->bucket_id];
1737
1738 WARN_ON_ONCE(!bucket->tasks);
1739 if (likely(bucket->tasks))
1740 bucket->tasks--;
1741
1742 uc_se->active = false;
1743
1744 /*
1745 * Keep "local max aggregation" simple and accept to (possibly)
1746 * overboost some RUNNABLE tasks in the same bucket.
1747 * The rq clamp bucket value is reset to its base value whenever
1748 * there are no more RUNNABLE tasks refcounting it.
1749 */
1750 if (likely(bucket->tasks))
1751 return;
1752
1753 rq_clamp = uclamp_rq_get(rq, clamp_id);
1754 /*
1755 * Defensive programming: this should never happen. If it happens,
1756 * e.g. due to future modification, warn and fix up the expected value.
1757 */
1758 WARN_ON_ONCE(bucket->value > rq_clamp);
1759 if (bucket->value >= rq_clamp) {
1760 bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
1761 uclamp_rq_set(rq, clamp_id, bkt_clamp);
1762 }
1763 }
1764
uclamp_rq_inc(struct rq * rq,struct task_struct * p,int flags)1765 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p, int flags)
1766 {
1767 enum uclamp_id clamp_id;
1768
1769 /*
1770 * Avoid any overhead until uclamp is actually used by the userspace.
1771 *
1772 * The condition is constructed such that a NOP is generated when
1773 * sched_uclamp_used is disabled.
1774 */
1775 if (!uclamp_is_used())
1776 return;
1777
1778 if (unlikely(!p->sched_class->uclamp_enabled))
1779 return;
1780
1781 /* Only inc the delayed task which being woken up. */
1782 if (p->se.sched_delayed && !(flags & ENQUEUE_DELAYED))
1783 return;
1784
1785 for_each_clamp_id(clamp_id)
1786 uclamp_rq_inc_id(rq, p, clamp_id);
1787
1788 /* Reset clamp idle holding when there is one RUNNABLE task */
1789 if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
1790 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
1791 }
1792
uclamp_rq_dec(struct rq * rq,struct task_struct * p)1793 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
1794 {
1795 enum uclamp_id clamp_id;
1796
1797 /*
1798 * Avoid any overhead until uclamp is actually used by the userspace.
1799 *
1800 * The condition is constructed such that a NOP is generated when
1801 * sched_uclamp_used is disabled.
1802 */
1803 if (!uclamp_is_used())
1804 return;
1805
1806 if (unlikely(!p->sched_class->uclamp_enabled))
1807 return;
1808
1809 if (p->se.sched_delayed)
1810 return;
1811
1812 for_each_clamp_id(clamp_id)
1813 uclamp_rq_dec_id(rq, p, clamp_id);
1814 }
1815
1816 static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p,
1817 enum uclamp_id clamp_id)
1818 {
1819 if (!p->uclamp[clamp_id].active)
1820 return;
1821
1822 uclamp_rq_dec_id(rq, p, clamp_id);
1823 uclamp_rq_inc_id(rq, p, clamp_id);
1824
1825 /*
1826 * Make sure to clear the idle flag if we've transiently reached 0
1827 * active tasks on rq.
1828 */
1829 if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE))
1830 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
1831 }
1832
1833 static inline void
1834 uclamp_update_active(struct task_struct *p)
1835 {
1836 enum uclamp_id clamp_id;
1837 struct rq_flags rf;
1838 struct rq *rq;
1839
1840 /*
1841 * Lock the task and the rq where the task is (or was) queued.
1842 *
1843 * We might lock the (previous) rq of a !RUNNABLE task, but that's the
1844 * price to pay to safely serialize util_{min,max} updates with
1845 * enqueues, dequeues and migration operations.
1846 * This is the same locking schema used by __set_cpus_allowed_ptr().
1847 */
1848 rq = task_rq_lock(p, &rf);
1849
1850 /*
1851 * Setting the clamp bucket is serialized by task_rq_lock().
1852 * If the task is not yet RUNNABLE and its task_struct is not
1853 * affecting a valid clamp bucket, the next time it's enqueued,
1854 * it will already see the updated clamp bucket value.
1855 */
1856 for_each_clamp_id(clamp_id)
1857 uclamp_rq_reinc_id(rq, p, clamp_id);
1858
1859 task_rq_unlock(rq, p, &rf);
1860 }
1861
1862 #ifdef CONFIG_UCLAMP_TASK_GROUP
1863 static inline void
1864 uclamp_update_active_tasks(struct cgroup_subsys_state *css)
1865 {
1866 struct css_task_iter it;
1867 struct task_struct *p;
1868
1869 css_task_iter_start(css, 0, &it);
1870 while ((p = css_task_iter_next(&it)))
1871 uclamp_update_active(p);
1872 css_task_iter_end(&it);
1873 }
1874
1875 static void cpu_util_update_eff(struct cgroup_subsys_state *css);
1876 #endif
1877
1878 #ifdef CONFIG_SYSCTL
1879 #ifdef CONFIG_UCLAMP_TASK_GROUP
1880 static void uclamp_update_root_tg(void)
1881 {
1882 struct task_group *tg = &root_task_group;
1883
1884 uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN],
1885 sysctl_sched_uclamp_util_min, false);
1886 uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
1887 sysctl_sched_uclamp_util_max, false);
1888
1889 guard(rcu)();
1890 cpu_util_update_eff(&root_task_group.css);
1891 }
1892 #else
1893 static void uclamp_update_root_tg(void) { }
1894 #endif
1895
1896 static void uclamp_sync_util_min_rt_default(void)
1897 {
1898 struct task_struct *g, *p;
1899
1900 /*
1901 * copy_process() sysctl_uclamp
1902 * uclamp_min_rt = X;
1903 * write_lock(&tasklist_lock) read_lock(&tasklist_lock)
1904 * // link thread smp_mb__after_spinlock()
1905 * write_unlock(&tasklist_lock) read_unlock(&tasklist_lock);
1906 * sched_post_fork() for_each_process_thread()
1907 * __uclamp_sync_rt() __uclamp_sync_rt()
1908 *
1909 * Ensures that either sched_post_fork() will observe the new
1910 * uclamp_min_rt or for_each_process_thread() will observe the new
1911 * task.
1912 */
1913 read_lock(&tasklist_lock);
1914 smp_mb__after_spinlock();
1915 read_unlock(&tasklist_lock);
1916
1917 guard(rcu)();
1918 for_each_process_thread(g, p)
1919 uclamp_update_util_min_rt_default(p);
1920 }
1921
1922 static int sysctl_sched_uclamp_handler(const struct ctl_table *table, int write,
1923 void *buffer, size_t *lenp, loff_t *ppos)
1924 {
1925 bool update_root_tg = false;
1926 int old_min, old_max, old_min_rt;
1927 int result;
1928
1929 guard(mutex)(&uclamp_mutex);
1930
1931 old_min = sysctl_sched_uclamp_util_min;
1932 old_max = sysctl_sched_uclamp_util_max;
1933 old_min_rt = sysctl_sched_uclamp_util_min_rt_default;
1934
1935 result = proc_dointvec(table, write, buffer, lenp, ppos);
1936 if (result)
1937 goto undo;
1938 if (!write)
1939 return 0;
1940
1941 if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
1942 sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE ||
1943 sysctl_sched_uclamp_util_min_rt_default > SCHED_CAPACITY_SCALE) {
1944
1945 result = -EINVAL;
1946 goto undo;
1947 }
1948
1949 if (old_min != sysctl_sched_uclamp_util_min) {
1950 uclamp_se_set(&uclamp_default[UCLAMP_MIN],
1951 sysctl_sched_uclamp_util_min, false);
1952 update_root_tg = true;
1953 }
1954 if (old_max != sysctl_sched_uclamp_util_max) {
1955 uclamp_se_set(&uclamp_default[UCLAMP_MAX],
1956 sysctl_sched_uclamp_util_max, false);
1957 update_root_tg = true;
1958 }
1959
1960 if (update_root_tg) {
1961 sched_uclamp_enable();
1962 uclamp_update_root_tg();
1963 }
1964
1965 if (old_min_rt != sysctl_sched_uclamp_util_min_rt_default) {
1966 sched_uclamp_enable();
1967 uclamp_sync_util_min_rt_default();
1968 }
1969
1970 /*
1971 * We update all RUNNABLE tasks only when task groups are in use.
1972 * Otherwise, keep it simple and do just a lazy update at each next
1973 * task enqueue time.
1974 */
1975 return 0;
1976
1977 undo:
1978 sysctl_sched_uclamp_util_min = old_min;
1979 sysctl_sched_uclamp_util_max = old_max;
1980 sysctl_sched_uclamp_util_min_rt_default = old_min_rt;
1981 return result;
1982 }
1983 #endif /* CONFIG_SYSCTL */
1984
1985 static void uclamp_fork(struct task_struct *p)
1986 {
1987 enum uclamp_id clamp_id;
1988
1989 /*
1990 * We don't need to hold task_rq_lock() when updating p->uclamp_* here
1991 * as the task is still at its early fork stages.
1992 */
1993 for_each_clamp_id(clamp_id)
1994 p->uclamp[clamp_id].active = false;
1995
1996 if (likely(!p->sched_reset_on_fork))
1997 return;
1998
1999 for_each_clamp_id(clamp_id) {
2000 uclamp_se_set(&p->uclamp_req[clamp_id],
2001 uclamp_none(clamp_id), false);
2002 }
2003 }
2004
2005 static void uclamp_post_fork(struct task_struct *p)
2006 {
2007 uclamp_update_util_min_rt_default(p);
2008 }
2009
2010 static void __init init_uclamp_rq(struct rq *rq)
2011 {
2012 enum uclamp_id clamp_id;
2013 struct uclamp_rq *uc_rq = rq->uclamp;
2014
2015 for_each_clamp_id(clamp_id) {
2016 uc_rq[clamp_id] = (struct uclamp_rq) {
2017 .value = uclamp_none(clamp_id)
2018 };
2019 }
2020
2021 rq->uclamp_flags = UCLAMP_FLAG_IDLE;
2022 }
2023
2024 static void __init init_uclamp(void)
2025 {
2026 struct uclamp_se uc_max = {};
2027 enum uclamp_id clamp_id;
2028 int cpu;
2029
2030 for_each_possible_cpu(cpu)
2031 init_uclamp_rq(cpu_rq(cpu));
2032
2033 for_each_clamp_id(clamp_id) {
2034 uclamp_se_set(&init_task.uclamp_req[clamp_id],
2035 uclamp_none(clamp_id), false);
2036 }
2037
2038 /* System defaults allow max clamp values for both indexes */
2039 uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false);
2040 for_each_clamp_id(clamp_id) {
2041 uclamp_default[clamp_id] = uc_max;
2042 #ifdef CONFIG_UCLAMP_TASK_GROUP
2043 root_task_group.uclamp_req[clamp_id] = uc_max;
2044 root_task_group.uclamp[clamp_id] = uc_max;
2045 #endif
2046 }
2047 }
2048
2049 #else /* !CONFIG_UCLAMP_TASK: */
2050 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p, int flags) { }
2051 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
2052 static inline void uclamp_fork(struct task_struct *p) { }
2053 static inline void uclamp_post_fork(struct task_struct *p) { }
2054 static inline void init_uclamp(void) { }
2055 #endif /* !CONFIG_UCLAMP_TASK */
2056
2057 bool sched_task_on_rq(struct task_struct *p)
2058 {
2059 return task_on_rq_queued(p);
2060 }
2061
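/*
 * Note: get_wchan() is what feeds /proc/<pid>/wchan. It returns the address
 * the blocked task is waiting at, or 0 when the task is running or is the
 * caller itself.
 */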
2062 unsigned long get_wchan(struct task_struct *p)
2063 {
2064 unsigned long ip = 0;
2065 unsigned int state;
2066
2067 if (!p || p == current)
2068 return 0;
2069
2070 /* Only get wchan if task is blocked and we can keep it that way. */
2071 raw_spin_lock_irq(&p->pi_lock);
2072 state = READ_ONCE(p->__state);
2073 smp_rmb(); /* see try_to_wake_up() */
2074 if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
2075 ip = __get_wchan(p);
2076 raw_spin_unlock_irq(&p->pi_lock);
2077
2078 return ip;
2079 }
2080
2081 void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
2082 {
2083 if (!(flags & ENQUEUE_NOCLOCK))
2084 update_rq_clock(rq);
2085
2086 /*
2087 * Can be before ->enqueue_task() because uclamp considers the
2088 * ENQUEUE_DELAYED task before its ->sched_delayed gets cleared
2089 * in ->enqueue_task().
2090 */
2091 uclamp_rq_inc(rq, p, flags);
2092
2093 rq->queue_mask |= p->sched_class->queue_mask;
2094 p->sched_class->enqueue_task(rq, p, flags);
2095
2096 psi_enqueue(p, flags);
2097
2098 if (!(flags & ENQUEUE_RESTORE))
2099 sched_info_enqueue(rq, p);
2100
2101 if (sched_core_enabled(rq))
2102 sched_core_enqueue(rq, p);
2103 }
2104
2105 /*
2106 * Must only return false when DEQUEUE_SLEEP.
2107 */
2108 inline bool dequeue_task(struct rq *rq, struct task_struct *p, int flags)
2109 {
2110 if (sched_core_enabled(rq))
2111 sched_core_dequeue(rq, p, flags);
2112
2113 if (!(flags & DEQUEUE_NOCLOCK))
2114 update_rq_clock(rq);
2115
2116 if (!(flags & DEQUEUE_SAVE))
2117 sched_info_dequeue(rq, p);
2118
2119 psi_dequeue(p, flags);
2120
2121 /*
2122 * Must be before ->dequeue_task() because ->dequeue_task() can 'fail'
2123 * and mark the task ->sched_delayed.
2124 */
2125 uclamp_rq_dec(rq, p);
2126 rq->queue_mask |= p->sched_class->queue_mask;
2127 return p->sched_class->dequeue_task(rq, p, flags);
2128 }
2129
2130 void activate_task(struct rq *rq, struct task_struct *p, int flags)
2131 {
2132 if (task_on_rq_migrating(p))
2133 flags |= ENQUEUE_MIGRATED;
2134
2135 enqueue_task(rq, p, flags);
2136
2137 WRITE_ONCE(p->on_rq, TASK_ON_RQ_QUEUED);
2138 ASSERT_EXCLUSIVE_WRITER(p->on_rq);
2139 }
2140
2141 void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
2142 {
2143 WARN_ON_ONCE(flags & DEQUEUE_SLEEP);
2144
2145 WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
2146 ASSERT_EXCLUSIVE_WRITER(p->on_rq);
2147
2148 /*
2149 * Code explicitly relies on TASK_ON_RQ_MIGRATING being set *before*
2150 * dequeue_task() and cleared *after* enqueue_task().
2151 */
2152
2153 dequeue_task(rq, p, flags);
2154 }
2155
2156 static void block_task(struct rq *rq, struct task_struct *p, int flags)
2157 {
2158 if (dequeue_task(rq, p, DEQUEUE_SLEEP | flags))
2159 __block_task(rq, p);
2160 }
2161
2162 /**
2163 * task_curr - is this task currently executing on a CPU?
2164 * @p: the task in question.
2165 *
2166 * Return: 1 if the task is currently executing. 0 otherwise.
2167 */
2168 inline int task_curr(const struct task_struct *p)
2169 {
2170 return cpu_curr(task_cpu(p)) == p;
2171 }
2172
2173 void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags)
2174 {
2175 struct task_struct *donor = rq->donor;
2176
2177 if (p->sched_class == donor->sched_class)
2178 donor->sched_class->wakeup_preempt(rq, p, flags);
2179 else if (sched_class_above(p->sched_class, donor->sched_class))
2180 resched_curr(rq);
2181
2182 /*
2183 * A queue event has occurred, and we're going to schedule. In
2184 * this case, we can save a useless back to back clock update.
2185 */
2186 if (task_on_rq_queued(donor) && test_tsk_need_resched(rq->curr))
2187 rq_clock_skip_update(rq);
2188 }
2189
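/*
 * Returns 1 if @p->__state matches @state, -1 if only @p->saved_state
 * matches (i.e. the task is transiently blocked on a sleeping rtlock or
 * frozen and will be restored to a matching state), and 0 otherwise.
 */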
2190 static __always_inline
2191 int __task_state_match(struct task_struct *p, unsigned int state)
2192 {
2193 if (READ_ONCE(p->__state) & state)
2194 return 1;
2195
2196 if (READ_ONCE(p->saved_state) & state)
2197 return -1;
2198
2199 return 0;
2200 }
2201
2202 static __always_inline
2203 int task_state_match(struct task_struct *p, unsigned int state)
2204 {
2205 /*
2206 * Serialize against current_save_and_set_rtlock_wait_state(),
2207 * current_restore_rtlock_saved_state(), and __refrigerator().
2208 */
2209 guard(raw_spinlock_irq)(&p->pi_lock);
2210 return __task_state_match(p, state);
2211 }
2212
2213 /*
2214 * wait_task_inactive - wait for a thread to unschedule.
2215 *
2216 * Wait for the thread to block in any of the states set in @match_state.
2217 * If it changes, i.e. @p might have woken up, then return zero. When we
2218 * succeed in waiting for @p to be off its CPU, we return a positive number
2219 * (its total switch count). If a second call a short while later returns the
2220 * same number, the caller can be sure that @p has remained unscheduled the
2221 * whole time.
2222 *
2223 * The caller must ensure that the task *will* unschedule sometime soon,
2224 * else this function might spin for a *long* time. This function can't
2225 * be called with interrupts off, or it may introduce deadlock with
2226 * smp_call_function() if an IPI is sent by the same process we are
2227 * waiting to become inactive.
2228 */
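/*
 * A sketch of the intended calling pattern (illustrative only; the
 * inspect_blocked_task() helper is hypothetical and not part of this file):
 *
 *	unsigned long ncsw = wait_task_inactive(p, TASK_TRACED);
 *
 *	if (!ncsw)
 *		return -EAGAIN;
 *	inspect_blocked_task(p);
 *	if (wait_task_inactive(p, TASK_TRACED) != ncsw)
 *		return -EAGAIN;
 *
 * A zero return means @p left the matched state; two identical non-zero
 * switch counts mean @p stayed off the CPU for the whole interval.
 */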
2229 unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
2230 {
2231 int running, queued, match;
2232 struct rq_flags rf;
2233 unsigned long ncsw;
2234 struct rq *rq;
2235
2236 for (;;) {
2237 /*
2238 * We do the initial early heuristics without holding
2239 * any task-queue locks at all. We'll only try to get
2240 * the runqueue lock when things look like they will
2241 * work out!
2242 */
2243 rq = task_rq(p);
2244
2245 /*
2246 * If the task is actively running on another CPU
2247 * still, just relax and busy-wait without holding
2248 * any locks.
2249 *
2250 * NOTE! Since we don't hold any locks, it's not
2251 * even sure that "rq" stays as the right runqueue!
2252 * But we don't care, since "task_on_cpu()" will
2253 * return false if the runqueue has changed and p
2254 * is actually now running somewhere else!
2255 */
2256 while (task_on_cpu(rq, p)) {
2257 if (!task_state_match(p, match_state))
2258 return 0;
2259 cpu_relax();
2260 }
2261
2262 /*
2263 * Ok, time to look more closely! We need the rq
2264 * lock now, to be *sure*. If we're wrong, we'll
2265 * just go back and repeat.
2266 */
2267 rq = task_rq_lock(p, &rf);
2268 /*
2269 * If the task is sched_delayed, force-dequeue it to avoid always
2270 * hitting the tick timeout in the queued case.
2271 */
2272 if (p->se.sched_delayed)
2273 dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
2274 trace_sched_wait_task(p);
2275 running = task_on_cpu(rq, p);
2276 queued = task_on_rq_queued(p);
2277 ncsw = 0;
2278 if ((match = __task_state_match(p, match_state))) {
2279 /*
2280 * When matching on p->saved_state, consider this task
2281 * still queued so it will wait.
2282 */
2283 if (match < 0)
2284 queued = 1;
2285 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
2286 }
2287 task_rq_unlock(rq, p, &rf);
2288
2289 /*
2290 * If it changed from the expected state, bail out now.
2291 */
2292 if (unlikely(!ncsw))
2293 break;
2294
2295 /*
2296 * Was it really running after all now that we
2297 * checked with the proper locks actually held?
2298 *
2299 * Oops. Go back and try again..
2300 */
2301 if (unlikely(running)) {
2302 cpu_relax();
2303 continue;
2304 }
2305
2306 /*
2307 * It's not enough that it's not actively running,
2308 * it must be off the runqueue _entirely_, and not
2309 * preempted!
2310 *
2311 * So if it was still runnable (but just not actively
2312 * running right now), it's preempted, and we should
2313 * yield - it could be a while.
2314 */
2315 if (unlikely(queued)) {
2316 ktime_t to = NSEC_PER_SEC / HZ;
2317
2318 set_current_state(TASK_UNINTERRUPTIBLE);
2319 schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD);
2320 continue;
2321 }
2322
2323 /*
2324 * Ahh, all good. It wasn't running, and it wasn't
2325 * runnable, which means that it will never become
2326 * running in the future either. We're all done!
2327 */
2328 break;
2329 }
2330
2331 return ncsw;
2332 }
2333
2334 static void
2335 do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx);
2336
2337 static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
2338 {
2339 struct affinity_context ac = {
2340 .new_mask = cpumask_of(rq->cpu),
2341 .flags = SCA_MIGRATE_DISABLE,
2342 };
2343
2344 if (likely(!p->migration_disabled))
2345 return;
2346
2347 if (p->cpus_ptr != &p->cpus_mask)
2348 return;
2349
2350 scoped_guard (task_rq_lock, p)
2351 do_set_cpus_allowed(p, &ac);
2352 }
2353
2354 void ___migrate_enable(void)
2355 {
2356 struct task_struct *p = current;
2357 struct affinity_context ac = {
2358 .new_mask = &p->cpus_mask,
2359 .flags = SCA_MIGRATE_ENABLE,
2360 };
2361
2362 __set_cpus_allowed_ptr(p, &ac);
2363 }
2364 EXPORT_SYMBOL_GPL(___migrate_enable);
2365
2366 void migrate_disable(void)
2367 {
2368 __migrate_disable();
2369 }
2370 EXPORT_SYMBOL_GPL(migrate_disable);
2371
2372 void migrate_enable(void)
2373 {
2374 __migrate_enable();
2375 }
2376 EXPORT_SYMBOL_GPL(migrate_enable);
2377
2378 static inline bool rq_has_pinned_tasks(struct rq *rq)
2379 {
2380 return rq->nr_pinned;
2381 }
2382
2383 /*
2384 * Per-CPU kthreads are allowed to run on !active && online CPUs, see
2385 * __set_cpus_allowed_ptr() and select_fallback_rq().
2386 */
2387 static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
2388 {
2389 /* When not in the task's cpumask, no point in looking further. */
2390 if (!task_allowed_on_cpu(p, cpu))
2391 return false;
2392
2393 /* migrate_disabled() must be allowed to finish. */
2394 if (is_migration_disabled(p))
2395 return cpu_online(cpu);
2396
2397 /* Non-kernel threads need an active CPU; merely online (coming up) or dying is not enough. */
2398 if (!(p->flags & PF_KTHREAD))
2399 return cpu_active(cpu);
2400
2401 /* KTHREAD_IS_PER_CPU is always allowed. */
2402 if (kthread_is_per_cpu(p))
2403 return cpu_online(cpu);
2404
2405 /* Regular kernel threads don't get to stay during offline. */
2406 if (cpu_dying(cpu))
2407 return false;
2408
2409 /* But are allowed during online. */
2410 return cpu_online(cpu);
2411 }
2412
2413 /*
2414 * This is how migration works:
2415 *
2416 * 1) we invoke migration_cpu_stop() on the target CPU using
2417 * stop_one_cpu().
2418 * 2) stopper starts to run (implicitly forcing the migrated thread
2419 * off the CPU)
2420 * 3) it checks whether the migrated task is still in the wrong runqueue.
2421 * 4) if it's in the wrong runqueue then the migration thread removes
2422 * it and puts it into the right queue.
2423 * 5) stopper completes and stop_one_cpu() returns and the migration
2424 * is done.
2425 */
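/*
 * A minimal sketch of driving the above (assumed caller, modelled on what
 * sched_exec() does later in this file; @p and @dest_cpu are placeholders):
 *
 *	struct migration_arg arg = { .task = p, .dest_cpu = dest_cpu };
 *
 *	stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
 *
 * stop_one_cpu() only returns once migration_cpu_stop() has run on @p's
 * CPU, which is what makes step 5) synchronous for the caller.
 */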
2426
2427 /*
2428 * move_queued_task - move a queued task to new rq.
2429 *
2430 * Returns (locked) new rq. Old rq's lock is released.
2431 */
2432 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
2433 struct task_struct *p, int new_cpu)
2434 {
2435 lockdep_assert_rq_held(rq);
2436
2437 deactivate_task(rq, p, DEQUEUE_NOCLOCK);
2438 set_task_cpu(p, new_cpu);
2439 rq_unlock(rq, rf);
2440
2441 rq = cpu_rq(new_cpu);
2442
2443 rq_lock(rq, rf);
2444 WARN_ON_ONCE(task_cpu(p) != new_cpu);
2445 activate_task(rq, p, 0);
2446 wakeup_preempt(rq, p, 0);
2447
2448 return rq;
2449 }
2450
2451 struct migration_arg {
2452 struct task_struct *task;
2453 int dest_cpu;
2454 struct set_affinity_pending *pending;
2455 };
2456
2457 /*
2458 * @refs: number of wait_for_completion()
2459 * @stop_pending: is @stop_work in use
2460 */
2461 struct set_affinity_pending {
2462 refcount_t refs;
2463 unsigned int stop_pending;
2464 struct completion done;
2465 struct cpu_stop_work stop_work;
2466 struct migration_arg arg;
2467 };
2468
2469 /*
2470 * Move (not current) task off this CPU, onto the destination CPU. We're doing
2471 * this because either it can't run here any more (set_cpus_allowed()
2472 * away from this CPU, or CPU going down), or because we're
2473 * attempting to rebalance this task on exec (sched_exec).
2474 *
2475 * So we race with normal scheduler movements, but that's OK, as long
2476 * as the task is no longer on this CPU.
2477 */
2478 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
2479 struct task_struct *p, int dest_cpu)
2480 {
2481 /* Affinity changed (again). */
2482 if (!is_cpu_allowed(p, dest_cpu))
2483 return rq;
2484
2485 rq = move_queued_task(rq, rf, p, dest_cpu);
2486
2487 return rq;
2488 }
2489
2490 /*
2491 * migration_cpu_stop - this will be executed by a high-prio stopper thread
2492 * and performs thread migration by bumping thread off CPU then
2493 * 'pushing' onto another runqueue.
2494 */
2495 static int migration_cpu_stop(void *data)
2496 {
2497 struct migration_arg *arg = data;
2498 struct set_affinity_pending *pending = arg->pending;
2499 struct task_struct *p = arg->task;
2500 struct rq *rq = this_rq();
2501 bool complete = false;
2502 struct rq_flags rf;
2503
2504 /*
2505 * The original target CPU might have gone down and we might
2506 * be on another CPU but it doesn't matter.
2507 */
2508 local_irq_save(rf.flags);
2509 /*
2510 * We need to explicitly wake pending tasks before running
2511 * __migrate_task() such that we will not miss enforcing cpus_ptr
2512 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
2513 */
2514 flush_smp_call_function_queue();
2515
2516 raw_spin_lock(&p->pi_lock);
2517 rq_lock(rq, &rf);
2518
2519 /*
2520 * If we were passed a pending, then ->stop_pending was set, thus
2521 * p->migration_pending must have remained stable.
2522 */
2523 WARN_ON_ONCE(pending && pending != p->migration_pending);
2524
2525 /*
2526 * If task_rq(p) != rq, it cannot be migrated here, because we're
2527 * holding rq->lock; if p->on_rq == 0 it cannot get enqueued because
2528 * we're holding p->pi_lock.
2529 */
2530 if (task_rq(p) == rq) {
2531 if (is_migration_disabled(p))
2532 goto out;
2533
2534 if (pending) {
2535 p->migration_pending = NULL;
2536 complete = true;
2537
2538 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask))
2539 goto out;
2540 }
2541
2542 if (task_on_rq_queued(p)) {
2543 update_rq_clock(rq);
2544 rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
2545 } else {
2546 p->wake_cpu = arg->dest_cpu;
2547 }
2548
2549 /*
2550 * XXX __migrate_task() can fail, at which point we might end
2551 * up running on a dodgy CPU, AFAICT this can only happen
2552 * during CPU hotplug, at which point we'll get pushed out
2553 * anyway, so it's probably not a big deal.
2554 */
2555
2556 } else if (pending) {
2557 /*
2558 * This happens when we get migrated between migrate_enable()'s
2559 * preempt_enable() and scheduling the stopper task. At that
2560 * point we're a regular task again and not current anymore.
2561 *
2562 * A !PREEMPT kernel has a giant hole here, which makes it far
2563 * more likely.
2564 */
2565
2566 /*
2567 * The task moved before the stopper got to run. We're holding
2568 * ->pi_lock, so the allowed mask is stable - if it got
2569 * somewhere allowed, we're done.
2570 */
2571 if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) {
2572 p->migration_pending = NULL;
2573 complete = true;
2574 goto out;
2575 }
2576
2577 /*
2578 * When migrate_enable() hits a rq mis-match we can't reliably
2579 * determine is_migration_disabled() and so have to chase after
2580 * it.
2581 */
2582 WARN_ON_ONCE(!pending->stop_pending);
2583 preempt_disable();
2584 rq_unlock(rq, &rf);
2585 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
2586 stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
2587 &pending->arg, &pending->stop_work);
2588 preempt_enable();
2589 return 0;
2590 }
2591 out:
2592 if (pending)
2593 pending->stop_pending = false;
2594 rq_unlock(rq, &rf);
2595 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
2596
2597 if (complete)
2598 complete_all(&pending->done);
2599
2600 return 0;
2601 }
2602
2603 int push_cpu_stop(void *arg)
2604 {
2605 struct rq *lowest_rq = NULL, *rq = this_rq();
2606 struct task_struct *p = arg;
2607
2608 raw_spin_lock_irq(&p->pi_lock);
2609 raw_spin_rq_lock(rq);
2610
2611 if (task_rq(p) != rq)
2612 goto out_unlock;
2613
2614 if (is_migration_disabled(p)) {
2615 p->migration_flags |= MDF_PUSH;
2616 goto out_unlock;
2617 }
2618
2619 p->migration_flags &= ~MDF_PUSH;
2620
2621 if (p->sched_class->find_lock_rq)
2622 lowest_rq = p->sched_class->find_lock_rq(p, rq);
2623
2624 if (!lowest_rq)
2625 goto out_unlock;
2626
2627 // XXX validate p is still the highest prio task
2628 if (task_rq(p) == rq) {
2629 move_queued_task_locked(rq, lowest_rq, p);
2630 resched_curr(lowest_rq);
2631 }
2632
2633 double_unlock_balance(rq, lowest_rq);
2634
2635 out_unlock:
2636 rq->push_busy = false;
2637 raw_spin_rq_unlock(rq);
2638 raw_spin_unlock_irq(&p->pi_lock);
2639
2640 put_task_struct(p);
2641 return 0;
2642 }
2643
2644 static inline void mm_update_cpus_allowed(struct mm_struct *mm, const cpumask_t *affmask);
2645
2646 /*
2647 * sched_class::set_cpus_allowed must do the below, but is not required to
2648 * actually call this function.
2649 */
2650 void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx)
2651 {
2652 if (ctx->flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) {
2653 p->cpus_ptr = ctx->new_mask;
2654 return;
2655 }
2656
2657 cpumask_copy(&p->cpus_mask, ctx->new_mask);
2658 p->nr_cpus_allowed = cpumask_weight(ctx->new_mask);
2659 mm_update_cpus_allowed(p->mm, ctx->new_mask);
2660
2661 /*
2662 * Swap in a new user_cpus_ptr if SCA_USER flag set
2663 */
2664 if (ctx->flags & SCA_USER)
2665 swap(p->user_cpus_ptr, ctx->user_mask);
2666 }
2667
2668 static void
2669 do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
2670 {
2671 scoped_guard (sched_change, p, DEQUEUE_SAVE)
2672 p->sched_class->set_cpus_allowed(p, ctx);
2673 }
2674
2675 /*
2676 * Used for kthread_bind() and select_fallback_rq(), in both cases the user
2677 * affinity (if any) should be destroyed too.
2678 */
2679 void set_cpus_allowed_force(struct task_struct *p, const struct cpumask *new_mask)
2680 {
2681 struct affinity_context ac = {
2682 .new_mask = new_mask,
2683 .user_mask = NULL,
2684 .flags = SCA_USER, /* clear the user requested mask */
2685 };
2686 union cpumask_rcuhead {
2687 cpumask_t cpumask;
2688 struct rcu_head rcu;
2689 };
2690
2691 scoped_guard (__task_rq_lock, p)
2692 do_set_cpus_allowed(p, &ac);
2693
2694 /*
2695 * Because this is called with p->pi_lock held, it is not possible
2696 * to use kfree() here (when PREEMPT_RT=y), therefore punt to using
2697 * kfree_rcu().
2698 */
2699 kfree_rcu((union cpumask_rcuhead *)ac.user_mask, rcu);
2700 }
2701
2702 int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
2703 int node)
2704 {
2705 cpumask_t *user_mask;
2706 unsigned long flags;
2707
2708 /*
2709 * Always clear dst->user_cpus_ptr first as their user_cpus_ptr's
2710 * may differ by now due to racing.
2711 */
2712 dst->user_cpus_ptr = NULL;
2713
2714 /*
2715 * This check is racy and losing the race is a valid situation.
2716 * It is not worth the extra overhead of taking the pi_lock on
2717 * every fork/clone.
2718 */
2719 if (data_race(!src->user_cpus_ptr))
2720 return 0;
2721
2722 user_mask = alloc_user_cpus_ptr(node);
2723 if (!user_mask)
2724 return -ENOMEM;
2725
2726 /*
2727 * Use pi_lock to protect content of user_cpus_ptr
2728 *
2729 * Though unlikely, user_cpus_ptr can be reset to NULL by a concurrent
2730 * set_cpus_allowed_force().
2731 */
2732 raw_spin_lock_irqsave(&src->pi_lock, flags);
2733 if (src->user_cpus_ptr) {
2734 swap(dst->user_cpus_ptr, user_mask);
2735 cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
2736 }
2737 raw_spin_unlock_irqrestore(&src->pi_lock, flags);
2738
2739 if (unlikely(user_mask))
2740 kfree(user_mask);
2741
2742 return 0;
2743 }
2744
2745 static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p)
2746 {
2747 struct cpumask *user_mask = NULL;
2748
2749 swap(p->user_cpus_ptr, user_mask);
2750
2751 return user_mask;
2752 }
2753
2754 void release_user_cpus_ptr(struct task_struct *p)
2755 {
2756 kfree(clear_user_cpus_ptr(p));
2757 }
2758
2759 /*
2760 * This function is wildly self concurrent; here be dragons.
2761 *
2762 *
2763 * When given a valid mask, __set_cpus_allowed_ptr() must block until the
2764 * designated task is enqueued on an allowed CPU. If that task is currently
2765 * running, we have to kick it out using the CPU stopper.
2766 *
2767 * Migrate-Disable comes along and tramples all over our nice sandcastle.
2768 * Consider:
2769 *
2770 * Initial conditions: P0->cpus_mask = [0, 1]
2771 *
2772 * P0@CPU0 P1
2773 *
2774 * migrate_disable();
2775 * <preempted>
2776 * set_cpus_allowed_ptr(P0, [1]);
2777 *
2778 * P1 *cannot* return from this set_cpus_allowed_ptr() call until P0 executes
2779 * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region).
2780 * This means we need the following scheme:
2781 *
2782 * P0@CPU0 P1
2783 *
2784 * migrate_disable();
2785 * <preempted>
2786 * set_cpus_allowed_ptr(P0, [1]);
2787 * <blocks>
2788 * <resumes>
2789 * migrate_enable();
2790 * __set_cpus_allowed_ptr();
2791 * <wakes local stopper>
2792 * `--> <woken on migration completion>
2793 *
2794 * Now the fun stuff: there may be several P1-like tasks, i.e. multiple
2795 * concurrent set_cpus_allowed_ptr(P0, [*]) calls. CPU affinity changes of any
2796 * task p are serialized by p->pi_lock, which we can leverage: the one that
2797 * should come into effect at the end of the Migrate-Disable region is the last
2798 * one. This means we only need to track a single cpumask (i.e. p->cpus_mask),
2799 * but we still need to properly signal those waiting tasks at the appropriate
2800 * moment.
2801 *
2802 * This is implemented using struct set_affinity_pending. The first
2803 * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will
2804 * setup an instance of that struct and install it on the targeted task_struct.
2805 * Any and all further callers will reuse that instance. Those then wait for
2806 * a completion signaled at the tail of the CPU stopper callback (1), triggered
2807 * on the end of the Migrate-Disable region (i.e. outermost migrate_enable()).
2808 *
2809 *
2810 * (1) In the cases covered above. There is one more where the completion is
2811 * signaled within affine_move_task() itself: when a subsequent affinity request
2812 * occurs after the stopper bailed out due to the targeted task still being
2813 * Migrate-Disabled. Consider:
2814 *
2815 * Initial conditions: P0->cpus_mask = [0, 1]
2816 *
2817 * CPU0 P1 P2
2818 * <P0>
2819 * migrate_disable();
2820 * <preempted>
2821 * set_cpus_allowed_ptr(P0, [1]);
2822 * <blocks>
2823 * <migration/0>
2824 * migration_cpu_stop()
2825 * is_migration_disabled()
2826 * <bails>
2827 * set_cpus_allowed_ptr(P0, [0, 1]);
2828 * <signal completion>
2829 * <awakes>
2830 *
2831 * Note that the above is safe vs a concurrent migrate_enable(), as any
2832 * pending affinity completion is preceded by an uninstallation of
2833 * p->migration_pending done with p->pi_lock held.
2834 */
2835 static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf,
2836 int dest_cpu, unsigned int flags)
2837 __releases(rq->lock)
2838 __releases(p->pi_lock)
2839 {
2840 struct set_affinity_pending my_pending = { }, *pending = NULL;
2841 bool stop_pending, complete = false;
2842
2843 /*
2844 * Can the task run on the task's current CPU? If so, we're done
2845 *
2846 * We are also done if the task is the current donor, boosting a lock-
2847 * holding proxy (and potentially has been migrated outside its
2848 * current or previous affinity mask)
2849 */
2850 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask) ||
2851 (task_current_donor(rq, p) && !task_current(rq, p))) {
2852 struct task_struct *push_task = NULL;
2853
2854 if ((flags & SCA_MIGRATE_ENABLE) &&
2855 (p->migration_flags & MDF_PUSH) && !rq->push_busy) {
2856 rq->push_busy = true;
2857 push_task = get_task_struct(p);
2858 }
2859
2860 /*
2861 * If there are pending waiters, but no pending stop_work,
2862 * then complete now.
2863 */
2864 pending = p->migration_pending;
2865 if (pending && !pending->stop_pending) {
2866 p->migration_pending = NULL;
2867 complete = true;
2868 }
2869
2870 preempt_disable();
2871 task_rq_unlock(rq, p, rf);
2872 if (push_task) {
2873 stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
2874 p, &rq->push_work);
2875 }
2876 preempt_enable();
2877
2878 if (complete)
2879 complete_all(&pending->done);
2880
2881 return 0;
2882 }
2883
2884 if (!(flags & SCA_MIGRATE_ENABLE)) {
2885 /* serialized by p->pi_lock */
2886 if (!p->migration_pending) {
2887 /* Install the request */
2888 refcount_set(&my_pending.refs, 1);
2889 init_completion(&my_pending.done);
2890 my_pending.arg = (struct migration_arg) {
2891 .task = p,
2892 .dest_cpu = dest_cpu,
2893 .pending = &my_pending,
2894 };
2895
2896 p->migration_pending = &my_pending;
2897 } else {
2898 pending = p->migration_pending;
2899 refcount_inc(&pending->refs);
2900 /*
2901 * Affinity has changed, but we've already installed a
2902 * pending. migration_cpu_stop() *must* see this, else
2903 * we risk a completion of the pending despite having a
2904 * task on a disallowed CPU.
2905 *
2906 * Serialized by p->pi_lock, so this is safe.
2907 */
2908 pending->arg.dest_cpu = dest_cpu;
2909 }
2910 }
2911 pending = p->migration_pending;
2912 /*
2913 * - !MIGRATE_ENABLE:
2914 * we'll have installed a pending if there wasn't one already.
2915 *
2916 * - MIGRATE_ENABLE:
2917 * we're here because the current CPU isn't matching anymore,
2918 * the only way that can happen is because of a concurrent
2919 * set_cpus_allowed_ptr() call, which should then still be
2920 * pending completion.
2921 *
2922 * Either way, we really should have a @pending here.
2923 */
2924 if (WARN_ON_ONCE(!pending)) {
2925 task_rq_unlock(rq, p, rf);
2926 return -EINVAL;
2927 }
2928
2929 if (task_on_cpu(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) {
2930 /*
2931 * MIGRATE_ENABLE gets here because 'p == current', but for
2932 * anything else we cannot do is_migration_disabled(), punt
2933 * and have the stopper function handle it all race-free.
2934 */
2935 stop_pending = pending->stop_pending;
2936 if (!stop_pending)
2937 pending->stop_pending = true;
2938
2939 if (flags & SCA_MIGRATE_ENABLE)
2940 p->migration_flags &= ~MDF_PUSH;
2941
2942 preempt_disable();
2943 task_rq_unlock(rq, p, rf);
2944 if (!stop_pending) {
2945 stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
2946 &pending->arg, &pending->stop_work);
2947 }
2948 preempt_enable();
2949
2950 if (flags & SCA_MIGRATE_ENABLE)
2951 return 0;
2952 } else {
2953
2954 if (!is_migration_disabled(p)) {
2955 if (task_on_rq_queued(p))
2956 rq = move_queued_task(rq, rf, p, dest_cpu);
2957
2958 if (!pending->stop_pending) {
2959 p->migration_pending = NULL;
2960 complete = true;
2961 }
2962 }
2963 task_rq_unlock(rq, p, rf);
2964
2965 if (complete)
2966 complete_all(&pending->done);
2967 }
2968
2969 wait_for_completion(&pending->done);
2970
2971 if (refcount_dec_and_test(&pending->refs))
2972 wake_up_var(&pending->refs); /* No UaF, just an address */
2973
2974 /*
2975 * Block the original owner of &pending until all subsequent callers
2976 * have seen the completion and decremented the refcount
2977 */
2978 wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs));
2979
2980 /* ARGH */
2981 WARN_ON_ONCE(my_pending.stop_pending);
2982
2983 return 0;
2984 }
2985
2986 /*
2987 * Called with both p->pi_lock and rq->lock held; drops both before returning.
2988 */
2989 static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
2990 struct affinity_context *ctx,
2991 struct rq *rq,
2992 struct rq_flags *rf)
2993 __releases(rq->lock)
2994 __releases(p->pi_lock)
2995 {
2996 const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
2997 const struct cpumask *cpu_valid_mask = cpu_active_mask;
2998 bool kthread = p->flags & PF_KTHREAD;
2999 unsigned int dest_cpu;
3000 int ret = 0;
3001
3002 if (kthread || is_migration_disabled(p)) {
3003 /*
3004 * Kernel threads are allowed on online && !active CPUs,
3005 * however, during cpu-hot-unplug, even these might get pushed
3006 * away if not KTHREAD_IS_PER_CPU.
3007 *
3008 * Specifically, migration_disabled() tasks must not fail the
3009 * cpumask_any_and_distribute() pick below, esp. so on
3010 * SCA_MIGRATE_ENABLE, otherwise we'll not call
3011 * set_cpus_allowed_common() and actually reset p->cpus_ptr.
3012 */
3013 cpu_valid_mask = cpu_online_mask;
3014 }
3015
3016 if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) {
3017 ret = -EINVAL;
3018 goto out;
3019 }
3020
3021 /*
3022 * Must re-check here, to close a race against __kthread_bind();
3023 * sched_setaffinity() is not guaranteed to observe the flag.
3024 */
3025 if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
3026 ret = -EINVAL;
3027 goto out;
3028 }
3029
3030 if (!(ctx->flags & SCA_MIGRATE_ENABLE)) {
3031 if (cpumask_equal(&p->cpus_mask, ctx->new_mask)) {
3032 if (ctx->flags & SCA_USER)
3033 swap(p->user_cpus_ptr, ctx->user_mask);
3034 goto out;
3035 }
3036
3037 if (WARN_ON_ONCE(p == current &&
3038 is_migration_disabled(p) &&
3039 !cpumask_test_cpu(task_cpu(p), ctx->new_mask))) {
3040 ret = -EBUSY;
3041 goto out;
3042 }
3043 }
3044
3045 /*
3046 * Picking a ~random cpu helps in cases where we are changing affinity
3047 * for groups of tasks (ie. cpuset), so that load balancing is not
3048 * immediately required to distribute the tasks within their new mask.
3049 */
3050 dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, ctx->new_mask);
3051 if (dest_cpu >= nr_cpu_ids) {
3052 ret = -EINVAL;
3053 goto out;
3054 }
3055
3056 do_set_cpus_allowed(p, ctx);
3057
3058 return affine_move_task(rq, p, rf, dest_cpu, ctx->flags);
3059
3060 out:
3061 task_rq_unlock(rq, p, rf);
3062
3063 return ret;
3064 }
3065
3066 /*
3067 * Change a given task's CPU affinity. Migrate the thread to a
3068 * proper CPU and schedule it away if the CPU it's executing on
3069 * is removed from the allowed bitmask.
3070 *
3071 * NOTE: the caller must have a valid reference to the task, the
3072 * task must not exit() & deallocate itself prematurely. The
3073 * call is not atomic; no spinlocks may be held.
3074 */
3075 int __set_cpus_allowed_ptr(struct task_struct *p, struct affinity_context *ctx)
3076 {
3077 struct rq_flags rf;
3078 struct rq *rq;
3079
3080 rq = task_rq_lock(p, &rf);
3081 /*
3082 * Masking should be skipped if SCA_USER or any of the SCA_MIGRATE_*
3083 * flags are set.
3084 */
3085 if (p->user_cpus_ptr &&
3086 !(ctx->flags & (SCA_USER | SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) &&
3087 cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr))
3088 ctx->new_mask = rq->scratch_mask;
3089
3090 return __set_cpus_allowed_ptr_locked(p, ctx, rq, &rf);
3091 }
3092
3093 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
3094 {
3095 struct affinity_context ac = {
3096 .new_mask = new_mask,
3097 .flags = 0,
3098 };
3099
3100 return __set_cpus_allowed_ptr(p, &ac);
3101 }
3102 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
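/*
 * Typical usage sketch (illustrative; the worker kthread and target_cpu
 * shown are hypothetical):
 *
 *	if (set_cpus_allowed_ptr(worker, cpumask_of(target_cpu)))
 *		pr_warn("could not bind worker to CPU%d\n", target_cpu);
 *
 * On success the task has been moved off any CPU outside the new mask
 * before the call returns (see affine_move_task() above).
 */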
3103
3104 /*
3105 * Change a given task's CPU affinity to the intersection of its current
3106 * affinity mask and @subset_mask, writing the resulting mask to @new_mask.
3107 * If user_cpus_ptr is defined, use it as the basis for restricting CPU
3108 * affinity or use cpu_possible_mask instead.
3109 *
3110 * If the resulting mask is empty, leave the affinity unchanged and return
3111 * -EINVAL.
3112 */
3113 static int restrict_cpus_allowed_ptr(struct task_struct *p,
3114 struct cpumask *new_mask,
3115 const struct cpumask *subset_mask)
3116 {
3117 struct affinity_context ac = {
3118 .new_mask = new_mask,
3119 .flags = 0,
3120 };
3121 struct rq_flags rf;
3122 struct rq *rq;
3123 int err;
3124
3125 rq = task_rq_lock(p, &rf);
3126
3127 /*
3128 * Forcefully restricting the affinity of a deadline task is
3129 * likely to cause problems, so fail and noisily override the
3130 * mask entirely.
3131 */
3132 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
3133 err = -EPERM;
3134 goto err_unlock;
3135 }
3136
3137 if (!cpumask_and(new_mask, task_user_cpus(p), subset_mask)) {
3138 err = -EINVAL;
3139 goto err_unlock;
3140 }
3141
3142 return __set_cpus_allowed_ptr_locked(p, &ac, rq, &rf);
3143
3144 err_unlock:
3145 task_rq_unlock(rq, p, &rf);
3146 return err;
3147 }
3148
3149 /*
3150 * Restrict the CPU affinity of task @p so that it is a subset of
3151 * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
3152 * old affinity mask. If the resulting mask is empty, we warn and walk
3153 * up the cpuset hierarchy until we find a suitable mask.
3154 */
3155 void force_compatible_cpus_allowed_ptr(struct task_struct *p)
3156 {
3157 cpumask_var_t new_mask;
3158 const struct cpumask *override_mask = task_cpu_possible_mask(p);
3159
3160 alloc_cpumask_var(&new_mask, GFP_KERNEL);
3161
3162 /*
3163 * __migrate_task() can fail silently in the face of concurrent
3164 * offlining of the chosen destination CPU, so take the hotplug
3165 * lock to ensure that the migration succeeds.
3166 */
3167 cpus_read_lock();
3168 if (!cpumask_available(new_mask))
3169 goto out_set_mask;
3170
3171 if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask))
3172 goto out_free_mask;
3173
3174 /*
3175 * We failed to find a valid subset of the affinity mask for the
3176 * task, so override it based on its cpuset hierarchy.
3177 */
3178 cpuset_cpus_allowed(p, new_mask);
3179 override_mask = new_mask;
3180
3181 out_set_mask:
3182 if (printk_ratelimit()) {
3183 printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
3184 task_pid_nr(p), p->comm,
3185 cpumask_pr_args(override_mask));
3186 }
3187
3188 WARN_ON(set_cpus_allowed_ptr(p, override_mask));
3189 out_free_mask:
3190 cpus_read_unlock();
3191 free_cpumask_var(new_mask);
3192 }
3193
3194 /*
3195 * Restore the affinity of a task @p which was previously restricted by a
3196 * call to force_compatible_cpus_allowed_ptr().
3197 *
3198 * It is the caller's responsibility to serialise this with any calls to
3199 * force_compatible_cpus_allowed_ptr(@p).
3200 */
3201 void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
3202 {
3203 struct affinity_context ac = {
3204 .new_mask = task_user_cpus(p),
3205 .flags = 0,
3206 };
3207 int ret;
3208
3209 /*
3210 * Try to restore the old affinity mask with __sched_setaffinity().
3211 * Cpuset masking will be done there too.
3212 */
3213 ret = __sched_setaffinity(p, &ac);
3214 WARN_ON_ONCE(ret);
3215 }
3216
3217 #ifdef CONFIG_SMP
3218
3219 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
3220 {
3221 unsigned int state = READ_ONCE(p->__state);
3222
3223 /*
3224 * We should never call set_task_cpu() on a blocked task,
3225 * ttwu() will sort out the placement.
3226 */
3227 WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq);
3228
3229 /*
3230 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
3231 * because schedstat_wait_{start,end} rebase migrating task's wait_start
3232 * time relying on p->on_rq.
3233 */
3234 WARN_ON_ONCE(state == TASK_RUNNING &&
3235 p->sched_class == &fair_sched_class &&
3236 (p->on_rq && !task_on_rq_migrating(p)));
3237
3238 #ifdef CONFIG_LOCKDEP
3239 /*
3240 * The caller should hold either p->pi_lock or rq->lock, when changing
3241 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
3242 *
3243 * sched_move_task() holds both and thus holding either pins the cgroup,
3244 * see task_group().
3245 *
3246 * Furthermore, all task_rq users should acquire both locks, see
3247 * task_rq_lock().
3248 */
3249 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
3250 lockdep_is_held(__rq_lockp(task_rq(p)))));
3251 #endif
3252 /*
3253 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
3254 */
3255 WARN_ON_ONCE(!cpu_online(new_cpu));
3256
3257 WARN_ON_ONCE(is_migration_disabled(p));
3258
3259 trace_sched_migrate_task(p, new_cpu);
3260
3261 if (task_cpu(p) != new_cpu) {
3262 if (p->sched_class->migrate_task_rq)
3263 p->sched_class->migrate_task_rq(p, new_cpu);
3264 p->se.nr_migrations++;
3265 perf_event_task_migrate(p);
3266 }
3267
3268 __set_task_cpu(p, new_cpu);
3269 }
3270 #endif /* CONFIG_SMP */
3271
3272 #ifdef CONFIG_NUMA_BALANCING
3273 static void __migrate_swap_task(struct task_struct *p, int cpu)
3274 {
3275 if (task_on_rq_queued(p)) {
3276 struct rq *src_rq, *dst_rq;
3277 struct rq_flags srf, drf;
3278
3279 src_rq = task_rq(p);
3280 dst_rq = cpu_rq(cpu);
3281
3282 rq_pin_lock(src_rq, &srf);
3283 rq_pin_lock(dst_rq, &drf);
3284
3285 move_queued_task_locked(src_rq, dst_rq, p);
3286 wakeup_preempt(dst_rq, p, 0);
3287
3288 rq_unpin_lock(dst_rq, &drf);
3289 rq_unpin_lock(src_rq, &srf);
3290
3291 } else {
3292 /*
3293 * Task isn't running anymore; make it appear like we migrated
3294 * it before it went to sleep. This means on wakeup we make the
3295 * previous CPU our target instead of where it really is.
3296 */
3297 p->wake_cpu = cpu;
3298 }
3299 }
3300
3301 struct migration_swap_arg {
3302 struct task_struct *src_task, *dst_task;
3303 int src_cpu, dst_cpu;
3304 };
3305
3306 static int migrate_swap_stop(void *data)
3307 {
3308 struct migration_swap_arg *arg = data;
3309 struct rq *src_rq, *dst_rq;
3310
3311 if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
3312 return -EAGAIN;
3313
3314 src_rq = cpu_rq(arg->src_cpu);
3315 dst_rq = cpu_rq(arg->dst_cpu);
3316
3317 guard(double_raw_spinlock)(&arg->src_task->pi_lock, &arg->dst_task->pi_lock);
3318 guard(double_rq_lock)(src_rq, dst_rq);
3319
3320 if (task_cpu(arg->dst_task) != arg->dst_cpu)
3321 return -EAGAIN;
3322
3323 if (task_cpu(arg->src_task) != arg->src_cpu)
3324 return -EAGAIN;
3325
3326 if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr))
3327 return -EAGAIN;
3328
3329 if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr))
3330 return -EAGAIN;
3331
3332 __migrate_swap_task(arg->src_task, arg->dst_cpu);
3333 __migrate_swap_task(arg->dst_task, arg->src_cpu);
3334
3335 return 0;
3336 }
3337
3338 /*
3339 * Cross migrate two tasks
3340 */
3341 int migrate_swap(struct task_struct *cur, struct task_struct *p,
3342 int target_cpu, int curr_cpu)
3343 {
3344 struct migration_swap_arg arg;
3345 int ret = -EINVAL;
3346
3347 arg = (struct migration_swap_arg){
3348 .src_task = cur,
3349 .src_cpu = curr_cpu,
3350 .dst_task = p,
3351 .dst_cpu = target_cpu,
3352 };
3353
3354 if (arg.src_cpu == arg.dst_cpu)
3355 goto out;
3356
3357 /*
3358 * These three tests are all lockless; this is OK since all of them
3359 * will be re-checked with proper locks held further down the line.
3360 */
3361 if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
3362 goto out;
3363
3364 if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr))
3365 goto out;
3366
3367 if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr))
3368 goto out;
3369
3370 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
3371 ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);
3372
3373 out:
3374 return ret;
3375 }
3376 #endif /* CONFIG_NUMA_BALANCING */
3377
3378 /***
3379 * kick_process - kick a running thread to enter/exit the kernel
3380 * @p: the to-be-kicked thread
3381 *
3382 * Cause a process which is running on another CPU to enter
3383 * kernel-mode, without any delay. (to get signals handled.)
3384 *
3385 * NOTE: this function doesn't have to take the runqueue lock,
3386 * because all it wants to ensure is that the remote task enters
3387 * the kernel. If the IPI races and the task has been migrated
3388 * to another CPU then no harm is done and the purpose has been
3389 * achieved as well.
3390 */
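/*
 * Illustrative sketch (an assumed caller, not taken from this file): after
 * making new state visible to a remote running task, kick it so the change
 * is noticed at the next kernel entry rather than at some arbitrary later
 * point:
 *
 *	set_tsk_thread_flag(p, TIF_SIGPENDING);
 *	kick_process(p);
 */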
3391 void kick_process(struct task_struct *p)
3392 {
3393 guard(preempt)();
3394 int cpu = task_cpu(p);
3395
3396 if ((cpu != smp_processor_id()) && task_curr(p))
3397 smp_send_reschedule(cpu);
3398 }
3399 EXPORT_SYMBOL_GPL(kick_process);
3400
3401 /*
3402 * ->cpus_ptr is protected by both rq->lock and p->pi_lock
3403 *
3404 * A few notes on cpu_active vs cpu_online:
3405 *
3406 * - cpu_active must be a subset of cpu_online
3407 *
3408 * - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
3409 * see __set_cpus_allowed_ptr(). At this point the newly online
3410 * CPU isn't yet part of the sched domains, and balancing will not
3411 * see it.
3412 *
3413 * - on CPU-down we clear cpu_active() to mask the sched domains and
3414 * avoid the load balancer to place new tasks on the to be removed
3415 * CPU. Existing tasks will remain running there and will be taken
3416 * off.
3417 *
3418 * This means that fallback selection must not select !active CPUs.
3419 * And can assume that any active CPU must be online. Conversely
3420 * select_task_rq() below may allow selection of !active CPUs in order
3421 * to satisfy the above rules.
3422 */
3423 static int select_fallback_rq(int cpu, struct task_struct *p)
3424 {
3425 int nid = cpu_to_node(cpu);
3426 const struct cpumask *nodemask = NULL;
3427 enum { cpuset, possible, fail } state = cpuset;
3428 int dest_cpu;
3429
3430 /*
3431 * If the node that the CPU is on has been offlined, cpu_to_node()
3432 * will return -1. There is no CPU on the node, and we should
3433 * select a CPU on another node.
3434 */
3435 if (nid != -1) {
3436 nodemask = cpumask_of_node(nid);
3437
3438 /* Look for allowed, online CPU in same node. */
3439 for_each_cpu(dest_cpu, nodemask) {
3440 if (is_cpu_allowed(p, dest_cpu))
3441 return dest_cpu;
3442 }
3443 }
3444
3445 for (;;) {
3446 /* Any allowed, online CPU? */
3447 for_each_cpu(dest_cpu, p->cpus_ptr) {
3448 if (!is_cpu_allowed(p, dest_cpu))
3449 continue;
3450
3451 goto out;
3452 }
3453
3454 /* No more Mr. Nice Guy. */
3455 switch (state) {
3456 case cpuset:
3457 if (cpuset_cpus_allowed_fallback(p)) {
3458 state = possible;
3459 break;
3460 }
3461 fallthrough;
3462 case possible:
3463 set_cpus_allowed_force(p, task_cpu_fallback_mask(p));
3464 state = fail;
3465 break;
3466 case fail:
3467 BUG();
3468 break;
3469 }
3470 }
3471
3472 out:
3473 if (state != cpuset) {
3474 /*
3475 * Don't tell them about moving exiting tasks or
3476 * kernel threads (both mm NULL), since they never
3477 * leave kernel.
3478 */
3479 if (p->mm && printk_ratelimit()) {
3480 printk_deferred("process %d (%s) no longer affine to cpu%d\n",
3481 task_pid_nr(p), p->comm, cpu);
3482 }
3483 }
3484
3485 return dest_cpu;
3486 }
3487
3488 /*
3489 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
3490 */
3491 static inline
3492 int select_task_rq(struct task_struct *p, int cpu, int *wake_flags)
3493 {
3494 lockdep_assert_held(&p->pi_lock);
3495
3496 if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p)) {
3497 cpu = p->sched_class->select_task_rq(p, cpu, *wake_flags);
3498 *wake_flags |= WF_RQ_SELECTED;
3499 } else {
3500 cpu = cpumask_any(p->cpus_ptr);
3501 }
3502
3503 /*
3504 * In order not to call set_task_cpu() on a blocking task we need
3505 * to rely on ttwu() to place the task on a valid ->cpus_ptr
3506 * CPU.
3507 *
3508 * Since this is common to all placement strategies, this lives here.
3509 *
3510 * [ this allows ->select_task() to simply return task_cpu(p) and
3511 * not worry about this generic constraint ]
3512 */
3513 if (unlikely(!is_cpu_allowed(p, cpu)))
3514 cpu = select_fallback_rq(task_cpu(p), p);
3515
3516 return cpu;
3517 }
3518
3519 void sched_set_stop_task(int cpu, struct task_struct *stop)
3520 {
3521 static struct lock_class_key stop_pi_lock;
3522 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
3523 struct task_struct *old_stop = cpu_rq(cpu)->stop;
3524
3525 if (stop) {
3526 /*
3527 * Make it appear like a SCHED_FIFO task; it's something
3528 * userspace knows about and won't get confused about.
3529 *
3530 * Also, it will make PI more or less work without too
3531 * much confusion -- but then, stop work should not
3532 * rely on PI working anyway.
3533 */
3534 sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
3535
3536 stop->sched_class = &stop_sched_class;
3537
3538 /*
3539 * The PI code calls rt_mutex_setprio() with ->pi_lock held to
3540 * adjust the effective priority of a task. As a result,
3541 * rt_mutex_setprio() can trigger (RT) balancing operations,
3542 * which can then trigger wakeups of the stop thread to push
3543 * around the current task.
3544 *
3545 * The stop task itself will never be part of the PI-chain, it
3546 * never blocks, therefore that ->pi_lock recursion is safe.
3547 * Tell lockdep about this by placing the stop->pi_lock in its
3548 * own class.
3549 */
3550 lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
3551 }
3552
3553 cpu_rq(cpu)->stop = stop;
3554
3555 if (old_stop) {
3556 /*
3557 * Reset it back to a normal scheduling class so that
3558 * it can die in pieces.
3559 */
3560 old_stop->sched_class = &rt_sched_class;
3561 }
3562 }
3563
3564 static void
3565 ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
3566 {
3567 struct rq *rq;
3568
3569 if (!schedstat_enabled())
3570 return;
3571
3572 rq = this_rq();
3573
3574 if (cpu == rq->cpu) {
3575 __schedstat_inc(rq->ttwu_local);
3576 __schedstat_inc(p->stats.nr_wakeups_local);
3577 } else {
3578 struct sched_domain *sd;
3579
3580 __schedstat_inc(p->stats.nr_wakeups_remote);
3581
3582 guard(rcu)();
3583 for_each_domain(rq->cpu, sd) {
3584 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
3585 __schedstat_inc(sd->ttwu_wake_remote);
3586 break;
3587 }
3588 }
3589 }
3590
3591 if (wake_flags & WF_MIGRATED)
3592 __schedstat_inc(p->stats.nr_wakeups_migrate);
3593
3594 __schedstat_inc(rq->ttwu_count);
3595 __schedstat_inc(p->stats.nr_wakeups);
3596
3597 if (wake_flags & WF_SYNC)
3598 __schedstat_inc(p->stats.nr_wakeups_sync);
3599 }
3600
3601 /*
3602 * Mark the task runnable.
3603 */
3604 static inline void ttwu_do_wakeup(struct task_struct *p)
3605 {
3606 WRITE_ONCE(p->__state, TASK_RUNNING);
3607 trace_sched_wakeup(p);
3608 }
3609
3610 static void
3611 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
3612 struct rq_flags *rf)
3613 {
3614 int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
3615
3616 lockdep_assert_rq_held(rq);
3617
3618 if (p->sched_contributes_to_load)
3619 rq->nr_uninterruptible--;
3620
3621 if (wake_flags & WF_RQ_SELECTED)
3622 en_flags |= ENQUEUE_RQ_SELECTED;
3623 if (wake_flags & WF_MIGRATED)
3624 en_flags |= ENQUEUE_MIGRATED;
3625 else
3626 if (p->in_iowait) {
3627 delayacct_blkio_end(p);
3628 atomic_dec(&task_rq(p)->nr_iowait);
3629 }
3630
3631 activate_task(rq, p, en_flags);
3632 wakeup_preempt(rq, p, wake_flags);
3633
3634 ttwu_do_wakeup(p);
3635
3636 if (p->sched_class->task_woken) {
3637 /*
3638 * Our task @p is fully woken up and running; so it's safe to
3639 * drop the rq->lock, hereafter rq is only used for statistics.
3640 */
3641 rq_unpin_lock(rq, rf);
3642 p->sched_class->task_woken(rq, p);
3643 rq_repin_lock(rq, rf);
3644 }
3645
3646 if (rq->idle_stamp) {
3647 u64 delta = rq_clock(rq) - rq->idle_stamp;
3648 u64 max = 2*rq->max_idle_balance_cost;
3649
3650 update_avg(&rq->avg_idle, delta);
3651
3652 if (rq->avg_idle > max)
3653 rq->avg_idle = max;
3654
3655 rq->idle_stamp = 0;
3656 }
3657 }
3658
3659 /*
3660 * Consider @p being inside a wait loop:
3661 *
3662 * for (;;) {
3663 * set_current_state(TASK_UNINTERRUPTIBLE);
3664 *
3665 * if (CONDITION)
3666 * break;
3667 *
3668 * schedule();
3669 * }
3670 * __set_current_state(TASK_RUNNING);
3671 *
3672 * between set_current_state() and schedule(). In this case @p is still
3673 * runnable, so all that needs doing is to change p->state back to TASK_RUNNING in
3674 * an atomic manner.
3675 *
3676 * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
3677 * then schedule() must still happen and p->state can be changed to
3678 * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
3679 * need to do a full wakeup with enqueue.
3680 *
3681 * Returns: %true when the wakeup is done,
3682 * %false otherwise.
3683 */
3684 static int ttwu_runnable(struct task_struct *p, int wake_flags)
3685 {
3686 struct rq_flags rf;
3687 struct rq *rq;
3688 int ret = 0;
3689
3690 rq = __task_rq_lock(p, &rf);
3691 if (task_on_rq_queued(p)) {
3692 update_rq_clock(rq);
3693 if (p->se.sched_delayed)
3694 enqueue_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_DELAYED);
3695 if (!task_on_cpu(rq, p)) {
3696 /*
3697 * When on_rq && !on_cpu the task is preempted, see if
3698 * it should preempt the task that is current now.
3699 */
3700 wakeup_preempt(rq, p, wake_flags);
3701 }
3702 ttwu_do_wakeup(p);
3703 ret = 1;
3704 }
3705 __task_rq_unlock(rq, p, &rf);
3706
3707 return ret;
3708 }
3709
3710 void sched_ttwu_pending(void *arg)
3711 {
3712 struct llist_node *llist = arg;
3713 struct rq *rq = this_rq();
3714 struct task_struct *p, *t;
3715 struct rq_flags rf;
3716
3717 if (!llist)
3718 return;
3719
3720 rq_lock_irqsave(rq, &rf);
3721 update_rq_clock(rq);
3722
3723 llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
3724 if (WARN_ON_ONCE(p->on_cpu))
3725 smp_cond_load_acquire(&p->on_cpu, !VAL);
3726
3727 if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
3728 set_task_cpu(p, cpu_of(rq));
3729
3730 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf);
3731 }
3732
3733 /*
3734 * Must be after enqueueing at least one task such that
3735 * idle_cpu() does not observe a false-negative -- if it does,
3736 * it is possible for select_idle_sibling() to stack a number
3737 * of tasks on this CPU during that window.
3738 *
3739 * It is OK to clear ttwu_pending when another task is pending.
3740 * We will receive an IPI after local IRQs are enabled and then enqueue it.
3741 * Since nr_running > 0 by now, idle_cpu() will always get the correct result.
3742 */
3743 WRITE_ONCE(rq->ttwu_pending, 0);
3744 rq_unlock_irqrestore(rq, &rf);
3745 }
3746
3747 /*
3748 * Prepare the scene for sending an IPI for a remote smp_call
3749 *
3750 * Returns true if the caller can proceed with sending the IPI.
3751 * Returns false otherwise.
3752 */
3753 bool call_function_single_prep_ipi(int cpu)
3754 {
3755 if (set_nr_if_polling(cpu_rq(cpu)->idle)) {
3756 trace_sched_wake_idle_without_ipi(cpu);
3757 return false;
3758 }
3759
3760 return true;
3761 }
3762
3763 /*
3764 * Queue a task on the target CPU's wake_list and wake the CPU via IPI if
3765 * necessary. The wakee CPU on receipt of the IPI will queue the task
3766 * via sched_ttwu_pending() for activation so the wakee incurs the cost
3767 * of the wakeup instead of the waker.
3768 */
3769 static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3770 {
3771 struct rq *rq = cpu_rq(cpu);
3772
3773 p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
3774
3775 WRITE_ONCE(rq->ttwu_pending, 1);
3776 #ifdef CONFIG_SMP
3777 __smp_call_single_queue(cpu, &p->wake_entry.llist);
3778 #endif
3779 }
3780
3781 void wake_up_if_idle(int cpu)
3782 {
3783 struct rq *rq = cpu_rq(cpu);
3784
3785 guard(rcu)();
3786 if (is_idle_task(rcu_dereference(rq->curr))) {
3787 guard(rq_lock_irqsave)(rq);
3788 if (is_idle_task(rq->curr))
3789 resched_curr(rq);
3790 }
3791 }
3792
3793 bool cpus_equal_capacity(int this_cpu, int that_cpu)
3794 {
3795 if (!sched_asym_cpucap_active())
3796 return true;
3797
3798 if (this_cpu == that_cpu)
3799 return true;
3800
3801 return arch_scale_cpu_capacity(this_cpu) == arch_scale_cpu_capacity(that_cpu);
3802 }
3803
3804 bool cpus_share_cache(int this_cpu, int that_cpu)
3805 {
3806 if (this_cpu == that_cpu)
3807 return true;
3808
3809 return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
3810 }
3811
3812 /*
3813 * Whether CPUs share cache resources, which means LLC on non-cluster
3814 * machines and LLC tag or L2 on machines with clusters.
3815 */
3816 bool cpus_share_resources(int this_cpu, int that_cpu)
3817 {
3818 if (this_cpu == that_cpu)
3819 return true;
3820
3821 return per_cpu(sd_share_id, this_cpu) == per_cpu(sd_share_id, that_cpu);
3822 }
3823
3824 static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
3825 {
3826 /* See SCX_OPS_ALLOW_QUEUED_WAKEUP. */
3827 if (!scx_allow_ttwu_queue(p))
3828 return false;
3829
3830 #ifdef CONFIG_SMP
3831 if (p->sched_class == &stop_sched_class)
3832 return false;
3833 #endif
3834
3835 /*
3836 * Do not complicate things with the async wake_list while the CPU is
3837 * in hotplug state.
3838 */
3839 if (!cpu_active(cpu))
3840 return false;
3841
3842 /* Ensure the task will still be allowed to run on the CPU. */
3843 if (!cpumask_test_cpu(cpu, p->cpus_ptr))
3844 return false;
3845
3846 /*
3847 * If the CPU does not share cache, then queue the task on the
3848 * remote rq's wakelist to avoid accessing remote data.
3849 */
3850 if (!cpus_share_cache(smp_processor_id(), cpu))
3851 return true;
3852
3853 if (cpu == smp_processor_id())
3854 return false;
3855
3856 /*
3857 * If the wakee cpu is idle, or the task is descheduling and the
3858 * only running task on the CPU, then use the wakelist to offload
3859 * the task activation to the idle (or soon-to-be-idle) CPU as
3860 * the current CPU is likely busy. nr_running is checked to
3861 * avoid unnecessary task stacking.
3862 *
3863 * Note that we can only get here with (wakee) p->on_rq=0,
3864 * p->on_cpu can be whatever, we've done the dequeue, so
3865 * the wakee has been accounted out of ->nr_running.
3866 */
3867 if (!cpu_rq(cpu)->nr_running)
3868 return true;
3869
3870 return false;
3871 }
3872
3873 static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3874 {
3875 if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) {
3876 sched_clock_cpu(cpu); /* Sync clocks across CPUs */
3877 __ttwu_queue_wakelist(p, cpu, wake_flags);
3878 return true;
3879 }
3880
3881 return false;
3882 }
3883
3884 static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
3885 {
3886 struct rq *rq = cpu_rq(cpu);
3887 struct rq_flags rf;
3888
3889 if (ttwu_queue_wakelist(p, cpu, wake_flags))
3890 return;
3891
3892 rq_lock(rq, &rf);
3893 update_rq_clock(rq);
3894 ttwu_do_activate(rq, p, wake_flags, &rf);
3895 rq_unlock(rq, &rf);
3896 }
3897
3898 /*
3899 * Invoked from try_to_wake_up() to check whether the task can be woken up.
3900 *
3901 * The caller holds p::pi_lock if p != current or has preemption
3902 * disabled when p == current.
3903 *
3904 * The rules of saved_state:
3905 *
3906 * The related locking code always holds p::pi_lock when updating
3907 * p::saved_state, which means the code is fully serialized in both cases.
3908 *
3909 * For PREEMPT_RT, the lock wait and lock wakeups happen via TASK_RTLOCK_WAIT.
3910 * No other bits set. This allows distinguishing all wakeup scenarios.
3911 *
3912 * For FREEZER, the wakeup happens via TASK_FROZEN. No other bits set. This
3913 * allows us to prevent early wakeup of tasks before they can be run on
3914 * asymmetric ISA architectures (eg ARMv9).
3915 */
3916 static __always_inline
3917 bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
3918 {
3919 int match;
3920
3921 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
3922 WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) &&
3923 state != TASK_RTLOCK_WAIT);
3924 }
3925
3926 *success = !!(match = __task_state_match(p, state));
3927
3928 /*
3929 * Saved state preserves the task state across blocking on
3930 * an RT lock or TASK_FREEZABLE tasks. If the state matches,
3931 * set p::saved_state to TASK_RUNNING, but do not wake the task
3932 * because it waits for a lock wakeup or __thaw_task(). Also
3933 * indicate success because from the regular waker's point of
3934 * view this has succeeded.
3935 *
3936 * After acquiring the lock the task will restore p::__state
3937 * from p::saved_state which ensures that the regular
3938 * wakeup is not lost. The restore will also set
3939 * p::saved_state to TASK_RUNNING so any further tests will
3940 * not result in false positives vs. @success
3941 */
3942 if (match < 0)
3943 p->saved_state = TASK_RUNNING;
3944
3945 return match > 0;
3946 }
3947
3948 /*
3949 * Notes on Program-Order guarantees on SMP systems.
3950 *
3951 * MIGRATION
3952 *
3953 * The basic program-order guarantee on SMP systems is that when a task [t]
3954 * migrates, all its activity on its old CPU [c0] happens-before any subsequent
3955 * execution on its new CPU [c1].
3956 *
3957 * For migration (of runnable tasks) this is provided by the following means:
3958 *
3959 * A) UNLOCK of the rq(c0)->lock scheduling out task t
3960 * B) migration for t is required to synchronize *both* rq(c0)->lock and
3961 * rq(c1)->lock (if not at the same time, then in that order).
3962 * C) LOCK of the rq(c1)->lock scheduling in task
3963 *
3964 * Release/acquire chaining guarantees that B happens after A and C after B.
3965 * Note: the CPU doing B need not be c0 or c1
3966 *
3967 * Example:
3968 *
3969 * CPU0 CPU1 CPU2
3970 *
3971 * LOCK rq(0)->lock
3972 * sched-out X
3973 * sched-in Y
3974 * UNLOCK rq(0)->lock
3975 *
3976 * LOCK rq(0)->lock // orders against CPU0
3977 * dequeue X
3978 * UNLOCK rq(0)->lock
3979 *
3980 * LOCK rq(1)->lock
3981 * enqueue X
3982 * UNLOCK rq(1)->lock
3983 *
3984 * LOCK rq(1)->lock // orders against CPU2
3985 * sched-out Z
3986 * sched-in X
3987 * UNLOCK rq(1)->lock
3988 *
3989 *
3990 * BLOCKING -- aka. SLEEP + WAKEUP
3991 *
3992 * For blocking we (obviously) need to provide the same guarantee as for
3993 * migration. However the means are completely different as there is no lock
3994 * chain to provide order. Instead we do:
3995 *
3996 * 1) smp_store_release(X->on_cpu, 0) -- finish_task()
3997 * 2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
3998 *
3999 * Example:
4000 *
4001 * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule)
4002 *
4003 * LOCK rq(0)->lock LOCK X->pi_lock
4004 * dequeue X
4005 * sched-out X
4006 * smp_store_release(X->on_cpu, 0);
4007 *
4008 * smp_cond_load_acquire(&X->on_cpu, !VAL);
4009 * X->state = WAKING
4010 * set_task_cpu(X,2)
4011 *
4012 * LOCK rq(2)->lock
4013 * enqueue X
4014 * X->state = RUNNING
4015 * UNLOCK rq(2)->lock
4016 *
4017 * LOCK rq(2)->lock // orders against CPU1
4018 * sched-out Z
4019 * sched-in X
4020 * UNLOCK rq(2)->lock
4021 *
4022 * UNLOCK X->pi_lock
4023 * UNLOCK rq(0)->lock
4024 *
4025 *
4026 * However, for wakeups there is a second guarantee we must provide, namely we
4027 * must ensure that CONDITION=1 done by the caller can not be reordered with
4028 * accesses to the task state; see try_to_wake_up() and set_current_state().
4029 */
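
/*
 * Illustrative waker-side sketch (not part of the original source): the
 * second guarantee above means a waker that publishes CONDITION before the
 * wakeup needs no extra barrier of its own, because try_to_wake_up() issues
 * a full memory barrier before reading @p->state:
 *
 *	CONDITION = 1;
 *	wake_up_process(p);	/* pairs with the waiter's set_current_state() */
 */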
4030
4031 /**
4032 * try_to_wake_up - wake up a thread
4033 * @p: the thread to be awakened
4034 * @state: the mask of task states that can be woken
4035 * @wake_flags: wake modifier flags (WF_*)
4036 *
4037 * Conceptually does:
4038 *
4039 * If (@state & @p->state) @p->state = TASK_RUNNING.
4040 *
4041 * If the task was not queued/runnable, also place it back on a runqueue.
4042 *
4043 * This function is atomic against schedule() which would dequeue the task.
4044 *
4045 * It issues a full memory barrier before accessing @p->state, see the comment
4046 * with set_current_state().
4047 *
4048 * Uses p->pi_lock to serialize against concurrent wake-ups.
4049 *
4050 * Relies on p->pi_lock stabilizing:
4051 * - p->sched_class
4052 * - p->cpus_ptr
4053 * - p->sched_task_group
4054 * in order to do migration, see its use of select_task_rq()/set_task_cpu().
4055 *
4056 * Tries really hard to only take one task_rq(p)->lock for performance.
4057 * Takes rq->lock in:
4058 * - ttwu_runnable() -- old rq, unavoidable, see comment there;
4059 * - ttwu_queue() -- new rq, for enqueue of the task;
4060 * - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
4061 *
4062 * As a consequence we race really badly with just about everything. See the
4063 * many memory barriers and their comments for details.
4064 *
4065 * Return: %true if @p->state changes (an actual wakeup was done),
4066 * %false otherwise.
4067 */
4068 int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
4069 {
4070 guard(preempt)();
4071 int cpu, success = 0;
4072
4073 wake_flags |= WF_TTWU;
4074
4075 if (p == current) {
4076 /*
4077 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
4078 * == smp_processor_id()'. Together this means we can special
4079 * case the whole 'p->on_rq && ttwu_runnable()' case below
4080 * without taking any locks.
4081 *
4082 * Specifically, given current runs ttwu() we must be before
4083 * schedule()'s block_task(), as such this must not observe
4084 * sched_delayed.
4085 *
4086 * In particular:
4087 * - we rely on Program-Order guarantees for all the ordering,
4088 * - we're serialized against set_special_state() by virtue of
4089 * it disabling IRQs (this allows not taking ->pi_lock).
4090 */
4091 WARN_ON_ONCE(p->se.sched_delayed);
4092 if (!ttwu_state_match(p, state, &success))
4093 goto out;
4094
4095 trace_sched_waking(p);
4096 ttwu_do_wakeup(p);
4097 goto out;
4098 }
4099
4100 /*
4101 * If we are going to wake up a thread waiting for CONDITION we
4102 * need to ensure that CONDITION=1 done by the caller can not be
4103 * reordered with p->state check below. This pairs with smp_store_mb()
4104 * in set_current_state() that the waiting thread does.
4105 */
4106 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
4107 smp_mb__after_spinlock();
4108 if (!ttwu_state_match(p, state, &success))
4109 break;
4110
4111 trace_sched_waking(p);
4112
4113 /*
4114 * Ensure we load p->on_rq _after_ p->state, otherwise it would
4115 * be possible to, falsely, observe p->on_rq == 0 and get stuck
4116 * in smp_cond_load_acquire() below.
4117 *
4118 * sched_ttwu_pending() try_to_wake_up()
4119 * STORE p->on_rq = 1 LOAD p->state
4120 * UNLOCK rq->lock
4121 *
4122 * __schedule() (switch to task 'p')
4123 * LOCK rq->lock smp_rmb();
4124 * smp_mb__after_spinlock();
4125 * UNLOCK rq->lock
4126 *
4127 * [task p]
4128 * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq
4129 *
4130 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4131 * __schedule(). See the comment for smp_mb__after_spinlock().
4132 *
4133 * A similar smp_rmb() lives in __task_needs_rq_lock().
4134 */
4135 smp_rmb();
4136 if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
4137 break;
4138
4139 /*
4140 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
4141 * possible to, falsely, observe p->on_cpu == 0.
4142 *
4143 * One must be running (->on_cpu == 1) in order to remove oneself
4144 * from the runqueue.
4145 *
4146 * __schedule() (switch to task 'p') try_to_wake_up()
4147 * STORE p->on_cpu = 1 LOAD p->on_rq
4148 * UNLOCK rq->lock
4149 *
4150 * __schedule() (put 'p' to sleep)
4151 * LOCK rq->lock smp_rmb();
4152 * smp_mb__after_spinlock();
4153 * STORE p->on_rq = 0 LOAD p->on_cpu
4154 *
4155 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4156 * __schedule(). See the comment for smp_mb__after_spinlock().
4157 *
4158 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
4159 * schedule()'s block_task() has 'happened' and p will no longer
4160 * care about its own p->state. See the comment in __schedule().
4161 */
4162 smp_acquire__after_ctrl_dep();
4163
4164 /*
4165 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
4166 * == 0), which means we need to do an enqueue, change p->state to
4167 * TASK_WAKING such that we can unlock p->pi_lock before doing the
4168 * enqueue, such as ttwu_queue_wakelist().
4169 */
4170 WRITE_ONCE(p->__state, TASK_WAKING);
4171
4172 /*
4173 * If the owning (remote) CPU is still in the middle of schedule() with
4174 * this task as prev, consider queueing p on the remote CPU's wake_list
4175 * which potentially sends an IPI instead of spinning on p->on_cpu to
4176 * let the waker make forward progress. This is safe because IRQs are
4177 * disabled and the IPI will be delivered after on_cpu is cleared.
4178 *
4179 * Ensure we load task_cpu(p) after p->on_cpu:
4180 *
4181 * set_task_cpu(p, cpu);
4182 * STORE p->cpu = @cpu
4183 * __schedule() (switch to task 'p')
4184 * LOCK rq->lock
4185 * smp_mb__after_spin_lock() smp_cond_load_acquire(&p->on_cpu)
4186 * STORE p->on_cpu = 1 LOAD p->cpu
4187 *
4188 * to ensure we observe the correct CPU on which the task is currently
4189 * scheduling.
4190 */
4191 if (smp_load_acquire(&p->on_cpu) &&
4192 ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
4193 break;
4194
4195 /*
4196 * If the owning (remote) CPU is still in the middle of schedule() with
4197 * this task as prev, wait until it's done referencing the task.
4198 *
4199 * Pairs with the smp_store_release() in finish_task().
4200 *
4201 * This ensures that tasks getting woken will be fully ordered against
4202 * their previous state and preserve Program Order.
4203 */
4204 smp_cond_load_acquire(&p->on_cpu, !VAL);
4205
4206 cpu = select_task_rq(p, p->wake_cpu, &wake_flags);
4207 if (task_cpu(p) != cpu) {
4208 if (p->in_iowait) {
4209 delayacct_blkio_end(p);
4210 atomic_dec(&task_rq(p)->nr_iowait);
4211 }
4212
4213 wake_flags |= WF_MIGRATED;
4214 psi_ttwu_dequeue(p);
4215 set_task_cpu(p, cpu);
4216 }
4217
4218 ttwu_queue(p, cpu, wake_flags);
4219 }
4220 out:
4221 if (success)
4222 ttwu_stat(p, task_cpu(p), wake_flags);
4223
4224 return success;
4225 }
4226
4227 static bool __task_needs_rq_lock(struct task_struct *p)
4228 {
4229 unsigned int state = READ_ONCE(p->__state);
4230
4231 /*
4232 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when
4233 * the task is blocked. Make sure to check @state since ttwu() can drop
4234 * locks at the end, see ttwu_queue_wakelist().
4235 */
4236 if (state == TASK_RUNNING || state == TASK_WAKING)
4237 return true;
4238
4239 /*
4240 * Ensure we load p->on_rq after p->__state, otherwise it would be
4241 * possible to, falsely, observe p->on_rq == 0.
4242 *
4243 * See try_to_wake_up() for a longer comment.
4244 */
4245 smp_rmb();
4246 if (p->on_rq)
4247 return true;
4248
4249 /*
4250 * Ensure the task has finished __schedule() and will not be referenced
4251 * anymore. Again, see try_to_wake_up() for a longer comment.
4252 */
4253 smp_rmb();
4254 smp_cond_load_acquire(&p->on_cpu, !VAL);
4255
4256 return false;
4257 }
4258
4259 /**
4260 * task_call_func - Invoke a function on task in fixed state
4261 * @p: Process for which the function is to be invoked, can be @current.
4262 * @func: Function to invoke.
4263 * @arg: Argument to function.
4264 *
4265 * Fix the task in its current state by avoiding wakeups and/or rq operations
4266 * and call @func(@arg) on it. This function can use task_is_runnable() and
4267 * task_curr() to work out what the state is, if required. Given that @func
4268 * can be invoked with a runqueue lock held, it had better be quite
4269 * lightweight.
4270 *
4271 * Returns:
4272 * Whatever @func returns
4273 */
4274 int task_call_func(struct task_struct *p, task_call_f func, void *arg)
4275 {
4276 struct rq *rq = NULL;
4277 struct rq_flags rf;
4278 int ret;
4279
4280 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
4281
4282 if (__task_needs_rq_lock(p))
4283 rq = __task_rq_lock(p, &rf);
4284
4285 /*
4286 * At this point the task is pinned; either:
4287 * - blocked and we're holding off wakeups (pi->lock)
4288 * - woken, and we're holding off enqueue (rq->lock)
4289 * - queued, and we're holding off schedule (rq->lock)
4290 * - running, and we're holding off de-schedule (rq->lock)
4291 *
4292 * The called function (@func) can use: task_curr(), p->on_rq and
4293 * p->__state to differentiate between these states.
4294 */
4295 ret = func(p, arg);
4296
4297 if (rq)
4298 __task_rq_unlock(rq, p, &rf);
4299
4300 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
4301 return ret;
4302 }
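
/*
 * Minimal usage sketch (illustrative only; get_task_state_cb() is a
 * hypothetical callback, not part of this file). The callback runs with the
 * task pinned as described above and must therefore stay lightweight:
 *
 *	static int get_task_state_cb(struct task_struct *p, void *arg)
 *	{
 *		return READ_ONCE(p->__state);
 *	}
 *
 *	state = task_call_func(p, get_task_state_cb, NULL);
 */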
4303
4304 /**
4305 * cpu_curr_snapshot - Return a snapshot of the currently running task
4306 * @cpu: The CPU on which to snapshot the task.
4307 *
4308 * Returns the task_struct pointer of the task "currently" running on
4309 * the specified CPU.
4310 *
4311 * If the specified CPU was offline, the return value is whatever it
4312 * is, perhaps a pointer to the task_struct structure of that CPU's idle
4313 * task, but there is no guarantee. Callers wishing a useful return
4314 * value must take some action to ensure that the specified CPU remains
4315 * online throughout.
4316 *
4317 * This function executes full memory barriers before and after fetching
4318 * the pointer, which permits the caller to confine this function's fetch
4319 * with respect to the caller's accesses to other shared variables.
4320 */
4321 struct task_struct *cpu_curr_snapshot(int cpu)
4322 {
4323 struct rq *rq = cpu_rq(cpu);
4324 struct task_struct *t;
4325 struct rq_flags rf;
4326
4327 rq_lock_irqsave(rq, &rf);
4328 smp_mb__after_spinlock(); /* Pairing determined by caller's synchronization design. */
4329 t = rcu_dereference(cpu_curr(cpu));
4330 rq_unlock_irqrestore(rq, &rf);
4331 smp_mb(); /* Pairing determined by caller's synchronization design. */
4332
4333 return t;
4334 }
4335
4336 /**
4337 * wake_up_process - Wake up a specific process
4338 * @p: The process to be woken up.
4339 *
4340 * Attempt to wake up the nominated process and move it to the set of runnable
4341 * processes.
4342 *
4343 * Return: 1 if the process was woken up, 0 if it was already running.
4344 *
4345 * This function executes a full memory barrier before accessing the task state.
4346 */
4347 int wake_up_process(struct task_struct *p)
4348 {
4349 return try_to_wake_up(p, TASK_NORMAL, 0);
4350 }
4351 EXPORT_SYMBOL(wake_up_process);
4352
4353 int wake_up_state(struct task_struct *p, unsigned int state)
4354 {
4355 return try_to_wake_up(p, state, 0);
4356 }
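
/*
 * Choosing the state mask (illustrative sketch): wake_up_process() wakes
 * TASK_NORMAL sleepers (interruptible and uninterruptible), whereas a caller
 * that must not disturb uninterruptible waiters can narrow the mask:
 *
 *	wake_up_state(p, TASK_INTERRUPTIBLE);	/* interruptible sleepers only */
 */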
4357
4358 /*
4359 * Perform scheduler related setup for a newly forked process p.
4360 * p is forked by current.
4361 *
4362 * __sched_fork() is basic setup which is also used by sched_init() to
4363 * initialize the boot CPU's idle task.
4364 */
4365 static void __sched_fork(u64 clone_flags, struct task_struct *p)
4366 {
4367 p->on_rq = 0;
4368
4369 p->se.on_rq = 0;
4370 p->se.exec_start = 0;
4371 p->se.sum_exec_runtime = 0;
4372 p->se.prev_sum_exec_runtime = 0;
4373 p->se.nr_migrations = 0;
4374 p->se.vruntime = 0;
4375 p->se.vlag = 0;
4376 INIT_LIST_HEAD(&p->se.group_node);
4377
4378 /* A delayed task cannot be in clone(). */
4379 WARN_ON_ONCE(p->se.sched_delayed);
4380
4381 #ifdef CONFIG_FAIR_GROUP_SCHED
4382 p->se.cfs_rq = NULL;
4383 #ifdef CONFIG_CFS_BANDWIDTH
4384 init_cfs_throttle_work(p);
4385 #endif
4386 #endif
4387
4388 #ifdef CONFIG_SCHEDSTATS
4389 /* Even if schedstat is disabled, there should not be garbage */
4390 memset(&p->stats, 0, sizeof(p->stats));
4391 #endif
4392
4393 init_dl_entity(&p->dl);
4394
4395 INIT_LIST_HEAD(&p->rt.run_list);
4396 p->rt.timeout = 0;
4397 p->rt.time_slice = sched_rr_timeslice;
4398 p->rt.on_rq = 0;
4399 p->rt.on_list = 0;
4400
4401 #ifdef CONFIG_SCHED_CLASS_EXT
4402 init_scx_entity(&p->scx);
4403 #endif
4404
4405 #ifdef CONFIG_PREEMPT_NOTIFIERS
4406 INIT_HLIST_HEAD(&p->preempt_notifiers);
4407 #endif
4408
4409 #ifdef CONFIG_COMPACTION
4410 p->capture_control = NULL;
4411 #endif
4412 init_numa_balancing(clone_flags, p);
4413 p->wake_entry.u_flags = CSD_TYPE_TTWU;
4414 p->migration_pending = NULL;
4415 }
4416
4417 DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);
4418
4419 #ifdef CONFIG_NUMA_BALANCING
4420
4421 int sysctl_numa_balancing_mode;
4422
4423 static void __set_numabalancing_state(bool enabled)
4424 {
4425 if (enabled)
4426 static_branch_enable(&sched_numa_balancing);
4427 else
4428 static_branch_disable(&sched_numa_balancing);
4429 }
4430
4431 void set_numabalancing_state(bool enabled)
4432 {
4433 if (enabled)
4434 sysctl_numa_balancing_mode = NUMA_BALANCING_NORMAL;
4435 else
4436 sysctl_numa_balancing_mode = NUMA_BALANCING_DISABLED;
4437 __set_numabalancing_state(enabled);
4438 }
4439
4440 #ifdef CONFIG_PROC_SYSCTL
4441 static void reset_memory_tiering(void)
4442 {
4443 struct pglist_data *pgdat;
4444
4445 for_each_online_pgdat(pgdat) {
4446 pgdat->nbp_threshold = 0;
4447 pgdat->nbp_th_nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE);
4448 pgdat->nbp_th_start = jiffies_to_msecs(jiffies);
4449 }
4450 }
4451
4452 static int sysctl_numa_balancing(const struct ctl_table *table, int write,
4453 void *buffer, size_t *lenp, loff_t *ppos)
4454 {
4455 struct ctl_table t;
4456 int err;
4457 int state = sysctl_numa_balancing_mode;
4458
4459 if (write && !capable(CAP_SYS_ADMIN))
4460 return -EPERM;
4461
4462 t = *table;
4463 t.data = &state;
4464 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
4465 if (err < 0)
4466 return err;
4467 if (write) {
4468 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) &&
4469 (state & NUMA_BALANCING_MEMORY_TIERING))
4470 reset_memory_tiering();
4471 sysctl_numa_balancing_mode = state;
4472 __set_numabalancing_state(state);
4473 }
4474 return err;
4475 }
4476 #endif /* CONFIG_PROC_SYSCTL */
4477 #endif /* CONFIG_NUMA_BALANCING */
4478
4479 #ifdef CONFIG_SCHEDSTATS
4480
4481 DEFINE_STATIC_KEY_FALSE(sched_schedstats);
4482
4483 static void set_schedstats(bool enabled)
4484 {
4485 if (enabled)
4486 static_branch_enable(&sched_schedstats);
4487 else
4488 static_branch_disable(&sched_schedstats);
4489 }
4490
4491 void force_schedstat_enabled(void)
4492 {
4493 if (!schedstat_enabled()) {
4494 pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
4495 static_branch_enable(&sched_schedstats);
4496 }
4497 }
4498
4499 static int __init setup_schedstats(char *str)
4500 {
4501 int ret = 0;
4502 if (!str)
4503 goto out;
4504
4505 if (!strcmp(str, "enable")) {
4506 set_schedstats(true);
4507 ret = 1;
4508 } else if (!strcmp(str, "disable")) {
4509 set_schedstats(false);
4510 ret = 1;
4511 }
4512 out:
4513 if (!ret)
4514 pr_warn("Unable to parse schedstats=\n");
4515
4516 return ret;
4517 }
4518 __setup("schedstats=", setup_schedstats);
4519
4520 #ifdef CONFIG_PROC_SYSCTL
4521 static int sysctl_schedstats(const struct ctl_table *table, int write, void *buffer,
4522 size_t *lenp, loff_t *ppos)
4523 {
4524 struct ctl_table t;
4525 int err;
4526 int state = static_branch_likely(&sched_schedstats);
4527
4528 if (write && !capable(CAP_SYS_ADMIN))
4529 return -EPERM;
4530
4531 t = *table;
4532 t.data = &state;
4533 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
4534 if (err < 0)
4535 return err;
4536 if (write)
4537 set_schedstats(state);
4538 return err;
4539 }
4540 #endif /* CONFIG_PROC_SYSCTL */
4541 #endif /* CONFIG_SCHEDSTATS */
4542
4543 #ifdef CONFIG_SYSCTL
4544 static const struct ctl_table sched_core_sysctls[] = {
4545 #ifdef CONFIG_SCHEDSTATS
4546 {
4547 .procname = "sched_schedstats",
4548 .data = NULL,
4549 .maxlen = sizeof(unsigned int),
4550 .mode = 0644,
4551 .proc_handler = sysctl_schedstats,
4552 .extra1 = SYSCTL_ZERO,
4553 .extra2 = SYSCTL_ONE,
4554 },
4555 #endif /* CONFIG_SCHEDSTATS */
4556 #ifdef CONFIG_UCLAMP_TASK
4557 {
4558 .procname = "sched_util_clamp_min",
4559 .data = &sysctl_sched_uclamp_util_min,
4560 .maxlen = sizeof(unsigned int),
4561 .mode = 0644,
4562 .proc_handler = sysctl_sched_uclamp_handler,
4563 },
4564 {
4565 .procname = "sched_util_clamp_max",
4566 .data = &sysctl_sched_uclamp_util_max,
4567 .maxlen = sizeof(unsigned int),
4568 .mode = 0644,
4569 .proc_handler = sysctl_sched_uclamp_handler,
4570 },
4571 {
4572 .procname = "sched_util_clamp_min_rt_default",
4573 .data = &sysctl_sched_uclamp_util_min_rt_default,
4574 .maxlen = sizeof(unsigned int),
4575 .mode = 0644,
4576 .proc_handler = sysctl_sched_uclamp_handler,
4577 },
4578 #endif /* CONFIG_UCLAMP_TASK */
4579 #ifdef CONFIG_NUMA_BALANCING
4580 {
4581 .procname = "numa_balancing",
4582 .data = NULL, /* filled in by handler */
4583 .maxlen = sizeof(unsigned int),
4584 .mode = 0644,
4585 .proc_handler = sysctl_numa_balancing,
4586 .extra1 = SYSCTL_ZERO,
4587 .extra2 = SYSCTL_FOUR,
4588 },
4589 #endif /* CONFIG_NUMA_BALANCING */
4590 };
4591 static int __init sched_core_sysctl_init(void)
4592 {
4593 register_sysctl_init("kernel", sched_core_sysctls);
4594 return 0;
4595 }
4596 late_initcall(sched_core_sysctl_init);
4597 #endif /* CONFIG_SYSCTL */
4598
4599 /*
4600 * fork()/clone()-time setup:
4601 */
4602 int sched_fork(u64 clone_flags, struct task_struct *p)
4603 {
4604 __sched_fork(clone_flags, p);
4605 /*
4606 * We mark the process as NEW here. This guarantees that
4607 * nobody will actually run it, and a signal or other external
4608 * event cannot wake it up and insert it on the runqueue either.
4609 */
4610 p->__state = TASK_NEW;
4611
4612 /*
4613 * Make sure we do not leak PI boosting priority to the child.
4614 */
4615 p->prio = current->normal_prio;
4616
4617 uclamp_fork(p);
4618
4619 /*
4620 * Revert to default priority/policy on fork if requested.
4621 */
4622 if (unlikely(p->sched_reset_on_fork)) {
4623 if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
4624 p->policy = SCHED_NORMAL;
4625 p->static_prio = NICE_TO_PRIO(0);
4626 p->rt_priority = 0;
4627 } else if (PRIO_TO_NICE(p->static_prio) < 0)
4628 p->static_prio = NICE_TO_PRIO(0);
4629
4630 p->prio = p->normal_prio = p->static_prio;
4631 set_load_weight(p, false);
4632 p->se.custom_slice = 0;
4633 p->se.slice = sysctl_sched_base_slice;
4634
4635 /*
4636 * We don't need the reset flag anymore after the fork. It has
4637 * fulfilled its duty:
4638 */
4639 p->sched_reset_on_fork = 0;
4640 }
4641
4642 if (dl_prio(p->prio))
4643 return -EAGAIN;
4644
4645 scx_pre_fork(p);
4646
4647 if (rt_prio(p->prio)) {
4648 p->sched_class = &rt_sched_class;
4649 #ifdef CONFIG_SCHED_CLASS_EXT
4650 } else if (task_should_scx(p->policy)) {
4651 p->sched_class = &ext_sched_class;
4652 #endif
4653 } else {
4654 p->sched_class = &fair_sched_class;
4655 }
4656
4657 init_entity_runnable_average(&p->se);
4658
4659
4660 #ifdef CONFIG_SCHED_INFO
4661 if (likely(sched_info_on()))
4662 memset(&p->sched_info, 0, sizeof(p->sched_info));
4663 #endif
4664 p->on_cpu = 0;
4665 init_task_preempt_count(p);
4666 plist_node_init(&p->pushable_tasks, MAX_PRIO);
4667 RB_CLEAR_NODE(&p->pushable_dl_tasks);
4668
4669 return 0;
4670 }
4671
4672 int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
4673 {
4674 unsigned long flags;
4675
4676 /*
4677 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
4678 * required yet, but lockdep gets upset if rules are violated.
4679 */
4680 raw_spin_lock_irqsave(&p->pi_lock, flags);
4681 #ifdef CONFIG_CGROUP_SCHED
4682 if (1) {
4683 struct task_group *tg;
4684 tg = container_of(kargs->cset->subsys[cpu_cgrp_id],
4685 struct task_group, css);
4686 tg = autogroup_task_group(p, tg);
4687 p->sched_task_group = tg;
4688 }
4689 #endif
4690 /*
4691 * We're setting the CPU for the first time, we don't migrate,
4692 * so use __set_task_cpu().
4693 */
4694 __set_task_cpu(p, smp_processor_id());
4695 if (p->sched_class->task_fork)
4696 p->sched_class->task_fork(p);
4697 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4698
4699 return scx_fork(p);
4700 }
4701
4702 void sched_cancel_fork(struct task_struct *p)
4703 {
4704 scx_cancel_fork(p);
4705 }
4706
4707 void sched_post_fork(struct task_struct *p)
4708 {
4709 uclamp_post_fork(p);
4710 scx_post_fork(p);
4711 }
4712
4713 unsigned long to_ratio(u64 period, u64 runtime)
4714 {
4715 if (runtime == RUNTIME_INF)
4716 return BW_UNIT;
4717
4718 /*
4719 * Doing this here saves a lot of checks in all
4720 * the calling paths, and returning zero seems
4721 * safe for them anyway.
4722 */
4723 if (period == 0)
4724 return 0;
4725
4726 return div64_u64(runtime << BW_SHIFT, period);
4727 }
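
/*
 * Worked example (sketch, assuming the BW_SHIFT/BW_UNIT fixed-point unit from
 * kernel/sched/sched.h): a runtime of 50ms out of a 100ms period maps to half
 * of BW_UNIT:
 *
 *	to_ratio(100 * NSEC_PER_MSEC, 50 * NSEC_PER_MSEC)
 *		== (50000000ULL << BW_SHIFT) / 100000000ULL
 *		== BW_UNIT / 2
 */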
4728
4729 /*
4730 * wake_up_new_task - wake up a newly created task for the first time.
4731 *
4732 * This function will do some initial scheduler statistics housekeeping
4733 * that must be done for every newly created context, then puts the task
4734 * on the runqueue and wakes it.
4735 */
4736 void wake_up_new_task(struct task_struct *p)
4737 {
4738 struct rq_flags rf;
4739 struct rq *rq;
4740 int wake_flags = WF_FORK;
4741
4742 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
4743 WRITE_ONCE(p->__state, TASK_RUNNING);
4744 /*
4745 * Fork balancing, do it here and not earlier because:
4746 * - cpus_ptr can change in the fork path
4747 * - any previously selected CPU might disappear through hotplug
4748 *
4749 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
4750 * as we're not fully set-up yet.
4751 */
4752 p->recent_used_cpu = task_cpu(p);
4753 __set_task_cpu(p, select_task_rq(p, task_cpu(p), &wake_flags));
4754 rq = __task_rq_lock(p, &rf);
4755 update_rq_clock(rq);
4756 post_init_entity_util_avg(p);
4757
4758 activate_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_INITIAL);
4759 trace_sched_wakeup_new(p);
4760 wakeup_preempt(rq, p, wake_flags);
4761 if (p->sched_class->task_woken) {
4762 /*
4763 * Nothing relies on rq->lock after this, so it's fine to
4764 * drop it.
4765 */
4766 rq_unpin_lock(rq, &rf);
4767 p->sched_class->task_woken(rq, p);
4768 rq_repin_lock(rq, &rf);
4769 }
4770 task_rq_unlock(rq, p, &rf);
4771 }
4772
4773 #ifdef CONFIG_PREEMPT_NOTIFIERS
4774
4775 static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
4776
4777 void preempt_notifier_inc(void)
4778 {
4779 static_branch_inc(&preempt_notifier_key);
4780 }
4781 EXPORT_SYMBOL_GPL(preempt_notifier_inc);
4782
4783 void preempt_notifier_dec(void)
4784 {
4785 static_branch_dec(&preempt_notifier_key);
4786 }
4787 EXPORT_SYMBOL_GPL(preempt_notifier_dec);
4788
4789 /**
4790 * preempt_notifier_register - tell me when current is being preempted & rescheduled
4791 * @notifier: notifier struct to register
4792 */
4793 void preempt_notifier_register(struct preempt_notifier *notifier)
4794 {
4795 if (!static_branch_unlikely(&preempt_notifier_key))
4796 WARN(1, "registering preempt_notifier while notifiers disabled\n");
4797
4798 hlist_add_head(&notifier->link, &current->preempt_notifiers);
4799 }
4800 EXPORT_SYMBOL_GPL(preempt_notifier_register);
4801
4802 /**
4803 * preempt_notifier_unregister - no longer interested in preemption notifications
4804 * @notifier: notifier struct to unregister
4805 *
4806 * This is *not* safe to call from within a preemption notifier.
4807 */
4808 void preempt_notifier_unregister(struct preempt_notifier *notifier)
4809 {
4810 hlist_del(&notifier->link);
4811 }
4812 EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
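
/*
 * Registration sketch (illustrative; my_ops, my_notifier and the callbacks
 * are hypothetical, not part of this file). A user wanting sched_in/sched_out
 * callbacks for the current task would typically do:
 *
 *	static struct preempt_notifier_ops my_ops = {
 *		.sched_in  = my_sched_in,
 *		.sched_out = my_sched_out,
 *	};
 *
 *	preempt_notifier_inc();
 *	preempt_notifier_init(&my_notifier, &my_ops);
 *	preempt_notifier_register(&my_notifier);
 */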
4813
4814 static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
4815 {
4816 struct preempt_notifier *notifier;
4817
4818 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
4819 notifier->ops->sched_in(notifier, raw_smp_processor_id());
4820 }
4821
4822 static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
4823 {
4824 if (static_branch_unlikely(&preempt_notifier_key))
4825 __fire_sched_in_preempt_notifiers(curr);
4826 }
4827
4828 static void
4829 __fire_sched_out_preempt_notifiers(struct task_struct *curr,
4830 struct task_struct *next)
4831 {
4832 struct preempt_notifier *notifier;
4833
4834 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
4835 notifier->ops->sched_out(notifier, next);
4836 }
4837
4838 static __always_inline void
4839 fire_sched_out_preempt_notifiers(struct task_struct *curr,
4840 struct task_struct *next)
4841 {
4842 if (static_branch_unlikely(&preempt_notifier_key))
4843 __fire_sched_out_preempt_notifiers(curr, next);
4844 }
4845
4846 #else /* !CONFIG_PREEMPT_NOTIFIERS: */
4847
4848 static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
4849 {
4850 }
4851
4852 static inline void
4853 fire_sched_out_preempt_notifiers(struct task_struct *curr,
4854 struct task_struct *next)
4855 {
4856 }
4857
4858 #endif /* !CONFIG_PREEMPT_NOTIFIERS */
4859
4860 static inline void prepare_task(struct task_struct *next)
4861 {
4862 /*
4863 * Claim the task as running, we do this before switching to it
4864 * such that any running task will have this set.
4865 *
4866 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and
4867 * its ordering comment.
4868 */
4869 WRITE_ONCE(next->on_cpu, 1);
4870 }
4871
4872 static inline void finish_task(struct task_struct *prev)
4873 {
4874 /*
4875 * This must be the very last reference to @prev from this CPU. After
4876 * p->on_cpu is cleared, the task can be moved to a different CPU. We
4877 * must ensure this doesn't happen until the switch is completely
4878 * finished.
4879 *
4880 * In particular, the load of prev->state in finish_task_switch() must
4881 * happen before this.
4882 *
4883 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
4884 */
4885 smp_store_release(&prev->on_cpu, 0);
4886 }
4887
4888 static void do_balance_callbacks(struct rq *rq, struct balance_callback *head)
4889 {
4890 void (*func)(struct rq *rq);
4891 struct balance_callback *next;
4892
4893 lockdep_assert_rq_held(rq);
4894
4895 while (head) {
4896 func = (void (*)(struct rq *))head->func;
4897 next = head->next;
4898 head->next = NULL;
4899 head = next;
4900
4901 func(rq);
4902 }
4903 }
4904
4905 static void balance_push(struct rq *rq);
4906
4907 /*
4908 * balance_push_callback is a right abuse of the callback interface and plays
4909 * by significantly different rules.
4910 *
4911 * Where the normal balance_callback's purpose is to be run in the same context
4912 * that queued it (only later, when it's safe to drop rq->lock again),
4913 * balance_push_callback is specifically targeted at __schedule().
4914 *
4915 * This abuse is tolerated because it places all the unlikely/odd cases behind
4916 * a single test, namely: rq->balance_callback == NULL.
4917 */
4918 struct balance_callback balance_push_callback = {
4919 .next = NULL,
4920 .func = balance_push,
4921 };
4922
4923 static inline struct balance_callback *
4924 __splice_balance_callbacks(struct rq *rq, bool split)
4925 {
4926 struct balance_callback *head = rq->balance_callback;
4927
4928 if (likely(!head))
4929 return NULL;
4930
4931 lockdep_assert_rq_held(rq);
4932 /*
4933 * Must not take balance_push_callback off the list when
4934 * splice_balance_callbacks() and balance_callbacks() are not
4935 * in the same rq->lock section.
4936 *
4937 * In that case it would be possible for __schedule() to interleave
4938 * and observe the list empty.
4939 */
4940 if (split && head == &balance_push_callback)
4941 head = NULL;
4942 else
4943 rq->balance_callback = NULL;
4944
4945 return head;
4946 }
4947
4948 struct balance_callback *splice_balance_callbacks(struct rq *rq)
4949 {
4950 return __splice_balance_callbacks(rq, true);
4951 }
4952
4953 static void __balance_callbacks(struct rq *rq)
4954 {
4955 do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
4956 }
4957
4958 void balance_callbacks(struct rq *rq, struct balance_callback *head)
4959 {
4960 unsigned long flags;
4961
4962 if (unlikely(head)) {
4963 raw_spin_rq_lock_irqsave(rq, flags);
4964 do_balance_callbacks(rq, head);
4965 raw_spin_rq_unlock_irqrestore(rq, flags);
4966 }
4967 }
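
/*
 * Typical caller pattern (sketch): splice the callbacks while still holding
 * rq->lock, drop the lock, then run them -- roughly what the setscheduler
 * path does:
 *
 *	head = splice_balance_callbacks(rq);
 *	task_rq_unlock(rq, p, &rf);
 *	balance_callbacks(rq, head);
 */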
4968
4969 static inline void
4970 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
4971 {
4972 /*
4973 * The runqueue lock will be released by the next
4974 * task (which is an invalid locking op, but in the case
4975 * of the scheduler it's an obvious special-case), so we
4976 * do an early lockdep release here:
4977 */
4978 rq_unpin_lock(rq, rf);
4979 spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_);
4980 #ifdef CONFIG_DEBUG_SPINLOCK
4981 /* this is a valid case when another task releases the spinlock */
4982 rq_lockp(rq)->owner = next;
4983 #endif
4984 }
4985
4986 static inline void finish_lock_switch(struct rq *rq)
4987 {
4988 /*
4989 * If we are tracking spinlock dependencies then we have to
4990 * fix up the runqueue lock - which gets 'carried over' from
4991 * prev into current:
4992 */
4993 spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_);
4994 __balance_callbacks(rq);
4995 raw_spin_rq_unlock_irq(rq);
4996 }
4997
4998 /*
4999 * NOP if the arch has not defined these:
5000 */
5001
5002 #ifndef prepare_arch_switch
5003 # define prepare_arch_switch(next) do { } while (0)
5004 #endif
5005
5006 #ifndef finish_arch_post_lock_switch
5007 # define finish_arch_post_lock_switch() do { } while (0)
5008 #endif
5009
5010 static inline void kmap_local_sched_out(void)
5011 {
5012 #ifdef CONFIG_KMAP_LOCAL
5013 if (unlikely(current->kmap_ctrl.idx))
5014 __kmap_local_sched_out();
5015 #endif
5016 }
5017
5018 static inline void kmap_local_sched_in(void)
5019 {
5020 #ifdef CONFIG_KMAP_LOCAL
5021 if (unlikely(current->kmap_ctrl.idx))
5022 __kmap_local_sched_in();
5023 #endif
5024 }
5025
5026 /**
5027 * prepare_task_switch - prepare to switch tasks
5028 * @rq: the runqueue preparing to switch
5029 * @prev: the current task that is being switched out
5030 * @next: the task we are going to switch to.
5031 *
5032 * This is called with the rq lock held and interrupts off. It must
5033 * be paired with a subsequent finish_task_switch after the context
5034 * switch.
5035 *
5036 * prepare_task_switch sets up locking and calls architecture specific
5037 * hooks.
5038 */
5039 static inline void
5040 prepare_task_switch(struct rq *rq, struct task_struct *prev,
5041 struct task_struct *next)
5042 {
5043 kcov_prepare_switch(prev);
5044 sched_info_switch(rq, prev, next);
5045 perf_event_task_sched_out(prev, next);
5046 fire_sched_out_preempt_notifiers(prev, next);
5047 kmap_local_sched_out();
5048 prepare_task(next);
5049 prepare_arch_switch(next);
5050 }
5051
5052 /**
5053 * finish_task_switch - clean up after a task-switch
5054 * @prev: the thread we just switched away from.
5055 *
5056 * finish_task_switch must be called after the context switch, paired
5057 * with a prepare_task_switch call before the context switch.
5058 * finish_task_switch will reconcile locking set up by prepare_task_switch,
5059 * and do any other architecture-specific cleanup actions.
5060 *
5061 * Note that we may have delayed dropping an mm in context_switch(). If
5062 * so, we finish that here outside of the runqueue lock. (Doing it
5063 * with the lock held can cause deadlocks; see schedule() for
5064 * details.)
5065 *
5066 * The context switch has flipped the stack from under us and restored the
5067 * local variables which were saved when this task called schedule() in the
5068 * past. 'prev == current' is still correct but we need to recalculate this_rq
5069 * because prev may have moved to another CPU.
5070 */
5071 static struct rq *finish_task_switch(struct task_struct *prev)
5072 __releases(rq->lock)
5073 {
5074 struct rq *rq = this_rq();
5075 struct mm_struct *mm = rq->prev_mm;
5076 unsigned int prev_state;
5077
5078 /*
5079 * The previous task will have left us with a preempt_count of 2
5080 * because it left us after:
5081 *
5082 * schedule()
5083 * preempt_disable(); // 1
5084 * __schedule()
5085 * raw_spin_lock_irq(&rq->lock) // 2
5086 *
5087 * Also, see FORK_PREEMPT_COUNT.
5088 */
5089 if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
5090 "corrupted preempt_count: %s/%d/0x%x\n",
5091 current->comm, current->pid, preempt_count()))
5092 preempt_count_set(FORK_PREEMPT_COUNT);
5093
5094 rq->prev_mm = NULL;
5095
5096 /*
5097 * A task struct has one reference for the use as "current".
5098 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
5099 * schedule one last time. The schedule call will never return, and
5100 * the scheduled task must drop that reference.
5101 *
5102 * We must observe prev->state before clearing prev->on_cpu (in
5103 * finish_task), otherwise a concurrent wakeup can get prev
5104 * running on another CPU and we could race with its RUNNING -> DEAD
5105 * transition, resulting in a double drop.
5106 */
5107 prev_state = READ_ONCE(prev->__state);
5108 vtime_task_switch(prev);
5109 perf_event_task_sched_in(prev, current);
5110 finish_task(prev);
5111 tick_nohz_task_switch();
5112 finish_lock_switch(rq);
5113 finish_arch_post_lock_switch();
5114 kcov_finish_switch(current);
5115 /*
5116 * kmap_local_sched_out() is invoked with rq::lock held and
5117 * interrupts disabled. There is no requirement for that, but the
5118 * sched out code does not have an interrupt enabled section.
5119 * Restoring the maps on sched in does not require interrupts being
5120 * disabled either.
5121 */
5122 kmap_local_sched_in();
5123
5124 fire_sched_in_preempt_notifiers(current);
5125 /*
5126 * When switching through a kernel thread, the loop in
5127 * membarrier_{private,global}_expedited() may have observed that
5128 * kernel thread and not issued an IPI. It is therefore possible to
5129 * schedule between user->kernel->user threads without passing through
5130 * switch_mm(). Membarrier requires a barrier after storing to
5131 * rq->curr, before returning to userspace, so provide them here:
5132 *
5133 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
5134 * provided by mmdrop_lazy_tlb(),
5135 * - a sync_core for SYNC_CORE.
5136 */
5137 if (mm) {
5138 membarrier_mm_sync_core_before_usermode(mm);
5139 mmdrop_lazy_tlb_sched(mm);
5140 }
5141
5142 if (unlikely(prev_state == TASK_DEAD)) {
5143 if (prev->sched_class->task_dead)
5144 prev->sched_class->task_dead(prev);
5145
5146 /*
5147 * sched_ext_dead() must come before cgroup_task_dead() to
5148 * prevent cgroups from being removed while their member tasks are
5149 * visible to SCX schedulers.
5150 */
5151 sched_ext_dead(prev);
5152 cgroup_task_dead(prev);
5153
5154 /* Task is done with its stack. */
5155 put_task_stack(prev);
5156
5157 put_task_struct_rcu_user(prev);
5158 }
5159
5160 return rq;
5161 }
5162
5163 /**
5164 * schedule_tail - first thing a freshly forked thread must call.
5165 * @prev: the thread we just switched away from.
5166 */
5167 asmlinkage __visible void schedule_tail(struct task_struct *prev)
5168 __releases(rq->lock)
5169 {
5170 /*
5171 * New tasks start with FORK_PREEMPT_COUNT, see there and
5172 * finish_task_switch() for details.
5173 *
5174 * finish_task_switch() will drop rq->lock() and lower preempt_count
5175 * and the preempt_enable() will end up enabling preemption (on
5176 * PREEMPT_COUNT kernels).
5177 */
5178
5179 finish_task_switch(prev);
5180 /*
5181 * This is a special case: the newly created task has just
5182 * switched the context for the first time. It is returning from
5183 * schedule for the first time in this path.
5184 */
5185 trace_sched_exit_tp(true);
5186 preempt_enable();
5187
5188 if (current->set_child_tid)
5189 put_user(task_pid_vnr(current), current->set_child_tid);
5190
5191 calculate_sigpending();
5192 }
5193
5194 /*
5195 * context_switch - switch to the new MM and the new thread's register state.
5196 */
5197 static __always_inline struct rq *
5198 context_switch(struct rq *rq, struct task_struct *prev,
5199 struct task_struct *next, struct rq_flags *rf)
5200 {
5201 prepare_task_switch(rq, prev, next);
5202
5203 /*
5204 * For paravirt, this is coupled with an exit in switch_to to
5205 * combine the page table reload and the switch backend into
5206 * one hypercall.
5207 */
5208 arch_start_context_switch(prev);
5209
5210 /*
5211 * kernel -> kernel lazy + transfer active
5212 * user -> kernel lazy + mmgrab_lazy_tlb() active
5213 *
5214 * kernel -> user switch + mmdrop_lazy_tlb() active
5215 * user -> user switch
5216 */
5217 if (!next->mm) { // to kernel
5218 enter_lazy_tlb(prev->active_mm, next);
5219
5220 next->active_mm = prev->active_mm;
5221 if (prev->mm) // from user
5222 mmgrab_lazy_tlb(prev->active_mm);
5223 else
5224 prev->active_mm = NULL;
5225 } else { // to user
5226 membarrier_switch_mm(rq, prev->active_mm, next->mm);
5227 /*
5228 * sys_membarrier() requires an smp_mb() between setting
5229 * rq->curr / membarrier_switch_mm() and returning to userspace.
5230 *
5231 * The below provides this either through switch_mm(), or in
5232 * case 'prev->active_mm == next->mm' through
5233 * finish_task_switch()'s mmdrop().
5234 */
5235 switch_mm_irqs_off(prev->active_mm, next->mm, next);
5236 lru_gen_use_mm(next->mm);
5237
5238 if (!prev->mm) { // from kernel
5239 /* will mmdrop_lazy_tlb() in finish_task_switch(). */
5240 rq->prev_mm = prev->active_mm;
5241 prev->active_mm = NULL;
5242 }
5243 }
5244
5245 mm_cid_switch_to(prev, next);
5246
5247 /*
5248 * Tell rseq that the task was scheduled in. Must be after
5249 * mm_cid_switch_to() to get the TIF flag set.
5250 */
5251 rseq_sched_switch_event(next);
5252
5253 prepare_lock_switch(rq, next, rf);
5254
5255 /* Here we just switch the register state and the stack. */
5256 switch_to(prev, next, prev);
5257 barrier();
5258
5259 return finish_task_switch(prev);
5260 }
5261
5262 /*
5263 * nr_running and nr_context_switches:
5264 *
5265 * externally visible scheduler statistics: current number of runnable
5266 * threads, total number of context switches performed since bootup.
5267 */
5268 unsigned int nr_running(void)
5269 {
5270 unsigned int i, sum = 0;
5271
5272 for_each_online_cpu(i)
5273 sum += cpu_rq(i)->nr_running;
5274
5275 return sum;
5276 }
5277
5278 /*
5279 * Check if only the current task is running on the CPU.
5280 *
5281 * Caution: this function does not check that the caller has disabled
5282 * preemption, thus the result might have a time-of-check-to-time-of-use
5283 * race. The caller is responsible to use it correctly, for example:
5284 *
5285 * - from a non-preemptible section (of course)
5286 *
5287 * - from a thread that is bound to a single CPU
5288 *
5289 * - in a loop with very short iterations (e.g. a polling loop)
5290 */
5291 bool single_task_running(void)
5292 {
5293 return raw_rq()->nr_running == 1;
5294 }
5295 EXPORT_SYMBOL(single_task_running);
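
/*
 * Usage sketch (illustrative; poll_hw() is a hypothetical busy-poll body):
 * a pinned polling loop that backs off as soon as other work shows up on
 * this CPU:
 *
 *	while (!done) {
 *		poll_hw();
 *		if (!single_task_running())
 *			cond_resched();
 *	}
 */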
5296
5297 unsigned long long nr_context_switches_cpu(int cpu)
5298 {
5299 return cpu_rq(cpu)->nr_switches;
5300 }
5301
5302 unsigned long long nr_context_switches(void)
5303 {
5304 int i;
5305 unsigned long long sum = 0;
5306
5307 for_each_possible_cpu(i)
5308 sum += cpu_rq(i)->nr_switches;
5309
5310 return sum;
5311 }
5312
5313 /*
5314 * Consumers of these two interfaces, like for example the cpuidle menu
5315 * governor, are using nonsensical data: they prefer shallow idle state selection
5316 * for a CPU that has IO-wait, even though the waiting task might not even end up
5317 * running on that CPU when it does become runnable.
5318 */
5319
5320 unsigned int nr_iowait_cpu(int cpu)
5321 {
5322 return atomic_read(&cpu_rq(cpu)->nr_iowait);
5323 }
5324
5325 /*
5326 * IO-wait accounting, and how it's mostly bollocks (on SMP).
5327 *
5328 * The idea behind IO-wait accounting is to account the idle time that we could
5329 * have spent running if it were not for IO. That is, if we were to improve the
5330 * storage performance, we'd have a proportional reduction in IO-wait time.
5331 *
5332 * This all works nicely on UP, where, when a task blocks on IO, we account
5333 * idle time as IO-wait, because if the storage were faster, it could've been
5334 * running and we'd not be idle.
5335 *
5336 * This has been extended to SMP, by doing the same for each CPU. This however
5337 * is broken.
5338 *
5339 * Imagine for instance the case where two tasks block on one CPU, only the one
5340 * CPU will have IO-wait accounted, while the other has regular idle. Even
5341 * though, if the storage were faster, both could've run at the same time,
5342 * utilising both CPUs.
5343 *
5344 * This means that, when looking globally, the current IO-wait accounting on
5345 * SMP is a lower bound, due to under-accounting.
5346 *
5347 * Worse, since the numbers are provided per CPU, they are sometimes
5348 * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
5349 * associated with any one particular CPU, it can wake to another CPU than it
5350 * blocked on. This means the per CPU IO-wait number is meaningless.
5351 *
5352 * Task CPU affinities can make all that even more 'interesting'.
5353 */
5354
5355 unsigned int nr_iowait(void)
5356 {
5357 unsigned int i, sum = 0;
5358
5359 for_each_possible_cpu(i)
5360 sum += nr_iowait_cpu(i);
5361
5362 return sum;
5363 }
5364
5365 /*
5366 * sched_exec - execve() is a valuable balancing opportunity, because at
5367 * this point the task has the smallest effective memory and cache footprint.
5368 */
5369 void sched_exec(void)
5370 {
5371 struct task_struct *p = current;
5372 struct migration_arg arg;
5373 int dest_cpu;
5374
5375 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
5376 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC);
5377 if (dest_cpu == smp_processor_id())
5378 return;
5379
5380 if (unlikely(!cpu_active(dest_cpu)))
5381 return;
5382
5383 arg = (struct migration_arg){ p, dest_cpu };
5384 }
5385 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
5386 }
5387
5388 DEFINE_PER_CPU(struct kernel_stat, kstat);
5389 DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
5390
5391 EXPORT_PER_CPU_SYMBOL(kstat);
5392 EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
5393
5394 /*
5395 * The function fair_sched_class.update_curr accesses the struct curr
5396 * and its field curr->exec_start; when called from task_sched_runtime(),
5397 * we observe a high rate of cache misses in practice.
5398 * Prefetching this data results in improved performance.
5399 */
5400 static inline void prefetch_curr_exec_start(struct task_struct *p)
5401 {
5402 #ifdef CONFIG_FAIR_GROUP_SCHED
5403 struct sched_entity *curr = p->se.cfs_rq->curr;
5404 #else
5405 struct sched_entity *curr = task_rq(p)->cfs.curr;
5406 #endif
5407 prefetch(curr);
5408 prefetch(&curr->exec_start);
5409 }
5410
5411 /*
5412 * Return accounted runtime for the task.
5413 * In case the task is currently running, return the runtime plus current's
5414 * pending runtime that has not been accounted yet.
5415 */
5416 unsigned long long task_sched_runtime(struct task_struct *p)
5417 {
5418 struct rq_flags rf;
5419 struct rq *rq;
5420 u64 ns;
5421
5422 #ifdef CONFIG_64BIT
5423 /*
5424 * 64-bit doesn't need locks to atomically read a 64-bit value.
5425 * So we have an optimization chance when the task's delta_exec is 0.
5426 * Reading ->on_cpu is racy, but this is OK.
5427 *
5428 * If we race with it leaving CPU, we'll take a lock. So we're correct.
5429 * If we race with it entering CPU, unaccounted time is 0. This is
5430 * indistinguishable from the read occurring a few cycles earlier.
5431 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
5432 * been accounted, so we're correct here as well.
5433 */
5434 if (!p->on_cpu || !task_on_rq_queued(p))
5435 return p->se.sum_exec_runtime;
5436 #endif
5437
5438 rq = task_rq_lock(p, &rf);
5439 /*
5440 * Must be ->curr _and_ ->on_rq. If dequeued, we would
5441 * project cycles that may never be accounted to this
5442 * thread, breaking clock_gettime().
5443 */
5444 if (task_current_donor(rq, p) && task_on_rq_queued(p)) {
5445 prefetch_curr_exec_start(p);
5446 update_rq_clock(rq);
5447 p->sched_class->update_curr(rq);
5448 }
5449 ns = p->se.sum_exec_runtime;
5450 task_rq_unlock(rq, p, &rf);
5451
5452 return ns;
5453 }
5454
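/*
 * How long has need_resched() been set on this runqueue without an actual
 * reschedule? Returns the latency in nanoseconds once it exceeds
 * sysctl_resched_latency_warn_ms, or 0 while below the threshold, during
 * boot, when warnings are disabled, or after a one-shot warning has fired.
 */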
5455 static u64 cpu_resched_latency(struct rq *rq)
5456 {
5457 int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms);
5458 u64 resched_latency, now = rq_clock(rq);
5459 static bool warned_once;
5460
5461 if (sysctl_resched_latency_warn_once && warned_once)
5462 return 0;
5463
5464 if (!need_resched() || !latency_warn_ms)
5465 return 0;
5466
5467 if (system_state == SYSTEM_BOOTING)
5468 return 0;
5469
5470 if (!rq->last_seen_need_resched_ns) {
5471 rq->last_seen_need_resched_ns = now;
5472 rq->ticks_without_resched = 0;
5473 return 0;
5474 }
5475
5476 rq->ticks_without_resched++;
5477 resched_latency = now - rq->last_seen_need_resched_ns;
5478 if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC)
5479 return 0;
5480
5481 warned_once = true;
5482
5483 return resched_latency;
5484 }
5485
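/*
 * Boot-time override for the resched latency warning threshold, i.e.
 * "resched_latency_warn_ms=<ms>" on the kernel command line.
 */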
5486 static int __init setup_resched_latency_warn_ms(char *str)
5487 {
5488 long val;
5489
5490 if ((kstrtol(str, 0, &val))) {
5491 pr_warn("Unable to set resched_latency_warn_ms\n");
5492 return 1;
5493 }
5494
5495 sysctl_resched_latency_warn_ms = val;
5496 return 1;
5497 }
5498 __setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms);
5499
5500 /*
5501 * This function gets called by the timer code, with HZ frequency.
5502 * We call it with interrupts disabled.
5503 */
5504 void sched_tick(void)
5505 {
5506 int cpu = smp_processor_id();
5507 struct rq *rq = cpu_rq(cpu);
5508 /* accounting goes to the donor task */
5509 struct task_struct *donor;
5510 struct rq_flags rf;
5511 unsigned long hw_pressure;
5512 u64 resched_latency;
5513
5514 if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE))
5515 arch_scale_freq_tick();
5516
5517 sched_clock_tick();
5518
5519 rq_lock(rq, &rf);
5520 donor = rq->donor;
5521
5522 psi_account_irqtime(rq, donor, NULL);
5523
5524 update_rq_clock(rq);
5525 hw_pressure = arch_scale_hw_pressure(cpu_of(rq));
5526 update_hw_load_avg(rq_clock_task(rq), rq, hw_pressure);
5527
5528 if (dynamic_preempt_lazy() && tif_test_bit(TIF_NEED_RESCHED_LAZY))
5529 resched_curr(rq);
5530
5531 donor->sched_class->task_tick(rq, donor, 0);
5532 if (sched_feat(LATENCY_WARN))
5533 resched_latency = cpu_resched_latency(rq);
5534 calc_global_load_tick(rq);
5535 sched_core_tick(rq);
5536 scx_tick(rq);
5537
5538 rq_unlock(rq, &rf);
5539
5540 if (sched_feat(LATENCY_WARN) && resched_latency)
5541 resched_latency_warn(cpu, resched_latency);
5542
5543 perf_event_task_tick();
5544
5545 if (donor->flags & PF_WQ_WORKER)
5546 wq_worker_tick(donor);
5547
5548 if (!scx_switched_all()) {
5549 rq->idle_balance = idle_cpu(cpu);
5550 sched_balance_trigger(rq);
5551 }
5552 }
5553
5554 #ifdef CONFIG_NO_HZ_FULL
5555
5556 struct tick_work {
5557 int cpu;
5558 atomic_t state;
5559 struct delayed_work work;
5560 };
5561 /* Values for ->state, see diagram below. */
5562 #define TICK_SCHED_REMOTE_OFFLINE 0
5563 #define TICK_SCHED_REMOTE_OFFLINING 1
5564 #define TICK_SCHED_REMOTE_RUNNING 2
5565
5566 /*
5567 * State diagram for ->state:
5568 *
5569 *
5570 * TICK_SCHED_REMOTE_OFFLINE
5571 * | ^
5572 * | |
5573 * | | sched_tick_remote()
5574 * | |
5575 * | |
5576 * +--TICK_SCHED_REMOTE_OFFLINING
5577 * | ^
5578 * | |
5579 * sched_tick_start() | | sched_tick_stop()
5580 * | |
5581 * V |
5582 * TICK_SCHED_REMOTE_RUNNING
5583 *
5584 *
5585 * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
5586 * and sched_tick_start() are happy to leave the state in RUNNING.
5587 */
5588
5589 static struct tick_work __percpu *tick_work_cpu;
5590
5591 static void sched_tick_remote(struct work_struct *work)
5592 {
5593 struct delayed_work *dwork = to_delayed_work(work);
5594 struct tick_work *twork = container_of(dwork, struct tick_work, work);
5595 int cpu = twork->cpu;
5596 struct rq *rq = cpu_rq(cpu);
5597 int os;
5598
5599 /*
5600 * Handle the tick only if it appears the remote CPU is running in full
5601 * dynticks mode. The check is racy by nature, but missing a tick or
5602 * having one too many is no big deal because the scheduler tick updates
5603 * statistics and checks timeslices in a time-independent way, regardless
5604 * of when exactly it is running.
5605 */
5606 if (tick_nohz_tick_stopped_cpu(cpu)) {
5607 guard(rq_lock_irq)(rq);
5608 struct task_struct *curr = rq->curr;
5609
5610 if (cpu_online(cpu)) {
5611 /*
5612 * Since this is a remote tick for full dynticks mode,
5613 * we are always sure that there is no proxy (only a
5614 * single task is running).
5615 */
5616 WARN_ON_ONCE(rq->curr != rq->donor);
5617 update_rq_clock(rq);
5618
5619 if (!is_idle_task(curr)) {
5620 /*
5621 * Make sure the next tick runs within a
5622 * reasonable amount of time.
5623 */
5624 u64 delta = rq_clock_task(rq) - curr->se.exec_start;
5625 WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 30);
5626 }
5627 curr->sched_class->task_tick(rq, curr, 0);
5628
5629 calc_load_nohz_remote(rq);
5630 }
5631 }
5632
5633 /*
5634 * Run the remote tick once per second (1Hz). This arbitrary
5635 * frequency is large enough to avoid overload but short enough
5636 * to keep scheduler internal stats reasonably up to date. But
5637 * first update state to reflect hotplug activity if required.
5638 */
5639 os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
5640 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
5641 if (os == TICK_SCHED_REMOTE_RUNNING)
5642 queue_delayed_work(system_unbound_wq, dwork, HZ);
5643 }
5644
5645 static void sched_tick_start(int cpu)
5646 {
5647 int os;
5648 struct tick_work *twork;
5649
5650 if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE))
5651 return;
5652
5653 WARN_ON_ONCE(!tick_work_cpu);
5654
5655 twork = per_cpu_ptr(tick_work_cpu, cpu);
5656 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
5657 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
5658 if (os == TICK_SCHED_REMOTE_OFFLINE) {
5659 twork->cpu = cpu;
5660 INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
5661 queue_delayed_work(system_unbound_wq, &twork->work, HZ);
5662 }
5663 }
5664
5665 #ifdef CONFIG_HOTPLUG_CPU
5666 static void sched_tick_stop(int cpu)
5667 {
5668 struct tick_work *twork;
5669 int os;
5670
5671 if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE))
5672 return;
5673
5674 WARN_ON_ONCE(!tick_work_cpu);
5675
5676 twork = per_cpu_ptr(tick_work_cpu, cpu);
5677 /* There cannot be competing actions, but don't rely on stop-machine. */
5678 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
5679 WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
5680 /* Don't cancel, as this would mess up the state machine. */
5681 }
5682 #endif /* CONFIG_HOTPLUG_CPU */
5683
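/*
 * Allocate the per-CPU tick_work state that drives the offloaded (remote)
 * tick for nohz_full CPUs; the actual work is queued later from
 * sched_tick_start().
 */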
5684 int __init sched_tick_offload_init(void)
5685 {
5686 tick_work_cpu = alloc_percpu(struct tick_work);
5687 BUG_ON(!tick_work_cpu);
5688 return 0;
5689 }
5690
5691 #else /* !CONFIG_NO_HZ_FULL: */
5692 static inline void sched_tick_start(int cpu) { }
5693 static inline void sched_tick_stop(int cpu) { }
5694 #endif /* !CONFIG_NO_HZ_FULL */
5695
5696 #if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
5697 defined(CONFIG_TRACE_PREEMPT_TOGGLE))
5698 /*
5699 * If the value passed in is equal to the current preempt count
5700 * then we just disabled preemption. Start timing the latency.
5701 */
5702 static inline void preempt_latency_start(int val)
5703 {
5704 if (preempt_count() == val) {
5705 unsigned long ip = get_lock_parent_ip();
5706 #ifdef CONFIG_DEBUG_PREEMPT
5707 current->preempt_disable_ip = ip;
5708 #endif
5709 trace_preempt_off(CALLER_ADDR0, ip);
5710 }
5711 }
5712
5713 void preempt_count_add(int val)
5714 {
5715 #ifdef CONFIG_DEBUG_PREEMPT
5716 /*
5717 * Underflow?
5718 */
5719 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
5720 return;
5721 #endif
5722 __preempt_count_add(val);
5723 #ifdef CONFIG_DEBUG_PREEMPT
5724 /*
5725 * Spinlock count overflowing soon?
5726 */
5727 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
5728 PREEMPT_MASK - 10);
5729 #endif
5730 preempt_latency_start(val);
5731 }
5732 EXPORT_SYMBOL(preempt_count_add);
5733 NOKPROBE_SYMBOL(preempt_count_add);
5734
5735 /*
5736 * If the value passed in equals the current preempt count
5737 * then we just enabled preemption. Stop timing the latency.
5738 */
5739 static inline void preempt_latency_stop(int val)
5740 {
5741 if (preempt_count() == val)
5742 trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
5743 }
5744
5745 void preempt_count_sub(int val)
5746 {
5747 #ifdef CONFIG_DEBUG_PREEMPT
5748 /*
5749 * Underflow?
5750 */
5751 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
5752 return;
5753 /*
5754 * Is the spinlock portion underflowing?
5755 */
5756 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
5757 !(preempt_count() & PREEMPT_MASK)))
5758 return;
5759 #endif
5760
5761 preempt_latency_stop(val);
5762 __preempt_count_sub(val);
5763 }
5764 EXPORT_SYMBOL(preempt_count_sub);
5765 NOKPROBE_SYMBOL(preempt_count_sub);
5766
5767 #else
5768 static inline void preempt_latency_start(int val) { }
5769 static inline void preempt_latency_stop(int val) { }
5770 #endif
5771
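/*
 * Return the instruction pointer recorded when @p last disabled preemption,
 * or 0 when CONFIG_DEBUG_PREEMPT does not track it.
 */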
5772 static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
5773 {
5774 #ifdef CONFIG_DEBUG_PREEMPT
5775 return p->preempt_disable_ip;
5776 #else
5777 return 0;
5778 #endif
5779 }
5780
5781 /*
5782 * Print scheduling while atomic bug:
5783 */
5784 static noinline void __schedule_bug(struct task_struct *prev)
5785 {
5786 /* Save this before calling printk(), since that will clobber it */
5787 unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
5788
5789 if (oops_in_progress)
5790 return;
5791
5792 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
5793 prev->comm, prev->pid, preempt_count());
5794
5795 debug_show_held_locks(prev);
5796 print_modules();
5797 if (irqs_disabled())
5798 print_irqtrace_events(prev);
5799 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
5800 pr_err("Preemption disabled at:");
5801 print_ip_sym(KERN_ERR, preempt_disable_ip);
5802 }
5803 check_panic_on_warn("scheduling while atomic");
5804
5805 dump_stack();
5806 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
5807 }
5808
5809 /*
5810 * Various schedule()-time debugging checks and statistics:
5811 */
5812 static inline void schedule_debug(struct task_struct *prev, bool preempt)
5813 {
5814 #ifdef CONFIG_SCHED_STACK_END_CHECK
5815 if (task_stack_end_corrupted(prev))
5816 panic("corrupted stack end detected inside scheduler\n");
5817
5818 if (task_scs_end_corrupted(prev))
5819 panic("corrupted shadow stack detected inside scheduler\n");
5820 #endif
5821
5822 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
5823 if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) {
5824 printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
5825 prev->comm, prev->pid, prev->non_block_count);
5826 dump_stack();
5827 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
5828 }
5829 #endif
5830
5831 if (unlikely(in_atomic_preempt_off())) {
5832 __schedule_bug(prev);
5833 preempt_count_set(PREEMPT_DISABLED);
5834 }
5835 rcu_sleep_check();
5836 WARN_ON_ONCE(ct_state() == CT_STATE_USER);
5837
5838 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
5839
5840 schedstat_inc(this_rq()->sched_count);
5841 }
5842
5843 static void prev_balance(struct rq *rq, struct task_struct *prev,
5844 struct rq_flags *rf)
5845 {
5846 const struct sched_class *start_class = prev->sched_class;
5847 const struct sched_class *class;
5848
5849 /*
5850 * We must do the balancing pass before put_prev_task(), such
5851 * that when we release the rq->lock the task is in the same
5852 * state as before we took rq->lock.
5853 *
5854 * We can terminate the balance pass as soon as we know there is
5855 * a runnable task of @class priority or higher.
5856 */
5857 for_active_class_range(class, start_class, &idle_sched_class) {
5858 if (class->balance && class->balance(rq, prev, rf))
5859 break;
5860 }
5861 }
5862
5863 /*
5864 * Pick up the highest-prio task:
5865 */
5866 static inline struct task_struct *
5867 __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
5868 {
5869 const struct sched_class *class;
5870 struct task_struct *p;
5871
5872 rq->dl_server = NULL;
5873
5874 if (scx_enabled())
5875 goto restart;
5876
5877 /*
5878 * Optimization: we know that if all tasks are in the fair class we can
5879 * call that function directly, but only if the @prev task wasn't of a
5880 * higher scheduling class, because otherwise those lose the
5881 * opportunity to pull in more work from other CPUs.
5882 */
5883 if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) &&
5884 rq->nr_running == rq->cfs.h_nr_queued)) {
5885
5886 p = pick_next_task_fair(rq, prev, rf);
5887 if (unlikely(p == RETRY_TASK))
5888 goto restart;
5889
5890 /* Assume the next prioritized class is idle_sched_class */
5891 if (!p) {
5892 p = pick_task_idle(rq, rf);
5893 put_prev_set_next_task(rq, prev, p);
5894 }
5895
5896 return p;
5897 }
5898
5899 restart:
5900 prev_balance(rq, prev, rf);
5901
5902 for_each_active_class(class) {
5903 if (class->pick_next_task) {
5904 p = class->pick_next_task(rq, prev, rf);
5905 if (unlikely(p == RETRY_TASK))
5906 goto restart;
5907 if (p)
5908 return p;
5909 } else {
5910 p = class->pick_task(rq, rf);
5911 if (unlikely(p == RETRY_TASK))
5912 goto restart;
5913 if (p) {
5914 put_prev_set_next_task(rq, prev, p);
5915 return p;
5916 }
5917 }
5918 }
5919
5920 BUG(); /* The idle class should always have a runnable task. */
5921 }
5922
5923 #ifdef CONFIG_SCHED_CORE
5924 static inline bool is_task_rq_idle(struct task_struct *t)
5925 {
5926 return (task_rq(t)->idle == t);
5927 }
5928
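/*
 * Core-scheduling cookie compatibility: two tasks may share an SMT core
 * only if their cookies match; the idle task is compatible with anything.
 */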
5929 static inline bool cookie_equals(struct task_struct *a, unsigned long cookie)
5930 {
5931 return is_task_rq_idle(a) || (a->core_cookie == cookie);
5932 }
5933
5934 static inline bool cookie_match(struct task_struct *a, struct task_struct *b)
5935 {
5936 if (is_task_rq_idle(a) || is_task_rq_idle(b))
5937 return true;
5938
5939 return a->core_cookie == b->core_cookie;
5940 }
5941
5942 /*
5943 * Careful; this can return RETRY_TASK, it does not include the retry-loop
5944 * itself due to the whole SMT pick retry thing below.
5945 */
5946 static inline struct task_struct *pick_task(struct rq *rq, struct rq_flags *rf)
5947 {
5948 const struct sched_class *class;
5949 struct task_struct *p;
5950
5951 rq->dl_server = NULL;
5952
5953 for_each_active_class(class) {
5954 p = class->pick_task(rq, rf);
5955 if (p)
5956 return p;
5957 }
5958
5959 BUG(); /* The idle class should always have a runnable task. */
5960 }
5961
5962 extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
5963
5964 static void queue_core_balance(struct rq *rq);
5965
5966 static struct task_struct *
5967 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
5968 {
5969 struct task_struct *next, *p, *max;
5970 const struct cpumask *smt_mask;
5971 bool fi_before = false;
5972 bool core_clock_updated = (rq == rq->core);
5973 unsigned long cookie;
5974 int i, cpu, occ = 0;
5975 struct rq *rq_i;
5976 bool need_sync;
5977
5978 if (!sched_core_enabled(rq))
5979 return __pick_next_task(rq, prev, rf);
5980
5981 cpu = cpu_of(rq);
5982
5983 /* Stopper task is switching into idle, no need for core-wide selection. */
5984 if (cpu_is_offline(cpu)) {
5985 /*
5986 * Reset core_pick so that we don't enter the fastpath when
5987 * coming online. core_pick would already be migrated to
5988 * another cpu during offline.
5989 */
5990 rq->core_pick = NULL;
5991 rq->core_dl_server = NULL;
5992 return __pick_next_task(rq, prev, rf);
5993 }
5994
5995 /*
5996 * If there were no {en,de}queues since we picked (IOW, the task
5997 * pointers are all still valid), and we haven't scheduled the last
5998 * pick yet, do so now.
5999 *
6000 * rq->core_pick can be NULL if no selection was made for a CPU because
6001 * it was either offline or went offline during a sibling's core-wide
6002 * selection. In this case, do a core-wide selection.
6003 */
6004 if (rq->core->core_pick_seq == rq->core->core_task_seq &&
6005 rq->core->core_pick_seq != rq->core_sched_seq &&
6006 rq->core_pick) {
6007 WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq);
6008
6009 next = rq->core_pick;
6010 rq->dl_server = rq->core_dl_server;
6011 rq->core_pick = NULL;
6012 rq->core_dl_server = NULL;
6013 goto out_set_next;
6014 }
6015
6016 prev_balance(rq, prev, rf);
6017
6018 smt_mask = cpu_smt_mask(cpu);
6019 need_sync = !!rq->core->core_cookie;
6020
6021 /* reset state */
6022 rq->core->core_cookie = 0UL;
6023 if (rq->core->core_forceidle_count) {
6024 if (!core_clock_updated) {
6025 update_rq_clock(rq->core);
6026 core_clock_updated = true;
6027 }
6028 sched_core_account_forceidle(rq);
6029 /* reset after accounting force idle */
6030 rq->core->core_forceidle_start = 0;
6031 rq->core->core_forceidle_count = 0;
6032 rq->core->core_forceidle_occupation = 0;
6033 need_sync = true;
6034 fi_before = true;
6035 }
6036
6037 /*
6038 * core->core_task_seq, core->core_pick_seq, rq->core_sched_seq
6039 *
6040 * @task_seq guards the task state ({en,de}queues)
6041 * @pick_seq is the @task_seq we did a selection on
6042 * @sched_seq is the @pick_seq we scheduled
6043 *
6044 * However, preemptions can cause multiple picks on the same task set.
6045 * 'Fix' this by also increasing @task_seq for every pick.
6046 */
6047 rq->core->core_task_seq++;
6048
6049 /*
6050 * Optimize for common case where this CPU has no cookies
6051 * and there are no cookied tasks running on siblings.
6052 */
6053 if (!need_sync) {
6054 restart_single:
6055 next = pick_task(rq, rf);
6056 if (unlikely(next == RETRY_TASK))
6057 goto restart_single;
6058 if (!next->core_cookie) {
6059 rq->core_pick = NULL;
6060 rq->core_dl_server = NULL;
6061 /*
6062 * For robustness, update the min_vruntime_fi for
6063 * unconstrained picks as well.
6064 */
6065 WARN_ON_ONCE(fi_before);
6066 task_vruntime_update(rq, next, false);
6067 goto out_set_next;
6068 }
6069 }
6070
6071 /*
6072 * For each thread: do the regular task pick and find the max prio task
6073 * amongst them.
6074 *
6075 * Tie-break prio towards the current CPU
6076 */
6077 restart_multi:
6078 max = NULL;
6079 for_each_cpu_wrap(i, smt_mask, cpu) {
6080 rq_i = cpu_rq(i);
6081
6082 /*
6083 * Current cpu always has its clock updated on entrance to
6084 * pick_next_task(). If the current cpu is not the core,
6085 * the core may also have been updated above.
6086 */
6087 if (i != cpu && (rq_i != rq->core || !core_clock_updated))
6088 update_rq_clock(rq_i);
6089
6090 p = pick_task(rq_i, rf);
6091 if (unlikely(p == RETRY_TASK))
6092 goto restart_multi;
6093
6094 rq_i->core_pick = p;
6095 rq_i->core_dl_server = rq_i->dl_server;
6096
6097 if (!max || prio_less(max, p, fi_before))
6098 max = p;
6099 }
6100
6101 cookie = rq->core->core_cookie = max->core_cookie;
6102
6103 /*
6104 * For each thread: try and find a runnable task that matches @max or
6105 * force idle.
6106 */
6107 for_each_cpu(i, smt_mask) {
6108 rq_i = cpu_rq(i);
6109 p = rq_i->core_pick;
6110
6111 if (!cookie_equals(p, cookie)) {
6112 p = NULL;
6113 if (cookie)
6114 p = sched_core_find(rq_i, cookie);
6115 if (!p)
6116 p = idle_sched_class.pick_task(rq_i, rf);
6117 }
6118
6119 rq_i->core_pick = p;
6120 rq_i->core_dl_server = NULL;
6121
6122 if (p == rq_i->idle) {
6123 if (rq_i->nr_running) {
6124 rq->core->core_forceidle_count++;
6125 if (!fi_before)
6126 rq->core->core_forceidle_seq++;
6127 }
6128 } else {
6129 occ++;
6130 }
6131 }
6132
6133 if (schedstat_enabled() && rq->core->core_forceidle_count) {
6134 rq->core->core_forceidle_start = rq_clock(rq->core);
6135 rq->core->core_forceidle_occupation = occ;
6136 }
6137
6138 rq->core->core_pick_seq = rq->core->core_task_seq;
6139 next = rq->core_pick;
6140 rq->core_sched_seq = rq->core->core_pick_seq;
6141
6142 /* Something should have been selected for current CPU */
6143 WARN_ON_ONCE(!next);
6144
6145 /*
6146 * Reschedule siblings
6147 *
6148 * NOTE: L1TF -- at this point we're no longer running the old task and
6149 * sending an IPI (below) ensures the sibling will no longer be running
6150 * their task. This ensures there is no inter-sibling overlap between
6151 * non-matching user state.
6152 */
6153 for_each_cpu(i, smt_mask) {
6154 rq_i = cpu_rq(i);
6155
6156 /*
6157 * An online sibling might have gone offline before a task
6158 * could be picked for it, or it might be offline but later
6159 * happen to come online, but it's too late and nothing was
6160 * picked for it. That's Ok - it will pick tasks for itself,
6161 * so ignore it.
6162 */
6163 if (!rq_i->core_pick)
6164 continue;
6165
6166 /*
6167 * Update for new !FI->FI transitions, or if continuing to be in !FI:
6168 * fi_before fi update?
6169 * 0 0 1
6170 * 0 1 1
6171 * 1 0 1
6172 * 1 1 0
6173 */
6174 if (!(fi_before && rq->core->core_forceidle_count))
6175 task_vruntime_update(rq_i, rq_i->core_pick, !!rq->core->core_forceidle_count);
6176
6177 rq_i->core_pick->core_occupation = occ;
6178
6179 if (i == cpu) {
6180 rq_i->core_pick = NULL;
6181 rq_i->core_dl_server = NULL;
6182 continue;
6183 }
6184
6185 /* Did we break L1TF mitigation requirements? */
6186 WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick));
6187
6188 if (rq_i->curr == rq_i->core_pick) {
6189 rq_i->core_pick = NULL;
6190 rq_i->core_dl_server = NULL;
6191 continue;
6192 }
6193
6194 resched_curr(rq_i);
6195 }
6196
6197 out_set_next:
6198 put_prev_set_next_task(rq, prev, next);
6199 if (rq->core->core_forceidle_count && next == rq->idle)
6200 queue_core_balance(rq);
6201
6202 return next;
6203 }
6204
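/*
 * Try to pull a task whose cookie matches @this's core-wide cookie from CPU
 * @that, so the (forced) idle CPU @this can run something compatible with
 * its SMT siblings. Returns true if a task was migrated.
 */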
6205 static bool try_steal_cookie(int this, int that)
6206 {
6207 struct rq *dst = cpu_rq(this), *src = cpu_rq(that);
6208 struct task_struct *p;
6209 unsigned long cookie;
6210 bool success = false;
6211
6212 guard(irq)();
6213 guard(double_rq_lock)(dst, src);
6214
6215 cookie = dst->core->core_cookie;
6216 if (!cookie)
6217 return false;
6218
6219 if (dst->curr != dst->idle)
6220 return false;
6221
6222 p = sched_core_find(src, cookie);
6223 if (!p)
6224 return false;
6225
6226 do {
6227 if (p == src->core_pick || p == src->curr)
6228 goto next;
6229
6230 if (!is_cpu_allowed(p, this))
6231 goto next;
6232
6233 if (p->core_occupation > dst->idle->core_occupation)
6234 goto next;
6235 /*
6236 * sched_core_find() and sched_core_next() will ensure
6237 * that task @p is not throttled now, we also need to
6238 * check whether the runqueue of the destination CPU is
6239 * being throttled.
6240 */
6241 if (sched_task_is_throttled(p, this))
6242 goto next;
6243
6244 move_queued_task_locked(src, dst, p);
6245 resched_curr(dst);
6246
6247 success = true;
6248 break;
6249
6250 next:
6251 p = sched_core_next(p, cookie);
6252 } while (p);
6253
6254 return success;
6255 }
6256
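/*
 * Walk the other CPUs in @sd's span, starting just after @cpu, and try to
 * steal a cookie-matched task from each of them until one attempt succeeds
 * or a reschedule becomes pending.
 */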
6257 static bool steal_cookie_task(int cpu, struct sched_domain *sd)
6258 {
6259 int i;
6260
6261 for_each_cpu_wrap(i, sched_domain_span(sd), cpu + 1) {
6262 if (i == cpu)
6263 continue;
6264
6265 if (need_resched())
6266 break;
6267
6268 if (try_steal_cookie(cpu, i))
6269 return true;
6270 }
6271
6272 return false;
6273 }
6274
6275 static void sched_core_balance(struct rq *rq)
6276 {
6277 struct sched_domain *sd;
6278 int cpu = cpu_of(rq);
6279
6280 guard(preempt)();
6281 guard(rcu)();
6282
6283 raw_spin_rq_unlock_irq(rq);
6284 for_each_domain(cpu, sd) {
6285 if (need_resched())
6286 break;
6287
6288 if (steal_cookie_task(cpu, sd))
6289 break;
6290 }
6291 raw_spin_rq_lock_irq(rq);
6292 }
6293
6294 static DEFINE_PER_CPU(struct balance_callback, core_balance_head);
6295
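/*
 * Called when core scheduling picked the idle task for this rq. If the rq
 * still has runnable (but cookie-incompatible) tasks, queue a balance
 * callback that tries to steal a cookie-matched task from another CPU once
 * the rq lock is dropped.
 */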
6296 static void queue_core_balance(struct rq *rq)
6297 {
6298 if (!sched_core_enabled(rq))
6299 return;
6300
6301 if (!rq->core->core_cookie)
6302 return;
6303
6304 if (!rq->nr_running) /* not forced idle */
6305 return;
6306
6307 queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance);
6308 }
6309
6310 DEFINE_LOCK_GUARD_1(core_lock, int,
6311 sched_core_lock(*_T->lock, &_T->flags),
6312 sched_core_unlock(*_T->lock, &_T->flags),
6313 unsigned long flags)
6314
6315 static void sched_core_cpu_starting(unsigned int cpu)
6316 {
6317 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
6318 struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
6319 int t;
6320
6321 guard(core_lock)(&cpu);
6322
6323 WARN_ON_ONCE(rq->core != rq);
6324
6325 /* if we're the first, we'll be our own leader */
6326 if (cpumask_weight(smt_mask) == 1)
6327 return;
6328
6329 /* find the leader */
6330 for_each_cpu(t, smt_mask) {
6331 if (t == cpu)
6332 continue;
6333 rq = cpu_rq(t);
6334 if (rq->core == rq) {
6335 core_rq = rq;
6336 break;
6337 }
6338 }
6339
6340 if (WARN_ON_ONCE(!core_rq)) /* whoopsie */
6341 return;
6342
6343 /* install and validate core_rq */
6344 for_each_cpu(t, smt_mask) {
6345 rq = cpu_rq(t);
6346
6347 if (t == cpu)
6348 rq->core = core_rq;
6349
6350 WARN_ON_ONCE(rq->core != core_rq);
6351 }
6352 }
6353
6354 static void sched_core_cpu_deactivate(unsigned int cpu)
6355 {
6356 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
6357 struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
6358 int t;
6359
6360 guard(core_lock)(&cpu);
6361
6362 /* if we're the last man standing, nothing to do */
6363 if (cpumask_weight(smt_mask) == 1) {
6364 WARN_ON_ONCE(rq->core != rq);
6365 return;
6366 }
6367
6368 /* if we're not the leader, nothing to do */
6369 if (rq->core != rq)
6370 return;
6371
6372 /* find a new leader */
6373 for_each_cpu(t, smt_mask) {
6374 if (t == cpu)
6375 continue;
6376 core_rq = cpu_rq(t);
6377 break;
6378 }
6379
6380 if (WARN_ON_ONCE(!core_rq)) /* impossible */
6381 return;
6382
6383 /* copy the shared state to the new leader */
6384 core_rq->core_task_seq = rq->core_task_seq;
6385 core_rq->core_pick_seq = rq->core_pick_seq;
6386 core_rq->core_cookie = rq->core_cookie;
6387 core_rq->core_forceidle_count = rq->core_forceidle_count;
6388 core_rq->core_forceidle_seq = rq->core_forceidle_seq;
6389 core_rq->core_forceidle_occupation = rq->core_forceidle_occupation;
6390
6391 /*
6392 * Accounting edge for forced idle is handled in pick_next_task().
6393 * Don't need another one here, since the hotplug thread shouldn't
6394 * have a cookie.
6395 */
6396 core_rq->core_forceidle_start = 0;
6397
6398 /* install new leader */
6399 for_each_cpu(t, smt_mask) {
6400 rq = cpu_rq(t);
6401 rq->core = core_rq;
6402 }
6403 }
6404
6405 static inline void sched_core_cpu_dying(unsigned int cpu)
6406 {
6407 struct rq *rq = cpu_rq(cpu);
6408
6409 if (rq->core != rq)
6410 rq->core = rq;
6411 }
6412
6413 #else /* !CONFIG_SCHED_CORE: */
6414
6415 static inline void sched_core_cpu_starting(unsigned int cpu) {}
6416 static inline void sched_core_cpu_deactivate(unsigned int cpu) {}
6417 static inline void sched_core_cpu_dying(unsigned int cpu) {}
6418
6419 static struct task_struct *
6420 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6421 {
6422 return __pick_next_task(rq, prev, rf);
6423 }
6424
6425 #endif /* !CONFIG_SCHED_CORE */
6426
6427 /*
6428 * Constants for the sched_mode argument of __schedule().
6429 *
6430 * The mode argument allows RT enabled kernels to differentiate a
6431 * preemption from blocking on an 'sleeping' spin/rwlock.
6432 */
6433 #define SM_IDLE (-1)
6434 #define SM_NONE 0
6435 #define SM_PREEMPT 1
6436 #define SM_RTLOCK_WAIT 2
6437
6438 /*
6439 * Helper function for __schedule()
6440 *
6441 * Tries to deactivate the task, unless the should_block arg
6442 * is false or if a signal is pending. In the case a signal
6443 * is pending, marks the task's __state as RUNNING (and clears
6444 * blocked_on).
6445 */
6446 static bool try_to_block_task(struct rq *rq, struct task_struct *p,
6447 unsigned long *task_state_p, bool should_block)
6448 {
6449 unsigned long task_state = *task_state_p;
6450 int flags = DEQUEUE_NOCLOCK;
6451
6452 if (signal_pending_state(task_state, p)) {
6453 WRITE_ONCE(p->__state, TASK_RUNNING);
6454 *task_state_p = TASK_RUNNING;
6455 return false;
6456 }
6457
6458 /*
6459 * We check should_block after signal_pending because we
6460 * will want to wake the task in that case. But if
6461 * should_block is false, it's likely due to the task being
6462 * blocked on a mutex, and we want to keep it on the runqueue
6463 * to be selectable for proxy-execution.
6464 */
6465 if (!should_block)
6466 return false;
6467
6468 p->sched_contributes_to_load =
6469 (task_state & TASK_UNINTERRUPTIBLE) &&
6470 !(task_state & TASK_NOLOAD) &&
6471 !(task_state & TASK_FROZEN);
6472
6473 if (unlikely(is_special_task_state(task_state)))
6474 flags |= DEQUEUE_SPECIAL;
6475
6476 /*
6477 * __schedule() ttwu()
6478 * prev_state = prev->state; if (p->on_rq && ...)
6479 * if (prev_state) goto out;
6480 * p->on_rq = 0; smp_acquire__after_ctrl_dep();
6481 * p->state = TASK_WAKING
6482 *
6483 * Where __schedule() and ttwu() have matching control dependencies.
6484 *
6485 * After this, schedule() must not care about p->state any more.
6486 */
6487 block_task(rq, p, flags);
6488 return true;
6489 }
6490
6491 #ifdef CONFIG_SCHED_PROXY_EXEC
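/*
 * Fall back to the idle task: make rq->idle the next/donor task and flag it
 * with need_resched so the CPU schedules idle only briefly and __schedule()
 * soon gets to pick again.
 */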
6492 static inline struct task_struct *proxy_resched_idle(struct rq *rq)
6493 {
6494 put_prev_set_next_task(rq, rq->donor, rq->idle);
6495 rq_set_donor(rq, rq->idle);
6496 set_tsk_need_resched(rq->idle);
6497 return rq->idle;
6498 }
6499
6500 static bool __proxy_deactivate(struct rq *rq, struct task_struct *donor)
6501 {
6502 unsigned long state = READ_ONCE(donor->__state);
6503
6504 /* Don't deactivate if the state has been changed to TASK_RUNNING */
6505 if (state == TASK_RUNNING)
6506 return false;
6507 /*
6508 * Because we got donor from pick_next_task(), it is *crucial*
6509 * that we call proxy_resched_idle() before we deactivate it.
6510 * As once we deactivate donor, donor->on_rq is set to zero,
6511 * which allows ttwu() to immediately try to wake the task on
6512 * another rq. So we cannot use *any* references to donor
6513 * after that point. So things like cfs_rq->curr or rq->donor
6514 * need to be changed from next *before* we deactivate.
6515 */
6516 proxy_resched_idle(rq);
6517 return try_to_block_task(rq, donor, &state, true);
6518 }
6519
6520 static struct task_struct *proxy_deactivate(struct rq *rq, struct task_struct *donor)
6521 {
6522 if (!__proxy_deactivate(rq, donor)) {
6523 /*
6524 * XXX: For now, if deactivation failed, set donor
6525 * as unblocked, as we aren't doing proxy-migrations
6526 * yet (more logic will be needed then).
6527 */
6528 donor->blocked_on = NULL;
6529 }
6530 return NULL;
6531 }
6532
6533 /*
6534 * Find runnable lock owner to proxy for mutex blocked donor
6535 *
6536 * Follow the blocked-on relation:
6537 * task->blocked_on -> mutex->owner -> task...
6538 *
6539 * Lock order:
6540 *
6541 * p->pi_lock
6542 * rq->lock
6543 * mutex->wait_lock
6544 *
6545 * Returns the task that is going to be used as execution context (the one
6546 * that is actually going to be run on cpu_of(rq)).
6547 */
6548 static struct task_struct *
6549 find_proxy_task(struct rq *rq, struct task_struct *donor, struct rq_flags *rf)
6550 {
6551 struct task_struct *owner = NULL;
6552 int this_cpu = cpu_of(rq);
6553 struct task_struct *p;
6554 struct mutex *mutex;
6555
6556 /* Follow blocked_on chain. */
6557 for (p = donor; task_is_blocked(p); p = owner) {
6558 mutex = p->blocked_on;
6559 /* Something changed in the chain, so pick again */
6560 if (!mutex)
6561 return NULL;
6562 /*
6563 * By taking mutex->wait_lock we hold off concurrent mutex_unlock()
6564 * and ensure @owner sticks around.
6565 */
6566 guard(raw_spinlock)(&mutex->wait_lock);
6567
6568 /* Check again that p is blocked with wait_lock held */
6569 if (mutex != __get_task_blocked_on(p)) {
6570 /*
6571 * Something changed in the blocked_on chain and
6572 * we don't know if only at this level. So, let's
6573 * just bail out completely and let __schedule()
6574 * figure things out (pick_again loop).
6575 */
6576 return NULL;
6577 }
6578
6579 owner = __mutex_owner(mutex);
6580 if (!owner) {
6581 __clear_task_blocked_on(p, mutex);
6582 return p;
6583 }
6584
6585 if (!READ_ONCE(owner->on_rq) || owner->se.sched_delayed) {
6586 /* XXX Don't handle blocked owners/delayed dequeue yet */
6587 return proxy_deactivate(rq, donor);
6588 }
6589
6590 if (task_cpu(owner) != this_cpu) {
6591 /* XXX Don't handle migrations yet */
6592 return proxy_deactivate(rq, donor);
6593 }
6594
6595 if (task_on_rq_migrating(owner)) {
6596 /*
6597 * One of the chain of mutex owners is currently migrating to this
6598 * CPU, but has not yet been enqueued because we are holding the
6599 * rq lock. As a simple solution, just schedule rq->idle to give
6600 * the migration a chance to complete. Much like the migrate_task
6601 * case we should end up back in find_proxy_task(), this time
6602 * hopefully with all relevant tasks already enqueued.
6603 */
6604 return proxy_resched_idle(rq);
6605 }
6606
6607 /*
6608 * It's possible to race where after we check owner->on_rq
6609 * but before we check (owner_cpu != this_cpu) that the
6610 * task on another cpu was migrated back to this cpu. In
6611 * that case it could slip by our checks. So double check
6612 * we are still on this cpu and not migrating. If we get
6613 * inconsistent results, try again.
6614 */
6615 if (!task_on_rq_queued(owner) || task_cpu(owner) != this_cpu)
6616 return NULL;
6617
6618 if (owner == p) {
6619 /*
6620 * It's possible we interleave with mutex_unlock like:
6621 *
6622 * lock(&rq->lock);
6623 * find_proxy_task()
6624 * mutex_unlock()
6625 * lock(&wait_lock);
6626 * donor(owner) = current->blocked_donor;
6627 * unlock(&wait_lock);
6628 *
6629 * wake_up_q();
6630 * ...
6631 * ttwu_runnable()
6632 * __task_rq_lock()
6633 * lock(&wait_lock);
6634 * owner == p
6635 *
6636 * Which leaves us to finish the ttwu_runnable() and make it go.
6637 *
6638 * So schedule rq->idle so that ttwu_runnable() can get the rq
6639 * lock and mark owner as running.
6640 */
6641 return proxy_resched_idle(rq);
6642 }
6643 /*
6644 * OK, now we're absolutely sure @owner is on this
6645 * rq, therefore holding @rq->lock is sufficient to
6646 * guarantee its existence, as per ttwu_remote().
6647 */
6648 }
6649
6650 WARN_ON_ONCE(owner && !owner->on_rq);
6651 return owner;
6652 }
6653 #else /* SCHED_PROXY_EXEC */
6654 static struct task_struct *
6655 find_proxy_task(struct rq *rq, struct task_struct *donor, struct rq_flags *rf)
6656 {
6657 WARN_ONCE(1, "This should never be called in the !SCHED_PROXY_EXEC case\n");
6658 return donor;
6659 }
6660 #endif /* SCHED_PROXY_EXEC */
6661
6662 static inline void proxy_tag_curr(struct rq *rq, struct task_struct *owner)
6663 {
6664 if (!sched_proxy_exec())
6665 return;
6666 /*
6667 * pick_next_task() calls set_next_task() on the chosen task
6668 * at some point, which ensures it is not push/pullable.
6669 * However, the chosen/donor task *and* the mutex owner form an
6670 * atomic pair wrt push/pull.
6671 *
6672 * Make sure owner we run is not pushable. Unfortunately we can
6673 * only deal with that by means of a dequeue/enqueue cycle. :-/
6674 */
6675 dequeue_task(rq, owner, DEQUEUE_NOCLOCK | DEQUEUE_SAVE);
6676 enqueue_task(rq, owner, ENQUEUE_NOCLOCK | ENQUEUE_RESTORE);
6677 }
6678
6679 /*
6680 * __schedule() is the main scheduler function.
6681 *
6682 * The main means of driving the scheduler and thus entering this function are:
6683 *
6684 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc.
6685 *
6686 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
6687 * paths. For example, see arch/x86/entry_64.S.
6688 *
6689 * To drive preemption between tasks, the scheduler sets the flag in timer
6690 * interrupt handler sched_tick().
6691 *
6692 * 3. Wakeups don't really cause entry into schedule(). They add a
6693 * task to the run-queue and that's it.
6694 *
6695 * Now, if the new task added to the run-queue preempts the current
6696 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
6697 * called on the nearest possible occasion:
6698 *
6699 * - If the kernel is preemptible (CONFIG_PREEMPTION=y):
6700 *
6701 * - in syscall or exception context, at the next outermost
6702 * preempt_enable(). (this might be as soon as the wake_up()'s
6703 * spin_unlock()!)
6704 *
6705 * - in IRQ context, return from interrupt-handler to
6706 * preemptible context
6707 *
6708 * - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
6709 * then at the next:
6710 *
6711 * - cond_resched() call
6712 * - explicit schedule() call
6713 * - return from syscall or exception to user-space
6714 * - return from interrupt-handler to user-space
6715 *
6716 * WARNING: must be called with preemption disabled!
6717 */
6718 static void __sched notrace __schedule(int sched_mode)
6719 {
6720 struct task_struct *prev, *next;
6721 /*
6722 * On PREEMPT_RT kernel, SM_RTLOCK_WAIT is noted
6723 * as a preemption by schedule_debug() and RCU.
6724 */
6725 bool preempt = sched_mode > SM_NONE;
6726 bool is_switch = false;
6727 unsigned long *switch_count;
6728 unsigned long prev_state;
6729 struct rq_flags rf;
6730 struct rq *rq;
6731 int cpu;
6732
6733 /* Trace preemptions consistently with task switches */
6734 trace_sched_entry_tp(sched_mode == SM_PREEMPT);
6735
6736 cpu = smp_processor_id();
6737 rq = cpu_rq(cpu);
6738 prev = rq->curr;
6739
6740 schedule_debug(prev, preempt);
6741
6742 if (sched_feat(HRTICK) || sched_feat(HRTICK_DL))
6743 hrtick_clear(rq);
6744
6745 klp_sched_try_switch(prev);
6746
6747 local_irq_disable();
6748 rcu_note_context_switch(preempt);
6749 migrate_disable_switch(rq, prev);
6750
6751 /*
6752 * Make sure that signal_pending_state()->signal_pending() below
6753 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
6754 * done by the caller to avoid the race with signal_wake_up():
6755 *
6756 * __set_current_state(@state) signal_wake_up()
6757 * schedule() set_tsk_thread_flag(p, TIF_SIGPENDING)
6758 * wake_up_state(p, state)
6759 * LOCK rq->lock LOCK p->pi_state
6760 * smp_mb__after_spinlock() smp_mb__after_spinlock()
6761 * if (signal_pending_state()) if (p->state & @state)
6762 *
6763 * Also, the membarrier system call requires a full memory barrier
6764 * after coming from user-space, before storing to rq->curr; this
6765 * barrier matches a full barrier in the proximity of the membarrier
6766 * system call exit.
6767 */
6768 rq_lock(rq, &rf);
6769 smp_mb__after_spinlock();
6770
6771 /* Promote REQ to ACT */
6772 rq->clock_update_flags <<= 1;
6773 update_rq_clock(rq);
6774 rq->clock_update_flags = RQCF_UPDATED;
6775
6776 switch_count = &prev->nivcsw;
6777
6778 /* Task state changes only considers SM_PREEMPT as preemption */
6779 preempt = sched_mode == SM_PREEMPT;
6780
6781 /*
6782 * We must load prev->state once (task_struct::state is volatile), such
6783 * that we form a control dependency vs deactivate_task() below.
6784 */
6785 prev_state = READ_ONCE(prev->__state);
6786 if (sched_mode == SM_IDLE) {
6787 /* SCX must consult the BPF scheduler to tell if rq is empty */
6788 if (!rq->nr_running && !scx_enabled()) {
6789 next = prev;
6790 goto picked;
6791 }
6792 } else if (!preempt && prev_state) {
6793 /*
6794 * We pass !task_is_blocked() as the should_block arg
6795 * in order to keep mutex-blocked tasks on the runqueue
6796 * for selection with proxy-exec (without proxy-exec
6797 * task_is_blocked() will always be false).
6798 */
6799 try_to_block_task(rq, prev, &prev_state,
6800 !task_is_blocked(prev));
6801 switch_count = &prev->nvcsw;
6802 }
6803
6804 pick_again:
6805 next = pick_next_task(rq, rq->donor, &rf);
6806 rq_set_donor(rq, next);
6807 if (unlikely(task_is_blocked(next))) {
6808 next = find_proxy_task(rq, next, &rf);
6809 if (!next)
6810 goto pick_again;
6811 if (next == rq->idle)
6812 goto keep_resched;
6813 }
6814 picked:
6815 clear_tsk_need_resched(prev);
6816 clear_preempt_need_resched();
6817 keep_resched:
6818 rq->last_seen_need_resched_ns = 0;
6819
6820 is_switch = prev != next;
6821 if (likely(is_switch)) {
6822 rq->nr_switches++;
6823 /*
6824 * RCU users of rcu_dereference(rq->curr) may not see
6825 * changes to task_struct made by pick_next_task().
6826 */
6827 RCU_INIT_POINTER(rq->curr, next);
6828
6829 if (!task_current_donor(rq, next))
6830 proxy_tag_curr(rq, next);
6831
6832 /*
6833 * The membarrier system call requires each architecture
6834 * to have a full memory barrier after updating
6835 * rq->curr, before returning to user-space.
6836 *
6837 * Here are the schemes providing that barrier on the
6838 * various architectures:
6839 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC,
6840 * RISC-V. switch_mm() relies on membarrier_arch_switch_mm()
6841 * on PowerPC and on RISC-V.
6842 * - finish_lock_switch() for weakly-ordered
6843 * architectures where spin_unlock is a full barrier,
6844 * - switch_to() for arm64 (weakly-ordered, spin_unlock
6845 * is a RELEASE barrier),
6846 *
6847 * The barrier matches a full barrier in the proximity of
6848 * the membarrier system call entry.
6849 *
6850 * On RISC-V, this barrier pairing is also needed for the
6851 * SYNC_CORE command when switching between processes, cf.
6852 * the inline comments in membarrier_arch_switch_mm().
6853 */
6854 ++*switch_count;
6855
6856 psi_account_irqtime(rq, prev, next);
6857 psi_sched_switch(prev, next, !task_on_rq_queued(prev) ||
6858 prev->se.sched_delayed);
6859
6860 trace_sched_switch(preempt, prev, next, prev_state);
6861
6862 /* Also unlocks the rq: */
6863 rq = context_switch(rq, prev, next, &rf);
6864 } else {
6865 /* In case next was already curr but just got blocked_donor */
6866 if (!task_current_donor(rq, next))
6867 proxy_tag_curr(rq, next);
6868
6869 rq_unpin_lock(rq, &rf);
6870 __balance_callbacks(rq);
6871 raw_spin_rq_unlock_irq(rq);
6872 }
6873 trace_sched_exit_tp(is_switch);
6874 }
6875
6876 void __noreturn do_task_dead(void)
6877 {
6878 /* Causes final put_task_struct in finish_task_switch(): */
6879 set_special_state(TASK_DEAD);
6880
6881 /* Tell freezer to ignore us: */
6882 current->flags |= PF_NOFREEZE;
6883
6884 __schedule(SM_NONE);
6885 BUG();
6886
6887 /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
6888 for (;;)
6889 cpu_relax();
6890 }
6891
6892 static inline void sched_submit_work(struct task_struct *tsk)
6893 {
6894 static DEFINE_WAIT_OVERRIDE_MAP(sched_map, LD_WAIT_CONFIG);
6895 unsigned int task_flags;
6896
6897 /*
6898 * Establish LD_WAIT_CONFIG context to ensure none of the code called
6899 * will use a blocking primitive -- which would lead to recursion.
6900 */
6901 lock_map_acquire_try(&sched_map);
6902
6903 task_flags = tsk->flags;
6904 /*
6905 * If a worker goes to sleep, notify and ask workqueue whether it
6906 * wants to wake up a task to maintain concurrency.
6907 */
6908 if (task_flags & PF_WQ_WORKER)
6909 wq_worker_sleeping(tsk);
6910 else if (task_flags & PF_IO_WORKER)
6911 io_wq_worker_sleeping(tsk);
6912
6913 /*
6914 * spinlock and rwlock must not flush block requests. This will
6915 * deadlock if the callback attempts to acquire a lock which is
6916 * already acquired.
6917 */
6918 WARN_ON_ONCE(current->__state & TASK_RTLOCK_WAIT);
6919
6920 /*
6921 * If we are going to sleep and we have plugged IO queued,
6922 * make sure to submit it to avoid deadlocks.
6923 */
6924 blk_flush_plug(tsk->plug, true);
6925
6926 lock_map_release(&sched_map);
6927 }
6928
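/*
 * Counterpart of sched_submit_work(): after the task is scheduled back in,
 * tell the workqueue/io_wq machinery that its worker is running again and
 * drop any stale block-layer plug timestamp.
 */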
6929 static void sched_update_worker(struct task_struct *tsk)
6930 {
6931 if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER | PF_BLOCK_TS)) {
6932 if (tsk->flags & PF_BLOCK_TS)
6933 blk_plug_invalidate_ts(tsk);
6934 if (tsk->flags & PF_WQ_WORKER)
6935 wq_worker_running(tsk);
6936 else if (tsk->flags & PF_IO_WORKER)
6937 io_wq_worker_running(tsk);
6938 }
6939 }
6940
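/*
 * Core of the schedule() variants: keep calling __schedule() with preemption
 * disabled until need_resched() is clear, re-entering the scheduler if
 * another reschedule was requested while we were switching.
 */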
6941 static __always_inline void __schedule_loop(int sched_mode)
6942 {
6943 do {
6944 preempt_disable();
6945 __schedule(sched_mode);
6946 sched_preempt_enable_no_resched();
6947 } while (need_resched());
6948 }
6949
6950 asmlinkage __visible void __sched schedule(void)
6951 {
6952 struct task_struct *tsk = current;
6953
6954 #ifdef CONFIG_RT_MUTEXES
6955 lockdep_assert(!tsk->sched_rt_mutex);
6956 #endif
6957
6958 if (!task_is_running(tsk))
6959 sched_submit_work(tsk);
6960 __schedule_loop(SM_NONE);
6961 sched_update_worker(tsk);
6962 }
6963 EXPORT_SYMBOL(schedule);
6964
6965 /*
6966 * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
6967 * state (have scheduled out non-voluntarily) by making sure that all
6968 * tasks have either left the run queue or have gone into user space.
6969 * As idle tasks do not do either, they must not ever be preempted
6970 * (schedule out non-voluntarily).
6971 *
6972 * schedule_idle() is similar to schedule_preempt_disabled() except that it
6973 * never enables preemption because it does not call sched_submit_work().
6974 */
6975 void __sched schedule_idle(void)
6976 {
6977 /*
6978 * As this skips calling sched_submit_work(), which the idle task does
6979 * regardless because that function is a NOP when the task is in a
6980 * TASK_RUNNING state, make sure this isn't used someplace that the
6981 * current task can be in any other state. Note, idle is always in the
6982 * TASK_RUNNING state.
6983 */
6984 WARN_ON_ONCE(current->__state);
6985 do {
6986 __schedule(SM_IDLE);
6987 } while (need_resched());
6988 }
6989
6990 #if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK)
6991 asmlinkage __visible void __sched schedule_user(void)
6992 {
6993 /*
6994 * If we come here after a random call to set_need_resched(),
6995 * or we have been woken up remotely but the IPI has not yet arrived,
6996 * we haven't yet exited the RCU idle mode. Do it here manually until
6997 * we find a better solution.
6998 *
6999 * NB: There are buggy callers of this function. Ideally we
7000 * should warn if prev_state != CT_STATE_USER, but that will trigger
7001 * too frequently to make sense yet.
7002 */
7003 enum ctx_state prev_state = exception_enter();
7004 schedule();
7005 exception_exit(prev_state);
7006 }
7007 #endif
7008
7009 /**
7010 * schedule_preempt_disabled - called with preemption disabled
7011 *
7012 * Returns with preemption disabled. Note: preempt_count must be 1
7013 */
7014 void __sched schedule_preempt_disabled(void)
7015 {
7016 sched_preempt_enable_no_resched();
7017 schedule();
7018 preempt_disable();
7019 }
7020
7021 #ifdef CONFIG_PREEMPT_RT
7022 void __sched notrace schedule_rtlock(void)
7023 {
7024 __schedule_loop(SM_RTLOCK_WAIT);
7025 }
7026 NOKPROBE_SYMBOL(schedule_rtlock);
7027 #endif
7028
7029 static void __sched notrace preempt_schedule_common(void)
7030 {
7031 do {
7032 /*
7033 * Because the function tracer can trace preempt_count_sub()
7034 * and it also uses preempt_enable/disable_notrace(), if
7035 * NEED_RESCHED is set, the preempt_enable_notrace() called
7036 * by the function tracer will call this function again and
7037 * cause infinite recursion.
7038 *
7039 * Preemption must be disabled here before the function
7040 * tracer can trace. Break up preempt_disable() into two
7041 * calls. One to disable preemption without fear of being
7042 * traced. The other to still record the preemption latency,
7043 * which can also be traced by the function tracer.
7044 */
7045 preempt_disable_notrace();
7046 preempt_latency_start(1);
7047 __schedule(SM_PREEMPT);
7048 preempt_latency_stop(1);
7049 preempt_enable_no_resched_notrace();
7050
7051 /*
7052 * Check again in case we missed a preemption opportunity
7053 * between schedule and now.
7054 */
7055 } while (need_resched());
7056 }
7057
7058 #ifdef CONFIG_PREEMPTION
7059 /*
7060 * This is the entry point to schedule() from in-kernel preemption
7061 * off of preempt_enable.
7062 */
7063 asmlinkage __visible void __sched notrace preempt_schedule(void)
7064 {
7065 /*
7066 * If there is a non-zero preempt_count or interrupts are disabled,
7067 * we do not want to preempt the current task. Just return..
7068 */
7069 if (likely(!preemptible()))
7070 return;
7071 preempt_schedule_common();
7072 }
7073 NOKPROBE_SYMBOL(preempt_schedule);
7074 EXPORT_SYMBOL(preempt_schedule);
7075
7076 #ifdef CONFIG_PREEMPT_DYNAMIC
7077 # ifdef CONFIG_HAVE_PREEMPT_DYNAMIC_CALL
7078 # ifndef preempt_schedule_dynamic_enabled
7079 # define preempt_schedule_dynamic_enabled preempt_schedule
7080 # define preempt_schedule_dynamic_disabled NULL
7081 # endif
7082 DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
7083 EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
7084 # elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7085 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);
7086 void __sched notrace dynamic_preempt_schedule(void)
7087 {
7088 if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
7089 return;
7090 preempt_schedule();
7091 }
7092 NOKPROBE_SYMBOL(dynamic_preempt_schedule);
7093 EXPORT_SYMBOL(dynamic_preempt_schedule);
7094 # endif
7095 #endif /* CONFIG_PREEMPT_DYNAMIC */
7096
7097 /**
7098 * preempt_schedule_notrace - preempt_schedule called by tracing
7099 *
7100 * The tracing infrastructure uses preempt_enable_notrace to prevent
7101 * recursion and tracing preempt enabling caused by the tracing
7102 * infrastructure itself. But as tracing can happen in areas coming
7103 * from userspace or just about to enter userspace, a preempt enable
7104 * can occur before user_exit() is called. This will cause the scheduler
7105 * to be called when the system is still in usermode.
7106 *
7107 * To prevent this, the preempt_enable_notrace will use this function
7108 * instead of preempt_schedule() to exit user context if needed before
7109 * calling the scheduler.
7110 */
7111 asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
7112 {
7113 enum ctx_state prev_ctx;
7114
7115 if (likely(!preemptible()))
7116 return;
7117
7118 do {
7119 /*
7120 * Because the function tracer can trace preempt_count_sub()
7121 * and it also uses preempt_enable/disable_notrace(), if
7122 * NEED_RESCHED is set, the preempt_enable_notrace() called
7123 * by the function tracer will call this function again and
7124 * cause infinite recursion.
7125 *
7126 * Preemption must be disabled here before the function
7127 * tracer can trace. Break up preempt_disable() into two
7128 * calls. One to disable preemption without fear of being
7129 * traced. The other to still record the preemption latency,
7130 * which can also be traced by the function tracer.
7131 */
7132 preempt_disable_notrace();
7133 preempt_latency_start(1);
7134 /*
7135 * Needs preempt disabled in case user_exit() is traced
7136 * and the tracer calls preempt_enable_notrace() causing
7137 * an infinite recursion.
7138 */
7139 prev_ctx = exception_enter();
7140 __schedule(SM_PREEMPT);
7141 exception_exit(prev_ctx);
7142
7143 preempt_latency_stop(1);
7144 preempt_enable_no_resched_notrace();
7145 } while (need_resched());
7146 }
7147 EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
7148
7149 #ifdef CONFIG_PREEMPT_DYNAMIC
7150 # if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
7151 # ifndef preempt_schedule_notrace_dynamic_enabled
7152 # define preempt_schedule_notrace_dynamic_enabled preempt_schedule_notrace
7153 # define preempt_schedule_notrace_dynamic_disabled NULL
7154 # endif
7155 DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
7156 EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
7157 # elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7158 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace);
7159 void __sched notrace dynamic_preempt_schedule_notrace(void)
7160 {
7161 if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace))
7162 return;
7163 preempt_schedule_notrace();
7164 }
7165 NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace);
7166 EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
7167 # endif
7168 #endif
7169
7170 #endif /* CONFIG_PREEMPTION */
7171
7172 /*
7173 * This is the entry point to schedule() from kernel preemption
7174 * off of IRQ context.
7175  * Note that this is called and returns with IRQs disabled. This
7176  * protects us against recursive calls from IRQ context.
7177 */
7178 asmlinkage __visible void __sched preempt_schedule_irq(void)
7179 {
7180 enum ctx_state prev_state;
7181
7182 /* Catch callers which need to be fixed */
7183 BUG_ON(preempt_count() || !irqs_disabled());
7184
7185 prev_state = exception_enter();
7186
7187 do {
7188 preempt_disable();
7189 local_irq_enable();
7190 __schedule(SM_PREEMPT);
7191 local_irq_disable();
7192 sched_preempt_enable_no_resched();
7193 } while (need_resched());
7194
7195 exception_exit(prev_state);
7196 }
7197
7198 int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
7199 void *key)
7200 {
7201 WARN_ON_ONCE(wake_flags & ~(WF_SYNC|WF_CURRENT_CPU));
7202 return try_to_wake_up(curr->private, mode, wake_flags);
7203 }
7204 EXPORT_SYMBOL(default_wake_function);
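
/*
 * Illustrative use (sketch): DECLARE_WAITQUEUE() initializes a wait queue
 * entry with .func = default_wake_function, so a later __wake_up() on the
 * wait queue head ends up calling try_to_wake_up() on the sleeping task:
 *
 *	DECLARE_WAITQUEUE(wait, current);
 *
 *	add_wait_queue(&wq, &wait);	// 'wq' is a hypothetical wait_queue_head_t
 *	// ... sleep until woken ...
 *	remove_wait_queue(&wq, &wait);
 */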
7205
7206 const struct sched_class *__setscheduler_class(int policy, int prio)
7207 {
7208 if (dl_prio(prio))
7209 return &dl_sched_class;
7210
7211 if (rt_prio(prio))
7212 return &rt_sched_class;
7213
7214 #ifdef CONFIG_SCHED_CLASS_EXT
7215 if (task_should_scx(policy))
7216 return &ext_sched_class;
7217 #endif
7218
7219 return &fair_sched_class;
7220 }
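
/*
 * Illustration (sketch, not exhaustive): the class is picked from the
 * effective priority, e.g.
 *
 *	__setscheduler_class(SCHED_FIFO, 50)             -> &rt_sched_class
 *	__setscheduler_class(SCHED_NORMAL, DEFAULT_PRIO) -> &fair_sched_class
 *
 * Deadline tasks have negative effective priorities and thus hit the
 * dl_prio() check first; SCHED_EXT tasks (where enabled) are routed via
 * task_should_scx() above.
 */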
7221
7222 #ifdef CONFIG_RT_MUTEXES
7223
7224 /*
7225 * Would be more useful with typeof()/auto_type but they don't mix with
7226 * bit-fields. Since it's a local thing, use int. Keep the generic sounding
7227 * name such that if someone were to implement this function we get to compare
7228 * notes.
7229 */
7230 #define fetch_and_set(x, v) ({ int _x = (x); (x) = (v); _x; })
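
/*
 * Illustrative semantics (sketch):
 *
 *	int old = fetch_and_set(flag, 1);	// 'flag' is a hypothetical int
 *
 * leaves 'flag' set to 1 and yields its previous value in 'old'.
 */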
7231
7232 void rt_mutex_pre_schedule(void)
7233 {
7234 lockdep_assert(!fetch_and_set(current->sched_rt_mutex, 1));
7235 sched_submit_work(current);
7236 }
7237
7238 void rt_mutex_schedule(void)
7239 {
7240 lockdep_assert(current->sched_rt_mutex);
7241 __schedule_loop(SM_NONE);
7242 }
7243
7244 void rt_mutex_post_schedule(void)
7245 {
7246 sched_update_worker(current);
7247 lockdep_assert(fetch_and_set(current->sched_rt_mutex, 0));
7248 }
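
/*
 * Rough usage sketch of the hooks above (see kernel/locking/rtmutex.c for
 * the real thing): the rtmutex slow path brackets a blocking acquisition as
 *
 *	rt_mutex_pre_schedule();
 *	// ... enqueue as waiter, sleeping via rt_mutex_schedule() as needed ...
 *	rt_mutex_post_schedule();
 */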
7249
7250 /*
7251 * rt_mutex_setprio - set the current priority of a task
7252 * @p: task to boost
7253 * @pi_task: donor task
7254 *
7255 * This function changes the 'effective' priority of a task. It does
7256 * not touch ->normal_prio like __setscheduler().
7257 *
7258 * Used by the rt_mutex code to implement priority inheritance
7259 * logic. Call site only calls if the priority of the task changed.
7260 */
7261 void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
7262 {
7263 int prio, oldprio, queue_flag =
7264 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
7265 const struct sched_class *prev_class, *next_class;
7266 struct rq_flags rf;
7267 struct rq *rq;
7268
7269 /* XXX used to be waiter->prio, not waiter->task->prio */
7270 prio = __rt_effective_prio(pi_task, p->normal_prio);
7271
7272 /*
7273 * If nothing changed; bail early.
7274 */
7275 if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio))
7276 return;
7277
7278 rq = __task_rq_lock(p, &rf);
7279 update_rq_clock(rq);
7280 /*
7281 * Set under pi_lock && rq->lock, such that the value can be used under
7282 * either lock.
7283 *
7284 	 * Note that there is a lot of trickiness in making this pointer cache work
7285 * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
7286 * ensure a task is de-boosted (pi_task is set to NULL) before the
7287 * task is allowed to run again (and can exit). This ensures the pointer
7288 * points to a blocked task -- which guarantees the task is present.
7289 */
7290 p->pi_top_task = pi_task;
7291
7292 /*
7293 * For FIFO/RR we only need to set prio, if that matches we're done.
7294 */
7295 if (prio == p->prio && !dl_prio(prio))
7296 goto out_unlock;
7297
7298 /*
7299 * Idle task boosting is a no-no in general. There is one
7300 	 * exception, when PREEMPT_RT and NOHZ are active:
7301 *
7302 * The idle task calls get_next_timer_interrupt() and holds
7303 * the timer wheel base->lock on the CPU and another CPU wants
7304 * to access the timer (probably to cancel it). We can safely
7305 * ignore the boosting request, as the idle CPU runs this code
7306 * with interrupts disabled and will complete the lock
7307 * protected section without being interrupted. So there is no
7308 * real need to boost.
7309 */
7310 if (unlikely(p == rq->idle)) {
7311 WARN_ON(p != rq->curr);
7312 WARN_ON(p->pi_blocked_on);
7313 goto out_unlock;
7314 }
7315
7316 trace_sched_pi_setprio(p, pi_task);
7317 oldprio = p->prio;
7318
7319 if (oldprio == prio)
7320 queue_flag &= ~DEQUEUE_MOVE;
7321
7322 prev_class = p->sched_class;
7323 next_class = __setscheduler_class(p->policy, prio);
7324
7325 if (prev_class != next_class)
7326 queue_flag |= DEQUEUE_CLASS;
7327
7328 scoped_guard (sched_change, p, queue_flag) {
7329 /*
7330 		 * Boosting conditions are:
7331 * 1. -rt task is running and holds mutex A
7332 * --> -dl task blocks on mutex A
7333 *
7334 * 2. -dl task is running and holds mutex A
7335 * --> -dl task blocks on mutex A and could preempt the
7336 * running task
7337 */
7338 if (dl_prio(prio)) {
7339 if (!dl_prio(p->normal_prio) ||
7340 (pi_task && dl_prio(pi_task->prio) &&
7341 dl_entity_preempt(&pi_task->dl, &p->dl))) {
7342 p->dl.pi_se = pi_task->dl.pi_se;
7343 scope->flags |= ENQUEUE_REPLENISH;
7344 } else {
7345 p->dl.pi_se = &p->dl;
7346 }
7347 } else if (rt_prio(prio)) {
7348 if (dl_prio(oldprio))
7349 p->dl.pi_se = &p->dl;
7350 if (oldprio < prio)
7351 scope->flags |= ENQUEUE_HEAD;
7352 } else {
7353 if (dl_prio(oldprio))
7354 p->dl.pi_se = &p->dl;
7355 if (rt_prio(oldprio))
7356 p->rt.timeout = 0;
7357 }
7358
7359 p->sched_class = next_class;
7360 p->prio = prio;
7361 }
7362 out_unlock:
7363 /* Avoid rq from going away on us: */
7364 preempt_disable();
7365
7366 rq_unpin_lock(rq, &rf);
7367 __balance_callbacks(rq);
7368 rq_repin_lock(rq, &rf);
7369 __task_rq_unlock(rq, p, &rf);
7370
7371 preempt_enable();
7372 }
7373 #endif /* CONFIG_RT_MUTEXES */
7374
7375 #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
7376 int __sched __cond_resched(void)
7377 {
7378 if (should_resched(0) && !irqs_disabled()) {
7379 preempt_schedule_common();
7380 return 1;
7381 }
7382 /*
7383 * In PREEMPT_RCU kernels, ->rcu_read_lock_nesting tells the tick
7384 * whether the current CPU is in an RCU read-side critical section,
7385 * so the tick can report quiescent states even for CPUs looping
7386 * in kernel context. In contrast, in non-preemptible kernels,
7387 * RCU readers leave no in-memory hints, which means that CPU-bound
7388 * processes executing in kernel context might never report an
7389 * RCU quiescent state. Therefore, the following code causes
7390 * cond_resched() to report a quiescent state, but only when RCU
7391 * is in urgent need of one.
7392 	 * A third case, preemptible but non-PREEMPT_RCU, provides for
7393 * urgently needed quiescent states via rcu_flavor_sched_clock_irq().
7394 */
7395 #ifndef CONFIG_PREEMPT_RCU
7396 rcu_all_qs();
7397 #endif
7398 return 0;
7399 }
7400 EXPORT_SYMBOL(__cond_resched);
7401 #endif
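
/*
 * Typical usage (illustrative sketch): long kernel-space loops call
 * cond_resched() once per iteration so that, on configurations without full
 * preemption, other tasks do not have to wait for the whole loop to finish:
 *
 *	for (i = 0; i < nr_items; i++) {	// 'nr_items' is hypothetical
 *		process_item(i);		// hypothetical per-item work
 *		cond_resched();
 *	}
 */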
7402
7403 #ifdef CONFIG_PREEMPT_DYNAMIC
7404 # ifdef CONFIG_HAVE_PREEMPT_DYNAMIC_CALL
7405 # define cond_resched_dynamic_enabled __cond_resched
7406 # define cond_resched_dynamic_disabled ((void *)&__static_call_return0)
7407 DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
7408 EXPORT_STATIC_CALL_TRAMP(cond_resched);
7409
7410 # define might_resched_dynamic_enabled __cond_resched
7411 # define might_resched_dynamic_disabled ((void *)&__static_call_return0)
7412 DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
7413 EXPORT_STATIC_CALL_TRAMP(might_resched);
7414 # elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7415 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
7416 int __sched dynamic_cond_resched(void)
7417 {
7418 if (!static_branch_unlikely(&sk_dynamic_cond_resched))
7419 return 0;
7420 return __cond_resched();
7421 }
7422 EXPORT_SYMBOL(dynamic_cond_resched);
7423
7424 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched);
7425 int __sched dynamic_might_resched(void)
7426 {
7427 if (!static_branch_unlikely(&sk_dynamic_might_resched))
7428 return 0;
7429 return __cond_resched();
7430 }
7431 EXPORT_SYMBOL(dynamic_might_resched);
7432 # endif
7433 #endif /* CONFIG_PREEMPT_DYNAMIC */
7434
7435 /*
7436 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
7437 * call schedule, and on return reacquire the lock.
7438 *
7439 * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
7440 * operations here to prevent schedule() from being called twice (once via
7441 * spin_unlock(), once by hand).
7442 */
7443 int __cond_resched_lock(spinlock_t *lock)
7444 {
7445 int resched = should_resched(PREEMPT_LOCK_OFFSET);
7446 int ret = 0;
7447
7448 lockdep_assert_held(lock);
7449
7450 if (spin_needbreak(lock) || resched) {
7451 spin_unlock(lock);
7452 if (!_cond_resched())
7453 cpu_relax();
7454 ret = 1;
7455 spin_lock(lock);
7456 }
7457 return ret;
7458 }
7459 EXPORT_SYMBOL(__cond_resched_lock);
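
/*
 * Illustrative caller (sketch), via the cond_resched_lock() wrapper:
 *
 *	spin_lock(&my_lock);			// 'my_lock' is hypothetical
 *	while (have_more_work()) {		// hypothetical condition
 *		do_one_unit();			// hypothetical work item
 *		cond_resched_lock(&my_lock);	// may drop and re-take my_lock
 *	}
 *	spin_unlock(&my_lock);
 *
 * Anything cached across the cond_resched_lock() call must be revalidated
 * if the lock was dropped.
 */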
7460
7461 int __cond_resched_rwlock_read(rwlock_t *lock)
7462 {
7463 int resched = should_resched(PREEMPT_LOCK_OFFSET);
7464 int ret = 0;
7465
7466 lockdep_assert_held_read(lock);
7467
7468 if (rwlock_needbreak(lock) || resched) {
7469 read_unlock(lock);
7470 if (!_cond_resched())
7471 cpu_relax();
7472 ret = 1;
7473 read_lock(lock);
7474 }
7475 return ret;
7476 }
7477 EXPORT_SYMBOL(__cond_resched_rwlock_read);
7478
7479 int __cond_resched_rwlock_write(rwlock_t *lock)
7480 {
7481 int resched = should_resched(PREEMPT_LOCK_OFFSET);
7482 int ret = 0;
7483
7484 lockdep_assert_held_write(lock);
7485
7486 if (rwlock_needbreak(lock) || resched) {
7487 write_unlock(lock);
7488 if (!_cond_resched())
7489 cpu_relax();
7490 ret = 1;
7491 write_lock(lock);
7492 }
7493 return ret;
7494 }
7495 EXPORT_SYMBOL(__cond_resched_rwlock_write);
7496
7497 #ifdef CONFIG_PREEMPT_DYNAMIC
7498
7499 # ifdef CONFIG_GENERIC_IRQ_ENTRY
7500 # include <linux/irq-entry-common.h>
7501 # endif
7502
7503 /*
7504 * SC:cond_resched
7505 * SC:might_resched
7506 * SC:preempt_schedule
7507 * SC:preempt_schedule_notrace
7508 * SC:irqentry_exit_cond_resched
7509 *
7510 *
7511 * NONE:
7512 * cond_resched <- __cond_resched
7513 * might_resched <- RET0
7514 * preempt_schedule <- NOP
7515 * preempt_schedule_notrace <- NOP
7516 * irqentry_exit_cond_resched <- NOP
7517 * dynamic_preempt_lazy <- false
7518 *
7519 * VOLUNTARY:
7520 * cond_resched <- __cond_resched
7521 * might_resched <- __cond_resched
7522 * preempt_schedule <- NOP
7523 * preempt_schedule_notrace <- NOP
7524 * irqentry_exit_cond_resched <- NOP
7525 * dynamic_preempt_lazy <- false
7526 *
7527 * FULL:
7528 * cond_resched <- RET0
7529 * might_resched <- RET0
7530 * preempt_schedule <- preempt_schedule
7531 * preempt_schedule_notrace <- preempt_schedule_notrace
7532 * irqentry_exit_cond_resched <- irqentry_exit_cond_resched
7533 * dynamic_preempt_lazy <- false
7534 *
7535 * LAZY:
7536 * cond_resched <- RET0
7537 * might_resched <- RET0
7538 * preempt_schedule <- preempt_schedule
7539 * preempt_schedule_notrace <- preempt_schedule_notrace
7540 * irqentry_exit_cond_resched <- irqentry_exit_cond_resched
7541 * dynamic_preempt_lazy <- true
7542 */
7543
7544 enum {
7545 preempt_dynamic_undefined = -1,
7546 preempt_dynamic_none,
7547 preempt_dynamic_voluntary,
7548 preempt_dynamic_full,
7549 preempt_dynamic_lazy,
7550 };
7551
7552 int preempt_dynamic_mode = preempt_dynamic_undefined;
7553
7554 int sched_dynamic_mode(const char *str)
7555 {
7556 # ifndef CONFIG_PREEMPT_RT
7557 if (!strcmp(str, "none"))
7558 return preempt_dynamic_none;
7559
7560 if (!strcmp(str, "voluntary"))
7561 return preempt_dynamic_voluntary;
7562 # endif
7563
7564 if (!strcmp(str, "full"))
7565 return preempt_dynamic_full;
7566
7567 # ifdef CONFIG_ARCH_HAS_PREEMPT_LAZY
7568 if (!strcmp(str, "lazy"))
7569 return preempt_dynamic_lazy;
7570 # endif
7571
7572 return -EINVAL;
7573 }
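
/*
 * Examples (illustrative):
 *
 *	sched_dynamic_mode("full")  -> preempt_dynamic_full
 *	sched_dynamic_mode("lazy")  -> preempt_dynamic_lazy
 *	                               (only with CONFIG_ARCH_HAS_PREEMPT_LAZY)
 *	sched_dynamic_mode("bogus") -> -EINVAL
 *
 * "none" and "voluntary" are rejected on PREEMPT_RT builds, see above.
 */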
7574
7575 # define preempt_dynamic_key_enable(f) static_key_enable(&sk_dynamic_##f.key)
7576 # define preempt_dynamic_key_disable(f) static_key_disable(&sk_dynamic_##f.key)
7577
7578 # if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
7579 # define preempt_dynamic_enable(f) static_call_update(f, f##_dynamic_enabled)
7580 # define preempt_dynamic_disable(f) static_call_update(f, f##_dynamic_disabled)
7581 # elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7582 # define preempt_dynamic_enable(f) preempt_dynamic_key_enable(f)
7583 # define preempt_dynamic_disable(f) preempt_dynamic_key_disable(f)
7584 # else
7585 # error "Unsupported PREEMPT_DYNAMIC mechanism"
7586 # endif
7587
7588 static DEFINE_MUTEX(sched_dynamic_mutex);
7589
7590 static void __sched_dynamic_update(int mode)
7591 {
7592 /*
7593 	 * Prevent {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
7594 * the ZERO state, which is invalid.
7595 */
7596 preempt_dynamic_enable(cond_resched);
7597 preempt_dynamic_enable(might_resched);
7598 preempt_dynamic_enable(preempt_schedule);
7599 preempt_dynamic_enable(preempt_schedule_notrace);
7600 preempt_dynamic_enable(irqentry_exit_cond_resched);
7601 preempt_dynamic_key_disable(preempt_lazy);
7602
7603 switch (mode) {
7604 case preempt_dynamic_none:
7605 preempt_dynamic_enable(cond_resched);
7606 preempt_dynamic_disable(might_resched);
7607 preempt_dynamic_disable(preempt_schedule);
7608 preempt_dynamic_disable(preempt_schedule_notrace);
7609 preempt_dynamic_disable(irqentry_exit_cond_resched);
7610 preempt_dynamic_key_disable(preempt_lazy);
7611 if (mode != preempt_dynamic_mode)
7612 pr_info("Dynamic Preempt: none\n");
7613 break;
7614
7615 case preempt_dynamic_voluntary:
7616 preempt_dynamic_enable(cond_resched);
7617 preempt_dynamic_enable(might_resched);
7618 preempt_dynamic_disable(preempt_schedule);
7619 preempt_dynamic_disable(preempt_schedule_notrace);
7620 preempt_dynamic_disable(irqentry_exit_cond_resched);
7621 preempt_dynamic_key_disable(preempt_lazy);
7622 if (mode != preempt_dynamic_mode)
7623 pr_info("Dynamic Preempt: voluntary\n");
7624 break;
7625
7626 case preempt_dynamic_full:
7627 preempt_dynamic_disable(cond_resched);
7628 preempt_dynamic_disable(might_resched);
7629 preempt_dynamic_enable(preempt_schedule);
7630 preempt_dynamic_enable(preempt_schedule_notrace);
7631 preempt_dynamic_enable(irqentry_exit_cond_resched);
7632 preempt_dynamic_key_disable(preempt_lazy);
7633 if (mode != preempt_dynamic_mode)
7634 pr_info("Dynamic Preempt: full\n");
7635 break;
7636
7637 case preempt_dynamic_lazy:
7638 preempt_dynamic_disable(cond_resched);
7639 preempt_dynamic_disable(might_resched);
7640 preempt_dynamic_enable(preempt_schedule);
7641 preempt_dynamic_enable(preempt_schedule_notrace);
7642 preempt_dynamic_enable(irqentry_exit_cond_resched);
7643 preempt_dynamic_key_enable(preempt_lazy);
7644 if (mode != preempt_dynamic_mode)
7645 pr_info("Dynamic Preempt: lazy\n");
7646 break;
7647 }
7648
7649 preempt_dynamic_mode = mode;
7650 }
7651
7652 void sched_dynamic_update(int mode)
7653 {
7654 mutex_lock(&sched_dynamic_mutex);
7655 __sched_dynamic_update(mode);
7656 mutex_unlock(&sched_dynamic_mutex);
7657 }
7658
7659 static int __init setup_preempt_mode(char *str)
7660 {
7661 int mode = sched_dynamic_mode(str);
7662 if (mode < 0) {
7663 pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
7664 return 0;
7665 }
7666
7667 sched_dynamic_update(mode);
7668 return 1;
7669 }
7670 __setup("preempt=", setup_preempt_mode);
7671
7672 static void __init preempt_dynamic_init(void)
7673 {
7674 if (preempt_dynamic_mode == preempt_dynamic_undefined) {
7675 if (IS_ENABLED(CONFIG_PREEMPT_NONE)) {
7676 sched_dynamic_update(preempt_dynamic_none);
7677 } else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) {
7678 sched_dynamic_update(preempt_dynamic_voluntary);
7679 } else if (IS_ENABLED(CONFIG_PREEMPT_LAZY)) {
7680 sched_dynamic_update(preempt_dynamic_lazy);
7681 } else {
7682 /* Default static call setting, nothing to do */
7683 WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT));
7684 preempt_dynamic_mode = preempt_dynamic_full;
7685 pr_info("Dynamic Preempt: full\n");
7686 }
7687 }
7688 }
7689
7690 # define PREEMPT_MODEL_ACCESSOR(mode) \
7691 bool preempt_model_##mode(void) \
7692 { \
7693 WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \
7694 return preempt_dynamic_mode == preempt_dynamic_##mode; \
7695 } \
7696 EXPORT_SYMBOL_GPL(preempt_model_##mode)
7697
7698 PREEMPT_MODEL_ACCESSOR(none);
7699 PREEMPT_MODEL_ACCESSOR(voluntary);
7700 PREEMPT_MODEL_ACCESSOR(full);
7701 PREEMPT_MODEL_ACCESSOR(lazy);
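
/*
 * For reference, PREEMPT_MODEL_ACCESSOR(full) expands to roughly:
 *
 *	bool preempt_model_full(void)
 *	{
 *		WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined);
 *		return preempt_dynamic_mode == preempt_dynamic_full;
 *	}
 *	EXPORT_SYMBOL_GPL(preempt_model_full);
 */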
7702
7703 #else /* !CONFIG_PREEMPT_DYNAMIC: */
7704
7705 #define preempt_dynamic_mode -1
7706
7707 static inline void preempt_dynamic_init(void) { }
7708
7709 #endif /* CONFIG_PREEMPT_DYNAMIC */
7710
7711 const char *preempt_modes[] = {
7712 "none", "voluntary", "full", "lazy", NULL,
7713 };
7714
7715 const char *preempt_model_str(void)
7716 {
7717 bool brace = IS_ENABLED(CONFIG_PREEMPT_RT) &&
7718 (IS_ENABLED(CONFIG_PREEMPT_DYNAMIC) ||
7719 IS_ENABLED(CONFIG_PREEMPT_LAZY));
7720 static char buf[128];
7721
7722 if (IS_ENABLED(CONFIG_PREEMPT_BUILD)) {
7723 struct seq_buf s;
7724
7725 seq_buf_init(&s, buf, sizeof(buf));
7726 seq_buf_puts(&s, "PREEMPT");
7727
7728 if (IS_ENABLED(CONFIG_PREEMPT_RT))
7729 seq_buf_printf(&s, "%sRT%s",
7730 brace ? "_{" : "_",
7731 brace ? "," : "");
7732
7733 if (IS_ENABLED(CONFIG_PREEMPT_DYNAMIC)) {
7734 seq_buf_printf(&s, "(%s)%s",
7735 preempt_dynamic_mode >= 0 ?
7736 preempt_modes[preempt_dynamic_mode] : "undef",
7737 brace ? "}" : "");
7738 return seq_buf_str(&s);
7739 }
7740
7741 if (IS_ENABLED(CONFIG_PREEMPT_LAZY)) {
7742 seq_buf_printf(&s, "LAZY%s",
7743 brace ? "}" : "");
7744 return seq_buf_str(&s);
7745 }
7746
7747 return seq_buf_str(&s);
7748 }
7749
7750 if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY_BUILD))
7751 return "VOLUNTARY";
7752
7753 return "NONE";
7754 }
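
/*
 * Example output (illustrative): a CONFIG_PREEMPT_DYNAMIC, !PREEMPT_RT kernel
 * currently in the "full" mode yields "PREEMPT(full)"; a plain
 * CONFIG_PREEMPT_VOLUNTARY build yields "VOLUNTARY".
 */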
7755
7756 int io_schedule_prepare(void)
7757 {
7758 int old_iowait = current->in_iowait;
7759
7760 current->in_iowait = 1;
7761 blk_flush_plug(current->plug, true);
7762 return old_iowait;
7763 }
7764
7765 void io_schedule_finish(int token)
7766 {
7767 current->in_iowait = token;
7768 }
7769
7770 /*
7771 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
7772 * that process accounting knows that this is a task in IO wait state.
7773 */
7774 long __sched io_schedule_timeout(long timeout)
7775 {
7776 int token;
7777 long ret;
7778
7779 token = io_schedule_prepare();
7780 ret = schedule_timeout(timeout);
7781 io_schedule_finish(token);
7782
7783 return ret;
7784 }
7785 EXPORT_SYMBOL(io_schedule_timeout);
7786
7787 void __sched io_schedule(void)
7788 {
7789 int token;
7790
7791 token = io_schedule_prepare();
7792 schedule();
7793 io_schedule_finish(token);
7794 }
7795 EXPORT_SYMBOL(io_schedule);
7796
7797 void sched_show_task(struct task_struct *p)
7798 {
7799 unsigned long free;
7800 int ppid;
7801
7802 if (!try_get_task_stack(p))
7803 return;
7804
7805 pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
7806
7807 if (task_is_running(p))
7808 pr_cont(" running task ");
7809 free = stack_not_used(p);
7810 ppid = 0;
7811 rcu_read_lock();
7812 if (pid_alive(p))
7813 ppid = task_pid_nr(rcu_dereference(p->real_parent));
7814 rcu_read_unlock();
7815 pr_cont(" stack:%-5lu pid:%-5d tgid:%-5d ppid:%-6d task_flags:0x%04x flags:0x%08lx\n",
7816 free, task_pid_nr(p), task_tgid_nr(p),
7817 ppid, p->flags, read_task_thread_flags(p));
7818
7819 print_worker_info(KERN_INFO, p);
7820 print_stop_info(KERN_INFO, p);
7821 print_scx_info(KERN_INFO, p);
7822 show_stack(p, NULL, KERN_INFO);
7823 put_task_stack(p);
7824 }
7825 EXPORT_SYMBOL_GPL(sched_show_task);
7826
7827 static inline bool
7828 state_filter_match(unsigned long state_filter, struct task_struct *p)
7829 {
7830 unsigned int state = READ_ONCE(p->__state);
7831
7832 /* no filter, everything matches */
7833 if (!state_filter)
7834 return true;
7835
7836 /* filter, but doesn't match */
7837 if (!(state & state_filter))
7838 return false;
7839
7840 /*
7841 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
7842 * TASK_KILLABLE).
7843 */
7844 if (state_filter == TASK_UNINTERRUPTIBLE && (state & TASK_NOLOAD))
7845 return false;
7846
7847 return true;
7848 }
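
/*
 * Example (illustrative): show_state_filter(TASK_UNINTERRUPTIBLE), as used
 * for the SysRq "show blocked tasks" dump, matches D-state tasks but skips
 * TASK_IDLE kthreads, since TASK_IDLE is TASK_UNINTERRUPTIBLE | TASK_NOLOAD
 * and the TASK_NOLOAD check above filters it out.
 */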
7849
7850
7851 void show_state_filter(unsigned int state_filter)
7852 {
7853 struct task_struct *g, *p;
7854
7855 rcu_read_lock();
7856 for_each_process_thread(g, p) {
7857 /*
7858 		 * Reset the NMI watchdog timeout; listing all tasks on a slow
7859 		 * console might take a lot of time.
7860 * Also, reset softlockup watchdogs on all CPUs, because
7861 * another CPU might be blocked waiting for us to process
7862 * an IPI.
7863 */
7864 touch_nmi_watchdog();
7865 touch_all_softlockup_watchdogs();
7866 if (state_filter_match(state_filter, p))
7867 sched_show_task(p);
7868 }
7869
7870 if (!state_filter)
7871 sysrq_sched_debug_show();
7872
7873 rcu_read_unlock();
7874 /*
7875 * Only show locks if all tasks are dumped:
7876 */
7877 if (!state_filter)
7878 debug_show_all_locks();
7879 }
7880
7881 /**
7882 * init_idle - set up an idle thread for a given CPU
7883 * @idle: task in question
7884 * @cpu: CPU the idle task belongs to
7885 *
7886 * NOTE: this function does not set the idle thread's NEED_RESCHED
7887 * flag, to make booting more robust.
7888 */
7889 void __init init_idle(struct task_struct *idle, int cpu)
7890 {
7891 struct affinity_context ac = (struct affinity_context) {
7892 .new_mask = cpumask_of(cpu),
7893 .flags = 0,
7894 };
7895 struct rq *rq = cpu_rq(cpu);
7896 unsigned long flags;
7897
7898 raw_spin_lock_irqsave(&idle->pi_lock, flags);
7899 raw_spin_rq_lock(rq);
7900
7901 idle->__state = TASK_RUNNING;
7902 idle->se.exec_start = sched_clock();
7903 /*
7904 * PF_KTHREAD should already be set at this point; regardless, make it
7905 * look like a proper per-CPU kthread.
7906 */
7907 idle->flags |= PF_KTHREAD | PF_NO_SETAFFINITY;
7908 kthread_set_per_cpu(idle, cpu);
7909
7910 /*
7911 * No validation and serialization required at boot time and for
7912 * setting up the idle tasks of not yet online CPUs.
7913 */
7914 set_cpus_allowed_common(idle, &ac);
7915 /*
7916 	 * We have a chicken-and-egg problem here: even though we are
7917 	 * holding rq->lock, the task's CPU isn't set to this CPU yet, so
7918 	 * the lockdep check in task_group() will fail.
7919 	 *
7920 	 * This is similar to the sched_fork() case. Alternatively we could
7921 	 * use task_rq_lock() here and obtain the other rq->lock.
7922 	 *
7923 	 * Silence PROVE_RCU.
7924 */
7925 rcu_read_lock();
7926 __set_task_cpu(idle, cpu);
7927 rcu_read_unlock();
7928
7929 rq->idle = idle;
7930 rq_set_donor(rq, idle);
7931 rcu_assign_pointer(rq->curr, idle);
7932 idle->on_rq = TASK_ON_RQ_QUEUED;
7933 idle->on_cpu = 1;
7934 raw_spin_rq_unlock(rq);
7935 raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
7936
7937 /* Set the preempt count _outside_ the spinlocks! */
7938 init_idle_preempt_count(idle, cpu);
7939
7940 /*
7941 * The idle tasks have their own, simple scheduling class:
7942 */
7943 idle->sched_class = &idle_sched_class;
7944 ftrace_graph_init_idle_task(idle, cpu);
7945 vtime_init_idle(idle, cpu);
7946 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
7947 }
7948
7949 int cpuset_cpumask_can_shrink(const struct cpumask *cur,
7950 const struct cpumask *trial)
7951 {
7952 int ret = 1;
7953
7954 if (cpumask_empty(cur))
7955 return ret;
7956
7957 ret = dl_cpuset_cpumask_can_shrink(cur, trial);
7958
7959 return ret;
7960 }
7961
7962 int task_can_attach(struct task_struct *p)
7963 {
7964 int ret = 0;
7965
7966 /*
7967 * Kthreads which disallow setaffinity shouldn't be moved
7968 * to a new cpuset; we don't want to change their CPU
7969 * affinity and isolating such threads by their set of
7970 * allowed nodes is unnecessary. Thus, cpusets are not
7971 * applicable for such threads. This prevents checking for
7972 * success of set_cpus_allowed_ptr() on all attached tasks
7973 * before cpus_mask may be changed.
7974 */
7975 if (p->flags & PF_NO_SETAFFINITY)
7976 ret = -EINVAL;
7977
7978 return ret;
7979 }
7980
7981 bool sched_smp_initialized __read_mostly;
7982
7983 #ifdef CONFIG_NUMA_BALANCING
7984 /* Migrate current task p to target_cpu */
7985 int migrate_task_to(struct task_struct *p, int target_cpu)
7986 {
7987 struct migration_arg arg = { p, target_cpu };
7988 int curr_cpu = task_cpu(p);
7989
7990 if (curr_cpu == target_cpu)
7991 return 0;
7992
7993 if (!cpumask_test_cpu(target_cpu, p->cpus_ptr))
7994 return -EINVAL;
7995
7996 /* TODO: This is not properly updating schedstats */
7997
7998 trace_sched_move_numa(p, curr_cpu, target_cpu);
7999 return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
8000 }
8001
8002 /*
8003 * Requeue a task on a given node and accurately track the number of NUMA
8004 * tasks on the runqueues
8005 */
8006 void sched_setnuma(struct task_struct *p, int nid)
8007 {
8008 guard(task_rq_lock)(p);
8009 scoped_guard (sched_change, p, DEQUEUE_SAVE)
8010 p->numa_preferred_nid = nid;
8011 }
8012 #endif /* CONFIG_NUMA_BALANCING */
8013
8014 #ifdef CONFIG_HOTPLUG_CPU
8015 /*
8016 * Invoked on the outgoing CPU in context of the CPU hotplug thread
8017 * after ensuring that there are no user space tasks left on the CPU.
8018 *
8019 * If there is a lazy mm in use on the hotplug thread, drop it and
8020 * switch to init_mm.
8021 *
8022 * The reference count on init_mm is dropped in finish_cpu().
8023 */
8024 static void sched_force_init_mm(void)
8025 {
8026 struct mm_struct *mm = current->active_mm;
8027
8028 if (mm != &init_mm) {
8029 mmgrab_lazy_tlb(&init_mm);
8030 local_irq_disable();
8031 current->active_mm = &init_mm;
8032 switch_mm_irqs_off(mm, &init_mm, current);
8033 local_irq_enable();
8034 finish_arch_post_lock_switch();
8035 mmdrop_lazy_tlb(mm);
8036 }
8037
8038 	/* finish_cpu(), as run on the BP, will clean up the active_mm state */
8039 }
8040
8041 static int __balance_push_cpu_stop(void *arg)
8042 {
8043 struct task_struct *p = arg;
8044 struct rq *rq = this_rq();
8045 struct rq_flags rf;
8046 int cpu;
8047
8048 scoped_guard (raw_spinlock_irq, &p->pi_lock) {
8049 cpu = select_fallback_rq(rq->cpu, p);
8050
8051 rq_lock(rq, &rf);
8052 update_rq_clock(rq);
8053 if (task_rq(p) == rq && task_on_rq_queued(p))
8054 rq = __migrate_task(rq, &rf, p, cpu);
8055 rq_unlock(rq, &rf);
8056 }
8057
8058 put_task_struct(p);
8059
8060 return 0;
8061 }
8062
8063 static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
8064
8065 /*
8066 * Ensure we only run per-cpu kthreads once the CPU goes !active.
8067 *
8068  * This is enabled below SCHED_AP_ACTIVE, i.e. when !cpu_active(), but is
8069  * only effective while the CPU is going down.
8070 */
8071 static void balance_push(struct rq *rq)
8072 {
8073 struct task_struct *push_task = rq->curr;
8074
8075 lockdep_assert_rq_held(rq);
8076
8077 /*
8078 * Ensure the thing is persistent until balance_push_set(.on = false);
8079 */
8080 rq->balance_callback = &balance_push_callback;
8081
8082 /*
8083 * Only active while going offline and when invoked on the outgoing
8084 * CPU.
8085 */
8086 if (!cpu_dying(rq->cpu) || rq != this_rq())
8087 return;
8088
8089 /*
8090 	 * Both the CPU-hotplug thread and the stop task fall into this case
8091 	 * and are required to complete the hotplug process.
8092 */
8093 if (kthread_is_per_cpu(push_task) ||
8094 is_migration_disabled(push_task)) {
8095
8096 /*
8097 * If this is the idle task on the outgoing CPU try to wake
8098 * up the hotplug control thread which might wait for the
8099 * last task to vanish. The rcuwait_active() check is
8100 * accurate here because the waiter is pinned on this CPU
8101 		 * and obviously can't be running in parallel.
8102 *
8103 * On RT kernels this also has to check whether there are
8104 * pinned and scheduled out tasks on the runqueue. They
8105 * need to leave the migrate disabled section first.
8106 */
8107 if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
8108 rcuwait_active(&rq->hotplug_wait)) {
8109 raw_spin_rq_unlock(rq);
8110 rcuwait_wake_up(&rq->hotplug_wait);
8111 raw_spin_rq_lock(rq);
8112 }
8113 return;
8114 }
8115
8116 get_task_struct(push_task);
8117 /*
8118 	 * Temporarily drop rq->lock such that we can wake up the stop task.
8119 * Both preemption and IRQs are still disabled.
8120 */
8121 preempt_disable();
8122 raw_spin_rq_unlock(rq);
8123 stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
8124 this_cpu_ptr(&push_work));
8125 preempt_enable();
8126 /*
8127 * At this point need_resched() is true and we'll take the loop in
8128 	 * schedule(). The next pick is obviously going to be the stop task,
8129 	 * which is a per-CPU kthread and will push this task away.
8130 */
8131 raw_spin_rq_lock(rq);
8132 }
8133
8134 static void balance_push_set(int cpu, bool on)
8135 {
8136 struct rq *rq = cpu_rq(cpu);
8137 struct rq_flags rf;
8138
8139 rq_lock_irqsave(rq, &rf);
8140 if (on) {
8141 WARN_ON_ONCE(rq->balance_callback);
8142 rq->balance_callback = &balance_push_callback;
8143 } else if (rq->balance_callback == &balance_push_callback) {
8144 rq->balance_callback = NULL;
8145 }
8146 rq_unlock_irqrestore(rq, &rf);
8147 }
8148
8149 /*
8150  * Invoked from a CPU's hotplug control thread after the CPU has been marked
8151 * inactive. All tasks which are not per CPU kernel threads are either
8152 * pushed off this CPU now via balance_push() or placed on a different CPU
8153 * during wakeup. Wait until the CPU is quiescent.
8154 */
8155 static void balance_hotplug_wait(void)
8156 {
8157 struct rq *rq = this_rq();
8158
8159 rcuwait_wait_event(&rq->hotplug_wait,
8160 rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
8161 TASK_UNINTERRUPTIBLE);
8162 }
8163
8164 #else /* !CONFIG_HOTPLUG_CPU: */
8165
8166 static inline void balance_push(struct rq *rq)
8167 {
8168 }
8169
8170 static inline void balance_push_set(int cpu, bool on)
8171 {
8172 }
8173
8174 static inline void balance_hotplug_wait(void)
8175 {
8176 }
8177
8178 #endif /* !CONFIG_HOTPLUG_CPU */
8179
8180 void set_rq_online(struct rq *rq)
8181 {
8182 if (!rq->online) {
8183 const struct sched_class *class;
8184
8185 cpumask_set_cpu(rq->cpu, rq->rd->online);
8186 rq->online = 1;
8187
8188 for_each_class(class) {
8189 if (class->rq_online)
8190 class->rq_online(rq);
8191 }
8192 }
8193 }
8194
8195 void set_rq_offline(struct rq *rq)
8196 {
8197 if (rq->online) {
8198 const struct sched_class *class;
8199
8200 update_rq_clock(rq);
8201 for_each_class(class) {
8202 if (class->rq_offline)
8203 class->rq_offline(rq);
8204 }
8205
8206 cpumask_clear_cpu(rq->cpu, rq->rd->online);
8207 rq->online = 0;
8208 }
8209 }
8210
8211 static inline void sched_set_rq_online(struct rq *rq, int cpu)
8212 {
8213 struct rq_flags rf;
8214
8215 rq_lock_irqsave(rq, &rf);
8216 if (rq->rd) {
8217 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
8218 set_rq_online(rq);
8219 }
8220 rq_unlock_irqrestore(rq, &rf);
8221 }
8222
8223 static inline void sched_set_rq_offline(struct rq *rq, int cpu)
8224 {
8225 struct rq_flags rf;
8226
8227 rq_lock_irqsave(rq, &rf);
8228 if (rq->rd) {
8229 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
8230 set_rq_offline(rq);
8231 }
8232 rq_unlock_irqrestore(rq, &rf);
8233 }
8234
8235 /*
8236 * used to mark begin/end of suspend/resume:
8237 */
8238 static int num_cpus_frozen;
8239
8240 /*
8241 * Update cpusets according to cpu_active mask. If cpusets are
8242 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
8243 * around partition_sched_domains().
8244 *
8245 * If we come here as part of a suspend/resume, don't touch cpusets because we
8246  * want to restore them to their original state upon resume anyway.
8247 */
8248 static void cpuset_cpu_active(void)
8249 {
8250 if (cpuhp_tasks_frozen) {
8251 /*
8252 * num_cpus_frozen tracks how many CPUs are involved in suspend
8253 * resume sequence. As long as this is not the last online
8254 * operation in the resume sequence, just build a single sched
8255 * domain, ignoring cpusets.
8256 */
8257 cpuset_reset_sched_domains();
8258 if (--num_cpus_frozen)
8259 return;
8260 /*
8261 * This is the last CPU online operation. So fall through and
8262 * restore the original sched domains by considering the
8263 * cpuset configurations.
8264 */
8265 cpuset_force_rebuild();
8266 }
8267 cpuset_update_active_cpus();
8268 }
8269
8270 static void cpuset_cpu_inactive(unsigned int cpu)
8271 {
8272 if (!cpuhp_tasks_frozen) {
8273 cpuset_update_active_cpus();
8274 } else {
8275 num_cpus_frozen++;
8276 cpuset_reset_sched_domains();
8277 }
8278 }
8279
8280 static inline void sched_smt_present_inc(int cpu)
8281 {
8282 #ifdef CONFIG_SCHED_SMT
8283 if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
8284 static_branch_inc_cpuslocked(&sched_smt_present);
8285 #endif
8286 }
8287
8288 static inline void sched_smt_present_dec(int cpu)
8289 {
8290 #ifdef CONFIG_SCHED_SMT
8291 if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
8292 static_branch_dec_cpuslocked(&sched_smt_present);
8293 #endif
8294 }
8295
8296 int sched_cpu_activate(unsigned int cpu)
8297 {
8298 struct rq *rq = cpu_rq(cpu);
8299
8300 /*
8301 * Clear the balance_push callback and prepare to schedule
8302 * regular tasks.
8303 */
8304 balance_push_set(cpu, false);
8305
8306 /*
8307 * When going up, increment the number of cores with SMT present.
8308 */
8309 sched_smt_present_inc(cpu);
8310 set_cpu_active(cpu, true);
8311
8312 if (sched_smp_initialized) {
8313 sched_update_numa(cpu, true);
8314 sched_domains_numa_masks_set(cpu);
8315 cpuset_cpu_active();
8316 }
8317
8318 scx_rq_activate(rq);
8319
8320 /*
8321 * Put the rq online, if not already. This happens:
8322 *
8323 * 1) In the early boot process, because we build the real domains
8324 * after all CPUs have been brought up.
8325 *
8326 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
8327 * domains.
8328 */
8329 sched_set_rq_online(rq, cpu);
8330
8331 return 0;
8332 }
8333
8334 int sched_cpu_deactivate(unsigned int cpu)
8335 {
8336 struct rq *rq = cpu_rq(cpu);
8337 int ret;
8338
8339 ret = dl_bw_deactivate(cpu);
8340
8341 if (ret)
8342 return ret;
8343
8344 /*
8345 	 * Remove the CPU from nohz.idle_cpus_mask to prevent it from
8346 	 * participating in load balancing while not active.
8347 */
8348 nohz_balance_exit_idle(rq);
8349
8350 set_cpu_active(cpu, false);
8351
8352 /*
8353 * From this point forward, this CPU will refuse to run any task that
8354 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
8355 * push those tasks away until this gets cleared, see
8356 * sched_cpu_dying().
8357 */
8358 balance_push_set(cpu, true);
8359
8360 /*
8361 * We've cleared cpu_active_mask / set balance_push, wait for all
8362 * preempt-disabled and RCU users of this state to go away such that
8363 * all new such users will observe it.
8364 *
8365 * Specifically, we rely on ttwu to no longer target this CPU, see
8366 * ttwu_queue_cond() and is_cpu_allowed().
8367 *
8368 	 * Do the sync before parking the smpboot threads to take care of the RCU boost case.
8369 */
8370 synchronize_rcu();
8371
8372 sched_set_rq_offline(rq, cpu);
8373
8374 scx_rq_deactivate(rq);
8375
8376 /*
8377 * When going down, decrement the number of cores with SMT present.
8378 */
8379 sched_smt_present_dec(cpu);
8380
8381 #ifdef CONFIG_SCHED_SMT
8382 sched_core_cpu_deactivate(cpu);
8383 #endif
8384
8385 if (!sched_smp_initialized)
8386 return 0;
8387
8388 sched_update_numa(cpu, false);
8389 cpuset_cpu_inactive(cpu);
8390 sched_domains_numa_masks_clear(cpu);
8391 return 0;
8392 }
8393
8394 static void sched_rq_cpu_starting(unsigned int cpu)
8395 {
8396 struct rq *rq = cpu_rq(cpu);
8397
8398 rq->calc_load_update = calc_load_update;
8399 update_max_interval();
8400 }
8401
8402 int sched_cpu_starting(unsigned int cpu)
8403 {
8404 sched_core_cpu_starting(cpu);
8405 sched_rq_cpu_starting(cpu);
8406 sched_tick_start(cpu);
8407 return 0;
8408 }
8409
8410 #ifdef CONFIG_HOTPLUG_CPU
8411
8412 /*
8413 * Invoked immediately before the stopper thread is invoked to bring the
8414 * CPU down completely. At this point all per CPU kthreads except the
8415 * hotplug thread (current) and the stopper thread (inactive) have been
8416 * either parked or have been unbound from the outgoing CPU. Ensure that
8417 * any of those which might be on the way out are gone.
8418 *
8419 * If after this point a bound task is being woken on this CPU then the
8420  * responsible hotplug callback has failed to do its job.
8421 * sched_cpu_dying() will catch it with the appropriate fireworks.
8422 */
8423 int sched_cpu_wait_empty(unsigned int cpu)
8424 {
8425 balance_hotplug_wait();
8426 sched_force_init_mm();
8427 return 0;
8428 }
8429
8430 /*
8431 * Since this CPU is going 'away' for a while, fold any nr_active delta we
8432 * might have. Called from the CPU stopper task after ensuring that the
8433 * stopper is the last running task on the CPU, so nr_active count is
8434 * stable. We need to take the tear-down thread which is calling this into
8435 * account, so we hand in adjust = 1 to the load calculation.
8436 *
8437 * Also see the comment "Global load-average calculations".
8438 */
8439 static void calc_load_migrate(struct rq *rq)
8440 {
8441 long delta = calc_load_fold_active(rq, 1);
8442
8443 if (delta)
8444 atomic_long_add(delta, &calc_load_tasks);
8445 }
8446
8447 static void dump_rq_tasks(struct rq *rq, const char *loglvl)
8448 {
8449 struct task_struct *g, *p;
8450 int cpu = cpu_of(rq);
8451
8452 lockdep_assert_rq_held(rq);
8453
8454 printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
8455 for_each_process_thread(g, p) {
8456 if (task_cpu(p) != cpu)
8457 continue;
8458
8459 if (!task_on_rq_queued(p))
8460 continue;
8461
8462 printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
8463 }
8464 }
8465
8466 int sched_cpu_dying(unsigned int cpu)
8467 {
8468 struct rq *rq = cpu_rq(cpu);
8469 struct rq_flags rf;
8470
8471 /* Handle pending wakeups and then migrate everything off */
8472 sched_tick_stop(cpu);
8473
8474 rq_lock_irqsave(rq, &rf);
8475 update_rq_clock(rq);
8476 if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
8477 WARN(true, "Dying CPU not properly vacated!");
8478 dump_rq_tasks(rq, KERN_WARNING);
8479 }
8480 dl_server_stop(&rq->fair_server);
8481 rq_unlock_irqrestore(rq, &rf);
8482
8483 calc_load_migrate(rq);
8484 update_max_interval();
8485 hrtick_clear(rq);
8486 sched_core_cpu_dying(cpu);
8487 return 0;
8488 }
8489 #endif /* CONFIG_HOTPLUG_CPU */
8490
8491 void __init sched_init_smp(void)
8492 {
8493 sched_init_numa(NUMA_NO_NODE);
8494
8495 prandom_init_once(&sched_rnd_state);
8496
8497 /*
8498 * There's no userspace yet to cause hotplug operations; hence all the
8499 * CPU masks are stable and all blatant races in the below code cannot
8500 * happen.
8501 */
8502 sched_domains_mutex_lock();
8503 sched_init_domains(cpu_active_mask);
8504 sched_domains_mutex_unlock();
8505
8506 /* Move init over to a non-isolated CPU */
8507 if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0)
8508 BUG();
8509 current->flags &= ~PF_NO_SETAFFINITY;
8510 sched_init_granularity();
8511
8512 init_sched_rt_class();
8513 init_sched_dl_class();
8514
8515 sched_init_dl_servers();
8516
8517 sched_smp_initialized = true;
8518 }
8519
8520 static int __init migration_init(void)
8521 {
8522 sched_cpu_starting(smp_processor_id());
8523 return 0;
8524 }
8525 early_initcall(migration_init);
8526
8527 int in_sched_functions(unsigned long addr)
8528 {
8529 return in_lock_functions(addr) ||
8530 (addr >= (unsigned long)__sched_text_start
8531 && addr < (unsigned long)__sched_text_end);
8532 }
8533
8534 #ifdef CONFIG_CGROUP_SCHED
8535 /*
8536 * Default task group.
8537  * Every task in the system belongs to this group at bootup.
8538 */
8539 struct task_group root_task_group;
8540 LIST_HEAD(task_groups);
8541
8542 /* Cacheline aligned slab cache for task_group */
8543 static struct kmem_cache *task_group_cache __ro_after_init;
8544 #endif
8545
8546 void __init sched_init(void)
8547 {
8548 unsigned long ptr = 0;
8549 int i;
8550
8551 /* Make sure the linker didn't screw up */
8552 BUG_ON(!sched_class_above(&stop_sched_class, &dl_sched_class));
8553 BUG_ON(!sched_class_above(&dl_sched_class, &rt_sched_class));
8554 BUG_ON(!sched_class_above(&rt_sched_class, &fair_sched_class));
8555 BUG_ON(!sched_class_above(&fair_sched_class, &idle_sched_class));
8556 #ifdef CONFIG_SCHED_CLASS_EXT
8557 BUG_ON(!sched_class_above(&fair_sched_class, &ext_sched_class));
8558 BUG_ON(!sched_class_above(&ext_sched_class, &idle_sched_class));
8559 #endif
8560
8561 wait_bit_init();
8562
8563 #ifdef CONFIG_FAIR_GROUP_SCHED
8564 ptr += 2 * nr_cpu_ids * sizeof(void **);
8565 #endif
8566 #ifdef CONFIG_RT_GROUP_SCHED
8567 ptr += 2 * nr_cpu_ids * sizeof(void **);
8568 #endif
8569 if (ptr) {
8570 ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT);
8571
8572 #ifdef CONFIG_FAIR_GROUP_SCHED
8573 root_task_group.se = (struct sched_entity **)ptr;
8574 ptr += nr_cpu_ids * sizeof(void **);
8575
8576 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
8577 ptr += nr_cpu_ids * sizeof(void **);
8578
8579 root_task_group.shares = ROOT_TASK_GROUP_LOAD;
8580 init_cfs_bandwidth(&root_task_group.cfs_bandwidth, NULL);
8581 #endif /* CONFIG_FAIR_GROUP_SCHED */
8582 #ifdef CONFIG_EXT_GROUP_SCHED
8583 scx_tg_init(&root_task_group);
8584 #endif /* CONFIG_EXT_GROUP_SCHED */
8585 #ifdef CONFIG_RT_GROUP_SCHED
8586 root_task_group.rt_se = (struct sched_rt_entity **)ptr;
8587 ptr += nr_cpu_ids * sizeof(void **);
8588
8589 root_task_group.rt_rq = (struct rt_rq **)ptr;
8590 ptr += nr_cpu_ids * sizeof(void **);
8591
8592 #endif /* CONFIG_RT_GROUP_SCHED */
8593 }
8594
8595 init_defrootdomain();
8596
8597 #ifdef CONFIG_RT_GROUP_SCHED
8598 init_rt_bandwidth(&root_task_group.rt_bandwidth,
8599 global_rt_period(), global_rt_runtime());
8600 #endif /* CONFIG_RT_GROUP_SCHED */
8601
8602 #ifdef CONFIG_CGROUP_SCHED
8603 task_group_cache = KMEM_CACHE(task_group, 0);
8604
8605 list_add(&root_task_group.list, &task_groups);
8606 INIT_LIST_HEAD(&root_task_group.children);
8607 INIT_LIST_HEAD(&root_task_group.siblings);
8608 autogroup_init(&init_task);
8609 #endif /* CONFIG_CGROUP_SCHED */
8610
8611 for_each_possible_cpu(i) {
8612 struct rq *rq;
8613
8614 rq = cpu_rq(i);
8615 raw_spin_lock_init(&rq->__lock);
8616 rq->nr_running = 0;
8617 rq->calc_load_active = 0;
8618 rq->calc_load_update = jiffies + LOAD_FREQ;
8619 init_cfs_rq(&rq->cfs);
8620 init_rt_rq(&rq->rt);
8621 init_dl_rq(&rq->dl);
8622 #ifdef CONFIG_FAIR_GROUP_SCHED
8623 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
8624 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
8625 /*
8626 * How much CPU bandwidth does root_task_group get?
8627 *
8628 * In case of task-groups formed through the cgroup filesystem, it
8629 * gets 100% of the CPU resources in the system. This overall
8630 * system CPU resource is divided among the tasks of
8631 * root_task_group and its child task-groups in a fair manner,
8632 * based on each entity's (task or task-group's) weight
8633 * (se->load.weight).
8634 *
8635 		 * In other words, if root_task_group has 10 tasks of weight
8636 		 * 1024 and two child groups A0 and A1 (of weight 1024 each),
8637 * then A0's share of the CPU resource is:
8638 *
8639 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
8640 *
8641 * We achieve this by letting root_task_group's tasks sit
8642 		 * directly in rq->cfs (i.e. root_task_group->se[] = NULL).
8643 */
8644 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
8645 #endif /* CONFIG_FAIR_GROUP_SCHED */
8646
8647 #ifdef CONFIG_RT_GROUP_SCHED
8648 /*
8649 		 * This is required for the init CPU because rt.c:__enable_runtime()
8650 		 * only starts working once scheduler_running is set, which is not
8651 		 * yet the case.
8652 */
8653 rq->rt.rt_runtime = global_rt_runtime();
8654 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
8655 #endif
8656 rq->sd = NULL;
8657 rq->rd = NULL;
8658 rq->cpu_capacity = SCHED_CAPACITY_SCALE;
8659 rq->balance_callback = &balance_push_callback;
8660 rq->active_balance = 0;
8661 rq->next_balance = jiffies;
8662 rq->push_cpu = 0;
8663 rq->cpu = i;
8664 rq->online = 0;
8665 rq->idle_stamp = 0;
8666 rq->avg_idle = 2*sysctl_sched_migration_cost;
8667 rq->max_idle_balance_cost = sysctl_sched_migration_cost;
8668
8669 INIT_LIST_HEAD(&rq->cfs_tasks);
8670
8671 rq_attach_root(rq, &def_root_domain);
8672 #ifdef CONFIG_NO_HZ_COMMON
8673 rq->last_blocked_load_update_tick = jiffies;
8674 atomic_set(&rq->nohz_flags, 0);
8675
8676 INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
8677 #endif
8678 #ifdef CONFIG_HOTPLUG_CPU
8679 rcuwait_init(&rq->hotplug_wait);
8680 #endif
8681 hrtick_rq_init(rq);
8682 atomic_set(&rq->nr_iowait, 0);
8683 fair_server_init(rq);
8684
8685 #ifdef CONFIG_SCHED_CORE
8686 rq->core = rq;
8687 rq->core_pick = NULL;
8688 rq->core_dl_server = NULL;
8689 rq->core_enabled = 0;
8690 rq->core_tree = RB_ROOT;
8691 rq->core_forceidle_count = 0;
8692 rq->core_forceidle_occupation = 0;
8693 rq->core_forceidle_start = 0;
8694
8695 rq->core_cookie = 0UL;
8696 #endif
8697 zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i));
8698 }
8699
8700 set_load_weight(&init_task, false);
8701 init_task.se.slice = sysctl_sched_base_slice,
8702
8703 /*
8704 * The boot idle thread does lazy MMU switching as well:
8705 */
8706 mmgrab_lazy_tlb(&init_mm);
8707 enter_lazy_tlb(&init_mm, current);
8708
8709 /*
8710 * The idle task doesn't need the kthread struct to function, but it
8711 * is dressed up as a per-CPU kthread and thus needs to play the part
8712 * if we want to avoid special-casing it in code that deals with per-CPU
8713 * kthreads.
8714 */
8715 WARN_ON(!set_kthread_struct(current));
8716
8717 /*
8718 * Make us the idle thread. Technically, schedule() should not be
8719 * called from this thread, however somewhere below it might be,
8720 * but because we are the idle thread, we just pick up running again
8721 * when this runqueue becomes "idle".
8722 */
8723 __sched_fork(0, current);
8724 init_idle(current, smp_processor_id());
8725
8726 calc_load_update = jiffies + LOAD_FREQ;
8727
8728 idle_thread_set_boot_cpu();
8729
8730 balance_push_set(smp_processor_id(), false);
8731 init_sched_fair_class();
8732 init_sched_ext_class();
8733
8734 psi_init();
8735
8736 init_uclamp();
8737
8738 preempt_dynamic_init();
8739
8740 scheduler_running = 1;
8741 }
8742
8743 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
8744
8745 void __might_sleep(const char *file, int line)
8746 {
8747 unsigned int state = get_current_state();
8748 /*
8749 * Blocking primitives will set (and therefore destroy) current->state,
8750 	 * Blocking primitives will set (and therefore destroy) current->state.
8751 	 * Since we will exit with TASK_RUNNING, make sure we enter with it;
8752 	 * otherwise we would destroy state.
8753 WARN_ONCE(state != TASK_RUNNING && current->task_state_change,
8754 "do not call blocking ops when !TASK_RUNNING; "
8755 "state=%x set at [<%p>] %pS\n", state,
8756 (void *)current->task_state_change,
8757 (void *)current->task_state_change);
8758
8759 __might_resched(file, line, 0);
8760 }
8761 EXPORT_SYMBOL(__might_sleep);
8762
8763 static void print_preempt_disable_ip(int preempt_offset, unsigned long ip)
8764 {
8765 if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT))
8766 return;
8767
8768 if (preempt_count() == preempt_offset)
8769 return;
8770
8771 pr_err("Preemption disabled at:");
8772 print_ip_sym(KERN_ERR, ip);
8773 }
8774
8775 static inline bool resched_offsets_ok(unsigned int offsets)
8776 {
8777 unsigned int nested = preempt_count();
8778
8779 nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT;
8780
8781 return nested == offsets;
8782 }
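
/*
 * Illustration (sketch): 'offsets' packs the expected preempt count into its
 * low MIGHT_RESCHED_PREEMPT_MASK bits and the expected RCU read-side nesting
 * shifted by MIGHT_RESCHED_RCU_SHIFT. E.g. a might_sleep() issued while
 * holding one rcu_read_lock() on a PREEMPT_RCU kernel makes 'nested' differ
 * from the passed-in offsets, triggering the splat below.
 */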
8783
8784 void __might_resched(const char *file, int line, unsigned int offsets)
8785 {
8786 /* Ratelimiting timestamp: */
8787 static unsigned long prev_jiffy;
8788
8789 unsigned long preempt_disable_ip;
8790
8791 /* WARN_ON_ONCE() by default, no rate limit required: */
8792 rcu_sleep_check();
8793
8794 if ((resched_offsets_ok(offsets) && !irqs_disabled() &&
8795 !is_idle_task(current) && !current->non_block_count) ||
8796 system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
8797 oops_in_progress)
8798 return;
8799
8800 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8801 return;
8802 prev_jiffy = jiffies;
8803
8804 /* Save this before calling printk(), since that will clobber it: */
8805 preempt_disable_ip = get_preempt_disable_ip(current);
8806
8807 pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
8808 file, line);
8809 pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
8810 in_atomic(), irqs_disabled(), current->non_block_count,
8811 current->pid, current->comm);
8812 pr_err("preempt_count: %x, expected: %x\n", preempt_count(),
8813 offsets & MIGHT_RESCHED_PREEMPT_MASK);
8814
8815 if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
8816 pr_err("RCU nest depth: %d, expected: %u\n",
8817 rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT);
8818 }
8819
8820 if (task_stack_end_corrupted(current))
8821 pr_emerg("Thread overran stack, or stack corrupted\n");
8822
8823 debug_show_held_locks(current);
8824 if (irqs_disabled())
8825 print_irqtrace_events(current);
8826
8827 print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK,
8828 preempt_disable_ip);
8829
8830 dump_stack();
8831 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
8832 }
8833 EXPORT_SYMBOL(__might_resched);
8834
8835 void __cant_sleep(const char *file, int line, int preempt_offset)
8836 {
8837 static unsigned long prev_jiffy;
8838
8839 if (irqs_disabled())
8840 return;
8841
8842 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
8843 return;
8844
8845 if (preempt_count() > preempt_offset)
8846 return;
8847
8848 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8849 return;
8850 prev_jiffy = jiffies;
8851
8852 printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
8853 printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
8854 in_atomic(), irqs_disabled(),
8855 current->pid, current->comm);
8856
8857 debug_show_held_locks(current);
8858 dump_stack();
8859 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
8860 }
8861 EXPORT_SYMBOL_GPL(__cant_sleep);
8862
8863 # ifdef CONFIG_SMP
8864 void __cant_migrate(const char *file, int line)
8865 {
8866 static unsigned long prev_jiffy;
8867
8868 if (irqs_disabled())
8869 return;
8870
8871 if (is_migration_disabled(current))
8872 return;
8873
8874 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
8875 return;
8876
8877 if (preempt_count() > 0)
8878 return;
8879
8880 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8881 return;
8882 prev_jiffy = jiffies;
8883
8884 pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
8885 pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
8886 in_atomic(), irqs_disabled(), is_migration_disabled(current),
8887 current->pid, current->comm);
8888
8889 debug_show_held_locks(current);
8890 dump_stack();
8891 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
8892 }
8893 EXPORT_SYMBOL_GPL(__cant_migrate);
8894 # endif /* CONFIG_SMP */
8895 #endif /* CONFIG_DEBUG_ATOMIC_SLEEP */
8896
8897 #ifdef CONFIG_MAGIC_SYSRQ
8898 void normalize_rt_tasks(void)
8899 {
8900 struct task_struct *g, *p;
8901 struct sched_attr attr = {
8902 .sched_policy = SCHED_NORMAL,
8903 };
8904
8905 read_lock(&tasklist_lock);
8906 for_each_process_thread(g, p) {
8907 /*
8908 * Only normalize user tasks:
8909 */
8910 if (p->flags & PF_KTHREAD)
8911 continue;
8912
8913 p->se.exec_start = 0;
8914 schedstat_set(p->stats.wait_start, 0);
8915 schedstat_set(p->stats.sleep_start, 0);
8916 schedstat_set(p->stats.block_start, 0);
8917
8918 if (!rt_or_dl_task(p)) {
8919 /*
8920 * Renice negative nice level userspace
8921 * tasks back to 0:
8922 */
8923 if (task_nice(p) < 0)
8924 set_user_nice(p, 0);
8925 continue;
8926 }
8927
8928 __sched_setscheduler(p, &attr, false, false);
8929 }
8930 read_unlock(&tasklist_lock);
8931 }
8932
8933 #endif /* CONFIG_MAGIC_SYSRQ */
8934
8935 #ifdef CONFIG_KGDB_KDB
8936 /*
8937 * These functions are only useful for KDB.
8938 *
8939 * They can only be called when the whole system has been
8940 * stopped - every CPU needs to be quiescent, and no scheduling
8941 * activity can take place. Using them for anything else would
8942 * be a serious bug, and as a result, they aren't even visible
8943 * under any other configuration.
8944 */
8945
8946 /**
8947 * curr_task - return the current task for a given CPU.
8948 * @cpu: the processor in question.
8949 *
8950 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
8951 *
8952 * Return: The current task for @cpu.
8953 */
8954 struct task_struct *curr_task(int cpu)
8955 {
8956 return cpu_curr(cpu);
8957 }
8958
8959 #endif /* CONFIG_KGDB_KDB */
8960
8961 #ifdef CONFIG_CGROUP_SCHED
8962 /* task_group_lock serializes the addition/removal of task groups */
8963 static DEFINE_SPINLOCK(task_group_lock);
8964
8965 static inline void alloc_uclamp_sched_group(struct task_group *tg,
8966 struct task_group *parent)
8967 {
8968 #ifdef CONFIG_UCLAMP_TASK_GROUP
8969 enum uclamp_id clamp_id;
8970
8971 for_each_clamp_id(clamp_id) {
8972 uclamp_se_set(&tg->uclamp_req[clamp_id],
8973 uclamp_none(clamp_id), false);
8974 tg->uclamp[clamp_id] = parent->uclamp[clamp_id];
8975 }
8976 #endif
8977 }
8978
8979 static void sched_free_group(struct task_group *tg)
8980 {
8981 free_fair_sched_group(tg);
8982 free_rt_sched_group(tg);
8983 autogroup_free(tg);
8984 kmem_cache_free(task_group_cache, tg);
8985 }
8986
8987 static void sched_free_group_rcu(struct rcu_head *rcu)
8988 {
8989 sched_free_group(container_of(rcu, struct task_group, rcu));
8990 }
8991
8992 static void sched_unregister_group(struct task_group *tg)
8993 {
8994 unregister_fair_sched_group(tg);
8995 unregister_rt_sched_group(tg);
8996 /*
8997 * We have to wait for yet another RCU grace period to expire, as
8998 * print_cfs_stats() might run concurrently.
8999 */
9000 call_rcu(&tg->rcu, sched_free_group_rcu);
9001 }
9002
9003 /* allocate runqueue etc for a new task group */
9004 struct task_group *sched_create_group(struct task_group *parent)
9005 {
9006 struct task_group *tg;
9007
9008 tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
9009 if (!tg)
9010 return ERR_PTR(-ENOMEM);
9011
9012 if (!alloc_fair_sched_group(tg, parent))
9013 goto err;
9014
9015 if (!alloc_rt_sched_group(tg, parent))
9016 goto err;
9017
9018 scx_tg_init(tg);
9019 alloc_uclamp_sched_group(tg, parent);
9020
9021 return tg;
9022
9023 err:
9024 sched_free_group(tg);
9025 return ERR_PTR(-ENOMEM);
9026 }
9027
9028 void sched_online_group(struct task_group *tg, struct task_group *parent)
9029 {
9030 unsigned long flags;
9031
9032 spin_lock_irqsave(&task_group_lock, flags);
9033 list_add_tail_rcu(&tg->list, &task_groups);
9034
9035 /* Root should already exist: */
9036 WARN_ON(!parent);
9037
9038 tg->parent = parent;
9039 INIT_LIST_HEAD(&tg->children);
9040 list_add_rcu(&tg->siblings, &parent->children);
9041 spin_unlock_irqrestore(&task_group_lock, flags);
9042
9043 online_fair_sched_group(tg);
9044 }
9045
9046 /* RCU callback to free various structures associated with a task group */
9047 static void sched_unregister_group_rcu(struct rcu_head *rhp)
9048 {
9049 /* Now it should be safe to free those cfs_rqs: */
9050 sched_unregister_group(container_of(rhp, struct task_group, rcu));
9051 }
9052
9053 void sched_destroy_group(struct task_group *tg)
9054 {
9055 /* Wait for possible concurrent references to cfs_rqs to complete: */
9056 call_rcu(&tg->rcu, sched_unregister_group_rcu);
9057 }
9058
9059 void sched_release_group(struct task_group *tg)
9060 {
9061 unsigned long flags;
9062
9063 /*
9064 * Unlink first, to avoid walk_tg_tree_from() from finding us (via
9065 * sched_cfs_period_timer()).
9066 *
9067 * For this to be effective, we have to wait for all pending users of
9068 * this task group to leave their RCU critical section to ensure no new
9069 * user will see our dying task group any more. Specifically ensure
9070 * that tg_unthrottle_up() won't add decayed cfs_rq's to it.
9071 *
9072 * We therefore defer calling unregister_fair_sched_group() to
9073 * sched_unregister_group(), which is guaranteed to get called only after the
9074 * current RCU grace period has expired.
9075 */
9076 spin_lock_irqsave(&task_group_lock, flags);
9077 list_del_rcu(&tg->list);
9078 list_del_rcu(&tg->siblings);
9079 spin_unlock_irqrestore(&task_group_lock, flags);
9080 }
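
/*
 * Illustrative summary (derived from the helpers above, not additional
 * behaviour): a task group is torn down in stages so concurrent RCU
 * readers such as walk_tg_tree_from() or print_cfs_stats() never see
 * freed memory:
 *
 *   css_released() -> sched_release_group()      unlink from task_groups
 *   ... RCU grace period ...
 *   css_free()     -> sched_unregister_group()   unregister fair/rt groups
 *   ... RCU grace period ...
 *   sched_free_group_rcu() -> sched_free_group() actually free the memory
 */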
9081
9082 static void sched_change_group(struct task_struct *tsk)
9083 {
9084 struct task_group *tg;
9085
9086 /*
9087 * All callers are synchronized by task_rq_lock(); we do not use RCU
9088 * which would be pointless here. Thus, we pass "true" to task_css_check()
9089 * to prevent lockdep warnings.
9090 */
9091 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
9092 struct task_group, css);
9093 tg = autogroup_task_group(tsk, tg);
9094 tsk->sched_task_group = tg;
9095
9096 #ifdef CONFIG_FAIR_GROUP_SCHED
9097 if (tsk->sched_class->task_change_group)
9098 tsk->sched_class->task_change_group(tsk);
9099 else
9100 #endif
9101 set_task_rq(tsk, task_cpu(tsk));
9102 }
9103
9104 /*
9105 * Change task's runqueue when it moves between groups.
9106 *
9107 * The caller of this function should have put the task in its new group by
9108 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
9109 * its new group.
9110 */
9111 void sched_move_task(struct task_struct *tsk, bool for_autogroup)
9112 {
9113 unsigned int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE;
9114 bool resched = false;
9115 struct rq *rq;
9116
9117 CLASS(task_rq_lock, rq_guard)(tsk);
9118 rq = rq_guard.rq;
9119
9120 scoped_guard (sched_change, tsk, queue_flags) {
9121 sched_change_group(tsk);
9122 if (!for_autogroup)
9123 scx_cgroup_move_task(tsk);
9124 if (scope->running)
9125 resched = true;
9126 }
9127
9128 if (resched)
9129 resched_curr(rq);
9130 }
9131
9132 static struct cgroup_subsys_state *
9133 cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
9134 {
9135 struct task_group *parent = css_tg(parent_css);
9136 struct task_group *tg;
9137
9138 if (!parent) {
9139 /* This is early initialization for the top cgroup */
9140 return &root_task_group.css;
9141 }
9142
9143 tg = sched_create_group(parent);
9144 if (IS_ERR(tg))
9145 return ERR_PTR(-ENOMEM);
9146
9147 return &tg->css;
9148 }
9149
9150 /* Expose task group only after completing cgroup initialization */
9151 static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
9152 {
9153 struct task_group *tg = css_tg(css);
9154 struct task_group *parent = css_tg(css->parent);
9155 int ret;
9156
9157 ret = scx_tg_online(tg);
9158 if (ret)
9159 return ret;
9160
9161 if (parent)
9162 sched_online_group(tg, parent);
9163
9164 #ifdef CONFIG_UCLAMP_TASK_GROUP
9165 /* Propagate the effective uclamp value for the new group */
9166 guard(mutex)(&uclamp_mutex);
9167 guard(rcu)();
9168 cpu_util_update_eff(css);
9169 #endif
9170
9171 return 0;
9172 }
9173
9174 static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
9175 {
9176 struct task_group *tg = css_tg(css);
9177
9178 scx_tg_offline(tg);
9179 }
9180
9181 static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
9182 {
9183 struct task_group *tg = css_tg(css);
9184
9185 sched_release_group(tg);
9186 }
9187
9188 static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
9189 {
9190 struct task_group *tg = css_tg(css);
9191
9192 /*
9193 * Relies on the RCU grace period between css_released() and this.
9194 */
9195 sched_unregister_group(tg);
9196 }
9197
9198 static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
9199 {
9200 #ifdef CONFIG_RT_GROUP_SCHED
9201 struct task_struct *task;
9202 struct cgroup_subsys_state *css;
9203
9204 if (!rt_group_sched_enabled())
9205 goto scx_check;
9206
9207 cgroup_taskset_for_each(task, css, tset) {
9208 if (!sched_rt_can_attach(css_tg(css), task))
9209 return -EINVAL;
9210 }
9211 scx_check:
9212 #endif /* CONFIG_RT_GROUP_SCHED */
9213 return scx_cgroup_can_attach(tset);
9214 }
9215
9216 static void cpu_cgroup_attach(struct cgroup_taskset *tset)
9217 {
9218 struct task_struct *task;
9219 struct cgroup_subsys_state *css;
9220
9221 cgroup_taskset_for_each(task, css, tset)
9222 sched_move_task(task, false);
9223 }
9224
9225 static void cpu_cgroup_cancel_attach(struct cgroup_taskset *tset)
9226 {
9227 scx_cgroup_cancel_attach(tset);
9228 }
9229
9230 #ifdef CONFIG_UCLAMP_TASK_GROUP
9231 static void cpu_util_update_eff(struct cgroup_subsys_state *css)
9232 {
9233 struct cgroup_subsys_state *top_css = css;
9234 struct uclamp_se *uc_parent = NULL;
9235 struct uclamp_se *uc_se = NULL;
9236 unsigned int eff[UCLAMP_CNT];
9237 enum uclamp_id clamp_id;
9238 unsigned int clamps;
9239
9240 lockdep_assert_held(&uclamp_mutex);
9241 WARN_ON_ONCE(!rcu_read_lock_held());
9242
9243 css_for_each_descendant_pre(css, top_css) {
9244 uc_parent = css_tg(css)->parent
9245 ? css_tg(css)->parent->uclamp : NULL;
9246
9247 for_each_clamp_id(clamp_id) {
9248 /* Assume effective clamps match the requested clamps */
9249 eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value;
9250 /* Cap effective clamps with parent's effective clamps */
9251 if (uc_parent &&
9252 eff[clamp_id] > uc_parent[clamp_id].value) {
9253 eff[clamp_id] = uc_parent[clamp_id].value;
9254 }
9255 }
9256 /* Ensure protection is always capped by limit */
9257 eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]);
9258
9259 /* Propagate most restrictive effective clamps */
9260 clamps = 0x0;
9261 uc_se = css_tg(css)->uclamp;
9262 for_each_clamp_id(clamp_id) {
9263 if (eff[clamp_id] == uc_se[clamp_id].value)
9264 continue;
9265 uc_se[clamp_id].value = eff[clamp_id];
9266 uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]);
9267 clamps |= (0x1 << clamp_id);
9268 }
9269 if (!clamps) {
9270 css = css_rightmost_descendant(css);
9271 continue;
9272 }
9273
9274 /* Immediately update descendants RUNNABLE tasks */
9275 uclamp_update_active_tasks(css);
9276 }
9277 }
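
/*
 * Worked example with illustrative (assumed) numbers: take a parent
 * group whose effective clamps are uclamp.min = 800, uclamp.max = 512,
 * and a child requesting uclamp.min = 700, uclamp.max = 819. The loop
 * above caps the child's eff[UCLAMP_MAX] to min(819, 512) = 512 and
 * leaves eff[UCLAMP_MIN] at min(700, 800) = 700; the "protection is
 * always capped by limit" step then reduces eff[UCLAMP_MIN] to
 * min(700, 512) = 512 before the new effective values are propagated
 * to the child's RUNNABLE tasks.
 */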
9278
9279 /*
9280 * Integer 10^N with a given N exponent by casting to integer the literal "1eN"
9281 * C expression. Since there is no way to convert a macro argument (N) into a
9282 * character constant, use two levels of macros.
9283 */
9284 #define _POW10(exp) ((unsigned int)1e##exp)
9285 #define POW10(exp) _POW10(exp)
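
/*
 * For instance, POW10(2) expands via _POW10(2) to ((unsigned int)1e2),
 * i.e. 100. With UCLAMP_PERCENT_SHIFT == 2 below, percentages are thus
 * tracked as fixed point values with two fractional decimal digits and
 * UCLAMP_PERCENT_SCALE == 100 * 100 == 10000 ("100.00%").
 */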
9286
9287 struct uclamp_request {
9288 #define UCLAMP_PERCENT_SHIFT 2
9289 #define UCLAMP_PERCENT_SCALE (100 * POW10(UCLAMP_PERCENT_SHIFT))
9290 s64 percent;
9291 u64 util;
9292 int ret;
9293 };
9294
9295 static inline struct uclamp_request
9296 capacity_from_percent(char *buf)
9297 {
9298 struct uclamp_request req = {
9299 .percent = UCLAMP_PERCENT_SCALE,
9300 .util = SCHED_CAPACITY_SCALE,
9301 .ret = 0,
9302 };
9303
9304 buf = strim(buf);
9305 if (strcmp(buf, "max")) {
9306 req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT,
9307 &req.percent);
9308 if (req.ret)
9309 return req;
9310 if ((u64)req.percent > UCLAMP_PERCENT_SCALE) {
9311 req.ret = -ERANGE;
9312 return req;
9313 }
9314
9315 req.util = req.percent << SCHED_CAPACITY_SHIFT;
9316 req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE);
9317 }
9318
9319 return req;
9320 }
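
/*
 * Example (assuming SCHED_CAPACITY_SHIFT == 10, SCHED_CAPACITY_SCALE ==
 * 1024): writing "75.5" is parsed by cgroup_parse_float() into
 * req.percent == 7550, and the utilization clamp becomes
 *
 *   req.util = DIV_ROUND_CLOSEST_ULL(7550 << 10, 10000) == 773
 *
 * while writing "max" keeps req.util at SCHED_CAPACITY_SCALE (1024).
 */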
9321
9322 static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf,
9323 size_t nbytes, loff_t off,
9324 enum uclamp_id clamp_id)
9325 {
9326 struct uclamp_request req;
9327 struct task_group *tg;
9328
9329 req = capacity_from_percent(buf);
9330 if (req.ret)
9331 return req.ret;
9332
9333 sched_uclamp_enable();
9334
9335 guard(mutex)(&uclamp_mutex);
9336 guard(rcu)();
9337
9338 tg = css_tg(of_css(of));
9339 if (tg->uclamp_req[clamp_id].value != req.util)
9340 uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false);
9341
9342 /*
9343 * Because the conversion rounding is not recoverable, keep track of the
9344 * exact requested value.
9345 */
9346 tg->uclamp_pct[clamp_id] = req.percent;
9347
9348 /* Update effective clamps to track the most restrictive value */
9349 cpu_util_update_eff(of_css(of));
9350
9351 return nbytes;
9352 }
9353
9354 static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of,
9355 char *buf, size_t nbytes,
9356 loff_t off)
9357 {
9358 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MIN);
9359 }
9360
9361 static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of,
9362 char *buf, size_t nbytes,
9363 loff_t off)
9364 {
9365 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MAX);
9366 }
9367
9368 static inline void cpu_uclamp_print(struct seq_file *sf,
9369 enum uclamp_id clamp_id)
9370 {
9371 struct task_group *tg;
9372 u64 util_clamp;
9373 u64 percent;
9374 u32 rem;
9375
9376 scoped_guard (rcu) {
9377 tg = css_tg(seq_css(sf));
9378 util_clamp = tg->uclamp_req[clamp_id].value;
9379 }
9380
9381 if (util_clamp == SCHED_CAPACITY_SCALE) {
9382 seq_puts(sf, "max\n");
9383 return;
9384 }
9385
9386 percent = tg->uclamp_pct[clamp_id];
9387 percent = div_u64_rem(percent, POW10(UCLAMP_PERCENT_SHIFT), &rem);
9388 seq_printf(sf, "%llu.%0*u\n", percent, UCLAMP_PERCENT_SHIFT, rem);
9389 }
9390
9391 static int cpu_uclamp_min_show(struct seq_file *sf, void *v)
9392 {
9393 cpu_uclamp_print(sf, UCLAMP_MIN);
9394 return 0;
9395 }
9396
9397 static int cpu_uclamp_max_show(struct seq_file *sf, void *v)
9398 {
9399 cpu_uclamp_print(sf, UCLAMP_MAX);
9400 return 0;
9401 }
9402 #endif /* CONFIG_UCLAMP_TASK_GROUP */
9403
9404 #ifdef CONFIG_GROUP_SCHED_WEIGHT
9405 static unsigned long tg_weight(struct task_group *tg)
9406 {
9407 #ifdef CONFIG_FAIR_GROUP_SCHED
9408 return scale_load_down(tg->shares);
9409 #else
9410 return sched_weight_from_cgroup(tg->scx.weight);
9411 #endif
9412 }
9413
9414 static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
9415 struct cftype *cftype, u64 shareval)
9416 {
9417 int ret;
9418
9419 if (shareval > scale_load_down(ULONG_MAX))
9420 shareval = MAX_SHARES;
9421 ret = sched_group_set_shares(css_tg(css), scale_load(shareval));
9422 if (!ret)
9423 scx_group_set_weight(css_tg(css),
9424 sched_weight_to_cgroup(shareval));
9425 return ret;
9426 }
9427
9428 static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
9429 struct cftype *cft)
9430 {
9431 return tg_weight(css_tg(css));
9432 }
9433 #endif /* CONFIG_GROUP_SCHED_WEIGHT */
9434
9435 #ifdef CONFIG_CFS_BANDWIDTH
9436 static DEFINE_MUTEX(cfs_constraints_mutex);
9437
9438 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
9439
9440 static int tg_set_cfs_bandwidth(struct task_group *tg,
9441 u64 period_us, u64 quota_us, u64 burst_us)
9442 {
9443 int i, ret = 0, runtime_enabled, runtime_was_enabled;
9444 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9445 u64 period, quota, burst;
9446
9447 period = (u64)period_us * NSEC_PER_USEC;
9448
9449 if (quota_us == RUNTIME_INF)
9450 quota = RUNTIME_INF;
9451 else
9452 quota = (u64)quota_us * NSEC_PER_USEC;
9453
9454 burst = (u64)burst_us * NSEC_PER_USEC;
9455
9456 /*
9457 * Prevent race between setting of cfs_rq->runtime_enabled and
9458 * unthrottle_offline_cfs_rqs().
9459 */
9460 guard(cpus_read_lock)();
9461 guard(mutex)(&cfs_constraints_mutex);
9462
9463 ret = __cfs_schedulable(tg, period, quota);
9464 if (ret)
9465 return ret;
9466
9467 runtime_enabled = quota != RUNTIME_INF;
9468 runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
9469 /*
9470 * If we need to toggle cfs_bandwidth_used, off->on must occur
9471 * before making related changes, and on->off must occur afterwards
9472 */
9473 if (runtime_enabled && !runtime_was_enabled)
9474 cfs_bandwidth_usage_inc();
9475
9476 scoped_guard (raw_spinlock_irq, &cfs_b->lock) {
9477 cfs_b->period = ns_to_ktime(period);
9478 cfs_b->quota = quota;
9479 cfs_b->burst = burst;
9480
9481 __refill_cfs_bandwidth_runtime(cfs_b);
9482
9483 /*
9484 * Restart the period timer (if active) to handle new
9485 * period expiry:
9486 */
9487 if (runtime_enabled)
9488 start_cfs_bandwidth(cfs_b);
9489 }
9490
9491 for_each_online_cpu(i) {
9492 struct cfs_rq *cfs_rq = tg->cfs_rq[i];
9493 struct rq *rq = cfs_rq->rq;
9494
9495 guard(rq_lock_irq)(rq);
9496 cfs_rq->runtime_enabled = runtime_enabled;
9497 cfs_rq->runtime_remaining = 1;
9498
9499 if (cfs_rq->throttled)
9500 unthrottle_cfs_rq(cfs_rq);
9501 }
9502
9503 if (runtime_was_enabled && !runtime_enabled)
9504 cfs_bandwidth_usage_dec();
9505
9506 return 0;
9507 }
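
/*
 * Usage sketch (cgroup v1 knobs backed by this function; see
 * cpu_legacy_files[] below): with
 *
 *   echo 100000 > cpu.cfs_period_us
 *   echo  50000 > cpu.cfs_quota_us
 *
 * the group gets 50ms of runtime per 100ms period, i.e. roughly half a
 * CPU worth of bandwidth, refilled each period via
 * __refill_cfs_bandwidth_runtime() and enforced by throttling once a
 * cfs_rq's runtime_remaining is exhausted.
 */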
9508
9509 static u64 tg_get_cfs_period(struct task_group *tg)
9510 {
9511 u64 cfs_period_us;
9512
9513 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
9514 do_div(cfs_period_us, NSEC_PER_USEC);
9515
9516 return cfs_period_us;
9517 }
9518
9519 static u64 tg_get_cfs_quota(struct task_group *tg)
9520 {
9521 u64 quota_us;
9522
9523 if (tg->cfs_bandwidth.quota == RUNTIME_INF)
9524 return RUNTIME_INF;
9525
9526 quota_us = tg->cfs_bandwidth.quota;
9527 do_div(quota_us, NSEC_PER_USEC);
9528
9529 return quota_us;
9530 }
9531
9532 static u64 tg_get_cfs_burst(struct task_group *tg)
9533 {
9534 u64 burst_us;
9535
9536 burst_us = tg->cfs_bandwidth.burst;
9537 do_div(burst_us, NSEC_PER_USEC);
9538
9539 return burst_us;
9540 }
9541
9542 struct cfs_schedulable_data {
9543 struct task_group *tg;
9544 u64 period, quota;
9545 };
9546
9547 /*
9548 * normalize group quota/period to be quota/max_period
9549 * note: units are usecs
9550 */
9551 static u64 normalize_cfs_quota(struct task_group *tg,
9552 struct cfs_schedulable_data *d)
9553 {
9554 u64 quota, period;
9555
9556 if (tg == d->tg) {
9557 period = d->period;
9558 quota = d->quota;
9559 } else {
9560 period = tg_get_cfs_period(tg);
9561 quota = tg_get_cfs_quota(tg);
9562 }
9563
9564 /* note: these should typically be equivalent */
9565 if (quota == RUNTIME_INF || quota == -1)
9566 return RUNTIME_INF;
9567
9568 return to_ratio(period, quota);
9569 }
9570
9571 static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
9572 {
9573 struct cfs_schedulable_data *d = data;
9574 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9575 s64 quota = 0, parent_quota = -1;
9576
9577 if (!tg->parent) {
9578 quota = RUNTIME_INF;
9579 } else {
9580 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
9581
9582 quota = normalize_cfs_quota(tg, d);
9583 parent_quota = parent_b->hierarchical_quota;
9584
9585 /*
9586 * Ensure max(child_quota) <= parent_quota. On cgroup2,
9587 * always take the non-RUNTIME_INF min. On cgroup1, only
9588 * inherit when no limit is set. In both cases this is used
9589 * by the scheduler to determine if a given CFS task has a
9590 * bandwidth constraint at some higher level.
9591 */
9592 if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) {
9593 if (quota == RUNTIME_INF)
9594 quota = parent_quota;
9595 else if (parent_quota != RUNTIME_INF)
9596 quota = min(quota, parent_quota);
9597 } else {
9598 if (quota == RUNTIME_INF)
9599 quota = parent_quota;
9600 else if (parent_quota != RUNTIME_INF && quota > parent_quota)
9601 return -EINVAL;
9602 }
9603 }
9604 cfs_b->hierarchical_quota = quota;
9605
9606 return 0;
9607 }
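
/*
 * Example of the hierarchy check above (illustrative): if a parent has
 * an effective (hierarchical) quota equivalent to 2 CPUs and a child
 * asks for 3 CPUs worth of quota, then on cgroup2 the child's
 * hierarchical_quota is clamped down to the parent's 2 CPUs, while on
 * cgroup1 the write is rejected with -EINVAL because a child may not
 * claim more than an explicitly limited parent.
 */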
9608
9609 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
9610 {
9611 struct cfs_schedulable_data data = {
9612 .tg = tg,
9613 .period = period,
9614 .quota = quota,
9615 };
9616
9617 if (quota != RUNTIME_INF) {
9618 do_div(data.period, NSEC_PER_USEC);
9619 do_div(data.quota, NSEC_PER_USEC);
9620 }
9621
9622 guard(rcu)();
9623 return walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
9624 }
9625
9626 static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
9627 {
9628 struct task_group *tg = css_tg(seq_css(sf));
9629 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9630
9631 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
9632 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
9633 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
9634
9635 if (schedstat_enabled() && tg != &root_task_group) {
9636 struct sched_statistics *stats;
9637 u64 ws = 0;
9638 int i;
9639
9640 for_each_possible_cpu(i) {
9641 stats = __schedstats_from_se(tg->se[i]);
9642 ws += schedstat_val(stats->wait_sum);
9643 }
9644
9645 seq_printf(sf, "wait_sum %llu\n", ws);
9646 }
9647
9648 seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst);
9649 seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time);
9650
9651 return 0;
9652 }
9653
9654 static u64 throttled_time_self(struct task_group *tg)
9655 {
9656 int i;
9657 u64 total = 0;
9658
9659 for_each_possible_cpu(i) {
9660 total += READ_ONCE(tg->cfs_rq[i]->throttled_clock_self_time);
9661 }
9662
9663 return total;
9664 }
9665
9666 static int cpu_cfs_local_stat_show(struct seq_file *sf, void *v)
9667 {
9668 struct task_group *tg = css_tg(seq_css(sf));
9669
9670 seq_printf(sf, "throttled_time %llu\n", throttled_time_self(tg));
9671
9672 return 0;
9673 }
9674 #endif /* CONFIG_CFS_BANDWIDTH */
9675
9676 #ifdef CONFIG_GROUP_SCHED_BANDWIDTH
9677 const u64 max_bw_quota_period_us = 1 * USEC_PER_SEC; /* 1s */
9678 static const u64 min_bw_quota_period_us = 1 * USEC_PER_MSEC; /* 1ms */
9679 /* More than 203 days if BW_SHIFT equals 20. */
9680 static const u64 max_bw_runtime_us = MAX_BW;
9681
9682 static void tg_bandwidth(struct task_group *tg,
9683 u64 *period_us_p, u64 *quota_us_p, u64 *burst_us_p)
9684 {
9685 #ifdef CONFIG_CFS_BANDWIDTH
9686 if (period_us_p)
9687 *period_us_p = tg_get_cfs_period(tg);
9688 if (quota_us_p)
9689 *quota_us_p = tg_get_cfs_quota(tg);
9690 if (burst_us_p)
9691 *burst_us_p = tg_get_cfs_burst(tg);
9692 #else /* !CONFIG_CFS_BANDWIDTH */
9693 if (period_us_p)
9694 *period_us_p = tg->scx.bw_period_us;
9695 if (quota_us_p)
9696 *quota_us_p = tg->scx.bw_quota_us;
9697 if (burst_us_p)
9698 *burst_us_p = tg->scx.bw_burst_us;
9699 #endif /* CONFIG_CFS_BANDWIDTH */
9700 }
9701
9702 static u64 cpu_period_read_u64(struct cgroup_subsys_state *css,
9703 struct cftype *cft)
9704 {
9705 u64 period_us;
9706
9707 tg_bandwidth(css_tg(css), &period_us, NULL, NULL);
9708 return period_us;
9709 }
9710
9711 static int tg_set_bandwidth(struct task_group *tg,
9712 u64 period_us, u64 quota_us, u64 burst_us)
9713 {
9714 const u64 max_usec = U64_MAX / NSEC_PER_USEC;
9715 int ret = 0;
9716
9717 if (tg == &root_task_group)
9718 return -EINVAL;
9719
9720 /* Values should survive translation to nsec */
9721 if (period_us > max_usec ||
9722 (quota_us != RUNTIME_INF && quota_us > max_usec) ||
9723 burst_us > max_usec)
9724 return -EINVAL;
9725
9726 /*
9727 * Ensure we have some amount of bandwidth every period. This is to
9728 * prevent reaching a state of large arrears when throttled via
9729 * entity_tick() resulting in prolonged exit starvation.
9730 */
9731 if (quota_us < min_bw_quota_period_us ||
9732 period_us < min_bw_quota_period_us)
9733 return -EINVAL;
9734
9735 /*
9736 * Likewise, bound things on the other side by preventing insane quota
9737 * periods. This also allows us to normalize in computing quota
9738 * feasibility.
9739 */
9740 if (period_us > max_bw_quota_period_us)
9741 return -EINVAL;
9742
9743 /*
9744 * Bound quota to defend quota against overflow during bandwidth shift.
9745 */
9746 if (quota_us != RUNTIME_INF && quota_us > max_bw_runtime_us)
9747 return -EINVAL;
9748
9749 if (quota_us != RUNTIME_INF && (burst_us > quota_us ||
9750 burst_us + quota_us > max_bw_runtime_us))
9751 return -EINVAL;
9752
9753 #ifdef CONFIG_CFS_BANDWIDTH
9754 ret = tg_set_cfs_bandwidth(tg, period_us, quota_us, burst_us);
9755 #endif /* CONFIG_CFS_BANDWIDTH */
9756 if (!ret)
9757 scx_group_set_bandwidth(tg, period_us, quota_us, burst_us);
9758 return ret;
9759 }
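
/*
 * Illustration of the sanity checks above (values in microseconds,
 * illustrative numbers):
 *
 *   period_us = 100000, quota_us = 200000, burst_us = 0     -> OK (2 CPUs)
 *   period_us =    500, quota_us = 200000, burst_us = 0     -> -EINVAL
 *                         (period below min_bw_quota_period_us, 1ms)
 *   period_us = 100000, quota_us =  50000, burst_us = 60000 -> -EINVAL
 *                         (burst larger than quota)
 */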
9760
9761 static s64 cpu_quota_read_s64(struct cgroup_subsys_state *css,
9762 struct cftype *cft)
9763 {
9764 u64 quota_us;
9765
9766 tg_bandwidth(css_tg(css), NULL, &quota_us, NULL);
9767 return quota_us; /* (s64)RUNTIME_INF becomes -1 */
9768 }
9769
9770 static u64 cpu_burst_read_u64(struct cgroup_subsys_state *css,
9771 struct cftype *cft)
9772 {
9773 u64 burst_us;
9774
9775 tg_bandwidth(css_tg(css), NULL, NULL, &burst_us);
9776 return burst_us;
9777 }
9778
9779 static int cpu_period_write_u64(struct cgroup_subsys_state *css,
9780 struct cftype *cftype, u64 period_us)
9781 {
9782 struct task_group *tg = css_tg(css);
9783 u64 quota_us, burst_us;
9784
9785 tg_bandwidth(tg, NULL, &quota_us, &burst_us);
9786 return tg_set_bandwidth(tg, period_us, quota_us, burst_us);
9787 }
9788
9789 static int cpu_quota_write_s64(struct cgroup_subsys_state *css,
9790 struct cftype *cftype, s64 quota_us)
9791 {
9792 struct task_group *tg = css_tg(css);
9793 u64 period_us, burst_us;
9794
9795 if (quota_us < 0)
9796 quota_us = RUNTIME_INF;
9797
9798 tg_bandwidth(tg, &period_us, NULL, &burst_us);
9799 return tg_set_bandwidth(tg, period_us, quota_us, burst_us);
9800 }
9801
9802 static int cpu_burst_write_u64(struct cgroup_subsys_state *css,
9803 struct cftype *cftype, u64 burst_us)
9804 {
9805 struct task_group *tg = css_tg(css);
9806 u64 period_us, quota_us;
9807
9808 tg_bandwidth(tg, &period_us, &quota_us, NULL);
9809 return tg_set_bandwidth(tg, period_us, quota_us, burst_us);
9810 }
9811 #endif /* CONFIG_GROUP_SCHED_BANDWIDTH */
9812
9813 #ifdef CONFIG_RT_GROUP_SCHED
9814 static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
9815 struct cftype *cft, s64 val)
9816 {
9817 return sched_group_set_rt_runtime(css_tg(css), val);
9818 }
9819
9820 static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
9821 struct cftype *cft)
9822 {
9823 return sched_group_rt_runtime(css_tg(css));
9824 }
9825
9826 static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
9827 struct cftype *cftype, u64 rt_period_us)
9828 {
9829 return sched_group_set_rt_period(css_tg(css), rt_period_us);
9830 }
9831
9832 static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
9833 struct cftype *cft)
9834 {
9835 return sched_group_rt_period(css_tg(css));
9836 }
9837 #endif /* CONFIG_RT_GROUP_SCHED */
9838
9839 #ifdef CONFIG_GROUP_SCHED_WEIGHT
9840 static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css,
9841 struct cftype *cft)
9842 {
9843 return css_tg(css)->idle;
9844 }
9845
9846 static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
9847 struct cftype *cft, s64 idle)
9848 {
9849 int ret;
9850
9851 ret = sched_group_set_idle(css_tg(css), idle);
9852 if (!ret)
9853 scx_group_set_idle(css_tg(css), idle);
9854 return ret;
9855 }
9856 #endif /* CONFIG_GROUP_SCHED_WEIGHT */
9857
9858 static struct cftype cpu_legacy_files[] = {
9859 #ifdef CONFIG_GROUP_SCHED_WEIGHT
9860 {
9861 .name = "shares",
9862 .read_u64 = cpu_shares_read_u64,
9863 .write_u64 = cpu_shares_write_u64,
9864 },
9865 {
9866 .name = "idle",
9867 .read_s64 = cpu_idle_read_s64,
9868 .write_s64 = cpu_idle_write_s64,
9869 },
9870 #endif
9871 #ifdef CONFIG_GROUP_SCHED_BANDWIDTH
9872 {
9873 .name = "cfs_period_us",
9874 .read_u64 = cpu_period_read_u64,
9875 .write_u64 = cpu_period_write_u64,
9876 },
9877 {
9878 .name = "cfs_quota_us",
9879 .read_s64 = cpu_quota_read_s64,
9880 .write_s64 = cpu_quota_write_s64,
9881 },
9882 {
9883 .name = "cfs_burst_us",
9884 .read_u64 = cpu_burst_read_u64,
9885 .write_u64 = cpu_burst_write_u64,
9886 },
9887 #endif
9888 #ifdef CONFIG_CFS_BANDWIDTH
9889 {
9890 .name = "stat",
9891 .seq_show = cpu_cfs_stat_show,
9892 },
9893 {
9894 .name = "stat.local",
9895 .seq_show = cpu_cfs_local_stat_show,
9896 },
9897 #endif
9898 #ifdef CONFIG_UCLAMP_TASK_GROUP
9899 {
9900 .name = "uclamp.min",
9901 .flags = CFTYPE_NOT_ON_ROOT,
9902 .seq_show = cpu_uclamp_min_show,
9903 .write = cpu_uclamp_min_write,
9904 },
9905 {
9906 .name = "uclamp.max",
9907 .flags = CFTYPE_NOT_ON_ROOT,
9908 .seq_show = cpu_uclamp_max_show,
9909 .write = cpu_uclamp_max_write,
9910 },
9911 #endif
9912 { } /* Terminate */
9913 };
9914
9915 #ifdef CONFIG_RT_GROUP_SCHED
9916 static struct cftype rt_group_files[] = {
9917 {
9918 .name = "rt_runtime_us",
9919 .read_s64 = cpu_rt_runtime_read,
9920 .write_s64 = cpu_rt_runtime_write,
9921 },
9922 {
9923 .name = "rt_period_us",
9924 .read_u64 = cpu_rt_period_read_uint,
9925 .write_u64 = cpu_rt_period_write_uint,
9926 },
9927 { } /* Terminate */
9928 };
9929
9930 # ifdef CONFIG_RT_GROUP_SCHED_DEFAULT_DISABLED
9931 DEFINE_STATIC_KEY_FALSE(rt_group_sched);
9932 # else
9933 DEFINE_STATIC_KEY_TRUE(rt_group_sched);
9934 # endif
9935
9936 static int __init setup_rt_group_sched(char *str)
9937 {
9938 long val;
9939
9940 if (kstrtol(str, 0, &val) || val < 0 || val > 1) {
9941 pr_warn("Unable to set rt_group_sched\n");
9942 return 1;
9943 }
9944 if (val)
9945 static_branch_enable(&rt_group_sched);
9946 else
9947 static_branch_disable(&rt_group_sched);
9948
9949 return 1;
9950 }
9951 __setup("rt_group_sched=", setup_rt_group_sched);
9952
9953 static int __init cpu_rt_group_init(void)
9954 {
9955 if (!rt_group_sched_enabled())
9956 return 0;
9957
9958 WARN_ON(cgroup_add_legacy_cftypes(&cpu_cgrp_subsys, rt_group_files));
9959 return 0;
9960 }
9961 subsys_initcall(cpu_rt_group_init);
9962 #endif /* CONFIG_RT_GROUP_SCHED */
9963
9964 static int cpu_extra_stat_show(struct seq_file *sf,
9965 struct cgroup_subsys_state *css)
9966 {
9967 #ifdef CONFIG_CFS_BANDWIDTH
9968 {
9969 struct task_group *tg = css_tg(css);
9970 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9971 u64 throttled_usec, burst_usec;
9972
9973 throttled_usec = cfs_b->throttled_time;
9974 do_div(throttled_usec, NSEC_PER_USEC);
9975 burst_usec = cfs_b->burst_time;
9976 do_div(burst_usec, NSEC_PER_USEC);
9977
9978 seq_printf(sf, "nr_periods %d\n"
9979 "nr_throttled %d\n"
9980 "throttled_usec %llu\n"
9981 "nr_bursts %d\n"
9982 "burst_usec %llu\n",
9983 cfs_b->nr_periods, cfs_b->nr_throttled,
9984 throttled_usec, cfs_b->nr_burst, burst_usec);
9985 }
9986 #endif /* CONFIG_CFS_BANDWIDTH */
9987 return 0;
9988 }
9989
9990 static int cpu_local_stat_show(struct seq_file *sf,
9991 struct cgroup_subsys_state *css)
9992 {
9993 #ifdef CONFIG_CFS_BANDWIDTH
9994 {
9995 struct task_group *tg = css_tg(css);
9996 u64 throttled_self_usec;
9997
9998 throttled_self_usec = throttled_time_self(tg);
9999 do_div(throttled_self_usec, NSEC_PER_USEC);
10000
10001 seq_printf(sf, "throttled_usec %llu\n",
10002 throttled_self_usec);
10003 }
10004 #endif
10005 return 0;
10006 }
10007
10008 #ifdef CONFIG_GROUP_SCHED_WEIGHT
10009
10010 static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
10011 struct cftype *cft)
10012 {
10013 return sched_weight_to_cgroup(tg_weight(css_tg(css)));
10014 }
10015
10016 static int cpu_weight_write_u64(struct cgroup_subsys_state *css,
10017 struct cftype *cft, u64 cgrp_weight)
10018 {
10019 unsigned long weight;
10020 int ret;
10021
10022 if (cgrp_weight < CGROUP_WEIGHT_MIN || cgrp_weight > CGROUP_WEIGHT_MAX)
10023 return -ERANGE;
10024
10025 weight = sched_weight_from_cgroup(cgrp_weight);
10026
10027 ret = sched_group_set_shares(css_tg(css), scale_load(weight));
10028 if (!ret)
10029 scx_group_set_weight(css_tg(css), cgrp_weight);
10030 return ret;
10031 }
10032
10033 static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css,
10034 struct cftype *cft)
10035 {
10036 unsigned long weight = tg_weight(css_tg(css));
10037 int last_delta = INT_MAX;
10038 int prio, delta;
10039
10040 /* find the closest nice value to the current weight */
10041 for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) {
10042 delta = abs(sched_prio_to_weight[prio] - weight);
10043 if (delta >= last_delta)
10044 break;
10045 last_delta = delta;
10046 }
10047
10048 return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO);
10049 }
10050
10051 static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
10052 struct cftype *cft, s64 nice)
10053 {
10054 unsigned long weight;
10055 int idx, ret;
10056
10057 if (nice < MIN_NICE || nice > MAX_NICE)
10058 return -ERANGE;
10059
10060 idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO;
10061 idx = array_index_nospec(idx, 40);
10062 weight = sched_prio_to_weight[idx];
10063
10064 ret = sched_group_set_shares(css_tg(css), scale_load(weight));
10065 if (!ret)
10066 scx_group_set_weight(css_tg(css),
10067 sched_weight_to_cgroup(weight));
10068 return ret;
10069 }
10070 #endif /* CONFIG_GROUP_SCHED_WEIGHT */
10071
10072 static void __maybe_unused cpu_period_quota_print(struct seq_file *sf,
10073 long period, long quota)
10074 {
10075 if (quota < 0)
10076 seq_puts(sf, "max");
10077 else
10078 seq_printf(sf, "%ld", quota);
10079
10080 seq_printf(sf, " %ld\n", period);
10081 }
10082
10083 /* caller should put the current value in *@periodp before calling */
10084 static int __maybe_unused cpu_period_quota_parse(char *buf, u64 *period_us_p,
10085 u64 *quota_us_p)
10086 {
10087 char tok[21]; /* U64_MAX */
10088
10089 if (sscanf(buf, "%20s %llu", tok, period_us_p) < 1)
10090 return -EINVAL;
10091
10092 if (sscanf(tok, "%llu", quota_us_p) < 1) {
10093 if (!strcmp(tok, "max"))
10094 *quota_us_p = RUNTIME_INF;
10095 else
10096 return -EINVAL;
10097 }
10098
10099 return 0;
10100 }
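
/*
 * cpu.max format handled by the helpers above (illustrative inputs):
 *
 *   "max 100000"   -> quota = RUNTIME_INF, period = 100000us (no limit)
 *   "50000 100000" -> quota = 50000us per 100000us period (~0.5 CPU)
 *   "50000"        -> quota = 50000us, period left at the caller-provided
 *                     current value
 */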
10101
10102 #ifdef CONFIG_GROUP_SCHED_BANDWIDTH
10103 static int cpu_max_show(struct seq_file *sf, void *v)
10104 {
10105 struct task_group *tg = css_tg(seq_css(sf));
10106 u64 period_us, quota_us;
10107
10108 tg_bandwidth(tg, &period_us, &quota_us, NULL);
10109 cpu_period_quota_print(sf, period_us, quota_us);
10110 return 0;
10111 }
10112
10113 static ssize_t cpu_max_write(struct kernfs_open_file *of,
10114 char *buf, size_t nbytes, loff_t off)
10115 {
10116 struct task_group *tg = css_tg(of_css(of));
10117 u64 period_us, quota_us, burst_us;
10118 int ret;
10119
10120 tg_bandwidth(tg, &period_us, NULL, &burst_us);
10121 ret = cpu_period_quota_parse(buf, &period_us, &quota_us);
10122 if (!ret)
10123 ret = tg_set_bandwidth(tg, period_us, quota_us, burst_us);
10124 return ret ?: nbytes;
10125 }
10126 #endif /* CONFIG_GROUP_SCHED_BANDWIDTH */
10127
10128 static struct cftype cpu_files[] = {
10129 #ifdef CONFIG_GROUP_SCHED_WEIGHT
10130 {
10131 .name = "weight",
10132 .flags = CFTYPE_NOT_ON_ROOT,
10133 .read_u64 = cpu_weight_read_u64,
10134 .write_u64 = cpu_weight_write_u64,
10135 },
10136 {
10137 .name = "weight.nice",
10138 .flags = CFTYPE_NOT_ON_ROOT,
10139 .read_s64 = cpu_weight_nice_read_s64,
10140 .write_s64 = cpu_weight_nice_write_s64,
10141 },
10142 {
10143 .name = "idle",
10144 .flags = CFTYPE_NOT_ON_ROOT,
10145 .read_s64 = cpu_idle_read_s64,
10146 .write_s64 = cpu_idle_write_s64,
10147 },
10148 #endif
10149 #ifdef CONFIG_GROUP_SCHED_BANDWIDTH
10150 {
10151 .name = "max",
10152 .flags = CFTYPE_NOT_ON_ROOT,
10153 .seq_show = cpu_max_show,
10154 .write = cpu_max_write,
10155 },
10156 {
10157 .name = "max.burst",
10158 .flags = CFTYPE_NOT_ON_ROOT,
10159 .read_u64 = cpu_burst_read_u64,
10160 .write_u64 = cpu_burst_write_u64,
10161 },
10162 #endif /* CONFIG_GROUP_SCHED_BANDWIDTH */
10163 #ifdef CONFIG_UCLAMP_TASK_GROUP
10164 {
10165 .name = "uclamp.min",
10166 .flags = CFTYPE_NOT_ON_ROOT,
10167 .seq_show = cpu_uclamp_min_show,
10168 .write = cpu_uclamp_min_write,
10169 },
10170 {
10171 .name = "uclamp.max",
10172 .flags = CFTYPE_NOT_ON_ROOT,
10173 .seq_show = cpu_uclamp_max_show,
10174 .write = cpu_uclamp_max_write,
10175 },
10176 #endif /* CONFIG_UCLAMP_TASK_GROUP */
10177 { } /* terminate */
10178 };
10179
10180 struct cgroup_subsys cpu_cgrp_subsys = {
10181 .css_alloc = cpu_cgroup_css_alloc,
10182 .css_online = cpu_cgroup_css_online,
10183 .css_offline = cpu_cgroup_css_offline,
10184 .css_released = cpu_cgroup_css_released,
10185 .css_free = cpu_cgroup_css_free,
10186 .css_extra_stat_show = cpu_extra_stat_show,
10187 .css_local_stat_show = cpu_local_stat_show,
10188 .can_attach = cpu_cgroup_can_attach,
10189 .attach = cpu_cgroup_attach,
10190 .cancel_attach = cpu_cgroup_cancel_attach,
10191 .legacy_cftypes = cpu_legacy_files,
10192 .dfl_cftypes = cpu_files,
10193 .early_init = true,
10194 .threaded = true,
10195 };
10196
10197 #endif /* CONFIG_CGROUP_SCHED */
10198
10199 void dump_cpu_task(int cpu)
10200 {
10201 if (in_hardirq() && cpu == smp_processor_id()) {
10202 struct pt_regs *regs;
10203
10204 regs = get_irq_regs();
10205 if (regs) {
10206 show_regs(regs);
10207 return;
10208 }
10209 }
10210
10211 if (trigger_single_cpu_backtrace(cpu))
10212 return;
10213
10214 pr_info("Task dump for CPU %d:\n", cpu);
10215 sched_show_task(cpu_curr(cpu));
10216 }
10217
10218 /*
10219 * Nice levels are multiplicative, with a gentle 10% change for every
10220 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
10221 * nice 1, it will get ~10% less CPU time than another CPU-bound task
10222 * that remained on nice 0.
10223 *
10224 * The "10% effect" is relative and cumulative: from _any_ nice level,
10225 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
10226 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
10227 * If a task goes up by ~10% and another task goes down by ~10% then
10228 * the relative distance between them is ~25%.)
10229 */
10230 const int sched_prio_to_weight[40] = {
10231 /* -20 */ 88761, 71755, 56483, 46273, 36291,
10232 /* -15 */ 29154, 23254, 18705, 14949, 11916,
10233 /* -10 */ 9548, 7620, 6100, 4904, 3906,
10234 /* -5 */ 3121, 2501, 1991, 1586, 1277,
10235 /* 0 */ 1024, 820, 655, 526, 423,
10236 /* 5 */ 335, 272, 215, 172, 137,
10237 /* 10 */ 110, 87, 70, 56, 45,
10238 /* 15 */ 36, 29, 23, 18, 15,
10239 };
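
/*
 * Worked example with the table above: two always-runnable tasks on one
 * CPU, one at nice 0 (weight 1024) and one at nice 1 (weight 820), split
 * the CPU proportionally to their weights:
 *
 *   nice 0: 1024 / (1024 + 820) ~= 55.5%
 *   nice 1:  820 / (1024 + 820) ~= 44.5%
 *
 * i.e. the ~10% per-nice-level difference produced by the ~1.25 ratio
 * between neighbouring weights.
 */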
10240
10241 /*
10242 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, pre-calculated.
10243 *
10244 * In cases where the weight does not change often, we can use the
10245 * pre-calculated inverse to speed up arithmetics by turning divisions
10246 * into multiplications:
10247 */
10248 const u32 sched_prio_to_wmult[40] = {
10249 /* -20 */ 48388, 59856, 76040, 92818, 118348,
10250 /* -15 */ 147320, 184698, 229616, 287308, 360437,
10251 /* -10 */ 449829, 563644, 704093, 875809, 1099582,
10252 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
10253 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
10254 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
10255 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
10256 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
10257 };
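
/*
 * Example: for nice 0, sched_prio_to_wmult[20] == 4194304 == 2^32 / 1024,
 * so a division by the weight can be replaced by a multiply and shift:
 *
 *   delta / 1024  ~=  (delta * 4194304ULL) >> 32
 *
 * e.g. for delta == 1000000: 1000000 / 1024 == 976 and
 * (1000000 * 4194304ULL) >> 32 == 976 as well.
 */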
10258
10259 void call_trace_sched_update_nr_running(struct rq *rq, int count)
10260 {
10261 trace_sched_update_nr_running_tp(rq, count);
10262 }
10263
10264 #ifdef CONFIG_SCHED_MM_CID
10265 /*
10266 * Concurrency IDentifier management
10267 *
10268 * Serialization rules:
10269 *
10270 * mm::mm_cid::mutex: Serializes fork() and exit() and therefore
10271 * protects mm::mm_cid::users.
10272 *
10273 * mm::mm_cid::lock: Serializes mm_update_max_cids() and
10274 * mm_update_cpus_allowed(). Nests in mm_cid::mutex
10275 * and runqueue lock.
10276 *
10277 * The mm_cidmask bitmap is not protected by any of the mm::mm_cid locks
10278 * and can only be modified with atomic operations.
10279 *
10280 * The mm::mm_cid.pcpu per CPU storage is protected by the CPU's runqueue
10281 * lock.
10282 *
10283 * CID ownership:
10284 *
10285 * A CID is either owned by a task (stored in task_struct::mm_cid.cid) or
10286 * by a CPU (stored in mm::mm_cid.pcpu::cid). CIDs owned by CPUs have the
10287 * MM_CID_ONCPU bit set. During transition from CPU to task ownership mode,
10288 * MM_CID_TRANSIT is set on the per task CIDs. When this bit is set the
10289 * task needs to drop the CID into the pool when scheduling out. Both bits
10290 * (ONCPU and TRANSIT) are filtered out by task_cid() when the CID is
10291 * actually handed over to user space in the RSEQ memory.
10292 *
10293 * Mode switching:
10294 *
10295 * Switching to per CPU mode happens when the user count becomes greater
10296 * than the maximum number of CIDs, which is calculated by:
10297 *
10298 * opt_cids = min(mm_cid::nr_cpus_allowed, mm_cid::users);
10299 * max_cids = min(1.25 * opt_cids, num_possible_cpus());
10300 *
10301 * The +25% allowance is useful for tight CPU masks in scenarios where only
10302 * a few threads are created and destroyed to avoid frequent mode
10303 * switches. Though this allowance shrinks, the closer opt_cids becomes to
10304 * num_possible_cpus(), which is the (unfortunate) hard ABI limit.
10305 *
10306 * At the point of switching to per CPU mode the new user is not yet
10307 * visible in the system, so the task which initiated the fork() runs the
10308 * fixup function: mm_cid_fixup_tasks_to_cpus() walks the thread list and
10309 * either transfers each task's owned CID to the CPU the task runs on or
10310 * drops it into the CID pool if a task is not on a CPU at that point in
10311 * time. Tasks which schedule in before the task walk reaches them do the
10312 * handover in mm_cid_schedin(). When mm_cid_fixup_tasks_to_cpus() completes
10313 * it's guaranteed that no task related to that MM owns a CID anymore.
10314 *
10315 * Switching back to task mode happens when the user count goes below the
10316 * threshold which was recorded on the per CPU mode switch:
10317 *
10318 * pcpu_thrs = min(opt_cids - (opt_cids / 4), num_possible_cpus() / 2);
10319 *
10320 * This threshold is updated when an affinity change increases the number of
10321 * allowed CPUs for the MM, which might cause a switch back to per task
10322 * mode.
10323 *
10324 * If the switch back was initiated by an exiting task, then that task runs
10325 * the fixup function. If it was initiated by an affinity change, then it's
10326 * run either in the deferred update function in context of a workqueue or
10327 * by a task which forks a new one or by a task which exits, whichever
10328 * happens first. mm_cid_fixup_cpus_to_tasks() walks through the possible
10329 * CPUs and either transfers the CPU owned CIDs to a related task which
10330 * runs on the CPU or drops it into the pool. Tasks which schedule in on a
10331 * CPU which the walk did not cover yet do the handover themselves.
10332 *
10333 * This transition from CPU to per task ownership happens in two phases:
10334 *
10335 * 1) mm::mm_cid.transit contains MM_CID_TRANSIT. This is OR'ed onto the task
10336 * CID and denotes that the CID is only temporarily owned by the
10337 * task. When it schedules out the task drops the CID back into the
10338 * pool if this bit is set.
10339 *
10340 * 2) The initiating context walks the per CPU space and after completion
10341 * clears mm::mm_cid.transit. So after that point the CIDs are strictly
10342 * task owned again.
10343 *
10344 * This two phase transition is required to prevent CID space exhaustion
10345 * during the transition as a direct transfer of ownership would fail if
10346 * two tasks are scheduled in on the same CPU before the fixup freed per
10347 * CPU CIDs.
10348 *
10349 * When mm_cid_fixup_cpus_to_tasks() completes it's guaranteed that no CID
10350 * related to that MM is owned by a CPU anymore.
10351 */
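
/*
 * Worked example of the mode switch math above (illustrative numbers):
 * on a machine with 8 possible CPUs and an unrestricted affinity mask
 * (nr_cpus_allowed == 8):
 *
 *   users = 6: opt_cids = 6, max_cids = min(6 + 6/4, 8) = 7
 *              -> stays in per task mode
 *   users = 9: opt_cids = 8, max_cids = min(8 + 8/4, 8) = 8, and 9 > 8
 *              -> switch to per CPU mode,
 *                 pcpu_thrs = max(min(8 - 8/4, 8/2), 1) = 4
 *   users drops below 4
 *              -> switch back to per task mode via the two phase
 *                 MM_CID_TRANSIT transition described above
 */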
10352
10353 /*
10354 * Update the CID range properties when the constraints change. Invoked via
10355 * fork(), exit() and affinity changes
10356 */
10357 static void __mm_update_max_cids(struct mm_mm_cid *mc)
10358 {
10359 unsigned int opt_cids, max_cids;
10360
10361 /* Calculate the new optimal constraint */
10362 opt_cids = min(mc->nr_cpus_allowed, mc->users);
10363
10364 /* Adjust the maximum CIDs to +25% limited by the number of possible CPUs */
10365 max_cids = min(opt_cids + (opt_cids / 4), num_possible_cpus());
10366 WRITE_ONCE(mc->max_cids, max_cids);
10367 }
10368
10369 static inline unsigned int mm_cid_calc_pcpu_thrs(struct mm_mm_cid *mc)
10370 {
10371 unsigned int opt_cids;
10372
10373 opt_cids = min(mc->nr_cpus_allowed, mc->users);
10374 /* Has to be at least 1 because 0 indicates PCPU mode off */
10375 return max(min(opt_cids - opt_cids / 4, num_possible_cpus() / 2), 1);
10376 }
10377
10378 static bool mm_update_max_cids(struct mm_struct *mm)
10379 {
10380 struct mm_mm_cid *mc = &mm->mm_cid;
10381
10382 lockdep_assert_held(&mm->mm_cid.lock);
10383
10384 /* Clear deferred mode switch flag. A change is handled by the caller */
10385 mc->update_deferred = false;
10386 __mm_update_max_cids(mc);
10387
10388 /* Check whether owner mode must be changed */
10389 if (!mc->percpu) {
10390 /* Enable per CPU mode when the number of users is above max_cids */
10391 if (mc->users > mc->max_cids)
10392 mc->pcpu_thrs = mm_cid_calc_pcpu_thrs(mc);
10393 } else {
10394 /* Switch back to per task if user count under threshold */
10395 if (mc->users < mc->pcpu_thrs)
10396 mc->pcpu_thrs = 0;
10397 }
10398
10399 /* Mode change required? */
10400 if (!!mc->percpu == !!mc->pcpu_thrs)
10401 return false;
10402 /* When switching back to per TASK mode, set the transition flag */
10403 if (!mc->pcpu_thrs)
10404 WRITE_ONCE(mc->transit, MM_CID_TRANSIT);
10405 WRITE_ONCE(mc->percpu, !!mc->pcpu_thrs);
10406 return true;
10407 }
10408
10409 static inline void mm_update_cpus_allowed(struct mm_struct *mm, const struct cpumask *affmsk)
10410 {
10411 struct cpumask *mm_allowed;
10412 struct mm_mm_cid *mc;
10413 unsigned int weight;
10414
10415 if (!mm || !READ_ONCE(mm->mm_cid.users))
10416 return;
10417 /*
10418 * mm::mm_cid::mm_cpus_allowed is the superset of each thread's
10419 * allowed CPUs mask, which means it can only grow.
10420 */
10421 mc = &mm->mm_cid;
10422 guard(raw_spinlock)(&mc->lock);
10423 mm_allowed = mm_cpus_allowed(mm);
10424 weight = cpumask_weighted_or(mm_allowed, mm_allowed, affmsk);
10425 if (weight == mc->nr_cpus_allowed)
10426 return;
10427
10428 WRITE_ONCE(mc->nr_cpus_allowed, weight);
10429 __mm_update_max_cids(mc);
10430 if (!mc->percpu)
10431 return;
10432
10433 /* Adjust the threshold to the wider set */
10434 mc->pcpu_thrs = mm_cid_calc_pcpu_thrs(mc);
10435 /* Switch back to per task mode? */
10436 if (mc->users >= mc->pcpu_thrs)
10437 return;
10438
10439 /* Don't queue twice */
10440 if (mc->update_deferred)
10441 return;
10442
10443 /* Queue the irq work, which schedules the real work */
10444 mc->update_deferred = true;
10445 irq_work_queue(&mc->irq_work);
10446 }
10447
10448 static inline void mm_cid_transit_to_task(struct task_struct *t, struct mm_cid_pcpu *pcp)
10449 {
10450 if (cid_on_cpu(t->mm_cid.cid)) {
10451 unsigned int cid = cpu_cid_to_cid(t->mm_cid.cid);
10452
10453 t->mm_cid.cid = cid_to_transit_cid(cid);
10454 pcp->cid = t->mm_cid.cid;
10455 }
10456 }
10457
10458 static void mm_cid_fixup_cpus_to_tasks(struct mm_struct *mm)
10459 {
10460 unsigned int cpu;
10461
10462 /* Walk the CPUs and fixup all stale CIDs */
10463 for_each_possible_cpu(cpu) {
10464 struct mm_cid_pcpu *pcp = per_cpu_ptr(mm->mm_cid.pcpu, cpu);
10465 struct rq *rq = cpu_rq(cpu);
10466
10467 /* Remote access to mm::mm_cid::pcpu requires rq_lock */
10468 guard(rq_lock_irq)(rq);
10469 /* Is the CID still owned by the CPU? */
10470 if (cid_on_cpu(pcp->cid)) {
10471 /*
10472 * If rq->curr has @mm, transfer it with the
10473 * transition bit set. Otherwise drop it.
10474 */
10475 if (rq->curr->mm == mm && rq->curr->mm_cid.active)
10476 mm_cid_transit_to_task(rq->curr, pcp);
10477 else
10478 mm_drop_cid_on_cpu(mm, pcp);
10479
10480 } else if (rq->curr->mm == mm && rq->curr->mm_cid.active) {
10481 unsigned int cid = rq->curr->mm_cid.cid;
10482
10483 /* Ensure it has the transition bit set */
10484 if (!cid_in_transit(cid)) {
10485 cid = cid_to_transit_cid(cid);
10486 rq->curr->mm_cid.cid = cid;
10487 pcp->cid = cid;
10488 }
10489 }
10490 }
10491 /* Clear the transition bit */
10492 WRITE_ONCE(mm->mm_cid.transit, 0);
10493 }
10494
10495 static inline void mm_cid_transfer_to_cpu(struct task_struct *t, struct mm_cid_pcpu *pcp)
10496 {
10497 if (cid_on_task(t->mm_cid.cid)) {
10498 t->mm_cid.cid = cid_to_cpu_cid(t->mm_cid.cid);
10499 pcp->cid = t->mm_cid.cid;
10500 }
10501 }
10502
10503 static bool mm_cid_fixup_task_to_cpu(struct task_struct *t, struct mm_struct *mm)
10504 {
10505 /* Remote access to mm::mm_cid::pcpu requires rq_lock */
10506 guard(task_rq_lock)(t);
10507 /* If the task is not active it is not in the users count */
10508 if (!t->mm_cid.active)
10509 return false;
10510 if (cid_on_task(t->mm_cid.cid)) {
10511 /* If running on the CPU, transfer the CID, otherwise drop it */
10512 if (task_rq(t)->curr == t)
10513 mm_cid_transfer_to_cpu(t, per_cpu_ptr(mm->mm_cid.pcpu, task_cpu(t)));
10514 else
10515 mm_unset_cid_on_task(t);
10516 }
10517 return true;
10518 }
10519
10520 static void mm_cid_fixup_tasks_to_cpus(void)
10521 {
10522 struct mm_struct *mm = current->mm;
10523 struct task_struct *p, *t;
10524 unsigned int users;
10525
10526 /*
10527 * This can obviously race with a concurrent affinity change, which
10528 * increases the number of allowed CPUs for this mm, but that does
10529 * not affect the mode and only changes the CID constraints. A
10530 * possible switch back to per task mode happens either in the
10531 * deferred handler function or in the next fork()/exit().
10532 *
10533 * The caller has already transferred its own CID. The newly incoming
10534 * task is already accounted for, but not yet visible.
10535 */
10536 users = mm->mm_cid.users - 2;
10537 if (!users)
10538 return;
10539
10540 guard(rcu)();
10541 for_other_threads(current, t) {
10542 if (mm_cid_fixup_task_to_cpu(t, mm))
10543 users--;
10544 }
10545
10546 if (!users)
10547 return;
10548
10549 /* Happens only for CLONE_VM processes. */
10550 for_each_process_thread(p, t) {
10551 if (t == current || t->mm != mm)
10552 continue;
10553 if (mm_cid_fixup_task_to_cpu(t, mm)) {
10554 if (--users == 0)
10555 return;
10556 }
10557 }
10558 }
10559
10560 static bool sched_mm_cid_add_user(struct task_struct *t, struct mm_struct *mm)
10561 {
10562 t->mm_cid.active = 1;
10563 mm->mm_cid.users++;
10564 return mm_update_max_cids(mm);
10565 }
10566
10567 void sched_mm_cid_fork(struct task_struct *t)
10568 {
10569 struct mm_struct *mm = t->mm;
10570 bool percpu;
10571
10572 WARN_ON_ONCE(!mm || t->mm_cid.cid != MM_CID_UNSET);
10573
10574 guard(mutex)(&mm->mm_cid.mutex);
10575 scoped_guard(raw_spinlock_irq, &mm->mm_cid.lock) {
10576 struct mm_cid_pcpu *pcp = this_cpu_ptr(mm->mm_cid.pcpu);
10577
10578 /* First user ? */
10579 if (!mm->mm_cid.users) {
10580 sched_mm_cid_add_user(t, mm);
10581 t->mm_cid.cid = mm_get_cid(mm);
10582 /* Required for execve() */
10583 pcp->cid = t->mm_cid.cid;
10584 return;
10585 }
10586
10587 if (!sched_mm_cid_add_user(t, mm)) {
10588 if (!mm->mm_cid.percpu)
10589 t->mm_cid.cid = mm_get_cid(mm);
10590 return;
10591 }
10592
10593 /* Handle the mode change and transfer current's CID */
10594 percpu = !!mm->mm_cid.percpu;
10595 if (!percpu)
10596 mm_cid_transit_to_task(current, pcp);
10597 else
10598 mm_cid_transfer_to_cpu(current, pcp);
10599 }
10600
10601 if (percpu) {
10602 mm_cid_fixup_tasks_to_cpus();
10603 } else {
10604 mm_cid_fixup_cpus_to_tasks(mm);
10605 t->mm_cid.cid = mm_get_cid(mm);
10606 }
10607 }
10608
10609 static bool sched_mm_cid_remove_user(struct task_struct *t)
10610 {
10611 t->mm_cid.active = 0;
10612 scoped_guard(preempt) {
10613 /* Clear the transition bit */
10614 t->mm_cid.cid = cid_from_transit_cid(t->mm_cid.cid);
10615 mm_unset_cid_on_task(t);
10616 }
10617 t->mm->mm_cid.users--;
10618 return mm_update_max_cids(t->mm);
10619 }
10620
10621 static bool __sched_mm_cid_exit(struct task_struct *t)
10622 {
10623 struct mm_struct *mm = t->mm;
10624
10625 if (!sched_mm_cid_remove_user(t))
10626 return false;
10627 /*
10628 * Contrary to fork() this only deals with a switch back to per
10629 * task mode either because the above decreased users or an
10630 * affinity change increased the number of allowed CPUs and the
10631 * deferred fixup did not run yet.
10632 */
10633 if (WARN_ON_ONCE(mm->mm_cid.percpu))
10634 return false;
10635 /*
10636 * A failed fork(2) cleanup never gets here, so @current must have
10637 * the same MM as @t. That's true for exit() and the failed
10638 * pthread_create() cleanup case.
10639 */
10640 if (WARN_ON_ONCE(current->mm != mm))
10641 return false;
10642 return true;
10643 }
10644
10645 /*
10646 * When a task exits, the MM CID held by the task is no longer required as
10647 * the task cannot return to user space.
10648 */
10649 void sched_mm_cid_exit(struct task_struct *t)
10650 {
10651 struct mm_struct *mm = t->mm;
10652
10653 if (!mm || !t->mm_cid.active)
10654 return;
10655 /*
10656 * Ensure that only one instance is doing MM CID operations within
10657 * a MM. The common case is uncontended. The rare fixup case adds
10658 * some overhead.
10659 */
10660 scoped_guard(mutex, &mm->mm_cid.mutex) {
10661 /* mm_cid::mutex is sufficient to protect mm_cid::users */
10662 if (likely(mm->mm_cid.users > 1)) {
10663 scoped_guard(raw_spinlock_irq, &mm->mm_cid.lock) {
10664 if (!__sched_mm_cid_exit(t))
10665 return;
10666 /* Mode change required. Transfer current's CID */
10667 mm_cid_transit_to_task(current, this_cpu_ptr(mm->mm_cid.pcpu));
10668 }
10669 mm_cid_fixup_cpus_to_tasks(mm);
10670 return;
10671 }
10672 /* Last user */
10673 scoped_guard(raw_spinlock_irq, &mm->mm_cid.lock) {
10674 /* Required across execve() */
10675 if (t == current)
10676 mm_cid_transit_to_task(t, this_cpu_ptr(mm->mm_cid.pcpu));
10677 /* Ignore mode change. There is nothing to do. */
10678 sched_mm_cid_remove_user(t);
10679 }
10680 }
10681
10682 /*
10683 * As this is the last user (execve(), process exit or failed
10684 * fork(2)) there is no concurrency anymore.
10685 *
10686 * Synchronize any pending work to ensure that there are no
10687 * dangling references left. @t->mm_cid.users is zero so nothing
10688 * can queue this work anymore.
10689 */
10690 irq_work_sync(&mm->mm_cid.irq_work);
10691 cancel_work_sync(&mm->mm_cid.work);
10692 }
10693
10694 /* Deactivate MM CID allocation across execve() */
10695 void sched_mm_cid_before_execve(struct task_struct *t)
10696 {
10697 sched_mm_cid_exit(t);
10698 }
10699
10700 /* Reactivate MM CID after successful execve() */
10701 void sched_mm_cid_after_execve(struct task_struct *t)
10702 {
10703 sched_mm_cid_fork(t);
10704 }
10705
10706 static void mm_cid_work_fn(struct work_struct *work)
10707 {
10708 struct mm_struct *mm = container_of(work, struct mm_struct, mm_cid.work);
10709
10710 guard(mutex)(&mm->mm_cid.mutex);
10711 /* Did the last user task exit already? */
10712 if (!mm->mm_cid.users)
10713 return;
10714
10715 scoped_guard(raw_spinlock_irq, &mm->mm_cid.lock) {
10716 /* Have fork() or exit() handled it already? */
10717 if (!mm->mm_cid.update_deferred)
10718 return;
10719 /* This clears mm_cid::update_deferred */
10720 if (!mm_update_max_cids(mm))
10721 return;
10722 /* Affinity changes can only switch back to task mode */
10723 if (WARN_ON_ONCE(mm->mm_cid.percpu))
10724 return;
10725 }
10726 mm_cid_fixup_cpus_to_tasks(mm);
10727 }
10728
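/*
 * The need for a deferred fixup is detected with rq::lock held, where
 * work cannot be scheduled directly. The hard irq_work provides a context
 * from which the workqueue can be used safely.
 */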
static void mm_cid_irq_work(struct irq_work *work)
{
	struct mm_struct *mm = container_of(work, struct mm_struct, mm_cid.irq_work);

	/*
	 * Needs to be unconditional because mm_cid::lock cannot be held
	 * when scheduling work as mm_update_cpus_allowed() nests inside
	 * rq::lock and schedule_work() might end up in a wakeup...
	 */
	schedule_work(&mm->mm_cid.work);
}

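/*
 * Initialize the MM CID state of a newly created @mm. @p is the task the
 * new MM belongs to; its affinity seeds mm_cid::nr_cpus_allowed and the
 * MM's allowed CPUs mask.
 */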
void mm_init_cid(struct mm_struct *mm, struct task_struct *p)
{
	mm->mm_cid.max_cids = 0;
	mm->mm_cid.percpu = 0;
	mm->mm_cid.transit = 0;
	mm->mm_cid.nr_cpus_allowed = p->nr_cpus_allowed;
	mm->mm_cid.users = 0;
	mm->mm_cid.pcpu_thrs = 0;
	mm->mm_cid.update_deferred = 0;
	raw_spin_lock_init(&mm->mm_cid.lock);
	mutex_init(&mm->mm_cid.mutex);
	mm->mm_cid.irq_work = IRQ_WORK_INIT_HARD(mm_cid_irq_work);
	INIT_WORK(&mm->mm_cid.work, mm_cid_work_fn);
	cpumask_copy(mm_cpus_allowed(mm), &p->cpus_mask);
	bitmap_zero(mm_cidmask(mm), num_possible_cpus());
}
#else /* !CONFIG_SCHED_MM_CID */
static inline void mm_update_cpus_allowed(struct mm_struct *mm, const struct cpumask *affmsk) { }
#endif /* !CONFIG_SCHED_MM_CID */

static DEFINE_PER_CPU(struct sched_change_ctx, sched_change_ctx);

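/*
 * sched_change_begin() / sched_change_end() bracket a change of a task's
 * scheduling properties (priority, policy or scheduling class). On begin
 * the task is dequeued and, if it is the current (donor) task, put via
 * put_prev_task(); on end it is re-enqueued and set as next task again,
 * with the class switch and priority change callbacks invoked at the
 * appropriate points. The caller must hold the task's rq lock across the
 * whole operation.
 *
 * Illustrative usage (a sketch only, not lifted from an actual caller;
 * @new_prio is a made-up variable):
 *
 *	struct sched_change_ctx *ctx;
 *
 *	ctx = sched_change_begin(p, DEQUEUE_SAVE);
 *	p->prio = new_prio;
 *	sched_change_end(ctx);
 */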
struct sched_change_ctx *sched_change_begin(struct task_struct *p, unsigned int flags)
{
	struct sched_change_ctx *ctx = this_cpu_ptr(&sched_change_ctx);
	struct rq *rq = task_rq(p);

	/*
	 * Must exclusively use matched flags since this is both dequeue and
	 * enqueue.
	 */
	WARN_ON_ONCE(flags & 0xFFFF0000);

	lockdep_assert_rq_held(rq);

	if (!(flags & DEQUEUE_NOCLOCK)) {
		update_rq_clock(rq);
		flags |= DEQUEUE_NOCLOCK;
	}

	if (flags & DEQUEUE_CLASS) {
		if (p->sched_class->switching_from)
			p->sched_class->switching_from(rq, p);
	}

	*ctx = (struct sched_change_ctx){
		.p = p,
		.flags = flags,
		.queued = task_on_rq_queued(p),
		.running = task_current_donor(rq, p),
	};

	if (!(flags & DEQUEUE_CLASS)) {
		if (p->sched_class->get_prio)
			ctx->prio = p->sched_class->get_prio(rq, p);
		else
			ctx->prio = p->prio;
	}

	if (ctx->queued)
		dequeue_task(rq, p, flags);
	if (ctx->running)
		put_prev_task(rq, p);

	if ((flags & DEQUEUE_CLASS) && p->sched_class->switched_from)
		p->sched_class->switched_from(rq, p);

	return ctx;
}

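/*
 * Counterpart to sched_change_begin(): re-enqueue the task and/or set it
 * as next task again and invoke the class switch or priority change
 * callbacks. Must be called with the rq lock still held.
 */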
void sched_change_end(struct sched_change_ctx *ctx)
{
	struct task_struct *p = ctx->p;
	struct rq *rq = task_rq(p);

	lockdep_assert_rq_held(rq);

	if ((ctx->flags & ENQUEUE_CLASS) && p->sched_class->switching_to)
		p->sched_class->switching_to(rq, p);

	if (ctx->queued)
		enqueue_task(rq, p, ctx->flags);
	if (ctx->running)
		set_next_task(rq, p);

	if (ctx->flags & ENQUEUE_CLASS) {
		if (p->sched_class->switched_to)
			p->sched_class->switched_to(rq, p);
	} else {
		p->sched_class->prio_changed(rq, p, ctx->prio);
	}
}
