1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * kernel/sched/core.c
4 *
5 * Core kernel CPU scheduler code
6 *
7 * Copyright (C) 1991-2002 Linus Torvalds
8 * Copyright (C) 1998-2024 Ingo Molnar, Red Hat
9 */
10 #include <linux/highmem.h>
11 #include <linux/hrtimer_api.h>
12 #include <linux/ktime_api.h>
13 #include <linux/sched/signal.h>
14 #include <linux/syscalls_api.h>
15 #include <linux/debug_locks.h>
16 #include <linux/prefetch.h>
17 #include <linux/capability.h>
18 #include <linux/pgtable_api.h>
19 #include <linux/wait_bit.h>
20 #include <linux/jiffies.h>
21 #include <linux/spinlock_api.h>
22 #include <linux/cpumask_api.h>
23 #include <linux/lockdep_api.h>
24 #include <linux/hardirq.h>
25 #include <linux/softirq.h>
26 #include <linux/refcount_api.h>
27 #include <linux/topology.h>
28 #include <linux/sched/clock.h>
29 #include <linux/sched/cond_resched.h>
30 #include <linux/sched/cputime.h>
31 #include <linux/sched/debug.h>
32 #include <linux/sched/hotplug.h>
33 #include <linux/sched/init.h>
34 #include <linux/sched/isolation.h>
35 #include <linux/sched/loadavg.h>
36 #include <linux/sched/mm.h>
37 #include <linux/sched/nohz.h>
38 #include <linux/sched/rseq_api.h>
39 #include <linux/sched/rt.h>
40
41 #include <linux/blkdev.h>
42 #include <linux/context_tracking.h>
43 #include <linux/cpuset.h>
44 #include <linux/delayacct.h>
45 #include <linux/init_task.h>
46 #include <linux/interrupt.h>
47 #include <linux/ioprio.h>
48 #include <linux/kallsyms.h>
49 #include <linux/kcov.h>
50 #include <linux/kprobes.h>
51 #include <linux/llist_api.h>
52 #include <linux/mmu_context.h>
53 #include <linux/mmzone.h>
54 #include <linux/mutex_api.h>
55 #include <linux/nmi.h>
56 #include <linux/nospec.h>
57 #include <linux/perf_event_api.h>
58 #include <linux/profile.h>
59 #include <linux/psi.h>
60 #include <linux/rcuwait_api.h>
61 #include <linux/rseq.h>
62 #include <linux/sched/wake_q.h>
63 #include <linux/scs.h>
64 #include <linux/slab.h>
65 #include <linux/syscalls.h>
66 #include <linux/vtime.h>
67 #include <linux/wait_api.h>
68 #include <linux/workqueue_api.h>
69
70 #ifdef CONFIG_PREEMPT_DYNAMIC
71 # ifdef CONFIG_GENERIC_ENTRY
72 # include <linux/entry-common.h>
73 # endif
74 #endif
75
76 #include <uapi/linux/sched/types.h>
77
78 #include <asm/irq_regs.h>
79 #include <asm/switch_to.h>
80 #include <asm/tlb.h>
81
82 #define CREATE_TRACE_POINTS
83 #include <linux/sched/rseq_api.h>
84 #include <trace/events/sched.h>
85 #include <trace/events/ipi.h>
86 #undef CREATE_TRACE_POINTS
87
88 #include "sched.h"
89 #include "stats.h"
90
91 #include "autogroup.h"
92 #include "pelt.h"
93 #include "smp.h"
94 #include "stats.h"
95
96 #include "../workqueue_internal.h"
97 #include "../../io_uring/io-wq.h"
98 #include "../smpboot.h"
99
100 EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
101 EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);
102
103 /*
104 * Export tracepoints that act as a bare tracehook (ie: have no trace event
105 * associated with them) to allow external modules to probe them.
106 */
107 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp);
108 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
109 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
110 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
111 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
112 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_hw_tp);
113 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp);
114 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
115 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
116 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
117 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
118 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_compute_energy_tp);
119
120 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
121
122 #ifdef CONFIG_SCHED_DEBUG
123 /*
124 * Debugging: various feature bits
125 *
126 * If SCHED_DEBUG is disabled, each compilation unit has its own copy of
127 * sysctl_sched_features, defined in sched.h, to allow constants propagation
128 * at compile time and compiler optimization based on features default.
129 */
130 #define SCHED_FEAT(name, enabled) \
131 (1UL << __SCHED_FEAT_##name) * enabled |
132 const_debug unsigned int sysctl_sched_features =
133 #include "features.h"
134 0;
135 #undef SCHED_FEAT
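
/*
 * Illustrative expansion (a sketch, not generated output): if features.h
 * contained only
 *
 *	SCHED_FEAT(PLACE_LAG, true)
 *	SCHED_FEAT(RUN_TO_PARITY, true)
 *
 * the initializer above would evaluate to
 *
 *	(1UL << __SCHED_FEAT_PLACE_LAG) * true |
 *	(1UL << __SCHED_FEAT_RUN_TO_PARITY) * true |
 *	0;
 */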
136
137 /*
138 * Print a warning if need_resched is set for the given duration (if
139 * LATENCY_WARN is enabled).
140 *
141 * If sysctl_resched_latency_warn_once is set, only one warning will be shown
142 * per boot.
143 */
144 __read_mostly int sysctl_resched_latency_warn_ms = 100;
145 __read_mostly int sysctl_resched_latency_warn_once = 1;
146 #endif /* CONFIG_SCHED_DEBUG */
147
148 /*
149 * Number of tasks to iterate in a single balance run.
150 * Limited because this is done with IRQs disabled.
151 */
152 const_debug unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;
153
154 __read_mostly int scheduler_running;
155
156 #ifdef CONFIG_SCHED_CORE
157
158 DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);
159
160 /* kernel prio, less is more */
161 static inline int __task_prio(const struct task_struct *p)
162 {
163 if (p->sched_class == &stop_sched_class) /* trumps deadline */
164 return -2;
165
166 if (p->dl_server)
167 return -1; /* deadline */
168
169 if (rt_or_dl_prio(p->prio))
170 return p->prio; /* [-1, 99] */
171
172 if (p->sched_class == &idle_sched_class)
173 return MAX_RT_PRIO + NICE_WIDTH; /* 140 */
174
175 if (task_on_scx(p))
176 return MAX_RT_PRIO + MAX_NICE + 1; /* 120, squash ext */
177
178 return MAX_RT_PRIO + MAX_NICE; /* 119, squash fair */
179 }
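
/*
 * Resulting kernel-prio bands, derived from the mapping above (illustrative
 * summary only):
 *
 *	stop:			 -2
 *	dl_server:		 -1
 *	RT/DL (p->prio):	 -1..99
 *	fair (squashed):	119
 *	ext  (squashed):	120
 *	idle:			140
 */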
180
181 /*
182 * l(a,b)
183 * le(a,b) := !l(b,a)
184 * g(a,b) := l(b,a)
185 * ge(a,b) := !l(a,b)
186 */
187
188 /* real prio, less is less */
189 static inline bool prio_less(const struct task_struct *a,
190 const struct task_struct *b, bool in_fi)
191 {
192
193 int pa = __task_prio(a), pb = __task_prio(b);
194
195 if (-pa < -pb)
196 return true;
197
198 if (-pb < -pa)
199 return false;
200
201 if (pa == -1) { /* dl_prio() doesn't work because of stop_class above */
202 const struct sched_dl_entity *a_dl, *b_dl;
203
204 a_dl = &a->dl;
205 /*
206 * Since 'a' and 'b' can be CFS tasks served by a DL server,
207 * __task_prio() can return -1 (for DL) even for those. In that
208 * case, get to the dl_server's DL entity.
209 */
210 if (a->dl_server)
211 a_dl = a->dl_server;
212
213 b_dl = &b->dl;
214 if (b->dl_server)
215 b_dl = b->dl_server;
216
217 return !dl_time_before(a_dl->deadline, b_dl->deadline);
218 }
219
220 if (pa == MAX_RT_PRIO + MAX_NICE) /* fair */
221 return cfs_prio_less(a, b, in_fi);
222
223 #ifdef CONFIG_SCHED_CLASS_EXT
224 if (pa == MAX_RT_PRIO + MAX_NICE + 1) /* ext */
225 return scx_prio_less(a, b, in_fi);
226 #endif
227
228 return false;
229 }
230
231 static inline bool __sched_core_less(const struct task_struct *a,
232 const struct task_struct *b)
233 {
234 if (a->core_cookie < b->core_cookie)
235 return true;
236
237 if (a->core_cookie > b->core_cookie)
238 return false;
239
240 /* flip prio, so high prio is leftmost */
241 if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count))
242 return true;
243
244 return false;
245 }
246
247 #define __node_2_sc(node) rb_entry((node), struct task_struct, core_node)
248
249 static inline bool rb_sched_core_less(struct rb_node *a, const struct rb_node *b)
250 {
251 return __sched_core_less(__node_2_sc(a), __node_2_sc(b));
252 }
253
254 static inline int rb_sched_core_cmp(const void *key, const struct rb_node *node)
255 {
256 const struct task_struct *p = __node_2_sc(node);
257 unsigned long cookie = (unsigned long)key;
258
259 if (cookie < p->core_cookie)
260 return -1;
261
262 if (cookie > p->core_cookie)
263 return 1;
264
265 return 0;
266 }
267
268 void sched_core_enqueue(struct rq *rq, struct task_struct *p)
269 {
270 if (p->se.sched_delayed)
271 return;
272
273 rq->core->core_task_seq++;
274
275 if (!p->core_cookie)
276 return;
277
278 rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);
279 }
280
281 void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags)
282 {
283 if (p->se.sched_delayed)
284 return;
285
286 rq->core->core_task_seq++;
287
288 if (sched_core_enqueued(p)) {
289 rb_erase(&p->core_node, &rq->core_tree);
290 RB_CLEAR_NODE(&p->core_node);
291 }
292
293 /*
294 * Migrating the last task off the cpu, with the cpu in forced idle
295 * state. Reschedule to create an accounting edge for forced idle,
296 * and re-examine whether the core is still in forced idle state.
297 */
298 if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 &&
299 rq->core->core_forceidle_count && rq->curr == rq->idle)
300 resched_curr(rq);
301 }
302
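/* Ask @p's scheduling class whether it is currently throttled on @cpu. */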
303 static int sched_task_is_throttled(struct task_struct *p, int cpu)
304 {
305 if (p->sched_class->task_is_throttled)
306 return p->sched_class->task_is_throttled(p, cpu);
307
308 return 0;
309 }
310
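/* Find the next unthrottled task in the core tree that shares @cookie with @p. */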
311 static struct task_struct *sched_core_next(struct task_struct *p, unsigned long cookie)
312 {
313 struct rb_node *node = &p->core_node;
314 int cpu = task_cpu(p);
315
316 do {
317 node = rb_next(node);
318 if (!node)
319 return NULL;
320
321 p = __node_2_sc(node);
322 if (p->core_cookie != cookie)
323 return NULL;
324
325 } while (sched_task_is_throttled(p, cpu));
326
327 return p;
328 }
329
330 /*
331 * Find left-most (aka, highest priority) and unthrottled task matching @cookie.
332 * If no suitable task is found, NULL will be returned.
333 */
334 static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie)
335 {
336 struct task_struct *p;
337 struct rb_node *node;
338
339 node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp);
340 if (!node)
341 return NULL;
342
343 p = __node_2_sc(node);
344 if (!sched_task_is_throttled(p, rq->cpu))
345 return p;
346
347 return sched_core_next(p, cookie);
348 }
349
350 /*
351 * Magic required such that:
352 *
353 * raw_spin_rq_lock(rq);
354 * ...
355 * raw_spin_rq_unlock(rq);
356 *
357 * ends up locking and unlocking the _same_ lock, and all CPUs
358 * always agree on what rq has what lock.
359 *
360 * XXX entirely possible to selectively enable cores, don't bother for now.
361 */
362
363 static DEFINE_MUTEX(sched_core_mutex);
364 static atomic_t sched_core_count;
365 static struct cpumask sched_core_mask;
366
367 static void sched_core_lock(int cpu, unsigned long *flags)
368 {
369 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
370 int t, i = 0;
371
372 local_irq_save(*flags);
373 for_each_cpu(t, smt_mask)
374 raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);
375 }
376
377 static void sched_core_unlock(int cpu, unsigned long *flags)
378 {
379 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
380 int t;
381
382 for_each_cpu(t, smt_mask)
383 raw_spin_unlock(&cpu_rq(t)->__lock);
384 local_irq_restore(*flags);
385 }
386
387 static void __sched_core_flip(bool enabled)
388 {
389 unsigned long flags;
390 int cpu, t;
391
392 cpus_read_lock();
393
394 /*
395 * Toggle the online cores, one by one.
396 */
397 cpumask_copy(&sched_core_mask, cpu_online_mask);
398 for_each_cpu(cpu, &sched_core_mask) {
399 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
400
401 sched_core_lock(cpu, &flags);
402
403 for_each_cpu(t, smt_mask)
404 cpu_rq(t)->core_enabled = enabled;
405
406 cpu_rq(cpu)->core->core_forceidle_start = 0;
407
408 sched_core_unlock(cpu, &flags);
409
410 cpumask_andnot(&sched_core_mask, &sched_core_mask, smt_mask);
411 }
412
413 /*
414 * Toggle the offline CPUs.
415 */
416 for_each_cpu_andnot(cpu, cpu_possible_mask, cpu_online_mask)
417 cpu_rq(cpu)->core_enabled = enabled;
418
419 cpus_read_unlock();
420 }
421
422 static void sched_core_assert_empty(void)
423 {
424 int cpu;
425
426 for_each_possible_cpu(cpu)
427 WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree));
428 }
429
430 static void __sched_core_enable(void)
431 {
432 static_branch_enable(&__sched_core_enabled);
433 /*
434 * Ensure all previous instances of raw_spin_rq_*lock() have finished
435 * and future ones will observe !sched_core_disabled().
436 */
437 synchronize_rcu();
438 __sched_core_flip(true);
439 sched_core_assert_empty();
440 }
441
442 static void __sched_core_disable(void)
443 {
444 sched_core_assert_empty();
445 __sched_core_flip(false);
446 static_branch_disable(&__sched_core_enabled);
447 }
448
449 void sched_core_get(void)
450 {
451 if (atomic_inc_not_zero(&sched_core_count))
452 return;
453
454 mutex_lock(&sched_core_mutex);
455 if (!atomic_read(&sched_core_count))
456 __sched_core_enable();
457
458 smp_mb__before_atomic();
459 atomic_inc(&sched_core_count);
460 mutex_unlock(&sched_core_mutex);
461 }
462
463 static void __sched_core_put(struct work_struct *work)
464 {
465 if (atomic_dec_and_mutex_lock(&sched_core_count, &sched_core_mutex)) {
466 __sched_core_disable();
467 mutex_unlock(&sched_core_mutex);
468 }
469 }
470
471 void sched_core_put(void)
472 {
473 static DECLARE_WORK(_work, __sched_core_put);
474
475 /*
476 * "There can be only one"
477 *
478 * Either this is the last one, or we don't actually need to do any
479 * 'work'. If it is the last *again*, we rely on
480 * WORK_STRUCT_PENDING_BIT.
481 */
482 if (!atomic_add_unless(&sched_core_count, -1, 1))
483 schedule_work(&_work);
484 }
485
486 #else /* !CONFIG_SCHED_CORE */
487
488 static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { }
489 static inline void
490 sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { }
491
492 #endif /* CONFIG_SCHED_CORE */
493
494 /*
495 * Serialization rules:
496 *
497 * Lock order:
498 *
499 * p->pi_lock
500 * rq->lock
501 * hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
502 *
503 * rq1->lock
504 * rq2->lock where: rq1 < rq2
505 *
506 * Regular state:
507 *
508 * Normal scheduling state is serialized by rq->lock. __schedule() takes the
509 * local CPU's rq->lock, it optionally removes the task from the runqueue and
510 * always looks at the local rq data structures to find the most eligible task
511 * to run next.
512 *
513 * Task enqueue is also under rq->lock, possibly taken from another CPU.
514 * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
515 * the local CPU to avoid bouncing the runqueue state around [ see
516 * ttwu_queue_wakelist() ]
517 *
518 * Task wakeup, specifically wakeups that involve migration, are horribly
519 * complicated to avoid having to take two rq->locks.
520 *
521 * Special state:
522 *
523 * System-calls and anything external will use task_rq_lock() which acquires
524 * both p->pi_lock and rq->lock. As a consequence the state they change is
525 * stable while holding either lock:
526 *
527 * - sched_setaffinity()/
528 * set_cpus_allowed_ptr(): p->cpus_ptr, p->nr_cpus_allowed
529 * - set_user_nice(): p->se.load, p->*prio
530 * - __sched_setscheduler(): p->sched_class, p->policy, p->*prio,
531 * p->se.load, p->rt_priority,
532 * p->dl.dl_{runtime, deadline, period, flags, bw, density}
533 * - sched_setnuma(): p->numa_preferred_nid
534 * - sched_move_task(): p->sched_task_group
535 * - uclamp_update_active() p->uclamp*
536 *
537 * p->state <- TASK_*:
538 *
539 * is changed locklessly using set_current_state(), __set_current_state() or
540 * set_special_state(), see their respective comments, or by
541 * try_to_wake_up(). This latter uses p->pi_lock to serialize against
542 * concurrent self.
543 *
544 * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
545 *
546 * is set by activate_task() and cleared by deactivate_task(), under
547 * rq->lock. Non-zero indicates the task is runnable, the special
548 * ON_RQ_MIGRATING state is used for migration without holding both
549 * rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
550 *
551 * p->on_cpu <- { 0, 1 }:
552 *
553 * is set by prepare_task() and cleared by finish_task() such that it will be
554 * set before p is scheduled-in and cleared after p is scheduled-out, both
555 * under rq->lock. Non-zero indicates the task is running on its CPU.
556 *
557 * [ The astute reader will observe that it is possible for two tasks on one
558 * CPU to have ->on_cpu = 1 at the same time. ]
559 *
560 * task_cpu(p): is changed by set_task_cpu(), the rules are:
561 *
562 * - Don't call set_task_cpu() on a blocked task:
563 *
564 * We don't care what CPU we're not running on, this simplifies hotplug,
565 * the CPU assignment of blocked tasks isn't required to be valid.
566 *
567 * - for try_to_wake_up(), called under p->pi_lock:
568 *
569 * This allows try_to_wake_up() to only take one rq->lock, see its comment.
570 *
571 * - for migration called under rq->lock:
572 * [ see task_on_rq_migrating() in task_rq_lock() ]
573 *
574 * o move_queued_task()
575 * o detach_task()
576 *
577 * - for migration called under double_rq_lock():
578 *
579 * o __migrate_swap_task()
580 * o push_rt_task() / pull_rt_task()
581 * o push_dl_task() / pull_dl_task()
582 * o dl_task_offline_migration()
583 *
584 */
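
/*
 * Illustrative pattern for the "special state" rules above; a sketch only,
 * where 'p' and the fields being changed are placeholders:
 *
 *	struct rq_flags rf;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &rf);	// acquires p->pi_lock and rq->lock
 *	... change p->policy, p->*prio, p->sched_class, ...
 *	task_rq_unlock(rq, p, &rf);	// releases both locks
 */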
585
586 void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
587 {
588 raw_spinlock_t *lock;
589
590 /* Matches synchronize_rcu() in __sched_core_enable() */
591 preempt_disable();
592 if (sched_core_disabled()) {
593 raw_spin_lock_nested(&rq->__lock, subclass);
594 /* preempt_count *MUST* be > 1 */
595 preempt_enable_no_resched();
596 return;
597 }
598
599 for (;;) {
600 lock = __rq_lockp(rq);
601 raw_spin_lock_nested(lock, subclass);
602 if (likely(lock == __rq_lockp(rq))) {
603 /* preempt_count *MUST* be > 1 */
604 preempt_enable_no_resched();
605 return;
606 }
607 raw_spin_unlock(lock);
608 }
609 }
610
611 bool raw_spin_rq_trylock(struct rq *rq)
612 {
613 raw_spinlock_t *lock;
614 bool ret;
615
616 /* Matches synchronize_rcu() in __sched_core_enable() */
617 preempt_disable();
618 if (sched_core_disabled()) {
619 ret = raw_spin_trylock(&rq->__lock);
620 preempt_enable();
621 return ret;
622 }
623
624 for (;;) {
625 lock = __rq_lockp(rq);
626 ret = raw_spin_trylock(lock);
627 if (!ret || (likely(lock == __rq_lockp(rq)))) {
628 preempt_enable();
629 return ret;
630 }
631 raw_spin_unlock(lock);
632 }
633 }
634
635 void raw_spin_rq_unlock(struct rq *rq)
636 {
637 raw_spin_unlock(rq_lockp(rq));
638 }
639
640 #ifdef CONFIG_SMP
641 /*
642 * double_rq_lock - safely lock two runqueues
643 */
644 void double_rq_lock(struct rq *rq1, struct rq *rq2)
645 {
646 lockdep_assert_irqs_disabled();
647
648 if (rq_order_less(rq2, rq1))
649 swap(rq1, rq2);
650
651 raw_spin_rq_lock(rq1);
652 if (__rq_lockp(rq1) != __rq_lockp(rq2))
653 raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);
654
655 double_rq_clock_clear_update(rq1, rq2);
656 }
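
/*
 * Minimal usage sketch (illustrative; callers normally already run with
 * IRQs disabled, e.g. under an rq lock):
 *
 *	local_irq_disable();
 *	double_rq_lock(src_rq, dst_rq);
 *	... move a task from src_rq to dst_rq ...
 *	double_rq_unlock(src_rq, dst_rq);
 *	local_irq_enable();
 */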
657 #endif
658
659 /*
660 * __task_rq_lock - lock the rq @p resides on.
661 */
662 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
663 __acquires(rq->lock)
664 {
665 struct rq *rq;
666
667 lockdep_assert_held(&p->pi_lock);
668
669 for (;;) {
670 rq = task_rq(p);
671 raw_spin_rq_lock(rq);
672 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
673 rq_pin_lock(rq, rf);
674 return rq;
675 }
676 raw_spin_rq_unlock(rq);
677
678 while (unlikely(task_on_rq_migrating(p)))
679 cpu_relax();
680 }
681 }
682
683 /*
684 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
685 */
686 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
687 __acquires(p->pi_lock)
688 __acquires(rq->lock)
689 {
690 struct rq *rq;
691
692 for (;;) {
693 raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
694 rq = task_rq(p);
695 raw_spin_rq_lock(rq);
696 /*
697 * move_queued_task() task_rq_lock()
698 *
699 * ACQUIRE (rq->lock)
700 * [S] ->on_rq = MIGRATING [L] rq = task_rq()
701 * WMB (__set_task_cpu()) ACQUIRE (rq->lock);
702 * [S] ->cpu = new_cpu [L] task_rq()
703 * [L] ->on_rq
704 * RELEASE (rq->lock)
705 *
706 * If we observe the old CPU in task_rq_lock(), the acquire of
707 * the old rq->lock will fully serialize against the stores.
708 *
709 * If we observe the new CPU in task_rq_lock(), the address
710 * dependency headed by '[L] rq = task_rq()' and the acquire
711 * will pair with the WMB to ensure we then also see migrating.
712 */
713 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
714 rq_pin_lock(rq, rf);
715 return rq;
716 }
717 raw_spin_rq_unlock(rq);
718 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
719
720 while (unlikely(task_on_rq_migrating(p)))
721 cpu_relax();
722 }
723 }
724
725 /*
726 * RQ-clock updating methods:
727 */
728
729 static void update_rq_clock_task(struct rq *rq, s64 delta)
730 {
731 /*
732 * In theory, the compiler should just see 0 here, and optimize out the call
733 * to sched_rt_avg_update. But I don't trust it...
734 */
735 s64 __maybe_unused steal = 0, irq_delta = 0;
736
737 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
738 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
739
740 /*
741 * Since irq_time is only updated on {soft,}irq_exit, we might run into
742 * this case when a previous update_rq_clock() happened inside a
743 * {soft,}IRQ region.
744 *
745 * When this happens, we stop ->clock_task and only update the
746 * prev_irq_time stamp to account for the part that fit, so that a next
747 * update will consume the rest. This ensures ->clock_task is
748 * monotonic.
749 *
750 * It does however cause some slight miss-attribution of {soft,}IRQ
751 * time, a more accurate solution would be to update the irq_time using
752 * the current rq->clock timestamp, except that would require using
753 * atomic ops.
754 */
755 if (irq_delta > delta)
756 irq_delta = delta;
757
758 rq->prev_irq_time += irq_delta;
759 delta -= irq_delta;
760 delayacct_irq(rq->curr, irq_delta);
761 #endif
762 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
763 if (static_key_false((&paravirt_steal_rq_enabled))) {
764 steal = paravirt_steal_clock(cpu_of(rq));
765 steal -= rq->prev_steal_time_rq;
766
767 if (unlikely(steal > delta))
768 steal = delta;
769
770 rq->prev_steal_time_rq += steal;
771 delta -= steal;
772 }
773 #endif
774
775 rq->clock_task += delta;
776
777 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
778 if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
779 update_irq_load_avg(rq, irq_delta + steal);
780 #endif
781 update_rq_clock_pelt(rq, delta);
782 }
783
784 void update_rq_clock(struct rq *rq)
785 {
786 s64 delta;
787
788 lockdep_assert_rq_held(rq);
789
790 if (rq->clock_update_flags & RQCF_ACT_SKIP)
791 return;
792
793 #ifdef CONFIG_SCHED_DEBUG
794 if (sched_feat(WARN_DOUBLE_CLOCK))
795 SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
796 rq->clock_update_flags |= RQCF_UPDATED;
797 #endif
798
799 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
800 if (delta < 0)
801 return;
802 rq->clock += delta;
803 update_rq_clock_task(rq, delta);
804 }
805
806 #ifdef CONFIG_SCHED_HRTICK
807 /*
808 * Use HR-timers to deliver accurate preemption points.
809 */
810
811 static void hrtick_clear(struct rq *rq)
812 {
813 if (hrtimer_active(&rq->hrtick_timer))
814 hrtimer_cancel(&rq->hrtick_timer);
815 }
816
817 /*
818 * High-resolution timer tick.
819 * Runs from hardirq context with interrupts disabled.
820 */
821 static enum hrtimer_restart hrtick(struct hrtimer *timer)
822 {
823 struct rq *rq = container_of(timer, struct rq, hrtick_timer);
824 struct rq_flags rf;
825
826 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
827
828 rq_lock(rq, &rf);
829 update_rq_clock(rq);
830 rq->curr->sched_class->task_tick(rq, rq->curr, 1);
831 rq_unlock(rq, &rf);
832
833 return HRTIMER_NORESTART;
834 }
835
836 #ifdef CONFIG_SMP
837
838 static void __hrtick_restart(struct rq *rq)
839 {
840 struct hrtimer *timer = &rq->hrtick_timer;
841 ktime_t time = rq->hrtick_time;
842
843 hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
844 }
845
846 /*
847 * called from hardirq (IPI) context
848 */
849 static void __hrtick_start(void *arg)
850 {
851 struct rq *rq = arg;
852 struct rq_flags rf;
853
854 rq_lock(rq, &rf);
855 __hrtick_restart(rq);
856 rq_unlock(rq, &rf);
857 }
858
859 /*
860 * Called to set the hrtick timer state.
861 *
862 * called with rq->lock held and IRQs disabled
863 */
864 void hrtick_start(struct rq *rq, u64 delay)
865 {
866 struct hrtimer *timer = &rq->hrtick_timer;
867 s64 delta;
868
869 /*
870 * Don't schedule slices shorter than 10000ns, that just
871 * doesn't make sense and can cause timer DoS.
872 */
873 delta = max_t(s64, delay, 10000LL);
874 rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);
875
876 if (rq == this_rq())
877 __hrtick_restart(rq);
878 else
879 smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
880 }
881
882 #else
883 /*
884 * Called to set the hrtick timer state.
885 *
886 * called with rq->lock held and IRQs disabled
887 */
888 void hrtick_start(struct rq *rq, u64 delay)
889 {
890 /*
891 * Don't schedule slices shorter than 10000ns, that just
892 * doesn't make sense. Rely on vruntime for fairness.
893 */
894 delay = max_t(u64, delay, 10000LL);
895 hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
896 HRTIMER_MODE_REL_PINNED_HARD);
897 }
898
899 #endif /* CONFIG_SMP */
900
901 static void hrtick_rq_init(struct rq *rq)
902 {
903 #ifdef CONFIG_SMP
904 INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
905 #endif
906 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
907 rq->hrtick_timer.function = hrtick;
908 }
909 #else /* CONFIG_SCHED_HRTICK */
910 static inline void hrtick_clear(struct rq *rq)
911 {
912 }
913
914 static inline void hrtick_rq_init(struct rq *rq)
915 {
916 }
917 #endif /* CONFIG_SCHED_HRTICK */
918
919 /*
920 * try_cmpxchg based fetch_or() macro so it works for different integer types:
921 */
922 #define fetch_or(ptr, mask) \
923 ({ \
924 typeof(ptr) _ptr = (ptr); \
925 typeof(mask) _mask = (mask); \
926 typeof(*_ptr) _val = *_ptr; \
927 \
928 do { \
929 } while (!try_cmpxchg(_ptr, &_val, _val | _mask)); \
930 _val; \
931 })
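
/*
 * Example (illustrative): atomically OR a flag into a thread_info flags word
 * and obtain the value the word held before the update:
 *
 *	old = fetch_or(&ti->flags, _TIF_NEED_RESCHED);
 *	if (old & _TIF_POLLING_NRFLAG)
 *		... the target was polling, no IPI needed ...
 */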
932
933 #if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
934 /*
935 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
936 * this avoids any races wrt polling state changes and thereby avoids
937 * spurious IPIs.
938 */
939 static inline bool set_nr_and_not_polling(struct task_struct *p)
940 {
941 struct thread_info *ti = task_thread_info(p);
942 return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
943 }
944
945 /*
946 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
947 *
948 * If this returns true, then the idle task promises to call
949 * sched_ttwu_pending() and reschedule soon.
950 */
951 static bool set_nr_if_polling(struct task_struct *p)
952 {
953 struct thread_info *ti = task_thread_info(p);
954 typeof(ti->flags) val = READ_ONCE(ti->flags);
955
956 do {
957 if (!(val & _TIF_POLLING_NRFLAG))
958 return false;
959 if (val & _TIF_NEED_RESCHED)
960 return true;
961 } while (!try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED));
962
963 return true;
964 }
965
966 #else
967 static inline bool set_nr_and_not_polling(struct task_struct *p)
968 {
969 set_tsk_need_resched(p);
970 return true;
971 }
972
973 #ifdef CONFIG_SMP
974 static inline bool set_nr_if_polling(struct task_struct *p)
975 {
976 return false;
977 }
978 #endif
979 #endif
980
981 static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
982 {
983 struct wake_q_node *node = &task->wake_q;
984
985 /*
986 * Atomically grab the task, if ->wake_q is !nil already it means
987 * it's already queued (either by us or someone else) and will get the
988 * wakeup due to that.
989 *
990 * In order to ensure that a pending wakeup will observe our pending
991 * state, even in the failed case, an explicit smp_mb() must be used.
992 */
993 smp_mb__before_atomic();
994 if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
995 return false;
996
997 /*
998 * The head is context local, there can be no concurrency.
999 */
1000 *head->lastp = node;
1001 head->lastp = &node->next;
1002 return true;
1003 }
1004
1005 /**
1006 * wake_q_add() - queue a wakeup for 'later' waking.
1007 * @head: the wake_q_head to add @task to
1008 * @task: the task to queue for 'later' wakeup
1009 *
1010 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
1011 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
1012 * instantly.
1013 *
1014 * This function must be used as-if it were wake_up_process(); IOW the task
1015 * must be ready to be woken at this location.
1016 */
1017 void wake_q_add(struct wake_q_head *head, struct task_struct *task)
1018 {
1019 if (__wake_q_add(head, task))
1020 get_task_struct(task);
1021 }
1022
1023 /**
1024 * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
1025 * @head: the wake_q_head to add @task to
1026 * @task: the task to queue for 'later' wakeup
1027 *
1028 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
1029 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
1030 * instantly.
1031 *
1032 * This function must be used as-if it were wake_up_process(); IOW the task
1033 * must be ready to be woken at this location.
1034 *
1035 * This function is essentially a task-safe equivalent to wake_q_add(). Callers
1036 * that already hold a reference to @task can call the 'safe' version and trust
1037 * wake_q to do the right thing depending on whether or not the @task is already
1038 * queued for wakeup.
1039 */
1040 void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
1041 {
1042 if (!__wake_q_add(head, task))
1043 put_task_struct(task);
1044 }
1045
1046 void wake_up_q(struct wake_q_head *head)
1047 {
1048 struct wake_q_node *node = head->first;
1049
1050 while (node != WAKE_Q_TAIL) {
1051 struct task_struct *task;
1052
1053 task = container_of(node, struct task_struct, wake_q);
1054 /* Task can safely be re-inserted now: */
1055 node = node->next;
1056 task->wake_q.next = NULL;
1057
1058 /*
1059 * wake_up_process() executes a full barrier, which pairs with
1060 * the queueing in wake_q_add() so as not to miss wakeups.
1061 */
1062 wake_up_process(task);
1063 put_task_struct(task);
1064 }
1065 }
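
/*
 * Typical deferred-wakeup sequence (an illustrative sketch; 'lock' and 'p'
 * are placeholders):
 *
 *	DEFINE_WAKE_Q(wake_q);
 *
 *	raw_spin_lock(&lock);
 *	wake_q_add(&wake_q, p);
 *	raw_spin_unlock(&lock);
 *
 *	wake_up_q(&wake_q);
 */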
1066
1067 /*
1068 * resched_curr - mark rq's current task 'to be rescheduled now'.
1069 *
1070 * On UP this means the setting of the need_resched flag, on SMP it
1071 * might also involve a cross-CPU call to trigger the scheduler on
1072 * the target CPU.
1073 */
1074 void resched_curr(struct rq *rq)
1075 {
1076 struct task_struct *curr = rq->curr;
1077 int cpu;
1078
1079 lockdep_assert_rq_held(rq);
1080
1081 if (test_tsk_need_resched(curr))
1082 return;
1083
1084 cpu = cpu_of(rq);
1085
1086 if (cpu == smp_processor_id()) {
1087 set_tsk_need_resched(curr);
1088 set_preempt_need_resched();
1089 return;
1090 }
1091
1092 if (set_nr_and_not_polling(curr))
1093 smp_send_reschedule(cpu);
1094 else
1095 trace_sched_wake_idle_without_ipi(cpu);
1096 }
1097
1098 void resched_cpu(int cpu)
1099 {
1100 struct rq *rq = cpu_rq(cpu);
1101 unsigned long flags;
1102
1103 raw_spin_rq_lock_irqsave(rq, flags);
1104 if (cpu_online(cpu) || cpu == smp_processor_id())
1105 resched_curr(rq);
1106 raw_spin_rq_unlock_irqrestore(rq, flags);
1107 }
1108
1109 #ifdef CONFIG_SMP
1110 #ifdef CONFIG_NO_HZ_COMMON
1111 /*
1112 * In the semi idle case, use the nearest busy CPU for migrating timers
1113 * from an idle CPU. This is good for power-savings.
1114 *
1115 * We don't do a similar optimization for a completely idle system, as
1116 * selecting an idle CPU will add more delays to the timers than intended
1117 * (as that CPU's timer base may not be up to date wrt jiffies etc).
1118 */
1119 int get_nohz_timer_target(void)
1120 {
1121 int i, cpu = smp_processor_id(), default_cpu = -1;
1122 struct sched_domain *sd;
1123 const struct cpumask *hk_mask;
1124
1125 if (housekeeping_cpu(cpu, HK_TYPE_TIMER)) {
1126 if (!idle_cpu(cpu))
1127 return cpu;
1128 default_cpu = cpu;
1129 }
1130
1131 hk_mask = housekeeping_cpumask(HK_TYPE_TIMER);
1132
1133 guard(rcu)();
1134
1135 for_each_domain(cpu, sd) {
1136 for_each_cpu_and(i, sched_domain_span(sd), hk_mask) {
1137 if (cpu == i)
1138 continue;
1139
1140 if (!idle_cpu(i))
1141 return i;
1142 }
1143 }
1144
1145 if (default_cpu == -1)
1146 default_cpu = housekeeping_any_cpu(HK_TYPE_TIMER);
1147
1148 return default_cpu;
1149 }
1150
1151 /*
1152 * When add_timer_on() enqueues a timer into the timer wheel of an
1153 * idle CPU then this timer might expire before the next timer event
1154 * which is scheduled to wake up that CPU. In case of a completely
1155 * idle system the next event might even be infinite time into the
1156 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
1157 * leaves the inner idle loop so the newly added timer is taken into
1158 * account when the CPU goes back to idle and evaluates the timer
1159 * wheel for the next timer event.
1160 */
1161 static void wake_up_idle_cpu(int cpu)
1162 {
1163 struct rq *rq = cpu_rq(cpu);
1164
1165 if (cpu == smp_processor_id())
1166 return;
1167
1168 /*
1169 * Set TIF_NEED_RESCHED and send an IPI if in the non-polling
1170 * part of the idle loop. This forces an exit from the idle loop
1171 * and a round trip to schedule(). Now this could be optimized
1172 * because a simple new idle loop iteration is enough to
1173 * re-evaluate the next tick. Provided some re-ordering of tick
1174 * nohz functions that would need to follow TIF_NR_POLLING
1175 * clearing:
1176 *
1177 * - On most architectures, a simple fetch_or on ti::flags with a
1178 * "0" value would be enough to know if an IPI needs to be sent.
1179 *
1180 * - x86 needs to perform a last need_resched() check between
1181 * monitor and mwait which doesn't take timers into account.
1182 * There a dedicated TIF_TIMER flag would be required to
1183 * fetch_or here and be checked along with TIF_NEED_RESCHED
1184 * before mwait().
1185 *
1186 * However, remote timer enqueue is not such a frequent event
1187 * and testing of the above solutions didn't appear to report
1188 * much benefit.
1189 */
1190 if (set_nr_and_not_polling(rq->idle))
1191 smp_send_reschedule(cpu);
1192 else
1193 trace_sched_wake_idle_without_ipi(cpu);
1194 }
1195
1196 static bool wake_up_full_nohz_cpu(int cpu)
1197 {
1198 /*
1199 * We just need the target to call irq_exit() and re-evaluate
1200 * the next tick. The nohz full kick at least implies that.
1201 * If needed we can still optimize that later with an
1202 * empty IRQ.
1203 */
1204 if (cpu_is_offline(cpu))
1205 return true; /* Don't try to wake offline CPUs. */
1206 if (tick_nohz_full_cpu(cpu)) {
1207 if (cpu != smp_processor_id() ||
1208 tick_nohz_tick_stopped())
1209 tick_nohz_full_kick_cpu(cpu);
1210 return true;
1211 }
1212
1213 return false;
1214 }
1215
1216 /*
1217 * Wake up the specified CPU. If the CPU is going offline, it is the
1218 * caller's responsibility to deal with the lost wakeup, for example,
1219 * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
1220 */
1221 void wake_up_nohz_cpu(int cpu)
1222 {
1223 if (!wake_up_full_nohz_cpu(cpu))
1224 wake_up_idle_cpu(cpu);
1225 }
1226
1227 static void nohz_csd_func(void *info)
1228 {
1229 struct rq *rq = info;
1230 int cpu = cpu_of(rq);
1231 unsigned int flags;
1232
1233 /*
1234 * Release the rq::nohz_csd.
1235 */
1236 flags = atomic_fetch_andnot(NOHZ_KICK_MASK | NOHZ_NEWILB_KICK, nohz_flags(cpu));
1237 WARN_ON(!(flags & NOHZ_KICK_MASK));
1238
1239 rq->idle_balance = idle_cpu(cpu);
1240 if (rq->idle_balance && !need_resched()) {
1241 rq->nohz_idle_balance = flags;
1242 raise_softirq_irqoff(SCHED_SOFTIRQ);
1243 }
1244 }
1245
1246 #endif /* CONFIG_NO_HZ_COMMON */
1247
1248 #ifdef CONFIG_NO_HZ_FULL
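/* Is @p the only runnable task on @rq, queued and in the fair class? */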
1249 static inline bool __need_bw_check(struct rq *rq, struct task_struct *p)
1250 {
1251 if (rq->nr_running != 1)
1252 return false;
1253
1254 if (p->sched_class != &fair_sched_class)
1255 return false;
1256
1257 if (!task_on_rq_queued(p))
1258 return false;
1259
1260 return true;
1261 }
1262
1263 bool sched_can_stop_tick(struct rq *rq)
1264 {
1265 int fifo_nr_running;
1266
1267 /* Deadline tasks, even if single, need the tick */
1268 if (rq->dl.dl_nr_running)
1269 return false;
1270
1271 /*
1272 * If there is more than one RR task, we need the tick to affect the
1273 * actual RR behaviour.
1274 */
1275 if (rq->rt.rr_nr_running) {
1276 if (rq->rt.rr_nr_running == 1)
1277 return true;
1278 else
1279 return false;
1280 }
1281
1282 /*
1283 * If there's no RR tasks, but FIFO tasks, we can skip the tick, no
1284 * forced preemption between FIFO tasks.
1285 */
1286 fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
1287 if (fifo_nr_running)
1288 return true;
1289
1290 /*
1291 * If there are no DL,RR/FIFO tasks, there must only be CFS or SCX tasks
1292 * left. For CFS, if there's more than one we need the tick for
1293 * involuntary preemption. For SCX, ask.
1294 */
1295 if (scx_enabled() && !scx_can_stop_tick(rq))
1296 return false;
1297
1298 if (rq->cfs.nr_running > 1)
1299 return false;
1300
1301 /*
1302 * If there is one task and it has CFS runtime bandwidth constraints
1303 * and it's on the cpu now we don't want to stop the tick.
1304 * This check prevents clearing the bit if a newly enqueued task here is
1305 * dequeued by migrating while the constrained task continues to run.
1306 * E.g. going from 2->1 without going through pick_next_task().
1307 */
1308 if (__need_bw_check(rq, rq->curr)) {
1309 if (cfs_task_bw_constrained(rq->curr))
1310 return false;
1311 }
1312
1313 return true;
1314 }
1315 #endif /* CONFIG_NO_HZ_FULL */
1316 #endif /* CONFIG_SMP */
1317
1318 #if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
1319 (defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
1320 /*
1321 * Iterate task_group tree rooted at *from, calling @down when first entering a
1322 * node and @up when leaving it for the final time.
1323 *
1324 * Caller must hold rcu_lock or sufficient equivalent.
1325 */
1326 int walk_tg_tree_from(struct task_group *from,
1327 tg_visitor down, tg_visitor up, void *data)
1328 {
1329 struct task_group *parent, *child;
1330 int ret;
1331
1332 parent = from;
1333
1334 down:
1335 ret = (*down)(parent, data);
1336 if (ret)
1337 goto out;
1338 list_for_each_entry_rcu(child, &parent->children, siblings) {
1339 parent = child;
1340 goto down;
1341
1342 up:
1343 continue;
1344 }
1345 ret = (*up)(parent, data);
1346 if (ret || parent == from)
1347 goto out;
1348
1349 child = parent;
1350 parent = parent->parent;
1351 if (parent)
1352 goto up;
1353 out:
1354 return ret;
1355 }
1356
1357 int tg_nop(struct task_group *tg, void *data)
1358 {
1359 return 0;
1360 }
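
/*
 * Illustrative usage (sketch; 'tg_down' stands for a caller-provided
 * tg_visitor, it is not a function defined here):
 *
 *	rcu_read_lock();
 *	walk_tg_tree_from(&root_task_group, tg_down, tg_nop, NULL);
 *	rcu_read_unlock();
 */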
1361 #endif
1362
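/* (Re)compute @p's load weight from its static priority. */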
1363 void set_load_weight(struct task_struct *p, bool update_load)
1364 {
1365 int prio = p->static_prio - MAX_RT_PRIO;
1366 struct load_weight lw;
1367
1368 if (task_has_idle_policy(p)) {
1369 lw.weight = scale_load(WEIGHT_IDLEPRIO);
1370 lw.inv_weight = WMULT_IDLEPRIO;
1371 } else {
1372 lw.weight = scale_load(sched_prio_to_weight[prio]);
1373 lw.inv_weight = sched_prio_to_wmult[prio];
1374 }
1375
1376 /*
1377 * SCHED_OTHER tasks have to update their load when changing their
1378 * weight
1379 */
1380 if (update_load && p->sched_class->reweight_task)
1381 p->sched_class->reweight_task(task_rq(p), p, &lw);
1382 else
1383 p->se.load = lw;
1384 }
1385
1386 #ifdef CONFIG_UCLAMP_TASK
1387 /*
1388 * Serializes updates of utilization clamp values
1389 *
1390 * The (slow-path) user-space triggers utilization clamp value updates which
1391 * can require updates on (fast-path) scheduler's data structures used to
1392 * support enqueue/dequeue operations.
1393 * While the per-CPU rq lock protects fast-path update operations, user-space
1394 * requests are serialized using a mutex to reduce the risk of conflicting
1395 * updates or API abuses.
1396 */
1397 static DEFINE_MUTEX(uclamp_mutex);
1398
1399 /* Max allowed minimum utilization */
1400 static unsigned int __maybe_unused sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;
1401
1402 /* Max allowed maximum utilization */
1403 static unsigned int __maybe_unused sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;
1404
1405 /*
1406 * By default RT tasks run at the maximum performance point/capacity of the
1407 * system. Uclamp enforces this by always setting UCLAMP_MIN of RT tasks to
1408 * SCHED_CAPACITY_SCALE.
1409 *
1410 * This knob allows admins to change the default behavior when uclamp is being
1411 * used. In battery powered devices, particularly, running at the maximum
1412 * capacity and frequency will increase energy consumption and shorten the
1413 * battery life.
1414 *
1415 * This knob only affects RT tasks whose uclamp_se->user_defined == false.
1416 *
1417 * This knob will not override the system default sched_util_clamp_min defined
1418 * above.
1419 */
1420 unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE;
1421
1422 /* All clamps are required to be less or equal than these values */
1423 static struct uclamp_se uclamp_default[UCLAMP_CNT];
1424
1425 /*
1426 * This static key is used to reduce the uclamp overhead in the fast path. It
1427 * primarily disables the call to uclamp_rq_{inc, dec}() in
1428 * enqueue/dequeue_task().
1429 *
1430 * This allows users to continue to enable uclamp in their kernel config with
1431 * minimum uclamp overhead in the fast path.
1432 *
1433 * As soon as userspace modifies any of the uclamp knobs, the static key is
1434 * enabled, since we have actual users that make use of uclamp
1435 * functionality.
1436 *
1437 * The knobs that would enable this static key are:
1438 *
1439 * * A task modifying its uclamp value with sched_setattr().
1440 * * An admin modifying the sysctl_sched_uclamp_{min, max} via procfs.
1441 * * An admin modifying the cgroup cpu.uclamp.{min, max}
1442 */
1443 DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);
1444
1445 static inline unsigned int
1446 uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
1447 unsigned int clamp_value)
1448 {
1449 /*
1450 * Avoid blocked utilization pushing up the frequency when we go
1451 * idle (which drops the max-clamp) by retaining the last known
1452 * max-clamp.
1453 */
1454 if (clamp_id == UCLAMP_MAX) {
1455 rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
1456 return clamp_value;
1457 }
1458
1459 return uclamp_none(UCLAMP_MIN);
1460 }
1461
1462 static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
1463 unsigned int clamp_value)
1464 {
1465 /* Reset max-clamp retention only on idle exit */
1466 if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
1467 return;
1468
1469 uclamp_rq_set(rq, clamp_id, clamp_value);
1470 }
1471
1472 static inline
1473 unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
1474 unsigned int clamp_value)
1475 {
1476 struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
1477 int bucket_id = UCLAMP_BUCKETS - 1;
1478
1479 /*
1480 * Since both min and max clamps are max aggregated, find the
1481 * top most bucket with tasks in.
1482 */
1483 for ( ; bucket_id >= 0; bucket_id--) {
1484 if (!bucket[bucket_id].tasks)
1485 continue;
1486 return bucket[bucket_id].value;
1487 }
1488
1489 /* No tasks -- default clamp values */
1490 return uclamp_idle_value(rq, clamp_id, clamp_value);
1491 }
1492
1493 static void __uclamp_update_util_min_rt_default(struct task_struct *p)
1494 {
1495 unsigned int default_util_min;
1496 struct uclamp_se *uc_se;
1497
1498 lockdep_assert_held(&p->pi_lock);
1499
1500 uc_se = &p->uclamp_req[UCLAMP_MIN];
1501
1502 /* Only sync if user didn't override the default */
1503 if (uc_se->user_defined)
1504 return;
1505
1506 default_util_min = sysctl_sched_uclamp_util_min_rt_default;
1507 uclamp_se_set(uc_se, default_util_min, false);
1508 }
1509
1510 static void uclamp_update_util_min_rt_default(struct task_struct *p)
1511 {
1512 if (!rt_task(p))
1513 return;
1514
1515 /* Protect updates to p->uclamp_* */
1516 guard(task_rq_lock)(p);
1517 __uclamp_update_util_min_rt_default(p);
1518 }
1519
1520 static inline struct uclamp_se
1521 uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
1522 {
1523 /* Copy by value as we could modify it */
1524 struct uclamp_se uc_req = p->uclamp_req[clamp_id];
1525 #ifdef CONFIG_UCLAMP_TASK_GROUP
1526 unsigned int tg_min, tg_max, value;
1527
1528 /*
1529 * Tasks in autogroups or root task group will be
1530 * restricted by system defaults.
1531 */
1532 if (task_group_is_autogroup(task_group(p)))
1533 return uc_req;
1534 if (task_group(p) == &root_task_group)
1535 return uc_req;
1536
1537 tg_min = task_group(p)->uclamp[UCLAMP_MIN].value;
1538 tg_max = task_group(p)->uclamp[UCLAMP_MAX].value;
1539 value = uc_req.value;
1540 value = clamp(value, tg_min, tg_max);
1541 uclamp_se_set(&uc_req, value, false);
1542 #endif
1543
1544 return uc_req;
1545 }
1546
1547 /*
1548 * The effective clamp bucket index of a task depends on, by increasing
1549 * priority:
1550 * - the task specific clamp value, when explicitly requested from userspace
1551 * - the task group effective clamp value, for tasks neither in the root
1552 * group nor in an autogroup
1553 * - the system default clamp value, defined by the sysadmin
1554 */
1555 static inline struct uclamp_se
1556 uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
1557 {
1558 struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id);
1559 struct uclamp_se uc_max = uclamp_default[clamp_id];
1560
1561 /* System default restrictions always apply */
1562 if (unlikely(uc_req.value > uc_max.value))
1563 return uc_max;
1564
1565 return uc_req;
1566 }
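
/*
 * Worked example (illustrative): a task requests UCLAMP_MIN = 800 while its
 * task group's effective range is [0, 512] and the system-wide default for
 * UCLAMP_MIN is 1024. uclamp_tg_restrict() clamps the request to 512, which
 * is below the system default, so 512 becomes the effective value.
 */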
1567
1568 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
1569 {
1570 struct uclamp_se uc_eff;
1571
1572 /* Task currently refcounted: use back-annotated (effective) value */
1573 if (p->uclamp[clamp_id].active)
1574 return (unsigned long)p->uclamp[clamp_id].value;
1575
1576 uc_eff = uclamp_eff_get(p, clamp_id);
1577
1578 return (unsigned long)uc_eff.value;
1579 }
1580
1581 /*
1582 * When a task is enqueued on a rq, the clamp bucket currently defined by the
1583 * task's uclamp::bucket_id is refcounted on that rq. This also immediately
1584 * updates the rq's clamp value if required.
1585 *
1586 * Tasks can have a task-specific value requested from user-space, track
1587 * within each bucket the maximum value for tasks refcounted in it.
1588 * This "local max aggregation" allows to track the exact "requested" value
1589 * for each bucket when all its RUNNABLE tasks require the same clamp.
1590 */
1591 static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
1592 enum uclamp_id clamp_id)
1593 {
1594 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1595 struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1596 struct uclamp_bucket *bucket;
1597
1598 lockdep_assert_rq_held(rq);
1599
1600 /* Update task effective clamp */
1601 p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);
1602
1603 bucket = &uc_rq->bucket[uc_se->bucket_id];
1604 bucket->tasks++;
1605 uc_se->active = true;
1606
1607 uclamp_idle_reset(rq, clamp_id, uc_se->value);
1608
1609 /*
1610 * Local max aggregation: rq buckets always track the max
1611 * "requested" clamp value of its RUNNABLE tasks.
1612 */
1613 if (bucket->tasks == 1 || uc_se->value > bucket->value)
1614 bucket->value = uc_se->value;
1615
1616 if (uc_se->value > uclamp_rq_get(rq, clamp_id))
1617 uclamp_rq_set(rq, clamp_id, uc_se->value);
1618 }
1619
1620 /*
1621 * When a task is dequeued from a rq, the clamp bucket refcounted by the task
1622 * is released. If this is the last task reference counting the rq's max
1623 * active clamp value, then the rq's clamp value is updated.
1624 *
1625 * Both refcounted tasks and rq's cached clamp values are expected to be
1626 * always valid. If it's detected they are not, as defensive programming,
1627 * enforce the expected state and warn.
1628 */
1629 static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
1630 enum uclamp_id clamp_id)
1631 {
1632 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1633 struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1634 struct uclamp_bucket *bucket;
1635 unsigned int bkt_clamp;
1636 unsigned int rq_clamp;
1637
1638 lockdep_assert_rq_held(rq);
1639
1640 /*
1641 * If sched_uclamp_used was enabled after task @p was enqueued,
1642 * we could end up with unbalanced call to uclamp_rq_dec_id().
1643 *
1644 * In this case the uc_se->active flag should be false since no uclamp
1645 * accounting was performed at enqueue time and we can just return
1646 * here.
1647 *
1648 * Need to be careful of the following enqueue/dequeue ordering
1649 * problem too
1650 *
1651 * enqueue(taskA)
1652 * // sched_uclamp_used gets enabled
1653 * enqueue(taskB)
1654 * dequeue(taskA)
1655 * // Must not decrement bucket->tasks here
1656 * dequeue(taskB)
1657 *
1658 * where we could end up with stale data in uc_se and
1659 * bucket[uc_se->bucket_id].
1660 *
1661 * The following check here eliminates the possibility of such race.
1662 */
1663 if (unlikely(!uc_se->active))
1664 return;
1665
1666 bucket = &uc_rq->bucket[uc_se->bucket_id];
1667
1668 SCHED_WARN_ON(!bucket->tasks);
1669 if (likely(bucket->tasks))
1670 bucket->tasks--;
1671
1672 uc_se->active = false;
1673
1674 /*
1675 * Keep "local max aggregation" simple and accept to (possibly)
1676 * overboost some RUNNABLE tasks in the same bucket.
1677 * The rq clamp bucket value is reset to its base value whenever
1678 * there are no more RUNNABLE tasks refcounting it.
1679 */
1680 if (likely(bucket->tasks))
1681 return;
1682
1683 rq_clamp = uclamp_rq_get(rq, clamp_id);
1684 /*
1685 * Defensive programming: this should never happen. If it happens,
1686 * e.g. due to future modification, warn and fix up the expected value.
1687 */
1688 SCHED_WARN_ON(bucket->value > rq_clamp);
1689 if (bucket->value >= rq_clamp) {
1690 bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
1691 uclamp_rq_set(rq, clamp_id, bkt_clamp);
1692 }
1693 }
1694
1695 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
1696 {
1697 enum uclamp_id clamp_id;
1698
1699 /*
1700 * Avoid any overhead until uclamp is actually used by the userspace.
1701 *
1702 * The condition is constructed such that a NOP is generated when
1703 * sched_uclamp_used is disabled.
1704 */
1705 if (!static_branch_unlikely(&sched_uclamp_used))
1706 return;
1707
1708 if (unlikely(!p->sched_class->uclamp_enabled))
1709 return;
1710
1711 if (p->se.sched_delayed)
1712 return;
1713
1714 for_each_clamp_id(clamp_id)
1715 uclamp_rq_inc_id(rq, p, clamp_id);
1716
1717 /* Reset clamp idle holding when there is one RUNNABLE task */
1718 if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
1719 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
1720 }
1721
1722 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
1723 {
1724 enum uclamp_id clamp_id;
1725
1726 /*
1727 * Avoid any overhead until uclamp is actually used by the userspace.
1728 *
1729 * The condition is constructed such that a NOP is generated when
1730 * sched_uclamp_used is disabled.
1731 */
1732 if (!static_branch_unlikely(&sched_uclamp_used))
1733 return;
1734
1735 if (unlikely(!p->sched_class->uclamp_enabled))
1736 return;
1737
1738 if (p->se.sched_delayed)
1739 return;
1740
1741 for_each_clamp_id(clamp_id)
1742 uclamp_rq_dec_id(rq, p, clamp_id);
1743 }
1744
1745 static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p,
1746 enum uclamp_id clamp_id)
1747 {
1748 if (!p->uclamp[clamp_id].active)
1749 return;
1750
1751 uclamp_rq_dec_id(rq, p, clamp_id);
1752 uclamp_rq_inc_id(rq, p, clamp_id);
1753
1754 /*
1755 * Make sure to clear the idle flag if we've transiently reached 0
1756 * active tasks on rq.
1757 */
1758 if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE))
1759 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
1760 }
1761
1762 static inline void
1763 uclamp_update_active(struct task_struct *p)
1764 {
1765 enum uclamp_id clamp_id;
1766 struct rq_flags rf;
1767 struct rq *rq;
1768
1769 /*
1770 * Lock the task and the rq where the task is (or was) queued.
1771 *
1772 * We might lock the (previous) rq of a !RUNNABLE task, but that's the
1773 * price to pay to safely serialize util_{min,max} updates with
1774 * enqueues, dequeues and migration operations.
1775 * This is the same locking schema used by __set_cpus_allowed_ptr().
1776 */
1777 rq = task_rq_lock(p, &rf);
1778
1779 /*
1780 * Setting the clamp bucket is serialized by task_rq_lock().
1781 * If the task is not yet RUNNABLE and its task_struct is not
1782 * affecting a valid clamp bucket, the next time it's enqueued,
1783 * it will already see the updated clamp bucket value.
1784 */
1785 for_each_clamp_id(clamp_id)
1786 uclamp_rq_reinc_id(rq, p, clamp_id);
1787
1788 task_rq_unlock(rq, p, &rf);
1789 }
1790
1791 #ifdef CONFIG_UCLAMP_TASK_GROUP
1792 static inline void
1793 uclamp_update_active_tasks(struct cgroup_subsys_state *css)
1794 {
1795 struct css_task_iter it;
1796 struct task_struct *p;
1797
1798 css_task_iter_start(css, 0, &it);
1799 while ((p = css_task_iter_next(&it)))
1800 uclamp_update_active(p);
1801 css_task_iter_end(&it);
1802 }
1803
1804 static void cpu_util_update_eff(struct cgroup_subsys_state *css);
1805 #endif
1806
1807 #ifdef CONFIG_SYSCTL
1808 #ifdef CONFIG_UCLAMP_TASK_GROUP
1809 static void uclamp_update_root_tg(void)
1810 {
1811 struct task_group *tg = &root_task_group;
1812
1813 uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN],
1814 sysctl_sched_uclamp_util_min, false);
1815 uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
1816 sysctl_sched_uclamp_util_max, false);
1817
1818 guard(rcu)();
1819 cpu_util_update_eff(&root_task_group.css);
1820 }
1821 #else
1822 static void uclamp_update_root_tg(void) { }
1823 #endif
1824
1825 static void uclamp_sync_util_min_rt_default(void)
1826 {
1827 struct task_struct *g, *p;
1828
1829 /*
1830 * copy_process() sysctl_uclamp
1831 * uclamp_min_rt = X;
1832 * write_lock(&tasklist_lock) read_lock(&tasklist_lock)
1833 * // link thread smp_mb__after_spinlock()
1834 * write_unlock(&tasklist_lock) read_unlock(&tasklist_lock);
1835 * sched_post_fork() for_each_process_thread()
1836 * __uclamp_sync_rt() __uclamp_sync_rt()
1837 *
1838 * Ensures that either sched_post_fork() will observe the new
1839 * uclamp_min_rt or for_each_process_thread() will observe the new
1840 * task.
1841 */
1842 read_lock(&tasklist_lock);
1843 smp_mb__after_spinlock();
1844 read_unlock(&tasklist_lock);
1845
1846 guard(rcu)();
1847 for_each_process_thread(g, p)
1848 uclamp_update_util_min_rt_default(p);
1849 }
1850
1851 static int sysctl_sched_uclamp_handler(const struct ctl_table *table, int write,
1852 void *buffer, size_t *lenp, loff_t *ppos)
1853 {
1854 bool update_root_tg = false;
1855 int old_min, old_max, old_min_rt;
1856 int result;
1857
1858 guard(mutex)(&uclamp_mutex);
1859
1860 old_min = sysctl_sched_uclamp_util_min;
1861 old_max = sysctl_sched_uclamp_util_max;
1862 old_min_rt = sysctl_sched_uclamp_util_min_rt_default;
1863
1864 result = proc_dointvec(table, write, buffer, lenp, ppos);
1865 if (result)
1866 goto undo;
1867 if (!write)
1868 return 0;
1869
1870 if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
1871 sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE ||
1872 sysctl_sched_uclamp_util_min_rt_default > SCHED_CAPACITY_SCALE) {
1873
1874 result = -EINVAL;
1875 goto undo;
1876 }
1877
1878 if (old_min != sysctl_sched_uclamp_util_min) {
1879 uclamp_se_set(&uclamp_default[UCLAMP_MIN],
1880 sysctl_sched_uclamp_util_min, false);
1881 update_root_tg = true;
1882 }
1883 if (old_max != sysctl_sched_uclamp_util_max) {
1884 uclamp_se_set(&uclamp_default[UCLAMP_MAX],
1885 sysctl_sched_uclamp_util_max, false);
1886 update_root_tg = true;
1887 }
1888
1889 if (update_root_tg) {
1890 static_branch_enable(&sched_uclamp_used);
1891 uclamp_update_root_tg();
1892 }
1893
1894 if (old_min_rt != sysctl_sched_uclamp_util_min_rt_default) {
1895 static_branch_enable(&sched_uclamp_used);
1896 uclamp_sync_util_min_rt_default();
1897 }
1898
1899 /*
1900 * We update all RUNNABLE tasks only when task groups are in use.
1901 * Otherwise, keep it simple and do just a lazy update at each next
1902 * task enqueue time.
1903 */
1904 return 0;
1905
1906 undo:
1907 sysctl_sched_uclamp_util_min = old_min;
1908 sysctl_sched_uclamp_util_max = old_max;
1909 sysctl_sched_uclamp_util_min_rt_default = old_min_rt;
1910 return result;
1911 }
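/*
 * Illustrative only (assumption: the knobs are exposed under the usual
 * /proc/sys/kernel/ names): the handler above runs when an admin writes
 * e.g.
 *
 *	echo 128  > /proc/sys/kernel/sched_util_clamp_min
 *	echo 1024 > /proc/sys/kernel/sched_util_clamp_max
 *
 * Values are on the SCHED_CAPACITY_SCALE range and util_min must not
 * exceed util_max; an invalid write fails with -EINVAL and the previous
 * values are restored via the 'undo:' path above.
 */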
1912 #endif
1913
1914 static void uclamp_fork(struct task_struct *p)
1915 {
1916 enum uclamp_id clamp_id;
1917
1918 /*
1919 * We don't need to hold task_rq_lock() when updating p->uclamp_* here
1920 * as the task is still at its early fork stages.
1921 */
1922 for_each_clamp_id(clamp_id)
1923 p->uclamp[clamp_id].active = false;
1924
1925 if (likely(!p->sched_reset_on_fork))
1926 return;
1927
1928 for_each_clamp_id(clamp_id) {
1929 uclamp_se_set(&p->uclamp_req[clamp_id],
1930 uclamp_none(clamp_id), false);
1931 }
1932 }
1933
1934 static void uclamp_post_fork(struct task_struct *p)
1935 {
1936 uclamp_update_util_min_rt_default(p);
1937 }
1938
1939 static void __init init_uclamp_rq(struct rq *rq)
1940 {
1941 enum uclamp_id clamp_id;
1942 struct uclamp_rq *uc_rq = rq->uclamp;
1943
1944 for_each_clamp_id(clamp_id) {
1945 uc_rq[clamp_id] = (struct uclamp_rq) {
1946 .value = uclamp_none(clamp_id)
1947 };
1948 }
1949
1950 rq->uclamp_flags = UCLAMP_FLAG_IDLE;
1951 }
1952
1953 static void __init init_uclamp(void)
1954 {
1955 struct uclamp_se uc_max = {};
1956 enum uclamp_id clamp_id;
1957 int cpu;
1958
1959 for_each_possible_cpu(cpu)
1960 init_uclamp_rq(cpu_rq(cpu));
1961
1962 for_each_clamp_id(clamp_id) {
1963 uclamp_se_set(&init_task.uclamp_req[clamp_id],
1964 uclamp_none(clamp_id), false);
1965 }
1966
1967 /* System defaults allow max clamp values for both indexes */
1968 uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false);
1969 for_each_clamp_id(clamp_id) {
1970 uclamp_default[clamp_id] = uc_max;
1971 #ifdef CONFIG_UCLAMP_TASK_GROUP
1972 root_task_group.uclamp_req[clamp_id] = uc_max;
1973 root_task_group.uclamp[clamp_id] = uc_max;
1974 #endif
1975 }
1976 }
1977
1978 #else /* !CONFIG_UCLAMP_TASK */
1979 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { }
1980 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
1981 static inline void uclamp_fork(struct task_struct *p) { }
1982 static inline void uclamp_post_fork(struct task_struct *p) { }
1983 static inline void init_uclamp(void) { }
1984 #endif /* CONFIG_UCLAMP_TASK */
1985
1986 bool sched_task_on_rq(struct task_struct *p)
1987 {
1988 return task_on_rq_queued(p);
1989 }
1990
1991 unsigned long get_wchan(struct task_struct *p)
1992 {
1993 unsigned long ip = 0;
1994 unsigned int state;
1995
1996 if (!p || p == current)
1997 return 0;
1998
1999 /* Only get wchan if task is blocked and we can keep it that way. */
2000 raw_spin_lock_irq(&p->pi_lock);
2001 state = READ_ONCE(p->__state);
2002 smp_rmb(); /* see try_to_wake_up() */
2003 if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
2004 ip = __get_wchan(p);
2005 raw_spin_unlock_irq(&p->pi_lock);
2006
2007 return ip;
2008 }
2009
2010 void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
2011 {
2012 if (!(flags & ENQUEUE_NOCLOCK))
2013 update_rq_clock(rq);
2014
2015 if (!(flags & ENQUEUE_RESTORE)) {
2016 sched_info_enqueue(rq, p);
2017 psi_enqueue(p, (flags & ENQUEUE_WAKEUP) && !(flags & ENQUEUE_MIGRATED));
2018 }
2019
2020 p->sched_class->enqueue_task(rq, p, flags);
2021 /*
2022 * Must be after ->enqueue_task() because ENQUEUE_DELAYED can clear
2023 * ->sched_delayed.
2024 */
2025 uclamp_rq_inc(rq, p);
2026
2027 if (sched_core_enabled(rq))
2028 sched_core_enqueue(rq, p);
2029 }
2030
2031 /*
2032 * Must only return false when DEQUEUE_SLEEP.
2033 */
2034 inline bool dequeue_task(struct rq *rq, struct task_struct *p, int flags)
2035 {
2036 if (sched_core_enabled(rq))
2037 sched_core_dequeue(rq, p, flags);
2038
2039 if (!(flags & DEQUEUE_NOCLOCK))
2040 update_rq_clock(rq);
2041
2042 if (!(flags & DEQUEUE_SAVE)) {
2043 sched_info_dequeue(rq, p);
2044 psi_dequeue(p, flags & DEQUEUE_SLEEP);
2045 }
2046
2047 /*
2048 * Must be before ->dequeue_task() because ->dequeue_task() can 'fail'
2049 * and mark the task ->sched_delayed.
2050 */
2051 uclamp_rq_dec(rq, p);
2052 return p->sched_class->dequeue_task(rq, p, flags);
2053 }
2054
2055 void activate_task(struct rq *rq, struct task_struct *p, int flags)
2056 {
2057 if (task_on_rq_migrating(p))
2058 flags |= ENQUEUE_MIGRATED;
2059 if (flags & ENQUEUE_MIGRATED)
2060 sched_mm_cid_migrate_to(rq, p);
2061
2062 enqueue_task(rq, p, flags);
2063
2064 WRITE_ONCE(p->on_rq, TASK_ON_RQ_QUEUED);
2065 ASSERT_EXCLUSIVE_WRITER(p->on_rq);
2066 }
2067
2068 void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
2069 {
2070 SCHED_WARN_ON(flags & DEQUEUE_SLEEP);
2071
2072 WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
2073 ASSERT_EXCLUSIVE_WRITER(p->on_rq);
2074
2075 /*
2076 * Code explicitly relies on TASK_ON_RQ_MIGRATING being set *before*
2077 * dequeue_task() and cleared *after* enqueue_task().
2078 */
2079
2080 dequeue_task(rq, p, flags);
2081 }
2082
2083 static void block_task(struct rq *rq, struct task_struct *p, int flags)
2084 {
2085 if (dequeue_task(rq, p, DEQUEUE_SLEEP | flags))
2086 __block_task(rq, p);
2087 }
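/*
 * A minimal sketch of how the helpers above compose on the migration
 * path (this is what move_queued_task() below does for real):
 *
 *	deactivate_task(src_rq, p, DEQUEUE_NOCLOCK);
 *	set_task_cpu(p, new_cpu);
 *	activate_task(dst_rq, p, 0);
 *
 * deactivate_task() marks p->on_rq TASK_ON_RQ_MIGRATING and
 * activate_task() sets it back to TASK_ON_RQ_QUEUED, while a task going
 * to sleep instead goes through block_task(), i.e. a DEQUEUE_SLEEP
 * dequeue.
 */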
2088
2089 /**
2090 * task_curr - is this task currently executing on a CPU?
2091 * @p: the task in question.
2092 *
2093 * Return: 1 if the task is currently executing. 0 otherwise.
2094 */
2095 inline int task_curr(const struct task_struct *p)
2096 {
2097 return cpu_curr(task_cpu(p)) == p;
2098 }
2099
2100 /*
2101 * ->switching_to() is called with the pi_lock and rq_lock held and must not
2102 * mess with locking.
2103 */
2104 void check_class_changing(struct rq *rq, struct task_struct *p,
2105 const struct sched_class *prev_class)
2106 {
2107 if (prev_class != p->sched_class && p->sched_class->switching_to)
2108 p->sched_class->switching_to(rq, p);
2109 }
2110
2111 /*
2112 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
2113 * use the balance_callback list if you want balancing.
2114 *
2115 * this means any call to check_class_changed() must be followed by a call to
2116 * balance_callback().
2117 */
2118 void check_class_changed(struct rq *rq, struct task_struct *p,
2119 const struct sched_class *prev_class,
2120 int oldprio)
2121 {
2122 if (prev_class != p->sched_class) {
2123 if (prev_class->switched_from)
2124 prev_class->switched_from(rq, p);
2125
2126 p->sched_class->switched_to(rq, p);
2127 } else if (oldprio != p->prio || dl_task(p))
2128 p->sched_class->prio_changed(rq, p, oldprio);
2129 }
2130
2131 void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags)
2132 {
2133 if (p->sched_class == rq->curr->sched_class)
2134 rq->curr->sched_class->wakeup_preempt(rq, p, flags);
2135 else if (sched_class_above(p->sched_class, rq->curr->sched_class))
2136 resched_curr(rq);
2137
2138 /*
2139 * A queue event has occurred, and we're going to schedule. In
2140 * this case, we can save a useless back to back clock update.
2141 */
2142 if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
2143 rq_clock_skip_update(rq);
2144 }
2145
2146 static __always_inline
2147 int __task_state_match(struct task_struct *p, unsigned int state)
2148 {
2149 if (READ_ONCE(p->__state) & state)
2150 return 1;
2151
2152 if (READ_ONCE(p->saved_state) & state)
2153 return -1;
2154
2155 return 0;
2156 }
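/*
 * Reading the return value above: 1 means p->__state matched @state,
 * -1 means only p->saved_state matched (state saved across e.g. a
 * PREEMPT_RT rtlock sleep or the freezer, see task_state_match() below),
 * and 0 means neither matched. wait_task_inactive() below treats -1 as
 * "still queued" so that it keeps waiting for the task to really block.
 */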
2157
2158 static __always_inline
2159 int task_state_match(struct task_struct *p, unsigned int state)
2160 {
2161 /*
2162 * Serialize against current_save_and_set_rtlock_wait_state(),
2163 * current_restore_rtlock_saved_state(), and __refrigerator().
2164 */
2165 guard(raw_spinlock_irq)(&p->pi_lock);
2166 return __task_state_match(p, state);
2167 }
2168
2169 /*
2170 * wait_task_inactive - wait for a thread to unschedule.
2171 *
2172 * Wait for the thread to block in any of the states set in @match_state.
2173 * If it changes, i.e. @p might have woken up, then return zero. When we
2174 * succeed in waiting for @p to be off its CPU, we return a positive number
2175 * (its total switch count). If a second call a short while later returns the
2176 * same number, the caller can be sure that @p has remained unscheduled the
2177 * whole time.
2178 *
2179 * The caller must ensure that the task *will* unschedule sometime soon,
2180 * else this function might spin for a *long* time. This function can't
2181 * be called with interrupts off, or it may introduce deadlock with
2182 * smp_call_function() if an IPI is sent by the same process we are
2183 * waiting to become inactive.
2184 */
2185 unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
2186 {
2187 int running, queued, match;
2188 struct rq_flags rf;
2189 unsigned long ncsw;
2190 struct rq *rq;
2191
2192 for (;;) {
2193 /*
2194 * We do the initial early heuristics without holding
2195 * any task-queue locks at all. We'll only try to get
2196 * the runqueue lock when things look like they will
2197 * work out!
2198 */
2199 rq = task_rq(p);
2200
2201 /*
2202 * If the task is actively running on another CPU
2203 * still, just relax and busy-wait without holding
2204 * any locks.
2205 *
2206 * NOTE! Since we don't hold any locks, it's not
2207 * even sure that "rq" stays as the right runqueue!
2208 * But we don't care, since "task_on_cpu()" will
2209 * return false if the runqueue has changed and p
2210 * is actually now running somewhere else!
2211 */
2212 while (task_on_cpu(rq, p)) {
2213 if (!task_state_match(p, match_state))
2214 return 0;
2215 cpu_relax();
2216 }
2217
2218 /*
2219 * Ok, time to look more closely! We need the rq
2220 * lock now, to be *sure*. If we're wrong, we'll
2221 * just go back and repeat.
2222 */
2223 rq = task_rq_lock(p, &rf);
2224 trace_sched_wait_task(p);
2225 running = task_on_cpu(rq, p);
2226 queued = task_on_rq_queued(p);
2227 ncsw = 0;
2228 if ((match = __task_state_match(p, match_state))) {
2229 /*
2230 * When matching on p->saved_state, consider this task
2231 * still queued so it will wait.
2232 */
2233 if (match < 0)
2234 queued = 1;
2235 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
2236 }
2237 task_rq_unlock(rq, p, &rf);
2238
2239 /*
2240 * If it changed from the expected state, bail out now.
2241 */
2242 if (unlikely(!ncsw))
2243 break;
2244
2245 /*
2246 * Was it really running after all now that we
2247 * checked with the proper locks actually held?
2248 *
2249 * Oops. Go back and try again..
2250 */
2251 if (unlikely(running)) {
2252 cpu_relax();
2253 continue;
2254 }
2255
2256 /*
2257 * It's not enough that it's not actively running,
2258 * it must be off the runqueue _entirely_, and not
2259 * preempted!
2260 *
2261 * So if it was still runnable (but just not actively
2262 * running right now), it's preempted, and we should
2263 * yield - it could be a while.
2264 */
2265 if (unlikely(queued)) {
2266 ktime_t to = NSEC_PER_SEC / HZ;
2267
2268 set_current_state(TASK_UNINTERRUPTIBLE);
2269 schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD);
2270 continue;
2271 }
2272
2273 /*
2274 * Ahh, all good. It wasn't running, and it wasn't
2275 * runnable, which means that it will never become
2276 * running in the future either. We're all done!
2277 */
2278 break;
2279 }
2280
2281 return ncsw;
2282 }
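/*
 * A sketch of the two-call pattern described in the comment above
 * (illustrative only, not a caller in this file):
 *
 *	unsigned long ncsw;
 *
 *	ncsw = wait_task_inactive(p, TASK_UNINTERRUPTIBLE);
 *	...
 *	still_off_cpu = ncsw &&
 *		wait_task_inactive(p, TASK_UNINTERRUPTIBLE) == ncsw;
 *
 * A zero return means @p changed away from the matched state instead of
 * blocking.
 */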
2283
2284 #ifdef CONFIG_SMP
2285
2286 static void
2287 __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx);
2288
2289 static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
2290 {
2291 struct affinity_context ac = {
2292 .new_mask = cpumask_of(rq->cpu),
2293 .flags = SCA_MIGRATE_DISABLE,
2294 };
2295
2296 if (likely(!p->migration_disabled))
2297 return;
2298
2299 if (p->cpus_ptr != &p->cpus_mask)
2300 return;
2301
2302 /*
2303 * Violates locking rules! See comment in __do_set_cpus_allowed().
2304 */
2305 __do_set_cpus_allowed(p, &ac);
2306 }
2307
2308 void migrate_disable(void)
2309 {
2310 struct task_struct *p = current;
2311
2312 if (p->migration_disabled) {
2313 #ifdef CONFIG_DEBUG_PREEMPT
2314 /*
2315 * Warn about overflow half-way through the range.
2316 */
2317 WARN_ON_ONCE((s16)p->migration_disabled < 0);
2318 #endif
2319 p->migration_disabled++;
2320 return;
2321 }
2322
2323 guard(preempt)();
2324 this_rq()->nr_pinned++;
2325 p->migration_disabled = 1;
2326 }
2327 EXPORT_SYMBOL_GPL(migrate_disable);
2328
2329 void migrate_enable(void)
2330 {
2331 struct task_struct *p = current;
2332 struct affinity_context ac = {
2333 .new_mask = &p->cpus_mask,
2334 .flags = SCA_MIGRATE_ENABLE,
2335 };
2336
2337 #ifdef CONFIG_DEBUG_PREEMPT
2338 /*
2339 * Check both overflow from migrate_disable() and superfluous
2340 * migrate_enable().
2341 */
2342 if (WARN_ON_ONCE((s16)p->migration_disabled <= 0))
2343 return;
2344 #endif
2345
2346 if (p->migration_disabled > 1) {
2347 p->migration_disabled--;
2348 return;
2349 }
2350
2351 /*
2352 * Ensure stop_task runs either before or after this, and that
2353 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
2354 */
2355 guard(preempt)();
2356 if (p->cpus_ptr != &p->cpus_mask)
2357 __set_cpus_allowed_ptr(p, &ac);
2358 /*
2359 * Mustn't clear migration_disabled() until cpus_ptr points back at the
2360 * regular cpus_mask, otherwise things that race (eg.
2361 * select_fallback_rq) get confused.
2362 */
2363 barrier();
2364 p->migration_disabled = 0;
2365 this_rq()->nr_pinned--;
2366 }
2367 EXPORT_SYMBOL_GPL(migrate_enable);
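/*
 * Typical (illustrative) use of the pair above: keep a preemptible
 * section pinned to the current CPU, e.g. while touching per-CPU state
 * from code that may sleep:
 *
 *	migrate_disable();
 *	... preemptible, but guaranteed to stay on this CPU ...
 *	migrate_enable();
 *
 * Nesting is allowed; only the outermost migrate_enable() points
 * cpus_ptr back at the regular cpus_mask (see above).
 */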
2368
2369 static inline bool rq_has_pinned_tasks(struct rq *rq)
2370 {
2371 return rq->nr_pinned;
2372 }
2373
2374 /*
2375 * Per-CPU kthreads are allowed to run on !active && online CPUs, see
2376 * __set_cpus_allowed_ptr() and select_fallback_rq().
2377 */
2378 static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
2379 {
2380 /* When not in the task's cpumask, no point in looking further. */
2381 if (!task_allowed_on_cpu(p, cpu))
2382 return false;
2383
2384 /* migrate_disabled() must be allowed to finish. */
2385 if (is_migration_disabled(p))
2386 return cpu_online(cpu);
2387
2388 /* Non-kernel threads are not allowed during either online or offline. */
2389 if (!(p->flags & PF_KTHREAD))
2390 return cpu_active(cpu);
2391
2392 /* KTHREAD_IS_PER_CPU is always allowed. */
2393 if (kthread_is_per_cpu(p))
2394 return cpu_online(cpu);
2395
2396 /* Regular kernel threads don't get to stay during offline. */
2397 if (cpu_dying(cpu))
2398 return false;
2399
2400 /* But are allowed during online. */
2401 return cpu_online(cpu);
2402 }
2403
2404 /*
2405 * This is how migration works:
2406 *
2407 * 1) we invoke migration_cpu_stop() on the target CPU using
2408 * stop_one_cpu().
2409 * 2) stopper starts to run (implicitly forcing the migrated thread
2410 * off the CPU)
2411 * 3) it checks whether the migrated task is still in the wrong runqueue.
2412 * 4) if it's in the wrong runqueue then the migration thread removes
2413 * it and puts it into the right queue.
2414 * 5) stopper completes and stop_one_cpu() returns and the migration
2415 * is done.
2416 */
2417
2418 /*
2419 * move_queued_task - move a queued task to new rq.
2420 *
2421 * Returns (locked) new rq. Old rq's lock is released.
2422 */
2423 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
2424 struct task_struct *p, int new_cpu)
2425 {
2426 lockdep_assert_rq_held(rq);
2427
2428 deactivate_task(rq, p, DEQUEUE_NOCLOCK);
2429 set_task_cpu(p, new_cpu);
2430 rq_unlock(rq, rf);
2431
2432 rq = cpu_rq(new_cpu);
2433
2434 rq_lock(rq, rf);
2435 WARN_ON_ONCE(task_cpu(p) != new_cpu);
2436 activate_task(rq, p, 0);
2437 wakeup_preempt(rq, p, 0);
2438
2439 return rq;
2440 }
2441
2442 struct migration_arg {
2443 struct task_struct *task;
2444 int dest_cpu;
2445 struct set_affinity_pending *pending;
2446 };
2447
2448 /*
2449 * @refs: number of wait_for_completion()
2450 * @stop_pending: is @stop_work in use
2451 */
2452 struct set_affinity_pending {
2453 refcount_t refs;
2454 unsigned int stop_pending;
2455 struct completion done;
2456 struct cpu_stop_work stop_work;
2457 struct migration_arg arg;
2458 };
2459
2460 /*
2461 * Move (not current) task off this CPU, onto the destination CPU. We're doing
2462 * this because either it can't run here any more (set_cpus_allowed()
2463 * away from this CPU, or CPU going down), or because we're
2464 * attempting to rebalance this task on exec (sched_exec).
2465 *
2466 * So we race with normal scheduler movements, but that's OK, as long
2467 * as the task is no longer on this CPU.
2468 */
2469 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
2470 struct task_struct *p, int dest_cpu)
2471 {
2472 /* Affinity changed (again). */
2473 if (!is_cpu_allowed(p, dest_cpu))
2474 return rq;
2475
2476 rq = move_queued_task(rq, rf, p, dest_cpu);
2477
2478 return rq;
2479 }
2480
2481 /*
2482 * migration_cpu_stop - this will be executed by a high-prio stopper thread
2483 * and performs thread migration by bumping thread off CPU then
2484 * 'pushing' onto another runqueue.
2485 */
2486 static int migration_cpu_stop(void *data)
2487 {
2488 struct migration_arg *arg = data;
2489 struct set_affinity_pending *pending = arg->pending;
2490 struct task_struct *p = arg->task;
2491 struct rq *rq = this_rq();
2492 bool complete = false;
2493 struct rq_flags rf;
2494
2495 /*
2496 * The original target CPU might have gone down and we might
2497 * be on another CPU but it doesn't matter.
2498 */
2499 local_irq_save(rf.flags);
2500 /*
2501 * We need to explicitly wake pending tasks before running
2502 * __migrate_task() such that we will not miss enforcing cpus_ptr
2503 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
2504 */
2505 flush_smp_call_function_queue();
2506
2507 raw_spin_lock(&p->pi_lock);
2508 rq_lock(rq, &rf);
2509
2510 /*
2511 * If we were passed a pending, then ->stop_pending was set, thus
2512 * p->migration_pending must have remained stable.
2513 */
2514 WARN_ON_ONCE(pending && pending != p->migration_pending);
2515
2516 /*
2517 * If task_rq(p) != rq, it cannot be migrated here, because we're
2518 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
2519 * we're holding p->pi_lock.
2520 */
2521 if (task_rq(p) == rq) {
2522 if (is_migration_disabled(p))
2523 goto out;
2524
2525 if (pending) {
2526 p->migration_pending = NULL;
2527 complete = true;
2528
2529 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask))
2530 goto out;
2531 }
2532
2533 if (task_on_rq_queued(p)) {
2534 update_rq_clock(rq);
2535 rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
2536 } else {
2537 p->wake_cpu = arg->dest_cpu;
2538 }
2539
2540 /*
2541 * XXX __migrate_task() can fail, at which point we might end
2542 * up running on a dodgy CPU, AFAICT this can only happen
2543 * during CPU hotplug, at which point we'll get pushed out
2544 * anyway, so it's probably not a big deal.
2545 */
2546
2547 } else if (pending) {
2548 /*
2549 * This happens when we get migrated between migrate_enable()'s
2550 * preempt_enable() and scheduling the stopper task. At that
2551 * point we're a regular task again and not current anymore.
2552 *
2553 * A !PREEMPT kernel has a giant hole here, which makes it far
2554 * more likely.
2555 */
2556
2557 /*
2558 * The task moved before the stopper got to run. We're holding
2559 * ->pi_lock, so the allowed mask is stable - if it got
2560 * somewhere allowed, we're done.
2561 */
2562 if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) {
2563 p->migration_pending = NULL;
2564 complete = true;
2565 goto out;
2566 }
2567
2568 /*
2569 * When migrate_enable() hits a rq mis-match we can't reliably
2570 * determine is_migration_disabled() and so have to chase after
2571 * it.
2572 */
2573 WARN_ON_ONCE(!pending->stop_pending);
2574 preempt_disable();
2575 task_rq_unlock(rq, p, &rf);
2576 stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
2577 &pending->arg, &pending->stop_work);
2578 preempt_enable();
2579 return 0;
2580 }
2581 out:
2582 if (pending)
2583 pending->stop_pending = false;
2584 task_rq_unlock(rq, p, &rf);
2585
2586 if (complete)
2587 complete_all(&pending->done);
2588
2589 return 0;
2590 }
2591
2592 int push_cpu_stop(void *arg)
2593 {
2594 struct rq *lowest_rq = NULL, *rq = this_rq();
2595 struct task_struct *p = arg;
2596
2597 raw_spin_lock_irq(&p->pi_lock);
2598 raw_spin_rq_lock(rq);
2599
2600 if (task_rq(p) != rq)
2601 goto out_unlock;
2602
2603 if (is_migration_disabled(p)) {
2604 p->migration_flags |= MDF_PUSH;
2605 goto out_unlock;
2606 }
2607
2608 p->migration_flags &= ~MDF_PUSH;
2609
2610 if (p->sched_class->find_lock_rq)
2611 lowest_rq = p->sched_class->find_lock_rq(p, rq);
2612
2613 if (!lowest_rq)
2614 goto out_unlock;
2615
2616 // XXX validate p is still the highest prio task
2617 if (task_rq(p) == rq) {
2618 deactivate_task(rq, p, 0);
2619 set_task_cpu(p, lowest_rq->cpu);
2620 activate_task(lowest_rq, p, 0);
2621 resched_curr(lowest_rq);
2622 }
2623
2624 double_unlock_balance(rq, lowest_rq);
2625
2626 out_unlock:
2627 rq->push_busy = false;
2628 raw_spin_rq_unlock(rq);
2629 raw_spin_unlock_irq(&p->pi_lock);
2630
2631 put_task_struct(p);
2632 return 0;
2633 }
2634
2635 /*
2636 * sched_class::set_cpus_allowed must do the below, but is not required to
2637 * actually call this function.
2638 */
2639 void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx)
2640 {
2641 if (ctx->flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) {
2642 p->cpus_ptr = ctx->new_mask;
2643 return;
2644 }
2645
2646 cpumask_copy(&p->cpus_mask, ctx->new_mask);
2647 p->nr_cpus_allowed = cpumask_weight(ctx->new_mask);
2648
2649 /*
2650 * Swap in a new user_cpus_ptr if SCA_USER flag set
2651 */
2652 if (ctx->flags & SCA_USER)
2653 swap(p->user_cpus_ptr, ctx->user_mask);
2654 }
2655
2656 static void
2657 __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
2658 {
2659 struct rq *rq = task_rq(p);
2660 bool queued, running;
2661
2662 /*
2663 * This here violates the locking rules for affinity, since we're only
2664 * supposed to change these variables while holding both rq->lock and
2665 * p->pi_lock.
2666 *
2667 * HOWEVER, it magically works, because ttwu() is the only code that
2668 * accesses these variables under p->pi_lock and only does so after
2669 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
2670 * before finish_task().
2671 *
2672 * XXX do further audits, this smells like something putrid.
2673 */
2674 if (ctx->flags & SCA_MIGRATE_DISABLE)
2675 SCHED_WARN_ON(!p->on_cpu);
2676 else
2677 lockdep_assert_held(&p->pi_lock);
2678
2679 queued = task_on_rq_queued(p);
2680 running = task_current(rq, p);
2681
2682 if (queued) {
2683 /*
2684 * Because __kthread_bind() calls this on blocked tasks without
2685 * holding rq->lock.
2686 */
2687 lockdep_assert_rq_held(rq);
2688 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
2689 }
2690 if (running)
2691 put_prev_task(rq, p);
2692
2693 p->sched_class->set_cpus_allowed(p, ctx);
2694
2695 if (queued)
2696 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
2697 if (running)
2698 set_next_task(rq, p);
2699 }
2700
2701 /*
2702 * Used for kthread_bind() and select_fallback_rq(), in both cases the user
2703 * affinity (if any) should be destroyed too.
2704 */
2705 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
2706 {
2707 struct affinity_context ac = {
2708 .new_mask = new_mask,
2709 .user_mask = NULL,
2710 .flags = SCA_USER, /* clear the user requested mask */
2711 };
2712 union cpumask_rcuhead {
2713 cpumask_t cpumask;
2714 struct rcu_head rcu;
2715 };
2716
2717 __do_set_cpus_allowed(p, &ac);
2718
2719 /*
2720 * Because this is called with p->pi_lock held, it is not possible
2721 * to use kfree() here (when PREEMPT_RT=y), therefore punt to using
2722 * kfree_rcu().
2723 */
2724 kfree_rcu((union cpumask_rcuhead *)ac.user_mask, rcu);
2725 }
2726
2727 int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
2728 int node)
2729 {
2730 cpumask_t *user_mask;
2731 unsigned long flags;
2732
2733 /*
2734 * Always clear dst->user_cpus_ptr first as their user_cpus_ptr's
2735 * may differ by now due to racing.
2736 */
2737 dst->user_cpus_ptr = NULL;
2738
2739 /*
2740 * This check is racy and losing the race is a valid situation.
2741 * It is not worth the extra overhead of taking the pi_lock on
2742 * every fork/clone.
2743 */
2744 if (data_race(!src->user_cpus_ptr))
2745 return 0;
2746
2747 user_mask = alloc_user_cpus_ptr(node);
2748 if (!user_mask)
2749 return -ENOMEM;
2750
2751 /*
2752 * Use pi_lock to protect content of user_cpus_ptr
2753 *
2754 * Though unlikely, user_cpus_ptr can be reset to NULL by a concurrent
2755 * do_set_cpus_allowed().
2756 */
2757 raw_spin_lock_irqsave(&src->pi_lock, flags);
2758 if (src->user_cpus_ptr) {
2759 swap(dst->user_cpus_ptr, user_mask);
2760 cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
2761 }
2762 raw_spin_unlock_irqrestore(&src->pi_lock, flags);
2763
2764 if (unlikely(user_mask))
2765 kfree(user_mask);
2766
2767 return 0;
2768 }
2769
2770 static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p)
2771 {
2772 struct cpumask *user_mask = NULL;
2773
2774 swap(p->user_cpus_ptr, user_mask);
2775
2776 return user_mask;
2777 }
2778
2779 void release_user_cpus_ptr(struct task_struct *p)
2780 {
2781 kfree(clear_user_cpus_ptr(p));
2782 }
2783
2784 /*
2785 * This function is wildly self concurrent; here be dragons.
2786 *
2787 *
2788 * When given a valid mask, __set_cpus_allowed_ptr() must block until the
2789 * designated task is enqueued on an allowed CPU. If that task is currently
2790 * running, we have to kick it out using the CPU stopper.
2791 *
2792 * Migrate-Disable comes along and tramples all over our nice sandcastle.
2793 * Consider:
2794 *
2795 * Initial conditions: P0->cpus_mask = [0, 1]
2796 *
2797 * P0@CPU0 P1
2798 *
2799 * migrate_disable();
2800 * <preempted>
2801 * set_cpus_allowed_ptr(P0, [1]);
2802 *
2803 * P1 *cannot* return from this set_cpus_allowed_ptr() call until P0 executes
2804 * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region).
2805 * This means we need the following scheme:
2806 *
2807 * P0@CPU0 P1
2808 *
2809 * migrate_disable();
2810 * <preempted>
2811 * set_cpus_allowed_ptr(P0, [1]);
2812 * <blocks>
2813 * <resumes>
2814 * migrate_enable();
2815 * __set_cpus_allowed_ptr();
2816 * <wakes local stopper>
2817 * `--> <woken on migration completion>
2818 *
2819 * Now the fun stuff: there may be several P1-like tasks, i.e. multiple
2820 * concurrent set_cpus_allowed_ptr(P0, [*]) calls. CPU affinity changes of any
2821 * task p are serialized by p->pi_lock, which we can leverage: the one that
2822 * should come into effect at the end of the Migrate-Disable region is the last
2823 * one. This means we only need to track a single cpumask (i.e. p->cpus_mask),
2824 * but we still need to properly signal those waiting tasks at the appropriate
2825 * moment.
2826 *
2827 * This is implemented using struct set_affinity_pending. The first
2828 * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will
2829 * setup an instance of that struct and install it on the targeted task_struct.
2830 * Any and all further callers will reuse that instance. Those then wait for
2831 * a completion signaled at the tail of the CPU stopper callback (1), triggered
2832 * on the end of the Migrate-Disable region (i.e. outermost migrate_enable()).
2833 *
2834 *
2835 * (1) In the cases covered above. There is one more where the completion is
2836 * signaled within affine_move_task() itself: when a subsequent affinity request
2837 * occurs after the stopper bailed out due to the targeted task still being
2838 * Migrate-Disable. Consider:
2839 *
2840 * Initial conditions: P0->cpus_mask = [0, 1]
2841 *
2842 * CPU0 P1 P2
2843 * <P0>
2844 * migrate_disable();
2845 * <preempted>
2846 * set_cpus_allowed_ptr(P0, [1]);
2847 * <blocks>
2848 * <migration/0>
2849 * migration_cpu_stop()
2850 * is_migration_disabled()
2851 * <bails>
2852 * set_cpus_allowed_ptr(P0, [0, 1]);
2853 * <signal completion>
2854 * <awakes>
2855 *
2856 * Note that the above is safe vs a concurrent migrate_enable(), as any
2857 * pending affinity completion is preceded by an uninstallation of
2858 * p->migration_pending done with p->pi_lock held.
2859 */
2860 static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf,
2861 int dest_cpu, unsigned int flags)
2862 __releases(rq->lock)
2863 __releases(p->pi_lock)
2864 {
2865 struct set_affinity_pending my_pending = { }, *pending = NULL;
2866 bool stop_pending, complete = false;
2867
2868 /* Can the task run on the task's current CPU? If so, we're done */
2869 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
2870 struct task_struct *push_task = NULL;
2871
2872 if ((flags & SCA_MIGRATE_ENABLE) &&
2873 (p->migration_flags & MDF_PUSH) && !rq->push_busy) {
2874 rq->push_busy = true;
2875 push_task = get_task_struct(p);
2876 }
2877
2878 /*
2879 * If there are pending waiters, but no pending stop_work,
2880 * then complete now.
2881 */
2882 pending = p->migration_pending;
2883 if (pending && !pending->stop_pending) {
2884 p->migration_pending = NULL;
2885 complete = true;
2886 }
2887
2888 preempt_disable();
2889 task_rq_unlock(rq, p, rf);
2890 if (push_task) {
2891 stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
2892 p, &rq->push_work);
2893 }
2894 preempt_enable();
2895
2896 if (complete)
2897 complete_all(&pending->done);
2898
2899 return 0;
2900 }
2901
2902 if (!(flags & SCA_MIGRATE_ENABLE)) {
2903 /* serialized by p->pi_lock */
2904 if (!p->migration_pending) {
2905 /* Install the request */
2906 refcount_set(&my_pending.refs, 1);
2907 init_completion(&my_pending.done);
2908 my_pending.arg = (struct migration_arg) {
2909 .task = p,
2910 .dest_cpu = dest_cpu,
2911 .pending = &my_pending,
2912 };
2913
2914 p->migration_pending = &my_pending;
2915 } else {
2916 pending = p->migration_pending;
2917 refcount_inc(&pending->refs);
2918 /*
2919 * Affinity has changed, but we've already installed a
2920 * pending. migration_cpu_stop() *must* see this, else
2921 * we risk a completion of the pending despite having a
2922 * task on a disallowed CPU.
2923 *
2924 * Serialized by p->pi_lock, so this is safe.
2925 */
2926 pending->arg.dest_cpu = dest_cpu;
2927 }
2928 }
2929 pending = p->migration_pending;
2930 /*
2931 * - !MIGRATE_ENABLE:
2932 * we'll have installed a pending if there wasn't one already.
2933 *
2934 * - MIGRATE_ENABLE:
2935 * we're here because the current CPU isn't matching anymore,
2936 * the only way that can happen is because of a concurrent
2937 * set_cpus_allowed_ptr() call, which should then still be
2938 * pending completion.
2939 *
2940 * Either way, we really should have a @pending here.
2941 */
2942 if (WARN_ON_ONCE(!pending)) {
2943 task_rq_unlock(rq, p, rf);
2944 return -EINVAL;
2945 }
2946
2947 if (task_on_cpu(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) {
2948 /*
2949 * MIGRATE_ENABLE gets here because 'p == current', but for
2950 * anything else we cannot do is_migration_disabled(), punt
2951 * and have the stopper function handle it all race-free.
2952 */
2953 stop_pending = pending->stop_pending;
2954 if (!stop_pending)
2955 pending->stop_pending = true;
2956
2957 if (flags & SCA_MIGRATE_ENABLE)
2958 p->migration_flags &= ~MDF_PUSH;
2959
2960 preempt_disable();
2961 task_rq_unlock(rq, p, rf);
2962 if (!stop_pending) {
2963 stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
2964 &pending->arg, &pending->stop_work);
2965 }
2966 preempt_enable();
2967
2968 if (flags & SCA_MIGRATE_ENABLE)
2969 return 0;
2970 } else {
2971
2972 if (!is_migration_disabled(p)) {
2973 if (task_on_rq_queued(p))
2974 rq = move_queued_task(rq, rf, p, dest_cpu);
2975
2976 if (!pending->stop_pending) {
2977 p->migration_pending = NULL;
2978 complete = true;
2979 }
2980 }
2981 task_rq_unlock(rq, p, rf);
2982
2983 if (complete)
2984 complete_all(&pending->done);
2985 }
2986
2987 wait_for_completion(&pending->done);
2988
2989 if (refcount_dec_and_test(&pending->refs))
2990 wake_up_var(&pending->refs); /* No UaF, just an address */
2991
2992 /*
2993 * Block the original owner of &pending until all subsequent callers
2994 * have seen the completion and decremented the refcount
2995 */
2996 wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs));
2997
2998 /* ARGH */
2999 WARN_ON_ONCE(my_pending.stop_pending);
3000
3001 return 0;
3002 }
3003
3004 /*
3005 * Called with both p->pi_lock and rq->lock held; drops both before returning.
3006 */
3007 static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
3008 struct affinity_context *ctx,
3009 struct rq *rq,
3010 struct rq_flags *rf)
3011 __releases(rq->lock)
3012 __releases(p->pi_lock)
3013 {
3014 const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
3015 const struct cpumask *cpu_valid_mask = cpu_active_mask;
3016 bool kthread = p->flags & PF_KTHREAD;
3017 unsigned int dest_cpu;
3018 int ret = 0;
3019
3020 update_rq_clock(rq);
3021
3022 if (kthread || is_migration_disabled(p)) {
3023 /*
3024 * Kernel threads are allowed on online && !active CPUs,
3025 * however, during cpu-hot-unplug, even these might get pushed
3026 * away if not KTHREAD_IS_PER_CPU.
3027 *
3028 * Specifically, migration_disabled() tasks must not fail the
3029 * cpumask_any_and_distribute() pick below, esp. so on
3030 * SCA_MIGRATE_ENABLE, otherwise we'll not call
3031 * set_cpus_allowed_common() and actually reset p->cpus_ptr.
3032 */
3033 cpu_valid_mask = cpu_online_mask;
3034 }
3035
3036 if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) {
3037 ret = -EINVAL;
3038 goto out;
3039 }
3040
3041 /*
3042 * Must re-check here, to close a race against __kthread_bind(),
3043 * sched_setaffinity() is not guaranteed to observe the flag.
3044 */
3045 if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
3046 ret = -EINVAL;
3047 goto out;
3048 }
3049
3050 if (!(ctx->flags & SCA_MIGRATE_ENABLE)) {
3051 if (cpumask_equal(&p->cpus_mask, ctx->new_mask)) {
3052 if (ctx->flags & SCA_USER)
3053 swap(p->user_cpus_ptr, ctx->user_mask);
3054 goto out;
3055 }
3056
3057 if (WARN_ON_ONCE(p == current &&
3058 is_migration_disabled(p) &&
3059 !cpumask_test_cpu(task_cpu(p), ctx->new_mask))) {
3060 ret = -EBUSY;
3061 goto out;
3062 }
3063 }
3064
3065 /*
3066 * Picking a ~random cpu helps in cases where we are changing affinity
3067 * for groups of tasks (ie. cpuset), so that load balancing is not
3068 * immediately required to distribute the tasks within their new mask.
3069 */
3070 dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, ctx->new_mask);
3071 if (dest_cpu >= nr_cpu_ids) {
3072 ret = -EINVAL;
3073 goto out;
3074 }
3075
3076 __do_set_cpus_allowed(p, ctx);
3077
3078 return affine_move_task(rq, p, rf, dest_cpu, ctx->flags);
3079
3080 out:
3081 task_rq_unlock(rq, p, rf);
3082
3083 return ret;
3084 }
3085
3086 /*
3087 * Change a given task's CPU affinity. Migrate the thread to a
3088 * proper CPU and schedule it away if the CPU it's executing on
3089 * is removed from the allowed bitmask.
3090 *
3091 * NOTE: the caller must have a valid reference to the task, the
3092 * task must not exit() & deallocate itself prematurely. The
3093 * call is not atomic; no spinlocks may be held.
3094 */
3095 int __set_cpus_allowed_ptr(struct task_struct *p, struct affinity_context *ctx)
3096 {
3097 struct rq_flags rf;
3098 struct rq *rq;
3099
3100 rq = task_rq_lock(p, &rf);
3101 /*
3102 * Masking should be skipped if SCA_USER or any of the SCA_MIGRATE_*
3103 * flags are set.
3104 */
3105 if (p->user_cpus_ptr &&
3106 !(ctx->flags & (SCA_USER | SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) &&
3107 cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr))
3108 ctx->new_mask = rq->scratch_mask;
3109
3110 return __set_cpus_allowed_ptr_locked(p, ctx, rq, &rf);
3111 }
3112
3113 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
3114 {
3115 struct affinity_context ac = {
3116 .new_mask = new_mask,
3117 .flags = 0,
3118 };
3119
3120 return __set_cpus_allowed_ptr(p, &ac);
3121 }
3122 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
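/*
 * Illustrative use of the exported helper above (placeholders, not a
 * caller in this file): pinning a freshly created kernel thread to one
 * CPU:
 *
 *	struct task_struct *tsk = kthread_create(fn, NULL, "example");
 *
 *	if (!IS_ERR(tsk)) {
 *		set_cpus_allowed_ptr(tsk, cpumask_of(cpu));
 *		wake_up_process(tsk);
 *	}
 *
 * In-tree users wanting a hard binding normally use kthread_bind()
 * instead, which also sets PF_NO_SETAFFINITY.
 */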
3123
3124 /*
3125 * Change a given task's CPU affinity to the intersection of its current
3126 * affinity mask and @subset_mask, writing the resulting mask to @new_mask.
3127 * If user_cpus_ptr is defined, use it as the basis for restricting CPU
3128 * affinity or use cpu_online_mask instead.
3129 *
3130 * If the resulting mask is empty, leave the affinity unchanged and return
3131 * -EINVAL.
3132 */
3133 static int restrict_cpus_allowed_ptr(struct task_struct *p,
3134 struct cpumask *new_mask,
3135 const struct cpumask *subset_mask)
3136 {
3137 struct affinity_context ac = {
3138 .new_mask = new_mask,
3139 .flags = 0,
3140 };
3141 struct rq_flags rf;
3142 struct rq *rq;
3143 int err;
3144
3145 rq = task_rq_lock(p, &rf);
3146
3147 /*
3148 * Forcefully restricting the affinity of a deadline task is
3149 * likely to cause problems, so fail and noisily override the
3150 * mask entirely.
3151 */
3152 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
3153 err = -EPERM;
3154 goto err_unlock;
3155 }
3156
3157 if (!cpumask_and(new_mask, task_user_cpus(p), subset_mask)) {
3158 err = -EINVAL;
3159 goto err_unlock;
3160 }
3161
3162 return __set_cpus_allowed_ptr_locked(p, &ac, rq, &rf);
3163
3164 err_unlock:
3165 task_rq_unlock(rq, p, &rf);
3166 return err;
3167 }
3168
3169 /*
3170 * Restrict the CPU affinity of task @p so that it is a subset of
3171 * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
3172 * old affinity mask. If the resulting mask is empty, we warn and walk
3173 * up the cpuset hierarchy until we find a suitable mask.
3174 */
3175 void force_compatible_cpus_allowed_ptr(struct task_struct *p)
3176 {
3177 cpumask_var_t new_mask;
3178 const struct cpumask *override_mask = task_cpu_possible_mask(p);
3179
3180 alloc_cpumask_var(&new_mask, GFP_KERNEL);
3181
3182 /*
3183 * __migrate_task() can fail silently in the face of concurrent
3184 * offlining of the chosen destination CPU, so take the hotplug
3185 * lock to ensure that the migration succeeds.
3186 */
3187 cpus_read_lock();
3188 if (!cpumask_available(new_mask))
3189 goto out_set_mask;
3190
3191 if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask))
3192 goto out_free_mask;
3193
3194 /*
3195 * We failed to find a valid subset of the affinity mask for the
3196 * task, so override it based on its cpuset hierarchy.
3197 */
3198 cpuset_cpus_allowed(p, new_mask);
3199 override_mask = new_mask;
3200
3201 out_set_mask:
3202 if (printk_ratelimit()) {
3203 printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
3204 task_pid_nr(p), p->comm,
3205 cpumask_pr_args(override_mask));
3206 }
3207
3208 WARN_ON(set_cpus_allowed_ptr(p, override_mask));
3209 out_free_mask:
3210 cpus_read_unlock();
3211 free_cpumask_var(new_mask);
3212 }
3213
3214 /*
3215 * Restore the affinity of a task @p which was previously restricted by a
3216 * call to force_compatible_cpus_allowed_ptr().
3217 *
3218 * It is the caller's responsibility to serialise this with any calls to
3219 * force_compatible_cpus_allowed_ptr(@p).
3220 */
3221 void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
3222 {
3223 struct affinity_context ac = {
3224 .new_mask = task_user_cpus(p),
3225 .flags = 0,
3226 };
3227 int ret;
3228
3229 /*
3230 * Try to restore the old affinity mask with __sched_setaffinity().
3231 * Cpuset masking will be done there too.
3232 */
3233 ret = __sched_setaffinity(p, &ac);
3234 WARN_ON_ONCE(ret);
3235 }
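/*
 * Sketch of the intended pairing of the two helpers above (illustrative;
 * e.g. for tasks that can only execute on a subset of CPUs):
 *
 *	force_compatible_cpus_allowed_ptr(p);
 *	... @p now only runs on CPUs in task_cpu_possible_mask(p) ...
 *	relax_compatible_cpus_allowed_ptr(p);
 *
 * The caller must serialise the two calls itself, as noted above.
 */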
3236
3237 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
3238 {
3239 #ifdef CONFIG_SCHED_DEBUG
3240 unsigned int state = READ_ONCE(p->__state);
3241
3242 /*
3243 * We should never call set_task_cpu() on a blocked task,
3244 * ttwu() will sort out the placement.
3245 */
3246 WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq);
3247
3248 /*
3249 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
3250 * because schedstat_wait_{start,end} rebase migrating task's wait_start
3251 * time relying on p->on_rq.
3252 */
3253 WARN_ON_ONCE(state == TASK_RUNNING &&
3254 p->sched_class == &fair_sched_class &&
3255 (p->on_rq && !task_on_rq_migrating(p)));
3256
3257 #ifdef CONFIG_LOCKDEP
3258 /*
3259 * The caller should hold either p->pi_lock or rq->lock, when changing
3260 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
3261 *
3262 * sched_move_task() holds both and thus holding either pins the cgroup,
3263 * see task_group().
3264 *
3265 * Furthermore, all task_rq users should acquire both locks, see
3266 * task_rq_lock().
3267 */
3268 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
3269 lockdep_is_held(__rq_lockp(task_rq(p)))));
3270 #endif
3271 /*
3272 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
3273 */
3274 WARN_ON_ONCE(!cpu_online(new_cpu));
3275
3276 WARN_ON_ONCE(is_migration_disabled(p));
3277 #endif
3278
3279 trace_sched_migrate_task(p, new_cpu);
3280
3281 if (task_cpu(p) != new_cpu) {
3282 if (p->sched_class->migrate_task_rq)
3283 p->sched_class->migrate_task_rq(p, new_cpu);
3284 p->se.nr_migrations++;
3285 rseq_migrate(p);
3286 sched_mm_cid_migrate_from(p);
3287 perf_event_task_migrate(p);
3288 }
3289
3290 __set_task_cpu(p, new_cpu);
3291 }
3292
3293 #ifdef CONFIG_NUMA_BALANCING
3294 static void __migrate_swap_task(struct task_struct *p, int cpu)
3295 {
3296 if (task_on_rq_queued(p)) {
3297 struct rq *src_rq, *dst_rq;
3298 struct rq_flags srf, drf;
3299
3300 src_rq = task_rq(p);
3301 dst_rq = cpu_rq(cpu);
3302
3303 rq_pin_lock(src_rq, &srf);
3304 rq_pin_lock(dst_rq, &drf);
3305
3306 deactivate_task(src_rq, p, 0);
3307 set_task_cpu(p, cpu);
3308 activate_task(dst_rq, p, 0);
3309 wakeup_preempt(dst_rq, p, 0);
3310
3311 rq_unpin_lock(dst_rq, &drf);
3312 rq_unpin_lock(src_rq, &srf);
3313
3314 } else {
3315 /*
3316 * Task isn't running anymore; make it appear like we migrated
3317 * it before it went to sleep. This means on wakeup we make the
3318 * previous CPU our target instead of where it really is.
3319 */
3320 p->wake_cpu = cpu;
3321 }
3322 }
3323
3324 struct migration_swap_arg {
3325 struct task_struct *src_task, *dst_task;
3326 int src_cpu, dst_cpu;
3327 };
3328
3329 static int migrate_swap_stop(void *data)
3330 {
3331 struct migration_swap_arg *arg = data;
3332 struct rq *src_rq, *dst_rq;
3333
3334 if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
3335 return -EAGAIN;
3336
3337 src_rq = cpu_rq(arg->src_cpu);
3338 dst_rq = cpu_rq(arg->dst_cpu);
3339
3340 guard(double_raw_spinlock)(&arg->src_task->pi_lock, &arg->dst_task->pi_lock);
3341 guard(double_rq_lock)(src_rq, dst_rq);
3342
3343 if (task_cpu(arg->dst_task) != arg->dst_cpu)
3344 return -EAGAIN;
3345
3346 if (task_cpu(arg->src_task) != arg->src_cpu)
3347 return -EAGAIN;
3348
3349 if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr))
3350 return -EAGAIN;
3351
3352 if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr))
3353 return -EAGAIN;
3354
3355 __migrate_swap_task(arg->src_task, arg->dst_cpu);
3356 __migrate_swap_task(arg->dst_task, arg->src_cpu);
3357
3358 return 0;
3359 }
3360
3361 /*
3362 * Cross migrate two tasks
3363 */
3364 int migrate_swap(struct task_struct *cur, struct task_struct *p,
3365 int target_cpu, int curr_cpu)
3366 {
3367 struct migration_swap_arg arg;
3368 int ret = -EINVAL;
3369
3370 arg = (struct migration_swap_arg){
3371 .src_task = cur,
3372 .src_cpu = curr_cpu,
3373 .dst_task = p,
3374 .dst_cpu = target_cpu,
3375 };
3376
3377 if (arg.src_cpu == arg.dst_cpu)
3378 goto out;
3379
3380 /*
3381 * These three tests are all lockless; this is OK since all of them
3382 * will be re-checked with proper locks held further down the line.
3383 */
3384 if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
3385 goto out;
3386
3387 if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr))
3388 goto out;
3389
3390 if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr))
3391 goto out;
3392
3393 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
3394 ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);
3395
3396 out:
3397 return ret;
3398 }
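/*
 * Illustrative call (not from this file): to exchange @cur, currently on
 * @curr_cpu, with @p, currently on @target_cpu:
 *
 *	ret = migrate_swap(cur, p, target_cpu, curr_cpu);
 *
 * NUMA balancing uses this to trade two tasks between nodes instead of
 * doing a one-way migration; -EAGAIN is returned if either task or CPU
 * changed under us.
 */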
3399 #endif /* CONFIG_NUMA_BALANCING */
3400
3401 /***
3402 * kick_process - kick a running thread to enter/exit the kernel
3403 * @p: the to-be-kicked thread
3404 *
3405 * Cause a process which is running on another CPU to enter
3406 * kernel-mode, without any delay. (to get signals handled.)
3407 *
3408 * NOTE: this function doesn't have to take the runqueue lock,
3409 * because all it wants to ensure is that the remote task enters
3410 * the kernel. If the IPI races and the task has been migrated
3411 * to another CPU then no harm is done and the purpose has been
3412 * achieved as well.
3413 */
3414 void kick_process(struct task_struct *p)
3415 {
3416 guard(preempt)();
3417 int cpu = task_cpu(p);
3418
3419 if ((cpu != smp_processor_id()) && task_curr(p))
3420 smp_send_reschedule(cpu);
3421 }
3422 EXPORT_SYMBOL_GPL(kick_process);
3423
3424 /*
3425 * ->cpus_ptr is protected by both rq->lock and p->pi_lock
3426 *
3427 * A few notes on cpu_active vs cpu_online:
3428 *
3429 * - cpu_active must be a subset of cpu_online
3430 *
3431 * - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
3432 * see __set_cpus_allowed_ptr(). At this point the newly online
3433 * CPU isn't yet part of the sched domains, and balancing will not
3434 * see it.
3435 *
3436 * - on CPU-down we clear cpu_active() to mask the sched domains and
3437 * avoid the load balancer to place new tasks on the to be removed
3438 * CPU. Existing tasks will remain running there and will be taken
3439 * off.
3440 *
3441 * This means that fallback selection must not select !active CPUs.
3442 * And can assume that any active CPU must be online. Conversely
3443 * select_task_rq() below may allow selection of !active CPUs in order
3444 * to satisfy the above rules.
3445 */
3446 static int select_fallback_rq(int cpu, struct task_struct *p)
3447 {
3448 int nid = cpu_to_node(cpu);
3449 const struct cpumask *nodemask = NULL;
3450 enum { cpuset, possible, fail } state = cpuset;
3451 int dest_cpu;
3452
3453 /*
3454 * If the node that the CPU is on has been offlined, cpu_to_node()
3455 * will return -1. There is no CPU on the node, and we should
3456 * select the CPU on the other node.
3457 */
3458 if (nid != -1) {
3459 nodemask = cpumask_of_node(nid);
3460
3461 /* Look for allowed, online CPU in same node. */
3462 for_each_cpu(dest_cpu, nodemask) {
3463 if (is_cpu_allowed(p, dest_cpu))
3464 return dest_cpu;
3465 }
3466 }
3467
3468 for (;;) {
3469 /* Any allowed, online CPU? */
3470 for_each_cpu(dest_cpu, p->cpus_ptr) {
3471 if (!is_cpu_allowed(p, dest_cpu))
3472 continue;
3473
3474 goto out;
3475 }
3476
3477 /* No more Mr. Nice Guy. */
3478 switch (state) {
3479 case cpuset:
3480 if (cpuset_cpus_allowed_fallback(p)) {
3481 state = possible;
3482 break;
3483 }
3484 fallthrough;
3485 case possible:
3486 /*
3487 * XXX When called from select_task_rq() we only
3488 * hold p->pi_lock and again violate locking order.
3489 *
3490 * More yuck to audit.
3491 */
3492 do_set_cpus_allowed(p, task_cpu_possible_mask(p));
3493 state = fail;
3494 break;
3495 case fail:
3496 BUG();
3497 break;
3498 }
3499 }
3500
3501 out:
3502 if (state != cpuset) {
3503 /*
3504 * Don't tell them about moving exiting tasks or
3505 * kernel threads (both mm NULL), since they never
3506 * leave kernel.
3507 */
3508 if (p->mm && printk_ratelimit()) {
3509 printk_deferred("process %d (%s) no longer affine to cpu%d\n",
3510 task_pid_nr(p), p->comm, cpu);
3511 }
3512 }
3513
3514 return dest_cpu;
3515 }
3516
3517 /*
3518 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
3519 */
3520 static inline
3521 int select_task_rq(struct task_struct *p, int cpu, int wake_flags)
3522 {
3523 lockdep_assert_held(&p->pi_lock);
3524
3525 if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p))
3526 cpu = p->sched_class->select_task_rq(p, cpu, wake_flags);
3527 else
3528 cpu = cpumask_any(p->cpus_ptr);
3529
3530 /*
3531 * In order not to call set_task_cpu() on a blocking task we need
3532 * to rely on ttwu() to place the task on a valid ->cpus_ptr
3533 * CPU.
3534 *
3535 * Since this is common to all placement strategies, this lives here.
3536 *
3537 * [ this allows ->select_task() to simply return task_cpu(p) and
3538 * not worry about this generic constraint ]
3539 */
3540 if (unlikely(!is_cpu_allowed(p, cpu)))
3541 cpu = select_fallback_rq(task_cpu(p), p);
3542
3543 return cpu;
3544 }
3545
3546 void sched_set_stop_task(int cpu, struct task_struct *stop)
3547 {
3548 static struct lock_class_key stop_pi_lock;
3549 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
3550 struct task_struct *old_stop = cpu_rq(cpu)->stop;
3551
3552 if (stop) {
3553 /*
3554 * Make it appear like a SCHED_FIFO task, it's something
3555 * userspace knows about and won't get confused about.
3556 *
3557 * Also, it will make PI more or less work without too
3558 * much confusion -- but then, stop work should not
3559 * rely on PI working anyway.
3560 */
3561 sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
3562
3563 stop->sched_class = &stop_sched_class;
3564
3565 /*
3566 * The PI code calls rt_mutex_setprio() with ->pi_lock held to
3567 * adjust the effective priority of a task. As a result,
3568 * rt_mutex_setprio() can trigger (RT) balancing operations,
3569 * which can then trigger wakeups of the stop thread to push
3570 * around the current task.
3571 *
3572 * The stop task itself will never be part of the PI-chain, it
3573 * never blocks, therefore that ->pi_lock recursion is safe.
3574 * Tell lockdep about this by placing the stop->pi_lock in its
3575 * own class.
3576 */
3577 lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
3578 }
3579
3580 cpu_rq(cpu)->stop = stop;
3581
3582 if (old_stop) {
3583 /*
3584 * Reset it back to a normal scheduling class so that
3585 * it can die in pieces.
3586 */
3587 old_stop->sched_class = &rt_sched_class;
3588 }
3589 }
3590
3591 #else /* CONFIG_SMP */
3592
3593 static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { }
3594 
3595 static inline bool rq_has_pinned_tasks(struct rq *rq)
3596 {
3597 return false;
3598 }
3599
3600 #endif /* !CONFIG_SMP */
3601
3602 static void
3603 ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
3604 {
3605 struct rq *rq;
3606
3607 if (!schedstat_enabled())
3608 return;
3609
3610 rq = this_rq();
3611
3612 #ifdef CONFIG_SMP
3613 if (cpu == rq->cpu) {
3614 __schedstat_inc(rq->ttwu_local);
3615 __schedstat_inc(p->stats.nr_wakeups_local);
3616 } else {
3617 struct sched_domain *sd;
3618
3619 __schedstat_inc(p->stats.nr_wakeups_remote);
3620
3621 guard(rcu)();
3622 for_each_domain(rq->cpu, sd) {
3623 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
3624 __schedstat_inc(sd->ttwu_wake_remote);
3625 break;
3626 }
3627 }
3628 }
3629
3630 if (wake_flags & WF_MIGRATED)
3631 __schedstat_inc(p->stats.nr_wakeups_migrate);
3632 #endif /* CONFIG_SMP */
3633
3634 __schedstat_inc(rq->ttwu_count);
3635 __schedstat_inc(p->stats.nr_wakeups);
3636
3637 if (wake_flags & WF_SYNC)
3638 __schedstat_inc(p->stats.nr_wakeups_sync);
3639 }
3640
3641 /*
3642 * Mark the task runnable.
3643 */
3644 static inline void ttwu_do_wakeup(struct task_struct *p)
3645 {
3646 WRITE_ONCE(p->__state, TASK_RUNNING);
3647 trace_sched_wakeup(p);
3648 }
3649
3650 static void
3651 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
3652 struct rq_flags *rf)
3653 {
3654 int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
3655
3656 lockdep_assert_rq_held(rq);
3657
3658 if (p->sched_contributes_to_load)
3659 rq->nr_uninterruptible--;
3660
3661 #ifdef CONFIG_SMP
3662 if (wake_flags & WF_MIGRATED)
3663 en_flags |= ENQUEUE_MIGRATED;
3664 else
3665 #endif
3666 if (p->in_iowait) {
3667 delayacct_blkio_end(p);
3668 atomic_dec(&task_rq(p)->nr_iowait);
3669 }
3670
3671 activate_task(rq, p, en_flags);
3672 wakeup_preempt(rq, p, wake_flags);
3673
3674 ttwu_do_wakeup(p);
3675
3676 #ifdef CONFIG_SMP
3677 if (p->sched_class->task_woken) {
3678 /*
3679 * Our task @p is fully woken up and running; so it's safe to
3680 * drop the rq->lock, hereafter rq is only used for statistics.
3681 */
3682 rq_unpin_lock(rq, rf);
3683 p->sched_class->task_woken(rq, p);
3684 rq_repin_lock(rq, rf);
3685 }
3686
3687 if (rq->idle_stamp) {
3688 u64 delta = rq_clock(rq) - rq->idle_stamp;
3689 u64 max = 2*rq->max_idle_balance_cost;
3690
3691 update_avg(&rq->avg_idle, delta);
3692
3693 if (rq->avg_idle > max)
3694 rq->avg_idle = max;
3695
3696 rq->idle_stamp = 0;
3697 }
3698 #endif
3699 }
3700
3701 /*
3702 * Consider @p being inside a wait loop:
3703 *
3704 * for (;;) {
3705 * set_current_state(TASK_UNINTERRUPTIBLE);
3706 *
3707 * if (CONDITION)
3708 * break;
3709 *
3710 * schedule();
3711 * }
3712 * __set_current_state(TASK_RUNNING);
3713 *
3714 * between set_current_state() and schedule(). In this case @p is still
3715 * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
3716 * an atomic manner.
3717 *
3718 * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
3719 * then schedule() must still happen and p->state can be changed to
3720 * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
3721 * need to do a full wakeup with enqueue.
3722 *
3723 * Returns: %true when the wakeup is done,
3724 * %false otherwise.
3725 */
3726 static int ttwu_runnable(struct task_struct *p, int wake_flags)
3727 {
3728 struct rq_flags rf;
3729 struct rq *rq;
3730 int ret = 0;
3731
3732 rq = __task_rq_lock(p, &rf);
3733 if (task_on_rq_queued(p)) {
3734 update_rq_clock(rq);
3735 if (p->se.sched_delayed)
3736 enqueue_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_DELAYED);
3737 if (!task_on_cpu(rq, p)) {
3738 /*
3739 * When on_rq && !on_cpu the task is preempted, see if
3740 * it should preempt the task that is current now.
3741 */
3742 wakeup_preempt(rq, p, wake_flags);
3743 }
3744 ttwu_do_wakeup(p);
3745 ret = 1;
3746 }
3747 __task_rq_unlock(rq, &rf);
3748
3749 return ret;
3750 }
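/*
 * Illustrative waker-side sketch (not part of this file): the counterpart
 * of the wait loop shown above ttwu_runnable() is a waker that publishes
 * the condition before issuing the wakeup:
 *
 *	CONDITION = 1;
 *	wake_up_process(p);
 *
 * try_to_wake_up() issues a full barrier before reading p->state (see the
 * ordering comments further down), so the waiter either observes CONDITION
 * or gets a full wakeup -- never neither.
 */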
3751
3752 #ifdef CONFIG_SMP
3753 void sched_ttwu_pending(void *arg)
3754 {
3755 struct llist_node *llist = arg;
3756 struct rq *rq = this_rq();
3757 struct task_struct *p, *t;
3758 struct rq_flags rf;
3759
3760 if (!llist)
3761 return;
3762
3763 rq_lock_irqsave(rq, &rf);
3764 update_rq_clock(rq);
3765
3766 llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
3767 if (WARN_ON_ONCE(p->on_cpu))
3768 smp_cond_load_acquire(&p->on_cpu, !VAL);
3769
3770 if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
3771 set_task_cpu(p, cpu_of(rq));
3772
3773 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf);
3774 }
3775
3776 /*
3777 * Must be after enqueueing at least one task such that
3778 * idle_cpu() does not observe a false-negative -- if it does,
3779 * it is possible for select_idle_siblings() to stack a number
3780 * of tasks on this CPU during that window.
3781 *
3782 * It is OK to clear ttwu_pending when another task is pending.
3783 * We will receive an IPI after local IRQs are enabled and then enqueue it.
3784 * Since now nr_running > 0, idle_cpu() will always get correct result.
3785 */
3786 WRITE_ONCE(rq->ttwu_pending, 0);
3787 rq_unlock_irqrestore(rq, &rf);
3788 }
3789
3790 /*
3791 * Prepare the scene for sending an IPI for a remote smp_call
3792 *
3793 * Returns true if the caller can proceed with sending the IPI.
3794 * Returns false otherwise.
3795 */
3796 bool call_function_single_prep_ipi(int cpu)
3797 {
3798 if (set_nr_if_polling(cpu_rq(cpu)->idle)) {
3799 trace_sched_wake_idle_without_ipi(cpu);
3800 return false;
3801 }
3802
3803 return true;
3804 }
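/*
 * Note: set_nr_if_polling() only succeeds when the remote idle task has
 * TIF_POLLING_NRFLAG set, i.e. it is polling need_resched() instead of
 * having entered a low-power state. Setting TIF_NEED_RESCHED is then enough
 * for it to notice the queued work on its next poll, and the IPI can be
 * elided -- which is what the tracepoint above records.
 */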
3805
3806 /*
3807 * Queue a task on the target CPUs wake_list and wake the CPU via IPI if
3808 * necessary. The wakee CPU on receipt of the IPI will queue the task
3809 * via sched_ttwu_wakeup() for activation so the wakee incurs the cost
3810 * of the wakeup instead of the waker.
3811 */
3812 static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3813 {
3814 struct rq *rq = cpu_rq(cpu);
3815
3816 p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
3817
3818 WRITE_ONCE(rq->ttwu_pending, 1);
3819 __smp_call_single_queue(cpu, &p->wake_entry.llist);
3820 }
3821
3822 void wake_up_if_idle(int cpu)
3823 {
3824 struct rq *rq = cpu_rq(cpu);
3825
3826 guard(rcu)();
3827 if (is_idle_task(rcu_dereference(rq->curr))) {
3828 guard(rq_lock_irqsave)(rq);
3829 if (is_idle_task(rq->curr))
3830 resched_curr(rq);
3831 }
3832 }
3833
3834 bool cpus_equal_capacity(int this_cpu, int that_cpu)
3835 {
3836 if (!sched_asym_cpucap_active())
3837 return true;
3838
3839 if (this_cpu == that_cpu)
3840 return true;
3841
3842 return arch_scale_cpu_capacity(this_cpu) == arch_scale_cpu_capacity(that_cpu);
3843 }
3844
3845 bool cpus_share_cache(int this_cpu, int that_cpu)
3846 {
3847 if (this_cpu == that_cpu)
3848 return true;
3849
3850 return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
3851 }
3852
3853 /*
3854 * Whether CPUs share cache resources, which means LLC on non-cluster
3855 * machines and LLC tag or L2 on machines with clusters.
3856 */
3857 bool cpus_share_resources(int this_cpu, int that_cpu)
3858 {
3859 if (this_cpu == that_cpu)
3860 return true;
3861
3862 return per_cpu(sd_share_id, this_cpu) == per_cpu(sd_share_id, that_cpu);
3863 }
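/*
 * Note: per-CPU sd_share_id is set up by the topology code to be the cluster
 * (e.g. shared L2) domain id when a cluster level exists, and falls back to
 * the LLC id otherwise, which is what makes cpus_share_resources() a stricter
 * test than cpus_share_cache() on clustered machines.
 */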
3864
3865 static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
3866 {
3867 /*
3868 * The BPF scheduler may depend on select_task_rq() being invoked during
3869 * wakeups. In addition, @p may end up executing on a different CPU
3870 * regardless of what happens in the wakeup path making the ttwu_queue
3871 * optimization less meaningful. Skip if on SCX.
3872 */
3873 if (task_on_scx(p))
3874 return false;
3875
3876 /*
3877 * Do not complicate things with the async wake_list while the CPU is
3878 * in hotplug state.
3879 */
3880 if (!cpu_active(cpu))
3881 return false;
3882
3883 /* Ensure the task will still be allowed to run on the CPU. */
3884 if (!cpumask_test_cpu(cpu, p->cpus_ptr))
3885 return false;
3886
3887 /*
3888 * If the CPU does not share cache, then queue the task on the
3889 * remote rq's wakelist to avoid accessing remote data.
3890 */
3891 if (!cpus_share_cache(smp_processor_id(), cpu))
3892 return true;
3893
3894 if (cpu == smp_processor_id())
3895 return false;
3896
3897 /*
3898 * If the wakee cpu is idle, or the task is descheduling and the
3899 * only running task on the CPU, then use the wakelist to offload
3900 * the task activation to the idle (or soon-to-be-idle) CPU as
3901 * the current CPU is likely busy. nr_running is checked to
3902 * avoid unnecessary task stacking.
3903 *
3904 * Note that we can only get here with (wakee) p->on_rq=0,
3905 * p->on_cpu can be whatever, we've done the dequeue, so
3906 * the wakee has been accounted out of ->nr_running.
3907 */
3908 if (!cpu_rq(cpu)->nr_running)
3909 return true;
3910
3911 return false;
3912 }
3913
3914 static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3915 {
3916 if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) {
3917 sched_clock_cpu(cpu); /* Sync clocks across CPUs */
3918 __ttwu_queue_wakelist(p, cpu, wake_flags);
3919 return true;
3920 }
3921
3922 return false;
3923 }
3924
3925 #else /* !CONFIG_SMP */
3926
3927 static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3928 {
3929 return false;
3930 }
3931
3932 #endif /* CONFIG_SMP */
3933
3934 static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
3935 {
3936 struct rq *rq = cpu_rq(cpu);
3937 struct rq_flags rf;
3938
3939 if (ttwu_queue_wakelist(p, cpu, wake_flags))
3940 return;
3941
3942 rq_lock(rq, &rf);
3943 update_rq_clock(rq);
3944 ttwu_do_activate(rq, p, wake_flags, &rf);
3945 rq_unlock(rq, &rf);
3946 }
3947
3948 /*
3949 * Invoked from try_to_wake_up() to check whether the task can be woken up.
3950 *
3951 * The caller holds p::pi_lock if p != current or has preemption
3952 * disabled when p == current.
3953 *
3954 * The rules of saved_state:
3955 *
3956 * The related locking code always holds p::pi_lock when updating
3957 * p::saved_state, which means the code is fully serialized in both cases.
3958 *
3959 * For PREEMPT_RT, the lock wait and lock wakeups happen via TASK_RTLOCK_WAIT.
3960 * No other bits set. This allows us to distinguish all wakeup scenarios.
3961 *
3962 * For FREEZER, the wakeup happens via TASK_FROZEN. No other bits set. This
3963 * allows us to prevent early wakeup of tasks before they can be run on
3964 * asymmetric ISA architectures (eg ARMv9).
3965 */
3966 static __always_inline
3967 bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
3968 {
3969 int match;
3970
3971 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
3972 WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) &&
3973 state != TASK_RTLOCK_WAIT);
3974 }
3975
3976 *success = !!(match = __task_state_match(p, state));
3977
3978 /*
3979 * Saved state preserves the task state across blocking on
3980 * an RT lock or TASK_FREEZABLE tasks. If the state matches,
3981 * set p::saved_state to TASK_RUNNING, but do not wake the task
3982 * because it waits for a lock wakeup or __thaw_task(). Also
3983 * indicate success because from the regular waker's point of
3984 * view this has succeeded.
3985 *
3986 * After acquiring the lock the task will restore p::__state
3987 * from p::saved_state which ensures that the regular
3988 * wakeup is not lost. The restore will also set
3989 * p::saved_state to TASK_RUNNING so any further tests will
3990 * not result in false positives vs. @success
3991 */
3992 if (match < 0)
3993 p->saved_state = TASK_RUNNING;
3994
3995 return match > 0;
3996 }
3997
3998 /*
3999 * Notes on Program-Order guarantees on SMP systems.
4000 *
4001 * MIGRATION
4002 *
4003 * The basic program-order guarantee on SMP systems is that when a task [t]
4004 * migrates, all its activity on its old CPU [c0] happens-before any subsequent
4005 * execution on its new CPU [c1].
4006 *
4007 * For migration (of runnable tasks) this is provided by the following means:
4008 *
4009 * A) UNLOCK of the rq(c0)->lock scheduling out task t
4010 * B) migration for t is required to synchronize *both* rq(c0)->lock and
4011 * rq(c1)->lock (if not at the same time, then in that order).
4012 * C) LOCK of the rq(c1)->lock scheduling in task
4013 *
4014 * Release/acquire chaining guarantees that B happens after A and C after B.
4015 * Note: the CPU doing B need not be c0 or c1
4016 *
4017 * Example:
4018 *
4019 * CPU0 CPU1 CPU2
4020 *
4021 * LOCK rq(0)->lock
4022 * sched-out X
4023 * sched-in Y
4024 * UNLOCK rq(0)->lock
4025 *
4026 * LOCK rq(0)->lock // orders against CPU0
4027 * dequeue X
4028 * UNLOCK rq(0)->lock
4029 *
4030 * LOCK rq(1)->lock
4031 * enqueue X
4032 * UNLOCK rq(1)->lock
4033 *
4034 * LOCK rq(1)->lock // orders against CPU2
4035 * sched-out Z
4036 * sched-in X
4037 * UNLOCK rq(1)->lock
4038 *
4039 *
4040 * BLOCKING -- aka. SLEEP + WAKEUP
4041 *
4042 * For blocking we (obviously) need to provide the same guarantee as for
4043 * migration. However the means are completely different as there is no lock
4044 * chain to provide order. Instead we do:
4045 *
4046 * 1) smp_store_release(X->on_cpu, 0) -- finish_task()
4047 * 2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
4048 *
4049 * Example:
4050 *
4051 * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule)
4052 *
4053 * LOCK rq(0)->lock LOCK X->pi_lock
4054 * dequeue X
4055 * sched-out X
4056 * smp_store_release(X->on_cpu, 0);
4057 *
4058 * smp_cond_load_acquire(&X->on_cpu, !VAL);
4059 * X->state = WAKING
4060 * set_task_cpu(X,2)
4061 *
4062 * LOCK rq(2)->lock
4063 * enqueue X
4064 * X->state = RUNNING
4065 * UNLOCK rq(2)->lock
4066 *
4067 * LOCK rq(2)->lock // orders against CPU1
4068 * sched-out Z
4069 * sched-in X
4070 * UNLOCK rq(2)->lock
4071 *
4072 * UNLOCK X->pi_lock
4073 * UNLOCK rq(0)->lock
4074 *
4075 *
4076 * However, for wakeups there is a second guarantee we must provide, namely we
4077 * must ensure that CONDITION=1 done by the caller can not be reordered with
4078 * accesses to the task state; see try_to_wake_up() and set_current_state().
4079 */
4080
4081 /**
4082 * try_to_wake_up - wake up a thread
4083 * @p: the thread to be awakened
4084 * @state: the mask of task states that can be woken
4085 * @wake_flags: wake modifier flags (WF_*)
4086 *
4087 * Conceptually does:
4088 *
4089 * If (@state & @p->state) @p->state = TASK_RUNNING.
4090 *
4091 * If the task was not queued/runnable, also place it back on a runqueue.
4092 *
4093 * This function is atomic against schedule() which would dequeue the task.
4094 *
4095 * It issues a full memory barrier before accessing @p->state, see the comment
4096 * with set_current_state().
4097 *
4098 * Uses p->pi_lock to serialize against concurrent wake-ups.
4099 *
4100 * Relies on p->pi_lock stabilizing:
4101 * - p->sched_class
4102 * - p->cpus_ptr
4103 * - p->sched_task_group
4104 * in order to do migration, see its use of select_task_rq()/set_task_cpu().
4105 *
4106 * Tries really hard to only take one task_rq(p)->lock for performance.
4107 * Takes rq->lock in:
4108 * - ttwu_runnable() -- old rq, unavoidable, see comment there;
4109 * - ttwu_queue() -- new rq, for enqueue of the task;
4110 * - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
4111 *
4112 * As a consequence we race really badly with just about everything. See the
4113 * many memory barriers and their comments for details.
4114 *
4115 * Return: %true if @p->state changes (an actual wakeup was done),
4116 * %false otherwise.
4117 */
4118 int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
4119 {
4120 guard(preempt)();
4121 int cpu, success = 0;
4122
4123 if (p == current) {
4124 /*
4125 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
4126 * == smp_processor_id()'. Together this means we can special
4127 * case the whole 'p->on_rq && ttwu_runnable()' case below
4128 * without taking any locks.
4129 *
4130 * Specifically, given current runs ttwu() we must be before
4131 * schedule()'s block_task(), as such this must not observe
4132 * sched_delayed.
4133 *
4134 * In particular:
4135 * - we rely on Program-Order guarantees for all the ordering,
4136 * - we're serialized against set_special_state() by virtue of
4137 * it disabling IRQs (this allows not taking ->pi_lock).
4138 */
4139 SCHED_WARN_ON(p->se.sched_delayed);
4140 if (!ttwu_state_match(p, state, &success))
4141 goto out;
4142
4143 trace_sched_waking(p);
4144 ttwu_do_wakeup(p);
4145 goto out;
4146 }
4147
4148 /*
4149 * If we are going to wake up a thread waiting for CONDITION we
4150 * need to ensure that CONDITION=1 done by the caller can not be
4151 * reordered with p->state check below. This pairs with smp_store_mb()
4152 * in set_current_state() that the waiting thread does.
4153 */
4154 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
4155 smp_mb__after_spinlock();
4156 if (!ttwu_state_match(p, state, &success))
4157 break;
4158
4159 trace_sched_waking(p);
4160
4161 /*
4162 * Ensure we load p->on_rq _after_ p->state, otherwise it would
4163 * be possible to, falsely, observe p->on_rq == 0 and get stuck
4164 * in smp_cond_load_acquire() below.
4165 *
4166 * sched_ttwu_pending() try_to_wake_up()
4167 * STORE p->on_rq = 1 LOAD p->state
4168 * UNLOCK rq->lock
4169 *
4170 * __schedule() (switch to task 'p')
4171 * LOCK rq->lock smp_rmb();
4172 * smp_mb__after_spinlock();
4173 * UNLOCK rq->lock
4174 *
4175 * [task p]
4176 * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq
4177 *
4178 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4179 * __schedule(). See the comment for smp_mb__after_spinlock().
4180 *
4181 * A similar smp_rmb() lives in __task_needs_rq_lock().
4182 */
4183 smp_rmb();
4184 if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
4185 break;
4186
4187 #ifdef CONFIG_SMP
4188 /*
4189 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
4190 * possible to, falsely, observe p->on_cpu == 0.
4191 *
4192 * One must be running (->on_cpu == 1) in order to remove oneself
4193 * from the runqueue.
4194 *
4195 * __schedule() (switch to task 'p') try_to_wake_up()
4196 * STORE p->on_cpu = 1 LOAD p->on_rq
4197 * UNLOCK rq->lock
4198 *
4199 * __schedule() (put 'p' to sleep)
4200 * LOCK rq->lock smp_rmb();
4201 * smp_mb__after_spinlock();
4202 * STORE p->on_rq = 0 LOAD p->on_cpu
4203 *
4204 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4205 * __schedule(). See the comment for smp_mb__after_spinlock().
4206 *
4207 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
4208 * schedule()'s deactivate_task() has 'happened' and p will no longer
4209 * care about its own p->state. See the comment in __schedule().
4210 */
4211 smp_acquire__after_ctrl_dep();
4212
4213 /*
4214 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
4215 * == 0), which means we need to do an enqueue, change p->state to
4216 * TASK_WAKING such that we can unlock p->pi_lock before doing the
4217 * enqueue, such as ttwu_queue_wakelist().
4218 */
4219 WRITE_ONCE(p->__state, TASK_WAKING);
4220
4221 /*
4222 * If the owning (remote) CPU is still in the middle of schedule() with
4223 * this task as prev, consider queueing p on the remote CPU's wake_list
4224 * which potentially sends an IPI instead of spinning on p->on_cpu to
4225 * let the waker make forward progress. This is safe because IRQs are
4226 * disabled and the IPI will deliver after on_cpu is cleared.
4227 *
4228 * Ensure we load task_cpu(p) after p->on_cpu:
4229 *
4230 * set_task_cpu(p, cpu);
4231 * STORE p->cpu = @cpu
4232 * __schedule() (switch to task 'p')
4233 * LOCK rq->lock
4234 * smp_mb__after_spin_lock() smp_cond_load_acquire(&p->on_cpu)
4235 * STORE p->on_cpu = 1 LOAD p->cpu
4236 *
4237 * to ensure we observe the correct CPU on which the task is currently
4238 * scheduling.
4239 */
4240 if (smp_load_acquire(&p->on_cpu) &&
4241 ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
4242 break;
4243
4244 /*
4245 * If the owning (remote) CPU is still in the middle of schedule() with
4246 * this task as prev, wait until it's done referencing the task.
4247 *
4248 * Pairs with the smp_store_release() in finish_task().
4249 *
4250 * This ensures that tasks getting woken will be fully ordered against
4251 * their previous state and preserve Program Order.
4252 */
4253 smp_cond_load_acquire(&p->on_cpu, !VAL);
4254
4255 cpu = select_task_rq(p, p->wake_cpu, wake_flags | WF_TTWU);
4256 if (task_cpu(p) != cpu) {
4257 if (p->in_iowait) {
4258 delayacct_blkio_end(p);
4259 atomic_dec(&task_rq(p)->nr_iowait);
4260 }
4261
4262 wake_flags |= WF_MIGRATED;
4263 psi_ttwu_dequeue(p);
4264 set_task_cpu(p, cpu);
4265 }
4266 #else
4267 cpu = task_cpu(p);
4268 #endif /* CONFIG_SMP */
4269
4270 ttwu_queue(p, cpu, wake_flags);
4271 }
4272 out:
4273 if (success)
4274 ttwu_stat(p, task_cpu(p), wake_flags);
4275
4276 return success;
4277 }
4278
4279 static bool __task_needs_rq_lock(struct task_struct *p)
4280 {
4281 unsigned int state = READ_ONCE(p->__state);
4282
4283 /*
4284 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when
4285 * the task is blocked. Make sure to check @state since ttwu() can drop
4286 * locks at the end, see ttwu_queue_wakelist().
4287 */
4288 if (state == TASK_RUNNING || state == TASK_WAKING)
4289 return true;
4290
4291 /*
4292 * Ensure we load p->on_rq after p->__state, otherwise it would be
4293 * possible to, falsely, observe p->on_rq == 0.
4294 *
4295 * See try_to_wake_up() for a longer comment.
4296 */
4297 smp_rmb();
4298 if (p->on_rq)
4299 return true;
4300
4301 #ifdef CONFIG_SMP
4302 /*
4303 * Ensure the task has finished __schedule() and will not be referenced
4304 * anymore. Again, see try_to_wake_up() for a longer comment.
4305 */
4306 smp_rmb();
4307 smp_cond_load_acquire(&p->on_cpu, !VAL);
4308 #endif
4309
4310 return false;
4311 }
4312
4313 /**
4314 * task_call_func - Invoke a function on task in fixed state
4315 * @p: Process for which the function is to be invoked, can be @current.
4316 * @func: Function to invoke.
4317 * @arg: Argument to function.
4318 *
4319 * Fix the task in its current state by avoiding wakeups and/or rq operations
4320 * and call @func(@arg) on it. This function can use ->on_rq and task_curr()
4321 * to work out what the state is, if required. Given that @func can be invoked
4322 * with a runqueue lock held, it had better be quite lightweight.
4323 *
4324 * Returns:
4325 * Whatever @func returns
4326 */
4327 int task_call_func(struct task_struct *p, task_call_f func, void *arg)
4328 {
4329 struct rq *rq = NULL;
4330 struct rq_flags rf;
4331 int ret;
4332
4333 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
4334
4335 if (__task_needs_rq_lock(p))
4336 rq = __task_rq_lock(p, &rf);
4337
4338 /*
4339 * At this point the task is pinned; either:
4340 * - blocked and we're holding off wakeups (pi->lock)
4341 * - woken, and we're holding off enqueue (rq->lock)
4342 * - queued, and we're holding off schedule (rq->lock)
4343 * - running, and we're holding off de-schedule (rq->lock)
4344 *
4345 * The called function (@func) can use: task_curr(), p->on_rq and
4346 * p->__state to differentiate between these states.
4347 */
4348 ret = func(p, arg);
4349
4350 if (rq)
4351 rq_unlock(rq, &rf);
4352
4353 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
4354 return ret;
4355 }
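/*
 * Illustrative sketch (hypothetical caller, not part of this file): inspect
 * a task while it is pinned in its current state. The callback may run with
 * p->pi_lock and possibly a runqueue lock held, so it must stay lightweight:
 *
 *	static int get_state_cb(struct task_struct *p, void *arg)
 *	{
 *		*(unsigned int *)arg = READ_ONCE(p->__state);
 *		return 0;
 *	}
 *
 *	unsigned int state;
 *	task_call_func(p, get_state_cb, &state);
 */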
4356
4357 /**
4358 * cpu_curr_snapshot - Return a snapshot of the currently running task
4359 * @cpu: The CPU on which to snapshot the task.
4360 *
4361 * Returns the task_struct pointer of the task "currently" running on
4362 * the specified CPU.
4363 *
4364 * If the specified CPU was offline, the return value is whatever it
4365 * is, perhaps a pointer to the task_struct structure of that CPU's idle
4366 * task, but there is no guarantee. Callers wishing a useful return
4367 * value must take some action to ensure that the specified CPU remains
4368 * online throughout.
4369 *
4370 * This function executes full memory barriers before and after fetching
4371 * the pointer, which permits the caller to confine this function's fetch
4372 * with respect to the caller's accesses to other shared variables.
4373 */
4374 struct task_struct *cpu_curr_snapshot(int cpu)
4375 {
4376 struct rq *rq = cpu_rq(cpu);
4377 struct task_struct *t;
4378 struct rq_flags rf;
4379
4380 rq_lock_irqsave(rq, &rf);
4381 smp_mb__after_spinlock(); /* Pairing determined by caller's synchronization design. */
4382 t = rcu_dereference(cpu_curr(cpu));
4383 rq_unlock_irqrestore(rq, &rf);
4384 smp_mb(); /* Pairing determined by caller's synchronization design. */
4385
4386 return t;
4387 }
4388
4389 /**
4390 * wake_up_process - Wake up a specific process
4391 * @p: The process to be woken up.
4392 *
4393 * Attempt to wake up the nominated process and move it to the set of runnable
4394 * processes.
4395 *
4396 * Return: 1 if the process was woken up, 0 if it was already running.
4397 *
4398 * This function executes a full memory barrier before accessing the task state.
4399 */
4400 int wake_up_process(struct task_struct *p)
4401 {
4402 return try_to_wake_up(p, TASK_NORMAL, 0);
4403 }
4404 EXPORT_SYMBOL(wake_up_process);
4405
4406 int wake_up_state(struct task_struct *p, unsigned int state)
4407 {
4408 return try_to_wake_up(p, state, 0);
4409 }
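/*
 * Example (illustrative): wake_up_state(p, TASK_INTERRUPTIBLE) only wakes @p
 * if it is currently in an interruptible sleep; a TASK_UNINTERRUPTIBLE
 * sleeper is left alone. wake_up_process() above covers the common case by
 * using TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE).
 */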
4410
4411 /*
4412 * Perform scheduler related setup for a newly forked process p.
4413 * p is forked by current.
4414 *
4415 * __sched_fork() is basic setup used by init_idle() too:
4416 */
4417 static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
4418 {
4419 p->on_rq = 0;
4420
4421 p->se.on_rq = 0;
4422 p->se.exec_start = 0;
4423 p->se.sum_exec_runtime = 0;
4424 p->se.prev_sum_exec_runtime = 0;
4425 p->se.nr_migrations = 0;
4426 p->se.vruntime = 0;
4427 p->se.vlag = 0;
4428 INIT_LIST_HEAD(&p->se.group_node);
4429
4430 /* A delayed task cannot be in clone(). */
4431 SCHED_WARN_ON(p->se.sched_delayed);
4432
4433 #ifdef CONFIG_FAIR_GROUP_SCHED
4434 p->se.cfs_rq = NULL;
4435 #endif
4436
4437 #ifdef CONFIG_SCHEDSTATS
4438 /* Even if schedstat is disabled, there should not be garbage */
4439 memset(&p->stats, 0, sizeof(p->stats));
4440 #endif
4441
4442 init_dl_entity(&p->dl);
4443
4444 INIT_LIST_HEAD(&p->rt.run_list);
4445 p->rt.timeout = 0;
4446 p->rt.time_slice = sched_rr_timeslice;
4447 p->rt.on_rq = 0;
4448 p->rt.on_list = 0;
4449
4450 #ifdef CONFIG_SCHED_CLASS_EXT
4451 init_scx_entity(&p->scx);
4452 #endif
4453
4454 #ifdef CONFIG_PREEMPT_NOTIFIERS
4455 INIT_HLIST_HEAD(&p->preempt_notifiers);
4456 #endif
4457
4458 #ifdef CONFIG_COMPACTION
4459 p->capture_control = NULL;
4460 #endif
4461 init_numa_balancing(clone_flags, p);
4462 #ifdef CONFIG_SMP
4463 p->wake_entry.u_flags = CSD_TYPE_TTWU;
4464 p->migration_pending = NULL;
4465 #endif
4466 init_sched_mm_cid(p);
4467 }
4468
4469 DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);
4470
4471 #ifdef CONFIG_NUMA_BALANCING
4472
4473 int sysctl_numa_balancing_mode;
4474
4475 static void __set_numabalancing_state(bool enabled)
4476 {
4477 if (enabled)
4478 static_branch_enable(&sched_numa_balancing);
4479 else
4480 static_branch_disable(&sched_numa_balancing);
4481 }
4482
4483 void set_numabalancing_state(bool enabled)
4484 {
4485 if (enabled)
4486 sysctl_numa_balancing_mode = NUMA_BALANCING_NORMAL;
4487 else
4488 sysctl_numa_balancing_mode = NUMA_BALANCING_DISABLED;
4489 __set_numabalancing_state(enabled);
4490 }
4491
4492 #ifdef CONFIG_PROC_SYSCTL
4493 static void reset_memory_tiering(void)
4494 {
4495 struct pglist_data *pgdat;
4496
4497 for_each_online_pgdat(pgdat) {
4498 pgdat->nbp_threshold = 0;
4499 pgdat->nbp_th_nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE);
4500 pgdat->nbp_th_start = jiffies_to_msecs(jiffies);
4501 }
4502 }
4503
4504 static int sysctl_numa_balancing(const struct ctl_table *table, int write,
4505 void *buffer, size_t *lenp, loff_t *ppos)
4506 {
4507 struct ctl_table t;
4508 int err;
4509 int state = sysctl_numa_balancing_mode;
4510
4511 if (write && !capable(CAP_SYS_ADMIN))
4512 return -EPERM;
4513
4514 t = *table;
4515 t.data = &state;
4516 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
4517 if (err < 0)
4518 return err;
4519 if (write) {
4520 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) &&
4521 (state & NUMA_BALANCING_MEMORY_TIERING))
4522 reset_memory_tiering();
4523 sysctl_numa_balancing_mode = state;
4524 __set_numabalancing_state(state);
4525 }
4526 return err;
4527 }
4528 #endif
4529 #endif
4530
4531 #ifdef CONFIG_SCHEDSTATS
4532
4533 DEFINE_STATIC_KEY_FALSE(sched_schedstats);
4534
4535 static void set_schedstats(bool enabled)
4536 {
4537 if (enabled)
4538 static_branch_enable(&sched_schedstats);
4539 else
4540 static_branch_disable(&sched_schedstats);
4541 }
4542
4543 void force_schedstat_enabled(void)
4544 {
4545 if (!schedstat_enabled()) {
4546 pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
4547 static_branch_enable(&sched_schedstats);
4548 }
4549 }
4550
4551 static int __init setup_schedstats(char *str)
4552 {
4553 int ret = 0;
4554 if (!str)
4555 goto out;
4556
4557 if (!strcmp(str, "enable")) {
4558 set_schedstats(true);
4559 ret = 1;
4560 } else if (!strcmp(str, "disable")) {
4561 set_schedstats(false);
4562 ret = 1;
4563 }
4564 out:
4565 if (!ret)
4566 pr_warn("Unable to parse schedstats=\n");
4567
4568 return ret;
4569 }
4570 __setup("schedstats=", setup_schedstats);
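/*
 * Example (illustrative): schedstats can be switched at boot with
 * "schedstats=enable" or "schedstats=disable" on the kernel command line,
 * or at runtime through the kernel.sched_schedstats sysctl registered below.
 */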
4571
4572 #ifdef CONFIG_PROC_SYSCTL
4573 static int sysctl_schedstats(const struct ctl_table *table, int write, void *buffer,
4574 size_t *lenp, loff_t *ppos)
4575 {
4576 struct ctl_table t;
4577 int err;
4578 int state = static_branch_likely(&sched_schedstats);
4579
4580 if (write && !capable(CAP_SYS_ADMIN))
4581 return -EPERM;
4582
4583 t = *table;
4584 t.data = &state;
4585 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
4586 if (err < 0)
4587 return err;
4588 if (write)
4589 set_schedstats(state);
4590 return err;
4591 }
4592 #endif /* CONFIG_PROC_SYSCTL */
4593 #endif /* CONFIG_SCHEDSTATS */
4594
4595 #ifdef CONFIG_SYSCTL
4596 static struct ctl_table sched_core_sysctls[] = {
4597 #ifdef CONFIG_SCHEDSTATS
4598 {
4599 .procname = "sched_schedstats",
4600 .data = NULL,
4601 .maxlen = sizeof(unsigned int),
4602 .mode = 0644,
4603 .proc_handler = sysctl_schedstats,
4604 .extra1 = SYSCTL_ZERO,
4605 .extra2 = SYSCTL_ONE,
4606 },
4607 #endif /* CONFIG_SCHEDSTATS */
4608 #ifdef CONFIG_UCLAMP_TASK
4609 {
4610 .procname = "sched_util_clamp_min",
4611 .data = &sysctl_sched_uclamp_util_min,
4612 .maxlen = sizeof(unsigned int),
4613 .mode = 0644,
4614 .proc_handler = sysctl_sched_uclamp_handler,
4615 },
4616 {
4617 .procname = "sched_util_clamp_max",
4618 .data = &sysctl_sched_uclamp_util_max,
4619 .maxlen = sizeof(unsigned int),
4620 .mode = 0644,
4621 .proc_handler = sysctl_sched_uclamp_handler,
4622 },
4623 {
4624 .procname = "sched_util_clamp_min_rt_default",
4625 .data = &sysctl_sched_uclamp_util_min_rt_default,
4626 .maxlen = sizeof(unsigned int),
4627 .mode = 0644,
4628 .proc_handler = sysctl_sched_uclamp_handler,
4629 },
4630 #endif /* CONFIG_UCLAMP_TASK */
4631 #ifdef CONFIG_NUMA_BALANCING
4632 {
4633 .procname = "numa_balancing",
4634 .data = NULL, /* filled in by handler */
4635 .maxlen = sizeof(unsigned int),
4636 .mode = 0644,
4637 .proc_handler = sysctl_numa_balancing,
4638 .extra1 = SYSCTL_ZERO,
4639 .extra2 = SYSCTL_FOUR,
4640 },
4641 #endif /* CONFIG_NUMA_BALANCING */
4642 };
4643 static int __init sched_core_sysctl_init(void)
4644 {
4645 register_sysctl_init("kernel", sched_core_sysctls);
4646 return 0;
4647 }
4648 late_initcall(sched_core_sysctl_init);
4649 #endif /* CONFIG_SYSCTL */
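/*
 * Example (illustrative): the table above is registered under
 * /proc/sys/kernel, so e.g.
 *
 *	# echo 1 > /proc/sys/kernel/numa_balancing
 *
 * selects NUMA_BALANCING_NORMAL via sysctl_numa_balancing(), which also
 * flips the sched_numa_balancing static key.
 */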
4650
4651 /*
4652 * fork()/clone()-time setup:
4653 */
4654 int sched_fork(unsigned long clone_flags, struct task_struct *p)
4655 {
4656 __sched_fork(clone_flags, p);
4657 /*
4658 * We mark the process as NEW here. This guarantees that
4659 * nobody will actually run it, and a signal or other external
4660 * event cannot wake it up and insert it on the runqueue either.
4661 */
4662 p->__state = TASK_NEW;
4663
4664 /*
4665 * Make sure we do not leak PI boosting priority to the child.
4666 */
4667 p->prio = current->normal_prio;
4668
4669 uclamp_fork(p);
4670
4671 /*
4672 * Revert to default priority/policy on fork if requested.
4673 */
4674 if (unlikely(p->sched_reset_on_fork)) {
4675 if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
4676 p->policy = SCHED_NORMAL;
4677 p->static_prio = NICE_TO_PRIO(0);
4678 p->rt_priority = 0;
4679 } else if (PRIO_TO_NICE(p->static_prio) < 0)
4680 p->static_prio = NICE_TO_PRIO(0);
4681
4682 p->prio = p->normal_prio = p->static_prio;
4683 set_load_weight(p, false);
4684 p->se.custom_slice = 0;
4685 p->se.slice = sysctl_sched_base_slice;
4686
4687 /*
4688 * We don't need the reset flag anymore after the fork. It has
4689 * fulfilled its duty:
4690 */
4691 p->sched_reset_on_fork = 0;
4692 }
4693
4694 if (dl_prio(p->prio))
4695 return -EAGAIN;
4696
4697 scx_pre_fork(p);
4698
4699 if (rt_prio(p->prio)) {
4700 p->sched_class = &rt_sched_class;
4701 #ifdef CONFIG_SCHED_CLASS_EXT
4702 } else if (task_should_scx(p)) {
4703 p->sched_class = &ext_sched_class;
4704 #endif
4705 } else {
4706 p->sched_class = &fair_sched_class;
4707 }
4708
4709 init_entity_runnable_average(&p->se);
4710
4711
4712 #ifdef CONFIG_SCHED_INFO
4713 if (likely(sched_info_on()))
4714 memset(&p->sched_info, 0, sizeof(p->sched_info));
4715 #endif
4716 #if defined(CONFIG_SMP)
4717 p->on_cpu = 0;
4718 #endif
4719 init_task_preempt_count(p);
4720 #ifdef CONFIG_SMP
4721 plist_node_init(&p->pushable_tasks, MAX_PRIO);
4722 RB_CLEAR_NODE(&p->pushable_dl_tasks);
4723 #endif
4724 return 0;
4725 }
4726
4727 int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
4728 {
4729 unsigned long flags;
4730
4731 /*
4732 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
4733 * required yet, but lockdep gets upset if rules are violated.
4734 */
4735 raw_spin_lock_irqsave(&p->pi_lock, flags);
4736 #ifdef CONFIG_CGROUP_SCHED
4737 if (1) {
4738 struct task_group *tg;
4739 tg = container_of(kargs->cset->subsys[cpu_cgrp_id],
4740 struct task_group, css);
4741 tg = autogroup_task_group(p, tg);
4742 p->sched_task_group = tg;
4743 }
4744 #endif
4745 rseq_migrate(p);
4746 /*
4747 * We're setting the CPU for the first time, we don't migrate,
4748 * so use __set_task_cpu().
4749 */
4750 __set_task_cpu(p, smp_processor_id());
4751 if (p->sched_class->task_fork)
4752 p->sched_class->task_fork(p);
4753 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4754
4755 return scx_fork(p);
4756 }
4757
4758 void sched_cancel_fork(struct task_struct *p)
4759 {
4760 scx_cancel_fork(p);
4761 }
4762
4763 void sched_post_fork(struct task_struct *p)
4764 {
4765 uclamp_post_fork(p);
4766 scx_post_fork(p);
4767 }
4768
4769 unsigned long to_ratio(u64 period, u64 runtime)
4770 {
4771 if (runtime == RUNTIME_INF)
4772 return BW_UNIT;
4773
4774 /*
4775 * Doing this here saves a lot of checks in all
4776 * the calling paths, and returning zero seems
4777 * safe for them anyway.
4778 */
4779 if (period == 0)
4780 return 0;
4781
4782 return div64_u64(runtime << BW_SHIFT, period);
4783 }
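/*
 * Worked example (illustrative): with BW_SHIFT == 20 and BW_UNIT == 1 << 20,
 * a runtime of 0.5 ms out of a 1 ms period (both in ns) maps to
 *
 *	to_ratio(1000000, 500000) == (500000 << 20) / 1000000 == 524288
 *
 * i.e. half of BW_UNIT, which lets the bandwidth code compare utilizations
 * using fixed-point arithmetic only.
 */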
4784
4785 /*
4786 * wake_up_new_task - wake up a newly created task for the first time.
4787 *
4788 * This function will do some initial scheduler statistics housekeeping
4789 * that must be done for every newly created context, then puts the task
4790 * on the runqueue and wakes it.
4791 */
4792 void wake_up_new_task(struct task_struct *p)
4793 {
4794 struct rq_flags rf;
4795 struct rq *rq;
4796
4797 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
4798 WRITE_ONCE(p->__state, TASK_RUNNING);
4799 #ifdef CONFIG_SMP
4800 /*
4801 * Fork balancing, do it here and not earlier because:
4802 * - cpus_ptr can change in the fork path
4803 * - any previously selected CPU might disappear through hotplug
4804 *
4805 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
4806 * as we're not fully set-up yet.
4807 */
4808 p->recent_used_cpu = task_cpu(p);
4809 rseq_migrate(p);
4810 __set_task_cpu(p, select_task_rq(p, task_cpu(p), WF_FORK));
4811 #endif
4812 rq = __task_rq_lock(p, &rf);
4813 update_rq_clock(rq);
4814 post_init_entity_util_avg(p);
4815
4816 activate_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_INITIAL);
4817 trace_sched_wakeup_new(p);
4818 wakeup_preempt(rq, p, WF_FORK);
4819 #ifdef CONFIG_SMP
4820 if (p->sched_class->task_woken) {
4821 /*
4822 * Nothing relies on rq->lock after this, so it's fine to
4823 * drop it.
4824 */
4825 rq_unpin_lock(rq, &rf);
4826 p->sched_class->task_woken(rq, p);
4827 rq_repin_lock(rq, &rf);
4828 }
4829 #endif
4830 task_rq_unlock(rq, p, &rf);
4831 }
4832
4833 #ifdef CONFIG_PREEMPT_NOTIFIERS
4834
4835 static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
4836
4837 void preempt_notifier_inc(void)
4838 {
4839 static_branch_inc(&preempt_notifier_key);
4840 }
4841 EXPORT_SYMBOL_GPL(preempt_notifier_inc);
4842
4843 void preempt_notifier_dec(void)
4844 {
4845 static_branch_dec(&preempt_notifier_key);
4846 }
4847 EXPORT_SYMBOL_GPL(preempt_notifier_dec);
4848
4849 /**
4850 * preempt_notifier_register - tell me when current is being preempted & rescheduled
4851 * @notifier: notifier struct to register
4852 */
4853 void preempt_notifier_register(struct preempt_notifier *notifier)
4854 {
4855 if (!static_branch_unlikely(&preempt_notifier_key))
4856 WARN(1, "registering preempt_notifier while notifiers disabled\n");
4857
4858 hlist_add_head(&notifier->link, &current->preempt_notifiers);
4859 }
4860 EXPORT_SYMBOL_GPL(preempt_notifier_register);
4861
4862 /**
4863 * preempt_notifier_unregister - no longer interested in preemption notifications
4864 * @notifier: notifier struct to unregister
4865 *
4866 * This is *not* safe to call from within a preemption notifier.
4867 */
4868 void preempt_notifier_unregister(struct preempt_notifier *notifier)
4869 {
4870 hlist_del(&notifier->link);
4871 }
4872 EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
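/*
 * Illustrative sketch (hypothetical user, not part of this file): a consumer
 * such as KVM embeds a preempt_notifier in its per-task context and registers
 * it from the task that wants the callbacks:
 *
 *	static void my_sched_in(struct preempt_notifier *pn, int cpu) { ... }
 *	static void my_sched_out(struct preempt_notifier *pn,
 *				 struct task_struct *next) { ... }
 *
 *	static struct preempt_ops my_preempt_ops = {
 *		.sched_in	= my_sched_in,
 *		.sched_out	= my_sched_out,
 *	};
 *
 *	preempt_notifier_inc();
 *	preempt_notifier_init(&ctx->pn, &my_preempt_ops);
 *	preempt_notifier_register(&ctx->pn);
 *
 * The callbacks fire from the context-switch path with preemption disabled,
 * so they must not sleep.
 */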
4873
4874 static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
4875 {
4876 struct preempt_notifier *notifier;
4877
4878 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
4879 notifier->ops->sched_in(notifier, raw_smp_processor_id());
4880 }
4881
4882 static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
4883 {
4884 if (static_branch_unlikely(&preempt_notifier_key))
4885 __fire_sched_in_preempt_notifiers(curr);
4886 }
4887
4888 static void
4889 __fire_sched_out_preempt_notifiers(struct task_struct *curr,
4890 struct task_struct *next)
4891 {
4892 struct preempt_notifier *notifier;
4893
4894 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
4895 notifier->ops->sched_out(notifier, next);
4896 }
4897
4898 static __always_inline void
4899 fire_sched_out_preempt_notifiers(struct task_struct *curr,
4900 struct task_struct *next)
4901 {
4902 if (static_branch_unlikely(&preempt_notifier_key))
4903 __fire_sched_out_preempt_notifiers(curr, next);
4904 }
4905
4906 #else /* !CONFIG_PREEMPT_NOTIFIERS */
4907
4908 static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
4909 {
4910 }
4911
4912 static inline void
4913 fire_sched_out_preempt_notifiers(struct task_struct *curr,
4914 struct task_struct *next)
4915 {
4916 }
4917
4918 #endif /* CONFIG_PREEMPT_NOTIFIERS */
4919
4920 static inline void prepare_task(struct task_struct *next)
4921 {
4922 #ifdef CONFIG_SMP
4923 /*
4924 * Claim the task as running, we do this before switching to it
4925 * such that any running task will have this set.
4926 *
4927 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and
4928 * its ordering comment.
4929 */
4930 WRITE_ONCE(next->on_cpu, 1);
4931 #endif
4932 }
4933
4934 static inline void finish_task(struct task_struct *prev)
4935 {
4936 #ifdef CONFIG_SMP
4937 /*
4938 * This must be the very last reference to @prev from this CPU. After
4939 * p->on_cpu is cleared, the task can be moved to a different CPU. We
4940 * must ensure this doesn't happen until the switch is completely
4941 * finished.
4942 *
4943 * In particular, the load of prev->state in finish_task_switch() must
4944 * happen before this.
4945 *
4946 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
4947 */
4948 smp_store_release(&prev->on_cpu, 0);
4949 #endif
4950 }
4951
4952 #ifdef CONFIG_SMP
4953
4954 static void do_balance_callbacks(struct rq *rq, struct balance_callback *head)
4955 {
4956 void (*func)(struct rq *rq);
4957 struct balance_callback *next;
4958
4959 lockdep_assert_rq_held(rq);
4960
4961 while (head) {
4962 func = (void (*)(struct rq *))head->func;
4963 next = head->next;
4964 head->next = NULL;
4965 head = next;
4966
4967 func(rq);
4968 }
4969 }
4970
4971 static void balance_push(struct rq *rq);
4972
4973 /*
4974 * balance_push_callback is a right abuse of the callback interface and plays
4975 * by significantly different rules.
4976 *
4977 * Where the normal balance_callback's purpose is to be run in the same context
4978 * that queued it (only later, when it's safe to drop rq->lock again),
4979 * balance_push_callback is specifically targeted at __schedule().
4980 *
4981 * This abuse is tolerated because it places all the unlikely/odd cases behind
4982 * a single test, namely: rq->balance_callback == NULL.
4983 */
4984 struct balance_callback balance_push_callback = {
4985 .next = NULL,
4986 .func = balance_push,
4987 };
4988
4989 static inline struct balance_callback *
4990 __splice_balance_callbacks(struct rq *rq, bool split)
4991 {
4992 struct balance_callback *head = rq->balance_callback;
4993
4994 if (likely(!head))
4995 return NULL;
4996
4997 lockdep_assert_rq_held(rq);
4998 /*
4999 * Must not take balance_push_callback off the list when
5000 * splice_balance_callbacks() and balance_callbacks() are not
5001 * in the same rq->lock section.
5002 *
5003 * In that case it would be possible for __schedule() to interleave
5004 * and observe the list empty.
5005 */
5006 if (split && head == &balance_push_callback)
5007 head = NULL;
5008 else
5009 rq->balance_callback = NULL;
5010
5011 return head;
5012 }
5013
5014 struct balance_callback *splice_balance_callbacks(struct rq *rq)
5015 {
5016 return __splice_balance_callbacks(rq, true);
5017 }
5018
5019 static void __balance_callbacks(struct rq *rq)
5020 {
5021 do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
5022 }
5023
5024 void balance_callbacks(struct rq *rq, struct balance_callback *head)
5025 {
5026 unsigned long flags;
5027
5028 if (unlikely(head)) {
5029 raw_spin_rq_lock_irqsave(rq, flags);
5030 do_balance_callbacks(rq, head);
5031 raw_spin_rq_unlock_irqrestore(rq, flags);
5032 }
5033 }
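/*
 * Illustrative sketch (hypothetical user; assumes queue_balance_callback()
 * from kernel/sched/sched.h): a sched class queues work that must wait for
 * rq->lock to be dropped, and it then runs via __balance_callbacks() or
 * balance_callbacks() above:
 *
 *	static DEFINE_PER_CPU(struct balance_callback, my_push_head);
 *
 *	static void my_push_tasks(struct rq *rq) { ... }
 *
 *	// with rq->lock held:
 *	queue_balance_callback(rq, &per_cpu(my_push_head, rq->cpu),
 *			       my_push_tasks);
 */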
5034
5035 #else
5036
5037 static inline void __balance_callbacks(struct rq *rq)
5038 {
5039 }
5040
5041 #endif
5042
5043 static inline void
5044 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
5045 {
5046 /*
5047 * Since the runqueue lock will be released by the next
5048 * task (which is an invalid locking op but in the case
5049 * of the scheduler it's an obvious special-case), we
5050 * do an early lockdep release here:
5051 */
5052 rq_unpin_lock(rq, rf);
5053 spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_);
5054 #ifdef CONFIG_DEBUG_SPINLOCK
5055 /* this is a valid case when another task releases the spinlock */
5056 rq_lockp(rq)->owner = next;
5057 #endif
5058 }
5059
5060 static inline void finish_lock_switch(struct rq *rq)
5061 {
5062 /*
5063 * If we are tracking spinlock dependencies then we have to
5064 * fix up the runqueue lock - which gets 'carried over' from
5065 * prev into current:
5066 */
5067 spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_);
5068 __balance_callbacks(rq);
5069 raw_spin_rq_unlock_irq(rq);
5070 }
5071
5072 /*
5073 * NOP if the arch has not defined these:
5074 */
5075
5076 #ifndef prepare_arch_switch
5077 # define prepare_arch_switch(next) do { } while (0)
5078 #endif
5079
5080 #ifndef finish_arch_post_lock_switch
5081 # define finish_arch_post_lock_switch() do { } while (0)
5082 #endif
5083
5084 static inline void kmap_local_sched_out(void)
5085 {
5086 #ifdef CONFIG_KMAP_LOCAL
5087 if (unlikely(current->kmap_ctrl.idx))
5088 __kmap_local_sched_out();
5089 #endif
5090 }
5091
5092 static inline void kmap_local_sched_in(void)
5093 {
5094 #ifdef CONFIG_KMAP_LOCAL
5095 if (unlikely(current->kmap_ctrl.idx))
5096 __kmap_local_sched_in();
5097 #endif
5098 }
5099
5100 /**
5101 * prepare_task_switch - prepare to switch tasks
5102 * @rq: the runqueue preparing to switch
5103 * @prev: the current task that is being switched out
5104 * @next: the task we are going to switch to.
5105 *
5106 * This is called with the rq lock held and interrupts off. It must
5107 * be paired with a subsequent finish_task_switch after the context
5108 * switch.
5109 *
5110 * prepare_task_switch sets up locking and calls architecture specific
5111 * hooks.
5112 */
5113 static inline void
5114 prepare_task_switch(struct rq *rq, struct task_struct *prev,
5115 struct task_struct *next)
5116 {
5117 kcov_prepare_switch(prev);
5118 sched_info_switch(rq, prev, next);
5119 perf_event_task_sched_out(prev, next);
5120 rseq_preempt(prev);
5121 fire_sched_out_preempt_notifiers(prev, next);
5122 kmap_local_sched_out();
5123 prepare_task(next);
5124 prepare_arch_switch(next);
5125 }
5126
5127 /**
5128 * finish_task_switch - clean up after a task-switch
5129 * @prev: the thread we just switched away from.
5130 *
5131 * finish_task_switch must be called after the context switch, paired
5132 * with a prepare_task_switch call before the context switch.
5133 * finish_task_switch will reconcile locking set up by prepare_task_switch,
5134 * and do any other architecture-specific cleanup actions.
5135 *
5136 * Note that we may have delayed dropping an mm in context_switch(). If
5137 * so, we finish that here outside of the runqueue lock. (Doing it
5138 * with the lock held can cause deadlocks; see schedule() for
5139 * details.)
5140 *
5141 * The context switch has flipped the stack from under us and restored the
5142 * local variables which were saved when this task called schedule() in the
5143 * past. 'prev == current' is still correct but we need to recalculate this_rq
5144 * because prev may have moved to another CPU.
5145 */
5146 static struct rq *finish_task_switch(struct task_struct *prev)
5147 __releases(rq->lock)
5148 {
5149 struct rq *rq = this_rq();
5150 struct mm_struct *mm = rq->prev_mm;
5151 unsigned int prev_state;
5152
5153 /*
5154 * The previous task will have left us with a preempt_count of 2
5155 * because it left us after:
5156 *
5157 * schedule()
5158 * preempt_disable(); // 1
5159 * __schedule()
5160 * raw_spin_lock_irq(&rq->lock) // 2
5161 *
5162 * Also, see FORK_PREEMPT_COUNT.
5163 */
5164 if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
5165 "corrupted preempt_count: %s/%d/0x%x\n",
5166 current->comm, current->pid, preempt_count()))
5167 preempt_count_set(FORK_PREEMPT_COUNT);
5168
5169 rq->prev_mm = NULL;
5170
5171 /*
5172 * A task struct has one reference for the use as "current".
5173 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
5174 * schedule one last time. The schedule call will never return, and
5175 * the scheduled task must drop that reference.
5176 *
5177 * We must observe prev->state before clearing prev->on_cpu (in
5178 * finish_task), otherwise a concurrent wakeup can get prev
5179 * running on another CPU and we could race with its RUNNING -> DEAD
5180 * transition, resulting in a double drop.
5181 */
5182 prev_state = READ_ONCE(prev->__state);
5183 vtime_task_switch(prev);
5184 perf_event_task_sched_in(prev, current);
5185 finish_task(prev);
5186 tick_nohz_task_switch();
5187 finish_lock_switch(rq);
5188 finish_arch_post_lock_switch();
5189 kcov_finish_switch(current);
5190 /*
5191 * kmap_local_sched_out() is invoked with rq::lock held and
5192 * interrupts disabled. There is no requirement for that, but the
5193 * sched out code does not have an interrupt enabled section.
5194 * Restoring the maps on sched in does not require interrupts being
5195 * disabled either.
5196 */
5197 kmap_local_sched_in();
5198
5199 fire_sched_in_preempt_notifiers(current);
5200 /*
5201 * When switching through a kernel thread, the loop in
5202 * membarrier_{private,global}_expedited() may have observed that
5203 * kernel thread and not issued an IPI. It is therefore possible to
5204 * schedule between user->kernel->user threads without passing through
5205 * switch_mm(). Membarrier requires a barrier after storing to
5206 * rq->curr, before returning to userspace, so provide them here:
5207 *
5208 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
5209 * provided by mmdrop_lazy_tlb(),
5210 * - a sync_core for SYNC_CORE.
5211 */
5212 if (mm) {
5213 membarrier_mm_sync_core_before_usermode(mm);
5214 mmdrop_lazy_tlb_sched(mm);
5215 }
5216
5217 if (unlikely(prev_state == TASK_DEAD)) {
5218 if (prev->sched_class->task_dead)
5219 prev->sched_class->task_dead(prev);
5220
5221 /* Task is done with its stack. */
5222 put_task_stack(prev);
5223
5224 put_task_struct_rcu_user(prev);
5225 }
5226
5227 return rq;
5228 }
5229
5230 /**
5231 * schedule_tail - first thing a freshly forked thread must call.
5232 * @prev: the thread we just switched away from.
5233 */
5234 asmlinkage __visible void schedule_tail(struct task_struct *prev)
5235 __releases(rq->lock)
5236 {
5237 /*
5238 * New tasks start with FORK_PREEMPT_COUNT, see there and
5239 * finish_task_switch() for details.
5240 *
5241 * finish_task_switch() will drop rq->lock() and lower preempt_count
5242 * and the preempt_enable() will end up enabling preemption (on
5243 * PREEMPT_COUNT kernels).
5244 */
5245
5246 finish_task_switch(prev);
5247 preempt_enable();
5248
5249 if (current->set_child_tid)
5250 put_user(task_pid_vnr(current), current->set_child_tid);
5251
5252 calculate_sigpending();
5253 }
5254
5255 /*
5256 * context_switch - switch to the new MM and the new thread's register state.
5257 */
5258 static __always_inline struct rq *
5259 context_switch(struct rq *rq, struct task_struct *prev,
5260 struct task_struct *next, struct rq_flags *rf)
5261 {
5262 prepare_task_switch(rq, prev, next);
5263
5264 /*
5265 * For paravirt, this is coupled with an exit in switch_to to
5266 * combine the page table reload and the switch backend into
5267 * one hypercall.
5268 */
5269 arch_start_context_switch(prev);
5270
5271 /*
5272 * kernel -> kernel lazy + transfer active
5273 * user -> kernel lazy + mmgrab_lazy_tlb() active
5274 *
5275 * kernel -> user switch + mmdrop_lazy_tlb() active
5276 * user -> user switch
5277 *
5278 * switch_mm_cid() needs to be updated if the barriers provided
5279 * by context_switch() are modified.
5280 */
5281 if (!next->mm) { // to kernel
5282 enter_lazy_tlb(prev->active_mm, next);
5283
5284 next->active_mm = prev->active_mm;
5285 if (prev->mm) // from user
5286 mmgrab_lazy_tlb(prev->active_mm);
5287 else
5288 prev->active_mm = NULL;
5289 } else { // to user
5290 membarrier_switch_mm(rq, prev->active_mm, next->mm);
5291 /*
5292 * sys_membarrier() requires an smp_mb() between setting
5293 * rq->curr / membarrier_switch_mm() and returning to userspace.
5294 *
5295 * The below provides this either through switch_mm(), or in
5296 * case 'prev->active_mm == next->mm' through
5297 * finish_task_switch()'s mmdrop().
5298 */
5299 switch_mm_irqs_off(prev->active_mm, next->mm, next);
5300 lru_gen_use_mm(next->mm);
5301
5302 if (!prev->mm) { // from kernel
5303 /* will mmdrop_lazy_tlb() in finish_task_switch(). */
5304 rq->prev_mm = prev->active_mm;
5305 prev->active_mm = NULL;
5306 }
5307 }
5308
5309 /* switch_mm_cid() requires the memory barriers above. */
5310 switch_mm_cid(rq, prev, next);
5311
5312 prepare_lock_switch(rq, next, rf);
5313
5314 /* Here we just switch the register state and the stack. */
5315 switch_to(prev, next, prev);
5316 barrier();
5317
5318 return finish_task_switch(prev);
5319 }
5320
5321 /*
5322 * nr_running and nr_context_switches:
5323 *
5324 * externally visible scheduler statistics: current number of runnable
5325 * threads, total number of context switches performed since bootup.
5326 */
5327 unsigned int nr_running(void)
5328 {
5329 unsigned int i, sum = 0;
5330
5331 for_each_online_cpu(i)
5332 sum += cpu_rq(i)->nr_running;
5333
5334 return sum;
5335 }
5336
5337 /*
5338 * Check if only the current task is running on the CPU.
5339 *
5340 * Caution: this function does not check that the caller has disabled
5341 * preemption, thus the result might have a time-of-check-to-time-of-use
5342 * race. The caller is responsible to use it correctly, for example:
5343 *
5344 * - from a non-preemptible section (of course)
5345 *
5346 * - from a thread that is bound to a single CPU
5347 *
5348 * - in a loop with very short iterations (e.g. a polling loop)
5349 */
5350 bool single_task_running(void)
5351 {
5352 return raw_rq()->nr_running == 1;
5353 }
5354 EXPORT_SYMBOL(single_task_running);
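/*
 * Example (illustrative): a short polling loop that only keeps spinning while
 * nothing else wants this CPU -- the usage pattern the comment above allows:
 *
 *	while (!condition_ready()) {
 *		if (need_resched() || !single_task_running())
 *			break;
 *		cpu_relax();
 *	}
 *
 * condition_ready() is a hypothetical predicate; the check is re-evaluated
 * every iteration, so the time-of-check-to-time-of-use window stays short.
 */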
5355
5356 unsigned long long nr_context_switches_cpu(int cpu)
5357 {
5358 return cpu_rq(cpu)->nr_switches;
5359 }
5360
5361 unsigned long long nr_context_switches(void)
5362 {
5363 int i;
5364 unsigned long long sum = 0;
5365
5366 for_each_possible_cpu(i)
5367 sum += cpu_rq(i)->nr_switches;
5368
5369 return sum;
5370 }
5371
5372 /*
5373 * Consumers of these two interfaces, like for example the cpuidle menu
5374 * governor, are using nonsensical data: they prefer shallow idle state selection
5375 * for a CPU that has IO-wait, even though the waiting task might not even end up
5376 * running on that CPU when it does become runnable.
5377 */
5378
5379 unsigned int nr_iowait_cpu(int cpu)
5380 {
5381 return atomic_read(&cpu_rq(cpu)->nr_iowait);
5382 }
5383
5384 /*
5385 * IO-wait accounting, and how it's mostly bollocks (on SMP).
5386 *
5387 * The idea behind IO-wait accounting is to account the idle time that we could
5388 * have spent running if it were not for IO. That is, if we were to improve the
5389 * storage performance, we'd have a proportional reduction in IO-wait time.
5390 *
5391 * This all works nicely on UP, where, when a task blocks on IO, we account
5392 * idle time as IO-wait, because if the storage were faster, it could've been
5393 * running and we'd not be idle.
5394 *
5395 * This has been extended to SMP, by doing the same for each CPU. This however
5396 * is broken.
5397 *
5398 * Imagine for instance the case where two tasks block on one CPU: only that
5399 * CPU will have IO-wait accounted, while the other has regular idle. Even
5400 * though, if the storage were faster, both could've run at the same time,
5401 * utilising both CPUs.
5402 *
5403 * This means that, when looking globally, the current IO-wait accounting on
5404 * SMP is a lower bound, by reason of under-accounting.
5405 *
5406 * Worse, since the numbers are provided per CPU, they are sometimes
5407 * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
5408 * associated with any one particular CPU; it can wake up on a different CPU than
5409 * the one it blocked on. This means the per-CPU IO-wait number is meaningless.
5410 *
5411 * Task CPU affinities can make all that even more 'interesting'.
5412 */
5413
5414 unsigned int nr_iowait(void)
5415 {
5416 unsigned int i, sum = 0;
5417
5418 for_each_possible_cpu(i)
5419 sum += nr_iowait_cpu(i);
5420
5421 return sum;
5422 }
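/*
 * Illustrative note (an assumption about consumers, not asserted by this file):
 * these sums feed generic reporting interfaces - for example, /proc/stat's
 * "procs_blocked" figure is commonly derived from nr_iowait() - so the caveats
 * in the comment above apply directly to whatever userspace reads there.
 */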
5423
5424 #ifdef CONFIG_SMP
5425
5426 /*
5427 * sched_exec - execve() is a valuable balancing opportunity, because at
5428 * this point the task has the smallest effective memory and cache footprint.
5429 */
5430 void sched_exec(void)
5431 {
5432 struct task_struct *p = current;
5433 struct migration_arg arg;
5434 int dest_cpu;
5435
5436 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
5437 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC);
5438 if (dest_cpu == smp_processor_id())
5439 return;
5440
5441 if (unlikely(!cpu_active(dest_cpu)))
5442 return;
5443
5444 arg = (struct migration_arg){ p, dest_cpu };
5445 }
5446 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
5447 }
5448
5449 #endif
5450
5451 DEFINE_PER_CPU(struct kernel_stat, kstat);
5452 DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
5453
5454 EXPORT_PER_CPU_SYMBOL(kstat);
5455 EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
5456
5457 /*
5458 * The function fair_sched_class.update_curr accesses the struct curr
5459 * and its field curr->exec_start; when called from task_sched_runtime(),
5460 * we observe a high rate of cache misses in practice.
5461 * Prefetching this data results in improved performance.
5462 */
5463 static inline void prefetch_curr_exec_start(struct task_struct *p)
5464 {
5465 #ifdef CONFIG_FAIR_GROUP_SCHED
5466 struct sched_entity *curr = p->se.cfs_rq->curr;
5467 #else
5468 struct sched_entity *curr = task_rq(p)->cfs.curr;
5469 #endif
5470 prefetch(curr);
5471 prefetch(&curr->exec_start);
5472 }
5473
5474 /*
5475 * Return accounted runtime for the task.
5476 * In case the task is currently running, return the runtime plus current's
5477 * pending runtime that has not been accounted yet.
5478 */
5479 unsigned long long task_sched_runtime(struct task_struct *p)
5480 {
5481 struct rq_flags rf;
5482 struct rq *rq;
5483 u64 ns;
5484
5485 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
5486 /*
5487 * 64-bit doesn't need locks to atomically read a 64-bit value.
5488 * So we have an optimization chance when the task's delta_exec is 0.
5489 * Reading ->on_cpu is racy, but this is OK.
5490 *
5491 * If we race with it leaving CPU, we'll take a lock. So we're correct.
5492 * If we race with it entering CPU, unaccounted time is 0. This is
5493 * indistinguishable from the read occurring a few cycles earlier.
5494 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
5495 * been accounted, so we're correct here as well.
5496 */
5497 if (!p->on_cpu || !task_on_rq_queued(p))
5498 return p->se.sum_exec_runtime;
5499 #endif
5500
5501 rq = task_rq_lock(p, &rf);
5502 /*
5503 * Must be ->curr _and_ ->on_rq. If dequeued, we would
5504 * project cycles that may never be accounted to this
5505 * thread, breaking clock_gettime().
5506 */
5507 if (task_current(rq, p) && task_on_rq_queued(p)) {
5508 prefetch_curr_exec_start(p);
5509 update_rq_clock(rq);
5510 p->sched_class->update_curr(rq);
5511 }
5512 ns = p->se.sum_exec_runtime;
5513 task_rq_unlock(rq, p, &rf);
5514
5515 return ns;
5516 }
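/*
 * Illustrative (hypothetical) userspace consumer: per-thread CPU clocks are
 * typically serviced from this accounting, e.g.
 *
 *	struct timespec ts;
 *	clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
 *
 * which is why the update above is careful not to project cycles onto a task
 * that is no longer queued (see the clock_gettime() note in the comment).
 */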
5517
5518 #ifdef CONFIG_SCHED_DEBUG
5519 static u64 cpu_resched_latency(struct rq *rq)
5520 {
5521 int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms);
5522 u64 resched_latency, now = rq_clock(rq);
5523 static bool warned_once;
5524
5525 if (sysctl_resched_latency_warn_once && warned_once)
5526 return 0;
5527
5528 if (!need_resched() || !latency_warn_ms)
5529 return 0;
5530
5531 if (system_state == SYSTEM_BOOTING)
5532 return 0;
5533
5534 if (!rq->last_seen_need_resched_ns) {
5535 rq->last_seen_need_resched_ns = now;
5536 rq->ticks_without_resched = 0;
5537 return 0;
5538 }
5539
5540 rq->ticks_without_resched++;
5541 resched_latency = now - rq->last_seen_need_resched_ns;
5542 if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC)
5543 return 0;
5544
5545 warned_once = true;
5546
5547 return resched_latency;
5548 }
5549
5550 static int __init setup_resched_latency_warn_ms(char *str)
5551 {
5552 long val;
5553
5554 if ((kstrtol(str, 0, &val))) {
5555 pr_warn("Unable to set resched_latency_warn_ms\n");
5556 return 1;
5557 }
5558
5559 sysctl_resched_latency_warn_ms = val;
5560 return 1;
5561 }
5562 __setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms);
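/*
 * Illustrative usage (standard __setup() command-line semantics assumed):
 * booting with "resched_latency_warn_ms=200" raises the warning threshold to
 * 200ms, while "resched_latency_warn_ms=0" disables the check entirely, since
 * cpu_resched_latency() bails out when latency_warn_ms is zero.
 */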
5563 #else
5564 static inline u64 cpu_resched_latency(struct rq *rq) { return 0; }
5565 #endif /* CONFIG_SCHED_DEBUG */
5566
5567 /*
5568 * This function gets called by the timer code, with HZ frequency.
5569 * We call it with interrupts disabled.
5570 */
5571 void sched_tick(void)
5572 {
5573 int cpu = smp_processor_id();
5574 struct rq *rq = cpu_rq(cpu);
5575 struct task_struct *curr;
5576 struct rq_flags rf;
5577 unsigned long hw_pressure;
5578 u64 resched_latency;
5579
5580 if (housekeeping_cpu(cpu, HK_TYPE_TICK))
5581 arch_scale_freq_tick();
5582
5583 sched_clock_tick();
5584
5585 rq_lock(rq, &rf);
5586
5587 curr = rq->curr;
5588 psi_account_irqtime(rq, curr, NULL);
5589
5590 update_rq_clock(rq);
5591 hw_pressure = arch_scale_hw_pressure(cpu_of(rq));
5592 update_hw_load_avg(rq_clock_task(rq), rq, hw_pressure);
5593 curr->sched_class->task_tick(rq, curr, 0);
5594 if (sched_feat(LATENCY_WARN))
5595 resched_latency = cpu_resched_latency(rq);
5596 calc_global_load_tick(rq);
5597 sched_core_tick(rq);
5598 task_tick_mm_cid(rq, curr);
5599 scx_tick(rq);
5600
5601 rq_unlock(rq, &rf);
5602
5603 if (sched_feat(LATENCY_WARN) && resched_latency)
5604 resched_latency_warn(cpu, resched_latency);
5605
5606 perf_event_task_tick();
5607
5608 if (curr->flags & PF_WQ_WORKER)
5609 wq_worker_tick(curr);
5610
5611 #ifdef CONFIG_SMP
5612 if (!scx_switched_all()) {
5613 rq->idle_balance = idle_cpu(cpu);
5614 sched_balance_trigger(rq);
5615 }
5616 #endif
5617 }
5618
5619 #ifdef CONFIG_NO_HZ_FULL
5620
5621 struct tick_work {
5622 int cpu;
5623 atomic_t state;
5624 struct delayed_work work;
5625 };
5626 /* Values for ->state, see diagram below. */
5627 #define TICK_SCHED_REMOTE_OFFLINE 0
5628 #define TICK_SCHED_REMOTE_OFFLINING 1
5629 #define TICK_SCHED_REMOTE_RUNNING 2
5630
5631 /*
5632 * State diagram for ->state:
5633 *
5634 *
5635 * TICK_SCHED_REMOTE_OFFLINE
5636 * | ^
5637 * | |
5638 * | | sched_tick_remote()
5639 * | |
5640 * | |
5641 * +--TICK_SCHED_REMOTE_OFFLINING
5642 * | ^
5643 * | |
5644 * sched_tick_start() | | sched_tick_stop()
5645 * | |
5646 * V |
5647 * TICK_SCHED_REMOTE_RUNNING
5648 *
5649 *
5650 * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
5651 * and sched_tick_start() are happy to leave the state in RUNNING.
5652 */
5653
5654 static struct tick_work __percpu *tick_work_cpu;
5655
5656 static void sched_tick_remote(struct work_struct *work)
5657 {
5658 struct delayed_work *dwork = to_delayed_work(work);
5659 struct tick_work *twork = container_of(dwork, struct tick_work, work);
5660 int cpu = twork->cpu;
5661 struct rq *rq = cpu_rq(cpu);
5662 int os;
5663
5664 /*
5665 * Handle the tick only if it appears the remote CPU is running in full
5666 * dynticks mode. The check is racy by nature, but missing a tick or
5667 * having one too many is no big deal because the scheduler tick updates
5668 * statistics and checks timeslices in a time-independent way, regardless
5669 * of when exactly it is running.
5670 */
5671 if (tick_nohz_tick_stopped_cpu(cpu)) {
5672 guard(rq_lock_irq)(rq);
5673 struct task_struct *curr = rq->curr;
5674
5675 if (cpu_online(cpu)) {
5676 update_rq_clock(rq);
5677
5678 if (!is_idle_task(curr)) {
5679 /*
5680 * Make sure the next tick runs within a
5681 * reasonable amount of time.
5682 */
5683 u64 delta = rq_clock_task(rq) - curr->se.exec_start;
5684 WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
5685 }
5686 curr->sched_class->task_tick(rq, curr, 0);
5687
5688 calc_load_nohz_remote(rq);
5689 }
5690 }
5691
5692 /*
5693 * Run the remote tick once per second (1Hz). This arbitrary
5694 * frequency is low enough to avoid overload but high enough
5695 * to keep scheduler-internal stats reasonably up to date. But
5696 * first update state to reflect hotplug activity if required.
5697 */
5698 os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
5699 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
5700 if (os == TICK_SCHED_REMOTE_RUNNING)
5701 queue_delayed_work(system_unbound_wq, dwork, HZ);
5702 }
5703
5704 static void sched_tick_start(int cpu)
5705 {
5706 int os;
5707 struct tick_work *twork;
5708
5709 if (housekeeping_cpu(cpu, HK_TYPE_TICK))
5710 return;
5711
5712 WARN_ON_ONCE(!tick_work_cpu);
5713
5714 twork = per_cpu_ptr(tick_work_cpu, cpu);
5715 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
5716 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
5717 if (os == TICK_SCHED_REMOTE_OFFLINE) {
5718 twork->cpu = cpu;
5719 INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
5720 queue_delayed_work(system_unbound_wq, &twork->work, HZ);
5721 }
5722 }
5723
5724 #ifdef CONFIG_HOTPLUG_CPU
5725 static void sched_tick_stop(int cpu)
5726 {
5727 struct tick_work *twork;
5728 int os;
5729
5730 if (housekeeping_cpu(cpu, HK_TYPE_TICK))
5731 return;
5732
5733 WARN_ON_ONCE(!tick_work_cpu);
5734
5735 twork = per_cpu_ptr(tick_work_cpu, cpu);
5736 /* There cannot be competing actions, but don't rely on stop-machine. */
5737 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
5738 WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
5739 /* Don't cancel, as this would mess up the state machine. */
5740 }
5741 #endif /* CONFIG_HOTPLUG_CPU */
5742
5743 int __init sched_tick_offload_init(void)
5744 {
5745 tick_work_cpu = alloc_percpu(struct tick_work);
5746 BUG_ON(!tick_work_cpu);
5747 return 0;
5748 }
5749
5750 #else /* !CONFIG_NO_HZ_FULL */
5751 static inline void sched_tick_start(int cpu) { }
5752 static inline void sched_tick_stop(int cpu) { }
5753 #endif
5754
5755 #if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
5756 defined(CONFIG_TRACE_PREEMPT_TOGGLE))
5757 /*
5758 * If the value passed in is equal to the current preempt count
5759 * then we just disabled preemption. Start timing the latency.
5760 */
5761 static inline void preempt_latency_start(int val)
5762 {
5763 if (preempt_count() == val) {
5764 unsigned long ip = get_lock_parent_ip();
5765 #ifdef CONFIG_DEBUG_PREEMPT
5766 current->preempt_disable_ip = ip;
5767 #endif
5768 trace_preempt_off(CALLER_ADDR0, ip);
5769 }
5770 }
5771
5772 void preempt_count_add(int val)
5773 {
5774 #ifdef CONFIG_DEBUG_PREEMPT
5775 /*
5776 * Underflow?
5777 */
5778 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
5779 return;
5780 #endif
5781 __preempt_count_add(val);
5782 #ifdef CONFIG_DEBUG_PREEMPT
5783 /*
5784 * Spinlock count overflowing soon?
5785 */
5786 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
5787 PREEMPT_MASK - 10);
5788 #endif
5789 preempt_latency_start(val);
5790 }
5791 EXPORT_SYMBOL(preempt_count_add);
5792 NOKPROBE_SYMBOL(preempt_count_add);
5793
5794 /*
5795 * If the value passed in equals the current preempt count
5796 * then we just enabled preemption. Stop timing the latency.
5797 */
5798 static inline void preempt_latency_stop(int val)
5799 {
5800 if (preempt_count() == val)
5801 trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
5802 }
5803
5804 void preempt_count_sub(int val)
5805 {
5806 #ifdef CONFIG_DEBUG_PREEMPT
5807 /*
5808 * Underflow?
5809 */
5810 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
5811 return;
5812 /*
5813 * Is the spinlock portion underflowing?
5814 */
5815 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
5816 !(preempt_count() & PREEMPT_MASK)))
5817 return;
5818 #endif
5819
5820 preempt_latency_stop(val);
5821 __preempt_count_sub(val);
5822 }
5823 EXPORT_SYMBOL(preempt_count_sub);
5824 NOKPROBE_SYMBOL(preempt_count_sub);
5825
5826 #else
5827 static inline void preempt_latency_start(int val) { }
5828 static inline void preempt_latency_stop(int val) { }
5829 #endif
5830
5831 static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
5832 {
5833 #ifdef CONFIG_DEBUG_PREEMPT
5834 return p->preempt_disable_ip;
5835 #else
5836 return 0;
5837 #endif
5838 }
5839
5840 /*
5841 * Print scheduling while atomic bug:
5842 */
5843 static noinline void __schedule_bug(struct task_struct *prev)
5844 {
5845 /* Save this before calling printk(), since that will clobber it */
5846 unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
5847
5848 if (oops_in_progress)
5849 return;
5850
5851 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
5852 prev->comm, prev->pid, preempt_count());
5853
5854 debug_show_held_locks(prev);
5855 print_modules();
5856 if (irqs_disabled())
5857 print_irqtrace_events(prev);
5858 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
5859 pr_err("Preemption disabled at:");
5860 print_ip_sym(KERN_ERR, preempt_disable_ip);
5861 }
5862 check_panic_on_warn("scheduling while atomic");
5863
5864 dump_stack();
5865 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
5866 }
5867
5868 /*
5869 * Various schedule()-time debugging checks and statistics:
5870 */
5871 static inline void schedule_debug(struct task_struct *prev, bool preempt)
5872 {
5873 #ifdef CONFIG_SCHED_STACK_END_CHECK
5874 if (task_stack_end_corrupted(prev))
5875 panic("corrupted stack end detected inside scheduler\n");
5876
5877 if (task_scs_end_corrupted(prev))
5878 panic("corrupted shadow stack detected inside scheduler\n");
5879 #endif
5880
5881 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
5882 if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) {
5883 printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
5884 prev->comm, prev->pid, prev->non_block_count);
5885 dump_stack();
5886 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
5887 }
5888 #endif
5889
5890 if (unlikely(in_atomic_preempt_off())) {
5891 __schedule_bug(prev);
5892 preempt_count_set(PREEMPT_DISABLED);
5893 }
5894 rcu_sleep_check();
5895 SCHED_WARN_ON(ct_state() == CT_STATE_USER);
5896
5897 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
5898
5899 schedstat_inc(this_rq()->sched_count);
5900 }
5901
5902 static void prev_balance(struct rq *rq, struct task_struct *prev,
5903 struct rq_flags *rf)
5904 {
5905 const struct sched_class *start_class = prev->sched_class;
5906 const struct sched_class *class;
5907
5908 #ifdef CONFIG_SCHED_CLASS_EXT
5909 /*
5910 * SCX requires a balance() call before every pick_next_task() including
5911 * when waking up from SCHED_IDLE. If @start_class is below SCX, start
5912 * from SCX instead.
5913 */
5914 if (scx_enabled() && sched_class_above(&ext_sched_class, start_class))
5915 start_class = &ext_sched_class;
5916 #endif
5917
5918 /*
5919 * We must do the balancing pass before put_prev_task(), such
5920 * that when we release the rq->lock the task is in the same
5921 * state as before we took rq->lock.
5922 *
5923 * We can terminate the balance pass as soon as we know there is
5924 * a runnable task of @class priority or higher.
5925 */
5926 for_active_class_range(class, start_class, &idle_sched_class) {
5927 if (class->balance && class->balance(rq, prev, rf))
5928 break;
5929 }
5930 }
5931
5932 /*
5933 * Pick up the highest-prio task:
5934 */
5935 static inline struct task_struct *
5936 __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
5937 {
5938 const struct sched_class *class;
5939 struct task_struct *p;
5940
5941 rq->dl_server = NULL;
5942
5943 if (scx_enabled())
5944 goto restart;
5945
5946 /*
5947 * Optimization: we know that if all tasks are in the fair class we can
5948 * call that function directly, but only if the @prev task wasn't of a
5949 * higher scheduling class, because otherwise those lose the
5950 * opportunity to pull in more work from other CPUs.
5951 */
5952 if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) &&
5953 rq->nr_running == rq->cfs.h_nr_running)) {
5954
5955 p = pick_next_task_fair(rq, prev, rf);
5956 if (unlikely(p == RETRY_TASK))
5957 goto restart;
5958
5959 /* Assume the next prioritized class is idle_sched_class */
5960 if (!p) {
5961 p = pick_task_idle(rq);
5962 put_prev_set_next_task(rq, prev, p);
5963 }
5964
5965 return p;
5966 }
5967
5968 restart:
5969 prev_balance(rq, prev, rf);
5970
5971 for_each_active_class(class) {
5972 if (class->pick_next_task) {
5973 p = class->pick_next_task(rq, prev);
5974 if (p)
5975 return p;
5976 } else {
5977 p = class->pick_task(rq);
5978 if (p) {
5979 put_prev_set_next_task(rq, prev, p);
5980 return p;
5981 }
5982 }
5983 }
5984
5985 BUG(); /* The idle class should always have a runnable task. */
5986 }
5987
5988 #ifdef CONFIG_SCHED_CORE
5989 static inline bool is_task_rq_idle(struct task_struct *t)
5990 {
5991 return (task_rq(t)->idle == t);
5992 }
5993
5994 static inline bool cookie_equals(struct task_struct *a, unsigned long cookie)
5995 {
5996 return is_task_rq_idle(a) || (a->core_cookie == cookie);
5997 }
5998
5999 static inline bool cookie_match(struct task_struct *a, struct task_struct *b)
6000 {
6001 if (is_task_rq_idle(a) || is_task_rq_idle(b))
6002 return true;
6003
6004 return a->core_cookie == b->core_cookie;
6005 }
6006
6007 static inline struct task_struct *pick_task(struct rq *rq)
6008 {
6009 const struct sched_class *class;
6010 struct task_struct *p;
6011
6012 rq->dl_server = NULL;
6013
6014 for_each_active_class(class) {
6015 p = class->pick_task(rq);
6016 if (p)
6017 return p;
6018 }
6019
6020 BUG(); /* The idle class should always have a runnable task. */
6021 }
6022
6023 extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
6024
6025 static void queue_core_balance(struct rq *rq);
6026
6027 static struct task_struct *
6028 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6029 {
6030 struct task_struct *next, *p, *max = NULL;
6031 const struct cpumask *smt_mask;
6032 bool fi_before = false;
6033 bool core_clock_updated = (rq == rq->core);
6034 unsigned long cookie;
6035 int i, cpu, occ = 0;
6036 struct rq *rq_i;
6037 bool need_sync;
6038
6039 if (!sched_core_enabled(rq))
6040 return __pick_next_task(rq, prev, rf);
6041
6042 cpu = cpu_of(rq);
6043
6044 /* Stopper task is switching into idle, no need for core-wide selection. */
6045 if (cpu_is_offline(cpu)) {
6046 /*
6047 * Reset core_pick so that we don't enter the fastpath when
6048 * coming online. core_pick would already be migrated to
6049 * another cpu during offline.
6050 */
6051 rq->core_pick = NULL;
6052 rq->core_dl_server = NULL;
6053 return __pick_next_task(rq, prev, rf);
6054 }
6055
6056 /*
6057 * If there were no {en,de}queues since we picked (IOW, the task
6058 * pointers are all still valid), and we haven't scheduled the last
6059 * pick yet, do so now.
6060 *
6061 * rq->core_pick can be NULL if no selection was made for a CPU because
6062 * it was either offline or went offline during a sibling's core-wide
6063 * selection. In this case, do a core-wide selection.
6064 */
6065 if (rq->core->core_pick_seq == rq->core->core_task_seq &&
6066 rq->core->core_pick_seq != rq->core_sched_seq &&
6067 rq->core_pick) {
6068 WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq);
6069
6070 next = rq->core_pick;
6071 rq->dl_server = rq->core_dl_server;
6072 rq->core_pick = NULL;
6073 rq->core_dl_server = NULL;
6074 goto out_set_next;
6075 }
6076
6077 prev_balance(rq, prev, rf);
6078
6079 smt_mask = cpu_smt_mask(cpu);
6080 need_sync = !!rq->core->core_cookie;
6081
6082 /* reset state */
6083 rq->core->core_cookie = 0UL;
6084 if (rq->core->core_forceidle_count) {
6085 if (!core_clock_updated) {
6086 update_rq_clock(rq->core);
6087 core_clock_updated = true;
6088 }
6089 sched_core_account_forceidle(rq);
6090 /* reset after accounting force idle */
6091 rq->core->core_forceidle_start = 0;
6092 rq->core->core_forceidle_count = 0;
6093 rq->core->core_forceidle_occupation = 0;
6094 need_sync = true;
6095 fi_before = true;
6096 }
6097
6098 /*
6099 * core->core_task_seq, core->core_pick_seq, rq->core_sched_seq
6100 *
6101 * @task_seq guards the task state ({en,de}queues)
6102 * @pick_seq is the @task_seq we did a selection on
6103 * @sched_seq is the @pick_seq we scheduled
6104 *
6105 * However, preemptions can cause multiple picks on the same task set.
6106 * 'Fix' this by also increasing @task_seq for every pick.
6107 */
6108 rq->core->core_task_seq++;
6109
6110 /*
6111 * Optimize for common case where this CPU has no cookies
6112 * and there are no cookied tasks running on siblings.
6113 */
6114 if (!need_sync) {
6115 next = pick_task(rq);
6116 if (!next->core_cookie) {
6117 rq->core_pick = NULL;
6118 rq->core_dl_server = NULL;
6119 /*
6120 * For robustness, update the min_vruntime_fi for
6121 * unconstrained picks as well.
6122 */
6123 WARN_ON_ONCE(fi_before);
6124 task_vruntime_update(rq, next, false);
6125 goto out_set_next;
6126 }
6127 }
6128
6129 /*
6130 * For each thread: do the regular task pick and find the max prio task
6131 * amongst them.
6132 *
6133 * Tie-break prio towards the current CPU
6134 */
6135 for_each_cpu_wrap(i, smt_mask, cpu) {
6136 rq_i = cpu_rq(i);
6137
6138 /*
6139 * Current cpu always has its clock updated on entrance to
6140 * pick_next_task(). If the current cpu is not the core,
6141 * the core may also have been updated above.
6142 */
6143 if (i != cpu && (rq_i != rq->core || !core_clock_updated))
6144 update_rq_clock(rq_i);
6145
6146 rq_i->core_pick = p = pick_task(rq_i);
6147 rq_i->core_dl_server = rq_i->dl_server;
6148
6149 if (!max || prio_less(max, p, fi_before))
6150 max = p;
6151 }
6152
6153 cookie = rq->core->core_cookie = max->core_cookie;
6154
6155 /*
6156 * For each thread: try and find a runnable task that matches @max or
6157 * force idle.
6158 */
6159 for_each_cpu(i, smt_mask) {
6160 rq_i = cpu_rq(i);
6161 p = rq_i->core_pick;
6162
6163 if (!cookie_equals(p, cookie)) {
6164 p = NULL;
6165 if (cookie)
6166 p = sched_core_find(rq_i, cookie);
6167 if (!p)
6168 p = idle_sched_class.pick_task(rq_i);
6169 }
6170
6171 rq_i->core_pick = p;
6172 rq_i->core_dl_server = NULL;
6173
6174 if (p == rq_i->idle) {
6175 if (rq_i->nr_running) {
6176 rq->core->core_forceidle_count++;
6177 if (!fi_before)
6178 rq->core->core_forceidle_seq++;
6179 }
6180 } else {
6181 occ++;
6182 }
6183 }
6184
6185 if (schedstat_enabled() && rq->core->core_forceidle_count) {
6186 rq->core->core_forceidle_start = rq_clock(rq->core);
6187 rq->core->core_forceidle_occupation = occ;
6188 }
6189
6190 rq->core->core_pick_seq = rq->core->core_task_seq;
6191 next = rq->core_pick;
6192 rq->core_sched_seq = rq->core->core_pick_seq;
6193
6194 /* Something should have been selected for current CPU */
6195 WARN_ON_ONCE(!next);
6196
6197 /*
6198 * Reschedule siblings
6199 *
6200 * NOTE: L1TF -- at this point we're no longer running the old task and
6201 * sending an IPI (below) ensures the sibling will no longer be running
6202 * their task. This ensures there is no inter-sibling overlap between
6203 * non-matching user state.
6204 */
6205 for_each_cpu(i, smt_mask) {
6206 rq_i = cpu_rq(i);
6207
6208 /*
6209 * An online sibling might have gone offline before a task
6210 * could be picked for it, or it might be offline but later
6211 * happen to come online, but it's too late and nothing was
6212 * picked for it. That's Ok - it will pick tasks for itself,
6213 * so ignore it.
6214 */
6215 if (!rq_i->core_pick)
6216 continue;
6217
6218 /*
6219 * Update for new !FI->FI transitions, or if continuing to be in !FI:
6220 * fi_before fi update?
6221 * 0 0 1
6222 * 0 1 1
6223 * 1 0 1
6224 * 1 1 0
6225 */
6226 if (!(fi_before && rq->core->core_forceidle_count))
6227 task_vruntime_update(rq_i, rq_i->core_pick, !!rq->core->core_forceidle_count);
6228
6229 rq_i->core_pick->core_occupation = occ;
6230
6231 if (i == cpu) {
6232 rq_i->core_pick = NULL;
6233 rq_i->core_dl_server = NULL;
6234 continue;
6235 }
6236
6237 /* Did we break L1TF mitigation requirements? */
6238 WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick));
6239
6240 if (rq_i->curr == rq_i->core_pick) {
6241 rq_i->core_pick = NULL;
6242 rq_i->core_dl_server = NULL;
6243 continue;
6244 }
6245
6246 resched_curr(rq_i);
6247 }
6248
6249 out_set_next:
6250 put_prev_set_next_task(rq, prev, next);
6251 if (rq->core->core_forceidle_count && next == rq->idle)
6252 queue_core_balance(rq);
6253
6254 return next;
6255 }
6256
6257 static bool try_steal_cookie(int this, int that)
6258 {
6259 struct rq *dst = cpu_rq(this), *src = cpu_rq(that);
6260 struct task_struct *p;
6261 unsigned long cookie;
6262 bool success = false;
6263
6264 guard(irq)();
6265 guard(double_rq_lock)(dst, src);
6266
6267 cookie = dst->core->core_cookie;
6268 if (!cookie)
6269 return false;
6270
6271 if (dst->curr != dst->idle)
6272 return false;
6273
6274 p = sched_core_find(src, cookie);
6275 if (!p)
6276 return false;
6277
6278 do {
6279 if (p == src->core_pick || p == src->curr)
6280 goto next;
6281
6282 if (!is_cpu_allowed(p, this))
6283 goto next;
6284
6285 if (p->core_occupation > dst->idle->core_occupation)
6286 goto next;
6287 /*
6288 * sched_core_find() and sched_core_next() will ensure
6289 * that task @p is not throttled now, we also need to
6290 * check whether the runqueue of the destination CPU is
6291 * being throttled.
6292 */
6293 if (sched_task_is_throttled(p, this))
6294 goto next;
6295
6296 deactivate_task(src, p, 0);
6297 set_task_cpu(p, this);
6298 activate_task(dst, p, 0);
6299
6300 resched_curr(dst);
6301
6302 success = true;
6303 break;
6304
6305 next:
6306 p = sched_core_next(p, cookie);
6307 } while (p);
6308
6309 return success;
6310 }
6311
6312 static bool steal_cookie_task(int cpu, struct sched_domain *sd)
6313 {
6314 int i;
6315
6316 for_each_cpu_wrap(i, sched_domain_span(sd), cpu + 1) {
6317 if (i == cpu)
6318 continue;
6319
6320 if (need_resched())
6321 break;
6322
6323 if (try_steal_cookie(cpu, i))
6324 return true;
6325 }
6326
6327 return false;
6328 }
6329
6330 static void sched_core_balance(struct rq *rq)
6331 {
6332 struct sched_domain *sd;
6333 int cpu = cpu_of(rq);
6334
6335 guard(preempt)();
6336 guard(rcu)();
6337
6338 raw_spin_rq_unlock_irq(rq);
6339 for_each_domain(cpu, sd) {
6340 if (need_resched())
6341 break;
6342
6343 if (steal_cookie_task(cpu, sd))
6344 break;
6345 }
6346 raw_spin_rq_lock_irq(rq);
6347 }
6348
6349 static DEFINE_PER_CPU(struct balance_callback, core_balance_head);
6350
6351 static void queue_core_balance(struct rq *rq)
6352 {
6353 if (!sched_core_enabled(rq))
6354 return;
6355
6356 if (!rq->core->core_cookie)
6357 return;
6358
6359 if (!rq->nr_running) /* not forced idle */
6360 return;
6361
6362 queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance);
6363 }
6364
6365 DEFINE_LOCK_GUARD_1(core_lock, int,
6366 sched_core_lock(*_T->lock, &_T->flags),
6367 sched_core_unlock(*_T->lock, &_T->flags),
6368 unsigned long flags)
6369
6370 static void sched_core_cpu_starting(unsigned int cpu)
6371 {
6372 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
6373 struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
6374 int t;
6375
6376 guard(core_lock)(&cpu);
6377
6378 WARN_ON_ONCE(rq->core != rq);
6379
6380 /* if we're the first, we'll be our own leader */
6381 if (cpumask_weight(smt_mask) == 1)
6382 return;
6383
6384 /* find the leader */
6385 for_each_cpu(t, smt_mask) {
6386 if (t == cpu)
6387 continue;
6388 rq = cpu_rq(t);
6389 if (rq->core == rq) {
6390 core_rq = rq;
6391 break;
6392 }
6393 }
6394
6395 if (WARN_ON_ONCE(!core_rq)) /* whoopsie */
6396 return;
6397
6398 /* install and validate core_rq */
6399 for_each_cpu(t, smt_mask) {
6400 rq = cpu_rq(t);
6401
6402 if (t == cpu)
6403 rq->core = core_rq;
6404
6405 WARN_ON_ONCE(rq->core != core_rq);
6406 }
6407 }
6408
6409 static void sched_core_cpu_deactivate(unsigned int cpu)
6410 {
6411 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
6412 struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
6413 int t;
6414
6415 guard(core_lock)(&cpu);
6416
6417 /* if we're the last man standing, nothing to do */
6418 if (cpumask_weight(smt_mask) == 1) {
6419 WARN_ON_ONCE(rq->core != rq);
6420 return;
6421 }
6422
6423 /* if we're not the leader, nothing to do */
6424 if (rq->core != rq)
6425 return;
6426
6427 /* find a new leader */
6428 for_each_cpu(t, smt_mask) {
6429 if (t == cpu)
6430 continue;
6431 core_rq = cpu_rq(t);
6432 break;
6433 }
6434
6435 if (WARN_ON_ONCE(!core_rq)) /* impossible */
6436 return;
6437
6438 /* copy the shared state to the new leader */
6439 core_rq->core_task_seq = rq->core_task_seq;
6440 core_rq->core_pick_seq = rq->core_pick_seq;
6441 core_rq->core_cookie = rq->core_cookie;
6442 core_rq->core_forceidle_count = rq->core_forceidle_count;
6443 core_rq->core_forceidle_seq = rq->core_forceidle_seq;
6444 core_rq->core_forceidle_occupation = rq->core_forceidle_occupation;
6445
6446 /*
6447 * Accounting edge for forced idle is handled in pick_next_task().
6448 * Don't need another one here, since the hotplug thread shouldn't
6449 * have a cookie.
6450 */
6451 core_rq->core_forceidle_start = 0;
6452
6453 /* install new leader */
6454 for_each_cpu(t, smt_mask) {
6455 rq = cpu_rq(t);
6456 rq->core = core_rq;
6457 }
6458 }
6459
6460 static inline void sched_core_cpu_dying(unsigned int cpu)
6461 {
6462 struct rq *rq = cpu_rq(cpu);
6463
6464 if (rq->core != rq)
6465 rq->core = rq;
6466 }
6467
6468 #else /* !CONFIG_SCHED_CORE */
6469
6470 static inline void sched_core_cpu_starting(unsigned int cpu) {}
6471 static inline void sched_core_cpu_deactivate(unsigned int cpu) {}
6472 static inline void sched_core_cpu_dying(unsigned int cpu) {}
6473
6474 static struct task_struct *
6475 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6476 {
6477 return __pick_next_task(rq, prev, rf);
6478 }
6479
6480 #endif /* CONFIG_SCHED_CORE */
6481
6482 /*
6483 * Constants for the sched_mode argument of __schedule().
6484 *
6485 * The mode argument allows RT enabled kernels to differentiate a
6486 * preemption from blocking on an 'sleeping' spin/rwlock.
6487 */
6488 #define SM_IDLE (-1)
6489 #define SM_NONE 0
6490 #define SM_PREEMPT 1
6491 #define SM_RTLOCK_WAIT 2
6492
6493 /*
6494 * __schedule() is the main scheduler function.
6495 *
6496 * The main means of driving the scheduler and thus entering this function are:
6497 *
6498 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc.
6499 *
6500 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
6501 * paths. For example, see arch/x86/entry_64.S.
6502 *
6503 * To drive preemption between tasks, the scheduler sets the flag in timer
6504 * interrupt handler sched_tick().
6505 *
6506 * 3. Wakeups don't really cause entry into schedule(). They add a
6507 * task to the run-queue and that's it.
6508 *
6509 * Now, if the new task added to the run-queue preempts the current
6510 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
6511 * called on the nearest possible occasion:
6512 *
6513 * - If the kernel is preemptible (CONFIG_PREEMPTION=y):
6514 *
6515 * - in syscall or exception context, at the next outermost
6516 * preempt_enable(). (this might be as soon as the wake_up()'s
6517 * spin_unlock()!)
6518 *
6519 * - in IRQ context, return from interrupt-handler to
6520 * preemptible context
6521 *
6522 * - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
6523 * then at the next:
6524 *
6525 * - cond_resched() call
6526 * - explicit schedule() call
6527 * - return from syscall or exception to user-space
6528 * - return from interrupt-handler to user-space
6529 *
6530 * WARNING: must be called with preemption disabled!
6531 */
6532 static void __sched notrace __schedule(int sched_mode)
6533 {
6534 struct task_struct *prev, *next;
6535 /*
6536 * On a PREEMPT_RT kernel, SM_RTLOCK_WAIT is noted
6537 * as a preemption by schedule_debug() and RCU.
6538 */
6539 bool preempt = sched_mode > SM_NONE;
6540 unsigned long *switch_count;
6541 unsigned long prev_state;
6542 struct rq_flags rf;
6543 struct rq *rq;
6544 int cpu;
6545
6546 cpu = smp_processor_id();
6547 rq = cpu_rq(cpu);
6548 prev = rq->curr;
6549
6550 schedule_debug(prev, preempt);
6551
6552 if (sched_feat(HRTICK) || sched_feat(HRTICK_DL))
6553 hrtick_clear(rq);
6554
6555 local_irq_disable();
6556 rcu_note_context_switch(preempt);
6557
6558 /*
6559 * Make sure that signal_pending_state()->signal_pending() below
6560 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
6561 * done by the caller to avoid the race with signal_wake_up():
6562 *
6563 * __set_current_state(@state) signal_wake_up()
6564 * schedule() set_tsk_thread_flag(p, TIF_SIGPENDING)
6565 * wake_up_state(p, state)
6566 * LOCK rq->lock LOCK p->pi_state
6567 * smp_mb__after_spinlock() smp_mb__after_spinlock()
6568 * if (signal_pending_state()) if (p->state & @state)
6569 *
6570 * Also, the membarrier system call requires a full memory barrier
6571 * after coming from user-space, before storing to rq->curr; this
6572 * barrier matches a full barrier in the proximity of the membarrier
6573 * system call exit.
6574 */
6575 rq_lock(rq, &rf);
6576 smp_mb__after_spinlock();
6577
6578 /* Promote REQ to ACT */
6579 rq->clock_update_flags <<= 1;
6580 update_rq_clock(rq);
6581 rq->clock_update_flags = RQCF_UPDATED;
6582
6583 switch_count = &prev->nivcsw;
6584
6585 /* Task state changes only considers SM_PREEMPT as preemption */
6586 preempt = sched_mode == SM_PREEMPT;
6587
6588 /*
6589 * We must load prev->state once (task_struct::state is volatile), such
6590 * that we form a control dependency vs deactivate_task() below.
6591 */
6592 prev_state = READ_ONCE(prev->__state);
6593 if (sched_mode == SM_IDLE) {
6594 /* SCX must consult the BPF scheduler to tell if rq is empty */
6595 if (!rq->nr_running && !scx_enabled()) {
6596 next = prev;
6597 goto picked;
6598 }
6599 } else if (!preempt && prev_state) {
6600 if (signal_pending_state(prev_state, prev)) {
6601 WRITE_ONCE(prev->__state, TASK_RUNNING);
6602 } else {
6603 int flags = DEQUEUE_NOCLOCK;
6604
6605 prev->sched_contributes_to_load =
6606 (prev_state & TASK_UNINTERRUPTIBLE) &&
6607 !(prev_state & TASK_NOLOAD) &&
6608 !(prev_state & TASK_FROZEN);
6609
6610 if (unlikely(is_special_task_state(prev_state)))
6611 flags |= DEQUEUE_SPECIAL;
6612
6613 /*
6614 * __schedule() ttwu()
6615 * prev_state = prev->state; if (p->on_rq && ...)
6616 * if (prev_state) goto out;
6617 * p->on_rq = 0; smp_acquire__after_ctrl_dep();
6618 * p->state = TASK_WAKING
6619 *
6620 * Where __schedule() and ttwu() have matching control dependencies.
6621 *
6622 * After this, schedule() must not care about p->state any more.
6623 */
6624 block_task(rq, prev, flags);
6625 }
6626 switch_count = &prev->nvcsw;
6627 }
6628
6629 next = pick_next_task(rq, prev, &rf);
6630 picked:
6631 clear_tsk_need_resched(prev);
6632 clear_preempt_need_resched();
6633 #ifdef CONFIG_SCHED_DEBUG
6634 rq->last_seen_need_resched_ns = 0;
6635 #endif
6636
6637 if (likely(prev != next)) {
6638 rq->nr_switches++;
6639 /*
6640 * RCU users of rcu_dereference(rq->curr) may not see
6641 * changes to task_struct made by pick_next_task().
6642 */
6643 RCU_INIT_POINTER(rq->curr, next);
6644 /*
6645 * The membarrier system call requires each architecture
6646 * to have a full memory barrier after updating
6647 * rq->curr, before returning to user-space.
6648 *
6649 * Here are the schemes providing that barrier on the
6650 * various architectures:
6651 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC,
6652 * RISC-V. switch_mm() relies on membarrier_arch_switch_mm()
6653 * on PowerPC and on RISC-V.
6654 * - finish_lock_switch() for weakly-ordered
6655 * architectures where spin_unlock is a full barrier,
6656 * - switch_to() for arm64 (weakly-ordered, spin_unlock
6657 * is a RELEASE barrier),
6658 *
6659 * The barrier matches a full barrier in the proximity of
6660 * the membarrier system call entry.
6661 *
6662 * On RISC-V, this barrier pairing is also needed for the
6663 * SYNC_CORE command when switching between processes, cf.
6664 * the inline comments in membarrier_arch_switch_mm().
6665 */
6666 ++*switch_count;
6667
6668 migrate_disable_switch(rq, prev);
6669 psi_account_irqtime(rq, prev, next);
6670 psi_sched_switch(prev, next, !task_on_rq_queued(prev));
6671
6672 trace_sched_switch(preempt, prev, next, prev_state);
6673
6674 /* Also unlocks the rq: */
6675 rq = context_switch(rq, prev, next, &rf);
6676 } else {
6677 rq_unpin_lock(rq, &rf);
6678 __balance_callbacks(rq);
6679 raw_spin_rq_unlock_irq(rq);
6680 }
6681 }
6682
6683 void __noreturn do_task_dead(void)
6684 {
6685 /* Causes final put_task_struct in finish_task_switch(): */
6686 set_special_state(TASK_DEAD);
6687
6688 /* Tell freezer to ignore us: */
6689 current->flags |= PF_NOFREEZE;
6690
6691 __schedule(SM_NONE);
6692 BUG();
6693
6694 /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
6695 for (;;)
6696 cpu_relax();
6697 }
6698
6699 static inline void sched_submit_work(struct task_struct *tsk)
6700 {
6701 static DEFINE_WAIT_OVERRIDE_MAP(sched_map, LD_WAIT_CONFIG);
6702 unsigned int task_flags;
6703
6704 /*
6705 * Establish LD_WAIT_CONFIG context to ensure none of the code called
6706 * will use a blocking primitive -- which would lead to recursion.
6707 */
6708 lock_map_acquire_try(&sched_map);
6709
6710 task_flags = tsk->flags;
6711 /*
6712 * If a worker goes to sleep, notify and ask workqueue whether it
6713 * wants to wake up a task to maintain concurrency.
6714 */
6715 if (task_flags & PF_WQ_WORKER)
6716 wq_worker_sleeping(tsk);
6717 else if (task_flags & PF_IO_WORKER)
6718 io_wq_worker_sleeping(tsk);
6719
6720 /*
6721 * spinlock and rwlock must not flush block requests. This will
6722 * deadlock if the callback attempts to acquire a lock which is
6723 * already acquired.
6724 */
6725 SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT);
6726
6727 /*
6728 * If we are going to sleep and we have plugged IO queued,
6729 * make sure to submit it to avoid deadlocks.
6730 */
6731 blk_flush_plug(tsk->plug, true);
6732
6733 lock_map_release(&sched_map);
6734 }
6735
6736 static void sched_update_worker(struct task_struct *tsk)
6737 {
6738 if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER | PF_BLOCK_TS)) {
6739 if (tsk->flags & PF_BLOCK_TS)
6740 blk_plug_invalidate_ts(tsk);
6741 if (tsk->flags & PF_WQ_WORKER)
6742 wq_worker_running(tsk);
6743 else if (tsk->flags & PF_IO_WORKER)
6744 io_wq_worker_running(tsk);
6745 }
6746 }
6747
6748 static __always_inline void __schedule_loop(int sched_mode)
6749 {
6750 do {
6751 preempt_disable();
6752 __schedule(sched_mode);
6753 sched_preempt_enable_no_resched();
6754 } while (need_resched());
6755 }
6756
6757 asmlinkage __visible void __sched schedule(void)
6758 {
6759 struct task_struct *tsk = current;
6760
6761 #ifdef CONFIG_RT_MUTEXES
6762 lockdep_assert(!tsk->sched_rt_mutex);
6763 #endif
6764
6765 if (!task_is_running(tsk))
6766 sched_submit_work(tsk);
6767 __schedule_loop(SM_NONE);
6768 sched_update_worker(tsk);
6769 }
6770 EXPORT_SYMBOL(schedule);
6771
6772 /*
6773 * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
6774 * state (has scheduled out non-voluntarily) by making sure that all
6775 * tasks have either left the run queue or have gone into user space.
6776 * As idle tasks do not do either, they must not ever be preempted
6777 * (schedule out non-voluntarily).
6778 *
6779 * schedule_idle() is similar to schedule_preempt_disabled() except that it
6780 * never enables preemption because it does not call sched_submit_work().
6781 */
6782 void __sched schedule_idle(void)
6783 {
6784 /*
6785 * As this skips calling sched_submit_work(), which the idle task does
6786 * regardless because that function is a NOP when the task is in a
6787 * TASK_RUNNING state, make sure this isn't used someplace that the
6788 * current task can be in any other state. Note, idle is always in the
6789 * TASK_RUNNING state.
6790 */
6791 WARN_ON_ONCE(current->__state);
6792 do {
6793 __schedule(SM_IDLE);
6794 } while (need_resched());
6795 }
6796
6797 #if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK)
6798 asmlinkage __visible void __sched schedule_user(void)
6799 {
6800 /*
6801 * If we come here after a random call to set_need_resched(),
6802 * or we have been woken up remotely but the IPI has not yet arrived,
6803 * we haven't yet exited the RCU idle mode. Do it here manually until
6804 * we find a better solution.
6805 *
6806 * NB: There are buggy callers of this function. Ideally we
6807 * should warn if prev_state != CT_STATE_USER, but that will trigger
6808 * too frequently to make sense yet.
6809 */
6810 enum ctx_state prev_state = exception_enter();
6811 schedule();
6812 exception_exit(prev_state);
6813 }
6814 #endif
6815
6816 /**
6817 * schedule_preempt_disabled - called with preemption disabled
6818 *
6819 * Returns with preemption disabled. Note: preempt_count must be 1
6820 */
6821 void __sched schedule_preempt_disabled(void)
6822 {
6823 sched_preempt_enable_no_resched();
6824 schedule();
6825 preempt_disable();
6826 }
6827
6828 #ifdef CONFIG_PREEMPT_RT
6829 void __sched notrace schedule_rtlock(void)
6830 {
6831 __schedule_loop(SM_RTLOCK_WAIT);
6832 }
6833 NOKPROBE_SYMBOL(schedule_rtlock);
6834 #endif
6835
6836 static void __sched notrace preempt_schedule_common(void)
6837 {
6838 do {
6839 /*
6840 * Because the function tracer can trace preempt_count_sub()
6841 * and it also uses preempt_enable/disable_notrace(), if
6842 * NEED_RESCHED is set, the preempt_enable_notrace() called
6843 * by the function tracer will call this function again and
6844 * cause infinite recursion.
6845 *
6846 * Preemption must be disabled here before the function
6847 * tracer can trace. Break up preempt_disable() into two
6848 * calls. One to disable preemption without fear of being
6849 * traced. The other to still record the preemption latency,
6850 * which can also be traced by the function tracer.
6851 */
6852 preempt_disable_notrace();
6853 preempt_latency_start(1);
6854 __schedule(SM_PREEMPT);
6855 preempt_latency_stop(1);
6856 preempt_enable_no_resched_notrace();
6857
6858 /*
6859 * Check again in case we missed a preemption opportunity
6860 * between schedule and now.
6861 */
6862 } while (need_resched());
6863 }
6864
6865 #ifdef CONFIG_PREEMPTION
6866 /*
6867 * This is the entry point to schedule() from in-kernel preemption
6868 * off of preempt_enable.
6869 */
6870 asmlinkage __visible void __sched notrace preempt_schedule(void)
6871 {
6872 /*
6873 * If there is a non-zero preempt_count or interrupts are disabled,
6874 * we do not want to preempt the current task. Just return.
6875 */
6876 if (likely(!preemptible()))
6877 return;
6878 preempt_schedule_common();
6879 }
6880 NOKPROBE_SYMBOL(preempt_schedule);
6881 EXPORT_SYMBOL(preempt_schedule);
6882
6883 #ifdef CONFIG_PREEMPT_DYNAMIC
6884 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
6885 #ifndef preempt_schedule_dynamic_enabled
6886 #define preempt_schedule_dynamic_enabled preempt_schedule
6887 #define preempt_schedule_dynamic_disabled NULL
6888 #endif
6889 DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
6890 EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
6891 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
6892 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);
6893 void __sched notrace dynamic_preempt_schedule(void)
6894 {
6895 if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
6896 return;
6897 preempt_schedule();
6898 }
6899 NOKPROBE_SYMBOL(dynamic_preempt_schedule);
6900 EXPORT_SYMBOL(dynamic_preempt_schedule);
6901 #endif
6902 #endif
6903
6904 /**
6905 * preempt_schedule_notrace - preempt_schedule called by tracing
6906 *
6907 * The tracing infrastructure uses preempt_enable_notrace to prevent
6908 * recursion and tracing preempt enabling caused by the tracing
6909 * infrastructure itself. But as tracing can happen in areas coming
6910 * from userspace or just about to enter userspace, a preempt enable
6911 * can occur before user_exit() is called. This will cause the scheduler
6912 * to be called when the system is still in usermode.
6913 *
6914 * To prevent this, the preempt_enable_notrace will use this function
6915 * instead of preempt_schedule() to exit user context if needed before
6916 * calling the scheduler.
6917 */
6918 asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
6919 {
6920 enum ctx_state prev_ctx;
6921
6922 if (likely(!preemptible()))
6923 return;
6924
6925 do {
6926 /*
6927 * Because the function tracer can trace preempt_count_sub()
6928 * and it also uses preempt_enable/disable_notrace(), if
6929 * NEED_RESCHED is set, the preempt_enable_notrace() called
6930 * by the function tracer will call this function again and
6931 * cause infinite recursion.
6932 *
6933 * Preemption must be disabled here before the function
6934 * tracer can trace. Break up preempt_disable() into two
6935 * calls. One to disable preemption without fear of being
6936 * traced. The other to still record the preemption latency,
6937 * which can also be traced by the function tracer.
6938 */
6939 preempt_disable_notrace();
6940 preempt_latency_start(1);
6941 /*
6942 * Needs preempt disabled in case user_exit() is traced
6943 * and the tracer calls preempt_enable_notrace() causing
6944 * an infinite recursion.
6945 */
6946 prev_ctx = exception_enter();
6947 __schedule(SM_PREEMPT);
6948 exception_exit(prev_ctx);
6949
6950 preempt_latency_stop(1);
6951 preempt_enable_no_resched_notrace();
6952 } while (need_resched());
6953 }
6954 EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
6955
6956 #ifdef CONFIG_PREEMPT_DYNAMIC
6957 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
6958 #ifndef preempt_schedule_notrace_dynamic_enabled
6959 #define preempt_schedule_notrace_dynamic_enabled preempt_schedule_notrace
6960 #define preempt_schedule_notrace_dynamic_disabled NULL
6961 #endif
6962 DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
6963 EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
6964 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
6965 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace);
6966 void __sched notrace dynamic_preempt_schedule_notrace(void)
6967 {
6968 if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace))
6969 return;
6970 preempt_schedule_notrace();
6971 }
6972 NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace);
6973 EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
6974 #endif
6975 #endif
6976
6977 #endif /* CONFIG_PREEMPTION */
6978
6979 /*
6980 * This is the entry point to schedule() from kernel preemption
6981 * off of IRQ context.
6982 * Note that this is called and returns with IRQs disabled. This will
6983 * protect us against recursive calling from IRQ contexts.
6984 */
6985 asmlinkage __visible void __sched preempt_schedule_irq(void)
6986 {
6987 enum ctx_state prev_state;
6988
6989 /* Catch callers which need to be fixed */
6990 BUG_ON(preempt_count() || !irqs_disabled());
6991
6992 prev_state = exception_enter();
6993
6994 do {
6995 preempt_disable();
6996 local_irq_enable();
6997 __schedule(SM_PREEMPT);
6998 local_irq_disable();
6999 sched_preempt_enable_no_resched();
7000 } while (need_resched());
7001
7002 exception_exit(prev_state);
7003 }
7004
7005 int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
7006 void *key)
7007 {
7008 WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~(WF_SYNC|WF_CURRENT_CPU));
7009 return try_to_wake_up(curr->private, mode, wake_flags);
7010 }
7011 EXPORT_SYMBOL(default_wake_function);
7012
7013 void __setscheduler_prio(struct task_struct *p, int prio)
7014 {
7015 if (dl_prio(prio))
7016 p->sched_class = &dl_sched_class;
7017 else if (rt_prio(prio))
7018 p->sched_class = &rt_sched_class;
7019 #ifdef CONFIG_SCHED_CLASS_EXT
7020 else if (task_should_scx(p))
7021 p->sched_class = &ext_sched_class;
7022 #endif
7023 else
7024 p->sched_class = &fair_sched_class;
7025
7026 p->prio = prio;
7027 }
7028
7029 #ifdef CONFIG_RT_MUTEXES
7030
7031 /*
7032 * Would be more useful with typeof()/auto_type but they don't mix with
7033 * bit-fields. Since it's a local thing, use int. Keep the generic sounding
7034 * name such that if someone were to implement this function we get to compare
7035 * notes.
7036 */
7037 #define fetch_and_set(x, v) ({ int _x = (x); (x) = (v); _x; })
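/*
 * Illustrative semantics of the helper above: fetch_and_set(x, v) evaluates
 * to the old value of x and then stores v, so a sketch like
 *
 *	int flag = 0;
 *	int old = fetch_and_set(flag, 1);	// old == 0, flag == 1 afterwards
 *
 * shows how the lockdep_assert()s below detect unbalanced pre/post schedule
 * calls: the assert fires if the field did not hold the expected old value.
 */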
7038
7039 void rt_mutex_pre_schedule(void)
7040 {
7041 lockdep_assert(!fetch_and_set(current->sched_rt_mutex, 1));
7042 sched_submit_work(current);
7043 }
7044
7045 void rt_mutex_schedule(void)
7046 {
7047 lockdep_assert(current->sched_rt_mutex);
7048 __schedule_loop(SM_NONE);
7049 }
7050
7051 void rt_mutex_post_schedule(void)
7052 {
7053 sched_update_worker(current);
7054 lockdep_assert(fetch_and_set(current->sched_rt_mutex, 0));
7055 }
7056
7057 /*
7058 * rt_mutex_setprio - set the current priority of a task
7059 * @p: task to boost
7060 * @pi_task: donor task
7061 *
7062 * This function changes the 'effective' priority of a task. It does
7063 * not touch ->normal_prio like __setscheduler().
7064 *
7065 * Used by the rt_mutex code to implement priority inheritance
7066 * logic. Call site only calls if the priority of the task changed.
7067 */
7068 void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
7069 {
7070 int prio, oldprio, queued, running, queue_flag =
7071 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
7072 const struct sched_class *prev_class;
7073 struct rq_flags rf;
7074 struct rq *rq;
7075
7076 /* XXX used to be waiter->prio, not waiter->task->prio */
7077 prio = __rt_effective_prio(pi_task, p->normal_prio);
7078
7079 /*
7080 * If nothing changed; bail early.
7081 */
7082 if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio))
7083 return;
7084
7085 rq = __task_rq_lock(p, &rf);
7086 update_rq_clock(rq);
7087 /*
7088 * Set under pi_lock && rq->lock, such that the value can be used under
7089 * either lock.
7090 *
7091 * Note that there is a lot of trickery needed to make this pointer cache work
7092 * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
7093 * ensure a task is de-boosted (pi_task is set to NULL) before the
7094 * task is allowed to run again (and can exit). This ensures the pointer
7095 * points to a blocked task -- which guarantees the task is present.
7096 */
7097 p->pi_top_task = pi_task;
7098
7099 /*
7100 * For FIFO/RR we only need to set prio, if that matches we're done.
7101 */
7102 if (prio == p->prio && !dl_prio(prio))
7103 goto out_unlock;
7104
7105 /*
7106 * Idle task boosting is a no-no in general. There is one
7107 * exception, when PREEMPT_RT and NOHZ are active:
7108 *
7109 * The idle task calls get_next_timer_interrupt() and holds
7110 * the timer wheel base->lock on the CPU and another CPU wants
7111 * to access the timer (probably to cancel it). We can safely
7112 * ignore the boosting request, as the idle CPU runs this code
7113 * with interrupts disabled and will complete the lock
7114 * protected section without being interrupted. So there is no
7115 * real need to boost.
7116 */
7117 if (unlikely(p == rq->idle)) {
7118 WARN_ON(p != rq->curr);
7119 WARN_ON(p->pi_blocked_on);
7120 goto out_unlock;
7121 }
7122
7123 trace_sched_pi_setprio(p, pi_task);
7124 oldprio = p->prio;
7125
7126 if (oldprio == prio)
7127 queue_flag &= ~DEQUEUE_MOVE;
7128
7129 prev_class = p->sched_class;
7130 queued = task_on_rq_queued(p);
7131 running = task_current(rq, p);
7132 if (queued)
7133 dequeue_task(rq, p, queue_flag);
7134 if (running)
7135 put_prev_task(rq, p);
7136
7137 /*
7138 * Boosting conditions are:
7139 * 1. -rt task is running and holds mutex A
7140 * --> -dl task blocks on mutex A
7141 *
7142 * 2. -dl task is running and holds mutex A
7143 * --> -dl task blocks on mutex A and could preempt the
7144 * running task
7145 */
7146 if (dl_prio(prio)) {
7147 if (!dl_prio(p->normal_prio) ||
7148 (pi_task && dl_prio(pi_task->prio) &&
7149 dl_entity_preempt(&pi_task->dl, &p->dl))) {
7150 p->dl.pi_se = pi_task->dl.pi_se;
7151 queue_flag |= ENQUEUE_REPLENISH;
7152 } else {
7153 p->dl.pi_se = &p->dl;
7154 }
7155 } else if (rt_prio(prio)) {
7156 if (dl_prio(oldprio))
7157 p->dl.pi_se = &p->dl;
7158 if (oldprio < prio)
7159 queue_flag |= ENQUEUE_HEAD;
7160 } else {
7161 if (dl_prio(oldprio))
7162 p->dl.pi_se = &p->dl;
7163 if (rt_prio(oldprio))
7164 p->rt.timeout = 0;
7165 }
7166
7167 __setscheduler_prio(p, prio);
7168 check_class_changing(rq, p, prev_class);
7169
7170 if (queued)
7171 enqueue_task(rq, p, queue_flag);
7172 if (running)
7173 set_next_task(rq, p);
7174
7175 check_class_changed(rq, p, prev_class, oldprio);
7176 out_unlock:
7177 /* Avoid rq from going away on us: */
7178 preempt_disable();
7179
7180 rq_unpin_lock(rq, &rf);
7181 __balance_callbacks(rq);
7182 raw_spin_rq_unlock(rq);
7183
7184 preempt_enable();
7185 }
7186 #endif
7187
7188 #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
7189 int __sched __cond_resched(void)
7190 {
7191 if (should_resched(0)) {
7192 preempt_schedule_common();
7193 return 1;
7194 }
7195 /*
7196 * In preemptible kernels, ->rcu_read_lock_nesting tells the tick
7197 * whether the current CPU is in an RCU read-side critical section,
7198 * so the tick can report quiescent states even for CPUs looping
7199 * in kernel context. In contrast, in non-preemptible kernels,
7200 * RCU readers leave no in-memory hints, which means that CPU-bound
7201 * processes executing in kernel context might never report an
7202 * RCU quiescent state. Therefore, the following code causes
7203 * cond_resched() to report a quiescent state, but only when RCU
7204 * is in urgent need of one.
7205 */
7206 #ifndef CONFIG_PREEMPT_RCU
7207 rcu_all_qs();
7208 #endif
7209 return 0;
7210 }
7211 EXPORT_SYMBOL(__cond_resched);
7212 #endif
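/*
 * Illustrative (hypothetical) caller: long-running kernel loops sprinkle
 * cond_resched() so that, on non-preemptible kernels, they still yield when a
 * reschedule is pending. process_item() is a made-up placeholder:
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(i);
 *		cond_resched();
 *	}
 *
 * With CONFIG_PREEMPT_DYNAMIC below, the static call/key machinery can turn
 * cond_resched() into a no-op when full preemption is selected at boot.
 */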
7213
7214 #ifdef CONFIG_PREEMPT_DYNAMIC
7215 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
7216 #define cond_resched_dynamic_enabled __cond_resched
7217 #define cond_resched_dynamic_disabled ((void *)&__static_call_return0)
7218 DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
7219 EXPORT_STATIC_CALL_TRAMP(cond_resched);
7220
7221 #define might_resched_dynamic_enabled __cond_resched
7222 #define might_resched_dynamic_disabled ((void *)&__static_call_return0)
7223 DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
7224 EXPORT_STATIC_CALL_TRAMP(might_resched);
7225 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7226 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
7227 int __sched dynamic_cond_resched(void)
7228 {
7229 klp_sched_try_switch();
7230 if (!static_branch_unlikely(&sk_dynamic_cond_resched))
7231 return 0;
7232 return __cond_resched();
7233 }
7234 EXPORT_SYMBOL(dynamic_cond_resched);
7235
7236 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched);
7237 int __sched dynamic_might_resched(void)
7238 {
7239 if (!static_branch_unlikely(&sk_dynamic_might_resched))
7240 return 0;
7241 return __cond_resched();
7242 }
7243 EXPORT_SYMBOL(dynamic_might_resched);
7244 #endif
7245 #endif
7246
7247 /*
7248 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
7249 * call schedule, and on return reacquire the lock.
7250 *
7251 * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
7252 * operations here to prevent schedule() from being called twice (once via
7253 * spin_unlock(), once by hand).
7254 */
7255 int __cond_resched_lock(spinlock_t *lock)
7256 {
7257 int resched = should_resched(PREEMPT_LOCK_OFFSET);
7258 int ret = 0;
7259
7260 lockdep_assert_held(lock);
7261
7262 if (spin_needbreak(lock) || resched) {
7263 spin_unlock(lock);
7264 if (!_cond_resched())
7265 cpu_relax();
7266 ret = 1;
7267 spin_lock(lock);
7268 }
7269 return ret;
7270 }
7271 EXPORT_SYMBOL(__cond_resched_lock);
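/*
 * Illustrative sketch of a (hypothetical) caller: cond_resched_lock()
 * lets a long traversal under a spinlock yield both the lock and the CPU
 * when a reschedule is pending or another CPU is contending the lock.
 * Because the lock may be dropped and re-taken, the iteration must use a
 * _safe variant or otherwise tolerate the list changing underneath it:
 *
 *	spin_lock(&cache_lock);
 *	list_for_each_entry_safe(obj, tmp, &cache_list, node) {
 *		evict_one(obj);
 *		cond_resched_lock(&cache_lock);
 *	}
 *	spin_unlock(&cache_lock);
 */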
7272
7273 int __cond_resched_rwlock_read(rwlock_t *lock)
7274 {
7275 int resched = should_resched(PREEMPT_LOCK_OFFSET);
7276 int ret = 0;
7277
7278 lockdep_assert_held_read(lock);
7279
7280 if (rwlock_needbreak(lock) || resched) {
7281 read_unlock(lock);
7282 if (!_cond_resched())
7283 cpu_relax();
7284 ret = 1;
7285 read_lock(lock);
7286 }
7287 return ret;
7288 }
7289 EXPORT_SYMBOL(__cond_resched_rwlock_read);
7290
7291 int __cond_resched_rwlock_write(rwlock_t *lock)
7292 {
7293 int resched = should_resched(PREEMPT_LOCK_OFFSET);
7294 int ret = 0;
7295
7296 lockdep_assert_held_write(lock);
7297
7298 if (rwlock_needbreak(lock) || resched) {
7299 write_unlock(lock);
7300 if (!_cond_resched())
7301 cpu_relax();
7302 ret = 1;
7303 write_lock(lock);
7304 }
7305 return ret;
7306 }
7307 EXPORT_SYMBOL(__cond_resched_rwlock_write);
7308
7309 #ifdef CONFIG_PREEMPT_DYNAMIC
7310
7311 #ifdef CONFIG_GENERIC_ENTRY
7312 #include <linux/entry-common.h>
7313 #endif
7314
7315 /*
7316 * SC:cond_resched
7317 * SC:might_resched
7318 * SC:preempt_schedule
7319 * SC:preempt_schedule_notrace
7320 * SC:irqentry_exit_cond_resched
7321 *
7322 *
7323 * NONE:
7324 * cond_resched <- __cond_resched
7325 * might_resched <- RET0
7326 * preempt_schedule <- NOP
7327 * preempt_schedule_notrace <- NOP
7328 * irqentry_exit_cond_resched <- NOP
7329 *
7330 * VOLUNTARY:
7331 * cond_resched <- __cond_resched
7332 * might_resched <- __cond_resched
7333 * preempt_schedule <- NOP
7334 * preempt_schedule_notrace <- NOP
7335 * irqentry_exit_cond_resched <- NOP
7336 *
7337 * FULL:
7338 * cond_resched <- RET0
7339 * might_resched <- RET0
7340 * preempt_schedule <- preempt_schedule
7341 * preempt_schedule_notrace <- preempt_schedule_notrace
7342 * irqentry_exit_cond_resched <- irqentry_exit_cond_resched
7343 */
7344
7345 enum {
7346 preempt_dynamic_undefined = -1,
7347 preempt_dynamic_none,
7348 preempt_dynamic_voluntary,
7349 preempt_dynamic_full,
7350 };
7351
7352 int preempt_dynamic_mode = preempt_dynamic_undefined;
7353
7354 int sched_dynamic_mode(const char *str)
7355 {
7356 if (!strcmp(str, "none"))
7357 return preempt_dynamic_none;
7358
7359 if (!strcmp(str, "voluntary"))
7360 return preempt_dynamic_voluntary;
7361
7362 if (!strcmp(str, "full"))
7363 return preempt_dynamic_full;
7364
7365 return -EINVAL;
7366 }
7367
7368 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
7369 #define preempt_dynamic_enable(f) static_call_update(f, f##_dynamic_enabled)
7370 #define preempt_dynamic_disable(f) static_call_update(f, f##_dynamic_disabled)
7371 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7372 #define preempt_dynamic_enable(f) static_key_enable(&sk_dynamic_##f.key)
7373 #define preempt_dynamic_disable(f) static_key_disable(&sk_dynamic_##f.key)
7374 #else
7375 #error "Unsupported PREEMPT_DYNAMIC mechanism"
7376 #endif
7377
7378 static DEFINE_MUTEX(sched_dynamic_mutex);
7379 static bool klp_override;
7380
7381 static void __sched_dynamic_update(int mode)
7382 {
7383 /*
7384 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
7385 * the ZERO state, which is invalid.
7386 */
7387 if (!klp_override)
7388 preempt_dynamic_enable(cond_resched);
7389 preempt_dynamic_enable(might_resched);
7390 preempt_dynamic_enable(preempt_schedule);
7391 preempt_dynamic_enable(preempt_schedule_notrace);
7392 preempt_dynamic_enable(irqentry_exit_cond_resched);
7393
7394 switch (mode) {
7395 case preempt_dynamic_none:
7396 if (!klp_override)
7397 preempt_dynamic_enable(cond_resched);
7398 preempt_dynamic_disable(might_resched);
7399 preempt_dynamic_disable(preempt_schedule);
7400 preempt_dynamic_disable(preempt_schedule_notrace);
7401 preempt_dynamic_disable(irqentry_exit_cond_resched);
7402 if (mode != preempt_dynamic_mode)
7403 pr_info("Dynamic Preempt: none\n");
7404 break;
7405
7406 case preempt_dynamic_voluntary:
7407 if (!klp_override)
7408 preempt_dynamic_enable(cond_resched);
7409 preempt_dynamic_enable(might_resched);
7410 preempt_dynamic_disable(preempt_schedule);
7411 preempt_dynamic_disable(preempt_schedule_notrace);
7412 preempt_dynamic_disable(irqentry_exit_cond_resched);
7413 if (mode != preempt_dynamic_mode)
7414 pr_info("Dynamic Preempt: voluntary\n");
7415 break;
7416
7417 case preempt_dynamic_full:
7418 if (!klp_override)
7419 preempt_dynamic_disable(cond_resched);
7420 preempt_dynamic_disable(might_resched);
7421 preempt_dynamic_enable(preempt_schedule);
7422 preempt_dynamic_enable(preempt_schedule_notrace);
7423 preempt_dynamic_enable(irqentry_exit_cond_resched);
7424 if (mode != preempt_dynamic_mode)
7425 pr_info("Dynamic Preempt: full\n");
7426 break;
7427 }
7428
7429 preempt_dynamic_mode = mode;
7430 }
7431
7432 void sched_dynamic_update(int mode)
7433 {
7434 mutex_lock(&sched_dynamic_mutex);
7435 __sched_dynamic_update(mode);
7436 mutex_unlock(&sched_dynamic_mutex);
7437 }
7438
7439 #ifdef CONFIG_HAVE_PREEMPT_DYNAMIC_CALL
7440
7441 static int klp_cond_resched(void)
7442 {
7443 __klp_sched_try_switch();
7444 return __cond_resched();
7445 }
7446
7447 void sched_dynamic_klp_enable(void)
7448 {
7449 mutex_lock(&sched_dynamic_mutex);
7450
7451 klp_override = true;
7452 static_call_update(cond_resched, klp_cond_resched);
7453
7454 mutex_unlock(&sched_dynamic_mutex);
7455 }
7456
7457 void sched_dynamic_klp_disable(void)
7458 {
7459 mutex_lock(&sched_dynamic_mutex);
7460
7461 klp_override = false;
7462 __sched_dynamic_update(preempt_dynamic_mode);
7463
7464 mutex_unlock(&sched_dynamic_mutex);
7465 }
7466
7467 #endif /* CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
7468
7469 static int __init setup_preempt_mode(char *str)
7470 {
7471 int mode = sched_dynamic_mode(str);
7472 if (mode < 0) {
7473 pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
7474 return 0;
7475 }
7476
7477 sched_dynamic_update(mode);
7478 return 1;
7479 }
7480 __setup("preempt=", setup_preempt_mode);
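/*
 * Example usage: with CONFIG_PREEMPT_DYNAMIC=y the preemption model is
 * selected at boot from the kernel command line via the __setup() hook
 * above, e.g.:
 *
 *	preempt=none
 *	preempt=voluntary
 *	preempt=full
 *
 * The same strings are accepted by sched_dynamic_mode(), so the model can
 * also be switched at runtime through the scheduler debugfs knob
 * (typically /sys/kernel/debug/sched/preempt, where debugfs and the
 * scheduler debug interface are available), which ends up calling
 * sched_dynamic_update() just like the boot parameter does.
 */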
7481
7482 static void __init preempt_dynamic_init(void)
7483 {
7484 if (preempt_dynamic_mode == preempt_dynamic_undefined) {
7485 if (IS_ENABLED(CONFIG_PREEMPT_NONE)) {
7486 sched_dynamic_update(preempt_dynamic_none);
7487 } else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) {
7488 sched_dynamic_update(preempt_dynamic_voluntary);
7489 } else {
7490 /* Default static call setting, nothing to do */
7491 WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT));
7492 preempt_dynamic_mode = preempt_dynamic_full;
7493 pr_info("Dynamic Preempt: full\n");
7494 }
7495 }
7496 }
7497
7498 #define PREEMPT_MODEL_ACCESSOR(mode) \
7499 bool preempt_model_##mode(void) \
7500 { \
7501 WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \
7502 return preempt_dynamic_mode == preempt_dynamic_##mode; \
7503 } \
7504 EXPORT_SYMBOL_GPL(preempt_model_##mode)
7505
7506 PREEMPT_MODEL_ACCESSOR(none);
7507 PREEMPT_MODEL_ACCESSOR(voluntary);
7508 PREEMPT_MODEL_ACCESSOR(full);
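/*
 * For reference, PREEMPT_MODEL_ACCESSOR(none) above expands to roughly:
 *
 *	bool preempt_model_none(void)
 *	{
 *		WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined);
 *		return preempt_dynamic_mode == preempt_dynamic_none;
 *	}
 *	EXPORT_SYMBOL_GPL(preempt_model_none);
 *
 * giving callers a cheap way to query the currently selected dynamic
 * preemption model.
 */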
7509
7510 #else /* !CONFIG_PREEMPT_DYNAMIC: */
7511
7512 static inline void preempt_dynamic_init(void) { }
7513
7514 #endif /* CONFIG_PREEMPT_DYNAMIC */
7515
7516 int io_schedule_prepare(void)
7517 {
7518 int old_iowait = current->in_iowait;
7519
7520 current->in_iowait = 1;
7521 blk_flush_plug(current->plug, true);
7522 return old_iowait;
7523 }
7524
7525 void io_schedule_finish(int token)
7526 {
7527 current->in_iowait = token;
7528 }
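/*
 * Illustrative sketch: io_schedule_prepare()/io_schedule_finish() bracket
 * an arbitrary sleep so it is accounted as I/O wait, mirroring what
 * io_schedule() and io_schedule_timeout() below do internally. The
 * completion used here is hypothetical:
 *
 *	int token = io_schedule_prepare();
 *
 *	wait_for_completion(&dev->dma_done);
 *	io_schedule_finish(token);
 *
 * The token preserves any previously-set in_iowait state, so nested use
 * restores the outer context correctly.
 */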
7529
7530 /*
7531 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
7532 * that process accounting knows that this is a task in IO wait state.
7533 */
7534 long __sched io_schedule_timeout(long timeout)
7535 {
7536 int token;
7537 long ret;
7538
7539 token = io_schedule_prepare();
7540 ret = schedule_timeout(timeout);
7541 io_schedule_finish(token);
7542
7543 return ret;
7544 }
7545 EXPORT_SYMBOL(io_schedule_timeout);
7546
7547 void __sched io_schedule(void)
7548 {
7549 int token;
7550
7551 token = io_schedule_prepare();
7552 schedule();
7553 io_schedule_finish(token);
7554 }
7555 EXPORT_SYMBOL(io_schedule);
7556
7557 void sched_show_task(struct task_struct *p)
7558 {
7559 unsigned long free;
7560 int ppid;
7561
7562 if (!try_get_task_stack(p))
7563 return;
7564
7565 pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
7566
7567 if (task_is_running(p))
7568 pr_cont(" running task ");
7569 free = stack_not_used(p);
7570 ppid = 0;
7571 rcu_read_lock();
7572 if (pid_alive(p))
7573 ppid = task_pid_nr(rcu_dereference(p->real_parent));
7574 rcu_read_unlock();
7575 pr_cont(" stack:%-5lu pid:%-5d tgid:%-5d ppid:%-6d flags:0x%08lx\n",
7576 free, task_pid_nr(p), task_tgid_nr(p),
7577 ppid, read_task_thread_flags(p));
7578
7579 print_worker_info(KERN_INFO, p);
7580 print_stop_info(KERN_INFO, p);
7581 print_scx_info(KERN_INFO, p);
7582 show_stack(p, NULL, KERN_INFO);
7583 put_task_stack(p);
7584 }
7585 EXPORT_SYMBOL_GPL(sched_show_task);
7586
7587 static inline bool
7588 state_filter_match(unsigned long state_filter, struct task_struct *p)
7589 {
7590 unsigned int state = READ_ONCE(p->__state);
7591
7592 /* no filter, everything matches */
7593 if (!state_filter)
7594 return true;
7595
7596 /* filter, but doesn't match */
7597 if (!(state & state_filter))
7598 return false;
7599
7600 /*
7601 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
7602 * TASK_KILLABLE).
7603 */
7604 if (state_filter == TASK_UNINTERRUPTIBLE && (state & TASK_NOLOAD))
7605 return false;
7606
7607 return true;
7608 }
7609
7610
7611 void show_state_filter(unsigned int state_filter)
7612 {
7613 struct task_struct *g, *p;
7614
7615 rcu_read_lock();
7616 for_each_process_thread(g, p) {
7617 /*
7618 * Reset the NMI-timeout; listing all tasks on a slow
7619 * console might take a lot of time.
7620 * Also, reset softlockup watchdogs on all CPUs, because
7621 * another CPU might be blocked waiting for us to process
7622 * an IPI.
7623 */
7624 touch_nmi_watchdog();
7625 touch_all_softlockup_watchdogs();
7626 if (state_filter_match(state_filter, p))
7627 sched_show_task(p);
7628 }
7629
7630 #ifdef CONFIG_SCHED_DEBUG
7631 if (!state_filter)
7632 sysrq_sched_debug_show();
7633 #endif
7634 rcu_read_unlock();
7635 /*
7636 * Only show locks if all tasks are dumped:
7637 */
7638 if (!state_filter)
7639 debug_show_all_locks();
7640 }
7641
7642 /**
7643 * init_idle - set up an idle thread for a given CPU
7644 * @idle: task in question
7645 * @cpu: CPU the idle task belongs to
7646 *
7647 * NOTE: this function does not set the idle thread's NEED_RESCHED
7648 * flag, to make booting more robust.
7649 */
7650 void __init init_idle(struct task_struct *idle, int cpu)
7651 {
7652 #ifdef CONFIG_SMP
7653 struct affinity_context ac = (struct affinity_context) {
7654 .new_mask = cpumask_of(cpu),
7655 .flags = 0,
7656 };
7657 #endif
7658 struct rq *rq = cpu_rq(cpu);
7659 unsigned long flags;
7660
7661 __sched_fork(0, idle);
7662
7663 raw_spin_lock_irqsave(&idle->pi_lock, flags);
7664 raw_spin_rq_lock(rq);
7665
7666 idle->__state = TASK_RUNNING;
7667 idle->se.exec_start = sched_clock();
7668 /*
7669 * PF_KTHREAD should already be set at this point; regardless, make it
7670 * look like a proper per-CPU kthread.
7671 */
7672 idle->flags |= PF_KTHREAD | PF_NO_SETAFFINITY;
7673 kthread_set_per_cpu(idle, cpu);
7674
7675 #ifdef CONFIG_SMP
7676 /*
7677 * It's possible that init_idle() gets called multiple times on a task,
7678 * in that case do_set_cpus_allowed() will not do the right thing.
7679 *
7680 * And since this is boot we can forgo the serialization.
7681 */
7682 set_cpus_allowed_common(idle, &ac);
7683 #endif
7684 /*
7685 * We're having a chicken and egg problem, even though we are
7686 * holding rq->lock, the CPU isn't yet set to this CPU so the
7687 * lockdep check in task_group() will fail.
7688 *
7689 * Similar case to sched_fork(). / Alternatively we could
7690 * use task_rq_lock() here and obtain the other rq->lock.
7691 *
7692 * Silence PROVE_RCU
7693 */
7694 rcu_read_lock();
7695 __set_task_cpu(idle, cpu);
7696 rcu_read_unlock();
7697
7698 rq->idle = idle;
7699 rcu_assign_pointer(rq->curr, idle);
7700 idle->on_rq = TASK_ON_RQ_QUEUED;
7701 #ifdef CONFIG_SMP
7702 idle->on_cpu = 1;
7703 #endif
7704 raw_spin_rq_unlock(rq);
7705 raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
7706
7707 /* Set the preempt count _outside_ the spinlocks! */
7708 init_idle_preempt_count(idle, cpu);
7709
7710 /*
7711 * The idle tasks have their own, simple scheduling class:
7712 */
7713 idle->sched_class = &idle_sched_class;
7714 ftrace_graph_init_idle_task(idle, cpu);
7715 vtime_init_idle(idle, cpu);
7716 #ifdef CONFIG_SMP
7717 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
7718 #endif
7719 }
7720
7721 #ifdef CONFIG_SMP
7722
7723 int cpuset_cpumask_can_shrink(const struct cpumask *cur,
7724 const struct cpumask *trial)
7725 {
7726 int ret = 1;
7727
7728 if (cpumask_empty(cur))
7729 return ret;
7730
7731 ret = dl_cpuset_cpumask_can_shrink(cur, trial);
7732
7733 return ret;
7734 }
7735
7736 int task_can_attach(struct task_struct *p)
7737 {
7738 int ret = 0;
7739
7740 /*
7741 * Kthreads which disallow setaffinity shouldn't be moved
7742 * to a new cpuset; we don't want to change their CPU
7743 * affinity and isolating such threads by their set of
7744 * allowed nodes is unnecessary. Thus, cpusets are not
7745 * applicable for such threads. This prevents checking for
7746 * success of set_cpus_allowed_ptr() on all attached tasks
7747 * before cpus_mask may be changed.
7748 */
7749 if (p->flags & PF_NO_SETAFFINITY)
7750 ret = -EINVAL;
7751
7752 return ret;
7753 }
7754
7755 bool sched_smp_initialized __read_mostly;
7756
7757 #ifdef CONFIG_NUMA_BALANCING
7758 /* Migrate current task p to target_cpu */
7759 int migrate_task_to(struct task_struct *p, int target_cpu)
7760 {
7761 struct migration_arg arg = { p, target_cpu };
7762 int curr_cpu = task_cpu(p);
7763
7764 if (curr_cpu == target_cpu)
7765 return 0;
7766
7767 if (!cpumask_test_cpu(target_cpu, p->cpus_ptr))
7768 return -EINVAL;
7769
7770 /* TODO: This is not properly updating schedstats */
7771
7772 trace_sched_move_numa(p, curr_cpu, target_cpu);
7773 return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
7774 }
7775
7776 /*
7777 * Requeue a task on a given node and accurately track the number of NUMA
7778 * tasks on the runqueues
7779 */
7780 void sched_setnuma(struct task_struct *p, int nid)
7781 {
7782 bool queued, running;
7783 struct rq_flags rf;
7784 struct rq *rq;
7785
7786 rq = task_rq_lock(p, &rf);
7787 queued = task_on_rq_queued(p);
7788 running = task_current(rq, p);
7789
7790 if (queued)
7791 dequeue_task(rq, p, DEQUEUE_SAVE);
7792 if (running)
7793 put_prev_task(rq, p);
7794
7795 p->numa_preferred_nid = nid;
7796
7797 if (queued)
7798 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
7799 if (running)
7800 set_next_task(rq, p);
7801 task_rq_unlock(rq, p, &rf);
7802 }
7803 #endif /* CONFIG_NUMA_BALANCING */
7804
7805 #ifdef CONFIG_HOTPLUG_CPU
7806 /*
7807 * Ensure that the idle task is using init_mm right before its CPU goes
7808 * offline.
7809 */
7810 void idle_task_exit(void)
7811 {
7812 struct mm_struct *mm = current->active_mm;
7813
7814 BUG_ON(cpu_online(smp_processor_id()));
7815 BUG_ON(current != this_rq()->idle);
7816
7817 if (mm != &init_mm) {
7818 switch_mm(mm, &init_mm, current);
7819 finish_arch_post_lock_switch();
7820 }
7821
7822 /* finish_cpu(), as ran on the BP, will clean up the active_mm state */
7823 }
7824
7825 static int __balance_push_cpu_stop(void *arg)
7826 {
7827 struct task_struct *p = arg;
7828 struct rq *rq = this_rq();
7829 struct rq_flags rf;
7830 int cpu;
7831
7832 raw_spin_lock_irq(&p->pi_lock);
7833 rq_lock(rq, &rf);
7834
7835 update_rq_clock(rq);
7836
7837 if (task_rq(p) == rq && task_on_rq_queued(p)) {
7838 cpu = select_fallback_rq(rq->cpu, p);
7839 rq = __migrate_task(rq, &rf, p, cpu);
7840 }
7841
7842 rq_unlock(rq, &rf);
7843 raw_spin_unlock_irq(&p->pi_lock);
7844
7845 put_task_struct(p);
7846
7847 return 0;
7848 }
7849
7850 static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
7851
7852 /*
7853 * Ensure we only run per-cpu kthreads once the CPU goes !active.
7854 *
7855 * This is enabled below SCHED_AP_ACTIVE; when !cpu_active(), but only
7856 * effective when the hotplug motion is down.
7857 */
7858 static void balance_push(struct rq *rq)
7859 {
7860 struct task_struct *push_task = rq->curr;
7861
7862 lockdep_assert_rq_held(rq);
7863
7864 /*
7865 * Ensure the thing is persistent until balance_push_set(.on = false);
7866 */
7867 rq->balance_callback = &balance_push_callback;
7868
7869 /*
7870 * Only active while going offline and when invoked on the outgoing
7871 * CPU.
7872 */
7873 if (!cpu_dying(rq->cpu) || rq != this_rq())
7874 return;
7875
7876 /*
7877 * Both the cpu-hotplug and stop task are in this case and are
7878 * required to complete the hotplug process.
7879 */
7880 if (kthread_is_per_cpu(push_task) ||
7881 is_migration_disabled(push_task)) {
7882
7883 /*
7884 * If this is the idle task on the outgoing CPU try to wake
7885 * up the hotplug control thread which might wait for the
7886 * last task to vanish. The rcuwait_active() check is
7887 * accurate here because the waiter is pinned on this CPU
7888 * and can't obviously be running in parallel.
7889 *
7890 * On RT kernels this also has to check whether there are
7891 * pinned and scheduled out tasks on the runqueue. They
7892 * need to leave the migrate disabled section first.
7893 */
7894 if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
7895 rcuwait_active(&rq->hotplug_wait)) {
7896 raw_spin_rq_unlock(rq);
7897 rcuwait_wake_up(&rq->hotplug_wait);
7898 raw_spin_rq_lock(rq);
7899 }
7900 return;
7901 }
7902
7903 get_task_struct(push_task);
7904 /*
7905 * Temporarily drop rq->lock such that we can wake-up the stop task.
7906 * Both preemption and IRQs are still disabled.
7907 */
7908 preempt_disable();
7909 raw_spin_rq_unlock(rq);
7910 stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
7911 this_cpu_ptr(&push_work));
7912 preempt_enable();
7913 /*
7914 * At this point need_resched() is true and we'll take the loop in
7915 * schedule(). The next pick is obviously going to be the stop task
7916 * which is kthread_is_per_cpu() and will push this task away.
7917 */
7918 raw_spin_rq_lock(rq);
7919 }
7920
7921 static void balance_push_set(int cpu, bool on)
7922 {
7923 struct rq *rq = cpu_rq(cpu);
7924 struct rq_flags rf;
7925
7926 rq_lock_irqsave(rq, &rf);
7927 if (on) {
7928 WARN_ON_ONCE(rq->balance_callback);
7929 rq->balance_callback = &balance_push_callback;
7930 } else if (rq->balance_callback == &balance_push_callback) {
7931 rq->balance_callback = NULL;
7932 }
7933 rq_unlock_irqrestore(rq, &rf);
7934 }
7935
7936 /*
7937 * Invoked from a CPU's hotplug control thread after the CPU has been marked
7938 * inactive. All tasks which are not per CPU kernel threads are either
7939 * pushed off this CPU now via balance_push() or placed on a different CPU
7940 * during wakeup. Wait until the CPU is quiescent.
7941 */
7942 static void balance_hotplug_wait(void)
7943 {
7944 struct rq *rq = this_rq();
7945
7946 rcuwait_wait_event(&rq->hotplug_wait,
7947 rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
7948 TASK_UNINTERRUPTIBLE);
7949 }
7950
7951 #else
7952
7953 static inline void balance_push(struct rq *rq)
7954 {
7955 }
7956
7957 static inline void balance_push_set(int cpu, bool on)
7958 {
7959 }
7960
7961 static inline void balance_hotplug_wait(void)
7962 {
7963 }
7964
7965 #endif /* CONFIG_HOTPLUG_CPU */
7966
7967 void set_rq_online(struct rq *rq)
7968 {
7969 if (!rq->online) {
7970 const struct sched_class *class;
7971
7972 cpumask_set_cpu(rq->cpu, rq->rd->online);
7973 rq->online = 1;
7974
7975 for_each_class(class) {
7976 if (class->rq_online)
7977 class->rq_online(rq);
7978 }
7979 }
7980 }
7981
7982 void set_rq_offline(struct rq *rq)
7983 {
7984 if (rq->online) {
7985 const struct sched_class *class;
7986
7987 update_rq_clock(rq);
7988 for_each_class(class) {
7989 if (class->rq_offline)
7990 class->rq_offline(rq);
7991 }
7992
7993 cpumask_clear_cpu(rq->cpu, rq->rd->online);
7994 rq->online = 0;
7995 }
7996 }
7997
7998 static inline void sched_set_rq_online(struct rq *rq, int cpu)
7999 {
8000 struct rq_flags rf;
8001
8002 rq_lock_irqsave(rq, &rf);
8003 if (rq->rd) {
8004 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
8005 set_rq_online(rq);
8006 }
8007 rq_unlock_irqrestore(rq, &rf);
8008 }
8009
8010 static inline void sched_set_rq_offline(struct rq *rq, int cpu)
8011 {
8012 struct rq_flags rf;
8013
8014 rq_lock_irqsave(rq, &rf);
8015 if (rq->rd) {
8016 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
8017 set_rq_offline(rq);
8018 }
8019 rq_unlock_irqrestore(rq, &rf);
8020 }
8021
8022 /*
8023 * used to mark begin/end of suspend/resume:
8024 */
8025 static int num_cpus_frozen;
8026
8027 /*
8028 * Update cpusets according to cpu_active mask. If cpusets are
8029 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
8030 * around partition_sched_domains().
8031 *
8032 * If we come here as part of a suspend/resume, don't touch cpusets because we
8033 * want to restore it back to its original state upon resume anyway.
8034 */
8035 static void cpuset_cpu_active(void)
8036 {
8037 if (cpuhp_tasks_frozen) {
8038 /*
8039 * num_cpus_frozen tracks how many CPUs are involved in suspend
8040 * resume sequence. As long as this is not the last online
8041 * operation in the resume sequence, just build a single sched
8042 * domain, ignoring cpusets.
8043 */
8044 partition_sched_domains(1, NULL, NULL);
8045 if (--num_cpus_frozen)
8046 return;
8047 /*
8048 * This is the last CPU online operation. So fall through and
8049 * restore the original sched domains by considering the
8050 * cpuset configurations.
8051 */
8052 cpuset_force_rebuild();
8053 }
8054 cpuset_update_active_cpus();
8055 }
8056
8057 static int cpuset_cpu_inactive(unsigned int cpu)
8058 {
8059 if (!cpuhp_tasks_frozen) {
8060 int ret = dl_bw_check_overflow(cpu);
8061
8062 if (ret)
8063 return ret;
8064 cpuset_update_active_cpus();
8065 } else {
8066 num_cpus_frozen++;
8067 partition_sched_domains(1, NULL, NULL);
8068 }
8069 return 0;
8070 }
8071
8072 static inline void sched_smt_present_inc(int cpu)
8073 {
8074 #ifdef CONFIG_SCHED_SMT
8075 if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
8076 static_branch_inc_cpuslocked(&sched_smt_present);
8077 #endif
8078 }
8079
8080 static inline void sched_smt_present_dec(int cpu)
8081 {
8082 #ifdef CONFIG_SCHED_SMT
8083 if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
8084 static_branch_dec_cpuslocked(&sched_smt_present);
8085 #endif
8086 }
8087
8088 int sched_cpu_activate(unsigned int cpu)
8089 {
8090 struct rq *rq = cpu_rq(cpu);
8091
8092 /*
8093 * Clear the balance_push callback and prepare to schedule
8094 * regular tasks.
8095 */
8096 balance_push_set(cpu, false);
8097
8098 /*
8099 * When going up, increment the number of cores with SMT present.
8100 */
8101 sched_smt_present_inc(cpu);
8102 set_cpu_active(cpu, true);
8103
8104 if (sched_smp_initialized) {
8105 sched_update_numa(cpu, true);
8106 sched_domains_numa_masks_set(cpu);
8107 cpuset_cpu_active();
8108 }
8109
8110 scx_rq_activate(rq);
8111
8112 /*
8113 * Put the rq online, if not already. This happens:
8114 *
8115 * 1) In the early boot process, because we build the real domains
8116 * after all CPUs have been brought up.
8117 *
8118 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
8119 * domains.
8120 */
8121 sched_set_rq_online(rq, cpu);
8122
8123 return 0;
8124 }
8125
8126 int sched_cpu_deactivate(unsigned int cpu)
8127 {
8128 struct rq *rq = cpu_rq(cpu);
8129 int ret;
8130
8131 /*
8132 * Remove CPU from nohz.idle_cpus_mask to prevent participating in
8133 * load balancing when not active.
8134 */
8135 nohz_balance_exit_idle(rq);
8136
8137 set_cpu_active(cpu, false);
8138
8139 /*
8140 * From this point forward, this CPU will refuse to run any task that
8141 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
8142 * push those tasks away until this gets cleared, see
8143 * sched_cpu_dying().
8144 */
8145 balance_push_set(cpu, true);
8146
8147 /*
8148 * We've cleared cpu_active_mask / set balance_push, wait for all
8149 * preempt-disabled and RCU users of this state to go away such that
8150 * all new such users will observe it.
8151 *
8152 * Specifically, we rely on ttwu to no longer target this CPU, see
8153 * ttwu_queue_cond() and is_cpu_allowed().
8154 *
8155 * Do sync before park smpboot threads to take care the RCU boost case.
8156 */
8157 synchronize_rcu();
8158
8159 sched_set_rq_offline(rq, cpu);
8160
8161 scx_rq_deactivate(rq);
8162
8163 /*
8164 * When going down, decrement the number of cores with SMT present.
8165 */
8166 sched_smt_present_dec(cpu);
8167
8168 #ifdef CONFIG_SCHED_SMT
8169 sched_core_cpu_deactivate(cpu);
8170 #endif
8171
8172 if (!sched_smp_initialized)
8173 return 0;
8174
8175 sched_update_numa(cpu, false);
8176 ret = cpuset_cpu_inactive(cpu);
8177 if (ret) {
8178 sched_smt_present_inc(cpu);
8179 sched_set_rq_online(rq, cpu);
8180 balance_push_set(cpu, false);
8181 set_cpu_active(cpu, true);
8182 sched_update_numa(cpu, true);
8183 return ret;
8184 }
8185 sched_domains_numa_masks_clear(cpu);
8186 return 0;
8187 }
8188
8189 static void sched_rq_cpu_starting(unsigned int cpu)
8190 {
8191 struct rq *rq = cpu_rq(cpu);
8192
8193 rq->calc_load_update = calc_load_update;
8194 update_max_interval();
8195 }
8196
8197 int sched_cpu_starting(unsigned int cpu)
8198 {
8199 sched_core_cpu_starting(cpu);
8200 sched_rq_cpu_starting(cpu);
8201 sched_tick_start(cpu);
8202 return 0;
8203 }
8204
8205 #ifdef CONFIG_HOTPLUG_CPU
8206
8207 /*
8208 * Invoked immediately before the stopper thread is invoked to bring the
8209 * CPU down completely. At this point all per CPU kthreads except the
8210 * hotplug thread (current) and the stopper thread (inactive) have been
8211 * either parked or have been unbound from the outgoing CPU. Ensure that
8212 * any of those which might be on the way out are gone.
8213 *
8214 * If after this point a bound task is being woken on this CPU then the
8215 * responsible hotplug callback has failed to do its job.
8216 * sched_cpu_dying() will catch it with the appropriate fireworks.
8217 */
8218 int sched_cpu_wait_empty(unsigned int cpu)
8219 {
8220 balance_hotplug_wait();
8221 return 0;
8222 }
8223
8224 /*
8225 * Since this CPU is going 'away' for a while, fold any nr_active delta we
8226 * might have. Called from the CPU stopper task after ensuring that the
8227 * stopper is the last running task on the CPU, so nr_active count is
8228 * stable. We need to take the tear-down thread which is calling this into
8229 * account, so we hand in adjust = 1 to the load calculation.
8230 *
8231 * Also see the comment "Global load-average calculations".
8232 */
8233 static void calc_load_migrate(struct rq *rq)
8234 {
8235 long delta = calc_load_fold_active(rq, 1);
8236
8237 if (delta)
8238 atomic_long_add(delta, &calc_load_tasks);
8239 }
8240
8241 static void dump_rq_tasks(struct rq *rq, const char *loglvl)
8242 {
8243 struct task_struct *g, *p;
8244 int cpu = cpu_of(rq);
8245
8246 lockdep_assert_rq_held(rq);
8247
8248 printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
8249 for_each_process_thread(g, p) {
8250 if (task_cpu(p) != cpu)
8251 continue;
8252
8253 if (!task_on_rq_queued(p))
8254 continue;
8255
8256 printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
8257 }
8258 }
8259
8260 int sched_cpu_dying(unsigned int cpu)
8261 {
8262 struct rq *rq = cpu_rq(cpu);
8263 struct rq_flags rf;
8264
8265 /* Handle pending wakeups and then migrate everything off */
8266 sched_tick_stop(cpu);
8267
8268 rq_lock_irqsave(rq, &rf);
8269 if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
8270 WARN(true, "Dying CPU not properly vacated!");
8271 dump_rq_tasks(rq, KERN_WARNING);
8272 }
8273 rq_unlock_irqrestore(rq, &rf);
8274
8275 calc_load_migrate(rq);
8276 update_max_interval();
8277 hrtick_clear(rq);
8278 sched_core_cpu_dying(cpu);
8279 return 0;
8280 }
8281 #endif
8282
8283 void __init sched_init_smp(void)
8284 {
8285 sched_init_numa(NUMA_NO_NODE);
8286
8287 /*
8288 * There's no userspace yet to cause hotplug operations; hence all the
8289 * CPU masks are stable and all blatant races in the below code cannot
8290 * happen.
8291 */
8292 mutex_lock(&sched_domains_mutex);
8293 sched_init_domains(cpu_active_mask);
8294 mutex_unlock(&sched_domains_mutex);
8295
8296 /* Move init over to a non-isolated CPU */
8297 if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0)
8298 BUG();
8299 current->flags &= ~PF_NO_SETAFFINITY;
8300 sched_init_granularity();
8301
8302 init_sched_rt_class();
8303 init_sched_dl_class();
8304
8305 sched_smp_initialized = true;
8306 }
8307
8308 static int __init migration_init(void)
8309 {
8310 sched_cpu_starting(smp_processor_id());
8311 return 0;
8312 }
8313 early_initcall(migration_init);
8314
8315 #else
8316 void __init sched_init_smp(void)
8317 {
8318 sched_init_granularity();
8319 }
8320 #endif /* CONFIG_SMP */
8321
8322 int in_sched_functions(unsigned long addr)
8323 {
8324 return in_lock_functions(addr) ||
8325 (addr >= (unsigned long)__sched_text_start
8326 && addr < (unsigned long)__sched_text_end);
8327 }
8328
8329 #ifdef CONFIG_CGROUP_SCHED
8330 /*
8331 * Default task group.
8332 * Every task in the system belongs to this group at bootup.
8333 */
8334 struct task_group root_task_group;
8335 LIST_HEAD(task_groups);
8336
8337 /* Cacheline aligned slab cache for task_group */
8338 static struct kmem_cache *task_group_cache __ro_after_init;
8339 #endif
8340
8341 void __init sched_init(void)
8342 {
8343 unsigned long ptr = 0;
8344 int i;
8345
8346 /* Make sure the linker didn't screw up */
8347 #ifdef CONFIG_SMP
8348 BUG_ON(!sched_class_above(&stop_sched_class, &dl_sched_class));
8349 #endif
8350 BUG_ON(!sched_class_above(&dl_sched_class, &rt_sched_class));
8351 BUG_ON(!sched_class_above(&rt_sched_class, &fair_sched_class));
8352 BUG_ON(!sched_class_above(&fair_sched_class, &idle_sched_class));
8353 #ifdef CONFIG_SCHED_CLASS_EXT
8354 BUG_ON(!sched_class_above(&fair_sched_class, &ext_sched_class));
8355 BUG_ON(!sched_class_above(&ext_sched_class, &idle_sched_class));
8356 #endif
8357
8358 wait_bit_init();
8359
8360 #ifdef CONFIG_FAIR_GROUP_SCHED
8361 ptr += 2 * nr_cpu_ids * sizeof(void **);
8362 #endif
8363 #ifdef CONFIG_RT_GROUP_SCHED
8364 ptr += 2 * nr_cpu_ids * sizeof(void **);
8365 #endif
8366 if (ptr) {
8367 ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT);
8368
8369 #ifdef CONFIG_FAIR_GROUP_SCHED
8370 root_task_group.se = (struct sched_entity **)ptr;
8371 ptr += nr_cpu_ids * sizeof(void **);
8372
8373 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
8374 ptr += nr_cpu_ids * sizeof(void **);
8375
8376 root_task_group.shares = ROOT_TASK_GROUP_LOAD;
8377 init_cfs_bandwidth(&root_task_group.cfs_bandwidth, NULL);
8378 #endif /* CONFIG_FAIR_GROUP_SCHED */
8379 #ifdef CONFIG_EXT_GROUP_SCHED
8380 root_task_group.scx_weight = CGROUP_WEIGHT_DFL;
8381 #endif /* CONFIG_EXT_GROUP_SCHED */
8382 #ifdef CONFIG_RT_GROUP_SCHED
8383 root_task_group.rt_se = (struct sched_rt_entity **)ptr;
8384 ptr += nr_cpu_ids * sizeof(void **);
8385
8386 root_task_group.rt_rq = (struct rt_rq **)ptr;
8387 ptr += nr_cpu_ids * sizeof(void **);
8388
8389 #endif /* CONFIG_RT_GROUP_SCHED */
8390 }
8391
8392 #ifdef CONFIG_SMP
8393 init_defrootdomain();
8394 #endif
8395
8396 #ifdef CONFIG_RT_GROUP_SCHED
8397 init_rt_bandwidth(&root_task_group.rt_bandwidth,
8398 global_rt_period(), global_rt_runtime());
8399 #endif /* CONFIG_RT_GROUP_SCHED */
8400
8401 #ifdef CONFIG_CGROUP_SCHED
8402 task_group_cache = KMEM_CACHE(task_group, 0);
8403
8404 list_add(&root_task_group.list, &task_groups);
8405 INIT_LIST_HEAD(&root_task_group.children);
8406 INIT_LIST_HEAD(&root_task_group.siblings);
8407 autogroup_init(&init_task);
8408 #endif /* CONFIG_CGROUP_SCHED */
8409
8410 for_each_possible_cpu(i) {
8411 struct rq *rq;
8412
8413 rq = cpu_rq(i);
8414 raw_spin_lock_init(&rq->__lock);
8415 rq->nr_running = 0;
8416 rq->calc_load_active = 0;
8417 rq->calc_load_update = jiffies + LOAD_FREQ;
8418 init_cfs_rq(&rq->cfs);
8419 init_rt_rq(&rq->rt);
8420 init_dl_rq(&rq->dl);
8421 #ifdef CONFIG_FAIR_GROUP_SCHED
8422 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
8423 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
8424 /*
8425 * How much CPU bandwidth does root_task_group get?
8426 *
8427 * In case of task-groups formed through the cgroup filesystem, it
8428 * gets 100% of the CPU resources in the system. This overall
8429 * system CPU resource is divided among the tasks of
8430 * root_task_group and its child task-groups in a fair manner,
8431 * based on each entity's (task or task-group's) weight
8432 * (se->load.weight).
8433 *
8434 * In other words, if root_task_group has 10 tasks of weight
8435 * 1024 and two child groups A0 and A1 (of weight 1024 each),
8436 * then A0's share of the CPU resource is:
8437 *
8438 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
8439 *
8440 * We achieve this by letting root_task_group's tasks sit
8441 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
8442 */
8443 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
8444 #endif /* CONFIG_FAIR_GROUP_SCHED */
8445
8446 #ifdef CONFIG_RT_GROUP_SCHED
8447 /*
8448 * This is required for init cpu because rt.c:__enable_runtime()
8449 * starts working after scheduler_running, which is not the case
8450 * yet.
8451 */
8452 rq->rt.rt_runtime = global_rt_runtime();
8453 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
8454 #endif
8455 #ifdef CONFIG_SMP
8456 rq->sd = NULL;
8457 rq->rd = NULL;
8458 rq->cpu_capacity = SCHED_CAPACITY_SCALE;
8459 rq->balance_callback = &balance_push_callback;
8460 rq->active_balance = 0;
8461 rq->next_balance = jiffies;
8462 rq->push_cpu = 0;
8463 rq->cpu = i;
8464 rq->online = 0;
8465 rq->idle_stamp = 0;
8466 rq->avg_idle = 2*sysctl_sched_migration_cost;
8467 rq->max_idle_balance_cost = sysctl_sched_migration_cost;
8468
8469 INIT_LIST_HEAD(&rq->cfs_tasks);
8470
8471 rq_attach_root(rq, &def_root_domain);
8472 #ifdef CONFIG_NO_HZ_COMMON
8473 rq->last_blocked_load_update_tick = jiffies;
8474 atomic_set(&rq->nohz_flags, 0);
8475
8476 INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
8477 #endif
8478 #ifdef CONFIG_HOTPLUG_CPU
8479 rcuwait_init(&rq->hotplug_wait);
8480 #endif
8481 #endif /* CONFIG_SMP */
8482 hrtick_rq_init(rq);
8483 atomic_set(&rq->nr_iowait, 0);
8484 fair_server_init(rq);
8485
8486 #ifdef CONFIG_SCHED_CORE
8487 rq->core = rq;
8488 rq->core_pick = NULL;
8489 rq->core_dl_server = NULL;
8490 rq->core_enabled = 0;
8491 rq->core_tree = RB_ROOT;
8492 rq->core_forceidle_count = 0;
8493 rq->core_forceidle_occupation = 0;
8494 rq->core_forceidle_start = 0;
8495
8496 rq->core_cookie = 0UL;
8497 #endif
8498 zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i));
8499 }
8500
8501 set_load_weight(&init_task, false);
8502 init_task.se.slice = sysctl_sched_base_slice;
8503
8504 /*
8505 * The boot idle thread does lazy MMU switching as well:
8506 */
8507 mmgrab_lazy_tlb(&init_mm);
8508 enter_lazy_tlb(&init_mm, current);
8509
8510 /*
8511 * The idle task doesn't need the kthread struct to function, but it
8512 * is dressed up as a per-CPU kthread and thus needs to play the part
8513 * if we want to avoid special-casing it in code that deals with per-CPU
8514 * kthreads.
8515 */
8516 WARN_ON(!set_kthread_struct(current));
8517
8518 /*
8519 * Make us the idle thread. Technically, schedule() should not be
8520 * called from this thread, however somewhere below it might be,
8521 * but because we are the idle thread, we just pick up running again
8522 * when this runqueue becomes "idle".
8523 */
8524 init_idle(current, smp_processor_id());
8525
8526 calc_load_update = jiffies + LOAD_FREQ;
8527
8528 #ifdef CONFIG_SMP
8529 idle_thread_set_boot_cpu();
8530 balance_push_set(smp_processor_id(), false);
8531 #endif
8532 init_sched_fair_class();
8533 init_sched_ext_class();
8534
8535 psi_init();
8536
8537 init_uclamp();
8538
8539 preempt_dynamic_init();
8540
8541 scheduler_running = 1;
8542 }
8543
8544 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
8545
8546 void __might_sleep(const char *file, int line)
8547 {
8548 unsigned int state = get_current_state();
8549 /*
8550 * Blocking primitives will set (and therefore destroy) current->state,
8551 * since we will exit with TASK_RUNNING make sure we enter with it,
8552 * otherwise we will destroy state.
8553 */
8554 WARN_ONCE(state != TASK_RUNNING && current->task_state_change,
8555 "do not call blocking ops when !TASK_RUNNING; "
8556 "state=%x set at [<%p>] %pS\n", state,
8557 (void *)current->task_state_change,
8558 (void *)current->task_state_change);
8559
8560 __might_resched(file, line, 0);
8561 }
8562 EXPORT_SYMBOL(__might_sleep);
8563
8564 static void print_preempt_disable_ip(int preempt_offset, unsigned long ip)
8565 {
8566 if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT))
8567 return;
8568
8569 if (preempt_count() == preempt_offset)
8570 return;
8571
8572 pr_err("Preemption disabled at:");
8573 print_ip_sym(KERN_ERR, ip);
8574 }
8575
8576 static inline bool resched_offsets_ok(unsigned int offsets)
8577 {
8578 unsigned int nested = preempt_count();
8579
8580 nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT;
8581
8582 return nested == offsets;
8583 }
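/*
 * Worked example of the encoding checked above: the low
 * MIGHT_RESCHED_PREEMPT_MASK bits of @offsets carry the preempt count a
 * caller is expected to have, and the bits from MIGHT_RESCHED_RCU_SHIFT
 * upwards carry the expected rcu_preempt_depth(). A hypothetical caller
 * that may block while holding exactly one RCU read lock and a preempt
 * count of PREEMPT_LOCK_OFFSET would therefore pass:
 *
 *	offsets = (1U << MIGHT_RESCHED_RCU_SHIFT) | PREEMPT_LOCK_OFFSET;
 *
 * and __might_resched() below only stays silent when both components
 * match exactly.
 */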
8584
8585 void __might_resched(const char *file, int line, unsigned int offsets)
8586 {
8587 /* Ratelimiting timestamp: */
8588 static unsigned long prev_jiffy;
8589
8590 unsigned long preempt_disable_ip;
8591
8592 /* WARN_ON_ONCE() by default, no rate limit required: */
8593 rcu_sleep_check();
8594
8595 if ((resched_offsets_ok(offsets) && !irqs_disabled() &&
8596 !is_idle_task(current) && !current->non_block_count) ||
8597 system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
8598 oops_in_progress)
8599 return;
8600
8601 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8602 return;
8603 prev_jiffy = jiffies;
8604
8605 /* Save this before calling printk(), since that will clobber it: */
8606 preempt_disable_ip = get_preempt_disable_ip(current);
8607
8608 pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
8609 file, line);
8610 pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
8611 in_atomic(), irqs_disabled(), current->non_block_count,
8612 current->pid, current->comm);
8613 pr_err("preempt_count: %x, expected: %x\n", preempt_count(),
8614 offsets & MIGHT_RESCHED_PREEMPT_MASK);
8615
8616 if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
8617 pr_err("RCU nest depth: %d, expected: %u\n",
8618 rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT);
8619 }
8620
8621 if (task_stack_end_corrupted(current))
8622 pr_emerg("Thread overran stack, or stack corrupted\n");
8623
8624 debug_show_held_locks(current);
8625 if (irqs_disabled())
8626 print_irqtrace_events(current);
8627
8628 print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK,
8629 preempt_disable_ip);
8630
8631 dump_stack();
8632 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
8633 }
8634 EXPORT_SYMBOL(__might_resched);
8635
8636 void __cant_sleep(const char *file, int line, int preempt_offset)
8637 {
8638 static unsigned long prev_jiffy;
8639
8640 if (irqs_disabled())
8641 return;
8642
8643 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
8644 return;
8645
8646 if (preempt_count() > preempt_offset)
8647 return;
8648
8649 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8650 return;
8651 prev_jiffy = jiffies;
8652
8653 printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
8654 printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
8655 in_atomic(), irqs_disabled(),
8656 current->pid, current->comm);
8657
8658 debug_show_held_locks(current);
8659 dump_stack();
8660 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
8661 }
8662 EXPORT_SYMBOL_GPL(__cant_sleep);
8663
8664 #ifdef CONFIG_SMP
8665 void __cant_migrate(const char *file, int line)
8666 {
8667 static unsigned long prev_jiffy;
8668
8669 if (irqs_disabled())
8670 return;
8671
8672 if (is_migration_disabled(current))
8673 return;
8674
8675 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
8676 return;
8677
8678 if (preempt_count() > 0)
8679 return;
8680
8681 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8682 return;
8683 prev_jiffy = jiffies;
8684
8685 pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
8686 pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
8687 in_atomic(), irqs_disabled(), is_migration_disabled(current),
8688 current->pid, current->comm);
8689
8690 debug_show_held_locks(current);
8691 dump_stack();
8692 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
8693 }
8694 EXPORT_SYMBOL_GPL(__cant_migrate);
8695 #endif
8696 #endif
8697
8698 #ifdef CONFIG_MAGIC_SYSRQ
8699 void normalize_rt_tasks(void)
8700 {
8701 struct task_struct *g, *p;
8702 struct sched_attr attr = {
8703 .sched_policy = SCHED_NORMAL,
8704 };
8705
8706 read_lock(&tasklist_lock);
8707 for_each_process_thread(g, p) {
8708 /*
8709 * Only normalize user tasks:
8710 */
8711 if (p->flags & PF_KTHREAD)
8712 continue;
8713
8714 p->se.exec_start = 0;
8715 schedstat_set(p->stats.wait_start, 0);
8716 schedstat_set(p->stats.sleep_start, 0);
8717 schedstat_set(p->stats.block_start, 0);
8718
8719 if (!rt_or_dl_task(p)) {
8720 /*
8721 * Renice negative nice level userspace
8722 * tasks back to 0:
8723 */
8724 if (task_nice(p) < 0)
8725 set_user_nice(p, 0);
8726 continue;
8727 }
8728
8729 __sched_setscheduler(p, &attr, false, false);
8730 }
8731 read_unlock(&tasklist_lock);
8732 }
8733
8734 #endif /* CONFIG_MAGIC_SYSRQ */
8735
8736 #if defined(CONFIG_KGDB_KDB)
8737 /*
8738 * These functions are only useful for KDB.
8739 *
8740 * They can only be called when the whole system has been
8741 * stopped - every CPU needs to be quiescent, and no scheduling
8742 * activity can take place. Using them for anything else would
8743 * be a serious bug, and as a result, they aren't even visible
8744 * under any other configuration.
8745 */
8746
8747 /**
8748 * curr_task - return the current task for a given CPU.
8749 * @cpu: the processor in question.
8750 *
8751 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
8752 *
8753 * Return: The current task for @cpu.
8754 */
8755 struct task_struct *curr_task(int cpu)
8756 {
8757 return cpu_curr(cpu);
8758 }
8759
8760 #endif /* defined(CONFIG_KGDB_KDB) */
8761
8762 #ifdef CONFIG_CGROUP_SCHED
8763 /* task_group_lock serializes the addition/removal of task groups */
8764 static DEFINE_SPINLOCK(task_group_lock);
8765
8766 static inline void alloc_uclamp_sched_group(struct task_group *tg,
8767 struct task_group *parent)
8768 {
8769 #ifdef CONFIG_UCLAMP_TASK_GROUP
8770 enum uclamp_id clamp_id;
8771
8772 for_each_clamp_id(clamp_id) {
8773 uclamp_se_set(&tg->uclamp_req[clamp_id],
8774 uclamp_none(clamp_id), false);
8775 tg->uclamp[clamp_id] = parent->uclamp[clamp_id];
8776 }
8777 #endif
8778 }
8779
8780 static void sched_free_group(struct task_group *tg)
8781 {
8782 free_fair_sched_group(tg);
8783 free_rt_sched_group(tg);
8784 autogroup_free(tg);
8785 kmem_cache_free(task_group_cache, tg);
8786 }
8787
8788 static void sched_free_group_rcu(struct rcu_head *rcu)
8789 {
8790 sched_free_group(container_of(rcu, struct task_group, rcu));
8791 }
8792
8793 static void sched_unregister_group(struct task_group *tg)
8794 {
8795 unregister_fair_sched_group(tg);
8796 unregister_rt_sched_group(tg);
8797 /*
8798 * We have to wait for yet another RCU grace period to expire, as
8799 * print_cfs_stats() might run concurrently.
8800 */
8801 call_rcu(&tg->rcu, sched_free_group_rcu);
8802 }
8803
8804 /* allocate runqueue etc for a new task group */
8805 struct task_group *sched_create_group(struct task_group *parent)
8806 {
8807 struct task_group *tg;
8808
8809 tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
8810 if (!tg)
8811 return ERR_PTR(-ENOMEM);
8812
8813 if (!alloc_fair_sched_group(tg, parent))
8814 goto err;
8815
8816 if (!alloc_rt_sched_group(tg, parent))
8817 goto err;
8818
8819 scx_group_set_weight(tg, CGROUP_WEIGHT_DFL);
8820 alloc_uclamp_sched_group(tg, parent);
8821
8822 return tg;
8823
8824 err:
8825 sched_free_group(tg);
8826 return ERR_PTR(-ENOMEM);
8827 }
8828
8829 void sched_online_group(struct task_group *tg, struct task_group *parent)
8830 {
8831 unsigned long flags;
8832
8833 spin_lock_irqsave(&task_group_lock, flags);
8834 list_add_rcu(&tg->list, &task_groups);
8835
8836 /* Root should already exist: */
8837 WARN_ON(!parent);
8838
8839 tg->parent = parent;
8840 INIT_LIST_HEAD(&tg->children);
8841 list_add_rcu(&tg->siblings, &parent->children);
8842 spin_unlock_irqrestore(&task_group_lock, flags);
8843
8844 online_fair_sched_group(tg);
8845 }
8846
8847 /* RCU callback to free various structures associated with a task group */
8848 static void sched_unregister_group_rcu(struct rcu_head *rhp)
8849 {
8850 /* Now it should be safe to free those cfs_rqs: */
8851 sched_unregister_group(container_of(rhp, struct task_group, rcu));
8852 }
8853
8854 void sched_destroy_group(struct task_group *tg)
8855 {
8856 /* Wait for possible concurrent references to cfs_rqs to complete: */
8857 call_rcu(&tg->rcu, sched_unregister_group_rcu);
8858 }
8859
8860 void sched_release_group(struct task_group *tg)
8861 {
8862 unsigned long flags;
8863
8864 /*
8865 * Unlink first, to prevent walk_tg_tree_from() from finding us (via
8866 * sched_cfs_period_timer()).
8867 *
8868 * For this to be effective, we have to wait for all pending users of
8869 * this task group to leave their RCU critical section to ensure no new
8870 * user will see our dying task group any more. Specifically ensure
8871 * that tg_unthrottle_up() won't add decayed cfs_rq's to it.
8872 *
8873 * We therefore defer calling unregister_fair_sched_group() to
8874 * sched_unregister_group() which is guaranteed to get called only after the
8875 * current RCU grace period has expired.
8876 */
8877 spin_lock_irqsave(&task_group_lock, flags);
8878 list_del_rcu(&tg->list);
8879 list_del_rcu(&tg->siblings);
8880 spin_unlock_irqrestore(&task_group_lock, flags);
8881 }
8882
8883 static struct task_group *sched_get_task_group(struct task_struct *tsk)
8884 {
8885 struct task_group *tg;
8886
8887 /*
8888 * All callers are synchronized by task_rq_lock(); we do not use RCU
8889 * which would be pointless here. Thus, we pass "true" to task_css_check()
8890 * to prevent lockdep warnings.
8891 */
8892 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
8893 struct task_group, css);
8894 tg = autogroup_task_group(tsk, tg);
8895
8896 return tg;
8897 }
8898
8899 static void sched_change_group(struct task_struct *tsk, struct task_group *group)
8900 {
8901 tsk->sched_task_group = group;
8902
8903 #ifdef CONFIG_FAIR_GROUP_SCHED
8904 if (tsk->sched_class->task_change_group)
8905 tsk->sched_class->task_change_group(tsk);
8906 else
8907 #endif
8908 set_task_rq(tsk, task_cpu(tsk));
8909 }
8910
8911 /*
8912 * Change task's runqueue when it moves between groups.
8913 *
8914 * The caller of this function should have put the task in its new group by
8915 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
8916 * its new group.
8917 */
8918 void sched_move_task(struct task_struct *tsk)
8919 {
8920 int queued, running, queue_flags =
8921 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
8922 struct task_group *group;
8923 struct rq *rq;
8924
8925 CLASS(task_rq_lock, rq_guard)(tsk);
8926 rq = rq_guard.rq;
8927
8928 /*
8929 * Esp. with SCHED_AUTOGROUP enabled it is possible to get superfluous
8930 * group changes.
8931 */
8932 group = sched_get_task_group(tsk);
8933 if (group == tsk->sched_task_group)
8934 return;
8935
8936 update_rq_clock(rq);
8937
8938 running = task_current(rq, tsk);
8939 queued = task_on_rq_queued(tsk);
8940
8941 if (queued)
8942 dequeue_task(rq, tsk, queue_flags);
8943 if (running)
8944 put_prev_task(rq, tsk);
8945
8946 sched_change_group(tsk, group);
8947 scx_move_task(tsk);
8948
8949 if (queued)
8950 enqueue_task(rq, tsk, queue_flags);
8951 if (running) {
8952 set_next_task(rq, tsk);
8953 /*
8954 * After changing group, the running task may have joined a
8955 * throttled one but it's still the running task. Trigger a
8956 * resched to make sure that task can still run.
8957 */
8958 resched_curr(rq);
8959 }
8960 }
8961
8962 static struct cgroup_subsys_state *
8963 cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
8964 {
8965 struct task_group *parent = css_tg(parent_css);
8966 struct task_group *tg;
8967
8968 if (!parent) {
8969 /* This is early initialization for the top cgroup */
8970 return &root_task_group.css;
8971 }
8972
8973 tg = sched_create_group(parent);
8974 if (IS_ERR(tg))
8975 return ERR_PTR(-ENOMEM);
8976
8977 return &tg->css;
8978 }
8979
8980 /* Expose task group only after completing cgroup initialization */
8981 static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
8982 {
8983 struct task_group *tg = css_tg(css);
8984 struct task_group *parent = css_tg(css->parent);
8985 int ret;
8986
8987 ret = scx_tg_online(tg);
8988 if (ret)
8989 return ret;
8990
8991 if (parent)
8992 sched_online_group(tg, parent);
8993
8994 #ifdef CONFIG_UCLAMP_TASK_GROUP
8995 /* Propagate the effective uclamp value for the new group */
8996 guard(mutex)(&uclamp_mutex);
8997 guard(rcu)();
8998 cpu_util_update_eff(css);
8999 #endif
9000
9001 return 0;
9002 }
9003
9004 static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
9005 {
9006 struct task_group *tg = css_tg(css);
9007
9008 scx_tg_offline(tg);
9009 }
9010
9011 static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
9012 {
9013 struct task_group *tg = css_tg(css);
9014
9015 sched_release_group(tg);
9016 }
9017
9018 static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
9019 {
9020 struct task_group *tg = css_tg(css);
9021
9022 /*
9023 * Relies on the RCU grace period between css_released() and this.
9024 */
9025 sched_unregister_group(tg);
9026 }
9027
9028 static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
9029 {
9030 #ifdef CONFIG_RT_GROUP_SCHED
9031 struct task_struct *task;
9032 struct cgroup_subsys_state *css;
9033
9034 cgroup_taskset_for_each(task, css, tset) {
9035 if (!sched_rt_can_attach(css_tg(css), task))
9036 return -EINVAL;
9037 }
9038 #endif
9039 return scx_cgroup_can_attach(tset);
9040 }
9041
9042 static void cpu_cgroup_attach(struct cgroup_taskset *tset)
9043 {
9044 struct task_struct *task;
9045 struct cgroup_subsys_state *css;
9046
9047 cgroup_taskset_for_each(task, css, tset)
9048 sched_move_task(task);
9049
9050 scx_cgroup_finish_attach();
9051 }
9052
9053 static void cpu_cgroup_cancel_attach(struct cgroup_taskset *tset)
9054 {
9055 scx_cgroup_cancel_attach(tset);
9056 }
9057
9058 #ifdef CONFIG_UCLAMP_TASK_GROUP
9059 static void cpu_util_update_eff(struct cgroup_subsys_state *css)
9060 {
9061 struct cgroup_subsys_state *top_css = css;
9062 struct uclamp_se *uc_parent = NULL;
9063 struct uclamp_se *uc_se = NULL;
9064 unsigned int eff[UCLAMP_CNT];
9065 enum uclamp_id clamp_id;
9066 unsigned int clamps;
9067
9068 lockdep_assert_held(&uclamp_mutex);
9069 SCHED_WARN_ON(!rcu_read_lock_held());
9070
9071 css_for_each_descendant_pre(css, top_css) {
9072 uc_parent = css_tg(css)->parent
9073 ? css_tg(css)->parent->uclamp : NULL;
9074
9075 for_each_clamp_id(clamp_id) {
9076 /* Assume effective clamps matches requested clamps */
9077 eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value;
9078 /* Cap effective clamps with parent's effective clamps */
9079 if (uc_parent &&
9080 eff[clamp_id] > uc_parent[clamp_id].value) {
9081 eff[clamp_id] = uc_parent[clamp_id].value;
9082 }
9083 }
9084 /* Ensure protection is always capped by limit */
9085 eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]);
9086
9087 /* Propagate most restrictive effective clamps */
9088 clamps = 0x0;
9089 uc_se = css_tg(css)->uclamp;
9090 for_each_clamp_id(clamp_id) {
9091 if (eff[clamp_id] == uc_se[clamp_id].value)
9092 continue;
9093 uc_se[clamp_id].value = eff[clamp_id];
9094 uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]);
9095 clamps |= (0x1 << clamp_id);
9096 }
9097 if (!clamps) {
9098 css = css_rightmost_descendant(css);
9099 continue;
9100 }
9101
9102 /* Immediately update descendants' RUNNABLE tasks */
9103 uclamp_update_active_tasks(css);
9104 }
9105 }
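/*
 * A purely illustrative walk through the capping above (values are made
 * up, not taken from this file): if a parent's effective UCLAMP_MAX is
 * 512 and a child requests UCLAMP_MIN = 800 and UCLAMP_MAX = 1024, the
 * child's effective UCLAMP_MAX is first capped to the parent's 512, and
 * its effective UCLAMP_MIN is then clamped to min(800, 512) = 512 so the
 * protection can never exceed the limit.
 */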
9106
9107 /*
9108 * Integer 10^N with a given N exponent by casting to integer the literal "1eN"
9109 * C expression. Since there is no way to convert a macro argument (N) into a
9110 * character constant, use two levels of macros.
9111 */
9112 #define _POW10(exp) ((unsigned int)1e##exp)
9113 #define POW10(exp) _POW10(exp)
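/*
 * For illustration: POW10(2) expands to _POW10(2), which pastes the
 * argument onto the "1e" literal and yields ((unsigned int)1e2) == 100.
 * The second macro level only exists so the argument is expanded before
 * the token pasting happens.
 */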
9114
9115 struct uclamp_request {
9116 #define UCLAMP_PERCENT_SHIFT 2
9117 #define UCLAMP_PERCENT_SCALE (100 * POW10(UCLAMP_PERCENT_SHIFT))
9118 s64 percent;
9119 u64 util;
9120 int ret;
9121 };
9122
9123 static inline struct uclamp_request
9124 capacity_from_percent(char *buf)
9125 {
9126 struct uclamp_request req = {
9127 .percent = UCLAMP_PERCENT_SCALE,
9128 .util = SCHED_CAPACITY_SCALE,
9129 .ret = 0,
9130 };
9131
9132 buf = strim(buf);
9133 if (strcmp(buf, "max")) {
9134 req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT,
9135 &req.percent);
9136 if (req.ret)
9137 return req;
9138 if ((u64)req.percent > UCLAMP_PERCENT_SCALE) {
9139 req.ret = -ERANGE;
9140 return req;
9141 }
9142
9143 req.util = req.percent << SCHED_CAPACITY_SHIFT;
9144 req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE);
9145 }
9146
9147 return req;
9148 }
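/*
 * Illustrative arithmetic for the conversion above, assuming the usual
 * SCHED_CAPACITY_SHIFT of 10 (SCHED_CAPACITY_SCALE == 1024): writing
 * "50" parses to percent == 50 * POW10(2) == 5000, and
 * util == DIV_ROUND_CLOSEST_ULL(5000 << 10, 10000) == 512, i.e. half of
 * the capacity scale, as expected for 50%.
 */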
9149
9150 static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf,
9151 size_t nbytes, loff_t off,
9152 enum uclamp_id clamp_id)
9153 {
9154 struct uclamp_request req;
9155 struct task_group *tg;
9156
9157 req = capacity_from_percent(buf);
9158 if (req.ret)
9159 return req.ret;
9160
9161 static_branch_enable(&sched_uclamp_used);
9162
9163 guard(mutex)(&uclamp_mutex);
9164 guard(rcu)();
9165
9166 tg = css_tg(of_css(of));
9167 if (tg->uclamp_req[clamp_id].value != req.util)
9168 uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false);
9169
9170 /*
9171 * Because the conversion rounding is not recoverable, keep track of the
9172 * exact requested value.
9173 */
9174 tg->uclamp_pct[clamp_id] = req.percent;
9175
9176 /* Update effective clamps to track the most restrictive value */
9177 cpu_util_update_eff(of_css(of));
9178
9179 return nbytes;
9180 }
9181
9182 static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of,
9183 char *buf, size_t nbytes,
9184 loff_t off)
9185 {
9186 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MIN);
9187 }
9188
9189 static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of,
9190 char *buf, size_t nbytes,
9191 loff_t off)
9192 {
9193 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MAX);
9194 }
9195
9196 static inline void cpu_uclamp_print(struct seq_file *sf,
9197 enum uclamp_id clamp_id)
9198 {
9199 struct task_group *tg;
9200 u64 util_clamp;
9201 u64 percent;
9202 u32 rem;
9203
9204 scoped_guard (rcu) {
9205 tg = css_tg(seq_css(sf));
9206 util_clamp = tg->uclamp_req[clamp_id].value;
9207 }
9208
9209 if (util_clamp == SCHED_CAPACITY_SCALE) {
9210 seq_puts(sf, "max\n");
9211 return;
9212 }
9213
9214 percent = tg->uclamp_pct[clamp_id];
9215 percent = div_u64_rem(percent, POW10(UCLAMP_PERCENT_SHIFT), &rem);
9216 seq_printf(sf, "%llu.%0*u\n", percent, UCLAMP_PERCENT_SHIFT, rem);
9217 }
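/*
 * For example, a stored tg->uclamp_pct of 1234 (i.e. 12.34%) is split by
 * div_u64_rem(1234, 100, &rem) into 12 with rem == 34 and printed as
 * "12.34", while a request of SCHED_CAPACITY_SCALE prints "max" instead.
 */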
9218
9219 static int cpu_uclamp_min_show(struct seq_file *sf, void *v)
9220 {
9221 cpu_uclamp_print(sf, UCLAMP_MIN);
9222 return 0;
9223 }
9224
9225 static int cpu_uclamp_max_show(struct seq_file *sf, void *v)
9226 {
9227 cpu_uclamp_print(sf, UCLAMP_MAX);
9228 return 0;
9229 }
9230 #endif /* CONFIG_UCLAMP_TASK_GROUP */
9231
9232 #ifdef CONFIG_GROUP_SCHED_WEIGHT
9233 static unsigned long tg_weight(struct task_group *tg)
9234 {
9235 #ifdef CONFIG_FAIR_GROUP_SCHED
9236 return scale_load_down(tg->shares);
9237 #else
9238 return sched_weight_from_cgroup(tg->scx_weight);
9239 #endif
9240 }
9241
9242 static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
9243 struct cftype *cftype, u64 shareval)
9244 {
9245 int ret;
9246
9247 if (shareval > scale_load_down(ULONG_MAX))
9248 shareval = MAX_SHARES;
9249 ret = sched_group_set_shares(css_tg(css), scale_load(shareval));
9250 if (!ret)
9251 scx_group_set_weight(css_tg(css),
9252 sched_weight_to_cgroup(shareval));
9253 return ret;
9254 }
9255
9256 static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
9257 struct cftype *cft)
9258 {
9259 return tg_weight(css_tg(css));
9260 }
9261 #endif /* CONFIG_GROUP_SCHED_WEIGHT */
9262
9263 #ifdef CONFIG_CFS_BANDWIDTH
9264 static DEFINE_MUTEX(cfs_constraints_mutex);
9265
9266 const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
9267 static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
9268 /* More than 203 days if BW_SHIFT equals 20. */
9269 static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC;
9270
9271 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
9272
9273 static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota,
9274 u64 burst)
9275 {
9276 int i, ret = 0, runtime_enabled, runtime_was_enabled;
9277 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9278
9279 if (tg == &root_task_group)
9280 return -EINVAL;
9281
9282 /*
9283 * Ensure we have at least some amount of bandwidth every period. This is
9284 * to prevent reaching a state of large arrears when throttled via
9285 * entity_tick() resulting in prolonged exit starvation.
9286 */
9287 if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
9288 return -EINVAL;
9289
9290 /*
9291 * Likewise, bound things on the other side by preventing insane quota
9292 * periods. This also allows us to normalize in computing quota
9293 * feasibility.
9294 */
9295 if (period > max_cfs_quota_period)
9296 return -EINVAL;
9297
9298 /*
9299 * Bound quota to defend against overflow during the bandwidth shift.
9300 */
9301 if (quota != RUNTIME_INF && quota > max_cfs_runtime)
9302 return -EINVAL;
9303
9304 if (quota != RUNTIME_INF && (burst > quota ||
9305 burst + quota > max_cfs_runtime))
9306 return -EINVAL;
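/*
 * Purely as an illustration of the checks above: quota = 50ms and
 * period = 100ms is accepted and entitles the group to roughly half of
 * one CPU per period, while quota = 0.5ms would be rejected for being
 * below min_cfs_quota_period (1ms).
 */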
9307
9308 /*
9309 * Prevent race between setting of cfs_rq->runtime_enabled and
9310 * unthrottle_offline_cfs_rqs().
9311 */
9312 guard(cpus_read_lock)();
9313 guard(mutex)(&cfs_constraints_mutex);
9314
9315 ret = __cfs_schedulable(tg, period, quota);
9316 if (ret)
9317 return ret;
9318
9319 runtime_enabled = quota != RUNTIME_INF;
9320 runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
9321 /*
9322 * If we need to toggle cfs_bandwidth_used, off->on must occur
9323 * before making related changes, and on->off must occur afterwards
9324 */
9325 if (runtime_enabled && !runtime_was_enabled)
9326 cfs_bandwidth_usage_inc();
9327
9328 scoped_guard (raw_spinlock_irq, &cfs_b->lock) {
9329 cfs_b->period = ns_to_ktime(period);
9330 cfs_b->quota = quota;
9331 cfs_b->burst = burst;
9332
9333 __refill_cfs_bandwidth_runtime(cfs_b);
9334
9335 /*
9336 * Restart the period timer (if active) to handle new
9337 * period expiry:
9338 */
9339 if (runtime_enabled)
9340 start_cfs_bandwidth(cfs_b);
9341 }
9342
9343 for_each_online_cpu(i) {
9344 struct cfs_rq *cfs_rq = tg->cfs_rq[i];
9345 struct rq *rq = cfs_rq->rq;
9346
9347 guard(rq_lock_irq)(rq);
9348 cfs_rq->runtime_enabled = runtime_enabled;
9349 cfs_rq->runtime_remaining = 0;
9350
9351 if (cfs_rq->throttled)
9352 unthrottle_cfs_rq(cfs_rq);
9353 }
9354
9355 if (runtime_was_enabled && !runtime_enabled)
9356 cfs_bandwidth_usage_dec();
9357
9358 return 0;
9359 }
9360
9361 static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
9362 {
9363 u64 quota, period, burst;
9364
9365 period = ktime_to_ns(tg->cfs_bandwidth.period);
9366 burst = tg->cfs_bandwidth.burst;
9367 if (cfs_quota_us < 0)
9368 quota = RUNTIME_INF;
9369 else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC)
9370 quota = (u64)cfs_quota_us * NSEC_PER_USEC;
9371 else
9372 return -EINVAL;
9373
9374 return tg_set_cfs_bandwidth(tg, period, quota, burst);
9375 }
9376
9377 static long tg_get_cfs_quota(struct task_group *tg)
9378 {
9379 u64 quota_us;
9380
9381 if (tg->cfs_bandwidth.quota == RUNTIME_INF)
9382 return -1;
9383
9384 quota_us = tg->cfs_bandwidth.quota;
9385 do_div(quota_us, NSEC_PER_USEC);
9386
9387 return quota_us;
9388 }
9389
9390 static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
9391 {
9392 u64 quota, period, burst;
9393
9394 if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC)
9395 return -EINVAL;
9396
9397 period = (u64)cfs_period_us * NSEC_PER_USEC;
9398 quota = tg->cfs_bandwidth.quota;
9399 burst = tg->cfs_bandwidth.burst;
9400
9401 return tg_set_cfs_bandwidth(tg, period, quota, burst);
9402 }
9403
9404 static long tg_get_cfs_period(struct task_group *tg)
9405 {
9406 u64 cfs_period_us;
9407
9408 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
9409 do_div(cfs_period_us, NSEC_PER_USEC);
9410
9411 return cfs_period_us;
9412 }
9413
9414 static int tg_set_cfs_burst(struct task_group *tg, long cfs_burst_us)
9415 {
9416 u64 quota, period, burst;
9417
9418 if ((u64)cfs_burst_us > U64_MAX / NSEC_PER_USEC)
9419 return -EINVAL;
9420
9421 burst = (u64)cfs_burst_us * NSEC_PER_USEC;
9422 period = ktime_to_ns(tg->cfs_bandwidth.period);
9423 quota = tg->cfs_bandwidth.quota;
9424
9425 return tg_set_cfs_bandwidth(tg, period, quota, burst);
9426 }
9427
9428 static long tg_get_cfs_burst(struct task_group *tg)
9429 {
9430 u64 burst_us;
9431
9432 burst_us = tg->cfs_bandwidth.burst;
9433 do_div(burst_us, NSEC_PER_USEC);
9434
9435 return burst_us;
9436 }
9437
9438 static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
9439 struct cftype *cft)
9440 {
9441 return tg_get_cfs_quota(css_tg(css));
9442 }
9443
9444 static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
9445 struct cftype *cftype, s64 cfs_quota_us)
9446 {
9447 return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
9448 }
9449
9450 static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
9451 struct cftype *cft)
9452 {
9453 return tg_get_cfs_period(css_tg(css));
9454 }
9455
9456 static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
9457 struct cftype *cftype, u64 cfs_period_us)
9458 {
9459 return tg_set_cfs_period(css_tg(css), cfs_period_us);
9460 }
9461
9462 static u64 cpu_cfs_burst_read_u64(struct cgroup_subsys_state *css,
9463 struct cftype *cft)
9464 {
9465 return tg_get_cfs_burst(css_tg(css));
9466 }
9467
9468 static int cpu_cfs_burst_write_u64(struct cgroup_subsys_state *css,
9469 struct cftype *cftype, u64 cfs_burst_us)
9470 {
9471 return tg_set_cfs_burst(css_tg(css), cfs_burst_us);
9472 }
9473
9474 struct cfs_schedulable_data {
9475 struct task_group *tg;
9476 u64 period, quota;
9477 };
9478
9479 /*
9480 * normalize group quota/period to be quota/max_period
9481 * note: units are usecs
9482 */
9483 static u64 normalize_cfs_quota(struct task_group *tg,
9484 struct cfs_schedulable_data *d)
9485 {
9486 u64 quota, period;
9487
9488 if (tg == d->tg) {
9489 period = d->period;
9490 quota = d->quota;
9491 } else {
9492 period = tg_get_cfs_period(tg);
9493 quota = tg_get_cfs_quota(tg);
9494 }
9495
9496 /* note: these should typically be equivalent */
9497 if (quota == RUNTIME_INF || quota == -1)
9498 return RUNTIME_INF;
9499
9500 return to_ratio(period, quota);
9501 }
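/*
 * Example of the normalization above, using the BW_SHIFT of 20 mentioned
 * alongside max_cfs_runtime: quota = 50000us over period = 100000us
 * becomes to_ratio(100000, 50000) == (50000 << 20) / 100000 == 524288,
 * i.e. one half in fixed point (assuming to_ratio() keeps that scaling).
 */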
9502
9503 static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
9504 {
9505 struct cfs_schedulable_data *d = data;
9506 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9507 s64 quota = 0, parent_quota = -1;
9508
9509 if (!tg->parent) {
9510 quota = RUNTIME_INF;
9511 } else {
9512 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
9513
9514 quota = normalize_cfs_quota(tg, d);
9515 parent_quota = parent_b->hierarchical_quota;
9516
9517 /*
9518 * Ensure max(child_quota) <= parent_quota. On cgroup2,
9519 * always take the non-RUNTIME_INF min. On cgroup1, only
9520 * inherit when no limit is set. In both cases this is used
9521 * by the scheduler to determine if a given CFS task has a
9522 * bandwidth constraint at some higher level.
9523 */
9524 if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) {
9525 if (quota == RUNTIME_INF)
9526 quota = parent_quota;
9527 else if (parent_quota != RUNTIME_INF)
9528 quota = min(quota, parent_quota);
9529 } else {
9530 if (quota == RUNTIME_INF)
9531 quota = parent_quota;
9532 else if (parent_quota != RUNTIME_INF && quota > parent_quota)
9533 return -EINVAL;
9534 }
9535 }
9536 cfs_b->hierarchical_quota = quota;
9537
9538 return 0;
9539 }
9540
9541 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
9542 {
9543 struct cfs_schedulable_data data = {
9544 .tg = tg,
9545 .period = period,
9546 .quota = quota,
9547 };
9548
9549 if (quota != RUNTIME_INF) {
9550 do_div(data.period, NSEC_PER_USEC);
9551 do_div(data.quota, NSEC_PER_USEC);
9552 }
9553
9554 guard(rcu)();
9555 return walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
9556 }
9557
9558 static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
9559 {
9560 struct task_group *tg = css_tg(seq_css(sf));
9561 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9562
9563 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
9564 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
9565 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
9566
9567 if (schedstat_enabled() && tg != &root_task_group) {
9568 struct sched_statistics *stats;
9569 u64 ws = 0;
9570 int i;
9571
9572 for_each_possible_cpu(i) {
9573 stats = __schedstats_from_se(tg->se[i]);
9574 ws += schedstat_val(stats->wait_sum);
9575 }
9576
9577 seq_printf(sf, "wait_sum %llu\n", ws);
9578 }
9579
9580 seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst);
9581 seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time);
9582
9583 return 0;
9584 }
9585
9586 static u64 throttled_time_self(struct task_group *tg)
9587 {
9588 int i;
9589 u64 total = 0;
9590
9591 for_each_possible_cpu(i) {
9592 total += READ_ONCE(tg->cfs_rq[i]->throttled_clock_self_time);
9593 }
9594
9595 return total;
9596 }
9597
9598 static int cpu_cfs_local_stat_show(struct seq_file *sf, void *v)
9599 {
9600 struct task_group *tg = css_tg(seq_css(sf));
9601
9602 seq_printf(sf, "throttled_time %llu\n", throttled_time_self(tg));
9603
9604 return 0;
9605 }
9606 #endif /* CONFIG_CFS_BANDWIDTH */
9607
9608 #ifdef CONFIG_RT_GROUP_SCHED
9609 static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
9610 struct cftype *cft, s64 val)
9611 {
9612 return sched_group_set_rt_runtime(css_tg(css), val);
9613 }
9614
9615 static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
9616 struct cftype *cft)
9617 {
9618 return sched_group_rt_runtime(css_tg(css));
9619 }
9620
9621 static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
9622 struct cftype *cftype, u64 rt_period_us)
9623 {
9624 return sched_group_set_rt_period(css_tg(css), rt_period_us);
9625 }
9626
9627 static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
9628 struct cftype *cft)
9629 {
9630 return sched_group_rt_period(css_tg(css));
9631 }
9632 #endif /* CONFIG_RT_GROUP_SCHED */
9633
9634 #ifdef CONFIG_GROUP_SCHED_WEIGHT
9635 static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css,
9636 struct cftype *cft)
9637 {
9638 return css_tg(css)->idle;
9639 }
9640
9641 static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
9642 struct cftype *cft, s64 idle)
9643 {
9644 int ret;
9645
9646 ret = sched_group_set_idle(css_tg(css), idle);
9647 if (!ret)
9648 scx_group_set_idle(css_tg(css), idle);
9649 return ret;
9650 }
9651 #endif
9652
9653 static struct cftype cpu_legacy_files[] = {
9654 #ifdef CONFIG_GROUP_SCHED_WEIGHT
9655 {
9656 .name = "shares",
9657 .read_u64 = cpu_shares_read_u64,
9658 .write_u64 = cpu_shares_write_u64,
9659 },
9660 {
9661 .name = "idle",
9662 .read_s64 = cpu_idle_read_s64,
9663 .write_s64 = cpu_idle_write_s64,
9664 },
9665 #endif
9666 #ifdef CONFIG_CFS_BANDWIDTH
9667 {
9668 .name = "cfs_quota_us",
9669 .read_s64 = cpu_cfs_quota_read_s64,
9670 .write_s64 = cpu_cfs_quota_write_s64,
9671 },
9672 {
9673 .name = "cfs_period_us",
9674 .read_u64 = cpu_cfs_period_read_u64,
9675 .write_u64 = cpu_cfs_period_write_u64,
9676 },
9677 {
9678 .name = "cfs_burst_us",
9679 .read_u64 = cpu_cfs_burst_read_u64,
9680 .write_u64 = cpu_cfs_burst_write_u64,
9681 },
9682 {
9683 .name = "stat",
9684 .seq_show = cpu_cfs_stat_show,
9685 },
9686 {
9687 .name = "stat.local",
9688 .seq_show = cpu_cfs_local_stat_show,
9689 },
9690 #endif
9691 #ifdef CONFIG_RT_GROUP_SCHED
9692 {
9693 .name = "rt_runtime_us",
9694 .read_s64 = cpu_rt_runtime_read,
9695 .write_s64 = cpu_rt_runtime_write,
9696 },
9697 {
9698 .name = "rt_period_us",
9699 .read_u64 = cpu_rt_period_read_uint,
9700 .write_u64 = cpu_rt_period_write_uint,
9701 },
9702 #endif
9703 #ifdef CONFIG_UCLAMP_TASK_GROUP
9704 {
9705 .name = "uclamp.min",
9706 .flags = CFTYPE_NOT_ON_ROOT,
9707 .seq_show = cpu_uclamp_min_show,
9708 .write = cpu_uclamp_min_write,
9709 },
9710 {
9711 .name = "uclamp.max",
9712 .flags = CFTYPE_NOT_ON_ROOT,
9713 .seq_show = cpu_uclamp_max_show,
9714 .write = cpu_uclamp_max_write,
9715 },
9716 #endif
9717 { } /* Terminate */
9718 };
9719
9720 static int cpu_extra_stat_show(struct seq_file *sf,
9721 struct cgroup_subsys_state *css)
9722 {
9723 #ifdef CONFIG_CFS_BANDWIDTH
9724 {
9725 struct task_group *tg = css_tg(css);
9726 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9727 u64 throttled_usec, burst_usec;
9728
9729 throttled_usec = cfs_b->throttled_time;
9730 do_div(throttled_usec, NSEC_PER_USEC);
9731 burst_usec = cfs_b->burst_time;
9732 do_div(burst_usec, NSEC_PER_USEC);
9733
9734 seq_printf(sf, "nr_periods %d\n"
9735 "nr_throttled %d\n"
9736 "throttled_usec %llu\n"
9737 "nr_bursts %d\n"
9738 "burst_usec %llu\n",
9739 cfs_b->nr_periods, cfs_b->nr_throttled,
9740 throttled_usec, cfs_b->nr_burst, burst_usec);
9741 }
9742 #endif
9743 return 0;
9744 }
9745
9746 static int cpu_local_stat_show(struct seq_file *sf,
9747 struct cgroup_subsys_state *css)
9748 {
9749 #ifdef CONFIG_CFS_BANDWIDTH
9750 {
9751 struct task_group *tg = css_tg(css);
9752 u64 throttled_self_usec;
9753
9754 throttled_self_usec = throttled_time_self(tg);
9755 do_div(throttled_self_usec, NSEC_PER_USEC);
9756
9757 seq_printf(sf, "throttled_usec %llu\n",
9758 throttled_self_usec);
9759 }
9760 #endif
9761 return 0;
9762 }
9763
9764 #ifdef CONFIG_GROUP_SCHED_WEIGHT
9765
9766 static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
9767 struct cftype *cft)
9768 {
9769 return sched_weight_to_cgroup(tg_weight(css_tg(css)));
9770 }
9771
9772 static int cpu_weight_write_u64(struct cgroup_subsys_state *css,
9773 struct cftype *cft, u64 cgrp_weight)
9774 {
9775 unsigned long weight;
9776 int ret;
9777
9778 if (cgrp_weight < CGROUP_WEIGHT_MIN || cgrp_weight > CGROUP_WEIGHT_MAX)
9779 return -ERANGE;
9780
9781 weight = sched_weight_from_cgroup(cgrp_weight);
9782
9783 ret = sched_group_set_shares(css_tg(css), scale_load(weight));
9784 if (!ret)
9785 scx_group_set_weight(css_tg(css), cgrp_weight);
9786 return ret;
9787 }
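/*
 * Assuming sched_weight_from_cgroup() keeps the historical
 * weight * 1024 / CGROUP_WEIGHT_DFL scaling, the default cgroup weight
 * of 100 maps to the nice-0 load weight of 1024, and the extremes 1 and
 * 10000 map to roughly 10 and 102400 respectively.
 */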
9788
9789 static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css,
9790 struct cftype *cft)
9791 {
9792 unsigned long weight = tg_weight(css_tg(css));
9793 int last_delta = INT_MAX;
9794 int prio, delta;
9795
9796 /* find the closest nice value to the current weight */
9797 for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) {
9798 delta = abs(sched_prio_to_weight[prio] - weight);
9799 if (delta >= last_delta)
9800 break;
9801 last_delta = delta;
9802 }
9803
9804 return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO);
9805 }
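/*
 * For example, a group weight of 820 matches sched_prio_to_weight[21]
 * exactly, so the loop above breaks at prio == 22 and
 * PRIO_TO_NICE(22 - 1 + MAX_RT_PRIO) reports nice 1.
 */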
9806
9807 static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
9808 struct cftype *cft, s64 nice)
9809 {
9810 unsigned long weight;
9811 int idx, ret;
9812
9813 if (nice < MIN_NICE || nice > MAX_NICE)
9814 return -ERANGE;
9815
9816 idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO;
9817 idx = array_index_nospec(idx, 40);
9818 weight = sched_prio_to_weight[idx];
9819
9820 ret = sched_group_set_shares(css_tg(css), scale_load(weight));
9821 if (!ret)
9822 scx_group_set_weight(css_tg(css),
9823 sched_weight_to_cgroup(weight));
9824 return ret;
9825 }
9826 #endif /* CONFIG_GROUP_SCHED_WEIGHT */
9827
9828 static void __maybe_unused cpu_period_quota_print(struct seq_file *sf,
9829 long period, long quota)
9830 {
9831 if (quota < 0)
9832 seq_puts(sf, "max");
9833 else
9834 seq_printf(sf, "%ld", quota);
9835
9836 seq_printf(sf, " %ld\n", period);
9837 }
9838
9839 /* caller should put the current value in *@periodp before calling */
9840 static int __maybe_unused cpu_period_quota_parse(char *buf,
9841 u64 *periodp, u64 *quotap)
9842 {
9843 char tok[21]; /* U64_MAX */
9844
9845 if (sscanf(buf, "%20s %llu", tok, periodp) < 1)
9846 return -EINVAL;
9847
9848 *periodp *= NSEC_PER_USEC;
9849
9850 if (sscanf(tok, "%llu", quotap))
9851 *quotap *= NSEC_PER_USEC;
9852 else if (!strcmp(tok, "max"))
9853 *quotap = RUNTIME_INF;
9854 else
9855 return -EINVAL;
9856
9857 return 0;
9858 }
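/*
 * Example inputs for the parser above, matching the cgroup2 cpu.max
 * format: "50000 100000" yields a 50000us quota over a 100000us period,
 * while "max" sets the quota to RUNTIME_INF and leaves the period at the
 * caller-provided value.
 */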
9859
9860 #ifdef CONFIG_CFS_BANDWIDTH
9861 static int cpu_max_show(struct seq_file *sf, void *v)
9862 {
9863 struct task_group *tg = css_tg(seq_css(sf));
9864
9865 cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg));
9866 return 0;
9867 }
9868
9869 static ssize_t cpu_max_write(struct kernfs_open_file *of,
9870 char *buf, size_t nbytes, loff_t off)
9871 {
9872 struct task_group *tg = css_tg(of_css(of));
9873 u64 period = tg_get_cfs_period(tg);
9874 u64 burst = tg->cfs_bandwidth.burst;
9875 u64 quota;
9876 int ret;
9877
9878 ret = cpu_period_quota_parse(buf, &period, "a);
9879 if (!ret)
9880 ret = tg_set_cfs_bandwidth(tg, period, quota, burst);
9881 return ret ?: nbytes;
9882 }
9883 #endif
9884
9885 static struct cftype cpu_files[] = {
9886 #ifdef CONFIG_GROUP_SCHED_WEIGHT
9887 {
9888 .name = "weight",
9889 .flags = CFTYPE_NOT_ON_ROOT,
9890 .read_u64 = cpu_weight_read_u64,
9891 .write_u64 = cpu_weight_write_u64,
9892 },
9893 {
9894 .name = "weight.nice",
9895 .flags = CFTYPE_NOT_ON_ROOT,
9896 .read_s64 = cpu_weight_nice_read_s64,
9897 .write_s64 = cpu_weight_nice_write_s64,
9898 },
9899 {
9900 .name = "idle",
9901 .flags = CFTYPE_NOT_ON_ROOT,
9902 .read_s64 = cpu_idle_read_s64,
9903 .write_s64 = cpu_idle_write_s64,
9904 },
9905 #endif
9906 #ifdef CONFIG_CFS_BANDWIDTH
9907 {
9908 .name = "max",
9909 .flags = CFTYPE_NOT_ON_ROOT,
9910 .seq_show = cpu_max_show,
9911 .write = cpu_max_write,
9912 },
9913 {
9914 .name = "max.burst",
9915 .flags = CFTYPE_NOT_ON_ROOT,
9916 .read_u64 = cpu_cfs_burst_read_u64,
9917 .write_u64 = cpu_cfs_burst_write_u64,
9918 },
9919 #endif
9920 #ifdef CONFIG_UCLAMP_TASK_GROUP
9921 {
9922 .name = "uclamp.min",
9923 .flags = CFTYPE_NOT_ON_ROOT,
9924 .seq_show = cpu_uclamp_min_show,
9925 .write = cpu_uclamp_min_write,
9926 },
9927 {
9928 .name = "uclamp.max",
9929 .flags = CFTYPE_NOT_ON_ROOT,
9930 .seq_show = cpu_uclamp_max_show,
9931 .write = cpu_uclamp_max_write,
9932 },
9933 #endif
9934 { } /* terminate */
9935 };
9936
9937 struct cgroup_subsys cpu_cgrp_subsys = {
9938 .css_alloc = cpu_cgroup_css_alloc,
9939 .css_online = cpu_cgroup_css_online,
9940 .css_offline = cpu_cgroup_css_offline,
9941 .css_released = cpu_cgroup_css_released,
9942 .css_free = cpu_cgroup_css_free,
9943 .css_extra_stat_show = cpu_extra_stat_show,
9944 .css_local_stat_show = cpu_local_stat_show,
9945 .can_attach = cpu_cgroup_can_attach,
9946 .attach = cpu_cgroup_attach,
9947 .cancel_attach = cpu_cgroup_cancel_attach,
9948 .legacy_cftypes = cpu_legacy_files,
9949 .dfl_cftypes = cpu_files,
9950 .early_init = true,
9951 .threaded = true,
9952 };
9953
9954 #endif /* CONFIG_CGROUP_SCHED */
9955
9956 void dump_cpu_task(int cpu)
9957 {
9958 if (in_hardirq() && cpu == smp_processor_id()) {
9959 struct pt_regs *regs;
9960
9961 regs = get_irq_regs();
9962 if (regs) {
9963 show_regs(regs);
9964 return;
9965 }
9966 }
9967
9968 if (trigger_single_cpu_backtrace(cpu))
9969 return;
9970
9971 pr_info("Task dump for CPU %d:\n", cpu);
9972 sched_show_task(cpu_curr(cpu));
9973 }
9974
9975 /*
9976 * Nice levels are multiplicative, with a gentle 10% change for every
9977 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
9978 * nice 1, it will get ~10% less CPU time than another CPU-bound task
9979 * that remained on nice 0.
9980 *
9981 * The "10% effect" is relative and cumulative: from _any_ nice level,
9982 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
9983 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
9984 * If a task goes up by ~10% and another task goes down by ~10% then
9985 * the relative distance between them is ~25%.)
9986 */
9987 const int sched_prio_to_weight[40] = {
9988 /* -20 */ 88761, 71755, 56483, 46273, 36291,
9989 /* -15 */ 29154, 23254, 18705, 14949, 11916,
9990 /* -10 */ 9548, 7620, 6100, 4904, 3906,
9991 /* -5 */ 3121, 2501, 1991, 1586, 1277,
9992 /* 0 */ 1024, 820, 655, 526, 423,
9993 /* 5 */ 335, 272, 215, 172, 137,
9994 /* 10 */ 110, 87, 70, 56, 45,
9995 /* 15 */ 36, 29, 23, 18, 15,
9996 };
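/*
 * Sanity check of the ~1.25 multiplier described above: 1024 / 820 ~= 1.249
 * and 1277 / 1024 ~= 1.247, so each one-level nice step scales the weight by
 * roughly 25%, which is what produces the ~10% CPU-time delta.
 */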
9997
9998 /*
9999 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, pre-calculated.
10000 *
10001 * In cases where the weight does not change often, we can use the
10002 * pre-calculated inverse to speed up arithmetic by turning divisions
10003 * into multiplications:
10004 */
10005 const u32 sched_prio_to_wmult[40] = {
10006 /* -20 */ 48388, 59856, 76040, 92818, 118348,
10007 /* -15 */ 147320, 184698, 229616, 287308, 360437,
10008 /* -10 */ 449829, 563644, 704093, 875809, 1099582,
10009 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
10010 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
10011 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
10012 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
10013 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
10014 };
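/*
 * For example, 2^32 / 1024 == 4194304 (the nice-0 entry) and
 * 2^32 / 88761 ~= 48388 (the nice -20 entry), matching the corresponding
 * sched_prio_to_weight[] values above.
 */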
10015
10016 void call_trace_sched_update_nr_running(struct rq *rq, int count)
10017 {
10018 trace_sched_update_nr_running_tp(rq, count);
10019 }
10020
10021 #ifdef CONFIG_SCHED_MM_CID
10022
10023 /*
10024 * @cid_lock: Guarantee forward-progress of cid allocation.
10025 *
10026 * Concurrency ID allocation within a bitmap is mostly lock-free. The cid_lock
10027 * is only used when contention is detected by the lock-free allocation so
10028 * forward progress can be guaranteed.
10029 */
10030 DEFINE_RAW_SPINLOCK(cid_lock);
10031
10032 /*
10033 * @use_cid_lock: Select cid allocation behavior: lock-free vs spinlock.
10034 *
10035 * When @use_cid_lock is 0, the cid allocation is lock-free. When contention is
10036 * detected, it is set to 1 to ensure that all newly coming allocations are
10037 * serialized by @cid_lock until the allocation which detected contention
10038 * completes and sets @use_cid_lock back to 0. This guarantees forward progress
10039 * of a cid allocation.
10040 */
10041 int use_cid_lock;
10042
10043 /*
10044 * mm_cid remote-clear implements a lock-free algorithm to clear per-mm/cpu cid
10045 * concurrently with respect to the execution of the source runqueue context
10046 * switch.
10047 *
10048 * There is one basic property we want to guarantee here:
10049 *
10050 * (1) Remote-clear should _never_ mark a per-cpu cid UNSET when it is actively
10051 * used by a task. That would lead to concurrent allocation of the cid and
10052 * userspace corruption.
10053 *
10054 * Provide this guarantee by introducing a Dekker memory ordering to guarantee
10055 * that a pair of loads observe at least one of a pair of stores, which can be
10056 * shown as:
10057 *
10058 * X = Y = 0
10059 *
10060 * w[X]=1 w[Y]=1
10061 * MB MB
10062 * r[Y]=y r[X]=x
10063 *
10064 * Which guarantees that x==0 && y==0 is impossible. But rather than using
10065 * values 0 and 1, this algorithm cares about specific state transitions of the
10066 * runqueue current task (as updated by the scheduler context switch), and the
10067 * per-mm/cpu cid value.
10068 *
10069 * Let's introduce task (Y) which has task->mm == mm and task (N) which has
10070 * task->mm != mm for the rest of the discussion. There are two scheduler state
10071 * transitions on context switch we care about:
10072 *
10073 * (TSA) Store to rq->curr with transition from (N) to (Y)
10074 *
10075 * (TSB) Store to rq->curr with transition from (Y) to (N)
10076 *
10077 * On the remote-clear side, there is one transition we care about:
10078 *
10079 * (TMA) cmpxchg to *pcpu_cid to set the LAZY flag
10080 *
10081 * There is also a transition to UNSET state which can be performed from all
10082 * sides (scheduler, remote-clear). It is always performed with a cmpxchg which
10083 * guarantees that only a single thread will succeed:
10084 *
10085 * (TMB) cmpxchg to *pcpu_cid to mark UNSET
10086 *
10087 * Just to be clear, what we do _not_ want to happen is a transition to UNSET
10088 * when a thread is actively using the cid (property (1)).
10089 *
10090 * Let's look at the relevant combinations of TSA/TSB and TMA transitions.
10091 *
10092 * Scenario A) (TSA)+(TMA) (from next task perspective)
10093 *
10094 * CPU0 CPU1
10095 *
10096 * Context switch CS-1 Remote-clear
10097 * - store to rq->curr: (N)->(Y) (TSA) - cmpxchg to *pcpu_id to LAZY (TMA)
10098 * (implied barrier after cmpxchg)
10099 * - switch_mm_cid()
10100 * - memory barrier (see switch_mm_cid()
10101 * comment explaining how this barrier
10102 * is combined with other scheduler
10103 * barriers)
10104 * - mm_cid_get (next)
10105 * - READ_ONCE(*pcpu_cid) - rcu_dereference(src_rq->curr)
10106 *
10107 * This Dekker ensures that either task (Y) is observed by the
10108 * rcu_dereference() or the LAZY flag is observed by READ_ONCE(), or both are
10109 * observed.
10110 *
10111 * If task (Y) store is observed by rcu_dereference(), it means that there is
10112 * still an active task on the cpu. Remote-clear will therefore not transition
10113 * to UNSET, which fulfills property (1).
10114 *
10115 * If task (Y) is not observed, but the lazy flag is observed by READ_ONCE(),
10116 * it will move its state to UNSET, which clears the percpu cid perhaps
10117 * uselessly (which is not an issue for correctness). Because task (Y) is not
10118 * observed, CPU1 can move ahead to set the state to UNSET. Because moving
10119 * state to UNSET is done with a cmpxchg expecting that the old state has the
10120 * LAZY flag set, only one thread will successfully UNSET.
10121 *
10122 * If both states (LAZY flag and task (Y)) are observed, the thread on CPU0
10123 * will observe the LAZY flag and transition to UNSET (perhaps uselessly), and
10124 * CPU1 will observe task (Y) and do nothing more, which is fine.
10125 *
10126 * What we are effectively preventing with this Dekker is a scenario where
10127 * neither LAZY flag nor store (Y) are observed, which would fail property (1)
10128 * because this would UNSET a cid which is actively used.
10129 */
10130
10131 void sched_mm_cid_migrate_from(struct task_struct *t)
10132 {
10133 t->migrate_from_cpu = task_cpu(t);
10134 }
10135
10136 static
10137 int __sched_mm_cid_migrate_from_fetch_cid(struct rq *src_rq,
10138 struct task_struct *t,
10139 struct mm_cid *src_pcpu_cid)
10140 {
10141 struct mm_struct *mm = t->mm;
10142 struct task_struct *src_task;
10143 int src_cid, last_mm_cid;
10144
10145 if (!mm)
10146 return -1;
10147
10148 last_mm_cid = t->last_mm_cid;
10149 /*
10150 * If the migrated task has no last cid, or if the current
10151 * task on src rq uses the cid, it means the source cid does not need
10152 * to be moved to the destination cpu.
10153 */
10154 if (last_mm_cid == -1)
10155 return -1;
10156 src_cid = READ_ONCE(src_pcpu_cid->cid);
10157 if (!mm_cid_is_valid(src_cid) || last_mm_cid != src_cid)
10158 return -1;
10159
10160 /*
10161 * If we observe an active task using the mm on this rq, it means we
10162 * are not the last task to be migrated from this cpu for this mm, so
10163 * there is no need to move src_cid to the destination cpu.
10164 */
10165 guard(rcu)();
10166 src_task = rcu_dereference(src_rq->curr);
10167 if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
10168 t->last_mm_cid = -1;
10169 return -1;
10170 }
10171
10172 return src_cid;
10173 }
10174
10175 static
10176 int __sched_mm_cid_migrate_from_try_steal_cid(struct rq *src_rq,
10177 struct task_struct *t,
10178 struct mm_cid *src_pcpu_cid,
10179 int src_cid)
10180 {
10181 struct task_struct *src_task;
10182 struct mm_struct *mm = t->mm;
10183 int lazy_cid;
10184
10185 if (src_cid == -1)
10186 return -1;
10187
10188 /*
10189 * Attempt to clear the source cpu cid to move it to the destination
10190 * cpu.
10191 */
10192 lazy_cid = mm_cid_set_lazy_put(src_cid);
10193 if (!try_cmpxchg(&src_pcpu_cid->cid, &src_cid, lazy_cid))
10194 return -1;
10195
10196 /*
10197 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
10198 * rq->curr->mm matches the scheduler barrier in context_switch()
10199 * between store to rq->curr and load of prev and next task's
10200 * per-mm/cpu cid.
10201 *
10202 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
10203 * rq->curr->mm_cid_active matches the barrier in
10204 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
10205 * sched_mm_cid_after_execve() between store to t->mm_cid_active and
10206 * load of per-mm/cpu cid.
10207 */
10208
10209 /*
10210 * If we observe an active task using the mm on this rq after setting
10211 * the lazy-put flag, this task will be responsible for transitioning
10212 * from lazy-put flag set to MM_CID_UNSET.
10213 */
10214 scoped_guard (rcu) {
10215 src_task = rcu_dereference(src_rq->curr);
10216 if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
10217 /*
10218 * We observed an active task for this mm, there is therefore
10219 * no point in moving this cid to the destination cpu.
10220 */
10221 t->last_mm_cid = -1;
10222 return -1;
10223 }
10224 }
10225
10226 /*
10227 * The src_cid is unused, so it can be unset.
10228 */
10229 if (!try_cmpxchg(&src_pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
10230 return -1;
10231 return src_cid;
10232 }
10233
10234 /*
10235 * Migration to dst cpu. Called with dst_rq lock held.
10236 * Interrupts are disabled, which keeps the window of cid ownership without the
10237 * source rq lock held small.
10238 */
10239 void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t)
10240 {
10241 struct mm_cid *src_pcpu_cid, *dst_pcpu_cid;
10242 struct mm_struct *mm = t->mm;
10243 int src_cid, dst_cid, src_cpu;
10244 struct rq *src_rq;
10245
10246 lockdep_assert_rq_held(dst_rq);
10247
10248 if (!mm)
10249 return;
10250 src_cpu = t->migrate_from_cpu;
10251 if (src_cpu == -1) {
10252 t->last_mm_cid = -1;
10253 return;
10254 }
10255 /*
10256 * Move the src cid if the dst cid is unset. This keeps id
10257 * allocation closest to 0 in cases where few threads migrate around
10258 * many CPUs.
10259 *
10260 * If destination cid is already set, we may have to just clear
10261 * the src cid to ensure compactness in frequent migrations
10262 * scenarios.
10263 *
10264 * It is not useful to clear the src cid when the number of threads is
10265 * greater than or equal to the number of allowed CPUs, because user-space
10266 * can expect that the number of allowed cids can reach the number of
10267 * allowed CPUs.
10268 */
10269 dst_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(dst_rq));
10270 dst_cid = READ_ONCE(dst_pcpu_cid->cid);
10271 if (!mm_cid_is_unset(dst_cid) &&
10272 atomic_read(&mm->mm_users) >= t->nr_cpus_allowed)
10273 return;
10274 src_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, src_cpu);
10275 src_rq = cpu_rq(src_cpu);
10276 src_cid = __sched_mm_cid_migrate_from_fetch_cid(src_rq, t, src_pcpu_cid);
10277 if (src_cid == -1)
10278 return;
10279 src_cid = __sched_mm_cid_migrate_from_try_steal_cid(src_rq, t, src_pcpu_cid,
10280 src_cid);
10281 if (src_cid == -1)
10282 return;
10283 if (!mm_cid_is_unset(dst_cid)) {
10284 __mm_cid_put(mm, src_cid);
10285 return;
10286 }
10287 /* Move src_cid to dst cpu. */
10288 mm_cid_snapshot_time(dst_rq, mm);
10289 WRITE_ONCE(dst_pcpu_cid->cid, src_cid);
10290 }
10291
10292 static void sched_mm_cid_remote_clear(struct mm_struct *mm, struct mm_cid *pcpu_cid,
10293 int cpu)
10294 {
10295 struct rq *rq = cpu_rq(cpu);
10296 struct task_struct *t;
10297 int cid, lazy_cid;
10298
10299 cid = READ_ONCE(pcpu_cid->cid);
10300 if (!mm_cid_is_valid(cid))
10301 return;
10302
10303 /*
10304 * Clear the cpu cid if it is set to keep cid allocation compact. If
10305 * there happens to be other tasks left on the source cpu using this
10306 * mm, the next task using this mm will reallocate its cid on context
10307 * switch.
10308 */
10309 lazy_cid = mm_cid_set_lazy_put(cid);
10310 if (!try_cmpxchg(&pcpu_cid->cid, &cid, lazy_cid))
10311 return;
10312
10313 /*
10314 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
10315 * rq->curr->mm matches the scheduler barrier in context_switch()
10316 * between store to rq->curr and load of prev and next task's
10317 * per-mm/cpu cid.
10318 *
10319 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
10320 * rq->curr->mm_cid_active matches the barrier in
10321 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
10322 * sched_mm_cid_after_execve() between store to t->mm_cid_active and
10323 * load of per-mm/cpu cid.
10324 */
10325
10326 /*
10327 * If we observe an active task using the mm on this rq after setting
10328 * the lazy-put flag, that task will be responsible for transitioning
10329 * from lazy-put flag set to MM_CID_UNSET.
10330 */
10331 scoped_guard (rcu) {
10332 t = rcu_dereference(rq->curr);
10333 if (READ_ONCE(t->mm_cid_active) && t->mm == mm)
10334 return;
10335 }
10336
10337 /*
10338 * The cid is unused, so it can be unset.
10339 * Disable interrupts to keep the window of cid ownership without rq
10340 * lock small.
10341 */
10342 scoped_guard (irqsave) {
10343 if (try_cmpxchg(&pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
10344 __mm_cid_put(mm, cid);
10345 }
10346 }
10347
10348 static void sched_mm_cid_remote_clear_old(struct mm_struct *mm, int cpu)
10349 {
10350 struct rq *rq = cpu_rq(cpu);
10351 struct mm_cid *pcpu_cid;
10352 struct task_struct *curr;
10353 u64 rq_clock;
10354
10355 /*
10356 * rq->clock load is racy on 32-bit but one spurious clear once in a
10357 * while is irrelevant.
10358 */
10359 rq_clock = READ_ONCE(rq->clock);
10360 pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
10361
10362 /*
10363 * In order to take care of infrequently scheduled tasks, bump the time
10364 * snapshot associated with this cid if an active task using the mm is
10365 * observed on this rq.
10366 */
10367 scoped_guard (rcu) {
10368 curr = rcu_dereference(rq->curr);
10369 if (READ_ONCE(curr->mm_cid_active) && curr->mm == mm) {
10370 WRITE_ONCE(pcpu_cid->time, rq_clock);
10371 return;
10372 }
10373 }
10374
10375 if (rq_clock < pcpu_cid->time + SCHED_MM_CID_PERIOD_NS)
10376 return;
10377 sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
10378 }
10379
10380 static void sched_mm_cid_remote_clear_weight(struct mm_struct *mm, int cpu,
10381 int weight)
10382 {
10383 struct mm_cid *pcpu_cid;
10384 int cid;
10385
10386 pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
10387 cid = READ_ONCE(pcpu_cid->cid);
10388 if (!mm_cid_is_valid(cid) || cid < weight)
10389 return;
10390 sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
10391 }
10392
10393 static void task_mm_cid_work(struct callback_head *work)
10394 {
10395 unsigned long now = jiffies, old_scan, next_scan;
10396 struct task_struct *t = current;
10397 struct cpumask *cidmask;
10398 struct mm_struct *mm;
10399 int weight, cpu;
10400
10401 SCHED_WARN_ON(t != container_of(work, struct task_struct, cid_work));
10402
10403 work->next = work; /* Prevent double-add */
10404 if (t->flags & PF_EXITING)
10405 return;
10406 mm = t->mm;
10407 if (!mm)
10408 return;
10409 old_scan = READ_ONCE(mm->mm_cid_next_scan);
10410 next_scan = now + msecs_to_jiffies(MM_CID_SCAN_DELAY);
10411 if (!old_scan) {
10412 unsigned long res;
10413
10414 res = cmpxchg(&mm->mm_cid_next_scan, old_scan, next_scan);
10415 if (res != old_scan)
10416 old_scan = res;
10417 else
10418 old_scan = next_scan;
10419 }
10420 if (time_before(now, old_scan))
10421 return;
10422 if (!try_cmpxchg(&mm->mm_cid_next_scan, &old_scan, next_scan))
10423 return;
10424 cidmask = mm_cidmask(mm);
10425 /* Clear cids that were not recently used. */
10426 for_each_possible_cpu(cpu)
10427 sched_mm_cid_remote_clear_old(mm, cpu);
10428 weight = cpumask_weight(cidmask);
10429 /*
10430 * Clear cids that are greater than or equal to the cidmask weight to
10431 * recompact it.
10432 */
10433 for_each_possible_cpu(cpu)
10434 sched_mm_cid_remote_clear_weight(mm, cpu, weight);
10435 }
10436
10437 void init_sched_mm_cid(struct task_struct *t)
10438 {
10439 struct mm_struct *mm = t->mm;
10440 int mm_users = 0;
10441
10442 if (mm) {
10443 mm_users = atomic_read(&mm->mm_users);
10444 if (mm_users == 1)
10445 mm->mm_cid_next_scan = jiffies + msecs_to_jiffies(MM_CID_SCAN_DELAY);
10446 }
10447 t->cid_work.next = &t->cid_work; /* Protect against double add */
10448 init_task_work(&t->cid_work, task_mm_cid_work);
10449 }
10450
10451 void task_tick_mm_cid(struct rq *rq, struct task_struct *curr)
10452 {
10453 struct callback_head *work = &curr->cid_work;
10454 unsigned long now = jiffies;
10455
10456 if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) ||
10457 work->next != work)
10458 return;
10459 if (time_before(now, READ_ONCE(curr->mm->mm_cid_next_scan)))
10460 return;
10461 task_work_add(curr, work, TWA_RESUME);
10462 }
10463
10464 void sched_mm_cid_exit_signals(struct task_struct *t)
10465 {
10466 struct mm_struct *mm = t->mm;
10467 struct rq *rq;
10468
10469 if (!mm)
10470 return;
10471
10472 preempt_disable();
10473 rq = this_rq();
10474 guard(rq_lock_irqsave)(rq);
10475 preempt_enable_no_resched(); /* holding spinlock */
10476 WRITE_ONCE(t->mm_cid_active, 0);
10477 /*
10478 * Store t->mm_cid_active before loading per-mm/cpu cid.
10479 * Matches barrier in sched_mm_cid_remote_clear_old().
10480 */
10481 smp_mb();
10482 mm_cid_put(mm);
10483 t->last_mm_cid = t->mm_cid = -1;
10484 }
10485
10486 void sched_mm_cid_before_execve(struct task_struct *t)
10487 {
10488 struct mm_struct *mm = t->mm;
10489 struct rq *rq;
10490
10491 if (!mm)
10492 return;
10493
10494 preempt_disable();
10495 rq = this_rq();
10496 guard(rq_lock_irqsave)(rq);
10497 preempt_enable_no_resched(); /* holding spinlock */
10498 WRITE_ONCE(t->mm_cid_active, 0);
10499 /*
10500 * Store t->mm_cid_active before loading per-mm/cpu cid.
10501 * Matches barrier in sched_mm_cid_remote_clear_old().
10502 */
10503 smp_mb();
10504 mm_cid_put(mm);
10505 t->last_mm_cid = t->mm_cid = -1;
10506 }
10507
10508 void sched_mm_cid_after_execve(struct task_struct *t)
10509 {
10510 struct mm_struct *mm = t->mm;
10511 struct rq *rq;
10512
10513 if (!mm)
10514 return;
10515
10516 preempt_disable();
10517 rq = this_rq();
10518 scoped_guard (rq_lock_irqsave, rq) {
10519 preempt_enable_no_resched(); /* holding spinlock */
10520 WRITE_ONCE(t->mm_cid_active, 1);
10521 /*
10522 * Store t->mm_cid_active before loading per-mm/cpu cid.
10523 * Matches barrier in sched_mm_cid_remote_clear_old().
10524 */
10525 smp_mb();
10526 t->last_mm_cid = t->mm_cid = mm_cid_get(rq, mm);
10527 }
10528 rseq_set_notify_resume(t);
10529 }
10530
10531 void sched_mm_cid_fork(struct task_struct *t)
10532 {
10533 WARN_ON_ONCE(!t->mm || t->mm_cid != -1);
10534 t->mm_cid_active = 1;
10535 }
10536 #endif
10537
10538 #ifdef CONFIG_SCHED_CLASS_EXT
10539 void sched_deq_and_put_task(struct task_struct *p, int queue_flags,
10540 struct sched_enq_and_set_ctx *ctx)
10541 {
10542 struct rq *rq = task_rq(p);
10543
10544 lockdep_assert_rq_held(rq);
10545
10546 *ctx = (struct sched_enq_and_set_ctx){
10547 .p = p,
10548 .queue_flags = queue_flags,
10549 .queued = task_on_rq_queued(p),
10550 .running = task_current(rq, p),
10551 };
10552
10553 update_rq_clock(rq);
10554 if (ctx->queued)
10555 dequeue_task(rq, p, queue_flags | DEQUEUE_NOCLOCK);
10556 if (ctx->running)
10557 put_prev_task(rq, p);
10558 }
10559
10560 void sched_enq_and_set_task(struct sched_enq_and_set_ctx *ctx)
10561 {
10562 struct rq *rq = task_rq(ctx->p);
10563
10564 lockdep_assert_rq_held(rq);
10565
10566 if (ctx->queued)
10567 enqueue_task(rq, ctx->p, ctx->queue_flags | ENQUEUE_NOCLOCK);
10568 if (ctx->running)
10569 set_next_task(rq, ctx->p);
10570 }
10571 #endif /* CONFIG_SCHED_CLASS_EXT */
10572