// SPDX-License-Identifier: GPL-2.0-only
/*
 *  kernel/sched/core.c
 *
 *  Core kernel CPU scheduler code
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *  Copyright (C) 1998-2024  Ingo Molnar, Red Hat
 */
#include <linux/highmem.h>
#include <linux/hrtimer_api.h>
#include <linux/ktime_api.h>
#include <linux/sched/signal.h>
#include <linux/syscalls_api.h>
#include <linux/debug_locks.h>
#include <linux/prefetch.h>
#include <linux/capability.h>
#include <linux/pgtable_api.h>
#include <linux/wait_bit.h>
#include <linux/jiffies.h>
#include <linux/spinlock_api.h>
#include <linux/cpumask_api.h>
#include <linux/lockdep_api.h>
#include <linux/hardirq.h>
#include <linux/softirq.h>
#include <linux/refcount_api.h>
#include <linux/topology.h>
#include <linux/sched/clock.h>
#include <linux/sched/cond_resched.h>
#include <linux/sched/cputime.h>
#include <linux/sched/debug.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/init.h>
#include <linux/sched/isolation.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/mm.h>
#include <linux/sched/nohz.h>
#include <linux/sched/rseq_api.h>
#include <linux/sched/rt.h>

#include <linux/blkdev.h>
#include <linux/context_tracking.h>
#include <linux/cpuset.h>
#include <linux/delayacct.h>
#include <linux/init_task.h>
#include <linux/interrupt.h>
#include <linux/ioprio.h>
#include <linux/kallsyms.h>
#include <linux/kcov.h>
#include <linux/kprobes.h>
#include <linux/llist_api.h>
#include <linux/mmu_context.h>
#include <linux/mmzone.h>
#include <linux/mutex_api.h>
#include <linux/nmi.h>
#include <linux/nospec.h>
#include <linux/perf_event_api.h>
#include <linux/profile.h>
#include <linux/psi.h>
#include <linux/rcuwait_api.h>
#include <linux/rseq.h>
#include <linux/sched/wake_q.h>
#include <linux/scs.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/vtime.h>
#include <linux/wait_api.h>
#include <linux/workqueue_api.h>

#ifdef CONFIG_PREEMPT_DYNAMIC
# ifdef CONFIG_GENERIC_ENTRY
#  include <linux/entry-common.h>
# endif
#endif

#include <uapi/linux/sched/types.h>

#include <asm/irq_regs.h>
#include <asm/switch_to.h>
#include <asm/tlb.h>

#define CREATE_TRACE_POINTS
#include <linux/sched/rseq_api.h>
#include <trace/events/sched.h>
#include <trace/events/ipi.h>
#undef CREATE_TRACE_POINTS

#include "sched.h"
#include "stats.h"

#include "autogroup.h"
#include "pelt.h"
#include "smp.h"

#include "../workqueue_internal.h"
#include "../../io_uring/io-wq.h"
#include "../smpboot.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);

/*
 * Export tracepoints that act as a bare tracehook (i.e. have no trace event
 * associated with them) to allow external modules to probe them.
 */
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_hw_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_compute_energy_tp);

DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#ifdef CONFIG_SCHED_DEBUG
/*
 * Debugging: various feature bits
 *
 * If SCHED_DEBUG is disabled, each compilation unit has its own copy of
 * sysctl_sched_features, defined in sched.h, to allow constants propagation
 * at compile time and compiler optimization based on features default.
 */
#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |
const_debug unsigned int sysctl_sched_features =
#include "features.h"
	0;
#undef SCHED_FEAT
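
/*
 * Illustrative expansion of the construct above (a sketch, assuming
 * features.h contained only its PLACE_LAG and HRTICK entries):
 *
 *	const_debug unsigned int sysctl_sched_features =
 *		(1UL << __SCHED_FEAT_PLACE_LAG) * true |
 *		(1UL << __SCHED_FEAT_HRTICK) * false |
 *		0;
 *
 * Each SCHED_FEAT(name, enabled) line contributes one bit scaled by its
 * default, and the trailing '0' terminates the OR expression.
 */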

/*
 * Print a warning if need_resched is set for the given duration (if
 * LATENCY_WARN is enabled).
 *
 * If sysctl_resched_latency_warn_once is set, only one warning will be shown
 * per boot.
 */
__read_mostly int sysctl_resched_latency_warn_ms = 100;
__read_mostly int sysctl_resched_latency_warn_once = 1;
#endif /* CONFIG_SCHED_DEBUG */

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;

__read_mostly int scheduler_running;

#ifdef CONFIG_SCHED_CORE

DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);

/* kernel prio, less is more */
static inline int __task_prio(const struct task_struct *p)
{
	if (p->sched_class == &stop_sched_class) /* trumps deadline */
		return -2;

	if (p->dl_server)
		return -1; /* deadline */

	if (rt_or_dl_prio(p->prio))
		return p->prio; /* [-1, 99] */

	if (p->sched_class == &idle_sched_class)
		return MAX_RT_PRIO + NICE_WIDTH; /* 140 */

	if (task_on_scx(p))
		return MAX_RT_PRIO + MAX_NICE + 1; /* 120, squash ext */

	return MAX_RT_PRIO + MAX_NICE; /* 119, squash fair */
}

/*
 * l(a,b)
 * le(a,b) := !l(b,a)
 * g(a,b)  := l(b,a)
 * ge(a,b) := !l(a,b)
 */

/* real prio, less is less */
static inline bool prio_less(const struct task_struct *a,
			     const struct task_struct *b, bool in_fi)
{
	int pa = __task_prio(a), pb = __task_prio(b);

	if (-pa < -pb)
		return true;

	if (-pb < -pa)
		return false;

	if (pa == -1) { /* dl_prio() doesn't work because of stop_class above */
		const struct sched_dl_entity *a_dl, *b_dl;

		a_dl = &a->dl;
		/*
		 * Since 'a' and 'b' can be CFS tasks served by a DL server,
		 * __task_prio() can return -1 (for DL) even for those. In that
		 * case, get to the dl_server's DL entity.
		 */
		if (a->dl_server)
			a_dl = a->dl_server;

		b_dl = &b->dl;
		if (b->dl_server)
			b_dl = b->dl_server;

		return !dl_time_before(a_dl->deadline, b_dl->deadline);
	}

	if (pa == MAX_RT_PRIO + MAX_NICE) /* fair */
		return cfs_prio_less(a, b, in_fi);

#ifdef CONFIG_SCHED_CLASS_EXT
	if (pa == MAX_RT_PRIO + MAX_NICE + 1) /* ext */
		return scx_prio_less(a, b, in_fi);
#endif

	return false;
}

static inline bool __sched_core_less(const struct task_struct *a,
				     const struct task_struct *b)
{
	if (a->core_cookie < b->core_cookie)
		return true;

	if (a->core_cookie > b->core_cookie)
		return false;

	/* flip prio, so high prio is leftmost */
	if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count))
		return true;

	return false;
}

#define __node_2_sc(node) rb_entry((node), struct task_struct, core_node)

static inline bool rb_sched_core_less(struct rb_node *a, const struct rb_node *b)
{
	return __sched_core_less(__node_2_sc(a), __node_2_sc(b));
}

static inline int rb_sched_core_cmp(const void *key, const struct rb_node *node)
{
	const struct task_struct *p = __node_2_sc(node);
	unsigned long cookie = (unsigned long)key;

	if (cookie < p->core_cookie)
		return -1;

	if (cookie > p->core_cookie)
		return 1;

	return 0;
}

void sched_core_enqueue(struct rq *rq, struct task_struct *p)
{
	if (p->se.sched_delayed)
		return;

	rq->core->core_task_seq++;

	if (!p->core_cookie)
		return;

	rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);
}

void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags)
{
	if (p->se.sched_delayed)
		return;

	rq->core->core_task_seq++;

	if (sched_core_enqueued(p)) {
		rb_erase(&p->core_node, &rq->core_tree);
		RB_CLEAR_NODE(&p->core_node);
	}

	/*
	 * Migrating the last task off the CPU, with the CPU in forced idle
	 * state. Reschedule to create an accounting edge for forced idle,
	 * and re-examine whether the core is still in forced idle state.
	 */
	if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 &&
	    rq->core->core_forceidle_count && rq->curr == rq->idle)
		resched_curr(rq);
}

static int sched_task_is_throttled(struct task_struct *p, int cpu)
{
	if (p->sched_class->task_is_throttled)
		return p->sched_class->task_is_throttled(p, cpu);

	return 0;
}

static struct task_struct *sched_core_next(struct task_struct *p, unsigned long cookie)
{
	struct rb_node *node = &p->core_node;
	int cpu = task_cpu(p);

	do {
		node = rb_next(node);
		if (!node)
			return NULL;

		p = __node_2_sc(node);
		if (p->core_cookie != cookie)
			return NULL;

	} while (sched_task_is_throttled(p, cpu));

	return p;
}

/*
 * Find left-most (aka, highest priority) and unthrottled task matching @cookie.
 * If no suitable task is found, NULL will be returned.
 */
static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie)
{
	struct task_struct *p;
	struct rb_node *node;

	node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp);
	if (!node)
		return NULL;

	p = __node_2_sc(node);
	if (!sched_task_is_throttled(p, rq->cpu))
		return p;

	return sched_core_next(p, cookie);
}

/*
 * Magic required such that:
 *
 *	raw_spin_rq_lock(rq);
 *	...
 *	raw_spin_rq_unlock(rq);
 *
 * ends up locking and unlocking the _same_ lock, and all CPUs
 * always agree on what rq has what lock.
 *
 * XXX entirely possible to selectively enable cores, don't bother for now.
 */

static DEFINE_MUTEX(sched_core_mutex);
static atomic_t sched_core_count;
static struct cpumask sched_core_mask;

static void sched_core_lock(int cpu, unsigned long *flags)
{
	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
	int t, i = 0;

	local_irq_save(*flags);
	for_each_cpu(t, smt_mask)
		raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);
}

static void sched_core_unlock(int cpu, unsigned long *flags)
{
	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
	int t;

	for_each_cpu(t, smt_mask)
		raw_spin_unlock(&cpu_rq(t)->__lock);
	local_irq_restore(*flags);
}

static void __sched_core_flip(bool enabled)
{
	unsigned long flags;
	int cpu, t;

	cpus_read_lock();

	/*
	 * Toggle the online cores, one by one.
	 */
	cpumask_copy(&sched_core_mask, cpu_online_mask);
	for_each_cpu(cpu, &sched_core_mask) {
		const struct cpumask *smt_mask = cpu_smt_mask(cpu);

		sched_core_lock(cpu, &flags);

		for_each_cpu(t, smt_mask)
			cpu_rq(t)->core_enabled = enabled;

		cpu_rq(cpu)->core->core_forceidle_start = 0;

		sched_core_unlock(cpu, &flags);

		cpumask_andnot(&sched_core_mask, &sched_core_mask, smt_mask);
	}

	/*
	 * Toggle the offline CPUs.
	 */
	for_each_cpu_andnot(cpu, cpu_possible_mask, cpu_online_mask)
		cpu_rq(cpu)->core_enabled = enabled;

	cpus_read_unlock();
}

static void sched_core_assert_empty(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree));
}

static void __sched_core_enable(void)
{
	static_branch_enable(&__sched_core_enabled);
	/*
	 * Ensure all previous instances of raw_spin_rq_*lock() have finished
	 * and future ones will observe !sched_core_disabled().
	 */
	synchronize_rcu();
	__sched_core_flip(true);
	sched_core_assert_empty();
}

static void __sched_core_disable(void)
{
	sched_core_assert_empty();
	__sched_core_flip(false);
	static_branch_disable(&__sched_core_enabled);
}

void sched_core_get(void)
{
	if (atomic_inc_not_zero(&sched_core_count))
		return;

	mutex_lock(&sched_core_mutex);
	if (!atomic_read(&sched_core_count))
		__sched_core_enable();

	smp_mb__before_atomic();
	atomic_inc(&sched_core_count);
	mutex_unlock(&sched_core_mutex);
}

static void __sched_core_put(struct work_struct *work)
{
	if (atomic_dec_and_mutex_lock(&sched_core_count, &sched_core_mutex)) {
		__sched_core_disable();
		mutex_unlock(&sched_core_mutex);
	}
}

void sched_core_put(void)
{
	static DECLARE_WORK(_work, __sched_core_put);

	/*
	 * "There can be only one"
	 *
	 * Either this is the last one, or we don't actually need to do any
	 * 'work'. If it is the last *again*, we rely on
	 * WORK_STRUCT_PENDING_BIT.
	 */
	if (!atomic_add_unless(&sched_core_count, -1, 1))
		schedule_work(&_work);
}
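
/*
 * Illustrative get/put pairing (a sketch, not code from this file): users
 * such as the prctl()/cgroup core-scheduling cookie code keep a reference
 * for as long as cookied tasks can exist:
 *
 *	sched_core_get();	// first user enables core scheduling
 *	...			// tasks with p->core_cookie set run here
 *	sched_core_put();	// last user queues the disable work
 */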

#else /* !CONFIG_SCHED_CORE */

static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { }
static inline void
sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { }

#endif /* CONFIG_SCHED_CORE */

/*
 * Serialization rules:
 *
 * Lock order:
 *
 *   p->pi_lock
 *     rq->lock
 *       hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
 *
 *  rq1->lock
 *    rq2->lock  where: rq1 < rq2
 *
 * Regular state:
 *
 * Normal scheduling state is serialized by rq->lock. __schedule() takes the
 * local CPU's rq->lock, it optionally removes the task from the runqueue and
 * always looks at the local rq data structures to find the most eligible task
 * to run next.
 *
 * Task enqueue is also under rq->lock, possibly taken from another CPU.
 * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
 * the local CPU to avoid bouncing the runqueue state around [ see
 * ttwu_queue_wakelist() ]
 *
 * Task wakeups, specifically those that involve migration, are horribly
 * complicated to avoid having to take two rq->locks.
 *
 * Special state:
 *
 * System calls and anything external will use task_rq_lock() which acquires
 * both p->pi_lock and rq->lock. As a consequence the state they change is
 * stable while holding either lock:
 *
 *  - sched_setaffinity()/
 *    set_cpus_allowed_ptr():	p->cpus_ptr, p->nr_cpus_allowed
 *  - set_user_nice():		p->se.load, p->*prio
 *  - __sched_setscheduler():	p->sched_class, p->policy, p->*prio,
 *				p->se.load, p->rt_priority,
 *				p->dl.dl_{runtime, deadline, period, flags, bw, density}
 *  - sched_setnuma():		p->numa_preferred_nid
 *  - sched_move_task():	p->sched_task_group
 *  - uclamp_update_active():	p->uclamp*
 *
 * p->state <- TASK_*:
 *
 *   is changed locklessly using set_current_state(), __set_current_state() or
 *   set_special_state(), see their respective comments, or by
 *   try_to_wake_up(). The latter uses p->pi_lock to serialize against
 *   concurrent self.
 *
 * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
 *
 *   is set by activate_task() and cleared by deactivate_task(), under
 *   rq->lock. Non-zero indicates the task is runnable, the special
 *   ON_RQ_MIGRATING state is used for migration without holding both
 *   rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
 *
 *   Additionally it is possible to be ->on_rq but still be considered not
 *   runnable when p->se.sched_delayed is true. These tasks are on the runqueue
 *   but will be dequeued as soon as they get picked again. See the
 *   task_is_runnable() helper.
 *
 * p->on_cpu <- { 0, 1 }:
 *
 *   is set by prepare_task() and cleared by finish_task() such that it will be
 *   set before p is scheduled-in and cleared after p is scheduled-out, both
 *   under rq->lock. Non-zero indicates the task is running on its CPU.
 *
 *   [ The astute reader will observe that it is possible for two tasks on one
 *     CPU to have ->on_cpu = 1 at the same time. ]
 *
 * task_cpu(p): is changed by set_task_cpu(), the rules are:
 *
 *  - Don't call set_task_cpu() on a blocked task:
 *
 *    We don't care what CPU we're not running on, this simplifies hotplug,
 *    the CPU assignment of blocked tasks isn't required to be valid.
 *
 *  - for try_to_wake_up(), called under p->pi_lock:
 *
 *    This allows try_to_wake_up() to only take one rq->lock, see its comment.
 *
 *  - for migration called under rq->lock:
 *    [ see task_on_rq_migrating() in task_rq_lock() ]
 *
 *    o move_queued_task()
 *    o detach_task()
 *
 *  - for migration called under double_rq_lock():
 *
 *    o __migrate_swap_task()
 *    o push_rt_task() / pull_rt_task()
 *    o push_dl_task() / pull_dl_task()
 *    o dl_task_offline_migration()
 *
 */
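
/*
 * The canonical pattern for external callers then looks like this (a
 * sketch; see task_rq_lock() below):
 *
 *	struct rq_flags rf;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &rf);	// p->pi_lock, then rq->lock
 *	update_rq_clock(rq);
 *	...				// p's scheduling state is stable
 *	task_rq_unlock(rq, p, &rf);
 */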

void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
{
	raw_spinlock_t *lock;

	/* Matches synchronize_rcu() in __sched_core_enable() */
	preempt_disable();
	if (sched_core_disabled()) {
		raw_spin_lock_nested(&rq->__lock, subclass);
		/* preempt_count *MUST* be > 1 */
		preempt_enable_no_resched();
		return;
	}

	for (;;) {
		lock = __rq_lockp(rq);
		raw_spin_lock_nested(lock, subclass);
		if (likely(lock == __rq_lockp(rq))) {
			/* preempt_count *MUST* be > 1 */
			preempt_enable_no_resched();
			return;
		}
		raw_spin_unlock(lock);
	}
}

bool raw_spin_rq_trylock(struct rq *rq)
{
	raw_spinlock_t *lock;
	bool ret;

	/* Matches synchronize_rcu() in __sched_core_enable() */
	preempt_disable();
	if (sched_core_disabled()) {
		ret = raw_spin_trylock(&rq->__lock);
		preempt_enable();
		return ret;
	}

	for (;;) {
		lock = __rq_lockp(rq);
		ret = raw_spin_trylock(lock);
		if (!ret || (likely(lock == __rq_lockp(rq)))) {
			preempt_enable();
			return ret;
		}
		raw_spin_unlock(lock);
	}
}

void raw_spin_rq_unlock(struct rq *rq)
{
	raw_spin_unlock(rq_lockp(rq));
}

#ifdef CONFIG_SMP
/*
 * double_rq_lock - safely lock two runqueues
 */
void double_rq_lock(struct rq *rq1, struct rq *rq2)
{
	lockdep_assert_irqs_disabled();

	if (rq_order_less(rq2, rq1))
		swap(rq1, rq2);

	raw_spin_rq_lock(rq1);
	if (__rq_lockp(rq1) != __rq_lockp(rq2))
		raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);

	double_rq_clock_clear_update(rq1, rq2);
}
#endif

/*
 * __task_rq_lock - lock the rq @p resides on.
 */
struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(rq->lock)
{
	struct rq *rq;

	lockdep_assert_held(&p->pi_lock);

	for (;;) {
		rq = task_rq(p);
		raw_spin_rq_lock(rq);
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rq_pin_lock(rq, rf);
			return rq;
		}
		raw_spin_rq_unlock(rq);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}

/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 */
struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(p->pi_lock)
	__acquires(rq->lock)
{
	struct rq *rq;

	for (;;) {
		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
		rq = task_rq(p);
		raw_spin_rq_lock(rq);
		/*
		 *	move_queued_task()		task_rq_lock()
		 *
		 *	ACQUIRE (rq->lock)
		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
		 *	[S] ->cpu = new_cpu		[L] task_rq()
		 *					[L] ->on_rq
		 *	RELEASE (rq->lock)
		 *
		 * If we observe the old CPU in task_rq_lock(), the acquire of
		 * the old rq->lock will fully serialize against the stores.
		 *
		 * If we observe the new CPU in task_rq_lock(), the address
		 * dependency headed by '[L] rq = task_rq()' and the acquire
		 * will pair with the WMB to ensure we then also see migrating.
		 */
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rq_pin_lock(rq, rf);
			return rq;
		}
		raw_spin_rq_unlock(rq);
		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}

/*
 * RQ-clock updating methods:
 */

static void update_rq_clock_task(struct rq *rq, s64 delta)
{
	/*
	 * In theory, the compiler should just see 0 here, and optimize out the
	 * call to sched_rt_avg_update. But I don't trust it...
	 */
	s64 __maybe_unused steal = 0, irq_delta = 0;

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	if (irqtime_enabled()) {
		irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

		/*
		 * Since irq_time is only updated on {soft,}irq_exit, we might run into
		 * this case when a previous update_rq_clock() happened inside a
		 * {soft,}IRQ region.
		 *
		 * When this happens, we stop ->clock_task and only update the
		 * prev_irq_time stamp to account for the part that fit, so that a next
		 * update will consume the rest. This ensures ->clock_task is
		 * monotonic.
		 *
		 * It does however cause some slight mis-attribution of {soft,}IRQ
		 * time, a more accurate solution would be to update the irq_time using
		 * the current rq->clock timestamp, except that would require using
		 * atomic ops.
		 */
		if (irq_delta > delta)
			irq_delta = delta;

		rq->prev_irq_time += irq_delta;
		delta -= irq_delta;
		delayacct_irq(rq->curr, irq_delta);
	}
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	if (static_key_false((&paravirt_steal_rq_enabled))) {
		u64 prev_steal;

		steal = prev_steal = paravirt_steal_clock(cpu_of(rq));
		steal -= rq->prev_steal_time_rq;

		if (unlikely(steal > delta))
			steal = delta;

		rq->prev_steal_time_rq = prev_steal;
		delta -= steal;
	}
#endif

	rq->clock_task += delta;

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
		update_irq_load_avg(rq, irq_delta + steal);
#endif
	update_rq_clock_pelt(rq, delta);
}
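
/*
 * Informal summary of the clock layering handled above: rq->clock follows
 * sched_clock_cpu(); rq->clock_task additionally excludes time consumed by
 * IRQ handling (CONFIG_IRQ_TIME_ACCOUNTING) and time stolen by the
 * hypervisor (CONFIG_PARAVIRT_TIME_ACCOUNTING), so task runtime accounting
 * only sees time the task could actually have used; the PELT clock is then
 * derived from that via update_rq_clock_pelt().
 */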

void update_rq_clock(struct rq *rq)
{
	s64 delta;
	u64 clock;

	lockdep_assert_rq_held(rq);

	if (rq->clock_update_flags & RQCF_ACT_SKIP)
		return;

#ifdef CONFIG_SCHED_DEBUG
	if (sched_feat(WARN_DOUBLE_CLOCK))
		SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
	rq->clock_update_flags |= RQCF_UPDATED;
#endif
	clock = sched_clock_cpu(cpu_of(rq));
	scx_rq_clock_update(rq, clock);

	delta = clock - rq->clock;
	if (delta < 0)
		return;
	rq->clock += delta;

	update_rq_clock_task(rq, delta);
}

#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 */

static void hrtick_clear(struct rq *rq)
{
	if (hrtimer_active(&rq->hrtick_timer))
		hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
	struct rq_flags rf;

	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

	rq_lock(rq, &rf);
	update_rq_clock(rq);
	rq->donor->sched_class->task_tick(rq, rq->curr, 1);
	rq_unlock(rq, &rf);

	return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP

static void __hrtick_restart(struct rq *rq)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time = rq->hrtick_time;

	hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
}

/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
	struct rq *rq = arg;
	struct rq_flags rf;

	rq_lock(rq, &rf);
	__hrtick_restart(rq);
	rq_unlock(rq, &rf);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and IRQs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	s64 delta;

	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense and can cause timer DoS.
	 */
	delta = max_t(s64, delay, 10000LL);
	rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);

	if (rq == this_rq())
		__hrtick_restart(rq);
	else
		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
}

#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and IRQs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense. Rely on vruntime for fairness.
	 */
	delay = max_t(u64, delay, 10000LL);
	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
		      HRTIMER_MODE_REL_PINNED_HARD);
}

#endif /* CONFIG_SMP */

static void hrtick_rq_init(struct rq *rq)
{
#ifdef CONFIG_SMP
	INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
#endif
	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	rq->hrtick_timer.function = hrtick;
}
#else /* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void hrtick_rq_init(struct rq *rq)
{
}
#endif /* CONFIG_SCHED_HRTICK */

/*
 * try_cmpxchg based fetch_or() macro so it works for different integer types:
 */
#define fetch_or(ptr, mask)						\
	({								\
		typeof(ptr) _ptr = (ptr);				\
		typeof(mask) _mask = (mask);				\
		typeof(*_ptr) _val = *_ptr;				\
									\
		do {							\
		} while (!try_cmpxchg(_ptr, &_val, _val | _mask));	\
	_val;								\
})
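
/*
 * Example use (illustrative): set_nr_and_not_polling() below relies on
 * fetch_or() returning the flag word as it was *before* the OR:
 *
 *	old = fetch_or(&ti->flags, 1 << tif);
 *	// 'old & _TIF_POLLING_NRFLAG' tells whether the remote CPU was
 *	// polling and can therefore skip the resched IPI.
 */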

#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
/*
 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
 * this avoids any races wrt polling state changes and thereby avoids
 * spurious IPIs.
 */
static inline bool set_nr_and_not_polling(struct thread_info *ti, int tif)
{
	return !(fetch_or(&ti->flags, 1 << tif) & _TIF_POLLING_NRFLAG);
}

/*
 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
 *
 * If this returns true, then the idle task promises to call
 * sched_ttwu_pending() and reschedule soon.
 */
static bool set_nr_if_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	typeof(ti->flags) val = READ_ONCE(ti->flags);

	do {
		if (!(val & _TIF_POLLING_NRFLAG))
			return false;
		if (val & _TIF_NEED_RESCHED)
			return true;
	} while (!try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED));

	return true;
}

#else
static inline bool set_nr_and_not_polling(struct thread_info *ti, int tif)
{
	set_ti_thread_flag(ti, tif);
	return true;
}

#ifdef CONFIG_SMP
static inline bool set_nr_if_polling(struct task_struct *p)
{
	return false;
}
#endif
#endif

static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	struct wake_q_node *node = &task->wake_q;

	/*
	 * Atomically grab the task, if ->wake_q is !nil already it means
	 * it's already queued (either by us or someone else) and will get the
	 * wakeup due to that.
	 *
	 * In order to ensure that a pending wakeup will observe our pending
	 * state, even in the failed case, an explicit smp_mb() must be used.
	 */
	smp_mb__before_atomic();
	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
		return false;

	/*
	 * The head is context local, there can be no concurrency.
	 */
	*head->lastp = node;
	head->lastp = &node->next;
	return true;
}

/**
 * wake_q_add() - queue a wakeup for 'later' waking.
 * @head: the wake_q_head to add @task to
 * @task: the task to queue for 'later' wakeup
 *
 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 * instantly.
 *
 * This function must be used as-if it were wake_up_process(); IOW the task
 * must be ready to be woken at this location.
 */
void wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	if (__wake_q_add(head, task))
		get_task_struct(task);
}

/**
 * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
 * @head: the wake_q_head to add @task to
 * @task: the task to queue for 'later' wakeup
 *
 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 * instantly.
 *
 * This function must be used as-if it were wake_up_process(); IOW the task
 * must be ready to be woken at this location.
 *
 * This function is essentially a task-safe equivalent to wake_q_add(). Callers
 * that already hold a reference to @task can call the 'safe' version and trust
 * wake_q to do the right thing depending on whether or not the @task is
 * already queued for wakeup.
 */
void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
{
	if (!__wake_q_add(head, task))
		put_task_struct(task);
}

void wake_up_q(struct wake_q_head *head)
{
	struct wake_q_node *node = head->first;

	while (node != WAKE_Q_TAIL) {
		struct task_struct *task;

		task = container_of(node, struct task_struct, wake_q);
		/* Task can safely be re-inserted now: */
		node = node->next;
		task->wake_q.next = NULL;

		/*
		 * wake_up_process() executes a full barrier, which pairs with
		 * the queueing in wake_q_add() so as not to miss wakeups.
		 */
		wake_up_process(task);
		put_task_struct(task);
	}
}
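
/*
 * Typical wake_q usage (a sketch; see callers such as the futex and
 * rtmutex code, 'some_lock' is a placeholder):
 *
 *	DEFINE_WAKE_Q(wake_q);
 *
 *	raw_spin_lock(&some_lock);
 *	wake_q_add(&wake_q, task);	// queue wakeups under the lock
 *	raw_spin_unlock(&some_lock);
 *	wake_up_q(&wake_q);		// issue them after dropping it
 */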

/*
 * resched_curr - mark rq's current task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
static void __resched_curr(struct rq *rq, int tif)
{
	struct task_struct *curr = rq->curr;
	struct thread_info *cti = task_thread_info(curr);
	int cpu;

	lockdep_assert_rq_held(rq);

	/*
	 * Always immediately preempt the idle task; no point in delaying doing
	 * actual work.
	 */
	if (is_idle_task(curr) && tif == TIF_NEED_RESCHED_LAZY)
		tif = TIF_NEED_RESCHED;

	if (cti->flags & ((1 << tif) | _TIF_NEED_RESCHED))
		return;

	cpu = cpu_of(rq);

	if (cpu == smp_processor_id()) {
		set_ti_thread_flag(cti, tif);
		if (tif == TIF_NEED_RESCHED)
			set_preempt_need_resched();
		return;
	}

	if (set_nr_and_not_polling(cti, tif)) {
		if (tif == TIF_NEED_RESCHED)
			smp_send_reschedule(cpu);
	} else {
		trace_sched_wake_idle_without_ipi(cpu);
	}
}

void resched_curr(struct rq *rq)
{
	__resched_curr(rq, TIF_NEED_RESCHED);
}

#ifdef CONFIG_PREEMPT_DYNAMIC
static DEFINE_STATIC_KEY_FALSE(sk_dynamic_preempt_lazy);
static __always_inline bool dynamic_preempt_lazy(void)
{
	return static_branch_unlikely(&sk_dynamic_preempt_lazy);
}
#else
static __always_inline bool dynamic_preempt_lazy(void)
{
	return IS_ENABLED(CONFIG_PREEMPT_LAZY);
}
#endif

static __always_inline int get_lazy_tif_bit(void)
{
	if (dynamic_preempt_lazy())
		return TIF_NEED_RESCHED_LAZY;

	return TIF_NEED_RESCHED;
}

void resched_curr_lazy(struct rq *rq)
{
	__resched_curr(rq, get_lazy_tif_bit());
}

void resched_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	raw_spin_rq_lock_irqsave(rq, flags);
	if (cpu_online(cpu) || cpu == smp_processor_id())
		resched_curr(rq);
	raw_spin_rq_unlock_irqrestore(rq, flags);
}

#ifdef CONFIG_SMP
#ifdef CONFIG_NO_HZ_COMMON
/*
 * In the semi idle case, use the nearest busy CPU for migrating timers
 * from an idle CPU. This is good for power-savings.
 *
 * We don't do a similar optimization for a completely idle system, as
 * selecting an idle CPU will add more delays to the timers than intended
 * (as that CPU's timer base may not be up to date wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
	int i, cpu = smp_processor_id(), default_cpu = -1;
	struct sched_domain *sd;
	const struct cpumask *hk_mask;

	if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE)) {
		if (!idle_cpu(cpu))
			return cpu;
		default_cpu = cpu;
	}

	hk_mask = housekeeping_cpumask(HK_TYPE_KERNEL_NOISE);

	guard(rcu)();

	for_each_domain(cpu, sd) {
		for_each_cpu_and(i, sched_domain_span(sd), hk_mask) {
			if (cpu == i)
				continue;

			if (!idle_cpu(i))
				return i;
		}
	}

	if (default_cpu == -1)
		default_cpu = housekeeping_any_cpu(HK_TYPE_KERNEL_NOISE);

	return default_cpu;
}

/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
static void wake_up_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (cpu == smp_processor_id())
		return;

	/*
	 * Set TIF_NEED_RESCHED and send an IPI if in the non-polling
	 * part of the idle loop. This forces an exit from the idle loop
	 * and a round trip to schedule(). Now this could be optimized
	 * because a simple new idle loop iteration is enough to
	 * re-evaluate the next tick. Provided some re-ordering of tick
	 * nohz functions that would need to follow TIF_POLLING_NRFLAG
	 * clearing:
	 *
	 * - On most architectures, a simple fetch_or on ti::flags with a
	 *   "0" value would be enough to know if an IPI needs to be sent.
	 *
	 * - x86 needs to perform a last need_resched() check between
	 *   monitor and mwait which doesn't take timers into account.
	 *   There a dedicated TIF_TIMER flag would be required to
	 *   fetch_or here and be checked along with TIF_NEED_RESCHED
	 *   before mwait().
	 *
	 * However, remote timer enqueue is not such a frequent event
	 * and testing of the above solutions didn't appear to report
	 * much benefits.
	 */
	if (set_nr_and_not_polling(task_thread_info(rq->idle), TIF_NEED_RESCHED))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

static bool wake_up_full_nohz_cpu(int cpu)
{
	/*
	 * We just need the target to call irq_exit() and re-evaluate
	 * the next tick. The nohz full kick at least implies that.
	 * If needed we can still optimize that later with an
	 * empty IRQ.
	 */
	if (cpu_is_offline(cpu))
		return true;  /* Don't try to wake offline CPUs. */
	if (tick_nohz_full_cpu(cpu)) {
		if (cpu != smp_processor_id() ||
		    tick_nohz_tick_stopped())
			tick_nohz_full_kick_cpu(cpu);
		return true;
	}

	return false;
}

/*
 * Wake up the specified CPU. If the CPU is going offline, it is the
 * caller's responsibility to deal with the lost wakeup, for example,
 * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
 */
void wake_up_nohz_cpu(int cpu)
{
	if (!wake_up_full_nohz_cpu(cpu))
		wake_up_idle_cpu(cpu);
}

static void nohz_csd_func(void *info)
{
	struct rq *rq = info;
	int cpu = cpu_of(rq);
	unsigned int flags;

	/*
	 * Release the rq::nohz_csd.
	 */
	flags = atomic_fetch_andnot(NOHZ_KICK_MASK | NOHZ_NEWILB_KICK, nohz_flags(cpu));
	WARN_ON(!(flags & NOHZ_KICK_MASK));

	rq->idle_balance = idle_cpu(cpu);
	if (rq->idle_balance) {
		rq->nohz_idle_balance = flags;
		__raise_softirq_irqoff(SCHED_SOFTIRQ);
	}
}

#endif /* CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_NO_HZ_FULL
static inline bool __need_bw_check(struct rq *rq, struct task_struct *p)
{
	if (rq->nr_running != 1)
		return false;

	if (p->sched_class != &fair_sched_class)
		return false;

	if (!task_on_rq_queued(p))
		return false;

	return true;
}

bool sched_can_stop_tick(struct rq *rq)
{
	int fifo_nr_running;

	/* Deadline tasks, even if single, need the tick */
	if (rq->dl.dl_nr_running)
		return false;

	/*
	 * If there is more than one RR task, we need the tick to affect the
	 * actual RR behaviour.
	 */
	if (rq->rt.rr_nr_running) {
		if (rq->rt.rr_nr_running == 1)
			return true;
		else
			return false;
	}

	/*
	 * If there are no RR tasks, but there are FIFO tasks, we can skip the
	 * tick: there is no forced preemption between FIFO tasks.
	 */
	fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
	if (fifo_nr_running)
		return true;

	/*
	 * If there are no DL/RR/FIFO tasks, there must only be CFS or SCX
	 * tasks left. For CFS, if there's more than one we need the tick for
	 * involuntary preemption. For SCX, ask.
	 */
	if (scx_enabled() && !scx_can_stop_tick(rq))
		return false;

	if (rq->cfs.h_nr_queued > 1)
		return false;

	/*
	 * If there is one task and it has CFS runtime bandwidth constraints
	 * and it's on the CPU now we don't want to stop the tick.
	 * This check prevents clearing the bit if a newly enqueued task here is
	 * dequeued by migrating while the constrained task continues to run.
	 * E.g. going from 2->1 without going through pick_next_task().
	 */
	if (__need_bw_check(rq, rq->curr)) {
		if (cfs_task_bw_constrained(rq->curr))
			return false;
	}

	return true;
}
#endif /* CONFIG_NO_HZ_FULL */
#endif /* CONFIG_SMP */

#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
/*
 * Iterate task_group tree rooted at *from, calling @down when first entering a
 * node and @up when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
int walk_tg_tree_from(struct task_group *from,
		      tg_visitor down, tg_visitor up, void *data)
{
	struct task_group *parent, *child;
	int ret;

	parent = from;

down:
	ret = (*down)(parent, data);
	if (ret)
		goto out;
	list_for_each_entry_rcu(child, &parent->children, siblings) {
		parent = child;
		goto down;

up:
		continue;
	}
	ret = (*up)(parent, data);
	if (ret || parent == from)
		goto out;

	child = parent;
	parent = parent->parent;
	if (parent)
		goto up;
out:
	return ret;
}

int tg_nop(struct task_group *tg, void *data)
{
	return 0;
}
#endif
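
/*
 * Illustrative walk (a sketch): visit every task_group below the root,
 * calling a callback on the way down and nothing on the way up;
 * 'my_down_visitor' is a hypothetical tg_visitor that returns 0 to keep
 * going:
 *
 *	rcu_read_lock();
 *	walk_tg_tree_from(&root_task_group, my_down_visitor, tg_nop, data);
 *	rcu_read_unlock();
 */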

void set_load_weight(struct task_struct *p, bool update_load)
{
	int prio = p->static_prio - MAX_RT_PRIO;
	struct load_weight lw;

	if (task_has_idle_policy(p)) {
		lw.weight = scale_load(WEIGHT_IDLEPRIO);
		lw.inv_weight = WMULT_IDLEPRIO;
	} else {
		lw.weight = scale_load(sched_prio_to_weight[prio]);
		lw.inv_weight = sched_prio_to_wmult[prio];
	}

	/*
	 * SCHED_OTHER tasks have to update their load when changing their
	 * weight.
	 */
	if (update_load && p->sched_class->reweight_task)
		p->sched_class->reweight_task(task_rq(p), p, &lw);
	else
		p->se.load = lw;
}
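
/*
 * Worked example: a nice-0 SCHED_OTHER task has static_prio 120, indexes
 * sched_prio_to_weight[] at 120 - MAX_RT_PRIO = 20 and gets the canonical
 * weight 1024; each nice level changes the weight by roughly 1.25x
 * (nice 1 -> 820, nice -1 -> 1277).
 */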

#ifdef CONFIG_UCLAMP_TASK
/*
 * Serializes updates of utilization clamp values
 *
 * The (slow-path) user-space triggers utilization clamp value updates which
 * can require updates on (fast-path) scheduler's data structures used to
 * support enqueue/dequeue operations.
 * While the per-CPU rq lock protects fast-path update operations, user-space
 * requests are serialized using a mutex to reduce the risk of conflicting
 * updates or API abuses.
 */
static __maybe_unused DEFINE_MUTEX(uclamp_mutex);

/* Max allowed minimum utilization */
static unsigned int __maybe_unused sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;

/* Max allowed maximum utilization */
static unsigned int __maybe_unused sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;

/*
 * By default RT tasks run at the maximum performance point/capacity of the
 * system. Uclamp enforces this by always setting UCLAMP_MIN of RT tasks to
 * SCHED_CAPACITY_SCALE.
 *
 * This knob allows admins to change the default behavior when uclamp is being
 * used. In battery powered devices, particularly, running at the maximum
 * capacity and frequency will increase energy consumption and shorten the
 * battery life.
 *
 * This knob only affects RT tasks whose uclamp_se->user_defined == false.
 *
 * This knob will not override the system default sched_util_clamp_min defined
 * above.
 */
unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE;

/* All clamps are required to be less or equal than these values */
static struct uclamp_se uclamp_default[UCLAMP_CNT];

/*
 * This static key is used to reduce the uclamp overhead in the fast path. It
 * primarily disables the call to uclamp_rq_{inc, dec}() in
 * enqueue/dequeue_task().
 *
 * This allows users to continue to enable uclamp in their kernel config with
 * minimum uclamp overhead in the fast path.
 *
 * As soon as userspace modifies any of the uclamp knobs, the static key is
 * enabled, since we have actual users that make use of uclamp
 * functionality.
 *
 * The knobs that would enable this static key are:
 *
 *   * A task modifying its uclamp value with sched_setattr().
 *   * An admin modifying the sysctl_sched_uclamp_{min, max} via procfs.
 *   * An admin modifying the cgroup cpu.uclamp.{min, max}
 */
DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);

static inline unsigned int
uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
		  unsigned int clamp_value)
{
	/*
	 * Avoid blocked utilization pushing up the frequency when we go
	 * idle (which drops the max-clamp) by retaining the last known
	 * max-clamp.
	 */
	if (clamp_id == UCLAMP_MAX) {
		rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
		return clamp_value;
	}

	return uclamp_none(UCLAMP_MIN);
}

static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
				     unsigned int clamp_value)
{
	/* Reset max-clamp retention only on idle exit */
	if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
		return;

	uclamp_rq_set(rq, clamp_id, clamp_value);
}

static inline
unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
				 unsigned int clamp_value)
{
	struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
	int bucket_id = UCLAMP_BUCKETS - 1;

	/*
	 * Since both min and max clamps are max aggregated, find the
	 * top most bucket with tasks in.
	 */
	for ( ; bucket_id >= 0; bucket_id--) {
		if (!bucket[bucket_id].tasks)
			continue;
		return bucket[bucket_id].value;
	}

	/* No tasks -- default clamp values */
	return uclamp_idle_value(rq, clamp_id, clamp_value);
}
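
/*
 * Worked example (assuming the default UCLAMP_BUCKETS == 5, i.e. each
 * bucket spans ~205 capacity units): with RUNNABLE tasks requesting
 * UCLAMP_MIN of 150 and 800, buckets 0 and 3 hold one task each and the
 * scan above returns bucket 3's tracked value, 800, as the rq-wide clamp;
 * once both tasks dequeue, uclamp_idle_value() supplies the default.
 */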
1546
__uclamp_update_util_min_rt_default(struct task_struct * p)1547 static void __uclamp_update_util_min_rt_default(struct task_struct *p)
1548 {
1549 unsigned int default_util_min;
1550 struct uclamp_se *uc_se;
1551
1552 lockdep_assert_held(&p->pi_lock);
1553
1554 uc_se = &p->uclamp_req[UCLAMP_MIN];
1555
1556 /* Only sync if user didn't override the default */
1557 if (uc_se->user_defined)
1558 return;
1559
1560 default_util_min = sysctl_sched_uclamp_util_min_rt_default;
1561 uclamp_se_set(uc_se, default_util_min, false);
1562 }
1563
uclamp_update_util_min_rt_default(struct task_struct * p)1564 static void uclamp_update_util_min_rt_default(struct task_struct *p)
1565 {
1566 if (!rt_task(p))
1567 return;
1568
1569 /* Protect updates to p->uclamp_* */
1570 guard(task_rq_lock)(p);
1571 __uclamp_update_util_min_rt_default(p);
1572 }
1573
1574 static inline struct uclamp_se
uclamp_tg_restrict(struct task_struct * p,enum uclamp_id clamp_id)1575 uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
1576 {
1577 /* Copy by value as we could modify it */
1578 struct uclamp_se uc_req = p->uclamp_req[clamp_id];
1579 #ifdef CONFIG_UCLAMP_TASK_GROUP
1580 unsigned int tg_min, tg_max, value;
1581
1582 /*
1583 * Tasks in autogroups or root task group will be
1584 * restricted by system defaults.
1585 */
1586 if (task_group_is_autogroup(task_group(p)))
1587 return uc_req;
1588 if (task_group(p) == &root_task_group)
1589 return uc_req;
1590
1591 tg_min = task_group(p)->uclamp[UCLAMP_MIN].value;
1592 tg_max = task_group(p)->uclamp[UCLAMP_MAX].value;
1593 value = uc_req.value;
1594 value = clamp(value, tg_min, tg_max);
1595 uclamp_se_set(&uc_req, value, false);
1596 #endif
1597
1598 return uc_req;
1599 }
1600
1601 /*
1602 * The effective clamp bucket index of a task depends on, by increasing
1603 * priority:
1604 * - the task specific clamp value, when explicitly requested from userspace
1605 * - the task group effective clamp value, for tasks not either in the root
1606 * group or in an autogroup
1607 * - the system default clamp value, defined by the sysadmin
1608 */
1609 static inline struct uclamp_se
uclamp_eff_get(struct task_struct * p,enum uclamp_id clamp_id)1610 uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
1611 {
1612 struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id);
1613 struct uclamp_se uc_max = uclamp_default[clamp_id];
1614
1615 /* System default restrictions always apply */
1616 if (unlikely(uc_req.value > uc_max.value))
1617 return uc_max;
1618
1619 return uc_req;
1620 }
1621
uclamp_eff_value(struct task_struct * p,enum uclamp_id clamp_id)1622 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
1623 {
1624 struct uclamp_se uc_eff;
1625
1626 /* Task currently refcounted: use back-annotated (effective) value */
1627 if (p->uclamp[clamp_id].active)
1628 return (unsigned long)p->uclamp[clamp_id].value;
1629
1630 uc_eff = uclamp_eff_get(p, clamp_id);
1631
1632 return (unsigned long)uc_eff.value;
1633 }
1634
1635 /*
1636 * When a task is enqueued on a rq, the clamp bucket currently defined by the
1637 * task's uclamp::bucket_id is refcounted on that rq. This also immediately
1638 * updates the rq's clamp value if required.
1639 *
1640 * Tasks can have a task-specific value requested from user-space, track
1641 * within each bucket the maximum value for tasks refcounted in it.
1642 * This "local max aggregation" allows to track the exact "requested" value
1643 * for each bucket when all its RUNNABLE tasks require the same clamp.
1644 */
uclamp_rq_inc_id(struct rq * rq,struct task_struct * p,enum uclamp_id clamp_id)1645 static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
1646 enum uclamp_id clamp_id)
1647 {
1648 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1649 struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1650 struct uclamp_bucket *bucket;
1651
1652 lockdep_assert_rq_held(rq);
1653
1654 /* Update task effective clamp */
1655 p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);
1656
1657 bucket = &uc_rq->bucket[uc_se->bucket_id];
1658 bucket->tasks++;
1659 uc_se->active = true;
1660
1661 uclamp_idle_reset(rq, clamp_id, uc_se->value);
1662
1663 /*
1664 * Local max aggregation: rq buckets always track the max
1665 * "requested" clamp value of its RUNNABLE tasks.
1666 */
1667 if (bucket->tasks == 1 || uc_se->value > bucket->value)
1668 bucket->value = uc_se->value;
1669
1670 if (uc_se->value > uclamp_rq_get(rq, clamp_id))
1671 uclamp_rq_set(rq, clamp_id, uc_se->value);
1672 }
1673
1674 /*
1675 * When a task is dequeued from a rq, the clamp bucket refcounted by the task
1676 * is released. If this is the last task reference counting the rq's max
1677 * active clamp value, then the rq's clamp value is updated.
1678 *
1679 * Both refcounted tasks and rq's cached clamp values are expected to be
1680 * always valid. If it's detected they are not, as defensive programming,
1681 * enforce the expected state and warn.
1682 */
uclamp_rq_dec_id(struct rq * rq,struct task_struct * p,enum uclamp_id clamp_id)1683 static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
1684 enum uclamp_id clamp_id)
1685 {
1686 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1687 struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1688 struct uclamp_bucket *bucket;
1689 unsigned int bkt_clamp;
1690 unsigned int rq_clamp;
1691
1692 lockdep_assert_rq_held(rq);
1693
1694 /*
1695 * If sched_uclamp_used was enabled after task @p was enqueued,
1696 * we could end up with unbalanced call to uclamp_rq_dec_id().
1697 *
1698 * In this case the uc_se->active flag should be false since no uclamp
1699 * accounting was performed at enqueue time and we can just return
1700 * here.
1701 *
1702 * Need to be careful of the following enqueue/dequeue ordering
1703 * problem too
1704 *
1705 * enqueue(taskA)
1706 * // sched_uclamp_used gets enabled
1707 * enqueue(taskB)
1708 * dequeue(taskA)
1709 * // Must not decrement bucket->tasks here
1710 * dequeue(taskB)
1711 *
1712 * where we could end up with stale data in uc_se and
1713 * bucket[uc_se->bucket_id].
1714 *
1715 * The following check here eliminates the possibility of such race.
1716 */
1717 if (unlikely(!uc_se->active))
1718 return;
1719
1720 bucket = &uc_rq->bucket[uc_se->bucket_id];
1721
1722 SCHED_WARN_ON(!bucket->tasks);
1723 if (likely(bucket->tasks))
1724 bucket->tasks--;
1725
1726 uc_se->active = false;
1727
1728 /*
1729 * Keep "local max aggregation" simple and accept to (possibly)
1730 * overboost some RUNNABLE tasks in the same bucket.
1731 * The rq clamp bucket value is reset to its base value whenever
1732 * there are no more RUNNABLE tasks refcounting it.
1733 */
1734 if (likely(bucket->tasks))
1735 return;
1736
1737 rq_clamp = uclamp_rq_get(rq, clamp_id);
1738 /*
1739 * Defensive programming: this should never happen. If it happens,
1740 * e.g. due to future modification, warn and fix up the expected value.
1741 */
1742 SCHED_WARN_ON(bucket->value > rq_clamp);
1743 if (bucket->value >= rq_clamp) {
1744 bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
1745 uclamp_rq_set(rq, clamp_id, bkt_clamp);
1746 }
1747 }
1748
uclamp_rq_inc(struct rq * rq,struct task_struct * p)1749 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
1750 {
1751 enum uclamp_id clamp_id;
1752
1753 /*
1754 * Avoid any overhead until uclamp is actually used by the userspace.
1755 *
1756 * The condition is constructed such that a NOP is generated when
1757 * sched_uclamp_used is disabled.
1758 */
1759 if (!static_branch_unlikely(&sched_uclamp_used))
1760 return;
1761
1762 if (unlikely(!p->sched_class->uclamp_enabled))
1763 return;
1764
1765 if (p->se.sched_delayed)
1766 return;
1767
1768 for_each_clamp_id(clamp_id)
1769 uclamp_rq_inc_id(rq, p, clamp_id);
1770
1771 /* Reset clamp idle holding when there is one RUNNABLE task */
1772 if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
1773 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
1774 }

static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
{
	enum uclamp_id clamp_id;

	/*
	 * Avoid any overhead until uclamp is actually used by userspace.
	 *
	 * The condition is constructed such that a NOP is generated when
	 * sched_uclamp_used is disabled.
	 */
	if (!static_branch_unlikely(&sched_uclamp_used))
		return;

	if (unlikely(!p->sched_class->uclamp_enabled))
		return;

	if (p->se.sched_delayed)
		return;

	for_each_clamp_id(clamp_id)
		uclamp_rq_dec_id(rq, p, clamp_id);
}

static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p,
				      enum uclamp_id clamp_id)
{
	if (!p->uclamp[clamp_id].active)
		return;

	uclamp_rq_dec_id(rq, p, clamp_id);
	uclamp_rq_inc_id(rq, p, clamp_id);

	/*
	 * Make sure to clear the idle flag if we've transiently reached 0
	 * active tasks on rq.
	 */
	if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE))
		rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
}

static inline void
uclamp_update_active(struct task_struct *p)
{
	enum uclamp_id clamp_id;
	struct rq_flags rf;
	struct rq *rq;

	/*
	 * Lock the task and the rq where the task is (or was) queued.
	 *
	 * We might lock the (previous) rq of a !RUNNABLE task, but that's the
	 * price to pay to safely serialize util_{min,max} updates with
	 * enqueues, dequeues and migration operations.
	 * This is the same locking schema used by __set_cpus_allowed_ptr().
	 */
	rq = task_rq_lock(p, &rf);

	/*
	 * Setting the clamp bucket is serialized by task_rq_lock().
	 * If the task is not yet RUNNABLE and its task_struct is not
	 * affecting a valid clamp bucket, the next time it's enqueued,
	 * it will already see the updated clamp bucket value.
	 */
	for_each_clamp_id(clamp_id)
		uclamp_rq_reinc_id(rq, p, clamp_id);

	task_rq_unlock(rq, p, &rf);
}

#ifdef CONFIG_UCLAMP_TASK_GROUP
static inline void
uclamp_update_active_tasks(struct cgroup_subsys_state *css)
{
	struct css_task_iter it;
	struct task_struct *p;

	css_task_iter_start(css, 0, &it);
	while ((p = css_task_iter_next(&it)))
		uclamp_update_active(p);
	css_task_iter_end(&it);
}

static void cpu_util_update_eff(struct cgroup_subsys_state *css);
#endif

#ifdef CONFIG_SYSCTL
#ifdef CONFIG_UCLAMP_TASK_GROUP
static void uclamp_update_root_tg(void)
{
	struct task_group *tg = &root_task_group;

	uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN],
		      sysctl_sched_uclamp_util_min, false);
	uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
		      sysctl_sched_uclamp_util_max, false);

	guard(rcu)();
	cpu_util_update_eff(&root_task_group.css);
}
#else
static void uclamp_update_root_tg(void) { }
#endif

static void uclamp_sync_util_min_rt_default(void)
{
	struct task_struct *g, *p;

	/*
	 * copy_process()			sysctl_uclamp
	 *					  uclamp_min_rt = X;
	 *   write_lock(&tasklist_lock)		  read_lock(&tasklist_lock)
	 *   // link thread			  smp_mb__after_spinlock()
	 *   write_unlock(&tasklist_lock)	  read_unlock(&tasklist_lock);
	 *   sched_post_fork()			  for_each_process_thread()
	 *     __uclamp_sync_rt()		    __uclamp_sync_rt()
	 *
	 * Ensures that either sched_post_fork() will observe the new
	 * uclamp_min_rt or for_each_process_thread() will observe the new
	 * task.
	 */
	read_lock(&tasklist_lock);
	smp_mb__after_spinlock();
	read_unlock(&tasklist_lock);

	guard(rcu)();
	for_each_process_thread(g, p)
		uclamp_update_util_min_rt_default(p);
}

static int sysctl_sched_uclamp_handler(const struct ctl_table *table, int write,
				       void *buffer, size_t *lenp, loff_t *ppos)
{
	bool update_root_tg = false;
	int old_min, old_max, old_min_rt;
	int result;

	guard(mutex)(&uclamp_mutex);

	old_min = sysctl_sched_uclamp_util_min;
	old_max = sysctl_sched_uclamp_util_max;
	old_min_rt = sysctl_sched_uclamp_util_min_rt_default;

	result = proc_dointvec(table, write, buffer, lenp, ppos);
	if (result)
		goto undo;
	if (!write)
		return 0;

	if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
	    sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE ||
	    sysctl_sched_uclamp_util_min_rt_default > SCHED_CAPACITY_SCALE) {

		result = -EINVAL;
		goto undo;
	}

	if (old_min != sysctl_sched_uclamp_util_min) {
		uclamp_se_set(&uclamp_default[UCLAMP_MIN],
			      sysctl_sched_uclamp_util_min, false);
		update_root_tg = true;
	}
	if (old_max != sysctl_sched_uclamp_util_max) {
		uclamp_se_set(&uclamp_default[UCLAMP_MAX],
			      sysctl_sched_uclamp_util_max, false);
		update_root_tg = true;
	}

	if (update_root_tg) {
		static_branch_enable(&sched_uclamp_used);
		uclamp_update_root_tg();
	}

	if (old_min_rt != sysctl_sched_uclamp_util_min_rt_default) {
		static_branch_enable(&sched_uclamp_used);
		uclamp_sync_util_min_rt_default();
	}

	/*
	 * We update all RUNNABLE tasks only when task groups are in use.
	 * Otherwise, keep it simple and do just a lazy update at each next
	 * task enqueue time.
	 */
	return 0;

undo:
	sysctl_sched_uclamp_util_min = old_min;
	sysctl_sched_uclamp_util_max = old_max;
	sysctl_sched_uclamp_util_min_rt_default = old_min_rt;
	return result;
}
#endif
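
/*
 * Usage sketch (illustrative only, not part of the original source): the
 * handler above backs the kernel.sched_util_clamp_* sysctls, so the
 * system-wide clamps are typically adjusted from userspace along these
 * lines (values are on the SCHED_CAPACITY_SCALE = 1024 scale):
 *
 *	# cap the effective utilization of all tasks to half capacity
 *	echo 512 > /proc/sys/kernel/sched_util_clamp_max
 *	# raise the default boost for RT tasks
 *	echo 768 > /proc/sys/kernel/sched_util_clamp_min_rt_default
 *
 * A write is rejected with -EINVAL when min > max or a value exceeds
 * SCHED_CAPACITY_SCALE, matching the checks in the handler above.
 */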

static void uclamp_fork(struct task_struct *p)
{
	enum uclamp_id clamp_id;

	/*
	 * We don't need to hold task_rq_lock() when updating p->uclamp_* here
	 * as the task is still at its early fork stages.
	 */
	for_each_clamp_id(clamp_id)
		p->uclamp[clamp_id].active = false;

	if (likely(!p->sched_reset_on_fork))
		return;

	for_each_clamp_id(clamp_id) {
		uclamp_se_set(&p->uclamp_req[clamp_id],
			      uclamp_none(clamp_id), false);
	}
}

static void uclamp_post_fork(struct task_struct *p)
{
	uclamp_update_util_min_rt_default(p);
}

static void __init init_uclamp_rq(struct rq *rq)
{
	enum uclamp_id clamp_id;
	struct uclamp_rq *uc_rq = rq->uclamp;

	for_each_clamp_id(clamp_id) {
		uc_rq[clamp_id] = (struct uclamp_rq) {
			.value = uclamp_none(clamp_id)
		};
	}

	rq->uclamp_flags = UCLAMP_FLAG_IDLE;
}

static void __init init_uclamp(void)
{
	struct uclamp_se uc_max = {};
	enum uclamp_id clamp_id;
	int cpu;

	for_each_possible_cpu(cpu)
		init_uclamp_rq(cpu_rq(cpu));

	for_each_clamp_id(clamp_id) {
		uclamp_se_set(&init_task.uclamp_req[clamp_id],
			      uclamp_none(clamp_id), false);
	}

	/* System defaults allow max clamp values for both indexes */
	uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false);
	for_each_clamp_id(clamp_id) {
		uclamp_default[clamp_id] = uc_max;
#ifdef CONFIG_UCLAMP_TASK_GROUP
		root_task_group.uclamp_req[clamp_id] = uc_max;
		root_task_group.uclamp[clamp_id] = uc_max;
#endif
	}
}

#else /* !CONFIG_UCLAMP_TASK */
static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { }
static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
static inline void uclamp_fork(struct task_struct *p) { }
static inline void uclamp_post_fork(struct task_struct *p) { }
static inline void init_uclamp(void) { }
#endif /* CONFIG_UCLAMP_TASK */

bool sched_task_on_rq(struct task_struct *p)
{
	return task_on_rq_queued(p);
}

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ip = 0;
	unsigned int state;

	if (!p || p == current)
		return 0;

	/* Only get wchan if task is blocked and we can keep it that way. */
	raw_spin_lock_irq(&p->pi_lock);
	state = READ_ONCE(p->__state);
	smp_rmb(); /* see try_to_wake_up() */
	if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
		ip = __get_wchan(p);
	raw_spin_unlock_irq(&p->pi_lock);

	return ip;
}
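
/*
 * Usage sketch (illustrative only, not part of the original source):
 * callers such as the /proc/<pid>/wchan code resolve the returned address
 * to a symbol before showing it, along these lines:
 *
 *	unsigned long wchan = get_wchan(p);
 *
 *	if (wchan)
 *		pr_info("%d blocked in %pS\n", task_pid_nr(p), (void *)wchan);
 *	else
 *		pr_info("%d running or not safely sampleable\n", task_pid_nr(p));
 *
 * A zero return means @p is current, runnable, or its wait channel could
 * not be read while keeping the task blocked.
 */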

void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (!(flags & ENQUEUE_NOCLOCK))
		update_rq_clock(rq);

	p->sched_class->enqueue_task(rq, p, flags);
	/*
	 * Must be after ->enqueue_task() because ENQUEUE_DELAYED can clear
	 * ->sched_delayed.
	 */
	uclamp_rq_inc(rq, p);

	psi_enqueue(p, flags);

	if (!(flags & ENQUEUE_RESTORE))
		sched_info_enqueue(rq, p);

	if (sched_core_enabled(rq))
		sched_core_enqueue(rq, p);
}

/*
 * Must only return false when DEQUEUE_SLEEP.
 */
inline bool dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (sched_core_enabled(rq))
		sched_core_dequeue(rq, p, flags);

	if (!(flags & DEQUEUE_NOCLOCK))
		update_rq_clock(rq);

	if (!(flags & DEQUEUE_SAVE))
		sched_info_dequeue(rq, p);

	psi_dequeue(p, flags);

	/*
	 * Must be before ->dequeue_task() because ->dequeue_task() can 'fail'
	 * and mark the task ->sched_delayed.
	 */
	uclamp_rq_dec(rq, p);
	return p->sched_class->dequeue_task(rq, p, flags);
}

void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (task_on_rq_migrating(p))
		flags |= ENQUEUE_MIGRATED;
	if (flags & ENQUEUE_MIGRATED)
		sched_mm_cid_migrate_to(rq, p);

	enqueue_task(rq, p, flags);

	WRITE_ONCE(p->on_rq, TASK_ON_RQ_QUEUED);
	ASSERT_EXCLUSIVE_WRITER(p->on_rq);
}

void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
	SCHED_WARN_ON(flags & DEQUEUE_SLEEP);

	WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
	ASSERT_EXCLUSIVE_WRITER(p->on_rq);

	/*
	 * Code explicitly relies on TASK_ON_RQ_MIGRATING being set *before*
	 * dequeue_task() and cleared *after* enqueue_task().
	 */

	dequeue_task(rq, p, flags);
}

static void block_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (dequeue_task(rq, p, DEQUEUE_SLEEP | flags))
		__block_task(rq, p);
}

/**
 * task_curr - is this task currently executing on a CPU?
 * @p: the task in question.
 *
 * Return: 1 if the task is currently executing. 0 otherwise.
 */
inline int task_curr(const struct task_struct *p)
{
	return cpu_curr(task_cpu(p)) == p;
}

/*
 * ->switching_to() is called with the pi_lock and rq_lock held and must not
 * mess with locking.
 */
void check_class_changing(struct rq *rq, struct task_struct *p,
			  const struct sched_class *prev_class)
{
	if (prev_class != p->sched_class && p->sched_class->switching_to)
		p->sched_class->switching_to(rq, p);
}

/*
 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
 * use the balance_callback list if you want balancing.
 *
 * this means any call to check_class_changed() must be followed by a call to
 * balance_callback().
 */
void check_class_changed(struct rq *rq, struct task_struct *p,
			 const struct sched_class *prev_class,
			 int oldprio)
{
	if (prev_class != p->sched_class) {
		if (prev_class->switched_from)
			prev_class->switched_from(rq, p);

		p->sched_class->switched_to(rq, p);
	} else if (oldprio != p->prio || dl_task(p))
		p->sched_class->prio_changed(rq, p, oldprio);
}

void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags)
{
	struct task_struct *donor = rq->donor;

	if (p->sched_class == donor->sched_class)
		donor->sched_class->wakeup_preempt(rq, p, flags);
	else if (sched_class_above(p->sched_class, donor->sched_class))
		resched_curr(rq);

	/*
	 * A queue event has occurred, and we're going to schedule. In
	 * this case, we can save a useless back to back clock update.
	 */
	if (task_on_rq_queued(donor) && test_tsk_need_resched(rq->curr))
		rq_clock_skip_update(rq);
}

static __always_inline
int __task_state_match(struct task_struct *p, unsigned int state)
{
	if (READ_ONCE(p->__state) & state)
		return 1;

	if (READ_ONCE(p->saved_state) & state)
		return -1;

	return 0;
}

static __always_inline
int task_state_match(struct task_struct *p, unsigned int state)
{
	/*
	 * Serialize against current_save_and_set_rtlock_wait_state(),
	 * current_restore_rtlock_saved_state(), and __refrigerator().
	 */
	guard(raw_spinlock_irq)(&p->pi_lock);
	return __task_state_match(p, state);
}

/*
 * wait_task_inactive - wait for a thread to unschedule.
 *
 * Wait for the thread to block in any of the states set in @match_state.
 * If it changes, i.e. @p might have woken up, then return zero. When we
 * succeed in waiting for @p to be off its CPU, we return a positive number
 * (its total switch count). If a second call a short while later returns the
 * same number, the caller can be sure that @p has remained unscheduled the
 * whole time.
 *
 * The caller must ensure that the task *will* unschedule sometime soon,
 * else this function might spin for a *long* time. This function can't
 * be called with interrupts off, or it may introduce deadlock with
 * smp_call_function() if an IPI is sent by the same process we are
 * waiting to become inactive.
 */
unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
{
	int running, queued, match;
	struct rq_flags rf;
	unsigned long ncsw;
	struct rq *rq;

	for (;;) {
		/*
		 * We do the initial early heuristics without holding
		 * any task-queue locks at all. We'll only try to get
		 * the runqueue lock when things look like they will
		 * work out!
		 */
		rq = task_rq(p);

		/*
		 * If the task is actively running on another CPU
		 * still, just relax and busy-wait without holding
		 * any locks.
		 *
		 * NOTE! Since we don't hold any locks, it's not
		 * even sure that "rq" stays as the right runqueue!
		 * But we don't care, since "task_on_cpu()" will
		 * return false if the runqueue has changed and p
		 * is actually now running somewhere else!
		 */
		while (task_on_cpu(rq, p)) {
			if (!task_state_match(p, match_state))
				return 0;
			cpu_relax();
		}

		/*
		 * Ok, time to look more closely! We need the rq
		 * lock now, to be *sure*. If we're wrong, we'll
		 * just go back and repeat.
		 */
		rq = task_rq_lock(p, &rf);
		trace_sched_wait_task(p);
		running = task_on_cpu(rq, p);
		queued = task_on_rq_queued(p);
		ncsw = 0;
		if ((match = __task_state_match(p, match_state))) {
			/*
			 * When matching on p->saved_state, consider this task
			 * still queued so it will wait.
			 */
			if (match < 0)
				queued = 1;
			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
		}
		task_rq_unlock(rq, p, &rf);

		/*
		 * If it changed from the expected state, bail out now.
		 */
		if (unlikely(!ncsw))
			break;

		/*
		 * Was it really running after all now that we
		 * checked with the proper locks actually held?
		 *
		 * Oops. Go back and try again..
		 */
		if (unlikely(running)) {
			cpu_relax();
			continue;
		}

		/*
		 * It's not enough that it's not actively running,
		 * it must be off the runqueue _entirely_, and not
		 * preempted!
		 *
		 * So if it was still runnable (but just not actively
		 * running right now), it's preempted, and we should
		 * yield - it could be a while.
		 */
		if (unlikely(queued)) {
			ktime_t to = NSEC_PER_SEC / HZ;

			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD);
			continue;
		}

		/*
		 * Ahh, all good. It wasn't running, and it wasn't
		 * runnable, which means that it will never become
		 * running in the future either. We're all done!
		 */
		break;
	}

	return ncsw;
}
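
/*
 * Usage sketch (illustrative only, not part of the original source; the
 * ptrace attach path is the classic user of this pattern): sample the
 * switch count twice to confirm the target stayed off-CPU across some
 * intermediate work:
 *
 *	unsigned long ncsw;
 *
 *	ncsw = wait_task_inactive(p, __TASK_TRACED);
 *	if (!ncsw)
 *		goto state_changed;	// @p woke or changed state
 *
 *	// ... inspect @p while it is known to be off its CPU ...
 *
 *	if (wait_task_inactive(p, __TASK_TRACED) != ncsw)
 *		goto state_changed;	// @p ran in between, redo the work
 *
 * The MSB forced on in the return value guarantees a non-zero "success"
 * cookie even when p->nvcsw happens to be zero.
 */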

#ifdef CONFIG_SMP

static void
__do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx);

static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
{
	struct affinity_context ac = {
		.new_mask  = cpumask_of(rq->cpu),
		.flags     = SCA_MIGRATE_DISABLE,
	};

	if (likely(!p->migration_disabled))
		return;

	if (p->cpus_ptr != &p->cpus_mask)
		return;

	/*
	 * Violates locking rules! See comment in __do_set_cpus_allowed().
	 */
	__do_set_cpus_allowed(p, &ac);
}

void migrate_disable(void)
{
	struct task_struct *p = current;

	if (p->migration_disabled) {
#ifdef CONFIG_DEBUG_PREEMPT
		/*
		 * Warn about overflow half-way through the range.
		 */
		WARN_ON_ONCE((s16)p->migration_disabled < 0);
#endif
		p->migration_disabled++;
		return;
	}

	guard(preempt)();
	this_rq()->nr_pinned++;
	p->migration_disabled = 1;
}
EXPORT_SYMBOL_GPL(migrate_disable);

void migrate_enable(void)
{
	struct task_struct *p = current;
	struct affinity_context ac = {
		.new_mask  = &p->cpus_mask,
		.flags     = SCA_MIGRATE_ENABLE,
	};

#ifdef CONFIG_DEBUG_PREEMPT
	/*
	 * Check both overflow from migrate_disable() and superfluous
	 * migrate_enable().
	 */
	if (WARN_ON_ONCE((s16)p->migration_disabled <= 0))
		return;
#endif

	if (p->migration_disabled > 1) {
		p->migration_disabled--;
		return;
	}

	/*
	 * Ensure stop_task runs either before or after this, and that
	 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
	 */
	guard(preempt)();
	if (p->cpus_ptr != &p->cpus_mask)
		__set_cpus_allowed_ptr(p, &ac);
	/*
	 * Mustn't clear migration_disabled() until cpus_ptr points back at the
	 * regular cpus_mask, otherwise things that race (eg.
	 * select_fallback_rq) get confused.
	 */
	barrier();
	p->migration_disabled = 0;
	this_rq()->nr_pinned--;
}
EXPORT_SYMBOL_GPL(migrate_enable);
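
/*
 * Usage sketch (illustrative only, not part of the original source): this
 * pair is exported for code that must stay on the current CPU without
 * disabling preemption, e.g. while a per-CPU resource is used across a
 * possibly-sleeping section. Calls nest; only the outermost
 * migrate_enable() lifts the pin:
 *
 *	migrate_disable();
 *	// may be preempted and even sleep here, but the task keeps
 *	// running on this CPU until the matching enable
 *	migrate_disable();	// nested: just bumps the count
 *	migrate_enable();
 *	migrate_enable();	// outermost: restores p->cpus_mask
 */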

static inline bool rq_has_pinned_tasks(struct rq *rq)
{
	return rq->nr_pinned;
}

/*
 * Per-CPU kthreads are allowed to run on !active && online CPUs, see
 * __set_cpus_allowed_ptr() and select_fallback_rq().
 */
static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
{
	/* When not in the task's cpumask, no point in looking further. */
	if (!task_allowed_on_cpu(p, cpu))
		return false;

	/* migrate_disabled() must be allowed to finish. */
	if (is_migration_disabled(p))
		return cpu_online(cpu);

	/* Non-kernel threads are not allowed during either online or offline. */
	if (!(p->flags & PF_KTHREAD))
		return cpu_active(cpu);

	/* KTHREAD_IS_PER_CPU is always allowed. */
	if (kthread_is_per_cpu(p))
		return cpu_online(cpu);

	/* Regular kernel threads don't get to stay during offline. */
	if (cpu_dying(cpu))
		return false;

	/* But are allowed during online. */
	return cpu_online(cpu);
}

/*
 * This is how migration works:
 *
 * 1) we invoke migration_cpu_stop() on the target CPU using
 *    stop_one_cpu().
 * 2) stopper starts to run (implicitly forcing the migrated thread
 *    off the CPU)
 * 3) it checks whether the migrated task is still in the wrong runqueue.
 * 4) if it's in the wrong runqueue then the migration thread removes
 *    it and puts it into the right queue.
 * 5) stopper completes and stop_one_cpu() returns and the migration
 *    is done.
 */

/*
 * move_queued_task - move a queued task to new rq.
 *
 * Returns (locked) new rq. Old rq's lock is released.
 */
static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
				   struct task_struct *p, int new_cpu)
{
	lockdep_assert_rq_held(rq);

	deactivate_task(rq, p, DEQUEUE_NOCLOCK);
	set_task_cpu(p, new_cpu);
	rq_unlock(rq, rf);

	rq = cpu_rq(new_cpu);

	rq_lock(rq, rf);
	WARN_ON_ONCE(task_cpu(p) != new_cpu);
	activate_task(rq, p, 0);
	wakeup_preempt(rq, p, 0);

	return rq;
}

struct migration_arg {
	struct task_struct		*task;
	int				dest_cpu;
	struct set_affinity_pending	*pending;
};

/*
 * @refs: number of wait_for_completion()
 * @stop_pending: is @stop_work in use
 */
struct set_affinity_pending {
	refcount_t		refs;
	unsigned int		stop_pending;
	struct completion	done;
	struct cpu_stop_work	stop_work;
	struct migration_arg	arg;
};

/*
 * Move (not current) task off this CPU, onto the destination CPU. We're doing
 * this because either it can't run here any more (set_cpus_allowed()
 * away from this CPU, or CPU going down), or because we're
 * attempting to rebalance this task on exec (sched_exec).
 *
 * So we race with normal scheduler movements, but that's OK, as long
 * as the task is no longer on this CPU.
 */
static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
				 struct task_struct *p, int dest_cpu)
{
	/* Affinity changed (again). */
	if (!is_cpu_allowed(p, dest_cpu))
		return rq;

	rq = move_queued_task(rq, rf, p, dest_cpu);

	return rq;
}

/*
 * migration_cpu_stop - this will be executed by a high-prio stopper thread
 * and performs thread migration by bumping thread off CPU then
 * 'pushing' onto another runqueue.
 */
static int migration_cpu_stop(void *data)
{
	struct migration_arg *arg = data;
	struct set_affinity_pending *pending = arg->pending;
	struct task_struct *p = arg->task;
	struct rq *rq = this_rq();
	bool complete = false;
	struct rq_flags rf;

	/*
	 * The original target CPU might have gone down and we might
	 * be on another CPU but it doesn't matter.
	 */
	local_irq_save(rf.flags);
	/*
	 * We need to explicitly wake pending tasks before running
	 * __migrate_task() such that we will not miss enforcing cpus_ptr
	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
	 */
	flush_smp_call_function_queue();

	raw_spin_lock(&p->pi_lock);
	rq_lock(rq, &rf);

	/*
	 * If we were passed a pending, then ->stop_pending was set, thus
	 * p->migration_pending must have remained stable.
	 */
	WARN_ON_ONCE(pending && pending != p->migration_pending);

	/*
	 * If task_rq(p) != rq, it cannot be migrated here, because we're
	 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
	 * we're holding p->pi_lock.
	 */
	if (task_rq(p) == rq) {
		if (is_migration_disabled(p))
			goto out;

		if (pending) {
			p->migration_pending = NULL;
			complete = true;

			if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask))
				goto out;
		}

		if (task_on_rq_queued(p)) {
			update_rq_clock(rq);
			rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
		} else {
			p->wake_cpu = arg->dest_cpu;
		}

		/*
		 * XXX __migrate_task() can fail, at which point we might end
		 * up running on a dodgy CPU, AFAICT this can only happen
		 * during CPU hotplug, at which point we'll get pushed out
		 * anyway, so it's probably not a big deal.
		 */

	} else if (pending) {
		/*
		 * This happens when we get migrated between migrate_enable()'s
		 * preempt_enable() and scheduling the stopper task. At that
		 * point we're a regular task again and not current anymore.
		 *
		 * A !PREEMPT kernel has a giant hole here, which makes it far
		 * more likely.
		 */

		/*
		 * The task moved before the stopper got to run. We're holding
		 * ->pi_lock, so the allowed mask is stable - if it got
		 * somewhere allowed, we're done.
		 */
		if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) {
			p->migration_pending = NULL;
			complete = true;
			goto out;
		}

		/*
		 * When migrate_enable() hits a rq mis-match we can't reliably
		 * determine is_migration_disabled() and so have to chase after
		 * it.
		 */
		WARN_ON_ONCE(!pending->stop_pending);
		preempt_disable();
		task_rq_unlock(rq, p, &rf);
		stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
				    &pending->arg, &pending->stop_work);
		preempt_enable();
		return 0;
	}
out:
	if (pending)
		pending->stop_pending = false;
	task_rq_unlock(rq, p, &rf);

	if (complete)
		complete_all(&pending->done);

	return 0;
}

int push_cpu_stop(void *arg)
{
	struct rq *lowest_rq = NULL, *rq = this_rq();
	struct task_struct *p = arg;

	raw_spin_lock_irq(&p->pi_lock);
	raw_spin_rq_lock(rq);

	if (task_rq(p) != rq)
		goto out_unlock;

	if (is_migration_disabled(p)) {
		p->migration_flags |= MDF_PUSH;
		goto out_unlock;
	}

	p->migration_flags &= ~MDF_PUSH;

	if (p->sched_class->find_lock_rq)
		lowest_rq = p->sched_class->find_lock_rq(p, rq);

	if (!lowest_rq)
		goto out_unlock;

	// XXX validate p is still the highest prio task
	if (task_rq(p) == rq) {
		move_queued_task_locked(rq, lowest_rq, p);
		resched_curr(lowest_rq);
	}

	double_unlock_balance(rq, lowest_rq);

out_unlock:
	rq->push_busy = false;
	raw_spin_rq_unlock(rq);
	raw_spin_unlock_irq(&p->pi_lock);

	put_task_struct(p);
	return 0;
}

/*
 * sched_class::set_cpus_allowed must do the below, but is not required to
 * actually call this function.
 */
void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx)
{
	if (ctx->flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) {
		p->cpus_ptr = ctx->new_mask;
		return;
	}

	cpumask_copy(&p->cpus_mask, ctx->new_mask);
	p->nr_cpus_allowed = cpumask_weight(ctx->new_mask);

	/*
	 * Swap in a new user_cpus_ptr if SCA_USER flag set
	 */
	if (ctx->flags & SCA_USER)
		swap(p->user_cpus_ptr, ctx->user_mask);
}

static void
__do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
{
	struct rq *rq = task_rq(p);
	bool queued, running;

	/*
	 * This here violates the locking rules for affinity, since we're only
	 * supposed to change these variables while holding both rq->lock and
	 * p->pi_lock.
	 *
	 * HOWEVER, it magically works, because ttwu() is the only code that
	 * accesses these variables under p->pi_lock and only does so after
	 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
	 * before finish_task().
	 *
	 * XXX do further audits, this smells like something putrid.
	 */
	if (ctx->flags & SCA_MIGRATE_DISABLE)
		SCHED_WARN_ON(!p->on_cpu);
	else
		lockdep_assert_held(&p->pi_lock);

	queued = task_on_rq_queued(p);
	running = task_current_donor(rq, p);

	if (queued) {
		/*
		 * Because __kthread_bind() calls this on blocked tasks without
		 * holding rq->lock.
		 */
		lockdep_assert_rq_held(rq);
		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
	}
	if (running)
		put_prev_task(rq, p);

	p->sched_class->set_cpus_allowed(p, ctx);
	mm_set_cpus_allowed(p->mm, ctx->new_mask);

	if (queued)
		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
	if (running)
		set_next_task(rq, p);
}

/*
 * Used for kthread_bind() and select_fallback_rq(), in both cases the user
 * affinity (if any) should be destroyed too.
 */
void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
	struct affinity_context ac = {
		.new_mask  = new_mask,
		.user_mask = NULL,
		.flags     = SCA_USER,	/* clear the user requested mask */
	};
	union cpumask_rcuhead {
		cpumask_t cpumask;
		struct rcu_head rcu;
	};

	__do_set_cpus_allowed(p, &ac);

	/*
	 * Because this is called with p->pi_lock held, it is not possible
	 * to use kfree() here (when PREEMPT_RT=y), therefore punt to using
	 * kfree_rcu().
	 */
	kfree_rcu((union cpumask_rcuhead *)ac.user_mask, rcu);
}

int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
		      int node)
{
	cpumask_t *user_mask;
	unsigned long flags;

	/*
	 * Always clear dst->user_cpus_ptr first as their user_cpus_ptr's
	 * may differ by now due to racing.
	 */
	dst->user_cpus_ptr = NULL;

	/*
	 * This check is racy and losing the race is a valid situation.
	 * It is not worth the extra overhead of taking the pi_lock on
	 * every fork/clone.
	 */
	if (data_race(!src->user_cpus_ptr))
		return 0;

	user_mask = alloc_user_cpus_ptr(node);
	if (!user_mask)
		return -ENOMEM;

	/*
	 * Use pi_lock to protect content of user_cpus_ptr
	 *
	 * Though unlikely, user_cpus_ptr can be reset to NULL by a concurrent
	 * do_set_cpus_allowed().
	 */
	raw_spin_lock_irqsave(&src->pi_lock, flags);
	if (src->user_cpus_ptr) {
		swap(dst->user_cpus_ptr, user_mask);
		cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
	}
	raw_spin_unlock_irqrestore(&src->pi_lock, flags);

	if (unlikely(user_mask))
		kfree(user_mask);

	return 0;
}

static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p)
{
	struct cpumask *user_mask = NULL;

	swap(p->user_cpus_ptr, user_mask);

	return user_mask;
}

void release_user_cpus_ptr(struct task_struct *p)
{
	kfree(clear_user_cpus_ptr(p));
}

/*
 * This function is wildly self concurrent; here be dragons.
 *
 *
 * When given a valid mask, __set_cpus_allowed_ptr() must block until the
 * designated task is enqueued on an allowed CPU. If that task is currently
 * running, we have to kick it out using the CPU stopper.
 *
 * Migrate-Disable comes along and tramples all over our nice sandcastle.
 * Consider:
 *
 *     Initial conditions: P0->cpus_mask = [0, 1]
 *
 *     P0@CPU0                  P1
 *
 *     migrate_disable();
 *     <preempted>
 *                              set_cpus_allowed_ptr(P0, [1]);
 *
 * P1 *cannot* return from this set_cpus_allowed_ptr() call until P0 executes
 * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region).
 * This means we need the following scheme:
 *
 *     P0@CPU0                  P1
 *
 *     migrate_disable();
 *     <preempted>
 *                              set_cpus_allowed_ptr(P0, [1]);
 *                                <blocks>
 *     <resumes>
 *     migrate_enable();
 *       __set_cpus_allowed_ptr();
 *       <wakes local stopper>
 *                         `--> <woken on migration completion>
 *
 * Now the fun stuff: there may be several P1-like tasks, i.e. multiple
 * concurrent set_cpus_allowed_ptr(P0, [*]) calls. CPU affinity changes of any
 * task p are serialized by p->pi_lock, which we can leverage: the one that
 * should come into effect at the end of the Migrate-Disable region is the last
 * one. This means we only need to track a single cpumask (i.e. p->cpus_mask),
 * but we still need to properly signal those waiting tasks at the appropriate
 * moment.
 *
 * This is implemented using struct set_affinity_pending. The first
 * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will
 * setup an instance of that struct and install it on the targeted task_struct.
 * Any and all further callers will reuse that instance. Those then wait for
 * a completion signaled at the tail of the CPU stopper callback (1), triggered
 * on the end of the Migrate-Disable region (i.e. outermost migrate_enable()).
 *
 *
 * (1) In the cases covered above. There is one more where the completion is
 * signaled within affine_move_task() itself: when a subsequent affinity request
 * occurs after the stopper bailed out due to the targeted task still being
 * Migrate-Disable. Consider:
 *
 *     Initial conditions: P0->cpus_mask = [0, 1]
 *
 *     CPU0                P1                             P2
 *     <P0>
 *       migrate_disable();
 *       <preempted>
 *                         set_cpus_allowed_ptr(P0, [1]);
 *                           <blocks>
 *     <migration/0>
 *       migration_cpu_stop()
 *         is_migration_disabled()
 *           <bails>
 *                                                        set_cpus_allowed_ptr(P0, [0, 1]);
 *                                                          <signal completion>
 *                           <awakes>
 *
 * Note that the above is safe vs a concurrent migrate_enable(), as any
 * pending affinity completion is preceded by an uninstallation of
 * p->migration_pending done with p->pi_lock held.
 */
static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf,
			    int dest_cpu, unsigned int flags)
	__releases(rq->lock)
	__releases(p->pi_lock)
{
	struct set_affinity_pending my_pending = { }, *pending = NULL;
	bool stop_pending, complete = false;

	/* Can the task run on the task's current CPU? If so, we're done */
	if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
		struct task_struct *push_task = NULL;

		if ((flags & SCA_MIGRATE_ENABLE) &&
		    (p->migration_flags & MDF_PUSH) && !rq->push_busy) {
			rq->push_busy = true;
			push_task = get_task_struct(p);
		}

		/*
		 * If there are pending waiters, but no pending stop_work,
		 * then complete now.
		 */
		pending = p->migration_pending;
		if (pending && !pending->stop_pending) {
			p->migration_pending = NULL;
			complete = true;
		}

		preempt_disable();
		task_rq_unlock(rq, p, rf);
		if (push_task) {
			stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
					    p, &rq->push_work);
		}
		preempt_enable();

		if (complete)
			complete_all(&pending->done);

		return 0;
	}

	if (!(flags & SCA_MIGRATE_ENABLE)) {
		/* serialized by p->pi_lock */
		if (!p->migration_pending) {
			/* Install the request */
			refcount_set(&my_pending.refs, 1);
			init_completion(&my_pending.done);
			my_pending.arg = (struct migration_arg) {
				.task = p,
				.dest_cpu = dest_cpu,
				.pending = &my_pending,
			};

			p->migration_pending = &my_pending;
		} else {
			pending = p->migration_pending;
			refcount_inc(&pending->refs);
			/*
			 * Affinity has changed, but we've already installed a
			 * pending. migration_cpu_stop() *must* see this, else
			 * we risk a completion of the pending despite having a
			 * task on a disallowed CPU.
			 *
			 * Serialized by p->pi_lock, so this is safe.
			 */
			pending->arg.dest_cpu = dest_cpu;
		}
	}
	pending = p->migration_pending;
	/*
	 * - !MIGRATE_ENABLE:
	 *   we'll have installed a pending if there wasn't one already.
	 *
	 * - MIGRATE_ENABLE:
	 *   we're here because the current CPU isn't matching anymore,
	 *   the only way that can happen is because of a concurrent
	 *   set_cpus_allowed_ptr() call, which should then still be
	 *   pending completion.
	 *
	 * Either way, we really should have a @pending here.
	 */
	if (WARN_ON_ONCE(!pending)) {
		task_rq_unlock(rq, p, rf);
		return -EINVAL;
	}

	if (task_on_cpu(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) {
		/*
		 * MIGRATE_ENABLE gets here because 'p == current', but for
		 * anything else we cannot do is_migration_disabled(), punt
		 * and have the stopper function handle it all race-free.
		 */
		stop_pending = pending->stop_pending;
		if (!stop_pending)
			pending->stop_pending = true;

		if (flags & SCA_MIGRATE_ENABLE)
			p->migration_flags &= ~MDF_PUSH;

		preempt_disable();
		task_rq_unlock(rq, p, rf);
		if (!stop_pending) {
			stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
					    &pending->arg, &pending->stop_work);
		}
		preempt_enable();

		if (flags & SCA_MIGRATE_ENABLE)
			return 0;
	} else {

		if (!is_migration_disabled(p)) {
			if (task_on_rq_queued(p))
				rq = move_queued_task(rq, rf, p, dest_cpu);

			if (!pending->stop_pending) {
				p->migration_pending = NULL;
				complete = true;
			}
		}
		task_rq_unlock(rq, p, rf);

		if (complete)
			complete_all(&pending->done);
	}

	wait_for_completion(&pending->done);

	if (refcount_dec_and_test(&pending->refs))
		wake_up_var(&pending->refs); /* No UaF, just an address */

	/*
	 * Block the original owner of &pending until all subsequent callers
	 * have seen the completion and decremented the refcount
	 */
	wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs));

	/* ARGH */
	WARN_ON_ONCE(my_pending.stop_pending);

	return 0;
}

/*
 * Called with both p->pi_lock and rq->lock held; drops both before returning.
 */
static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
					 struct affinity_context *ctx,
					 struct rq *rq,
					 struct rq_flags *rf)
	__releases(rq->lock)
	__releases(p->pi_lock)
{
	const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
	const struct cpumask *cpu_valid_mask = cpu_active_mask;
	bool kthread = p->flags & PF_KTHREAD;
	unsigned int dest_cpu;
	int ret = 0;

	update_rq_clock(rq);

	if (kthread || is_migration_disabled(p)) {
		/*
		 * Kernel threads are allowed on online && !active CPUs,
		 * however, during cpu-hot-unplug, even these might get pushed
		 * away if not KTHREAD_IS_PER_CPU.
		 *
		 * Specifically, migration_disabled() tasks must not fail the
		 * cpumask_any_and_distribute() pick below, esp. so on
		 * SCA_MIGRATE_ENABLE, otherwise we'll not call
		 * set_cpus_allowed_common() and actually reset p->cpus_ptr.
		 */
		cpu_valid_mask = cpu_online_mask;
	}

	if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Must re-check here, to close a race against __kthread_bind(),
	 * sched_setaffinity() is not guaranteed to observe the flag.
	 */
	if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
		ret = -EINVAL;
		goto out;
	}

	if (!(ctx->flags & SCA_MIGRATE_ENABLE)) {
		if (cpumask_equal(&p->cpus_mask, ctx->new_mask)) {
			if (ctx->flags & SCA_USER)
				swap(p->user_cpus_ptr, ctx->user_mask);
			goto out;
		}

		if (WARN_ON_ONCE(p == current &&
				 is_migration_disabled(p) &&
				 !cpumask_test_cpu(task_cpu(p), ctx->new_mask))) {
			ret = -EBUSY;
			goto out;
		}
	}

	/*
	 * Picking a ~random cpu helps in cases where we are changing affinity
	 * for groups of tasks (ie. cpuset), so that load balancing is not
	 * immediately required to distribute the tasks within their new mask.
	 */
	dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, ctx->new_mask);
	if (dest_cpu >= nr_cpu_ids) {
		ret = -EINVAL;
		goto out;
	}

	__do_set_cpus_allowed(p, ctx);

	return affine_move_task(rq, p, rf, dest_cpu, ctx->flags);

out:
	task_rq_unlock(rq, p, rf);

	return ret;
}

/*
 * Change a given task's CPU affinity. Migrate the thread to a
 * proper CPU and schedule it away if the CPU it's executing on
 * is removed from the allowed bitmask.
 *
 * NOTE: the caller must have a valid reference to the task, the
 * task must not exit() & deallocate itself prematurely. The
 * call is not atomic; no spinlocks may be held.
 */
int __set_cpus_allowed_ptr(struct task_struct *p, struct affinity_context *ctx)
{
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);
	/*
	 * Masking should be skipped if SCA_USER or any of the SCA_MIGRATE_*
	 * flags are set.
	 */
	if (p->user_cpus_ptr &&
	    !(ctx->flags & (SCA_USER | SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) &&
	    cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr))
		ctx->new_mask = rq->scratch_mask;

	return __set_cpus_allowed_ptr_locked(p, ctx, rq, &rf);
}

int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
	struct affinity_context ac = {
		.new_mask  = new_mask,
		.flags     = 0,
	};

	return __set_cpus_allowed_ptr(p, &ac);
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
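
/*
 * Usage sketch (illustrative only, not part of the original source; the
 * worker_fn and cpu names below are placeholders): the exported wrapper
 * is the usual way for kernel code to pin a task it owns, e.g. a freshly
 * created kthread, to one CPU before waking it:
 *
 *	struct task_struct *tsk;
 *
 *	tsk = kthread_create(worker_fn, NULL, "worker/%d", cpu);
 *	if (!IS_ERR(tsk)) {
 *		set_cpus_allowed_ptr(tsk, cpumask_of(cpu));
 *		wake_up_process(tsk);
 *	}
 *
 * For strictly per-CPU kthreads, kthread_bind() is preferred since it
 * also sets PF_NO_SETAFFINITY.
 */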

/*
 * Change a given task's CPU affinity to the intersection of its current
 * affinity mask and @subset_mask, writing the resulting mask to @new_mask.
 * If user_cpus_ptr is defined, use it as the basis for restricting CPU
 * affinity or use cpu_online_mask instead.
 *
 * If the resulting mask is empty, leave the affinity unchanged and return
 * -EINVAL.
 */
static int restrict_cpus_allowed_ptr(struct task_struct *p,
				     struct cpumask *new_mask,
				     const struct cpumask *subset_mask)
{
	struct affinity_context ac = {
		.new_mask  = new_mask,
		.flags     = 0,
	};
	struct rq_flags rf;
	struct rq *rq;
	int err;

	rq = task_rq_lock(p, &rf);

	/*
	 * Forcefully restricting the affinity of a deadline task is
	 * likely to cause problems, so fail and noisily override the
	 * mask entirely.
	 */
	if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
		err = -EPERM;
		goto err_unlock;
	}

	if (!cpumask_and(new_mask, task_user_cpus(p), subset_mask)) {
		err = -EINVAL;
		goto err_unlock;
	}

	return __set_cpus_allowed_ptr_locked(p, &ac, rq, &rf);

err_unlock:
	task_rq_unlock(rq, p, &rf);
	return err;
}

/*
 * Restrict the CPU affinity of task @p so that it is a subset of
 * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
 * old affinity mask. If the resulting mask is empty, we warn and walk
 * up the cpuset hierarchy until we find a suitable mask.
 */
void force_compatible_cpus_allowed_ptr(struct task_struct *p)
{
	cpumask_var_t new_mask;
	const struct cpumask *override_mask = task_cpu_possible_mask(p);

	alloc_cpumask_var(&new_mask, GFP_KERNEL);

	/*
	 * __migrate_task() can fail silently in the face of concurrent
	 * offlining of the chosen destination CPU, so take the hotplug
	 * lock to ensure that the migration succeeds.
	 */
	cpus_read_lock();
	if (!cpumask_available(new_mask))
		goto out_set_mask;

	if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask))
		goto out_free_mask;

	/*
	 * We failed to find a valid subset of the affinity mask for the
	 * task, so override it based on its cpuset hierarchy.
	 */
	cpuset_cpus_allowed(p, new_mask);
	override_mask = new_mask;

out_set_mask:
	if (printk_ratelimit()) {
		printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
				task_pid_nr(p), p->comm,
				cpumask_pr_args(override_mask));
	}

	WARN_ON(set_cpus_allowed_ptr(p, override_mask));
out_free_mask:
	cpus_read_unlock();
	free_cpumask_var(new_mask);
}

/*
 * Restore the affinity of a task @p which was previously restricted by a
 * call to force_compatible_cpus_allowed_ptr().
 *
 * It is the caller's responsibility to serialise this with any calls to
 * force_compatible_cpus_allowed_ptr(@p).
 */
void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
{
	struct affinity_context ac = {
		.new_mask  = task_user_cpus(p),
		.flags     = 0,
	};
	int ret;

	/*
	 * Try to restore the old affinity mask with __sched_setaffinity().
	 * Cpuset masking will be done there too.
	 */
	ret = __sched_setaffinity(p, &ac);
	WARN_ON_ONCE(ret);
}
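
/*
 * Usage sketch (illustrative only, not part of the original source): the
 * force/relax pair is meant to bracket a window in which @p can only run
 * on a subset of CPUs, e.g. an arch where only some cores can execute
 * 32-bit tasks might do:
 *
 *	force_compatible_cpus_allowed_ptr(p);	// shrink to compatible CPUs
 *	// ... @p executes in the restricted mode ...
 *	relax_compatible_cpus_allowed_ptr(p);	// restore the saved mask
 *
 * Between the two calls the user-requested mask is preserved in
 * p->user_cpus_ptr, which is what the relax path restores from.
 */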

void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
#ifdef CONFIG_SCHED_DEBUG
	unsigned int state = READ_ONCE(p->__state);

	/*
	 * We should never call set_task_cpu() on a blocked task,
	 * ttwu() will sort out the placement.
	 */
	WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq);

	/*
	 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
	 * because schedstat_wait_{start,end} rebase migrating task's wait_start
	 * time relying on p->on_rq.
	 */
	WARN_ON_ONCE(state == TASK_RUNNING &&
		     p->sched_class == &fair_sched_class &&
		     (p->on_rq && !task_on_rq_migrating(p)));

#ifdef CONFIG_LOCKDEP
	/*
	 * The caller should hold either p->pi_lock or rq->lock, when changing
	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
	 *
	 * sched_move_task() holds both and thus holding either pins the cgroup,
	 * see task_group().
	 *
	 * Furthermore, all task_rq users should acquire both locks, see
	 * task_rq_lock().
	 */
	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
				      lockdep_is_held(__rq_lockp(task_rq(p)))));
#endif
	/*
	 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
	 */
	WARN_ON_ONCE(!cpu_online(new_cpu));

	WARN_ON_ONCE(is_migration_disabled(p));
#endif

	trace_sched_migrate_task(p, new_cpu);

	if (task_cpu(p) != new_cpu) {
		if (p->sched_class->migrate_task_rq)
			p->sched_class->migrate_task_rq(p, new_cpu);
		p->se.nr_migrations++;
		rseq_migrate(p);
		sched_mm_cid_migrate_from(p);
		perf_event_task_migrate(p);
	}

	__set_task_cpu(p, new_cpu);
}

#ifdef CONFIG_NUMA_BALANCING
static void __migrate_swap_task(struct task_struct *p, int cpu)
{
	if (task_on_rq_queued(p)) {
		struct rq *src_rq, *dst_rq;
		struct rq_flags srf, drf;

		src_rq = task_rq(p);
		dst_rq = cpu_rq(cpu);

		rq_pin_lock(src_rq, &srf);
		rq_pin_lock(dst_rq, &drf);

		move_queued_task_locked(src_rq, dst_rq, p);
		wakeup_preempt(dst_rq, p, 0);

		rq_unpin_lock(dst_rq, &drf);
		rq_unpin_lock(src_rq, &srf);

	} else {
		/*
		 * Task isn't running anymore; make it appear like we migrated
		 * it before it went to sleep. This means on wakeup we make the
		 * previous CPU our target instead of where it really is.
		 */
		p->wake_cpu = cpu;
	}
}

struct migration_swap_arg {
	struct task_struct *src_task, *dst_task;
	int src_cpu, dst_cpu;
};

static int migrate_swap_stop(void *data)
{
	struct migration_swap_arg *arg = data;
	struct rq *src_rq, *dst_rq;

	if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
		return -EAGAIN;

	src_rq = cpu_rq(arg->src_cpu);
	dst_rq = cpu_rq(arg->dst_cpu);

	guard(double_raw_spinlock)(&arg->src_task->pi_lock, &arg->dst_task->pi_lock);
	guard(double_rq_lock)(src_rq, dst_rq);

	if (task_cpu(arg->dst_task) != arg->dst_cpu)
		return -EAGAIN;

	if (task_cpu(arg->src_task) != arg->src_cpu)
		return -EAGAIN;

	if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr))
		return -EAGAIN;

	if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr))
		return -EAGAIN;

	__migrate_swap_task(arg->src_task, arg->dst_cpu);
	__migrate_swap_task(arg->dst_task, arg->src_cpu);

	return 0;
}

/*
 * Cross migrate two tasks
 */
int migrate_swap(struct task_struct *cur, struct task_struct *p,
		 int target_cpu, int curr_cpu)
{
	struct migration_swap_arg arg;
	int ret = -EINVAL;

	arg = (struct migration_swap_arg){
		.src_task = cur,
		.src_cpu = curr_cpu,
		.dst_task = p,
		.dst_cpu = target_cpu,
	};

	if (arg.src_cpu == arg.dst_cpu)
		goto out;

	/*
	 * These three tests are all lockless; this is OK since all of them
	 * will be re-checked with proper locks held further down the line.
	 */
	if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
		goto out;

	if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr))
		goto out;

	if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr))
		goto out;

	trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
	ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);

out:
	return ret;
}
#endif /* CONFIG_NUMA_BALANCING */

/***
 * kick_process - kick a running thread to enter/exit the kernel
 * @p: the to-be-kicked thread
 *
 * Cause a process which is running on another CPU to enter
 * kernel-mode, without any delay. (to get signals handled.)
 *
 * NOTE: this function doesn't have to take the runqueue lock,
 * because all it wants to ensure is that the remote task enters
 * the kernel. If the IPI races and the task has been migrated
 * to another CPU then no harm is done and the purpose has been
 * achieved as well.
 */
void kick_process(struct task_struct *p)
{
	guard(preempt)();
	int cpu = task_cpu(p);

	if ((cpu != smp_processor_id()) && task_curr(p))
		smp_send_reschedule(cpu);
}
EXPORT_SYMBOL_GPL(kick_process);

/*
 * ->cpus_ptr is protected by both rq->lock and p->pi_lock
 *
 * A few notes on cpu_active vs cpu_online:
 *
 *  - cpu_active must be a subset of cpu_online
 *
 *  - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
 *    see __set_cpus_allowed_ptr(). At this point the newly online
 *    CPU isn't yet part of the sched domains, and balancing will not
 *    see it.
 *
 *  - on CPU-down we clear cpu_active() to mask the sched domains and
 *    prevent the load balancer from placing new tasks on the
 *    to-be-removed CPU. Existing tasks will remain running there and
 *    will be taken off.
 *
 * This means that fallback selection must not select !active CPUs.
 * And can assume that any active CPU must be online. Conversely
 * select_task_rq() below may allow selection of !active CPUs in order
 * to satisfy the above rules.
 */
static int select_fallback_rq(int cpu, struct task_struct *p)
{
	int nid = cpu_to_node(cpu);
	const struct cpumask *nodemask = NULL;
	enum { cpuset, possible, fail } state = cpuset;
	int dest_cpu;

	/*
	 * If the node that the CPU is on has been offlined, cpu_to_node()
	 * will return -1. There is no CPU on the node, and we should
	 * select the CPU on the other node.
	 */
	if (nid != -1) {
		nodemask = cpumask_of_node(nid);

		/* Look for allowed, online CPU in same node. */
		for_each_cpu(dest_cpu, nodemask) {
			if (is_cpu_allowed(p, dest_cpu))
				return dest_cpu;
		}
	}

	for (;;) {
		/* Any allowed, online CPU? */
		for_each_cpu(dest_cpu, p->cpus_ptr) {
			if (!is_cpu_allowed(p, dest_cpu))
				continue;

			goto out;
		}

		/* No more Mr. Nice Guy. */
		switch (state) {
		case cpuset:
			if (cpuset_cpus_allowed_fallback(p)) {
				state = possible;
				break;
			}
			fallthrough;
		case possible:
			/*
			 * XXX When called from select_task_rq() we only
			 * hold p->pi_lock and again violate locking order.
			 *
			 * More yuck to audit.
			 */
			do_set_cpus_allowed(p, task_cpu_fallback_mask(p));
			state = fail;
			break;
		case fail:
			BUG();
			break;
		}
	}

out:
	if (state != cpuset) {
		/*
		 * Don't tell them about moving exiting tasks or
		 * kernel threads (both mm NULL), since they never
		 * leave kernel.
		 */
		if (p->mm && printk_ratelimit()) {
			printk_deferred("process %d (%s) no longer affine to cpu%d\n",
					task_pid_nr(p), p->comm, cpu);
		}
	}

	return dest_cpu;
}

/*
 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
 */
static inline
int select_task_rq(struct task_struct *p, int cpu, int *wake_flags)
{
	lockdep_assert_held(&p->pi_lock);

	if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p)) {
		cpu = p->sched_class->select_task_rq(p, cpu, *wake_flags);
		*wake_flags |= WF_RQ_SELECTED;
	} else {
		cpu = cpumask_any(p->cpus_ptr);
	}

	/*
	 * In order not to call set_task_cpu() on a blocking task we need
	 * to rely on ttwu() to place the task on a valid ->cpus_ptr
	 * CPU.
	 *
	 * Since this is common to all placement strategies, this lives here.
	 *
	 * [ this allows ->select_task() to simply return task_cpu(p) and
	 *   not worry about this generic constraint ]
	 */
	if (unlikely(!is_cpu_allowed(p, cpu)))
		cpu = select_fallback_rq(task_cpu(p), p);

	return cpu;
}

void sched_set_stop_task(int cpu, struct task_struct *stop)
{
	static struct lock_class_key stop_pi_lock;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct task_struct *old_stop = cpu_rq(cpu)->stop;

	if (stop) {
		/*
		 * Make it appear like a SCHED_FIFO task, it's something
		 * userspace knows about and won't get confused about.
		 *
		 * Also, it will make PI more or less work without too
		 * much confusion -- but then, stop work should not
		 * rely on PI working anyway.
		 */
		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);

		stop->sched_class = &stop_sched_class;

		/*
		 * The PI code calls rt_mutex_setprio() with ->pi_lock held to
		 * adjust the effective priority of a task. As a result,
		 * rt_mutex_setprio() can trigger (RT) balancing operations,
		 * which can then trigger wakeups of the stop thread to push
		 * around the current task.
		 *
		 * The stop task itself will never be part of the PI-chain, it
		 * never blocks, therefore that ->pi_lock recursion is safe.
		 * Tell lockdep about this by placing the stop->pi_lock in its
		 * own class.
		 */
		lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
	}

	cpu_rq(cpu)->stop = stop;

	if (old_stop) {
		/*
		 * Reset it back to a normal scheduling class so that
		 * it can die in pieces.
		 */
		old_stop->sched_class = &rt_sched_class;
	}
}
3645
3646 #else /* CONFIG_SMP */
3647
3648 static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { }
3649
3650 static inline bool rq_has_pinned_tasks(struct rq *rq)
3651 {
3652 return false;
3653 }
3654
3655 #endif /* !CONFIG_SMP */
3656
3657 static void
3658 ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
3659 {
3660 struct rq *rq;
3661
3662 if (!schedstat_enabled())
3663 return;
3664
3665 rq = this_rq();
3666
3667 #ifdef CONFIG_SMP
3668 if (cpu == rq->cpu) {
3669 __schedstat_inc(rq->ttwu_local);
3670 __schedstat_inc(p->stats.nr_wakeups_local);
3671 } else {
3672 struct sched_domain *sd;
3673
3674 __schedstat_inc(p->stats.nr_wakeups_remote);
3675
3676 guard(rcu)();
3677 for_each_domain(rq->cpu, sd) {
3678 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
3679 __schedstat_inc(sd->ttwu_wake_remote);
3680 break;
3681 }
3682 }
3683 }
3684
3685 if (wake_flags & WF_MIGRATED)
3686 __schedstat_inc(p->stats.nr_wakeups_migrate);
3687 #endif /* CONFIG_SMP */
3688
3689 __schedstat_inc(rq->ttwu_count);
3690 __schedstat_inc(p->stats.nr_wakeups);
3691
3692 if (wake_flags & WF_SYNC)
3693 __schedstat_inc(p->stats.nr_wakeups_sync);
3694 }
3695
3696 /*
3697 * Mark the task runnable.
3698 */
3699 static inline void ttwu_do_wakeup(struct task_struct *p)
3700 {
3701 WRITE_ONCE(p->__state, TASK_RUNNING);
3702 trace_sched_wakeup(p);
3703 }
3704
3705 static void
3706 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
3707 struct rq_flags *rf)
3708 {
3709 int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
3710
3711 lockdep_assert_rq_held(rq);
3712
3713 if (p->sched_contributes_to_load)
3714 rq->nr_uninterruptible--;
3715
3716 #ifdef CONFIG_SMP
3717 if (wake_flags & WF_RQ_SELECTED)
3718 en_flags |= ENQUEUE_RQ_SELECTED;
3719 if (wake_flags & WF_MIGRATED)
3720 en_flags |= ENQUEUE_MIGRATED;
3721 else
3722 #endif
3723 if (p->in_iowait) {
3724 delayacct_blkio_end(p);
3725 atomic_dec(&task_rq(p)->nr_iowait);
3726 }
3727
3728 activate_task(rq, p, en_flags);
3729 wakeup_preempt(rq, p, wake_flags);
3730
3731 ttwu_do_wakeup(p);
3732
3733 #ifdef CONFIG_SMP
3734 if (p->sched_class->task_woken) {
3735 /*
3736 * Our task @p is fully woken up and running; so it's safe to
3737 * drop the rq->lock, hereafter rq is only used for statistics.
3738 */
3739 rq_unpin_lock(rq, rf);
3740 p->sched_class->task_woken(rq, p);
3741 rq_repin_lock(rq, rf);
3742 }
3743
3744 if (rq->idle_stamp) {
3745 u64 delta = rq_clock(rq) - rq->idle_stamp;
3746 u64 max = 2*rq->max_idle_balance_cost;
3747
3748 update_avg(&rq->avg_idle, delta);
3749
3750 if (rq->avg_idle > max)
3751 rq->avg_idle = max;
3752
3753 rq->idle_stamp = 0;
3754 }
3755 #endif
3756 }
3757
3758 /*
3759 * Consider @p being inside a wait loop:
3760 *
3761 * for (;;) {
3762 * set_current_state(TASK_UNINTERRUPTIBLE);
3763 *
3764 * if (CONDITION)
3765 * break;
3766 *
3767 * schedule();
3768 * }
3769 * __set_current_state(TASK_RUNNING);
3770 *
3771 * between set_current_state() and schedule(). In this case @p is still
3772 * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
3773 * an atomic manner.
3774 *
3775 * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
3776 * then schedule() must still happen and p->state can be changed to
3777 * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
3778 * need to do a full wakeup with enqueue.
3779 *
3780 * Returns: %true when the wakeup is done,
3781 * %false otherwise.
3782 */
3783 static int ttwu_runnable(struct task_struct *p, int wake_flags)
3784 {
3785 struct rq_flags rf;
3786 struct rq *rq;
3787 int ret = 0;
3788
3789 rq = __task_rq_lock(p, &rf);
3790 if (task_on_rq_queued(p)) {
3791 update_rq_clock(rq);
3792 if (p->se.sched_delayed)
3793 enqueue_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_DELAYED);
3794 if (!task_on_cpu(rq, p)) {
3795 /*
3796 * When on_rq && !on_cpu the task is preempted, see if
3797 * it should preempt the task that is current now.
3798 */
3799 wakeup_preempt(rq, p, wake_flags);
3800 }
3801 ttwu_do_wakeup(p);
3802 ret = 1;
3803 }
3804 __task_rq_unlock(rq, &rf);
3805
3806 return ret;
3807 }
3808
3809 #ifdef CONFIG_SMP
3810 void sched_ttwu_pending(void *arg)
3811 {
3812 struct llist_node *llist = arg;
3813 struct rq *rq = this_rq();
3814 struct task_struct *p, *t;
3815 struct rq_flags rf;
3816
3817 if (!llist)
3818 return;
3819
3820 rq_lock_irqsave(rq, &rf);
3821 update_rq_clock(rq);
3822
3823 llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
3824 if (WARN_ON_ONCE(p->on_cpu))
3825 smp_cond_load_acquire(&p->on_cpu, !VAL);
3826
3827 if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
3828 set_task_cpu(p, cpu_of(rq));
3829
3830 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf);
3831 }
3832
3833 /*
3834 * Must be after enqueueing at least one task such that
3835 * idle_cpu() does not observe a false-negative -- if it does,
3836 * it is possible for select_idle_sibling() to stack a number
3837 * of tasks on this CPU during that window.
3838 *
3839 * It is OK to clear ttwu_pending when another task is pending.
3840 * We will receive an IPI after local IRQs are enabled and then enqueue it.
3841 * Since now nr_running > 0, idle_cpu() will always get the correct result.
3842 */
3843 WRITE_ONCE(rq->ttwu_pending, 0);
3844 rq_unlock_irqrestore(rq, &rf);
3845 }
3846
3847 /*
3848 * Prepare the scene for sending an IPI for a remote smp_call
3849 *
3850 * Returns true if the caller can proceed with sending the IPI.
3851 * Returns false otherwise.
3852 */
3853 bool call_function_single_prep_ipi(int cpu)
3854 {
3855 if (set_nr_if_polling(cpu_rq(cpu)->idle)) {
3856 trace_sched_wake_idle_without_ipi(cpu);
3857 return false;
3858 }
3859
3860 return true;
3861 }
3862
3863 /*
3864 * Queue a task on the target CPU's wake_list and wake the CPU via IPI if
3865 * necessary. The wakee CPU on receipt of the IPI will queue the task
3866 * via sched_ttwu_pending() for activation so the wakee incurs the cost
3867 * of the wakeup instead of the waker.
3868 */
3869 static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3870 {
3871 struct rq *rq = cpu_rq(cpu);
3872
3873 p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
3874
3875 WRITE_ONCE(rq->ttwu_pending, 1);
3876 __smp_call_single_queue(cpu, &p->wake_entry.llist);
3877 }
3878
3879 void wake_up_if_idle(int cpu)
3880 {
3881 struct rq *rq = cpu_rq(cpu);
3882
3883 guard(rcu)();
3884 if (is_idle_task(rcu_dereference(rq->curr))) {
3885 guard(rq_lock_irqsave)(rq);
3886 if (is_idle_task(rq->curr))
3887 resched_curr(rq);
3888 }
3889 }
3890
3891 bool cpus_equal_capacity(int this_cpu, int that_cpu)
3892 {
3893 if (!sched_asym_cpucap_active())
3894 return true;
3895
3896 if (this_cpu == that_cpu)
3897 return true;
3898
3899 return arch_scale_cpu_capacity(this_cpu) == arch_scale_cpu_capacity(that_cpu);
3900 }
3901
3902 bool cpus_share_cache(int this_cpu, int that_cpu)
3903 {
3904 if (this_cpu == that_cpu)
3905 return true;
3906
3907 return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
3908 }
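/*
 * A minimal usage sketch (illustrative; prev_cpu/target and
 * select_some_other_cpu() are made-up names): wake-affine style placement
 * can prefer the previous CPU only while it still shares the LLC with the
 * waker:
 *
 *	if (cpus_share_cache(smp_processor_id(), prev_cpu))
 *		target = prev_cpu;	// wakeup data is likely cache-hot
 *	else
 *		target = select_some_other_cpu(p);
 */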
3909
3910 /*
3911 * Whether CPUs share cache resources, which means the LLC on non-cluster
3912 * machines and the LLC tag or L2 on machines with clusters.
3913 */
3914 bool cpus_share_resources(int this_cpu, int that_cpu)
3915 {
3916 if (this_cpu == that_cpu)
3917 return true;
3918
3919 return per_cpu(sd_share_id, this_cpu) == per_cpu(sd_share_id, that_cpu);
3920 }
3921
3922 static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
3923 {
3924 /*
3925 * The BPF scheduler may depend on select_task_rq() being invoked during
3926 * wakeups. In addition, @p may end up executing on a different CPU
3927 * regardless of what happens in the wakeup path making the ttwu_queue
3928 * optimization less meaningful. Skip if on SCX.
3929 */
3930 if (task_on_scx(p))
3931 return false;
3932
3933 /*
3934 * Do not complicate things with the async wake_list while the CPU is
3935 * in hotplug state.
3936 */
3937 if (!cpu_active(cpu))
3938 return false;
3939
3940 /* Ensure the task will still be allowed to run on the CPU. */
3941 if (!cpumask_test_cpu(cpu, p->cpus_ptr))
3942 return false;
3943
3944 /*
3945 * If the CPU does not share cache, then queue the task on the
3946 * remote rq's wakelist to avoid accessing remote data.
3947 */
3948 if (!cpus_share_cache(smp_processor_id(), cpu))
3949 return true;
3950
3951 if (cpu == smp_processor_id())
3952 return false;
3953
3954 /*
3955 * If the wakee CPU is idle, or the task is descheduling and the
3956 * only running task on the CPU, then use the wakelist to offload
3957 * the task activation to the idle (or soon-to-be-idle) CPU as
3958 * the current CPU is likely busy. nr_running is checked to
3959 * avoid unnecessary task stacking.
3960 *
3961 * Note that we can only get here with (wakee) p->on_rq=0,
3962 * p->on_cpu can be whatever, we've done the dequeue, so
3963 * the wakee has been accounted out of ->nr_running.
3964 */
3965 if (!cpu_rq(cpu)->nr_running)
3966 return true;
3967
3968 return false;
3969 }
3970
3971 static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3972 {
3973 if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) {
3974 sched_clock_cpu(cpu); /* Sync clocks across CPUs */
3975 __ttwu_queue_wakelist(p, cpu, wake_flags);
3976 return true;
3977 }
3978
3979 return false;
3980 }
3981
3982 #else /* !CONFIG_SMP */
3983
3984 static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3985 {
3986 return false;
3987 }
3988
3989 #endif /* CONFIG_SMP */
3990
3991 static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
3992 {
3993 struct rq *rq = cpu_rq(cpu);
3994 struct rq_flags rf;
3995
3996 if (ttwu_queue_wakelist(p, cpu, wake_flags))
3997 return;
3998
3999 rq_lock(rq, &rf);
4000 update_rq_clock(rq);
4001 ttwu_do_activate(rq, p, wake_flags, &rf);
4002 rq_unlock(rq, &rf);
4003 }
4004
4005 /*
4006 * Invoked from try_to_wake_up() to check whether the task can be woken up.
4007 *
4008 * The caller holds p::pi_lock if p != current or has preemption
4009 * disabled when p == current.
4010 *
4011 * The rules of saved_state:
4012 *
4013 * The related locking code always holds p::pi_lock when updating
4014 * p::saved_state, which means the code is fully serialized in both cases.
4015 *
4016 * For PREEMPT_RT, the lock wait and lock wakeups happen via TASK_RTLOCK_WAIT.
4017 * No other bits set. This allows to distinguish all wakeup scenarios.
4018 *
4019 * For FREEZER, the wakeup happens via TASK_FROZEN. No other bits set. This
4020 * allows us to prevent early wakeup of tasks before they can be run on
4021 * asymmetric ISA architectures (e.g. ARMv9).
4022 */
4023 static __always_inline
4024 bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
4025 {
4026 int match;
4027
4028 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
4029 WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) &&
4030 state != TASK_RTLOCK_WAIT);
4031 }
4032
4033 *success = !!(match = __task_state_match(p, state));
4034
4035 /*
4036 * Saved state preserves the task state across blocking on
4037 * an RT lock or TASK_FREEZABLE tasks. If the state matches,
4038 * set p::saved_state to TASK_RUNNING, but do not wake the task
4039 * because it waits for a lock wakeup or __thaw_task(). Also
4040 * indicate success because from the regular waker's point of
4041 * view this has succeeded.
4042 *
4043 * After acquiring the lock the task will restore p::__state
4044 * from p::saved_state which ensures that the regular
4045 * wakeup is not lost. The restore will also set
4046 * p::saved_state to TASK_RUNNING so any further tests will
4047 * not result in false positives vs. @success
4048 */
4049 if (match < 0)
4050 p->saved_state = TASK_RUNNING;
4051
4052 return match > 0;
4053 }
4054
4055 /*
4056 * Notes on Program-Order guarantees on SMP systems.
4057 *
4058 * MIGRATION
4059 *
4060 * The basic program-order guarantee on SMP systems is that when a task [t]
4061 * migrates, all its activity on its old CPU [c0] happens-before any subsequent
4062 * execution on its new CPU [c1].
4063 *
4064 * For migration (of runnable tasks) this is provided by the following means:
4065 *
4066 * A) UNLOCK of the rq(c0)->lock scheduling out task t
4067 * B) migration for t is required to synchronize *both* rq(c0)->lock and
4068 * rq(c1)->lock (if not at the same time, then in that order).
4069 * C) LOCK of the rq(c1)->lock scheduling in task
4070 *
4071 * Release/acquire chaining guarantees that B happens after A and C after B.
4072 * Note: the CPU doing B need not be c0 or c1
4073 *
4074 * Example:
4075 *
4076 * CPU0 CPU1 CPU2
4077 *
4078 * LOCK rq(0)->lock
4079 * sched-out X
4080 * sched-in Y
4081 * UNLOCK rq(0)->lock
4082 *
4083 * LOCK rq(0)->lock // orders against CPU0
4084 * dequeue X
4085 * UNLOCK rq(0)->lock
4086 *
4087 * LOCK rq(1)->lock
4088 * enqueue X
4089 * UNLOCK rq(1)->lock
4090 *
4091 * LOCK rq(1)->lock // orders against CPU2
4092 * sched-out Z
4093 * sched-in X
4094 * UNLOCK rq(1)->lock
4095 *
4096 *
4097 * BLOCKING -- aka. SLEEP + WAKEUP
4098 *
4099 * For blocking we (obviously) need to provide the same guarantee as for
4100 * migration. However the means are completely different as there is no lock
4101 * chain to provide order. Instead we do:
4102 *
4103 * 1) smp_store_release(X->on_cpu, 0) -- finish_task()
4104 * 2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
4105 *
4106 * Example:
4107 *
4108 * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule)
4109 *
4110 * LOCK rq(0)->lock LOCK X->pi_lock
4111 * dequeue X
4112 * sched-out X
4113 * smp_store_release(X->on_cpu, 0);
4114 *
4115 * smp_cond_load_acquire(&X->on_cpu, !VAL);
4116 * X->state = WAKING
4117 * set_task_cpu(X,2)
4118 *
4119 * LOCK rq(2)->lock
4120 * enqueue X
4121 * X->state = RUNNING
4122 * UNLOCK rq(2)->lock
4123 *
4124 * LOCK rq(2)->lock // orders against CPU1
4125 * sched-out Z
4126 * sched-in X
4127 * UNLOCK rq(2)->lock
4128 *
4129 * UNLOCK X->pi_lock
4130 * UNLOCK rq(0)->lock
4131 *
4132 *
4133 * However, for wakeups there is a second guarantee we must provide, namely we
4134 * must ensure that CONDITION=1 done by the caller can not be reordered with
4135 * accesses to the task state; see try_to_wake_up() and set_current_state().
4136 */
4137
4138 /**
4139 * try_to_wake_up - wake up a thread
4140 * @p: the thread to be awakened
4141 * @state: the mask of task states that can be woken
4142 * @wake_flags: wake modifier flags (WF_*)
4143 *
4144 * Conceptually does:
4145 *
4146 * If (@state & @p->state) @p->state = TASK_RUNNING.
4147 *
4148 * If the task was not queued/runnable, also place it back on a runqueue.
4149 *
4150 * This function is atomic against schedule() which would dequeue the task.
4151 *
4152 * It issues a full memory barrier before accessing @p->state, see the comment
4153 * with set_current_state().
4154 *
4155 * Uses p->pi_lock to serialize against concurrent wake-ups.
4156 *
4157 * Relies on p->pi_lock stabilizing:
4158 * - p->sched_class
4159 * - p->cpus_ptr
4160 * - p->sched_task_group
4161 * in order to do migration, see its use of select_task_rq()/set_task_cpu().
4162 *
4163 * Tries really hard to only take one task_rq(p)->lock for performance.
4164 * Takes rq->lock in:
4165 * - ttwu_runnable() -- old rq, unavoidable, see comment there;
4166 * - ttwu_queue() -- new rq, for enqueue of the task;
4167 * - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
4168 *
4169 * As a consequence we race really badly with just about everything. See the
4170 * many memory barriers and their comments for details.
4171 *
4172 * Return: %true if @p->state changes (an actual wakeup was done),
4173 * %false otherwise.
4174 */
4175 int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
4176 {
4177 guard(preempt)();
4178 int cpu, success = 0;
4179
4180 wake_flags |= WF_TTWU;
4181
4182 if (p == current) {
4183 /*
4184 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
4185 * == smp_processor_id()'. Together this means we can special
4186 * case the whole 'p->on_rq && ttwu_runnable()' case below
4187 * without taking any locks.
4188 *
4189 * Specifically, given current runs ttwu() we must be before
4190 * schedule()'s block_task(), as such this must not observe
4191 * sched_delayed.
4192 *
4193 * In particular:
4194 * - we rely on Program-Order guarantees for all the ordering,
4195 * - we're serialized against set_special_state() by virtue of
4196 * it disabling IRQs (this allows not taking ->pi_lock).
4197 */
4198 SCHED_WARN_ON(p->se.sched_delayed);
4199 if (!ttwu_state_match(p, state, &success))
4200 goto out;
4201
4202 trace_sched_waking(p);
4203 ttwu_do_wakeup(p);
4204 goto out;
4205 }
4206
4207 /*
4208 * If we are going to wake up a thread waiting for CONDITION we
4209 * need to ensure that CONDITION=1 done by the caller can not be
4210 * reordered with p->state check below. This pairs with smp_store_mb()
4211 * in set_current_state() that the waiting thread does.
4212 */
4213 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
4214 smp_mb__after_spinlock();
4215 if (!ttwu_state_match(p, state, &success))
4216 break;
4217
4218 trace_sched_waking(p);
4219
4220 /*
4221 * Ensure we load p->on_rq _after_ p->state, otherwise it would
4222 * be possible to, falsely, observe p->on_rq == 0 and get stuck
4223 * in smp_cond_load_acquire() below.
4224 *
4225 * sched_ttwu_pending() try_to_wake_up()
4226 * STORE p->on_rq = 1 LOAD p->state
4227 * UNLOCK rq->lock
4228 *
4229 * __schedule() (switch to task 'p')
4230 * LOCK rq->lock smp_rmb();
4231 * smp_mb__after_spinlock();
4232 * UNLOCK rq->lock
4233 *
4234 * [task p]
4235 * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq
4236 *
4237 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4238 * __schedule(). See the comment for smp_mb__after_spinlock().
4239 *
4240 * A similar smp_rmb() lives in __task_needs_rq_lock().
4241 */
4242 smp_rmb();
4243 if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
4244 break;
4245
4246 #ifdef CONFIG_SMP
4247 /*
4248 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
4249 * possible to, falsely, observe p->on_cpu == 0.
4250 *
4251 * One must be running (->on_cpu == 1) in order to remove oneself
4252 * from the runqueue.
4253 *
4254 * __schedule() (switch to task 'p') try_to_wake_up()
4255 * STORE p->on_cpu = 1 LOAD p->on_rq
4256 * UNLOCK rq->lock
4257 *
4258 * __schedule() (put 'p' to sleep)
4259 * LOCK rq->lock smp_rmb();
4260 * smp_mb__after_spinlock();
4261 * STORE p->on_rq = 0 LOAD p->on_cpu
4262 *
4263 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4264 * __schedule(). See the comment for smp_mb__after_spinlock().
4265 *
4266 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
4267 * schedule()'s deactivate_task() has 'happened' and p will no longer
4268 * care about its own p->state. See the comment in __schedule().
4269 */
4270 smp_acquire__after_ctrl_dep();
4271
4272 /*
4273 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
4274 * == 0), which means we need to do an enqueue, change p->state to
4275 * TASK_WAKING such that we can unlock p->pi_lock before doing the
4276 * enqueue, such as ttwu_queue_wakelist().
4277 */
4278 WRITE_ONCE(p->__state, TASK_WAKING);
4279
4280 /*
4281 * If the owning (remote) CPU is still in the middle of schedule() with
4282 * this task as prev, consider queueing p on the remote CPU's wake_list
4283 * which potentially sends an IPI instead of spinning on p->on_cpu to
4284 * let the waker make forward progress. This is safe because IRQs are
4285 * disabled and the IPI will deliver after on_cpu is cleared.
4286 *
4287 * Ensure we load task_cpu(p) after p->on_cpu:
4288 *
4289 * set_task_cpu(p, cpu);
4290 * STORE p->cpu = @cpu
4291 * __schedule() (switch to task 'p')
4292 * LOCK rq->lock
4293 * smp_mb__after_spin_lock() smp_cond_load_acquire(&p->on_cpu)
4294 * STORE p->on_cpu = 1 LOAD p->cpu
4295 *
4296 * to ensure we observe the correct CPU on which the task is currently
4297 * scheduling.
4298 */
4299 if (smp_load_acquire(&p->on_cpu) &&
4300 ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
4301 break;
4302
4303 /*
4304 * If the owning (remote) CPU is still in the middle of schedule() with
4305 * this task as prev, wait until it's done referencing the task.
4306 *
4307 * Pairs with the smp_store_release() in finish_task().
4308 *
4309 * This ensures that tasks getting woken will be fully ordered against
4310 * their previous state and preserve Program Order.
4311 */
4312 smp_cond_load_acquire(&p->on_cpu, !VAL);
4313
4314 cpu = select_task_rq(p, p->wake_cpu, &wake_flags);
4315 if (task_cpu(p) != cpu) {
4316 if (p->in_iowait) {
4317 delayacct_blkio_end(p);
4318 atomic_dec(&task_rq(p)->nr_iowait);
4319 }
4320
4321 wake_flags |= WF_MIGRATED;
4322 psi_ttwu_dequeue(p);
4323 set_task_cpu(p, cpu);
4324 }
4325 #else
4326 cpu = task_cpu(p);
4327 #endif /* CONFIG_SMP */
4328
4329 ttwu_queue(p, cpu, wake_flags);
4330 }
4331 out:
4332 if (success)
4333 ttwu_stat(p, task_cpu(p), wake_flags);
4334
4335 return success;
4336 }
4337
4338 static bool __task_needs_rq_lock(struct task_struct *p)
4339 {
4340 unsigned int state = READ_ONCE(p->__state);
4341
4342 /*
4343 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when
4344 * the task is blocked. Make sure to check @state since ttwu() can drop
4345 * locks at the end, see ttwu_queue_wakelist().
4346 */
4347 if (state == TASK_RUNNING || state == TASK_WAKING)
4348 return true;
4349
4350 /*
4351 * Ensure we load p->on_rq after p->__state, otherwise it would be
4352 * possible to, falsely, observe p->on_rq == 0.
4353 *
4354 * See try_to_wake_up() for a longer comment.
4355 */
4356 smp_rmb();
4357 if (p->on_rq)
4358 return true;
4359
4360 #ifdef CONFIG_SMP
4361 /*
4362 * Ensure the task has finished __schedule() and will not be referenced
4363 * anymore. Again, see try_to_wake_up() for a longer comment.
4364 */
4365 smp_rmb();
4366 smp_cond_load_acquire(&p->on_cpu, !VAL);
4367 #endif
4368
4369 return false;
4370 }
4371
4372 /**
4373 * task_call_func - Invoke a function on task in fixed state
4374 * @p: Process for which the function is to be invoked, can be @current.
4375 * @func: Function to invoke.
4376 * @arg: Argument to function.
4377 *
4378 * Fix the task in its current state by avoiding wakeups and/or rq operations
4379 * and call @func(@arg) on it. This function can use task_is_runnable() and
4380 * task_curr() to work out what the state is, if required. Given that @func
4381 * can be invoked with a runqueue lock held, it had better be quite
4382 * lightweight.
4383 *
4384 * Returns:
4385 * Whatever @func returns
4386 */
4387 int task_call_func(struct task_struct *p, task_call_f func, void *arg)
4388 {
4389 struct rq *rq = NULL;
4390 struct rq_flags rf;
4391 int ret;
4392
4393 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
4394
4395 if (__task_needs_rq_lock(p))
4396 rq = __task_rq_lock(p, &rf);
4397
4398 /*
4399 * At this point the task is pinned; either:
4400 * - blocked and we're holding off wakeups (pi->lock)
4401 * - woken, and we're holding off enqueue (rq->lock)
4402 * - queued, and we're holding off schedule (rq->lock)
4403 * - running, and we're holding off de-schedule (rq->lock)
4404 *
4405 * The called function (@func) can use: task_curr(), p->on_rq and
4406 * p->__state to differentiate between these states.
4407 */
4408 ret = func(p, arg);
4409
4410 if (rq)
4411 rq_unlock(rq, &rf);
4412
4413 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
4414 return ret;
4415 }
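/*
 * Usage sketch (the callback name is hypothetical): sample a remote task's
 * state while it is pinned as described above:
 *
 *	static int get_state_cb(struct task_struct *t, void *arg)
 *	{
 *		*(unsigned int *)arg = READ_ONCE(t->__state);
 *		return 0;
 *	}
 *
 *	unsigned int state;
 *	task_call_func(p, get_state_cb, &state);
 */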
4416
4417 /**
4418 * cpu_curr_snapshot - Return a snapshot of the currently running task
4419 * @cpu: The CPU on which to snapshot the task.
4420 *
4421 * Returns the task_struct pointer of the task "currently" running on
4422 * the specified CPU.
4423 *
4424 * If the specified CPU was offline, the return value is whatever it
4425 * is, perhaps a pointer to the task_struct structure of that CPU's idle
4426 * task, but there is no guarantee. Callers wishing a useful return
4427 * value must take some action to ensure that the specified CPU remains
4428 * online throughout.
4429 *
4430 * This function executes full memory barriers before and after fetching
4431 * the pointer, which permits the caller to confine this function's fetch
4432 * with respect to the caller's accesses to other shared variables.
4433 */
4434 struct task_struct *cpu_curr_snapshot(int cpu)
4435 {
4436 struct rq *rq = cpu_rq(cpu);
4437 struct task_struct *t;
4438 struct rq_flags rf;
4439
4440 rq_lock_irqsave(rq, &rf);
4441 smp_mb__after_spinlock(); /* Pairing determined by caller's synchronization design. */
4442 t = rcu_dereference(cpu_curr(cpu));
4443 rq_unlock_irqrestore(rq, &rf);
4444 smp_mb(); /* Pairing determined by caller's synchronization design. */
4445
4446 return t;
4447 }
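/*
 * Usage sketch (inspect() is hypothetical): per the comment above, keep the
 * CPUs online across the snapshot, e.g. with cpus_read_lock(), for the
 * returned pointer to be meaningful:
 *
 *	cpus_read_lock();
 *	for_each_online_cpu(cpu)
 *		inspect(cpu_curr_snapshot(cpu));
 *	cpus_read_unlock();
 */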
4448
4449 /**
4450 * wake_up_process - Wake up a specific process
4451 * @p: The process to be woken up.
4452 *
4453 * Attempt to wake up the nominated process and move it to the set of runnable
4454 * processes.
4455 *
4456 * Return: 1 if the process was woken up, 0 if it was already running.
4457 *
4458 * This function executes a full memory barrier before accessing the task state.
4459 */
4460 int wake_up_process(struct task_struct *p)
4461 {
4462 return try_to_wake_up(p, TASK_NORMAL, 0);
4463 }
4464 EXPORT_SYMBOL(wake_up_process);
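/*
 * Usage sketch (my_thread_fn, data_ready() and process_data() are made-up
 * names): the canonical pairing with the wait loop documented above
 * ttwu_runnable():
 *
 *	static int my_thread_fn(void *arg)
 *	{
 *		while (!kthread_should_stop()) {
 *			set_current_state(TASK_INTERRUPTIBLE);
 *			if (!data_ready())
 *				schedule();
 *			__set_current_state(TASK_RUNNING);
 *			process_data();
 *		}
 *		return 0;
 *	}
 *
 * The producer stores the data_ready() condition and then calls
 * wake_up_process(worker); the full barrier in try_to_wake_up() orders
 * that store against the task state check.
 */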
4465
4466 int wake_up_state(struct task_struct *p, unsigned int state)
4467 {
4468 return try_to_wake_up(p, state, 0);
4469 }
4470
4471 /*
4472 * Perform scheduler related setup for a newly forked process p.
4473 * p is forked by current.
4474 *
4475 * __sched_fork() is basic setup which is also used by sched_init() to
4476 * initialize the boot CPU's idle task.
4477 */
4478 static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
4479 {
4480 p->on_rq = 0;
4481
4482 p->se.on_rq = 0;
4483 p->se.exec_start = 0;
4484 p->se.sum_exec_runtime = 0;
4485 p->se.prev_sum_exec_runtime = 0;
4486 p->se.nr_migrations = 0;
4487 p->se.vruntime = 0;
4488 p->se.vlag = 0;
4489 INIT_LIST_HEAD(&p->se.group_node);
4490
4491 /* A delayed task cannot be in clone(). */
4492 SCHED_WARN_ON(p->se.sched_delayed);
4493
4494 #ifdef CONFIG_FAIR_GROUP_SCHED
4495 p->se.cfs_rq = NULL;
4496 #endif
4497
4498 #ifdef CONFIG_SCHEDSTATS
4499 /* Even if schedstat is disabled, there should not be garbage */
4500 memset(&p->stats, 0, sizeof(p->stats));
4501 #endif
4502
4503 init_dl_entity(&p->dl);
4504
4505 INIT_LIST_HEAD(&p->rt.run_list);
4506 p->rt.timeout = 0;
4507 p->rt.time_slice = sched_rr_timeslice;
4508 p->rt.on_rq = 0;
4509 p->rt.on_list = 0;
4510
4511 #ifdef CONFIG_SCHED_CLASS_EXT
4512 init_scx_entity(&p->scx);
4513 #endif
4514
4515 #ifdef CONFIG_PREEMPT_NOTIFIERS
4516 INIT_HLIST_HEAD(&p->preempt_notifiers);
4517 #endif
4518
4519 #ifdef CONFIG_COMPACTION
4520 p->capture_control = NULL;
4521 #endif
4522 init_numa_balancing(clone_flags, p);
4523 #ifdef CONFIG_SMP
4524 p->wake_entry.u_flags = CSD_TYPE_TTWU;
4525 p->migration_pending = NULL;
4526 #endif
4527 init_sched_mm_cid(p);
4528 }
4529
4530 DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);
4531
4532 #ifdef CONFIG_NUMA_BALANCING
4533
4534 int sysctl_numa_balancing_mode;
4535
4536 static void __set_numabalancing_state(bool enabled)
4537 {
4538 if (enabled)
4539 static_branch_enable(&sched_numa_balancing);
4540 else
4541 static_branch_disable(&sched_numa_balancing);
4542 }
4543
4544 void set_numabalancing_state(bool enabled)
4545 {
4546 if (enabled)
4547 sysctl_numa_balancing_mode = NUMA_BALANCING_NORMAL;
4548 else
4549 sysctl_numa_balancing_mode = NUMA_BALANCING_DISABLED;
4550 __set_numabalancing_state(enabled);
4551 }
4552
4553 #ifdef CONFIG_PROC_SYSCTL
4554 static void reset_memory_tiering(void)
4555 {
4556 struct pglist_data *pgdat;
4557
4558 for_each_online_pgdat(pgdat) {
4559 pgdat->nbp_threshold = 0;
4560 pgdat->nbp_th_nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE);
4561 pgdat->nbp_th_start = jiffies_to_msecs(jiffies);
4562 }
4563 }
4564
4565 static int sysctl_numa_balancing(const struct ctl_table *table, int write,
4566 void *buffer, size_t *lenp, loff_t *ppos)
4567 {
4568 struct ctl_table t;
4569 int err;
4570 int state = sysctl_numa_balancing_mode;
4571
4572 if (write && !capable(CAP_SYS_ADMIN))
4573 return -EPERM;
4574
4575 t = *table;
4576 t.data = &state;
4577 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
4578 if (err < 0)
4579 return err;
4580 if (write) {
4581 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) &&
4582 (state & NUMA_BALANCING_MEMORY_TIERING))
4583 reset_memory_tiering();
4584 sysctl_numa_balancing_mode = state;
4585 __set_numabalancing_state(state);
4586 }
4587 return err;
4588 }
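/*
 * Example: from userspace, "echo 1 > /proc/sys/kernel/numa_balancing"
 * selects NUMA_BALANCING_NORMAL, "echo 2" selects
 * NUMA_BALANCING_MEMORY_TIERING (resetting the tiering thresholds above when
 * that bit is newly set), and "echo 0" disables balancing.
 */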
4589 #endif
4590 #endif
4591
4592 #ifdef CONFIG_SCHEDSTATS
4593
4594 DEFINE_STATIC_KEY_FALSE(sched_schedstats);
4595
4596 static void set_schedstats(bool enabled)
4597 {
4598 if (enabled)
4599 static_branch_enable(&sched_schedstats);
4600 else
4601 static_branch_disable(&sched_schedstats);
4602 }
4603
4604 void force_schedstat_enabled(void)
4605 {
4606 if (!schedstat_enabled()) {
4607 pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
4608 static_branch_enable(&sched_schedstats);
4609 }
4610 }
4611
4612 static int __init setup_schedstats(char *str)
4613 {
4614 int ret = 0;
4615 if (!str)
4616 goto out;
4617
4618 if (!strcmp(str, "enable")) {
4619 set_schedstats(true);
4620 ret = 1;
4621 } else if (!strcmp(str, "disable")) {
4622 set_schedstats(false);
4623 ret = 1;
4624 }
4625 out:
4626 if (!ret)
4627 pr_warn("Unable to parse schedstats=\n");
4628
4629 return ret;
4630 }
4631 __setup("schedstats=", setup_schedstats);
4632
4633 #ifdef CONFIG_PROC_SYSCTL
4634 static int sysctl_schedstats(const struct ctl_table *table, int write, void *buffer,
4635 size_t *lenp, loff_t *ppos)
4636 {
4637 struct ctl_table t;
4638 int err;
4639 int state = static_branch_likely(&sched_schedstats);
4640
4641 if (write && !capable(CAP_SYS_ADMIN))
4642 return -EPERM;
4643
4644 t = *table;
4645 t.data = &state;
4646 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
4647 if (err < 0)
4648 return err;
4649 if (write)
4650 set_schedstats(state);
4651 return err;
4652 }
4653 #endif /* CONFIG_PROC_SYSCTL */
4654 #endif /* CONFIG_SCHEDSTATS */
4655
4656 #ifdef CONFIG_SYSCTL
4657 static const struct ctl_table sched_core_sysctls[] = {
4658 #ifdef CONFIG_SCHEDSTATS
4659 {
4660 .procname = "sched_schedstats",
4661 .data = NULL,
4662 .maxlen = sizeof(unsigned int),
4663 .mode = 0644,
4664 .proc_handler = sysctl_schedstats,
4665 .extra1 = SYSCTL_ZERO,
4666 .extra2 = SYSCTL_ONE,
4667 },
4668 #endif /* CONFIG_SCHEDSTATS */
4669 #ifdef CONFIG_UCLAMP_TASK
4670 {
4671 .procname = "sched_util_clamp_min",
4672 .data = &sysctl_sched_uclamp_util_min,
4673 .maxlen = sizeof(unsigned int),
4674 .mode = 0644,
4675 .proc_handler = sysctl_sched_uclamp_handler,
4676 },
4677 {
4678 .procname = "sched_util_clamp_max",
4679 .data = &sysctl_sched_uclamp_util_max,
4680 .maxlen = sizeof(unsigned int),
4681 .mode = 0644,
4682 .proc_handler = sysctl_sched_uclamp_handler,
4683 },
4684 {
4685 .procname = "sched_util_clamp_min_rt_default",
4686 .data = &sysctl_sched_uclamp_util_min_rt_default,
4687 .maxlen = sizeof(unsigned int),
4688 .mode = 0644,
4689 .proc_handler = sysctl_sched_uclamp_handler,
4690 },
4691 #endif /* CONFIG_UCLAMP_TASK */
4692 #ifdef CONFIG_NUMA_BALANCING
4693 {
4694 .procname = "numa_balancing",
4695 .data = NULL, /* filled in by handler */
4696 .maxlen = sizeof(unsigned int),
4697 .mode = 0644,
4698 .proc_handler = sysctl_numa_balancing,
4699 .extra1 = SYSCTL_ZERO,
4700 .extra2 = SYSCTL_FOUR,
4701 },
4702 #endif /* CONFIG_NUMA_BALANCING */
4703 };
4704 static int __init sched_core_sysctl_init(void)
4705 {
4706 register_sysctl_init("kernel", sched_core_sysctls);
4707 return 0;
4708 }
4709 late_initcall(sched_core_sysctl_init);
4710 #endif /* CONFIG_SYSCTL */
4711
4712 /*
4713 * fork()/clone()-time setup:
4714 */
4715 int sched_fork(unsigned long clone_flags, struct task_struct *p)
4716 {
4717 __sched_fork(clone_flags, p);
4718 /*
4719 * We mark the process as NEW here. This guarantees that
4720 * nobody will actually run it, and a signal or other external
4721 * event cannot wake it up and insert it on the runqueue either.
4722 */
4723 p->__state = TASK_NEW;
4724
4725 /*
4726 * Make sure we do not leak PI boosting priority to the child.
4727 */
4728 p->prio = current->normal_prio;
4729
4730 uclamp_fork(p);
4731
4732 /*
4733 * Revert to default priority/policy on fork if requested.
4734 */
4735 if (unlikely(p->sched_reset_on_fork)) {
4736 if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
4737 p->policy = SCHED_NORMAL;
4738 p->static_prio = NICE_TO_PRIO(0);
4739 p->rt_priority = 0;
4740 } else if (PRIO_TO_NICE(p->static_prio) < 0)
4741 p->static_prio = NICE_TO_PRIO(0);
4742
4743 p->prio = p->normal_prio = p->static_prio;
4744 set_load_weight(p, false);
4745 p->se.custom_slice = 0;
4746 p->se.slice = sysctl_sched_base_slice;
4747
4748 /*
4749 * We don't need the reset flag anymore after the fork. It has
4750 * fulfilled its duty:
4751 */
4752 p->sched_reset_on_fork = 0;
4753 }
4754
4755 if (dl_prio(p->prio))
4756 return -EAGAIN;
4757
4758 scx_pre_fork(p);
4759
4760 if (rt_prio(p->prio)) {
4761 p->sched_class = &rt_sched_class;
4762 #ifdef CONFIG_SCHED_CLASS_EXT
4763 } else if (task_should_scx(p->policy)) {
4764 p->sched_class = &ext_sched_class;
4765 #endif
4766 } else {
4767 p->sched_class = &fair_sched_class;
4768 }
4769
4770 init_entity_runnable_average(&p->se);
4771
4772
4773 #ifdef CONFIG_SCHED_INFO
4774 if (likely(sched_info_on()))
4775 memset(&p->sched_info, 0, sizeof(p->sched_info));
4776 #endif
4777 #if defined(CONFIG_SMP)
4778 p->on_cpu = 0;
4779 #endif
4780 init_task_preempt_count(p);
4781 #ifdef CONFIG_SMP
4782 plist_node_init(&p->pushable_tasks, MAX_PRIO);
4783 RB_CLEAR_NODE(&p->pushable_dl_tasks);
4784 #endif
4785 return 0;
4786 }
4787
4788 int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
4789 {
4790 unsigned long flags;
4791
4792 /*
4793 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
4794 * required yet, but lockdep gets upset if rules are violated.
4795 */
4796 raw_spin_lock_irqsave(&p->pi_lock, flags);
4797 #ifdef CONFIG_CGROUP_SCHED
4798 if (1) {
4799 struct task_group *tg;
4800 tg = container_of(kargs->cset->subsys[cpu_cgrp_id],
4801 struct task_group, css);
4802 tg = autogroup_task_group(p, tg);
4803 p->sched_task_group = tg;
4804 }
4805 #endif
4806 rseq_migrate(p);
4807 /*
4808 * We're setting the CPU for the first time, we don't migrate,
4809 * so use __set_task_cpu().
4810 */
4811 __set_task_cpu(p, smp_processor_id());
4812 if (p->sched_class->task_fork)
4813 p->sched_class->task_fork(p);
4814 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4815
4816 return scx_fork(p);
4817 }
4818
4819 void sched_cancel_fork(struct task_struct *p)
4820 {
4821 scx_cancel_fork(p);
4822 }
4823
4824 void sched_post_fork(struct task_struct *p)
4825 {
4826 uclamp_post_fork(p);
4827 scx_post_fork(p);
4828 }
4829
4830 unsigned long to_ratio(u64 period, u64 runtime)
4831 {
4832 if (runtime == RUNTIME_INF)
4833 return BW_UNIT;
4834
4835 /*
4836 * Doing this here saves a lot of checks in all
4837 * the calling paths, and returning zero seems
4838 * safe for them anyway.
4839 */
4840 if (period == 0)
4841 return 0;
4842
4843 return div64_u64(runtime << BW_SHIFT, period);
4844 }
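/*
 * Worked example: with BW_SHIFT = 20 (BW_UNIT = 1 << 20), runtime = 500000
 * over period = 1000000 yields (500000 << 20) / 1000000 = 524288, i.e.
 * exactly BW_UNIT / 2 -- a 50% bandwidth ratio.
 */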
4845
4846 /*
4847 * wake_up_new_task - wake up a newly created task for the first time.
4848 *
4849 * This function will do some initial scheduler statistics housekeeping
4850 * that must be done for every newly created context, then puts the task
4851 * on the runqueue and wakes it.
4852 */
4853 void wake_up_new_task(struct task_struct *p)
4854 {
4855 struct rq_flags rf;
4856 struct rq *rq;
4857 int wake_flags = WF_FORK;
4858
4859 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
4860 WRITE_ONCE(p->__state, TASK_RUNNING);
4861 #ifdef CONFIG_SMP
4862 /*
4863 * Fork balancing, do it here and not earlier because:
4864 * - cpus_ptr can change in the fork path
4865 * - any previously selected CPU might disappear through hotplug
4866 *
4867 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
4868 * as we're not fully set-up yet.
4869 */
4870 p->recent_used_cpu = task_cpu(p);
4871 rseq_migrate(p);
4872 __set_task_cpu(p, select_task_rq(p, task_cpu(p), &wake_flags));
4873 #endif
4874 rq = __task_rq_lock(p, &rf);
4875 update_rq_clock(rq);
4876 post_init_entity_util_avg(p);
4877
4878 activate_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_INITIAL);
4879 trace_sched_wakeup_new(p);
4880 wakeup_preempt(rq, p, wake_flags);
4881 #ifdef CONFIG_SMP
4882 if (p->sched_class->task_woken) {
4883 /*
4884 * Nothing relies on rq->lock after this, so it's fine to
4885 * drop it.
4886 */
4887 rq_unpin_lock(rq, &rf);
4888 p->sched_class->task_woken(rq, p);
4889 rq_repin_lock(rq, &rf);
4890 }
4891 #endif
4892 task_rq_unlock(rq, p, &rf);
4893 }
4894
4895 #ifdef CONFIG_PREEMPT_NOTIFIERS
4896
4897 static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
4898
4899 void preempt_notifier_inc(void)
4900 {
4901 static_branch_inc(&preempt_notifier_key);
4902 }
4903 EXPORT_SYMBOL_GPL(preempt_notifier_inc);
4904
4905 void preempt_notifier_dec(void)
4906 {
4907 static_branch_dec(&preempt_notifier_key);
4908 }
4909 EXPORT_SYMBOL_GPL(preempt_notifier_dec);
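/*
 * Usage sketch (my_ops, my_sched_in and my_sched_out are hypothetical): a
 * user such as a hypervisor that wants callbacks around context switches of
 * the current task:
 *
 *	static struct preempt_ops my_ops = {
 *		.sched_in  = my_sched_in,	// (notifier, cpu)
 *		.sched_out = my_sched_out,	// (notifier, next)
 *	};
 *	struct preempt_notifier pn;
 *
 *	preempt_notifier_inc();			// enable the static key
 *	preempt_notifier_init(&pn, &my_ops);
 *	preempt_notifier_register(&pn);		// fires for current from now on
 */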
4910
4911 /**
4912 * preempt_notifier_register - tell me when current is being preempted & rescheduled
4913 * @notifier: notifier struct to register
4914 */
4915 void preempt_notifier_register(struct preempt_notifier *notifier)
4916 {
4917 if (!static_branch_unlikely(&preempt_notifier_key))
4918 WARN(1, "registering preempt_notifier while notifiers disabled\n");
4919
4920 hlist_add_head(&notifier->link, &current->preempt_notifiers);
4921 }
4922 EXPORT_SYMBOL_GPL(preempt_notifier_register);
4923
4924 /**
4925 * preempt_notifier_unregister - no longer interested in preemption notifications
4926 * @notifier: notifier struct to unregister
4927 *
4928 * This is *not* safe to call from within a preemption notifier.
4929 */
4930 void preempt_notifier_unregister(struct preempt_notifier *notifier)
4931 {
4932 hlist_del(&notifier->link);
4933 }
4934 EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
4935
4936 static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
4937 {
4938 struct preempt_notifier *notifier;
4939
4940 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
4941 notifier->ops->sched_in(notifier, raw_smp_processor_id());
4942 }
4943
4944 static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
4945 {
4946 if (static_branch_unlikely(&preempt_notifier_key))
4947 __fire_sched_in_preempt_notifiers(curr);
4948 }
4949
4950 static void
4951 __fire_sched_out_preempt_notifiers(struct task_struct *curr,
4952 struct task_struct *next)
4953 {
4954 struct preempt_notifier *notifier;
4955
4956 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
4957 notifier->ops->sched_out(notifier, next);
4958 }
4959
4960 static __always_inline void
4961 fire_sched_out_preempt_notifiers(struct task_struct *curr,
4962 struct task_struct *next)
4963 {
4964 if (static_branch_unlikely(&preempt_notifier_key))
4965 __fire_sched_out_preempt_notifiers(curr, next);
4966 }
4967
4968 #else /* !CONFIG_PREEMPT_NOTIFIERS */
4969
4970 static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
4971 {
4972 }
4973
4974 static inline void
4975 fire_sched_out_preempt_notifiers(struct task_struct *curr,
4976 struct task_struct *next)
4977 {
4978 }
4979
4980 #endif /* CONFIG_PREEMPT_NOTIFIERS */
4981
4982 static inline void prepare_task(struct task_struct *next)
4983 {
4984 #ifdef CONFIG_SMP
4985 /*
4986 * Claim the task as running, we do this before switching to it
4987 * such that any running task will have this set.
4988 *
4989 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and
4990 * its ordering comment.
4991 */
4992 WRITE_ONCE(next->on_cpu, 1);
4993 #endif
4994 }
4995
4996 static inline void finish_task(struct task_struct *prev)
4997 {
4998 #ifdef CONFIG_SMP
4999 /*
5000 * This must be the very last reference to @prev from this CPU. After
5001 * p->on_cpu is cleared, the task can be moved to a different CPU. We
5002 * must ensure this doesn't happen until the switch is completely
5003 * finished.
5004 *
5005 * In particular, the load of prev->state in finish_task_switch() must
5006 * happen before this.
5007 *
5008 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
5009 */
5010 smp_store_release(&prev->on_cpu, 0);
5011 #endif
5012 }
5013
5014 #ifdef CONFIG_SMP
5015
5016 static void do_balance_callbacks(struct rq *rq, struct balance_callback *head)
5017 {
5018 void (*func)(struct rq *rq);
5019 struct balance_callback *next;
5020
5021 lockdep_assert_rq_held(rq);
5022
5023 while (head) {
5024 func = (void (*)(struct rq *))head->func;
5025 next = head->next;
5026 head->next = NULL;
5027 head = next;
5028
5029 func(rq);
5030 }
5031 }
5032
5033 static void balance_push(struct rq *rq);
5034
5035 /*
5036 * balance_push_callback is a right abuse of the callback interface and plays
5037 * by significantly different rules.
5038 *
5039 * Where the normal balance_callback's purpose is to be run in the same context
5040 * that queued it (only later, when it's safe to drop rq->lock again),
5041 * balance_push_callback is specifically targeted at __schedule().
5042 *
5043 * This abuse is tolerated because it places all the unlikely/odd cases behind
5044 * a single test, namely: rq->balance_callback == NULL.
5045 */
5046 struct balance_callback balance_push_callback = {
5047 .next = NULL,
5048 .func = balance_push,
5049 };
5050
5051 static inline struct balance_callback *
5052 __splice_balance_callbacks(struct rq *rq, bool split)
5053 {
5054 struct balance_callback *head = rq->balance_callback;
5055
5056 if (likely(!head))
5057 return NULL;
5058
5059 lockdep_assert_rq_held(rq);
5060 /*
5061 * Must not take balance_push_callback off the list when
5062 * splice_balance_callbacks() and balance_callbacks() are not
5063 * in the same rq->lock section.
5064 *
5065 * In that case it would be possible for __schedule() to interleave
5066 * and observe the list empty.
5067 */
5068 if (split && head == &balance_push_callback)
5069 head = NULL;
5070 else
5071 rq->balance_callback = NULL;
5072
5073 return head;
5074 }
5075
5076 struct balance_callback *splice_balance_callbacks(struct rq *rq)
5077 {
5078 return __splice_balance_callbacks(rq, true);
5079 }
5080
5081 static void __balance_callbacks(struct rq *rq)
5082 {
5083 do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
5084 }
5085
5086 void balance_callbacks(struct rq *rq, struct balance_callback *head)
5087 {
5088 unsigned long flags;
5089
5090 if (unlikely(head)) {
5091 raw_spin_rq_lock_irqsave(rq, flags);
5092 do_balance_callbacks(rq, head);
5093 raw_spin_rq_unlock_irqrestore(rq, flags);
5094 }
5095 }
5096
5097 #else
5098
5099 static inline void __balance_callbacks(struct rq *rq)
5100 {
5101 }
5102
5103 #endif
5104
5105 static inline void
5106 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
5107 {
5108 /*
5109 * The runqueue lock will be released by the next
5110 * task (which is an invalid locking op but in the case
5111 * of the scheduler it's an obvious special-case), so we
5112 * do an early lockdep release here:
5113 */
5114 rq_unpin_lock(rq, rf);
5115 spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_);
5116 #ifdef CONFIG_DEBUG_SPINLOCK
5117 /* this is a valid case when another task releases the spinlock */
5118 rq_lockp(rq)->owner = next;
5119 #endif
5120 }
5121
5122 static inline void finish_lock_switch(struct rq *rq)
5123 {
5124 /*
5125 * If we are tracking spinlock dependencies then we have to
5126 * fix up the runqueue lock - which gets 'carried over' from
5127 * prev into current:
5128 */
5129 spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_);
5130 __balance_callbacks(rq);
5131 raw_spin_rq_unlock_irq(rq);
5132 }
5133
5134 /*
5135 * NOP if the arch has not defined these:
5136 */
5137
5138 #ifndef prepare_arch_switch
5139 # define prepare_arch_switch(next) do { } while (0)
5140 #endif
5141
5142 #ifndef finish_arch_post_lock_switch
5143 # define finish_arch_post_lock_switch() do { } while (0)
5144 #endif
5145
5146 static inline void kmap_local_sched_out(void)
5147 {
5148 #ifdef CONFIG_KMAP_LOCAL
5149 if (unlikely(current->kmap_ctrl.idx))
5150 __kmap_local_sched_out();
5151 #endif
5152 }
5153
5154 static inline void kmap_local_sched_in(void)
5155 {
5156 #ifdef CONFIG_KMAP_LOCAL
5157 if (unlikely(current->kmap_ctrl.idx))
5158 __kmap_local_sched_in();
5159 #endif
5160 }
5161
5162 /**
5163 * prepare_task_switch - prepare to switch tasks
5164 * @rq: the runqueue preparing to switch
5165 * @prev: the current task that is being switched out
5166 * @next: the task we are going to switch to.
5167 *
5168 * This is called with the rq lock held and interrupts off. It must
5169 * be paired with a subsequent finish_task_switch after the context
5170 * switch.
5171 *
5172 * prepare_task_switch sets up locking and calls architecture specific
5173 * hooks.
5174 */
5175 static inline void
5176 prepare_task_switch(struct rq *rq, struct task_struct *prev,
5177 struct task_struct *next)
5178 {
5179 kcov_prepare_switch(prev);
5180 sched_info_switch(rq, prev, next);
5181 perf_event_task_sched_out(prev, next);
5182 rseq_preempt(prev);
5183 fire_sched_out_preempt_notifiers(prev, next);
5184 kmap_local_sched_out();
5185 prepare_task(next);
5186 prepare_arch_switch(next);
5187 }
5188
5189 /**
5190 * finish_task_switch - clean up after a task-switch
5191 * @prev: the thread we just switched away from.
5192 *
5193 * finish_task_switch must be called after the context switch, paired
5194 * with a prepare_task_switch call before the context switch.
5195 * finish_task_switch will reconcile locking set up by prepare_task_switch,
5196 * and do any other architecture-specific cleanup actions.
5197 *
5198 * Note that we may have delayed dropping an mm in context_switch(). If
5199 * so, we finish that here outside of the runqueue lock. (Doing it
5200 * with the lock held can cause deadlocks; see schedule() for
5201 * details.)
5202 *
5203 * The context switch has flipped the stack from under us and restored the
5204 * local variables which were saved when this task called schedule() in the
5205 * past. 'prev == current' is still correct but we need to recalculate this_rq
5206 * because prev may have moved to another CPU.
5207 */
5208 static struct rq *finish_task_switch(struct task_struct *prev)
5209 __releases(rq->lock)
5210 {
5211 struct rq *rq = this_rq();
5212 struct mm_struct *mm = rq->prev_mm;
5213 unsigned int prev_state;
5214
5215 /*
5216 * The previous task will have left us with a preempt_count of 2
5217 * because it left us after:
5218 *
5219 * schedule()
5220 * preempt_disable(); // 1
5221 * __schedule()
5222 * raw_spin_lock_irq(&rq->lock) // 2
5223 *
5224 * Also, see FORK_PREEMPT_COUNT.
5225 */
5226 if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
5227 "corrupted preempt_count: %s/%d/0x%x\n",
5228 current->comm, current->pid, preempt_count()))
5229 preempt_count_set(FORK_PREEMPT_COUNT);
5230
5231 rq->prev_mm = NULL;
5232
5233 /*
5234 * A task struct has one reference for the use as "current".
5235 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
5236 * schedule one last time. The schedule call will never return, and
5237 * the scheduled task must drop that reference.
5238 *
5239 * We must observe prev->state before clearing prev->on_cpu (in
5240 * finish_task), otherwise a concurrent wakeup can get prev
5241 * running on another CPU and we could race with its RUNNING -> DEAD
5242 * transition, resulting in a double drop.
5243 */
5244 prev_state = READ_ONCE(prev->__state);
5245 vtime_task_switch(prev);
5246 perf_event_task_sched_in(prev, current);
5247 finish_task(prev);
5248 tick_nohz_task_switch();
5249 finish_lock_switch(rq);
5250 finish_arch_post_lock_switch();
5251 kcov_finish_switch(current);
5252 /*
5253 * kmap_local_sched_out() is invoked with rq::lock held and
5254 * interrupts disabled. There is no requirement for that, but the
5255 * sched out code does not have an interrupt enabled section.
5256 * Restoring the maps on sched in does not require interrupts being
5257 * disabled either.
5258 */
5259 kmap_local_sched_in();
5260
5261 fire_sched_in_preempt_notifiers(current);
5262 /*
5263 * When switching through a kernel thread, the loop in
5264 * membarrier_{private,global}_expedited() may have observed that
5265 * kernel thread and not issued an IPI. It is therefore possible to
5266 * schedule between user->kernel->user threads without passing through
5267 * switch_mm(). Membarrier requires a barrier after storing to
5268 * rq->curr, before returning to userspace, so provide them here:
5269 *
5270 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
5271 * provided by mmdrop_lazy_tlb(),
5272 * - a sync_core for SYNC_CORE.
5273 */
5274 if (mm) {
5275 membarrier_mm_sync_core_before_usermode(mm);
5276 mmdrop_lazy_tlb_sched(mm);
5277 }
5278
5279 if (unlikely(prev_state == TASK_DEAD)) {
5280 if (prev->sched_class->task_dead)
5281 prev->sched_class->task_dead(prev);
5282
5283 /* Task is done with its stack. */
5284 put_task_stack(prev);
5285
5286 put_task_struct_rcu_user(prev);
5287 }
5288
5289 return rq;
5290 }
5291
5292 /**
5293 * schedule_tail - first thing a freshly forked thread must call.
5294 * @prev: the thread we just switched away from.
5295 */
5296 asmlinkage __visible void schedule_tail(struct task_struct *prev)
5297 __releases(rq->lock)
5298 {
5299 /*
5300 * New tasks start with FORK_PREEMPT_COUNT, see there and
5301 * finish_task_switch() for details.
5302 *
5303 * finish_task_switch() will drop rq->lock() and lower preempt_count
5304 * and the preempt_enable() will end up enabling preemption (on
5305 * PREEMPT_COUNT kernels).
5306 */
5307
5308 finish_task_switch(prev);
5309 preempt_enable();
5310
5311 if (current->set_child_tid)
5312 put_user(task_pid_vnr(current), current->set_child_tid);
5313
5314 calculate_sigpending();
5315 }
5316
5317 /*
5318 * context_switch - switch to the new MM and the new thread's register state.
5319 */
5320 static __always_inline struct rq *
5321 context_switch(struct rq *rq, struct task_struct *prev,
5322 struct task_struct *next, struct rq_flags *rf)
5323 {
5324 prepare_task_switch(rq, prev, next);
5325
5326 /*
5327 * For paravirt, this is coupled with an exit in switch_to to
5328 * combine the page table reload and the switch backend into
5329 * one hypercall.
5330 */
5331 arch_start_context_switch(prev);
5332
5333 /*
5334 * kernel -> kernel lazy + transfer active
5335 * user -> kernel lazy + mmgrab_lazy_tlb() active
5336 *
5337 * kernel -> user switch + mmdrop_lazy_tlb() active
5338 * user -> user switch
5339 *
5340 * switch_mm_cid() needs to be updated if the barriers provided
5341 * by context_switch() are modified.
5342 */
5343 if (!next->mm) { // to kernel
5344 enter_lazy_tlb(prev->active_mm, next);
5345
5346 next->active_mm = prev->active_mm;
5347 if (prev->mm) // from user
5348 mmgrab_lazy_tlb(prev->active_mm);
5349 else
5350 prev->active_mm = NULL;
5351 } else { // to user
5352 membarrier_switch_mm(rq, prev->active_mm, next->mm);
5353 /*
5354 * sys_membarrier() requires an smp_mb() between setting
5355 * rq->curr / membarrier_switch_mm() and returning to userspace.
5356 *
5357 * The below provides this either through switch_mm(), or in
5358 * case 'prev->active_mm == next->mm' through
5359 * finish_task_switch()'s mmdrop().
5360 */
5361 switch_mm_irqs_off(prev->active_mm, next->mm, next);
5362 lru_gen_use_mm(next->mm);
5363
5364 if (!prev->mm) { // from kernel
5365 /* will mmdrop_lazy_tlb() in finish_task_switch(). */
5366 rq->prev_mm = prev->active_mm;
5367 prev->active_mm = NULL;
5368 }
5369 }
5370
5371 /* switch_mm_cid() requires the memory barriers above. */
5372 switch_mm_cid(rq, prev, next);
5373
5374 prepare_lock_switch(rq, next, rf);
5375
5376 /* Here we just switch the register state and the stack. */
5377 switch_to(prev, next, prev);
5378 barrier();
5379
5380 return finish_task_switch(prev);
5381 }
5382
5383 /*
5384 * nr_running and nr_context_switches:
5385 *
5386 * externally visible scheduler statistics: current number of runnable
5387 * threads, total number of context switches performed since bootup.
5388 */
5389 unsigned int nr_running(void)
5390 {
5391 unsigned int i, sum = 0;
5392
5393 for_each_online_cpu(i)
5394 sum += cpu_rq(i)->nr_running;
5395
5396 return sum;
5397 }
5398
5399 /*
5400 * Check if only the current task is running on the CPU.
5401 *
5402 * Caution: this function does not check that the caller has disabled
5403 * preemption, thus the result might have a time-of-check-to-time-of-use
5404 * race. The caller is responsible for using it correctly, for example:
5405 *
5406 * - from a non-preemptible section (of course)
5407 *
5408 * - from a thread that is bound to a single CPU
5409 *
5410 * - in a loop with very short iterations (e.g. a polling loop)
5411 */
5412 bool single_task_running(void)
5413 {
5414 return raw_rq()->nr_running == 1;
5415 }
5416 EXPORT_SYMBOL(single_task_running);
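/*
 * An illustrative polling loop in the spirit of the caveats above
 * ('condition' and 'timeout' are hypothetical):
 *
 *	while (!condition && single_task_running() &&
 *	       time_before(jiffies, timeout))
 *		cpu_relax();
 *
 * The result is only trustworthy because every iteration re-checks it.
 */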
5417
5418 unsigned long long nr_context_switches_cpu(int cpu)
5419 {
5420 return cpu_rq(cpu)->nr_switches;
5421 }
5422
5423 unsigned long long nr_context_switches(void)
5424 {
5425 int i;
5426 unsigned long long sum = 0;
5427
5428 for_each_possible_cpu(i)
5429 sum += cpu_rq(i)->nr_switches;
5430
5431 return sum;
5432 }
5433
5434 /*
5435 * Consumers of these two interfaces, like for example the cpuidle menu
5436 * governor, are using nonsensical data: they prefer shallow idle states
5437 * for a CPU that has IO-wait pending, even though that CPU might not even
5438 * end up running the task when it does become runnable.
5439 */
5440
5441 unsigned int nr_iowait_cpu(int cpu)
5442 {
5443 return atomic_read(&cpu_rq(cpu)->nr_iowait);
5444 }
5445
5446 /*
5447 * IO-wait accounting, and how it's mostly bollocks (on SMP).
5448 *
5449 * The idea behind IO-wait accounting is to account the idle time that we could
5450 * have spent running if it were not for IO. That is, if we were to improve the
5451 * storage performance, we'd have a proportional reduction in IO-wait time.
5452 *
5453 * This all works nicely on UP, where, when a task blocks on IO, we account
5454 * idle time as IO-wait, because if the storage were faster, it could've been
5455 * running and we'd not be idle.
5456 *
5457 * This has been extended to SMP, by doing the same for each CPU. This however
5458 * is broken.
5459 *
5460 * Imagine for instance the case where two tasks block on one CPU, only the one
5461 * CPU will have IO-wait accounted, while the other has regular idle. Even
5462 * though, if the storage were faster, both could have run at the same time,
5463 * utilising both CPUs.
5464 *
5465 * This means that, when looking globally, the current IO-wait accounting on
5466 * SMP is a lower bound, due to under-accounting.
5467 *
5468 * Worse, since the numbers are provided per CPU, they are sometimes
5469 * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
5470 * associated with any one particular CPU; it can wake up on a different CPU
5471 * than the one it blocked on. This means the per-CPU IO-wait number is meaningless.
5472 *
5473 * Task CPU affinities can make all that even more 'interesting'.
5474 */
5475
5476 unsigned int nr_iowait(void)
5477 {
5478 unsigned int i, sum = 0;
5479
5480 for_each_possible_cpu(i)
5481 sum += nr_iowait_cpu(i);
5482
5483 return sum;
5484 }
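/*
 * To make the under-accounting described above concrete: two tasks blocked
 * on IO from CPU0 make nr_iowait() report 2, yet only CPU0's idle time is
 * tagged as IO-wait; CPU1 idles 'normally' even though faster storage
 * could have kept it busy too.
 */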
5485
5486 #ifdef CONFIG_SMP
5487
5488 /*
5489 * sched_exec - execve() is a valuable balancing opportunity, because at
5490 * this point the task has the smallest effective memory and cache footprint.
5491 */
5492 void sched_exec(void)
5493 {
5494 struct task_struct *p = current;
5495 struct migration_arg arg;
5496 int dest_cpu;
5497
5498 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
5499 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC);
5500 if (dest_cpu == smp_processor_id())
5501 return;
5502
5503 if (unlikely(!cpu_active(dest_cpu)))
5504 return;
5505
5506 arg = (struct migration_arg){ p, dest_cpu };
5507 }
5508 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
5509 }
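/*
 * The expected caller lives in the execve() path; schematically
 * (paraphrasing fs/exec.c):
 *
 *	sched_exec();
 *	... set up the new mm and load the binary ...
 *
 * so the migration happens while the task's footprint is minimal.
 */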
5510
5511 #endif
5512
5513 DEFINE_PER_CPU(struct kernel_stat, kstat);
5514 DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
5515
5516 EXPORT_PER_CPU_SYMBOL(kstat);
5517 EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
5518
5519 /*
5520 * The function fair_sched_class.update_curr accesses the struct curr
5521 * and its field curr->exec_start; when called from task_sched_runtime(),
5522 * we observe a high rate of cache misses in practice.
5523 * Prefetching this data results in improved performance.
5524 */
5525 static inline void prefetch_curr_exec_start(struct task_struct *p)
5526 {
5527 #ifdef CONFIG_FAIR_GROUP_SCHED
5528 struct sched_entity *curr = p->se.cfs_rq->curr;
5529 #else
5530 struct sched_entity *curr = task_rq(p)->cfs.curr;
5531 #endif
5532 prefetch(curr);
5533 prefetch(&curr->exec_start);
5534 }
5535
5536 /*
5537 * Return accounted runtime for the task.
5538 * In case the task is currently running, return the runtime plus current's
5539 * pending runtime that has not been accounted yet.
5540 */
5541 unsigned long long task_sched_runtime(struct task_struct *p)
5542 {
5543 struct rq_flags rf;
5544 struct rq *rq;
5545 u64 ns;
5546
5547 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
5548 /*
5549 * 64-bit doesn't need locks to atomically read a 64-bit value.
5550 * So we have an optimization opportunity when the task's delta_exec is 0.
5551 * Reading ->on_cpu is racy, but this is OK.
5552 *
5553 * If we race with it leaving CPU, we'll take a lock. So we're correct.
5554 * If we race with it entering CPU, unaccounted time is 0. This is
5555 * indistinguishable from the read occurring a few cycles earlier.
5556 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
5557 * been accounted, so we're correct here as well.
5558 */
5559 if (!p->on_cpu || !task_on_rq_queued(p))
5560 return p->se.sum_exec_runtime;
5561 #endif
5562
5563 rq = task_rq_lock(p, &rf);
5564 /*
5565 * Must be ->curr _and_ ->on_rq. If dequeued, we would
5566 * project cycles that may never be accounted to this
5567 * thread, breaking clock_gettime().
5568 */
5569 if (task_current_donor(rq, p) && task_on_rq_queued(p)) {
5570 prefetch_curr_exec_start(p);
5571 update_rq_clock(rq);
5572 p->sched_class->update_curr(rq);
5573 }
5574 ns = p->se.sum_exec_runtime;
5575 task_rq_unlock(rq, p, &rf);
5576
5577 return ns;
5578 }
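/*
 * For reference, userspace typically reaches this via the thread CPU-time
 * clock; an illustrative (hypothetical) userspace caller:
 *
 *	struct timespec ts;
 *	clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
 *
 * which samples the accounted runtime plus any pending delta.
 */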
5579
5580 #ifdef CONFIG_SCHED_DEBUG
5581 static u64 cpu_resched_latency(struct rq *rq)
5582 {
5583 int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms);
5584 u64 resched_latency, now = rq_clock(rq);
5585 static bool warned_once;
5586
5587 if (sysctl_resched_latency_warn_once && warned_once)
5588 return 0;
5589
5590 if (!need_resched() || !latency_warn_ms)
5591 return 0;
5592
5593 if (system_state == SYSTEM_BOOTING)
5594 return 0;
5595
5596 if (!rq->last_seen_need_resched_ns) {
5597 rq->last_seen_need_resched_ns = now;
5598 rq->ticks_without_resched = 0;
5599 return 0;
5600 }
5601
5602 rq->ticks_without_resched++;
5603 resched_latency = now - rq->last_seen_need_resched_ns;
5604 if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC)
5605 return 0;
5606
5607 warned_once = true;
5608
5609 return resched_latency;
5610 }
5611
5612 static int __init setup_resched_latency_warn_ms(char *str)
5613 {
5614 long val;
5615
5616 if ((kstrtol(str, 0, &val))) {
5617 pr_warn("Unable to set resched_latency_warn_ms\n");
5618 return 1;
5619 }
5620
5621 sysctl_resched_latency_warn_ms = val;
5622 return 1;
5623 }
5624 __setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms);
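/*
 * For example, booting with "resched_latency_warn_ms=200" (an arbitrary
 * illustrative value) raises the warning threshold to 200ms, while "=0"
 * disables the warning via the !latency_warn_ms check above.
 */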
5625 #else
5626 static inline u64 cpu_resched_latency(struct rq *rq) { return 0; }
5627 #endif /* CONFIG_SCHED_DEBUG */
5628
5629 /*
5630 * This function gets called by the timer code, with HZ frequency.
5631 * We call it with interrupts disabled.
5632 */
5633 void sched_tick(void)
5634 {
5635 int cpu = smp_processor_id();
5636 struct rq *rq = cpu_rq(cpu);
5637 /* accounting goes to the donor task */
5638 struct task_struct *donor;
5639 struct rq_flags rf;
5640 unsigned long hw_pressure;
5641 u64 resched_latency;
5642
5643 if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE))
5644 arch_scale_freq_tick();
5645
5646 sched_clock_tick();
5647
5648 rq_lock(rq, &rf);
5649 donor = rq->donor;
5650
5651 psi_account_irqtime(rq, donor, NULL);
5652
5653 update_rq_clock(rq);
5654 hw_pressure = arch_scale_hw_pressure(cpu_of(rq));
5655 update_hw_load_avg(rq_clock_task(rq), rq, hw_pressure);
5656
5657 if (dynamic_preempt_lazy() && tif_test_bit(TIF_NEED_RESCHED_LAZY))
5658 resched_curr(rq);
5659
5660 donor->sched_class->task_tick(rq, donor, 0);
5661 if (sched_feat(LATENCY_WARN))
5662 resched_latency = cpu_resched_latency(rq);
5663 calc_global_load_tick(rq);
5664 sched_core_tick(rq);
5665 task_tick_mm_cid(rq, donor);
5666 scx_tick(rq);
5667
5668 rq_unlock(rq, &rf);
5669
5670 if (sched_feat(LATENCY_WARN) && resched_latency)
5671 resched_latency_warn(cpu, resched_latency);
5672
5673 perf_event_task_tick();
5674
5675 if (donor->flags & PF_WQ_WORKER)
5676 wq_worker_tick(donor);
5677
5678 #ifdef CONFIG_SMP
5679 if (!scx_switched_all()) {
5680 rq->idle_balance = idle_cpu(cpu);
5681 sched_balance_trigger(rq);
5682 }
5683 #endif
5684 }
5685
5686 #ifdef CONFIG_NO_HZ_FULL
5687
5688 struct tick_work {
5689 int cpu;
5690 atomic_t state;
5691 struct delayed_work work;
5692 };
5693 /* Values for ->state, see diagram below. */
5694 #define TICK_SCHED_REMOTE_OFFLINE 0
5695 #define TICK_SCHED_REMOTE_OFFLINING 1
5696 #define TICK_SCHED_REMOTE_RUNNING 2
5697
5698 /*
5699 * State diagram for ->state:
5700 *
5701 *
5702 * TICK_SCHED_REMOTE_OFFLINE
5703 * | ^
5704 * | |
5705 * | | sched_tick_remote()
5706 * | |
5707 * | |
5708 * +--TICK_SCHED_REMOTE_OFFLINING
5709 * | ^
5710 * | |
5711 * sched_tick_start() | | sched_tick_stop()
5712 * | |
5713 * V |
5714 * TICK_SCHED_REMOTE_RUNNING
5715 *
5716 *
5717 * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
5718 * and sched_tick_start() are happy to leave the state in RUNNING.
5719 */
5720
5721 static struct tick_work __percpu *tick_work_cpu;
5722
5723 static void sched_tick_remote(struct work_struct *work)
5724 {
5725 struct delayed_work *dwork = to_delayed_work(work);
5726 struct tick_work *twork = container_of(dwork, struct tick_work, work);
5727 int cpu = twork->cpu;
5728 struct rq *rq = cpu_rq(cpu);
5729 int os;
5730
5731 /*
5732 * Handle the tick only if it appears the remote CPU is running in full
5733 * dynticks mode. The check is racy by nature, but missing a tick or
5734 * having one too many is no big deal because the scheduler tick updates
5735 * statistics and checks timeslices in a time-independent way, regardless
5736 * of when exactly it is running.
5737 */
5738 if (tick_nohz_tick_stopped_cpu(cpu)) {
5739 guard(rq_lock_irq)(rq);
5740 struct task_struct *curr = rq->curr;
5741
5742 if (cpu_online(cpu)) {
5743 /*
5744 * Since this is a remote tick for full dynticks mode,
5745 * we are always sure that there is no proxy (only a
5746 * single task is running).
5747 */
5748 SCHED_WARN_ON(rq->curr != rq->donor);
5749 update_rq_clock(rq);
5750
5751 if (!is_idle_task(curr)) {
5752 /*
5753 * Make sure the next tick runs within a
5754 * reasonable amount of time.
5755 */
5756 u64 delta = rq_clock_task(rq) - curr->se.exec_start;
5757 WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
5758 }
5759 curr->sched_class->task_tick(rq, curr, 0);
5760
5761 calc_load_nohz_remote(rq);
5762 }
5763 }
5764
5765 /*
5766 * Run the remote tick once per second (1Hz). This arbitrary
5767 * frequency is low enough to avoid overload but high enough
5768 * to keep scheduler internal stats reasonably up to date. But
5769 * first update state to reflect hotplug activity if required.
5770 */
5771 os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
5772 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
5773 if (os == TICK_SCHED_REMOTE_RUNNING)
5774 queue_delayed_work(system_unbound_wq, dwork, HZ);
5775 }
5776
5777 static void sched_tick_start(int cpu)
5778 {
5779 int os;
5780 struct tick_work *twork;
5781
5782 if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE))
5783 return;
5784
5785 WARN_ON_ONCE(!tick_work_cpu);
5786
5787 twork = per_cpu_ptr(tick_work_cpu, cpu);
5788 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
5789 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
5790 if (os == TICK_SCHED_REMOTE_OFFLINE) {
5791 twork->cpu = cpu;
5792 INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
5793 queue_delayed_work(system_unbound_wq, &twork->work, HZ);
5794 }
5795 }
5796
5797 #ifdef CONFIG_HOTPLUG_CPU
5798 static void sched_tick_stop(int cpu)
5799 {
5800 struct tick_work *twork;
5801 int os;
5802
5803 if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE))
5804 return;
5805
5806 WARN_ON_ONCE(!tick_work_cpu);
5807
5808 twork = per_cpu_ptr(tick_work_cpu, cpu);
5809 /* There cannot be competing actions, but don't rely on stop-machine. */
5810 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
5811 WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
5812 /* Don't cancel, as this would mess up the state machine. */
5813 }
5814 #endif /* CONFIG_HOTPLUG_CPU */
5815
5816 int __init sched_tick_offload_init(void)
5817 {
5818 tick_work_cpu = alloc_percpu(struct tick_work);
5819 BUG_ON(!tick_work_cpu);
5820 return 0;
5821 }
5822
5823 #else /* !CONFIG_NO_HZ_FULL */
5824 static inline void sched_tick_start(int cpu) { }
5825 static inline void sched_tick_stop(int cpu) { }
5826 #endif
5827
5828 #if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
5829 defined(CONFIG_TRACE_PREEMPT_TOGGLE))
5830 /*
5831 * If the value passed in is equal to the current preempt count
5832 * then we just disabled preemption. Start timing the latency.
5833 */
5834 static inline void preempt_latency_start(int val)
5835 {
5836 if (preempt_count() == val) {
5837 unsigned long ip = get_lock_parent_ip();
5838 #ifdef CONFIG_DEBUG_PREEMPT
5839 current->preempt_disable_ip = ip;
5840 #endif
5841 trace_preempt_off(CALLER_ADDR0, ip);
5842 }
5843 }
5844
5845 void preempt_count_add(int val)
5846 {
5847 #ifdef CONFIG_DEBUG_PREEMPT
5848 /*
5849 * Underflow?
5850 */
5851 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
5852 return;
5853 #endif
5854 __preempt_count_add(val);
5855 #ifdef CONFIG_DEBUG_PREEMPT
5856 /*
5857 * Spinlock count overflowing soon?
5858 */
5859 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
5860 PREEMPT_MASK - 10);
5861 #endif
5862 preempt_latency_start(val);
5863 }
5864 EXPORT_SYMBOL(preempt_count_add);
5865 NOKPROBE_SYMBOL(preempt_count_add);
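/*
 * On kernels built with the options guarding this block, preempt_disable()
 * expands (via preempt_count_inc()) to preempt_count_add(1), so each
 * outermost disable starts the latency timing above and the matching
 * preempt_count_sub(1) below stops it.
 */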
5866
5867 /*
5868 * If the value passed in equals the current preempt count
5869 * then we just enabled preemption. Stop timing the latency.
5870 */
5871 static inline void preempt_latency_stop(int val)
5872 {
5873 if (preempt_count() == val)
5874 trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
5875 }
5876
5877 void preempt_count_sub(int val)
5878 {
5879 #ifdef CONFIG_DEBUG_PREEMPT
5880 /*
5881 * Underflow?
5882 */
5883 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
5884 return;
5885 /*
5886 * Is the spinlock portion underflowing?
5887 */
5888 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
5889 !(preempt_count() & PREEMPT_MASK)))
5890 return;
5891 #endif
5892
5893 preempt_latency_stop(val);
5894 __preempt_count_sub(val);
5895 }
5896 EXPORT_SYMBOL(preempt_count_sub);
5897 NOKPROBE_SYMBOL(preempt_count_sub);
5898
5899 #else
5900 static inline void preempt_latency_start(int val) { }
5901 static inline void preempt_latency_stop(int val) { }
5902 #endif
5903
5904 static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
5905 {
5906 #ifdef CONFIG_DEBUG_PREEMPT
5907 return p->preempt_disable_ip;
5908 #else
5909 return 0;
5910 #endif
5911 }
5912
5913 /*
5914 * Print scheduling while atomic bug:
5915 */
5916 static noinline void __schedule_bug(struct task_struct *prev)
5917 {
5918 /* Save this before calling printk(), since that will clobber it */
5919 unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
5920
5921 if (oops_in_progress)
5922 return;
5923
5924 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
5925 prev->comm, prev->pid, preempt_count());
5926
5927 debug_show_held_locks(prev);
5928 print_modules();
5929 if (irqs_disabled())
5930 print_irqtrace_events(prev);
5931 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
5932 pr_err("Preemption disabled at:");
5933 print_ip_sym(KERN_ERR, preempt_disable_ip);
5934 }
5935 check_panic_on_warn("scheduling while atomic");
5936
5937 dump_stack();
5938 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
5939 }
5940
5941 /*
5942 * Various schedule()-time debugging checks and statistics:
5943 */
5944 static inline void schedule_debug(struct task_struct *prev, bool preempt)
5945 {
5946 #ifdef CONFIG_SCHED_STACK_END_CHECK
5947 if (task_stack_end_corrupted(prev))
5948 panic("corrupted stack end detected inside scheduler\n");
5949
5950 if (task_scs_end_corrupted(prev))
5951 panic("corrupted shadow stack detected inside scheduler\n");
5952 #endif
5953
5954 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
5955 if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) {
5956 printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
5957 prev->comm, prev->pid, prev->non_block_count);
5958 dump_stack();
5959 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
5960 }
5961 #endif
5962
5963 if (unlikely(in_atomic_preempt_off())) {
5964 __schedule_bug(prev);
5965 preempt_count_set(PREEMPT_DISABLED);
5966 }
5967 rcu_sleep_check();
5968 SCHED_WARN_ON(ct_state() == CT_STATE_USER);
5969
5970 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
5971
5972 schedstat_inc(this_rq()->sched_count);
5973 }
5974
5975 static void prev_balance(struct rq *rq, struct task_struct *prev,
5976 struct rq_flags *rf)
5977 {
5978 const struct sched_class *start_class = prev->sched_class;
5979 const struct sched_class *class;
5980
5981 #ifdef CONFIG_SCHED_CLASS_EXT
5982 /*
5983 * SCX requires a balance() call before every pick_task() including when
5984 * waking up from SCHED_IDLE. If @start_class is below SCX, start from
5985 * SCX instead. Also, set a flag to detect missing balance() call.
5986 */
5987 if (scx_enabled()) {
5988 rq->scx.flags |= SCX_RQ_BAL_PENDING;
5989 if (sched_class_above(&ext_sched_class, start_class))
5990 start_class = &ext_sched_class;
5991 }
5992 #endif
5993
5994 /*
5995 * We must do the balancing pass before put_prev_task(), such
5996 * that when we release the rq->lock the task is in the same
5997 * state as before we took rq->lock.
5998 *
5999 * We can terminate the balance pass as soon as we know there is
6000 * a runnable task of @class priority or higher.
6001 */
6002 for_active_class_range(class, start_class, &idle_sched_class) {
6003 if (class->balance && class->balance(rq, prev, rf))
6004 break;
6005 }
6006 }
6007
6008 /*
6009 * Pick up the highest-prio task:
6010 */
6011 static inline struct task_struct *
6012 __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6013 {
6014 const struct sched_class *class;
6015 struct task_struct *p;
6016
6017 rq->dl_server = NULL;
6018
6019 if (scx_enabled())
6020 goto restart;
6021
6022 /*
6023 * Optimization: we know that if all tasks are in the fair class we can
6024 * call that function directly, but only if the @prev task wasn't of a
6025 * higher scheduling class, because otherwise those classes lose the
6026 * opportunity to pull in more work from other CPUs.
6027 */
6028 if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) &&
6029 rq->nr_running == rq->cfs.h_nr_queued)) {
6030
6031 p = pick_next_task_fair(rq, prev, rf);
6032 if (unlikely(p == RETRY_TASK))
6033 goto restart;
6034
6035 /* Assume the next prioritized class is idle_sched_class */
6036 if (!p) {
6037 p = pick_task_idle(rq);
6038 put_prev_set_next_task(rq, prev, p);
6039 }
6040
6041 return p;
6042 }
6043
6044 restart:
6045 prev_balance(rq, prev, rf);
6046
6047 for_each_active_class(class) {
6048 if (class->pick_next_task) {
6049 p = class->pick_next_task(rq, prev);
6050 if (p)
6051 return p;
6052 } else {
6053 p = class->pick_task(rq);
6054 if (p) {
6055 put_prev_set_next_task(rq, prev, p);
6056 return p;
6057 }
6058 }
6059 }
6060
6061 BUG(); /* The idle class should always have a runnable task. */
6062 }
6063
6064 #ifdef CONFIG_SCHED_CORE
6065 static inline bool is_task_rq_idle(struct task_struct *t)
6066 {
6067 return (task_rq(t)->idle == t);
6068 }
6069
6070 static inline bool cookie_equals(struct task_struct *a, unsigned long cookie)
6071 {
6072 return is_task_rq_idle(a) || (a->core_cookie == cookie);
6073 }
6074
6075 static inline bool cookie_match(struct task_struct *a, struct task_struct *b)
6076 {
6077 if (is_task_rq_idle(a) || is_task_rq_idle(b))
6078 return true;
6079
6080 return a->core_cookie == b->core_cookie;
6081 }
6082
6083 static inline struct task_struct *pick_task(struct rq *rq)
6084 {
6085 const struct sched_class *class;
6086 struct task_struct *p;
6087
6088 rq->dl_server = NULL;
6089
6090 for_each_active_class(class) {
6091 p = class->pick_task(rq);
6092 if (p)
6093 return p;
6094 }
6095
6096 BUG(); /* The idle class should always have a runnable task. */
6097 }
6098
6099 extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
6100
6101 static void queue_core_balance(struct rq *rq);
6102
6103 static struct task_struct *
6104 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6105 {
6106 struct task_struct *next, *p, *max = NULL;
6107 const struct cpumask *smt_mask;
6108 bool fi_before = false;
6109 bool core_clock_updated = (rq == rq->core);
6110 unsigned long cookie;
6111 int i, cpu, occ = 0;
6112 struct rq *rq_i;
6113 bool need_sync;
6114
6115 if (!sched_core_enabled(rq))
6116 return __pick_next_task(rq, prev, rf);
6117
6118 cpu = cpu_of(rq);
6119
6120 /* Stopper task is switching into idle, no need for core-wide selection. */
6121 if (cpu_is_offline(cpu)) {
6122 /*
6123 * Reset core_pick so that we don't enter the fastpath when
6124 * coming online. core_pick would already be migrated to
6125 * another cpu during offline.
6126 */
6127 rq->core_pick = NULL;
6128 rq->core_dl_server = NULL;
6129 return __pick_next_task(rq, prev, rf);
6130 }
6131
6132 /*
6133 * If there were no {en,de}queues since we picked (IOW, the task
6134 * pointers are all still valid), and we haven't scheduled the last
6135 * pick yet, do so now.
6136 *
6137 * rq->core_pick can be NULL if no selection was made for a CPU because
6138 * it was either offline or went offline during a sibling's core-wide
6139 * selection. In this case, do a core-wide selection.
6140 */
6141 if (rq->core->core_pick_seq == rq->core->core_task_seq &&
6142 rq->core->core_pick_seq != rq->core_sched_seq &&
6143 rq->core_pick) {
6144 WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq);
6145
6146 next = rq->core_pick;
6147 rq->dl_server = rq->core_dl_server;
6148 rq->core_pick = NULL;
6149 rq->core_dl_server = NULL;
6150 goto out_set_next;
6151 }
6152
6153 prev_balance(rq, prev, rf);
6154
6155 smt_mask = cpu_smt_mask(cpu);
6156 need_sync = !!rq->core->core_cookie;
6157
6158 /* reset state */
6159 rq->core->core_cookie = 0UL;
6160 if (rq->core->core_forceidle_count) {
6161 if (!core_clock_updated) {
6162 update_rq_clock(rq->core);
6163 core_clock_updated = true;
6164 }
6165 sched_core_account_forceidle(rq);
6166 /* reset after accounting force idle */
6167 rq->core->core_forceidle_start = 0;
6168 rq->core->core_forceidle_count = 0;
6169 rq->core->core_forceidle_occupation = 0;
6170 need_sync = true;
6171 fi_before = true;
6172 }
6173
6174 /*
6175 * core->core_task_seq, core->core_pick_seq, rq->core_sched_seq
6176 *
6177 * @task_seq guards the task state ({en,de}queues)
6178 * @pick_seq is the @task_seq we did a selection on
6179 * @sched_seq is the @pick_seq we scheduled
6180 *
6181 * However, preemptions can cause multiple picks on the same task set.
6182 * 'Fix' this by also increasing @task_seq for every pick.
6183 */
6184 rq->core->core_task_seq++;
6185
6186 /*
6187 * Optimize for common case where this CPU has no cookies
6188 * and there are no cookied tasks running on siblings.
6189 */
6190 if (!need_sync) {
6191 next = pick_task(rq);
6192 if (!next->core_cookie) {
6193 rq->core_pick = NULL;
6194 rq->core_dl_server = NULL;
6195 /*
6196 * For robustness, update the min_vruntime_fi for
6197 * unconstrained picks as well.
6198 */
6199 WARN_ON_ONCE(fi_before);
6200 task_vruntime_update(rq, next, false);
6201 goto out_set_next;
6202 }
6203 }
6204
6205 /*
6206 * For each thread: do the regular task pick and find the max prio task
6207 * amongst them.
6208 *
6209 * Tie-break prio towards the current CPU
6210 */
6211 for_each_cpu_wrap(i, smt_mask, cpu) {
6212 rq_i = cpu_rq(i);
6213
6214 /*
6215 * Current cpu always has its clock updated on entrance to
6216 * pick_next_task(). If the current cpu is not the core,
6217 * the core may also have been updated above.
6218 */
6219 if (i != cpu && (rq_i != rq->core || !core_clock_updated))
6220 update_rq_clock(rq_i);
6221
6222 rq_i->core_pick = p = pick_task(rq_i);
6223 rq_i->core_dl_server = rq_i->dl_server;
6224
6225 if (!max || prio_less(max, p, fi_before))
6226 max = p;
6227 }
6228
6229 cookie = rq->core->core_cookie = max->core_cookie;
6230
6231 /*
6232 * For each thread: try and find a runnable task that matches @max or
6233 * force idle.
6234 */
6235 for_each_cpu(i, smt_mask) {
6236 rq_i = cpu_rq(i);
6237 p = rq_i->core_pick;
6238
6239 if (!cookie_equals(p, cookie)) {
6240 p = NULL;
6241 if (cookie)
6242 p = sched_core_find(rq_i, cookie);
6243 if (!p)
6244 p = idle_sched_class.pick_task(rq_i);
6245 }
6246
6247 rq_i->core_pick = p;
6248 rq_i->core_dl_server = NULL;
6249
6250 if (p == rq_i->idle) {
6251 if (rq_i->nr_running) {
6252 rq->core->core_forceidle_count++;
6253 if (!fi_before)
6254 rq->core->core_forceidle_seq++;
6255 }
6256 } else {
6257 occ++;
6258 }
6259 }
6260
6261 if (schedstat_enabled() && rq->core->core_forceidle_count) {
6262 rq->core->core_forceidle_start = rq_clock(rq->core);
6263 rq->core->core_forceidle_occupation = occ;
6264 }
6265
6266 rq->core->core_pick_seq = rq->core->core_task_seq;
6267 next = rq->core_pick;
6268 rq->core_sched_seq = rq->core->core_pick_seq;
6269
6270 /* Something should have been selected for current CPU */
6271 WARN_ON_ONCE(!next);
6272
6273 /*
6274 * Reschedule siblings
6275 *
6276 * NOTE: L1TF -- at this point we're no longer running the old task and
6277 * sending an IPI (below) ensures the sibling will no longer be running
6278 * their task. This ensures there is no inter-sibling overlap between
6279 * non-matching user state.
6280 */
6281 for_each_cpu(i, smt_mask) {
6282 rq_i = cpu_rq(i);
6283
6284 /*
6285 * An online sibling might have gone offline before a task
6286 * could be picked for it, or it might be offline but later
6287 * happen to come online, but it's too late and nothing was
6288 * picked for it. That's OK - it will pick tasks for itself,
6289 * so ignore it.
6290 */
6291 if (!rq_i->core_pick)
6292 continue;
6293
6294 /*
6295 * Update for new !FI->FI transitions, or if continuing to be in !FI:
6296 * fi_before fi update?
6297 * 0 0 1
6298 * 0 1 1
6299 * 1 0 1
6300 * 1 1 0
6301 */
6302 if (!(fi_before && rq->core->core_forceidle_count))
6303 task_vruntime_update(rq_i, rq_i->core_pick, !!rq->core->core_forceidle_count);
6304
6305 rq_i->core_pick->core_occupation = occ;
6306
6307 if (i == cpu) {
6308 rq_i->core_pick = NULL;
6309 rq_i->core_dl_server = NULL;
6310 continue;
6311 }
6312
6313 /* Did we break L1TF mitigation requirements? */
6314 WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick));
6315
6316 if (rq_i->curr == rq_i->core_pick) {
6317 rq_i->core_pick = NULL;
6318 rq_i->core_dl_server = NULL;
6319 continue;
6320 }
6321
6322 resched_curr(rq_i);
6323 }
6324
6325 out_set_next:
6326 put_prev_set_next_task(rq, prev, next);
6327 if (rq->core->core_forceidle_count && next == rq->idle)
6328 queue_core_balance(rq);
6329
6330 return next;
6331 }
6332
6333 static bool try_steal_cookie(int this, int that)
6334 {
6335 struct rq *dst = cpu_rq(this), *src = cpu_rq(that);
6336 struct task_struct *p;
6337 unsigned long cookie;
6338 bool success = false;
6339
6340 guard(irq)();
6341 guard(double_rq_lock)(dst, src);
6342
6343 cookie = dst->core->core_cookie;
6344 if (!cookie)
6345 return false;
6346
6347 if (dst->curr != dst->idle)
6348 return false;
6349
6350 p = sched_core_find(src, cookie);
6351 if (!p)
6352 return false;
6353
6354 do {
6355 if (p == src->core_pick || p == src->curr)
6356 goto next;
6357
6358 if (!is_cpu_allowed(p, this))
6359 goto next;
6360
6361 if (p->core_occupation > dst->idle->core_occupation)
6362 goto next;
6363 /*
6364 * sched_core_find() and sched_core_next() will ensure
6365 * that task @p is not throttled now; we also need to
6366 * check whether the runqueue of the destination CPU is
6367 * being throttled.
6368 */
6369 if (sched_task_is_throttled(p, this))
6370 goto next;
6371
6372 move_queued_task_locked(src, dst, p);
6373 resched_curr(dst);
6374
6375 success = true;
6376 break;
6377
6378 next:
6379 p = sched_core_next(p, cookie);
6380 } while (p);
6381
6382 return success;
6383 }
6384
6385 static bool steal_cookie_task(int cpu, struct sched_domain *sd)
6386 {
6387 int i;
6388
6389 for_each_cpu_wrap(i, sched_domain_span(sd), cpu + 1) {
6390 if (i == cpu)
6391 continue;
6392
6393 if (need_resched())
6394 break;
6395
6396 if (try_steal_cookie(cpu, i))
6397 return true;
6398 }
6399
6400 return false;
6401 }
6402
6403 static void sched_core_balance(struct rq *rq)
6404 {
6405 struct sched_domain *sd;
6406 int cpu = cpu_of(rq);
6407
6408 guard(preempt)();
6409 guard(rcu)();
6410
6411 raw_spin_rq_unlock_irq(rq);
6412 for_each_domain(cpu, sd) {
6413 if (need_resched())
6414 break;
6415
6416 if (steal_cookie_task(cpu, sd))
6417 break;
6418 }
6419 raw_spin_rq_lock_irq(rq);
6420 }
6421
6422 static DEFINE_PER_CPU(struct balance_callback, core_balance_head);
6423
6424 static void queue_core_balance(struct rq *rq)
6425 {
6426 if (!sched_core_enabled(rq))
6427 return;
6428
6429 if (!rq->core->core_cookie)
6430 return;
6431
6432 if (!rq->nr_running) /* not forced idle */
6433 return;
6434
6435 queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance);
6436 }
6437
6438 DEFINE_LOCK_GUARD_1(core_lock, int,
6439 sched_core_lock(*_T->lock, &_T->flags),
6440 sched_core_unlock(*_T->lock, &_T->flags),
6441 unsigned long flags)
6442
6443 static void sched_core_cpu_starting(unsigned int cpu)
6444 {
6445 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
6446 struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
6447 int t;
6448
6449 guard(core_lock)(&cpu);
6450
6451 WARN_ON_ONCE(rq->core != rq);
6452
6453 /* if we're the first, we'll be our own leader */
6454 if (cpumask_weight(smt_mask) == 1)
6455 return;
6456
6457 /* find the leader */
6458 for_each_cpu(t, smt_mask) {
6459 if (t == cpu)
6460 continue;
6461 rq = cpu_rq(t);
6462 if (rq->core == rq) {
6463 core_rq = rq;
6464 break;
6465 }
6466 }
6467
6468 if (WARN_ON_ONCE(!core_rq)) /* whoopsie */
6469 return;
6470
6471 /* install and validate core_rq */
6472 for_each_cpu(t, smt_mask) {
6473 rq = cpu_rq(t);
6474
6475 if (t == cpu)
6476 rq->core = core_rq;
6477
6478 WARN_ON_ONCE(rq->core != core_rq);
6479 }
6480 }
6481
6482 static void sched_core_cpu_deactivate(unsigned int cpu)
6483 {
6484 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
6485 struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
6486 int t;
6487
6488 guard(core_lock)(&cpu);
6489
6490 /* if we're the last man standing, nothing to do */
6491 if (cpumask_weight(smt_mask) == 1) {
6492 WARN_ON_ONCE(rq->core != rq);
6493 return;
6494 }
6495
6496 /* if we're not the leader, nothing to do */
6497 if (rq->core != rq)
6498 return;
6499
6500 /* find a new leader */
6501 for_each_cpu(t, smt_mask) {
6502 if (t == cpu)
6503 continue;
6504 core_rq = cpu_rq(t);
6505 break;
6506 }
6507
6508 if (WARN_ON_ONCE(!core_rq)) /* impossible */
6509 return;
6510
6511 /* copy the shared state to the new leader */
6512 core_rq->core_task_seq = rq->core_task_seq;
6513 core_rq->core_pick_seq = rq->core_pick_seq;
6514 core_rq->core_cookie = rq->core_cookie;
6515 core_rq->core_forceidle_count = rq->core_forceidle_count;
6516 core_rq->core_forceidle_seq = rq->core_forceidle_seq;
6517 core_rq->core_forceidle_occupation = rq->core_forceidle_occupation;
6518
6519 /*
6520 * Accounting edge for forced idle is handled in pick_next_task().
6521 * Don't need another one here, since the hotplug thread shouldn't
6522 * have a cookie.
6523 */
6524 core_rq->core_forceidle_start = 0;
6525
6526 /* install new leader */
6527 for_each_cpu(t, smt_mask) {
6528 rq = cpu_rq(t);
6529 rq->core = core_rq;
6530 }
6531 }
6532
6533 static inline void sched_core_cpu_dying(unsigned int cpu)
6534 {
6535 struct rq *rq = cpu_rq(cpu);
6536
6537 if (rq->core != rq)
6538 rq->core = rq;
6539 }
6540
6541 #else /* !CONFIG_SCHED_CORE */
6542
6543 static inline void sched_core_cpu_starting(unsigned int cpu) {}
6544 static inline void sched_core_cpu_deactivate(unsigned int cpu) {}
6545 static inline void sched_core_cpu_dying(unsigned int cpu) {}
6546
6547 static struct task_struct *
6548 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6549 {
6550 return __pick_next_task(rq, prev, rf);
6551 }
6552
6553 #endif /* CONFIG_SCHED_CORE */
6554
6555 /*
6556 * Constants for the sched_mode argument of __schedule().
6557 *
6558 * The mode argument allows RT enabled kernels to differentiate a
6559 * preemption from blocking on an 'sleeping' spin/rwlock.
6560 */
6561 #define SM_IDLE (-1)
6562 #define SM_NONE 0
6563 #define SM_PREEMPT 1
6564 #define SM_RTLOCK_WAIT 2
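/*
 * For orientation, the __schedule() entry points below map to these modes
 * (grounded in the callers in this file):
 *
 *	schedule(), rt_mutex_schedule(), do_task_dead()	SM_NONE
 *	schedule_idle()					SM_IDLE
 *	preempt_schedule*(), preempt_schedule_irq()	SM_PREEMPT
 *	schedule_rtlock()				SM_RTLOCK_WAIT
 */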
6565
6566 /*
6567 * Helper function for __schedule()
6568 *
6569 * If a task does not have signals pending, deactivate it.
6570 * Otherwise, mark the task's __state as RUNNING.
6571 */
6572 static bool try_to_block_task(struct rq *rq, struct task_struct *p,
6573 unsigned long task_state)
6574 {
6575 int flags = DEQUEUE_NOCLOCK;
6576
6577 if (signal_pending_state(task_state, p)) {
6578 WRITE_ONCE(p->__state, TASK_RUNNING);
6579 return false;
6580 }
6581
6582 p->sched_contributes_to_load =
6583 (task_state & TASK_UNINTERRUPTIBLE) &&
6584 !(task_state & TASK_NOLOAD) &&
6585 !(task_state & TASK_FROZEN);
6586
6587 if (unlikely(is_special_task_state(task_state)))
6588 flags |= DEQUEUE_SPECIAL;
6589
6590 /*
6591 * __schedule() ttwu()
6592 * prev_state = prev->state; if (p->on_rq && ...)
6593 * if (prev_state) goto out;
6594 * p->on_rq = 0; smp_acquire__after_ctrl_dep();
6595 * p->state = TASK_WAKING
6596 *
6597 * Where __schedule() and ttwu() have matching control dependencies.
6598 *
6599 * After this, schedule() must not care about p->state any more.
6600 */
6601 block_task(rq, p, flags);
6602 return true;
6603 }
6604
6605 /*
6606 * __schedule() is the main scheduler function.
6607 *
6608 * The main means of driving the scheduler and thus entering this function are:
6609 *
6610 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc.
6611 *
6612 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
6613 * paths. For example, see arch/x86/entry/entry_64.S.
6614 *
6615 * To drive preemption between tasks, the scheduler sets the flag in timer
6616 * interrupt handler sched_tick().
6617 *
6618 * 3. Wakeups don't really cause entry into schedule(). They add a
6619 * task to the run-queue and that's it.
6620 *
6621 * Now, if the new task added to the run-queue preempts the current
6622 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
6623 * called on the nearest possible occasion:
6624 *
6625 * - If the kernel is preemptible (CONFIG_PREEMPTION=y):
6626 *
6627 * - in syscall or exception context, at the next outermost
6628 * preempt_enable(). (this might be as soon as the wake_up()'s
6629 * spin_unlock()!)
6630 *
6631 * - in IRQ context, return from interrupt-handler to
6632 * preemptible context
6633 *
6634 * - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
6635 * then at the next:
6636 *
6637 * - cond_resched() call
6638 * - explicit schedule() call
6639 * - return from syscall or exception to user-space
6640 * - return from interrupt-handler to user-space
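* 
* As a sketch of case 1 above, the canonical blocking pattern (with a
* hypothetical 'condition') looks like:
* 
*	for (;;) {
*		set_current_state(TASK_UNINTERRUPTIBLE);
*		if (condition)
*			break;
*		schedule();
*	}
*	__set_current_state(TASK_RUNNING);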
6641 *
6642 * WARNING: must be called with preemption disabled!
6643 */
6644 static void __sched notrace __schedule(int sched_mode)
6645 {
6646 struct task_struct *prev, *next;
6647 /*
6648 * On PREEMPT_RT kernels, SM_RTLOCK_WAIT is noted
6649 * as a preemption by schedule_debug() and RCU.
6650 */
6651 bool preempt = sched_mode > SM_NONE;
6652 unsigned long *switch_count;
6653 unsigned long prev_state;
6654 struct rq_flags rf;
6655 struct rq *rq;
6656 int cpu;
6657
6658 cpu = smp_processor_id();
6659 rq = cpu_rq(cpu);
6660 prev = rq->curr;
6661
6662 schedule_debug(prev, preempt);
6663
6664 if (sched_feat(HRTICK) || sched_feat(HRTICK_DL))
6665 hrtick_clear(rq);
6666
6667 local_irq_disable();
6668 rcu_note_context_switch(preempt);
6669
6670 /*
6671 * Make sure that signal_pending_state()->signal_pending() below
6672 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
6673 * done by the caller to avoid the race with signal_wake_up():
6674 *
6675 * __set_current_state(@state) signal_wake_up()
6676 * schedule() set_tsk_thread_flag(p, TIF_SIGPENDING)
6677 * wake_up_state(p, state)
6678 * LOCK rq->lock LOCK p->pi_state
6679 * smp_mb__after_spinlock() smp_mb__after_spinlock()
6680 * if (signal_pending_state()) if (p->state & @state)
6681 *
6682 * Also, the membarrier system call requires a full memory barrier
6683 * after coming from user-space, before storing to rq->curr; this
6684 * barrier matches a full barrier in the proximity of the membarrier
6685 * system call exit.
6686 */
6687 rq_lock(rq, &rf);
6688 smp_mb__after_spinlock();
6689
6690 /* Promote REQ to ACT */
6691 rq->clock_update_flags <<= 1;
6692 update_rq_clock(rq);
6693 rq->clock_update_flags = RQCF_UPDATED;
6694
6695 switch_count = &prev->nivcsw;
6696
6697 /* Task state changes only consider SM_PREEMPT as preemption */
6698 preempt = sched_mode == SM_PREEMPT;
6699
6700 /*
6701 * We must load prev->state once (task_struct::state is volatile), such
6702 * that we form a control dependency vs deactivate_task() below.
6703 */
6704 prev_state = READ_ONCE(prev->__state);
6705 if (sched_mode == SM_IDLE) {
6706 /* SCX must consult the BPF scheduler to tell if rq is empty */
6707 if (!rq->nr_running && !scx_enabled()) {
6708 next = prev;
6709 goto picked;
6710 }
6711 } else if (!preempt && prev_state) {
6712 try_to_block_task(rq, prev, prev_state);
6713 switch_count = &prev->nvcsw;
6714 }
6715
6716 next = pick_next_task(rq, prev, &rf);
6717 rq_set_donor(rq, next);
6718 picked:
6719 clear_tsk_need_resched(prev);
6720 clear_preempt_need_resched();
6721 #ifdef CONFIG_SCHED_DEBUG
6722 rq->last_seen_need_resched_ns = 0;
6723 #endif
6724
6725 if (likely(prev != next)) {
6726 rq->nr_switches++;
6727 /*
6728 * RCU users of rcu_dereference(rq->curr) may not see
6729 * changes to task_struct made by pick_next_task().
6730 */
6731 RCU_INIT_POINTER(rq->curr, next);
6732 /*
6733 * The membarrier system call requires each architecture
6734 * to have a full memory barrier after updating
6735 * rq->curr, before returning to user-space.
6736 *
6737 * Here are the schemes providing that barrier on the
6738 * various architectures:
6739 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC,
6740 * RISC-V. switch_mm() relies on membarrier_arch_switch_mm()
6741 * on PowerPC and on RISC-V.
6742 * - finish_lock_switch() for weakly-ordered
6743 * architectures where spin_unlock is a full barrier,
6744 * - switch_to() for arm64 (weakly-ordered, spin_unlock
6745 * is a RELEASE barrier),
6746 *
6747 * The barrier matches a full barrier in the proximity of
6748 * the membarrier system call entry.
6749 *
6750 * On RISC-V, this barrier pairing is also needed for the
6751 * SYNC_CORE command when switching between processes, cf.
6752 * the inline comments in membarrier_arch_switch_mm().
6753 */
6754 ++*switch_count;
6755
6756 migrate_disable_switch(rq, prev);
6757 psi_account_irqtime(rq, prev, next);
6758 psi_sched_switch(prev, next, !task_on_rq_queued(prev) ||
6759 prev->se.sched_delayed);
6760
6761 trace_sched_switch(preempt, prev, next, prev_state);
6762
6763 /* Also unlocks the rq: */
6764 rq = context_switch(rq, prev, next, &rf);
6765 } else {
6766 rq_unpin_lock(rq, &rf);
6767 __balance_callbacks(rq);
6768 raw_spin_rq_unlock_irq(rq);
6769 }
6770 }
6771
6772 void __noreturn do_task_dead(void)
6773 {
6774 /* Causes final put_task_struct in finish_task_switch(): */
6775 set_special_state(TASK_DEAD);
6776
6777 /* Tell freezer to ignore us: */
6778 current->flags |= PF_NOFREEZE;
6779
6780 __schedule(SM_NONE);
6781 BUG();
6782
6783 /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
6784 for (;;)
6785 cpu_relax();
6786 }
6787
6788 static inline void sched_submit_work(struct task_struct *tsk)
6789 {
6790 static DEFINE_WAIT_OVERRIDE_MAP(sched_map, LD_WAIT_CONFIG);
6791 unsigned int task_flags;
6792
6793 /*
6794 * Establish LD_WAIT_CONFIG context to ensure none of the code called
6795 * will use a blocking primitive -- which would lead to recursion.
6796 */
6797 lock_map_acquire_try(&sched_map);
6798
6799 task_flags = tsk->flags;
6800 /*
6801 * If a worker goes to sleep, notify and ask workqueue whether it
6802 * wants to wake up a task to maintain concurrency.
6803 */
6804 if (task_flags & PF_WQ_WORKER)
6805 wq_worker_sleeping(tsk);
6806 else if (task_flags & PF_IO_WORKER)
6807 io_wq_worker_sleeping(tsk);
6808
6809 /*
6810 * spinlock and rwlock must not flush block requests. This will
6811 * deadlock if the callback attempts to acquire a lock which is
6812 * already acquired.
6813 */
6814 SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT);
6815
6816 /*
6817 * If we are going to sleep and we have plugged IO queued,
6818 * make sure to submit it to avoid deadlocks.
6819 */
6820 blk_flush_plug(tsk->plug, true);
6821
6822 lock_map_release(&sched_map);
6823 }
6824
6825 static void sched_update_worker(struct task_struct *tsk)
6826 {
6827 if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER | PF_BLOCK_TS)) {
6828 if (tsk->flags & PF_BLOCK_TS)
6829 blk_plug_invalidate_ts(tsk);
6830 if (tsk->flags & PF_WQ_WORKER)
6831 wq_worker_running(tsk);
6832 else if (tsk->flags & PF_IO_WORKER)
6833 io_wq_worker_running(tsk);
6834 }
6835 }
6836
6837 static __always_inline void __schedule_loop(int sched_mode)
6838 {
6839 do {
6840 preempt_disable();
6841 __schedule(sched_mode);
6842 sched_preempt_enable_no_resched();
6843 } while (need_resched());
6844 }
6845
6846 asmlinkage __visible void __sched schedule(void)
6847 {
6848 struct task_struct *tsk = current;
6849
6850 #ifdef CONFIG_RT_MUTEXES
6851 lockdep_assert(!tsk->sched_rt_mutex);
6852 #endif
6853
6854 if (!task_is_running(tsk))
6855 sched_submit_work(tsk);
6856 __schedule_loop(SM_NONE);
6857 sched_update_worker(tsk);
6858 }
6859 EXPORT_SYMBOL(schedule);
6860
6861 /*
6862 * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
6863 * state (i.e. has scheduled out non-voluntarily) by making sure that all
6864 * tasks have either left the run queue or have gone into user space.
6865 * As idle tasks do not do either, they must not ever be preempted
6866 * (schedule out non-voluntarily).
6867 *
6868 * schedule_idle() is similar to schedule_preempt_disabled() except that it
6869 * never enables preemption because it does not call sched_submit_work().
6870 */
6871 void __sched schedule_idle(void)
6872 {
6873 /*
6874 * This skips calling sched_submit_work(), which is fine for the idle
6875 * task because that function is a NOP when the task is in the
6876 * TASK_RUNNING state; make sure this isn't used someplace that the
6877 * current task can be in any other state. Note, idle is always in the
6878 * TASK_RUNNING state.
6879 */
6880 WARN_ON_ONCE(current->__state);
6881 do {
6882 __schedule(SM_IDLE);
6883 } while (need_resched());
6884 }
6885
6886 #if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK)
6887 asmlinkage __visible void __sched schedule_user(void)
6888 {
6889 /*
6890 * If we come here after a random call to set_need_resched(),
6891 * or we have been woken up remotely but the IPI has not yet arrived,
6892 * we haven't yet exited the RCU idle mode. Do it here manually until
6893 * we find a better solution.
6894 *
6895 * NB: There are buggy callers of this function. Ideally we
6896 * should warn if prev_state != CT_STATE_USER, but that will trigger
6897 * too frequently to make sense yet.
6898 */
6899 enum ctx_state prev_state = exception_enter();
6900 schedule();
6901 exception_exit(prev_state);
6902 }
6903 #endif
6904
6905 /**
6906 * schedule_preempt_disabled - called with preemption disabled
6907 *
6908 * Returns with preemption disabled. Note: preempt_count must be 1
6909 */
6910 void __sched schedule_preempt_disabled(void)
6911 {
6912 sched_preempt_enable_no_resched();
6913 schedule();
6914 preempt_disable();
6915 }
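/*
 * An illustrative (hypothetical) caller, entering with preempt_count == 1
 * as required and leaving the same way:
 *
 *	preempt_disable();
 *	...
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	schedule_preempt_disabled();
 *	...
 *	preempt_enable();
 */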
6916
6917 #ifdef CONFIG_PREEMPT_RT
6918 void __sched notrace schedule_rtlock(void)
6919 {
6920 __schedule_loop(SM_RTLOCK_WAIT);
6921 }
6922 NOKPROBE_SYMBOL(schedule_rtlock);
6923 #endif
6924
6925 static void __sched notrace preempt_schedule_common(void)
6926 {
6927 do {
6928 /*
6929 * Because the function tracer can trace preempt_count_sub()
6930 * and it also uses preempt_enable/disable_notrace(), if
6931 * NEED_RESCHED is set, the preempt_enable_notrace() called
6932 * by the function tracer will call this function again and
6933 * cause infinite recursion.
6934 *
6935 * Preemption must be disabled here before the function
6936 * tracer can trace. Break up preempt_disable() into two
6937 * calls. One to disable preemption without fear of being
6938 * traced. The other to still record the preemption latency,
6939 * which can also be traced by the function tracer.
6940 */
6941 preempt_disable_notrace();
6942 preempt_latency_start(1);
6943 __schedule(SM_PREEMPT);
6944 preempt_latency_stop(1);
6945 preempt_enable_no_resched_notrace();
6946
6947 /*
6948 * Check again in case we missed a preemption opportunity
6949 * between schedule and now.
6950 */
6951 } while (need_resched());
6952 }
6953
6954 #ifdef CONFIG_PREEMPTION
6955 /*
6956 * This is the entry point to schedule() from in-kernel preemption
6957 * off of preempt_enable.
6958 */
6959 asmlinkage __visible void __sched notrace preempt_schedule(void)
6960 {
6961 /*
6962 * If there is a non-zero preempt_count or interrupts are disabled,
6963 * we do not want to preempt the current task. Just return.
6964 */
6965 if (likely(!preemptible()))
6966 return;
6967 preempt_schedule_common();
6968 }
6969 NOKPROBE_SYMBOL(preempt_schedule);
6970 EXPORT_SYMBOL(preempt_schedule);
6971
6972 #ifdef CONFIG_PREEMPT_DYNAMIC
6973 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
6974 #ifndef preempt_schedule_dynamic_enabled
6975 #define preempt_schedule_dynamic_enabled preempt_schedule
6976 #define preempt_schedule_dynamic_disabled NULL
6977 #endif
6978 DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
6979 EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
6980 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
6981 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);
6982 void __sched notrace dynamic_preempt_schedule(void)
6983 {
6984 if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
6985 return;
6986 preempt_schedule();
6987 }
6988 NOKPROBE_SYMBOL(dynamic_preempt_schedule);
6989 EXPORT_SYMBOL(dynamic_preempt_schedule);
6990 #endif
6991 #endif
6992
6993 /**
6994 * preempt_schedule_notrace - preempt_schedule called by tracing
6995 *
6996 * The tracing infrastructure uses preempt_enable_notrace to prevent
6997 * recursion and tracing preempt enabling caused by the tracing
6998 * infrastructure itself. But as tracing can happen in areas coming
6999 * from userspace or just about to enter userspace, a preempt enable
7000 * can occur before user_exit() is called. This will cause the scheduler
7001 * to be called when the system is still in usermode.
7002 *
7003 * To prevent this, the preempt_enable_notrace will use this function
7004 * instead of preempt_schedule() to exit user context if needed before
7005 * calling the scheduler.
7006 */
7007 asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
7008 {
7009 enum ctx_state prev_ctx;
7010
7011 if (likely(!preemptible()))
7012 return;
7013
7014 do {
7015 /*
7016 * Because the function tracer can trace preempt_count_sub()
7017 * and it also uses preempt_enable/disable_notrace(), if
7018 * NEED_RESCHED is set, the preempt_enable_notrace() called
7019 * by the function tracer will call this function again and
7020 * cause infinite recursion.
7021 *
7022 * Preemption must be disabled here before the function
7023 * tracer can trace. Break up preempt_disable() into two
7024 * calls. One to disable preemption without fear of being
7025 * traced. The other to still record the preemption latency,
7026 * which can also be traced by the function tracer.
7027 */
7028 preempt_disable_notrace();
7029 preempt_latency_start(1);
7030 /*
7031 * Needs preempt disabled in case user_exit() is traced
7032 * and the tracer calls preempt_enable_notrace() causing
7033 * an infinite recursion.
7034 */
7035 prev_ctx = exception_enter();
7036 __schedule(SM_PREEMPT);
7037 exception_exit(prev_ctx);
7038
7039 preempt_latency_stop(1);
7040 preempt_enable_no_resched_notrace();
7041 } while (need_resched());
7042 }
7043 EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
7044
7045 #ifdef CONFIG_PREEMPT_DYNAMIC
7046 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
7047 #ifndef preempt_schedule_notrace_dynamic_enabled
7048 #define preempt_schedule_notrace_dynamic_enabled preempt_schedule_notrace
7049 #define preempt_schedule_notrace_dynamic_disabled NULL
7050 #endif
7051 DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
7052 EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
7053 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7054 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace);
7055 void __sched notrace dynamic_preempt_schedule_notrace(void)
7056 {
7057 if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace))
7058 return;
7059 preempt_schedule_notrace();
7060 }
7061 NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace);
7062 EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
7063 #endif
7064 #endif
7065
7066 #endif /* CONFIG_PREEMPTION */
7067
7068 /*
7069 * This is the entry point to schedule() from kernel preemption
7070 * off of IRQ context.
7071 * Note that this is called and returns with IRQs disabled. This will
7072 * protect us against recursive calling from IRQ contexts.
7073 */
7074 asmlinkage __visible void __sched preempt_schedule_irq(void)
7075 {
7076 enum ctx_state prev_state;
7077
7078 /* Catch callers which need to be fixed */
7079 BUG_ON(preempt_count() || !irqs_disabled());
7080
7081 prev_state = exception_enter();
7082
7083 do {
7084 preempt_disable();
7085 local_irq_enable();
7086 __schedule(SM_PREEMPT);
7087 local_irq_disable();
7088 sched_preempt_enable_no_resched();
7089 } while (need_resched());
7090
7091 exception_exit(prev_state);
7092 }
7093
7094 int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
7095 void *key)
7096 {
7097 WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~(WF_SYNC|WF_CURRENT_CPU));
7098 return try_to_wake_up(curr->private, mode, wake_flags);
7099 }
7100 EXPORT_SYMBOL(default_wake_function);
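/*
 * This is the wake function installed by DECLARE_WAITQUEUE() and
 * init_waitqueue_entry(); an illustrative (hypothetical) waiter:
 *
 *	DECLARE_WAITQUEUE(wait, current);
 *
 *	add_wait_queue(&head, &wait);
 *	... sleep until woken via default_wake_function() ...
 *	remove_wait_queue(&head, &wait);
 *
 * Custom wakeup behaviour is supplied via init_waitqueue_func_entry().
 */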
7101
7102 const struct sched_class *__setscheduler_class(int policy, int prio)
7103 {
7104 if (dl_prio(prio))
7105 return &dl_sched_class;
7106
7107 if (rt_prio(prio))
7108 return &rt_sched_class;
7109
7110 #ifdef CONFIG_SCHED_CLASS_EXT
7111 if (task_should_scx(policy))
7112 return &ext_sched_class;
7113 #endif
7114
7115 return &fair_sched_class;
7116 }
7117
7118 #ifdef CONFIG_RT_MUTEXES
7119
7120 /*
7121 * Would be more useful with typeof()/auto_type but they don't mix with
7122 * bit-fields. Since it's a local thing, use int. Keep the generic-sounding
7123 * name such that if someone were to implement this function we get to compare
7124 * notes.
7125 */
7126 #define fetch_and_set(x, v) ({ int _x = (x); (x) = (v); _x; })
7127
7128 void rt_mutex_pre_schedule(void)
7129 {
7130 lockdep_assert(!fetch_and_set(current->sched_rt_mutex, 1));
7131 sched_submit_work(current);
7132 }
7133
7134 void rt_mutex_schedule(void)
7135 {
7136 lockdep_assert(current->sched_rt_mutex);
7137 __schedule_loop(SM_NONE);
7138 }
7139
7140 void rt_mutex_post_schedule(void)
7141 {
7142 sched_update_worker(current);
7143 lockdep_assert(fetch_and_set(current->sched_rt_mutex, 0));
7144 }
7145
7146 /*
7147 * rt_mutex_setprio - set the current priority of a task
7148 * @p: task to boost
7149 * @pi_task: donor task
7150 *
7151 * This function changes the 'effective' priority of a task. It does
7152 * not touch ->normal_prio like __setscheduler().
7153 *
7154 * Used by the rt_mutex code to implement priority inheritance
7155 * logic. Call site only calls if the priority of the task changed.
7156 */
7157 void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
7158 {
7159 int prio, oldprio, queued, running, queue_flag =
7160 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
7161 const struct sched_class *prev_class, *next_class;
7162 struct rq_flags rf;
7163 struct rq *rq;
7164
7165 /* XXX used to be waiter->prio, not waiter->task->prio */
7166 prio = __rt_effective_prio(pi_task, p->normal_prio);
7167
7168 /*
7169 * If nothing changed; bail early.
7170 */
7171 if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio))
7172 return;
7173
7174 rq = __task_rq_lock(p, &rf);
7175 update_rq_clock(rq);
7176 /*
7177 * Set under pi_lock && rq->lock, such that the value can be used under
7178 * either lock.
7179 *
7180 * Note that it takes loads of trickiness to make this pointer cache work
7181 * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
7182 * ensure a task is de-boosted (pi_task is set to NULL) before the
7183 * task is allowed to run again (and can exit). This ensures the pointer
7184 * points to a blocked task -- which guarantees the task is present.
7185 */
7186 p->pi_top_task = pi_task;
7187
7188 /*
7189 * For FIFO/RR we only need to set prio, if that matches we're done.
7190 */
7191 if (prio == p->prio && !dl_prio(prio))
7192 goto out_unlock;
7193
7194 /*
7195 * Idle task boosting is a no-no in general. There is one
7196 * exception, when PREEMPT_RT and NOHZ is active:
7197 *
7198 * The idle task calls get_next_timer_interrupt() and holds
7199 * the timer wheel base->lock on the CPU and another CPU wants
7200 * to access the timer (probably to cancel it). We can safely
7201 * ignore the boosting request, as the idle CPU runs this code
7202 * with interrupts disabled and will complete the lock
7203 * protected section without being interrupted. So there is no
7204 * real need to boost.
7205 */
7206 if (unlikely(p == rq->idle)) {
7207 WARN_ON(p != rq->curr);
7208 WARN_ON(p->pi_blocked_on);
7209 goto out_unlock;
7210 }
7211
7212 trace_sched_pi_setprio(p, pi_task);
7213 oldprio = p->prio;
7214
7215 if (oldprio == prio)
7216 queue_flag &= ~DEQUEUE_MOVE;
7217
7218 prev_class = p->sched_class;
7219 next_class = __setscheduler_class(p->policy, prio);
7220
7221 if (prev_class != next_class && p->se.sched_delayed)
7222 dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED | DEQUEUE_NOCLOCK);
7223
7224 queued = task_on_rq_queued(p);
7225 running = task_current_donor(rq, p);
7226 if (queued)
7227 dequeue_task(rq, p, queue_flag);
7228 if (running)
7229 put_prev_task(rq, p);
7230
7231 /*
7232 * Boosting conditions are:
7233 * 1. -rt task is running and holds mutex A
7234 * --> -dl task blocks on mutex A
7235 *
7236 * 2. -dl task is running and holds mutex A
7237 * --> -dl task blocks on mutex A and could preempt the
7238 * running task
7239 */
7240 if (dl_prio(prio)) {
7241 if (!dl_prio(p->normal_prio) ||
7242 (pi_task && dl_prio(pi_task->prio) &&
7243 dl_entity_preempt(&pi_task->dl, &p->dl))) {
7244 p->dl.pi_se = pi_task->dl.pi_se;
7245 queue_flag |= ENQUEUE_REPLENISH;
7246 } else {
7247 p->dl.pi_se = &p->dl;
7248 }
7249 } else if (rt_prio(prio)) {
7250 if (dl_prio(oldprio))
7251 p->dl.pi_se = &p->dl;
7252 if (oldprio < prio)
7253 queue_flag |= ENQUEUE_HEAD;
7254 } else {
7255 if (dl_prio(oldprio))
7256 p->dl.pi_se = &p->dl;
7257 if (rt_prio(oldprio))
7258 p->rt.timeout = 0;
7259 }
7260
7261 p->sched_class = next_class;
7262 p->prio = prio;
7263
7264 check_class_changing(rq, p, prev_class);
7265
7266 if (queued)
7267 enqueue_task(rq, p, queue_flag);
7268 if (running)
7269 set_next_task(rq, p);
7270
7271 check_class_changed(rq, p, prev_class, oldprio);
7272 out_unlock:
7273 /* Avoid rq from going away on us: */
7274 preempt_disable();
7275
7276 rq_unpin_lock(rq, &rf);
7277 __balance_callbacks(rq);
7278 raw_spin_rq_unlock(rq);
7279
7280 preempt_enable();
7281 }
7282 #endif
7283
7284 #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
7285 int __sched __cond_resched(void)
7286 {
7287 if (should_resched(0)) {
7288 preempt_schedule_common();
7289 return 1;
7290 }
7291 /*
7292 * In preemptible kernels, ->rcu_read_lock_nesting tells the tick
7293 * whether the current CPU is in an RCU read-side critical section,
7294 * so the tick can report quiescent states even for CPUs looping
7295 * in kernel context. In contrast, in non-preemptible kernels,
7296 * RCU readers leave no in-memory hints, which means that CPU-bound
7297 * processes executing in kernel context might never report an
7298 * RCU quiescent state. Therefore, the following code causes
7299 * cond_resched() to report a quiescent state, but only when RCU
7300 * is in urgent need of one.
7301 */
7302 #ifndef CONFIG_PREEMPT_RCU
7303 rcu_all_qs();
7304 #endif
7305 return 0;
7306 }
7307 EXPORT_SYMBOL(__cond_resched);
7308 #endif
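
/*
 * Illustrative sketch (editor's addition): the canonical consumer of
 * __cond_resched() is a long-running kernel loop; process_items() and
 * process_one() are made-up names, only cond_resched() is real:
 *
 *	static void process_items(struct item *items, int nr)
 *	{
 *		for (int i = 0; i < nr; i++) {
 *			process_one(&items[i]);
 *			cond_resched();	// may schedule; returns 1 if it did
 *		}
 *	}
 */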
7309
7310 #ifdef CONFIG_PREEMPT_DYNAMIC
7311 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
7312 #define cond_resched_dynamic_enabled __cond_resched
7313 #define cond_resched_dynamic_disabled ((void *)&__static_call_return0)
7314 DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
7315 EXPORT_STATIC_CALL_TRAMP(cond_resched);
7316
7317 #define might_resched_dynamic_enabled __cond_resched
7318 #define might_resched_dynamic_disabled ((void *)&__static_call_return0)
7319 DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
7320 EXPORT_STATIC_CALL_TRAMP(might_resched);
7321 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7322 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
7323 int __sched dynamic_cond_resched(void)
7324 {
7325 klp_sched_try_switch();
7326 if (!static_branch_unlikely(&sk_dynamic_cond_resched))
7327 return 0;
7328 return __cond_resched();
7329 }
7330 EXPORT_SYMBOL(dynamic_cond_resched);
7331
7332 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched);
7333 int __sched dynamic_might_resched(void)
7334 {
7335 if (!static_branch_unlikely(&sk_dynamic_might_resched))
7336 return 0;
7337 return __cond_resched();
7338 }
7339 EXPORT_SYMBOL(dynamic_might_resched);
7340 #endif
7341 #endif
7342
7343 /*
7344 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
7345 * call schedule, and on return reacquire the lock.
7346 *
7347 * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
7348 * operations here to prevent schedule() from being called twice (once via
7349 * spin_unlock(), once by hand).
7350 */
7351 int __cond_resched_lock(spinlock_t *lock)
7352 {
7353 int resched = should_resched(PREEMPT_LOCK_OFFSET);
7354 int ret = 0;
7355
7356 lockdep_assert_held(lock);
7357
7358 if (spin_needbreak(lock) || resched) {
7359 spin_unlock(lock);
7360 if (!_cond_resched())
7361 cpu_relax();
7362 ret = 1;
7363 spin_lock(lock);
7364 }
7365 return ret;
7366 }
7367 EXPORT_SYMBOL(__cond_resched_lock);
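
/*
 * Illustrative sketch (editor's addition): a typical caller, via the
 * cond_resched_lock() wrapper, walks a long structure under the lock and
 * periodically yields; since the lock may have been dropped, the walk must
 * be revalidated or restarted ('mylock', 'head', 'pos' and handle() are
 * hypothetical names):
 *
 *	spin_lock(&mylock);
 * restart:
 *	list_for_each_entry(pos, &head, node) {
 *		handle(pos);
 *		if (cond_resched_lock(&mylock))
 *			goto restart;	// lock was dropped, 'pos' may be stale
 *	}
 *	spin_unlock(&mylock);
 */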
7368
7369 int __cond_resched_rwlock_read(rwlock_t *lock)
7370 {
7371 int resched = should_resched(PREEMPT_LOCK_OFFSET);
7372 int ret = 0;
7373
7374 lockdep_assert_held_read(lock);
7375
7376 if (rwlock_needbreak(lock) || resched) {
7377 read_unlock(lock);
7378 if (!_cond_resched())
7379 cpu_relax();
7380 ret = 1;
7381 read_lock(lock);
7382 }
7383 return ret;
7384 }
7385 EXPORT_SYMBOL(__cond_resched_rwlock_read);
7386
7387 int __cond_resched_rwlock_write(rwlock_t *lock)
7388 {
7389 int resched = should_resched(PREEMPT_LOCK_OFFSET);
7390 int ret = 0;
7391
7392 lockdep_assert_held_write(lock);
7393
7394 if (rwlock_needbreak(lock) || resched) {
7395 write_unlock(lock);
7396 if (!_cond_resched())
7397 cpu_relax();
7398 ret = 1;
7399 write_lock(lock);
7400 }
7401 return ret;
7402 }
7403 EXPORT_SYMBOL(__cond_resched_rwlock_write);
7404
7405 #ifdef CONFIG_PREEMPT_DYNAMIC
7406
7407 #ifdef CONFIG_GENERIC_ENTRY
7408 #include <linux/entry-common.h>
7409 #endif
7410
7411 /*
7412 * SC:cond_resched
7413 * SC:might_resched
7414 * SC:preempt_schedule
7415 * SC:preempt_schedule_notrace
7416 * SC:irqentry_exit_cond_resched
7417 *
7418 *
7419 * NONE:
7420 * cond_resched <- __cond_resched
7421 * might_resched <- RET0
7422 * preempt_schedule <- NOP
7423 * preempt_schedule_notrace <- NOP
7424 * irqentry_exit_cond_resched <- NOP
7425 * dynamic_preempt_lazy <- false
7426 *
7427 * VOLUNTARY:
7428 * cond_resched <- __cond_resched
7429 * might_resched <- __cond_resched
7430 * preempt_schedule <- NOP
7431 * preempt_schedule_notrace <- NOP
7432 * irqentry_exit_cond_resched <- NOP
7433 * dynamic_preempt_lazy <- false
7434 *
7435 * FULL:
7436 * cond_resched <- RET0
7437 * might_resched <- RET0
7438 * preempt_schedule <- preempt_schedule
7439 * preempt_schedule_notrace <- preempt_schedule_notrace
7440 * irqentry_exit_cond_resched <- irqentry_exit_cond_resched
7441 * dynamic_preempt_lazy <- false
7442 *
7443 * LAZY:
7444 * cond_resched <- RET0
7445 * might_resched <- RET0
7446 * preempt_schedule <- preempt_schedule
7447 * preempt_schedule_notrace <- preempt_schedule_notrace
7448 * irqentry_exit_cond_resched <- irqentry_exit_cond_resched
7449 * dynamic_preempt_lazy <- true
7450 */
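
/*
 * Illustrative note (editor's addition): the model is selected on the kernel
 * command line via setup_preempt_mode() below, e.g. "preempt=voluntary",
 * and, with debugfs enabled, can be switched at runtime (the output shape
 * shown here is an approximation):
 *
 *	# cat /sys/kernel/debug/sched/preempt
 *	none voluntary (full) lazy
 *	# echo lazy > /sys/kernel/debug/sched/preempt
 */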
7451
7452 enum {
7453 preempt_dynamic_undefined = -1,
7454 preempt_dynamic_none,
7455 preempt_dynamic_voluntary,
7456 preempt_dynamic_full,
7457 preempt_dynamic_lazy,
7458 };
7459
7460 int preempt_dynamic_mode = preempt_dynamic_undefined;
7461
7462 int sched_dynamic_mode(const char *str)
7463 {
7464 #ifndef CONFIG_PREEMPT_RT
7465 if (!strcmp(str, "none"))
7466 return preempt_dynamic_none;
7467
7468 if (!strcmp(str, "voluntary"))
7469 return preempt_dynamic_voluntary;
7470 #endif
7471
7472 if (!strcmp(str, "full"))
7473 return preempt_dynamic_full;
7474
7475 #ifdef CONFIG_ARCH_HAS_PREEMPT_LAZY
7476 if (!strcmp(str, "lazy"))
7477 return preempt_dynamic_lazy;
7478 #endif
7479
7480 return -EINVAL;
7481 }
7482
7483 #define preempt_dynamic_key_enable(f) static_key_enable(&sk_dynamic_##f.key)
7484 #define preempt_dynamic_key_disable(f) static_key_disable(&sk_dynamic_##f.key)
7485
7486 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
7487 #define preempt_dynamic_enable(f) static_call_update(f, f##_dynamic_enabled)
7488 #define preempt_dynamic_disable(f) static_call_update(f, f##_dynamic_disabled)
7489 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7490 #define preempt_dynamic_enable(f) preempt_dynamic_key_enable(f)
7491 #define preempt_dynamic_disable(f) preempt_dynamic_key_disable(f)
7492 #else
7493 #error "Unsupported PREEMPT_DYNAMIC mechanism"
7494 #endif
7495
7496 static DEFINE_MUTEX(sched_dynamic_mutex);
7497 static bool klp_override;
7498
7499 static void __sched_dynamic_update(int mode)
7500 {
7501 /*
7502 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
7503 * the ZERO state, which is invalid.
7504 */
7505 if (!klp_override)
7506 preempt_dynamic_enable(cond_resched);
7507 preempt_dynamic_enable(might_resched);
7508 preempt_dynamic_enable(preempt_schedule);
7509 preempt_dynamic_enable(preempt_schedule_notrace);
7510 preempt_dynamic_enable(irqentry_exit_cond_resched);
7511 preempt_dynamic_key_disable(preempt_lazy);
7512
7513 switch (mode) {
7514 case preempt_dynamic_none:
7515 if (!klp_override)
7516 preempt_dynamic_enable(cond_resched);
7517 preempt_dynamic_disable(might_resched);
7518 preempt_dynamic_disable(preempt_schedule);
7519 preempt_dynamic_disable(preempt_schedule_notrace);
7520 preempt_dynamic_disable(irqentry_exit_cond_resched);
7521 preempt_dynamic_key_disable(preempt_lazy);
7522 if (mode != preempt_dynamic_mode)
7523 pr_info("Dynamic Preempt: none\n");
7524 break;
7525
7526 case preempt_dynamic_voluntary:
7527 if (!klp_override)
7528 preempt_dynamic_enable(cond_resched);
7529 preempt_dynamic_enable(might_resched);
7530 preempt_dynamic_disable(preempt_schedule);
7531 preempt_dynamic_disable(preempt_schedule_notrace);
7532 preempt_dynamic_disable(irqentry_exit_cond_resched);
7533 preempt_dynamic_key_disable(preempt_lazy);
7534 if (mode != preempt_dynamic_mode)
7535 pr_info("Dynamic Preempt: voluntary\n");
7536 break;
7537
7538 case preempt_dynamic_full:
7539 if (!klp_override)
7540 preempt_dynamic_disable(cond_resched);
7541 preempt_dynamic_disable(might_resched);
7542 preempt_dynamic_enable(preempt_schedule);
7543 preempt_dynamic_enable(preempt_schedule_notrace);
7544 preempt_dynamic_enable(irqentry_exit_cond_resched);
7545 preempt_dynamic_key_disable(preempt_lazy);
7546 if (mode != preempt_dynamic_mode)
7547 pr_info("Dynamic Preempt: full\n");
7548 break;
7549
7550 case preempt_dynamic_lazy:
7551 if (!klp_override)
7552 preempt_dynamic_disable(cond_resched);
7553 preempt_dynamic_disable(might_resched);
7554 preempt_dynamic_enable(preempt_schedule);
7555 preempt_dynamic_enable(preempt_schedule_notrace);
7556 preempt_dynamic_enable(irqentry_exit_cond_resched);
7557 preempt_dynamic_key_enable(preempt_lazy);
7558 if (mode != preempt_dynamic_mode)
7559 pr_info("Dynamic Preempt: lazy\n");
7560 break;
7561 }
7562
7563 preempt_dynamic_mode = mode;
7564 }
7565
7566 void sched_dynamic_update(int mode)
7567 {
7568 mutex_lock(&sched_dynamic_mutex);
7569 __sched_dynamic_update(mode);
7570 mutex_unlock(&sched_dynamic_mutex);
7571 }
7572
7573 #ifdef CONFIG_HAVE_PREEMPT_DYNAMIC_CALL
7574
7575 static int klp_cond_resched(void)
7576 {
7577 __klp_sched_try_switch();
7578 return __cond_resched();
7579 }
7580
7581 void sched_dynamic_klp_enable(void)
7582 {
7583 mutex_lock(&sched_dynamic_mutex);
7584
7585 klp_override = true;
7586 static_call_update(cond_resched, klp_cond_resched);
7587
7588 mutex_unlock(&sched_dynamic_mutex);
7589 }
7590
7591 void sched_dynamic_klp_disable(void)
7592 {
7593 mutex_lock(&sched_dynamic_mutex);
7594
7595 klp_override = false;
7596 __sched_dynamic_update(preempt_dynamic_mode);
7597
7598 mutex_unlock(&sched_dynamic_mutex);
7599 }
7600
7601 #endif /* CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
7602
7603 static int __init setup_preempt_mode(char *str)
7604 {
7605 int mode = sched_dynamic_mode(str);
7606 if (mode < 0) {
7607 pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
7608 return 0;
7609 }
7610
7611 sched_dynamic_update(mode);
7612 return 1;
7613 }
7614 __setup("preempt=", setup_preempt_mode);
7615
7616 static void __init preempt_dynamic_init(void)
7617 {
7618 if (preempt_dynamic_mode == preempt_dynamic_undefined) {
7619 if (IS_ENABLED(CONFIG_PREEMPT_NONE)) {
7620 sched_dynamic_update(preempt_dynamic_none);
7621 } else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) {
7622 sched_dynamic_update(preempt_dynamic_voluntary);
7623 } else if (IS_ENABLED(CONFIG_PREEMPT_LAZY)) {
7624 sched_dynamic_update(preempt_dynamic_lazy);
7625 } else {
7626 /* Default static call setting, nothing to do */
7627 WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT));
7628 preempt_dynamic_mode = preempt_dynamic_full;
7629 pr_info("Dynamic Preempt: full\n");
7630 }
7631 }
7632 }
7633
7634 #define PREEMPT_MODEL_ACCESSOR(mode) \
7635 bool preempt_model_##mode(void) \
7636 { \
7637 WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \
7638 return preempt_dynamic_mode == preempt_dynamic_##mode; \
7639 } \
7640 EXPORT_SYMBOL_GPL(preempt_model_##mode)
7641
7642 PREEMPT_MODEL_ACCESSOR(none);
7643 PREEMPT_MODEL_ACCESSOR(voluntary);
7644 PREEMPT_MODEL_ACCESSOR(full);
7645 PREEMPT_MODEL_ACCESSOR(lazy);
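
/*
 * Illustrative note (editor's addition): for reference,
 * PREEMPT_MODEL_ACCESSOR(full) expands to:
 *
 *	bool preempt_model_full(void)
 *	{
 *		WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined);
 *		return preempt_dynamic_mode == preempt_dynamic_full;
 *	}
 *	EXPORT_SYMBOL_GPL(preempt_model_full);
 */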
7646
7647 #else /* !CONFIG_PREEMPT_DYNAMIC: */
7648
7649 static inline void preempt_dynamic_init(void) { }
7650
7651 #endif /* CONFIG_PREEMPT_DYNAMIC */
7652
7653 int io_schedule_prepare(void)
7654 {
7655 int old_iowait = current->in_iowait;
7656
7657 current->in_iowait = 1;
7658 blk_flush_plug(current->plug, true);
7659 return old_iowait;
7660 }
7661
7662 void io_schedule_finish(int token)
7663 {
7664 current->in_iowait = token;
7665 }
7666
7667 /*
7668 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
7669 * that process accounting knows that this is a task in IO wait state.
7670 */
7671 long __sched io_schedule_timeout(long timeout)
7672 {
7673 int token;
7674 long ret;
7675
7676 token = io_schedule_prepare();
7677 ret = schedule_timeout(timeout);
7678 io_schedule_finish(token);
7679
7680 return ret;
7681 }
7682 EXPORT_SYMBOL(io_schedule_timeout);
7683
7684 void __sched io_schedule(void)
7685 {
7686 int token;
7687
7688 token = io_schedule_prepare();
7689 schedule();
7690 io_schedule_finish(token);
7691 }
7692 EXPORT_SYMBOL(io_schedule);
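
/*
 * Illustrative sketch (editor's addition): io_schedule_prepare() and
 * io_schedule_finish() exist so that blocking primitives other than a bare
 * schedule() can be accounted as IO wait as well, e.g.:
 *
 *	int token = io_schedule_prepare();
 *
 *	wait_for_completion(&done);	// any blocking call
 *	io_schedule_finish(token);
 *
 * mutex_lock_io() in the locking code follows this shape.
 */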
7693
7694 void sched_show_task(struct task_struct *p)
7695 {
7696 unsigned long free;
7697 int ppid;
7698
7699 if (!try_get_task_stack(p))
7700 return;
7701
7702 pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
7703
7704 if (task_is_running(p))
7705 pr_cont(" running task ");
7706 free = stack_not_used(p);
7707 ppid = 0;
7708 rcu_read_lock();
7709 if (pid_alive(p))
7710 ppid = task_pid_nr(rcu_dereference(p->real_parent));
7711 rcu_read_unlock();
7712 pr_cont(" stack:%-5lu pid:%-5d tgid:%-5d ppid:%-6d task_flags:0x%04x flags:0x%08lx\n",
7713 free, task_pid_nr(p), task_tgid_nr(p),
7714 ppid, p->flags, read_task_thread_flags(p));
7715
7716 print_worker_info(KERN_INFO, p);
7717 print_stop_info(KERN_INFO, p);
7718 print_scx_info(KERN_INFO, p);
7719 show_stack(p, NULL, KERN_INFO);
7720 put_task_stack(p);
7721 }
7722 EXPORT_SYMBOL_GPL(sched_show_task);
7723
7724 static inline bool
7725 state_filter_match(unsigned long state_filter, struct task_struct *p)
7726 {
7727 unsigned int state = READ_ONCE(p->__state);
7728
7729 /* no filter, everything matches */
7730 if (!state_filter)
7731 return true;
7732
7733 /* filter, but doesn't match */
7734 if (!(state & state_filter))
7735 return false;
7736
7737 /*
7738 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
7739 * TASK_KILLABLE).
7740 */
7741 if (state_filter == TASK_UNINTERRUPTIBLE && (state & TASK_NOLOAD))
7742 return false;
7743
7744 return true;
7745 }
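
/*
 * Illustrative note (editor's addition): since
 * TASK_IDLE == TASK_UNINTERRUPTIBLE | TASK_NOLOAD, a sysrq-w style dump
 * using state_filter == TASK_UNINTERRUPTIBLE reports 'D' tasks (including
 * TASK_KILLABLE ones) but skips idle kthreads:
 *
 *	state_filter_match(TASK_UNINTERRUPTIBLE, d_state_task)	-> true
 *	state_filter_match(TASK_UNINTERRUPTIBLE, idle_kthread)	-> false
 */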
7746
7747
7748 void show_state_filter(unsigned int state_filter)
7749 {
7750 struct task_struct *g, *p;
7751
7752 rcu_read_lock();
7753 for_each_process_thread(g, p) {
7754 /*
7755 * reset the NMI-timeout: listing all tasks on a slow
7756 * console might take a lot of time:
7757 * Also, reset softlockup watchdogs on all CPUs, because
7758 * another CPU might be blocked waiting for us to process
7759 * an IPI.
7760 */
7761 touch_nmi_watchdog();
7762 touch_all_softlockup_watchdogs();
7763 if (state_filter_match(state_filter, p))
7764 sched_show_task(p);
7765 }
7766
7767 #ifdef CONFIG_SCHED_DEBUG
7768 if (!state_filter)
7769 sysrq_sched_debug_show();
7770 #endif
7771 rcu_read_unlock();
7772 /*
7773 * Only show locks if all tasks are dumped:
7774 */
7775 if (!state_filter)
7776 debug_show_all_locks();
7777 }
7778
7779 /**
7780 * init_idle - set up an idle thread for a given CPU
7781 * @idle: task in question
7782 * @cpu: CPU the idle task belongs to
7783 *
7784 * NOTE: this function does not set the idle thread's NEED_RESCHED
7785 * flag, to make booting more robust.
7786 */
7787 void __init init_idle(struct task_struct *idle, int cpu)
7788 {
7789 #ifdef CONFIG_SMP
7790 struct affinity_context ac = (struct affinity_context) {
7791 .new_mask = cpumask_of(cpu),
7792 .flags = 0,
7793 };
7794 #endif
7795 struct rq *rq = cpu_rq(cpu);
7796 unsigned long flags;
7797
7798 raw_spin_lock_irqsave(&idle->pi_lock, flags);
7799 raw_spin_rq_lock(rq);
7800
7801 idle->__state = TASK_RUNNING;
7802 idle->se.exec_start = sched_clock();
7803 /*
7804 * PF_KTHREAD should already be set at this point; regardless, make it
7805 * look like a proper per-CPU kthread.
7806 */
7807 idle->flags |= PF_KTHREAD | PF_NO_SETAFFINITY;
7808 kthread_set_per_cpu(idle, cpu);
7809
7810 #ifdef CONFIG_SMP
7811 /*
7812 * No validation and serialization required at boot time and for
7813 * setting up the idle tasks of not yet online CPUs.
7814 */
7815 set_cpus_allowed_common(idle, &ac);
7816 #endif
7817 /*
7818 * We're having a chicken and egg problem, even though we are
7819 * holding rq->lock, the CPU isn't yet set to this CPU so the
7820 * lockdep check in task_group() will fail.
7821 *
7822 * Similar case to sched_fork(). / Alternatively we could
7823 * use task_rq_lock() here and obtain the other rq->lock.
7824 *
7825 * Silence PROVE_RCU
7826 */
7827 rcu_read_lock();
7828 __set_task_cpu(idle, cpu);
7829 rcu_read_unlock();
7830
7831 rq->idle = idle;
7832 rq_set_donor(rq, idle);
7833 rcu_assign_pointer(rq->curr, idle);
7834 idle->on_rq = TASK_ON_RQ_QUEUED;
7835 #ifdef CONFIG_SMP
7836 idle->on_cpu = 1;
7837 #endif
7838 raw_spin_rq_unlock(rq);
7839 raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
7840
7841 /* Set the preempt count _outside_ the spinlocks! */
7842 init_idle_preempt_count(idle, cpu);
7843
7844 /*
7845 * The idle tasks have their own, simple scheduling class:
7846 */
7847 idle->sched_class = &idle_sched_class;
7848 ftrace_graph_init_idle_task(idle, cpu);
7849 vtime_init_idle(idle, cpu);
7850 #ifdef CONFIG_SMP
7851 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
7852 #endif
7853 }
7854
7855 #ifdef CONFIG_SMP
7856
7857 int cpuset_cpumask_can_shrink(const struct cpumask *cur,
7858 const struct cpumask *trial)
7859 {
7860 int ret = 1;
7861
7862 if (cpumask_empty(cur))
7863 return ret;
7864
7865 ret = dl_cpuset_cpumask_can_shrink(cur, trial);
7866
7867 return ret;
7868 }
7869
7870 int task_can_attach(struct task_struct *p)
7871 {
7872 int ret = 0;
7873
7874 /*
7875 * Kthreads which disallow setaffinity shouldn't be moved
7876 * to a new cpuset; we don't want to change their CPU
7877 * affinity and isolating such threads by their set of
7878 * allowed nodes is unnecessary. Thus, cpusets are not
7879 * applicable for such threads. This prevents checking for
7880 * success of set_cpus_allowed_ptr() on all attached tasks
7881 * before cpus_mask may be changed.
7882 */
7883 if (p->flags & PF_NO_SETAFFINITY)
7884 ret = -EINVAL;
7885
7886 return ret;
7887 }
7888
7889 bool sched_smp_initialized __read_mostly;
7890
7891 #ifdef CONFIG_NUMA_BALANCING
7892 /* Migrate current task p to target_cpu */
7893 int migrate_task_to(struct task_struct *p, int target_cpu)
7894 {
7895 struct migration_arg arg = { p, target_cpu };
7896 int curr_cpu = task_cpu(p);
7897
7898 if (curr_cpu == target_cpu)
7899 return 0;
7900
7901 if (!cpumask_test_cpu(target_cpu, p->cpus_ptr))
7902 return -EINVAL;
7903
7904 /* TODO: This is not properly updating schedstats */
7905
7906 trace_sched_move_numa(p, curr_cpu, target_cpu);
7907 return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
7908 }
7909
7910 /*
7911 * Requeue a task on a given node and accurately track the number of NUMA
7912 * tasks on the runqueues
7913 */
7914 void sched_setnuma(struct task_struct *p, int nid)
7915 {
7916 bool queued, running;
7917 struct rq_flags rf;
7918 struct rq *rq;
7919
7920 rq = task_rq_lock(p, &rf);
7921 queued = task_on_rq_queued(p);
7922 running = task_current_donor(rq, p);
7923
7924 if (queued)
7925 dequeue_task(rq, p, DEQUEUE_SAVE);
7926 if (running)
7927 put_prev_task(rq, p);
7928
7929 p->numa_preferred_nid = nid;
7930
7931 if (queued)
7932 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
7933 if (running)
7934 set_next_task(rq, p);
7935 task_rq_unlock(rq, p, &rf);
7936 }
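
/*
 * Illustrative sketch (editor's addition): sched_setnuma() follows the same
 * dequeue/change/enqueue pattern used by rt_mutex_setprio() above and
 * sched_move_task() below:
 *
 *	queued = task_on_rq_queued(p);
 *	running = task_current_donor(rq, p);
 *	if (queued)
 *		dequeue_task(rq, p, flags);
 *	if (running)
 *		put_prev_task(rq, p);
 *
 *	// ... mutate the task's scheduling state here ...
 *
 *	if (queued)
 *		enqueue_task(rq, p, flags);
 *	if (running)
 *		set_next_task(rq, p);
 */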
7937 #endif /* CONFIG_NUMA_BALANCING */
7938
7939 #ifdef CONFIG_HOTPLUG_CPU
7940 /*
7941 * Invoked on the outgoing CPU in context of the CPU hotplug thread
7942 * after ensuring that there are no user space tasks left on the CPU.
7943 *
7944 * If there is a lazy mm in use on the hotplug thread, drop it and
7945 * switch to init_mm.
7946 *
7947 * The reference count on init_mm is dropped in finish_cpu().
7948 */
7949 static void sched_force_init_mm(void)
7950 {
7951 struct mm_struct *mm = current->active_mm;
7952
7953 if (mm != &init_mm) {
7954 mmgrab_lazy_tlb(&init_mm);
7955 local_irq_disable();
7956 current->active_mm = &init_mm;
7957 switch_mm_irqs_off(mm, &init_mm, current);
7958 local_irq_enable();
7959 finish_arch_post_lock_switch();
7960 mmdrop_lazy_tlb(mm);
7961 }
7962
7963 /* finish_cpu(), as run on the BP, will clean up the active_mm state */
7964 }
7965
7966 static int __balance_push_cpu_stop(void *arg)
7967 {
7968 struct task_struct *p = arg;
7969 struct rq *rq = this_rq();
7970 struct rq_flags rf;
7971 int cpu;
7972
7973 raw_spin_lock_irq(&p->pi_lock);
7974 rq_lock(rq, &rf);
7975
7976 update_rq_clock(rq);
7977
7978 if (task_rq(p) == rq && task_on_rq_queued(p)) {
7979 cpu = select_fallback_rq(rq->cpu, p);
7980 rq = __migrate_task(rq, &rf, p, cpu);
7981 }
7982
7983 rq_unlock(rq, &rf);
7984 raw_spin_unlock_irq(&p->pi_lock);
7985
7986 put_task_struct(p);
7987
7988 return 0;
7989 }
7990
7991 static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
7992
7993 /*
7994 * Ensure we only run per-cpu kthreads once the CPU goes !active.
7995 *
7996 * This is enabled below SCHED_AP_ACTIVE; when !cpu_active(), but only
7997 * effective when the hotplug motion is down.
7998 */
7999 static void balance_push(struct rq *rq)
8000 {
8001 struct task_struct *push_task = rq->curr;
8002
8003 lockdep_assert_rq_held(rq);
8004
8005 /*
8006 * Ensure the thing is persistent until balance_push_set(.on = false);
8007 */
8008 rq->balance_callback = &balance_push_callback;
8009
8010 /*
8011 * Only active while going offline and when invoked on the outgoing
8012 * CPU.
8013 */
8014 if (!cpu_dying(rq->cpu) || rq != this_rq())
8015 return;
8016
8017 /*
8018 * Both the cpu-hotplug and stop task are in this case and are
8019 * required to complete the hotplug process.
8020 */
8021 if (kthread_is_per_cpu(push_task) ||
8022 is_migration_disabled(push_task)) {
8023
8024 /*
8025 * If this is the idle task on the outgoing CPU try to wake
8026 * up the hotplug control thread which might wait for the
8027 * last task to vanish. The rcuwait_active() check is
8028 * accurate here because the waiter is pinned on this CPU
8029 * and can't obviously be running in parallel.
8030 *
8031 * On RT kernels this also has to check whether there are
8032 * pinned and scheduled out tasks on the runqueue. They
8033 * need to leave the migrate disabled section first.
8034 */
8035 if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
8036 rcuwait_active(&rq->hotplug_wait)) {
8037 raw_spin_rq_unlock(rq);
8038 rcuwait_wake_up(&rq->hotplug_wait);
8039 raw_spin_rq_lock(rq);
8040 }
8041 return;
8042 }
8043
8044 get_task_struct(push_task);
8045 /*
8046 * Temporarily drop rq->lock such that we can wake-up the stop task.
8047 * Both preemption and IRQs are still disabled.
8048 */
8049 preempt_disable();
8050 raw_spin_rq_unlock(rq);
8051 stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
8052 this_cpu_ptr(&push_work));
8053 preempt_enable();
8054 /*
8055 * At this point need_resched() is true and we'll take the loop in
8056 * schedule(). The next pick is obviously going to be the stop task
8057 * which is a per-CPU kthread (kthread_is_per_cpu()) and will push this task away.
8058 */
8059 raw_spin_rq_lock(rq);
8060 }
8061
8062 static void balance_push_set(int cpu, bool on)
8063 {
8064 struct rq *rq = cpu_rq(cpu);
8065 struct rq_flags rf;
8066
8067 rq_lock_irqsave(rq, &rf);
8068 if (on) {
8069 WARN_ON_ONCE(rq->balance_callback);
8070 rq->balance_callback = &balance_push_callback;
8071 } else if (rq->balance_callback == &balance_push_callback) {
8072 rq->balance_callback = NULL;
8073 }
8074 rq_unlock_irqrestore(rq, &rf);
8075 }
8076
8077 /*
8078 * Invoked from a CPU's hotplug control thread after the CPU has been marked
8079 * inactive. All tasks which are not per CPU kernel threads are either
8080 * pushed off this CPU now via balance_push() or placed on a different CPU
8081 * during wakeup. Wait until the CPU is quiescent.
8082 */
8083 static void balance_hotplug_wait(void)
8084 {
8085 struct rq *rq = this_rq();
8086
8087 rcuwait_wait_event(&rq->hotplug_wait,
8088 rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
8089 TASK_UNINTERRUPTIBLE);
8090 }
8091
8092 #else
8093
8094 static inline void balance_push(struct rq *rq)
8095 {
8096 }
8097
8098 static inline void balance_push_set(int cpu, bool on)
8099 {
8100 }
8101
8102 static inline void balance_hotplug_wait(void)
8103 {
8104 }
8105
8106 #endif /* CONFIG_HOTPLUG_CPU */
8107
8108 void set_rq_online(struct rq *rq)
8109 {
8110 if (!rq->online) {
8111 const struct sched_class *class;
8112
8113 cpumask_set_cpu(rq->cpu, rq->rd->online);
8114 rq->online = 1;
8115
8116 for_each_class(class) {
8117 if (class->rq_online)
8118 class->rq_online(rq);
8119 }
8120 }
8121 }
8122
8123 void set_rq_offline(struct rq *rq)
8124 {
8125 if (rq->online) {
8126 const struct sched_class *class;
8127
8128 update_rq_clock(rq);
8129 for_each_class(class) {
8130 if (class->rq_offline)
8131 class->rq_offline(rq);
8132 }
8133
8134 cpumask_clear_cpu(rq->cpu, rq->rd->online);
8135 rq->online = 0;
8136 }
8137 }
8138
8139 static inline void sched_set_rq_online(struct rq *rq, int cpu)
8140 {
8141 struct rq_flags rf;
8142
8143 rq_lock_irqsave(rq, &rf);
8144 if (rq->rd) {
8145 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
8146 set_rq_online(rq);
8147 }
8148 rq_unlock_irqrestore(rq, &rf);
8149 }
8150
8151 static inline void sched_set_rq_offline(struct rq *rq, int cpu)
8152 {
8153 struct rq_flags rf;
8154
8155 rq_lock_irqsave(rq, &rf);
8156 if (rq->rd) {
8157 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
8158 set_rq_offline(rq);
8159 }
8160 rq_unlock_irqrestore(rq, &rf);
8161 }
8162
8163 /*
8164 * used to mark begin/end of suspend/resume:
8165 */
8166 static int num_cpus_frozen;
8167
8168 /*
8169 * Update cpusets according to cpu_active mask. If cpusets are
8170 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
8171 * around partition_sched_domains().
8172 *
8173 * If we come here as part of a suspend/resume, don't touch cpusets because we
8174 * want to restore it back to its original state upon resume anyway.
8175 */
8176 static void cpuset_cpu_active(void)
8177 {
8178 if (cpuhp_tasks_frozen) {
8179 /*
8180 * num_cpus_frozen tracks how many CPUs are involved in suspend
8181 * resume sequence. As long as this is not the last online
8182 * operation in the resume sequence, just build a single sched
8183 * domain, ignoring cpusets.
8184 */
8185 partition_sched_domains(1, NULL, NULL);
8186 if (--num_cpus_frozen)
8187 return;
8188 /*
8189 * This is the last CPU online operation. So fall through and
8190 * restore the original sched domains by considering the
8191 * cpuset configurations.
8192 */
8193 cpuset_force_rebuild();
8194 }
8195 cpuset_update_active_cpus();
8196 }
8197
8198 static void cpuset_cpu_inactive(unsigned int cpu)
8199 {
8200 if (!cpuhp_tasks_frozen) {
8201 cpuset_update_active_cpus();
8202 } else {
8203 num_cpus_frozen++;
8204 partition_sched_domains(1, NULL, NULL);
8205 }
8206 }
8207
8208 static inline void sched_smt_present_inc(int cpu)
8209 {
8210 #ifdef CONFIG_SCHED_SMT
8211 if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
8212 static_branch_inc_cpuslocked(&sched_smt_present);
8213 #endif
8214 }
8215
8216 static inline void sched_smt_present_dec(int cpu)
8217 {
8218 #ifdef CONFIG_SCHED_SMT
8219 if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
8220 static_branch_dec_cpuslocked(&sched_smt_present);
8221 #endif
8222 }
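
/*
 * Illustrative note (editor's addition): cpu_smt_mask() tracks online
 * siblings, so the weight == 2 test fires exactly on the 1 <-> 2 online
 * sibling transitions of a core. For a core with threads {0,1}:
 *
 *	onlining CPU1:  weight becomes 2 -> static_branch_inc()
 *	offlining CPU1: weight still 2 at the call -> static_branch_dec()
 *
 * keeping sched_smt_present enabled iff some core has SMT siblings online.
 */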
8223
8224 int sched_cpu_activate(unsigned int cpu)
8225 {
8226 struct rq *rq = cpu_rq(cpu);
8227
8228 /*
8229 * Clear the balance_push callback and prepare to schedule
8230 * regular tasks.
8231 */
8232 balance_push_set(cpu, false);
8233
8234 /*
8235 * When going up, increment the number of cores with SMT present.
8236 */
8237 sched_smt_present_inc(cpu);
8238 set_cpu_active(cpu, true);
8239
8240 if (sched_smp_initialized) {
8241 sched_update_numa(cpu, true);
8242 sched_domains_numa_masks_set(cpu);
8243 cpuset_cpu_active();
8244 }
8245
8246 scx_rq_activate(rq);
8247
8248 /*
8249 * Put the rq online, if not already. This happens:
8250 *
8251 * 1) In the early boot process, because we build the real domains
8252 * after all CPUs have been brought up.
8253 *
8254 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
8255 * domains.
8256 */
8257 sched_set_rq_online(rq, cpu);
8258
8259 return 0;
8260 }
8261
8262 int sched_cpu_deactivate(unsigned int cpu)
8263 {
8264 struct rq *rq = cpu_rq(cpu);
8265 int ret;
8266
8267 ret = dl_bw_deactivate(cpu);
8268
8269 if (ret)
8270 return ret;
8271
8272 /*
8273 * Remove CPU from nohz.idle_cpus_mask to prevent participating in
8274 * load balancing when not active
8275 */
8276 nohz_balance_exit_idle(rq);
8277
8278 set_cpu_active(cpu, false);
8279
8280 /*
8281 * From this point forward, this CPU will refuse to run any task that
8282 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
8283 * push those tasks away until this gets cleared, see
8284 * sched_cpu_dying().
8285 */
8286 balance_push_set(cpu, true);
8287
8288 /*
8289 * We've cleared cpu_active_mask / set balance_push, wait for all
8290 * preempt-disabled and RCU users of this state to go away such that
8291 * all new such users will observe it.
8292 *
8293 * Specifically, we rely on ttwu to no longer target this CPU, see
8294 * ttwu_queue_cond() and is_cpu_allowed().
8295 *
8296 * Do the synchronization before parking the smpboot threads, to take care of the RCU boost case.
8297 */
8298 synchronize_rcu();
8299
8300 sched_set_rq_offline(rq, cpu);
8301
8302 scx_rq_deactivate(rq);
8303
8304 /*
8305 * When going down, decrement the number of cores with SMT present.
8306 */
8307 sched_smt_present_dec(cpu);
8308
8309 #ifdef CONFIG_SCHED_SMT
8310 sched_core_cpu_deactivate(cpu);
8311 #endif
8312
8313 if (!sched_smp_initialized)
8314 return 0;
8315
8316 sched_update_numa(cpu, false);
8317 cpuset_cpu_inactive(cpu);
8318 sched_domains_numa_masks_clear(cpu);
8319 return 0;
8320 }
8321
8322 static void sched_rq_cpu_starting(unsigned int cpu)
8323 {
8324 struct rq *rq = cpu_rq(cpu);
8325
8326 rq->calc_load_update = calc_load_update;
8327 update_max_interval();
8328 }
8329
8330 int sched_cpu_starting(unsigned int cpu)
8331 {
8332 sched_core_cpu_starting(cpu);
8333 sched_rq_cpu_starting(cpu);
8334 sched_tick_start(cpu);
8335 return 0;
8336 }
8337
8338 #ifdef CONFIG_HOTPLUG_CPU
8339
8340 /*
8341 * Invoked immediately before the stopper thread is invoked to bring the
8342 * CPU down completely. At this point all per CPU kthreads except the
8343 * hotplug thread (current) and the stopper thread (inactive) have been
8344 * either parked or have been unbound from the outgoing CPU. Ensure that
8345 * any of those which might be on the way out are gone.
8346 *
8347 * If after this point a bound task is being woken on this CPU then the
8348 * responsible hotplug callback has failed to do its job.
8349 * sched_cpu_dying() will catch it with the appropriate fireworks.
8350 */
8351 int sched_cpu_wait_empty(unsigned int cpu)
8352 {
8353 balance_hotplug_wait();
8354 sched_force_init_mm();
8355 return 0;
8356 }
8357
8358 /*
8359 * Since this CPU is going 'away' for a while, fold any nr_active delta we
8360 * might have. Called from the CPU stopper task after ensuring that the
8361 * stopper is the last running task on the CPU, so nr_active count is
8362 * stable. We need to take the tear-down thread which is calling this into
8363 * account, so we hand in adjust = 1 to the load calculation.
8364 *
8365 * Also see the comment "Global load-average calculations".
8366 */
8367 static void calc_load_migrate(struct rq *rq)
8368 {
8369 long delta = calc_load_fold_active(rq, 1);
8370
8371 if (delta)
8372 atomic_long_add(delta, &calc_load_tasks);
8373 }
8374
8375 static void dump_rq_tasks(struct rq *rq, const char *loglvl)
8376 {
8377 struct task_struct *g, *p;
8378 int cpu = cpu_of(rq);
8379
8380 lockdep_assert_rq_held(rq);
8381
8382 printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
8383 for_each_process_thread(g, p) {
8384 if (task_cpu(p) != cpu)
8385 continue;
8386
8387 if (!task_on_rq_queued(p))
8388 continue;
8389
8390 printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
8391 }
8392 }
8393
8394 int sched_cpu_dying(unsigned int cpu)
8395 {
8396 struct rq *rq = cpu_rq(cpu);
8397 struct rq_flags rf;
8398
8399 /* Handle pending wakeups and then migrate everything off */
8400 sched_tick_stop(cpu);
8401
8402 rq_lock_irqsave(rq, &rf);
8403 if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
8404 WARN(true, "Dying CPU not properly vacated!");
8405 dump_rq_tasks(rq, KERN_WARNING);
8406 }
8407 rq_unlock_irqrestore(rq, &rf);
8408
8409 calc_load_migrate(rq);
8410 update_max_interval();
8411 hrtick_clear(rq);
8412 sched_core_cpu_dying(cpu);
8413 return 0;
8414 }
8415 #endif
8416
8417 void __init sched_init_smp(void)
8418 {
8419 sched_init_numa(NUMA_NO_NODE);
8420
8421 /*
8422 * There's no userspace yet to cause hotplug operations; hence all the
8423 * CPU masks are stable and all blatant races in the below code cannot
8424 * happen.
8425 */
8426 mutex_lock(&sched_domains_mutex);
8427 sched_init_domains(cpu_active_mask);
8428 mutex_unlock(&sched_domains_mutex);
8429
8430 /* Move init over to a non-isolated CPU */
8431 if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0)
8432 BUG();
8433 current->flags &= ~PF_NO_SETAFFINITY;
8434 sched_init_granularity();
8435
8436 init_sched_rt_class();
8437 init_sched_dl_class();
8438
8439 sched_smp_initialized = true;
8440 }
8441
8442 static int __init migration_init(void)
8443 {
8444 sched_cpu_starting(smp_processor_id());
8445 return 0;
8446 }
8447 early_initcall(migration_init);
8448
8449 #else
8450 void __init sched_init_smp(void)
8451 {
8452 sched_init_granularity();
8453 }
8454 #endif /* CONFIG_SMP */
8455
8456 int in_sched_functions(unsigned long addr)
8457 {
8458 return in_lock_functions(addr) ||
8459 (addr >= (unsigned long)__sched_text_start
8460 && addr < (unsigned long)__sched_text_end);
8461 }
8462
8463 #ifdef CONFIG_CGROUP_SCHED
8464 /*
8465 * Default task group.
8466 * Every task in the system belongs to this group at bootup.
8467 */
8468 struct task_group root_task_group;
8469 LIST_HEAD(task_groups);
8470
8471 /* Cacheline aligned slab cache for task_group */
8472 static struct kmem_cache *task_group_cache __ro_after_init;
8473 #endif
8474
8475 void __init sched_init(void)
8476 {
8477 unsigned long ptr = 0;
8478 int i;
8479
8480 /* Make sure the linker didn't screw up */
8481 #ifdef CONFIG_SMP
8482 BUG_ON(!sched_class_above(&stop_sched_class, &dl_sched_class));
8483 #endif
8484 BUG_ON(!sched_class_above(&dl_sched_class, &rt_sched_class));
8485 BUG_ON(!sched_class_above(&rt_sched_class, &fair_sched_class));
8486 BUG_ON(!sched_class_above(&fair_sched_class, &idle_sched_class));
8487 #ifdef CONFIG_SCHED_CLASS_EXT
8488 BUG_ON(!sched_class_above(&fair_sched_class, &ext_sched_class));
8489 BUG_ON(!sched_class_above(&ext_sched_class, &idle_sched_class));
8490 #endif
8491
8492 wait_bit_init();
8493
8494 #ifdef CONFIG_FAIR_GROUP_SCHED
8495 ptr += 2 * nr_cpu_ids * sizeof(void **);
8496 #endif
8497 #ifdef CONFIG_RT_GROUP_SCHED
8498 ptr += 2 * nr_cpu_ids * sizeof(void **);
8499 #endif
8500 if (ptr) {
8501 ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT);
8502
8503 #ifdef CONFIG_FAIR_GROUP_SCHED
8504 root_task_group.se = (struct sched_entity **)ptr;
8505 ptr += nr_cpu_ids * sizeof(void **);
8506
8507 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
8508 ptr += nr_cpu_ids * sizeof(void **);
8509
8510 root_task_group.shares = ROOT_TASK_GROUP_LOAD;
8511 init_cfs_bandwidth(&root_task_group.cfs_bandwidth, NULL);
8512 #endif /* CONFIG_FAIR_GROUP_SCHED */
8513 #ifdef CONFIG_EXT_GROUP_SCHED
8514 root_task_group.scx_weight = CGROUP_WEIGHT_DFL;
8515 #endif /* CONFIG_EXT_GROUP_SCHED */
8516 #ifdef CONFIG_RT_GROUP_SCHED
8517 root_task_group.rt_se = (struct sched_rt_entity **)ptr;
8518 ptr += nr_cpu_ids * sizeof(void **);
8519
8520 root_task_group.rt_rq = (struct rt_rq **)ptr;
8521 ptr += nr_cpu_ids * sizeof(void **);
8522
8523 #endif /* CONFIG_RT_GROUP_SCHED */
8524 }
8525
8526 #ifdef CONFIG_SMP
8527 init_defrootdomain();
8528 #endif
8529
8530 #ifdef CONFIG_RT_GROUP_SCHED
8531 init_rt_bandwidth(&root_task_group.rt_bandwidth,
8532 global_rt_period(), global_rt_runtime());
8533 #endif /* CONFIG_RT_GROUP_SCHED */
8534
8535 #ifdef CONFIG_CGROUP_SCHED
8536 task_group_cache = KMEM_CACHE(task_group, 0);
8537
8538 list_add(&root_task_group.list, &task_groups);
8539 INIT_LIST_HEAD(&root_task_group.children);
8540 INIT_LIST_HEAD(&root_task_group.siblings);
8541 autogroup_init(&init_task);
8542 #endif /* CONFIG_CGROUP_SCHED */
8543
8544 for_each_possible_cpu(i) {
8545 struct rq *rq;
8546
8547 rq = cpu_rq(i);
8548 raw_spin_lock_init(&rq->__lock);
8549 rq->nr_running = 0;
8550 rq->calc_load_active = 0;
8551 rq->calc_load_update = jiffies + LOAD_FREQ;
8552 init_cfs_rq(&rq->cfs);
8553 init_rt_rq(&rq->rt);
8554 init_dl_rq(&rq->dl);
8555 #ifdef CONFIG_FAIR_GROUP_SCHED
8556 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
8557 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
8558 /*
8559 * How much CPU bandwidth does root_task_group get?
8560 *
8561 * In case of task-groups formed through the cgroup filesystem, it
8562 * gets 100% of the CPU resources in the system. This overall
8563 * system CPU resource is divided among the tasks of
8564 * root_task_group and its child task-groups in a fair manner,
8565 * based on each entity's (task or task-group's) weight
8566 * (se->load.weight).
8567 *
8568 * In other words, if root_task_group has 10 tasks (each of weight
8569 * 1024) and two child groups A0 and A1 (of weight 1024 each),
8570 * then A0's share of the CPU resource is:
8571 *
8572 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
8573 *
8574 * We achieve this by letting root_task_group's tasks sit
8575 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
8576 */
8577 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
8578 #endif /* CONFIG_FAIR_GROUP_SCHED */
8579
8580 #ifdef CONFIG_RT_GROUP_SCHED
8581 /*
8582 * This is required for init cpu because rt.c:__enable_runtime()
8583 * starts working after scheduler_running, which is not the case
8584 * yet.
8585 */
8586 rq->rt.rt_runtime = global_rt_runtime();
8587 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
8588 #endif
8589 #ifdef CONFIG_SMP
8590 rq->sd = NULL;
8591 rq->rd = NULL;
8592 rq->cpu_capacity = SCHED_CAPACITY_SCALE;
8593 rq->balance_callback = &balance_push_callback;
8594 rq->active_balance = 0;
8595 rq->next_balance = jiffies;
8596 rq->push_cpu = 0;
8597 rq->cpu = i;
8598 rq->online = 0;
8599 rq->idle_stamp = 0;
8600 rq->avg_idle = 2*sysctl_sched_migration_cost;
8601 rq->max_idle_balance_cost = sysctl_sched_migration_cost;
8602
8603 INIT_LIST_HEAD(&rq->cfs_tasks);
8604
8605 rq_attach_root(rq, &def_root_domain);
8606 #ifdef CONFIG_NO_HZ_COMMON
8607 rq->last_blocked_load_update_tick = jiffies;
8608 atomic_set(&rq->nohz_flags, 0);
8609
8610 INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
8611 #endif
8612 #ifdef CONFIG_HOTPLUG_CPU
8613 rcuwait_init(&rq->hotplug_wait);
8614 #endif
8615 #endif /* CONFIG_SMP */
8616 hrtick_rq_init(rq);
8617 atomic_set(&rq->nr_iowait, 0);
8618 fair_server_init(rq);
8619
8620 #ifdef CONFIG_SCHED_CORE
8621 rq->core = rq;
8622 rq->core_pick = NULL;
8623 rq->core_dl_server = NULL;
8624 rq->core_enabled = 0;
8625 rq->core_tree = RB_ROOT;
8626 rq->core_forceidle_count = 0;
8627 rq->core_forceidle_occupation = 0;
8628 rq->core_forceidle_start = 0;
8629
8630 rq->core_cookie = 0UL;
8631 #endif
8632 zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i));
8633 }
8634
8635 set_load_weight(&init_task, false);
8636 init_task.se.slice = sysctl_sched_base_slice;
8637
8638 /*
8639 * The boot idle thread does lazy MMU switching as well:
8640 */
8641 mmgrab_lazy_tlb(&init_mm);
8642 enter_lazy_tlb(&init_mm, current);
8643
8644 /*
8645 * The idle task doesn't need the kthread struct to function, but it
8646 * is dressed up as a per-CPU kthread and thus needs to play the part
8647 * if we want to avoid special-casing it in code that deals with per-CPU
8648 * kthreads.
8649 */
8650 WARN_ON(!set_kthread_struct(current));
8651
8652 /*
8653 * Make us the idle thread. Technically, schedule() should not be
8654 * called from this thread, however somewhere below it might be,
8655 * but because we are the idle thread, we just pick up running again
8656 * when this runqueue becomes "idle".
8657 */
8658 __sched_fork(0, current);
8659 init_idle(current, smp_processor_id());
8660
8661 calc_load_update = jiffies + LOAD_FREQ;
8662
8663 #ifdef CONFIG_SMP
8664 idle_thread_set_boot_cpu();
8665 balance_push_set(smp_processor_id(), false);
8666 #endif
8667 init_sched_fair_class();
8668 init_sched_ext_class();
8669
8670 psi_init();
8671
8672 init_uclamp();
8673
8674 preempt_dynamic_init();
8675
8676 scheduler_running = 1;
8677 }
8678
8679 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
8680
8681 void __might_sleep(const char *file, int line)
8682 {
8683 unsigned int state = get_current_state();
8684 /*
8685 * Blocking primitives will set (and therefore destroy) current->state,
8686 * since we will exit with TASK_RUNNING make sure we enter with it,
8687 * otherwise we will destroy state.
8688 */
8689 WARN_ONCE(state != TASK_RUNNING && current->task_state_change,
8690 "do not call blocking ops when !TASK_RUNNING; "
8691 "state=%x set at [<%p>] %pS\n", state,
8692 (void *)current->task_state_change,
8693 (void *)current->task_state_change);
8694
8695 __might_resched(file, line, 0);
8696 }
8697 EXPORT_SYMBOL(__might_sleep);
8698
8699 static void print_preempt_disable_ip(int preempt_offset, unsigned long ip)
8700 {
8701 if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT))
8702 return;
8703
8704 if (preempt_count() == preempt_offset)
8705 return;
8706
8707 pr_err("Preemption disabled at:");
8708 print_ip_sym(KERN_ERR, ip);
8709 }
8710
8711 static inline bool resched_offsets_ok(unsigned int offsets)
8712 {
8713 unsigned int nested = preempt_count();
8714
8715 nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT;
8716
8717 return nested == offsets;
8718 }
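
/*
 * Illustrative note (editor's addition): 'offsets' packs the expected
 * preempt count into MIGHT_RESCHED_PREEMPT_MASK and the expected RCU
 * read-side nesting into the bits above MIGHT_RESCHED_RCU_SHIFT. E.g. a
 * plain might_sleep() passes 0 (nothing disabled), while a caller
 * legitimately holding one RCU read lock would pass
 * 1 << MIGHT_RESCHED_RCU_SHIFT.
 */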
8719
8720 void __might_resched(const char *file, int line, unsigned int offsets)
8721 {
8722 /* Ratelimiting timestamp: */
8723 static unsigned long prev_jiffy;
8724
8725 unsigned long preempt_disable_ip;
8726
8727 /* WARN_ON_ONCE() by default, no rate limit required: */
8728 rcu_sleep_check();
8729
8730 if ((resched_offsets_ok(offsets) && !irqs_disabled() &&
8731 !is_idle_task(current) && !current->non_block_count) ||
8732 system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
8733 oops_in_progress)
8734 return;
8735
8736 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8737 return;
8738 prev_jiffy = jiffies;
8739
8740 /* Save this before calling printk(), since that will clobber it: */
8741 preempt_disable_ip = get_preempt_disable_ip(current);
8742
8743 pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
8744 file, line);
8745 pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
8746 in_atomic(), irqs_disabled(), current->non_block_count,
8747 current->pid, current->comm);
8748 pr_err("preempt_count: %x, expected: %x\n", preempt_count(),
8749 offsets & MIGHT_RESCHED_PREEMPT_MASK);
8750
8751 if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
8752 pr_err("RCU nest depth: %d, expected: %u\n",
8753 rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT);
8754 }
8755
8756 if (task_stack_end_corrupted(current))
8757 pr_emerg("Thread overran stack, or stack corrupted\n");
8758
8759 debug_show_held_locks(current);
8760 if (irqs_disabled())
8761 print_irqtrace_events(current);
8762
8763 print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK,
8764 preempt_disable_ip);
8765
8766 dump_stack();
8767 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
8768 }
8769 EXPORT_SYMBOL(__might_resched);
8770
8771 void __cant_sleep(const char *file, int line, int preempt_offset)
8772 {
8773 static unsigned long prev_jiffy;
8774
8775 if (irqs_disabled())
8776 return;
8777
8778 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
8779 return;
8780
8781 if (preempt_count() > preempt_offset)
8782 return;
8783
8784 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8785 return;
8786 prev_jiffy = jiffies;
8787
8788 printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
8789 printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
8790 in_atomic(), irqs_disabled(),
8791 current->pid, current->comm);
8792
8793 debug_show_held_locks(current);
8794 dump_stack();
8795 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
8796 }
8797 EXPORT_SYMBOL_GPL(__cant_sleep);
8798
8799 #ifdef CONFIG_SMP
8800 void __cant_migrate(const char *file, int line)
8801 {
8802 static unsigned long prev_jiffy;
8803
8804 if (irqs_disabled())
8805 return;
8806
8807 if (is_migration_disabled(current))
8808 return;
8809
8810 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
8811 return;
8812
8813 if (preempt_count() > 0)
8814 return;
8815
8816 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8817 return;
8818 prev_jiffy = jiffies;
8819
8820 pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
8821 pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
8822 in_atomic(), irqs_disabled(), is_migration_disabled(current),
8823 current->pid, current->comm);
8824
8825 debug_show_held_locks(current);
8826 dump_stack();
8827 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
8828 }
8829 EXPORT_SYMBOL_GPL(__cant_migrate);
8830 #endif
8831 #endif
8832
8833 #ifdef CONFIG_MAGIC_SYSRQ
8834 void normalize_rt_tasks(void)
8835 {
8836 struct task_struct *g, *p;
8837 struct sched_attr attr = {
8838 .sched_policy = SCHED_NORMAL,
8839 };
8840
8841 read_lock(&tasklist_lock);
8842 for_each_process_thread(g, p) {
8843 /*
8844 * Only normalize user tasks:
8845 */
8846 if (p->flags & PF_KTHREAD)
8847 continue;
8848
8849 p->se.exec_start = 0;
8850 schedstat_set(p->stats.wait_start, 0);
8851 schedstat_set(p->stats.sleep_start, 0);
8852 schedstat_set(p->stats.block_start, 0);
8853
8854 if (!rt_or_dl_task(p)) {
8855 /*
8856 * Renice negative nice level userspace
8857 * tasks back to 0:
8858 */
8859 if (task_nice(p) < 0)
8860 set_user_nice(p, 0);
8861 continue;
8862 }
8863
8864 __sched_setscheduler(p, &attr, false, false);
8865 }
8866 read_unlock(&tasklist_lock);
8867 }
8868
8869 #endif /* CONFIG_MAGIC_SYSRQ */
8870
8871 #if defined(CONFIG_KGDB_KDB)
8872 /*
8873 * These functions are only useful for KDB.
8874 *
8875 * They can only be called when the whole system has been
8876 * stopped - every CPU needs to be quiescent, and no scheduling
8877 * activity can take place. Using them for anything else would
8878 * be a serious bug, and as a result, they aren't even visible
8879 * under any other configuration.
8880 */
8881
8882 /**
8883 * curr_task - return the current task for a given CPU.
8884 * @cpu: the processor in question.
8885 *
8886 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
8887 *
8888 * Return: The current task for @cpu.
8889 */
8890 struct task_struct *curr_task(int cpu)
8891 {
8892 return cpu_curr(cpu);
8893 }
8894
8895 #endif /* defined(CONFIG_KGDB_KDB) */
8896
8897 #ifdef CONFIG_CGROUP_SCHED
8898 /* task_group_lock serializes the addition/removal of task groups */
8899 static DEFINE_SPINLOCK(task_group_lock);
8900
8901 static inline void alloc_uclamp_sched_group(struct task_group *tg,
8902 struct task_group *parent)
8903 {
8904 #ifdef CONFIG_UCLAMP_TASK_GROUP
8905 enum uclamp_id clamp_id;
8906
8907 for_each_clamp_id(clamp_id) {
8908 uclamp_se_set(&tg->uclamp_req[clamp_id],
8909 uclamp_none(clamp_id), false);
8910 tg->uclamp[clamp_id] = parent->uclamp[clamp_id];
8911 }
8912 #endif
8913 }
8914
8915 static void sched_free_group(struct task_group *tg)
8916 {
8917 free_fair_sched_group(tg);
8918 free_rt_sched_group(tg);
8919 autogroup_free(tg);
8920 kmem_cache_free(task_group_cache, tg);
8921 }
8922
8923 static void sched_free_group_rcu(struct rcu_head *rcu)
8924 {
8925 sched_free_group(container_of(rcu, struct task_group, rcu));
8926 }
8927
8928 static void sched_unregister_group(struct task_group *tg)
8929 {
8930 unregister_fair_sched_group(tg);
8931 unregister_rt_sched_group(tg);
8932 /*
8933 * We have to wait for yet another RCU grace period to expire, as
8934 * print_cfs_stats() might run concurrently.
8935 */
8936 call_rcu(&tg->rcu, sched_free_group_rcu);
8937 }
8938
8939 /* allocate runqueue etc for a new task group */
8940 struct task_group *sched_create_group(struct task_group *parent)
8941 {
8942 struct task_group *tg;
8943
8944 tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
8945 if (!tg)
8946 return ERR_PTR(-ENOMEM);
8947
8948 if (!alloc_fair_sched_group(tg, parent))
8949 goto err;
8950
8951 if (!alloc_rt_sched_group(tg, parent))
8952 goto err;
8953
8954 scx_group_set_weight(tg, CGROUP_WEIGHT_DFL);
8955 alloc_uclamp_sched_group(tg, parent);
8956
8957 return tg;
8958
8959 err:
8960 sched_free_group(tg);
8961 return ERR_PTR(-ENOMEM);
8962 }
8963
8964 void sched_online_group(struct task_group *tg, struct task_group *parent)
8965 {
8966 unsigned long flags;
8967
8968 spin_lock_irqsave(&task_group_lock, flags);
8969 list_add_rcu(&tg->list, &task_groups);
8970
8971 /* Root should already exist: */
8972 WARN_ON(!parent);
8973
8974 tg->parent = parent;
8975 INIT_LIST_HEAD(&tg->children);
8976 list_add_rcu(&tg->siblings, &parent->children);
8977 spin_unlock_irqrestore(&task_group_lock, flags);
8978
8979 online_fair_sched_group(tg);
8980 }
8981
8982 /* RCU callback to free various structures associated with a task group */
8983 static void sched_unregister_group_rcu(struct rcu_head *rhp)
8984 {
8985 /* Now it should be safe to free those cfs_rqs: */
8986 sched_unregister_group(container_of(rhp, struct task_group, rcu));
8987 }
8988
8989 void sched_destroy_group(struct task_group *tg)
8990 {
8991 /* Wait for possible concurrent references to cfs_rqs to complete: */
8992 call_rcu(&tg->rcu, sched_unregister_group_rcu);
8993 }
8994
8995 void sched_release_group(struct task_group *tg)
8996 {
8997 unsigned long flags;
8998
8999 /*
9000 * Unlink first, to avoid walk_tg_tree_from() from finding us (via
9001 * sched_cfs_period_timer()).
9002 *
9003 * For this to be effective, we have to wait for all pending users of
9004 * this task group to leave their RCU critical section to ensure no new
9005 * user will see our dying task group any more. Specifically ensure
9006 * that tg_unthrottle_up() won't add decayed cfs_rq's to it.
9007 *
9008 * We therefore defer calling unregister_fair_sched_group() to
9009 * sched_unregister_group() which is guaranteed to get called only after the
9010 * current RCU grace period has expired.
9011 */
9012 spin_lock_irqsave(&task_group_lock, flags);
9013 list_del_rcu(&tg->list);
9014 list_del_rcu(&tg->siblings);
9015 spin_unlock_irqrestore(&task_group_lock, flags);
9016 }
9017
9018 static struct task_group *sched_get_task_group(struct task_struct *tsk)
9019 {
9020 struct task_group *tg;
9021
9022 /*
9023 * All callers are synchronized by task_rq_lock(); we do not use RCU
9024 * which is pointless here. Thus, we pass "true" to task_css_check()
9025 * to prevent lockdep warnings.
9026 */
9027 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
9028 struct task_group, css);
9029 tg = autogroup_task_group(tsk, tg);
9030
9031 return tg;
9032 }
9033
9034 static void sched_change_group(struct task_struct *tsk, struct task_group *group)
9035 {
9036 tsk->sched_task_group = group;
9037
9038 #ifdef CONFIG_FAIR_GROUP_SCHED
9039 if (tsk->sched_class->task_change_group)
9040 tsk->sched_class->task_change_group(tsk);
9041 else
9042 #endif
9043 set_task_rq(tsk, task_cpu(tsk));
9044 }
9045
9046 /*
9047 * Change task's runqueue when it moves between groups.
9048 *
9049 * The caller of this function should have put the task in its new group by
9050 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
9051 * its new group.
9052 */
9053 void sched_move_task(struct task_struct *tsk, bool for_autogroup)
9054 {
9055 int queued, running, queue_flags =
9056 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
9057 struct task_group *group;
9058 struct rq *rq;
9059
9060 CLASS(task_rq_lock, rq_guard)(tsk);
9061 rq = rq_guard.rq;
9062
9063 /*
9064 * Esp. with SCHED_AUTOGROUP enabled it is possible to get superfluous
9065 * group changes.
9066 */
9067 group = sched_get_task_group(tsk);
9068 if (group == tsk->sched_task_group)
9069 return;
9070
9071 update_rq_clock(rq);
9072
9073 running = task_current_donor(rq, tsk);
9074 queued = task_on_rq_queued(tsk);
9075
9076 if (queued)
9077 dequeue_task(rq, tsk, queue_flags);
9078 if (running)
9079 put_prev_task(rq, tsk);
9080
9081 sched_change_group(tsk, group);
9082 if (!for_autogroup)
9083 scx_cgroup_move_task(tsk);
9084
9085 if (queued)
9086 enqueue_task(rq, tsk, queue_flags);
9087 if (running) {
9088 set_next_task(rq, tsk);
9089 /*
9090 * After changing group, the running task may have joined a
9091 * throttled one but it's still the running task. Trigger a
9092 * resched to make sure that task can still run.
9093 */
9094 resched_curr(rq);
9095 }
9096 }
9097
9098 static struct cgroup_subsys_state *
cpu_cgroup_css_alloc(struct cgroup_subsys_state * parent_css)9099 cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
9100 {
9101 struct task_group *parent = css_tg(parent_css);
9102 struct task_group *tg;
9103
9104 if (!parent) {
9105 /* This is early initialization for the top cgroup */
9106 return &root_task_group.css;
9107 }
9108
9109 tg = sched_create_group(parent);
9110 if (IS_ERR(tg))
9111 return ERR_PTR(-ENOMEM);
9112
9113 return &tg->css;
9114 }
9115
9116 /* Expose task group only after completing cgroup initialization */
cpu_cgroup_css_online(struct cgroup_subsys_state * css)9117 static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
9118 {
9119 struct task_group *tg = css_tg(css);
9120 struct task_group *parent = css_tg(css->parent);
9121 int ret;
9122
9123 ret = scx_tg_online(tg);
9124 if (ret)
9125 return ret;
9126
9127 if (parent)
9128 sched_online_group(tg, parent);
9129
9130 #ifdef CONFIG_UCLAMP_TASK_GROUP
9131 /* Propagate the effective uclamp value for the new group */
9132 guard(mutex)(&uclamp_mutex);
9133 guard(rcu)();
9134 cpu_util_update_eff(css);
9135 #endif
9136
9137 return 0;
9138 }
9139
cpu_cgroup_css_offline(struct cgroup_subsys_state * css)9140 static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
9141 {
9142 struct task_group *tg = css_tg(css);
9143
9144 scx_tg_offline(tg);
9145 }
9146
cpu_cgroup_css_released(struct cgroup_subsys_state * css)9147 static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
9148 {
9149 struct task_group *tg = css_tg(css);
9150
9151 sched_release_group(tg);
9152 }
9153
cpu_cgroup_css_free(struct cgroup_subsys_state * css)9154 static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
9155 {
9156 struct task_group *tg = css_tg(css);
9157
9158 /*
9159 * Relies on the RCU grace period between css_released() and this.
9160 */
9161 sched_unregister_group(tg);
9162 }
9163
cpu_cgroup_can_attach(struct cgroup_taskset * tset)9164 static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
9165 {
9166 #ifdef CONFIG_RT_GROUP_SCHED
9167 struct task_struct *task;
9168 struct cgroup_subsys_state *css;
9169
9170 cgroup_taskset_for_each(task, css, tset) {
9171 if (!sched_rt_can_attach(css_tg(css), task))
9172 return -EINVAL;
9173 }
9174 #endif
9175 return scx_cgroup_can_attach(tset);
9176 }
9177
cpu_cgroup_attach(struct cgroup_taskset * tset)9178 static void cpu_cgroup_attach(struct cgroup_taskset *tset)
9179 {
9180 struct task_struct *task;
9181 struct cgroup_subsys_state *css;
9182
9183 cgroup_taskset_for_each(task, css, tset)
9184 sched_move_task(task, false);
9185
9186 scx_cgroup_finish_attach();
9187 }
9188
cpu_cgroup_cancel_attach(struct cgroup_taskset * tset)9189 static void cpu_cgroup_cancel_attach(struct cgroup_taskset *tset)
9190 {
9191 scx_cgroup_cancel_attach(tset);
9192 }
9193
9194 #ifdef CONFIG_UCLAMP_TASK_GROUP
cpu_util_update_eff(struct cgroup_subsys_state * css)9195 static void cpu_util_update_eff(struct cgroup_subsys_state *css)
9196 {
9197 struct cgroup_subsys_state *top_css = css;
9198 struct uclamp_se *uc_parent = NULL;
9199 struct uclamp_se *uc_se = NULL;
9200 unsigned int eff[UCLAMP_CNT];
9201 enum uclamp_id clamp_id;
9202 unsigned int clamps;
9203
9204 lockdep_assert_held(&uclamp_mutex);
9205 SCHED_WARN_ON(!rcu_read_lock_held());
9206
9207 css_for_each_descendant_pre(css, top_css) {
9208 uc_parent = css_tg(css)->parent
9209 ? css_tg(css)->parent->uclamp : NULL;
9210
9211 for_each_clamp_id(clamp_id) {
			/* Assume effective clamps match requested clamps */
			eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value;
			/* Cap effective clamps with parent's effective clamps */
			if (uc_parent &&
			    eff[clamp_id] > uc_parent[clamp_id].value) {
				eff[clamp_id] = uc_parent[clamp_id].value;
			}
		}
		/* Ensure protection is always capped by limit */
		eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]);

		/* Propagate most restrictive effective clamps */
		clamps = 0x0;
		uc_se = css_tg(css)->uclamp;
		for_each_clamp_id(clamp_id) {
			if (eff[clamp_id] == uc_se[clamp_id].value)
				continue;
			uc_se[clamp_id].value = eff[clamp_id];
			uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]);
			clamps |= (0x1 << clamp_id);
		}
		if (!clamps) {
			css = css_rightmost_descendant(css);
			continue;
		}

		/* Immediately update descendants RUNNABLE tasks */
		uclamp_update_active_tasks(css);
	}
}
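
/*
 * Illustrative walk-through of the propagation above (example values
 * only): if a group requests uclamp.max = 80% while its parent's
 * effective uclamp.max is 50%, the group's effective max is capped to
 * 50%; a requested uclamp.min of 60% is then also clamped to 50% by the
 * "protection capped by limit" step (assuming the parent's effective
 * min does not cap it first). Subtrees whose effective values did not
 * change (clamps == 0) are skipped via css_rightmost_descendant().
 */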

/*
 * Integer 10^N with a given N exponent by casting to integer the literal "1eN"
 * C expression. Since there is no way to convert a macro argument (N) into a
 * character constant, use two levels of macros.
 */
#define _POW10(exp) ((unsigned int)1e##exp)
#define POW10(exp) _POW10(exp)
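
/* For illustration: POW10(2) expands to ((unsigned int)1e2), i.e. 100. */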

struct uclamp_request {
#define UCLAMP_PERCENT_SHIFT 2
#define UCLAMP_PERCENT_SCALE (100 * POW10(UCLAMP_PERCENT_SHIFT))
	s64 percent;
	u64 util;
	int ret;
};

static inline struct uclamp_request
capacity_from_percent(char *buf)
{
	struct uclamp_request req = {
		.percent = UCLAMP_PERCENT_SCALE,
		.util = SCHED_CAPACITY_SCALE,
		.ret = 0,
	};

	buf = strim(buf);
	if (strcmp(buf, "max")) {
		req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT,
					     &req.percent);
		if (req.ret)
			return req;
		if ((u64)req.percent > UCLAMP_PERCENT_SCALE) {
			req.ret = -ERANGE;
			return req;
		}

		req.util = req.percent << SCHED_CAPACITY_SHIFT;
		req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE);
	}

	return req;
}
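
/*
 * Worked example of the conversion above: writing "50.0" parses to
 * req.percent = 5000 (50.0 scaled by 10^UCLAMP_PERCENT_SHIFT) and
 * req.util = DIV_ROUND_CLOSEST_ULL(5000 << SCHED_CAPACITY_SHIFT, 10000)
 * = 512, i.e. half of SCHED_CAPACITY_SCALE. Writing "max" keeps the
 * defaults of 100% and SCHED_CAPACITY_SCALE.
 */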

static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf,
				size_t nbytes, loff_t off,
				enum uclamp_id clamp_id)
{
	struct uclamp_request req;
	struct task_group *tg;

	req = capacity_from_percent(buf);
	if (req.ret)
		return req.ret;

	static_branch_enable(&sched_uclamp_used);

	guard(mutex)(&uclamp_mutex);
	guard(rcu)();

	tg = css_tg(of_css(of));
	if (tg->uclamp_req[clamp_id].value != req.util)
		uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false);

	/*
	 * Because conversion rounding is not recoverable, keep track of the
	 * exact requested value.
	 */
	tg->uclamp_pct[clamp_id] = req.percent;

	/* Update effective clamps to track the most restrictive value */
	cpu_util_update_eff(of_css(of));

	return nbytes;
}

static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes,
				    loff_t off)
{
	return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MIN);
}

static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes,
				    loff_t off)
{
	return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MAX);
}

static inline void cpu_uclamp_print(struct seq_file *sf,
				    enum uclamp_id clamp_id)
{
	struct task_group *tg;
	u64 util_clamp;
	u64 percent;
	u32 rem;

	scoped_guard (rcu) {
		tg = css_tg(seq_css(sf));
		util_clamp = tg->uclamp_req[clamp_id].value;
	}

	if (util_clamp == SCHED_CAPACITY_SCALE) {
		seq_puts(sf, "max\n");
		return;
	}

	percent = tg->uclamp_pct[clamp_id];
	percent = div_u64_rem(percent, POW10(UCLAMP_PERCENT_SHIFT), &rem);
	seq_printf(sf, "%llu.%0*u\n", percent, UCLAMP_PERCENT_SHIFT, rem);
}

static int cpu_uclamp_min_show(struct seq_file *sf, void *v)
{
	cpu_uclamp_print(sf, UCLAMP_MIN);
	return 0;
}

static int cpu_uclamp_max_show(struct seq_file *sf, void *v)
{
	cpu_uclamp_print(sf, UCLAMP_MAX);
	return 0;
}
#endif /* CONFIG_UCLAMP_TASK_GROUP */

#ifdef CONFIG_GROUP_SCHED_WEIGHT
static unsigned long tg_weight(struct task_group *tg)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
	return scale_load_down(tg->shares);
#else
	return sched_weight_from_cgroup(tg->scx_weight);
#endif
}

static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
				struct cftype *cftype, u64 shareval)
{
	int ret;

	if (shareval > scale_load_down(ULONG_MAX))
		shareval = MAX_SHARES;
	ret = sched_group_set_shares(css_tg(css), scale_load(shareval));
	if (!ret)
		scx_group_set_weight(css_tg(css),
				     sched_weight_to_cgroup(shareval));
	return ret;
}

static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
	return tg_weight(css_tg(css));
}
#endif /* CONFIG_GROUP_SCHED_WEIGHT */

#ifdef CONFIG_CFS_BANDWIDTH
static DEFINE_MUTEX(cfs_constraints_mutex);

const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
/* More than 203 days if BW_SHIFT equals 20. */
static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC;

static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);

static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota,
				u64 burst)
{
	int i, ret = 0, runtime_enabled, runtime_was_enabled;
	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;

	if (tg == &root_task_group)
		return -EINVAL;

	/*
	 * Ensure we have at least some amount of bandwidth every period. This
	 * is to prevent reaching a state of large arrears when throttled via
	 * entity_tick() resulting in prolonged exit starvation.
	 */
	if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
		return -EINVAL;

	/*
	 * Likewise, bound things on the other side by preventing insane quota
	 * periods. This also allows us to normalize in computing quota
	 * feasibility.
	 */
	if (period > max_cfs_quota_period)
		return -EINVAL;

	/*
	 * Bound quota to defend quota against overflow during bandwidth shift.
	 */
	if (quota != RUNTIME_INF && quota > max_cfs_runtime)
		return -EINVAL;

	if (quota != RUNTIME_INF && (burst > quota ||
				     burst + quota > max_cfs_runtime))
		return -EINVAL;

	/*
	 * Prevent race between setting of cfs_rq->runtime_enabled and
	 * unthrottle_offline_cfs_rqs().
	 */
	guard(cpus_read_lock)();
	guard(mutex)(&cfs_constraints_mutex);

	ret = __cfs_schedulable(tg, period, quota);
	if (ret)
		return ret;

	runtime_enabled = quota != RUNTIME_INF;
	runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
	/*
	 * If we need to toggle cfs_bandwidth_used, off->on must occur
	 * before making related changes, and on->off must occur afterwards
	 */
	if (runtime_enabled && !runtime_was_enabled)
		cfs_bandwidth_usage_inc();

	scoped_guard (raw_spinlock_irq, &cfs_b->lock) {
		cfs_b->period = ns_to_ktime(period);
		cfs_b->quota = quota;
		cfs_b->burst = burst;

		__refill_cfs_bandwidth_runtime(cfs_b);

		/*
		 * Restart the period timer (if active) to handle new
		 * period expiry:
		 */
		if (runtime_enabled)
			start_cfs_bandwidth(cfs_b);
	}

	for_each_online_cpu(i) {
		struct cfs_rq *cfs_rq = tg->cfs_rq[i];
		struct rq *rq = cfs_rq->rq;

		guard(rq_lock_irq)(rq);
		cfs_rq->runtime_enabled = runtime_enabled;
		cfs_rq->runtime_remaining = 0;

		if (cfs_rq->throttled)
			unthrottle_cfs_rq(cfs_rq);
	}

	if (runtime_was_enabled && !runtime_enabled)
		cfs_bandwidth_usage_dec();

	return 0;
}
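
/*
 * Illustration of the knobs (example values): period = 100ms and
 * quota = 50ms limit the group to half of one CPU's worth of runtime
 * per period; a non-zero burst additionally lets unused quota from
 * earlier periods be spent on top of the per-period quota, within the
 * overflow bounds checked above.
 */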

static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
{
	u64 quota, period, burst;

	period = ktime_to_ns(tg->cfs_bandwidth.period);
	burst = tg->cfs_bandwidth.burst;
	if (cfs_quota_us < 0)
		quota = RUNTIME_INF;
	else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC)
		quota = (u64)cfs_quota_us * NSEC_PER_USEC;
	else
		return -EINVAL;

	return tg_set_cfs_bandwidth(tg, period, quota, burst);
}

static long tg_get_cfs_quota(struct task_group *tg)
{
	u64 quota_us;

	if (tg->cfs_bandwidth.quota == RUNTIME_INF)
		return -1;

	quota_us = tg->cfs_bandwidth.quota;
	do_div(quota_us, NSEC_PER_USEC);

	return quota_us;
}

static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
{
	u64 quota, period, burst;

	if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC)
		return -EINVAL;

	period = (u64)cfs_period_us * NSEC_PER_USEC;
	quota = tg->cfs_bandwidth.quota;
	burst = tg->cfs_bandwidth.burst;

	return tg_set_cfs_bandwidth(tg, period, quota, burst);
}

static long tg_get_cfs_period(struct task_group *tg)
{
	u64 cfs_period_us;

	cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
	do_div(cfs_period_us, NSEC_PER_USEC);

	return cfs_period_us;
}

static int tg_set_cfs_burst(struct task_group *tg, long cfs_burst_us)
{
	u64 quota, period, burst;

	if ((u64)cfs_burst_us > U64_MAX / NSEC_PER_USEC)
		return -EINVAL;

	burst = (u64)cfs_burst_us * NSEC_PER_USEC;
	period = ktime_to_ns(tg->cfs_bandwidth.period);
	quota = tg->cfs_bandwidth.quota;

	return tg_set_cfs_bandwidth(tg, period, quota, burst);
}

static long tg_get_cfs_burst(struct task_group *tg)
{
	u64 burst_us;

	burst_us = tg->cfs_bandwidth.burst;
	do_div(burst_us, NSEC_PER_USEC);

	return burst_us;
}

static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
				  struct cftype *cft)
{
	return tg_get_cfs_quota(css_tg(css));
}

static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
				   struct cftype *cftype, s64 cfs_quota_us)
{
	return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
}

static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
				   struct cftype *cft)
{
	return tg_get_cfs_period(css_tg(css));
}

static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
				    struct cftype *cftype, u64 cfs_period_us)
{
	return tg_set_cfs_period(css_tg(css), cfs_period_us);
}

static u64 cpu_cfs_burst_read_u64(struct cgroup_subsys_state *css,
				  struct cftype *cft)
{
	return tg_get_cfs_burst(css_tg(css));
}

static int cpu_cfs_burst_write_u64(struct cgroup_subsys_state *css,
				   struct cftype *cftype, u64 cfs_burst_us)
{
	return tg_set_cfs_burst(css_tg(css), cfs_burst_us);
}

struct cfs_schedulable_data {
	struct task_group *tg;
	u64 period, quota;
};

/*
 * normalize group quota/period to be quota/max_period
 * note: units are usecs
 */
static u64 normalize_cfs_quota(struct task_group *tg,
			       struct cfs_schedulable_data *d)
{
	u64 quota, period;

	if (tg == d->tg) {
		period = d->period;
		quota = d->quota;
	} else {
		period = tg_get_cfs_period(tg);
		quota = tg_get_cfs_quota(tg);
	}

	/* note: these should typically be equivalent */
	if (quota == RUNTIME_INF || quota == -1)
		return RUNTIME_INF;

	return to_ratio(period, quota);
}
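
/*
 * For illustration: with d->quota = 50000us and d->period = 100000us the
 * normalized value is the quota/period ratio (0.5) in to_ratio()'s fixed
 * point representation, which tg_cfs_schedulable_down() below compares
 * against the parent's hierarchical_quota.
 */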

static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
{
	struct cfs_schedulable_data *d = data;
	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
	s64 quota = 0, parent_quota = -1;

	if (!tg->parent) {
		quota = RUNTIME_INF;
	} else {
		struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;

		quota = normalize_cfs_quota(tg, d);
		parent_quota = parent_b->hierarchical_quota;

		/*
		 * Ensure max(child_quota) <= parent_quota. On cgroup2,
		 * always take the non-RUNTIME_INF min. On cgroup1, only
		 * inherit when no limit is set. In both cases this is used
		 * by the scheduler to determine if a given CFS task has a
		 * bandwidth constraint at some higher level.
		 */
		if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) {
			if (quota == RUNTIME_INF)
				quota = parent_quota;
			else if (parent_quota != RUNTIME_INF)
				quota = min(quota, parent_quota);
		} else {
			if (quota == RUNTIME_INF)
				quota = parent_quota;
			else if (parent_quota != RUNTIME_INF && quota > parent_quota)
				return -EINVAL;
		}
	}
	cfs_b->hierarchical_quota = quota;

	return 0;
}

static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
{
	struct cfs_schedulable_data data = {
		.tg = tg,
		.period = period,
		.quota = quota,
	};

	if (quota != RUNTIME_INF) {
		do_div(data.period, NSEC_PER_USEC);
		do_div(data.quota, NSEC_PER_USEC);
	}

	guard(rcu)();
	return walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
}

static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
{
	struct task_group *tg = css_tg(seq_css(sf));
	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;

	seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
	seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
	seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);

	if (schedstat_enabled() && tg != &root_task_group) {
		struct sched_statistics *stats;
		u64 ws = 0;
		int i;

		for_each_possible_cpu(i) {
			stats = __schedstats_from_se(tg->se[i]);
			ws += schedstat_val(stats->wait_sum);
		}

		seq_printf(sf, "wait_sum %llu\n", ws);
	}

	seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst);
	seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time);

	return 0;
}

static u64 throttled_time_self(struct task_group *tg)
{
	int i;
	u64 total = 0;

	for_each_possible_cpu(i) {
		total += READ_ONCE(tg->cfs_rq[i]->throttled_clock_self_time);
	}

	return total;
}

static int cpu_cfs_local_stat_show(struct seq_file *sf, void *v)
{
	struct task_group *tg = css_tg(seq_css(sf));

	seq_printf(sf, "throttled_time %llu\n", throttled_time_self(tg));

	return 0;
}
#endif /* CONFIG_CFS_BANDWIDTH */

#ifdef CONFIG_RT_GROUP_SCHED
static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
				struct cftype *cft, s64 val)
{
	return sched_group_set_rt_runtime(css_tg(css), val);
}

static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
	return sched_group_rt_runtime(css_tg(css));
}

static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
				    struct cftype *cftype, u64 rt_period_us)
{
	return sched_group_set_rt_period(css_tg(css), rt_period_us);
}

static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
				   struct cftype *cft)
{
	return sched_group_rt_period(css_tg(css));
}
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_GROUP_SCHED_WEIGHT
static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css,
			     struct cftype *cft)
{
	return css_tg(css)->idle;
}

static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
			      struct cftype *cft, s64 idle)
{
	int ret;

	ret = sched_group_set_idle(css_tg(css), idle);
	if (!ret)
		scx_group_set_idle(css_tg(css), idle);
	return ret;
}
#endif

static struct cftype cpu_legacy_files[] = {
#ifdef CONFIG_GROUP_SCHED_WEIGHT
	{
		.name = "shares",
		.read_u64 = cpu_shares_read_u64,
		.write_u64 = cpu_shares_write_u64,
	},
	{
		.name = "idle",
		.read_s64 = cpu_idle_read_s64,
		.write_s64 = cpu_idle_write_s64,
	},
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	{
		.name = "cfs_quota_us",
		.read_s64 = cpu_cfs_quota_read_s64,
		.write_s64 = cpu_cfs_quota_write_s64,
	},
	{
		.name = "cfs_period_us",
		.read_u64 = cpu_cfs_period_read_u64,
		.write_u64 = cpu_cfs_period_write_u64,
	},
	{
		.name = "cfs_burst_us",
		.read_u64 = cpu_cfs_burst_read_u64,
		.write_u64 = cpu_cfs_burst_write_u64,
	},
	{
		.name = "stat",
		.seq_show = cpu_cfs_stat_show,
	},
	{
		.name = "stat.local",
		.seq_show = cpu_cfs_local_stat_show,
	},
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	{
		.name = "rt_runtime_us",
		.read_s64 = cpu_rt_runtime_read,
		.write_s64 = cpu_rt_runtime_write,
	},
	{
		.name = "rt_period_us",
		.read_u64 = cpu_rt_period_read_uint,
		.write_u64 = cpu_rt_period_write_uint,
	},
#endif
#ifdef CONFIG_UCLAMP_TASK_GROUP
	{
		.name = "uclamp.min",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = cpu_uclamp_min_show,
		.write = cpu_uclamp_min_write,
	},
	{
		.name = "uclamp.max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = cpu_uclamp_max_show,
		.write = cpu_uclamp_max_write,
	},
#endif
	{ }	/* Terminate */
};

static int cpu_extra_stat_show(struct seq_file *sf,
			       struct cgroup_subsys_state *css)
{
#ifdef CONFIG_CFS_BANDWIDTH
	{
		struct task_group *tg = css_tg(css);
		struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
		u64 throttled_usec, burst_usec;

		throttled_usec = cfs_b->throttled_time;
		do_div(throttled_usec, NSEC_PER_USEC);
		burst_usec = cfs_b->burst_time;
		do_div(burst_usec, NSEC_PER_USEC);

		seq_printf(sf, "nr_periods %d\n"
			   "nr_throttled %d\n"
			   "throttled_usec %llu\n"
			   "nr_bursts %d\n"
			   "burst_usec %llu\n",
			   cfs_b->nr_periods, cfs_b->nr_throttled,
			   throttled_usec, cfs_b->nr_burst, burst_usec);
	}
#endif
	return 0;
}

static int cpu_local_stat_show(struct seq_file *sf,
			       struct cgroup_subsys_state *css)
{
#ifdef CONFIG_CFS_BANDWIDTH
	{
		struct task_group *tg = css_tg(css);
		u64 throttled_self_usec;

		throttled_self_usec = throttled_time_self(tg);
		do_div(throttled_self_usec, NSEC_PER_USEC);

		seq_printf(sf, "throttled_usec %llu\n",
			   throttled_self_usec);
	}
#endif
	return 0;
}

#ifdef CONFIG_GROUP_SCHED_WEIGHT

static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
	return sched_weight_to_cgroup(tg_weight(css_tg(css)));
}

static int cpu_weight_write_u64(struct cgroup_subsys_state *css,
				struct cftype *cft, u64 cgrp_weight)
{
	unsigned long weight;
	int ret;

	if (cgrp_weight < CGROUP_WEIGHT_MIN || cgrp_weight > CGROUP_WEIGHT_MAX)
		return -ERANGE;

	weight = sched_weight_from_cgroup(cgrp_weight);

	ret = sched_group_set_shares(css_tg(css), scale_load(weight));
	if (!ret)
		scx_group_set_weight(css_tg(css), cgrp_weight);
	return ret;
}

static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css,
				    struct cftype *cft)
{
	unsigned long weight = tg_weight(css_tg(css));
	int last_delta = INT_MAX;
	int prio, delta;

	/* find the closest nice value to the current weight */
	for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) {
		delta = abs(sched_prio_to_weight[prio] - weight);
		if (delta >= last_delta)
			break;
		last_delta = delta;
	}

	return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO);
}
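
/*
 * Example of the mapping above: a weight of 1024 matches
 * sched_prio_to_weight[20] exactly, so the loop breaks at prio == 21 and
 * PRIO_TO_NICE(20 + MAX_RT_PRIO) yields nice 0; in-between weights snap
 * to the closest nice level.
 */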

static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
				     struct cftype *cft, s64 nice)
{
	unsigned long weight;
	int idx, ret;

	if (nice < MIN_NICE || nice > MAX_NICE)
		return -ERANGE;

	idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO;
	idx = array_index_nospec(idx, 40);
	weight = sched_prio_to_weight[idx];

	ret = sched_group_set_shares(css_tg(css), scale_load(weight));
	if (!ret)
		scx_group_set_weight(css_tg(css),
				     sched_weight_to_cgroup(weight));
	return ret;
}
#endif /* CONFIG_GROUP_SCHED_WEIGHT */

static void __maybe_unused cpu_period_quota_print(struct seq_file *sf,
						  long period, long quota)
{
	if (quota < 0)
		seq_puts(sf, "max");
	else
		seq_printf(sf, "%ld", quota);

	seq_printf(sf, " %ld\n", period);
}

/* caller should put the current value in *@periodp before calling */
static int __maybe_unused cpu_period_quota_parse(char *buf,
						 u64 *periodp, u64 *quotap)
{
	char tok[21];	/* U64_MAX */

	if (sscanf(buf, "%20s %llu", tok, periodp) < 1)
		return -EINVAL;

	*periodp *= NSEC_PER_USEC;

	if (sscanf(tok, "%llu", quotap))
		*quotap *= NSEC_PER_USEC;
	else if (!strcmp(tok, "max"))
		*quotap = RUNTIME_INF;
	else
		return -EINVAL;

	return 0;
}
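
/*
 * Accepted forms, for illustration ("max" maps quota to RUNTIME_INF):
 *
 *   "max 100000"    -> quota = RUNTIME_INF, period = 100000us (in ns)
 *   "50000 100000"  -> quota = 50000us,     period = 100000us (in ns)
 *   "50000"         -> quota = 50000us, period keeps the caller's value
 */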

#ifdef CONFIG_CFS_BANDWIDTH
static int cpu_max_show(struct seq_file *sf, void *v)
{
	struct task_group *tg = css_tg(seq_css(sf));

	cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg));
	return 0;
}

static ssize_t cpu_max_write(struct kernfs_open_file *of,
			     char *buf, size_t nbytes, loff_t off)
{
	struct task_group *tg = css_tg(of_css(of));
	u64 period = tg_get_cfs_period(tg);
	u64 burst = tg->cfs_bandwidth.burst;
	u64 quota;
	int ret;

	ret = cpu_period_quota_parse(buf, &period, &quota);
	if (!ret)
		ret = tg_set_cfs_bandwidth(tg, period, quota, burst);
	return ret ?: nbytes;
}
#endif

static struct cftype cpu_files[] = {
#ifdef CONFIG_GROUP_SCHED_WEIGHT
	{
		.name = "weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = cpu_weight_read_u64,
		.write_u64 = cpu_weight_write_u64,
	},
	{
		.name = "weight.nice",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_s64 = cpu_weight_nice_read_s64,
		.write_s64 = cpu_weight_nice_write_s64,
	},
	{
		.name = "idle",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_s64 = cpu_idle_read_s64,
		.write_s64 = cpu_idle_write_s64,
	},
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	{
		.name = "max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = cpu_max_show,
		.write = cpu_max_write,
	},
	{
		.name = "max.burst",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = cpu_cfs_burst_read_u64,
		.write_u64 = cpu_cfs_burst_write_u64,
	},
#endif
#ifdef CONFIG_UCLAMP_TASK_GROUP
	{
		.name = "uclamp.min",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = cpu_uclamp_min_show,
		.write = cpu_uclamp_min_write,
	},
	{
		.name = "uclamp.max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = cpu_uclamp_max_show,
		.write = cpu_uclamp_max_write,
	},
#endif
	{ }	/* terminate */
};

struct cgroup_subsys cpu_cgrp_subsys = {
	.css_alloc = cpu_cgroup_css_alloc,
	.css_online = cpu_cgroup_css_online,
	.css_offline = cpu_cgroup_css_offline,
	.css_released = cpu_cgroup_css_released,
	.css_free = cpu_cgroup_css_free,
	.css_extra_stat_show = cpu_extra_stat_show,
	.css_local_stat_show = cpu_local_stat_show,
	.can_attach = cpu_cgroup_can_attach,
	.attach = cpu_cgroup_attach,
	.cancel_attach = cpu_cgroup_cancel_attach,
	.legacy_cftypes = cpu_legacy_files,
	.dfl_cftypes = cpu_files,
	.early_init = true,
	.threaded = true,
};

#endif /* CONFIG_CGROUP_SCHED */

void dump_cpu_task(int cpu)
{
	if (in_hardirq() && cpu == smp_processor_id()) {
		struct pt_regs *regs;

		regs = get_irq_regs();
		if (regs) {
			show_regs(regs);
			return;
		}
	}

	if (trigger_single_cpu_backtrace(cpu))
		return;

	pr_info("Task dump for CPU %d:\n", cpu);
	sched_show_task(cpu_curr(cpu));
}

/*
 * Nice levels are multiplicative, with a gentle 10% change for every
 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
 * nice 1, it will get ~10% less CPU time than another CPU-bound task
 * that remained on nice 0.
 *
 * The "10% effect" is relative and cumulative: from _any_ nice level,
 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
 * If a task goes up by ~10% and another task goes down by ~10% then
 * the relative distance between them is ~25%.)
 */
const int sched_prio_to_weight[40] = {
 /* -20 */     88761,     71755,     56483,     46273,     36291,
 /* -15 */     29154,     23254,     18705,     14949,     11916,
 /* -10 */      9548,      7620,      6100,      4904,      3906,
 /*  -5 */      3121,      2501,      1991,      1586,      1277,
 /*   0 */      1024,       820,       655,       526,       423,
 /*   5 */       335,       272,       215,       172,       137,
 /*  10 */       110,        87,        70,        56,        45,
 /*  15 */        36,        29,        23,        18,        15,
};
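
/*
 * For illustration: consecutive entries differ by the ~1.25 multiplier
 * mentioned above (e.g. 1024 / 820 ~= 1.25). Two nice-0 tasks thus split
 * a CPU 50%/50%, while nice 0 vs nice 1 split it roughly 55%/45%
 * (1024 : 820).
 */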

/*
 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, pre-calculated.
 *
 * In cases where the weight does not change often, we can use the
 * pre-calculated inverse to speed up arithmetics by turning divisions
 * into multiplications:
 */
const u32 sched_prio_to_wmult[40] = {
 /* -20 */     48388,     59856,     76040,     92818,    118348,
 /* -15 */    147320,    184698,    229616,    287308,    360437,
 /* -10 */    449829,    563644,    704093,    875809,   1099582,
 /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
 /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
 /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
 /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
 /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};
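
/*
 * For illustration: sched_prio_to_wmult[20] == 4194304 == 2^32 / 1024,
 * so a division by the nice-0 weight can be computed as
 * "(delta * 4194304) >> 32" instead, trading a division for a multiply
 * and a shift.
 */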

void call_trace_sched_update_nr_running(struct rq *rq, int count)
{
	trace_sched_update_nr_running_tp(rq, count);
}

#ifdef CONFIG_SCHED_MM_CID

/*
 * @cid_lock: Guarantee forward-progress of cid allocation.
 *
 * Concurrency ID allocation within a bitmap is mostly lock-free. The cid_lock
 * is only used when contention is detected by the lock-free allocation so
 * forward progress can be guaranteed.
 */
DEFINE_RAW_SPINLOCK(cid_lock);

/*
 * @use_cid_lock: Select cid allocation behavior: lock-free vs spinlock.
 *
 * When @use_cid_lock is 0, the cid allocation is lock-free. When contention is
 * detected, it is set to 1 to ensure that all newly coming allocations are
 * serialized by @cid_lock until the allocation which detected contention
 * completes and sets @use_cid_lock back to 0. This guarantees forward progress
 * of a cid allocation.
 */
int use_cid_lock;

/*
 * mm_cid remote-clear implements a lock-free algorithm to clear per-mm/cpu cid
 * concurrently with respect to the execution of the source runqueue context
 * switch.
 *
 * There is one basic property we want to guarantee here:
 *
 * (1) Remote-clear should _never_ mark a per-cpu cid UNSET when it is actively
 *     used by a task. That would lead to concurrent allocation of the cid and
 *     userspace corruption.
 *
 * Provide this guarantee by introducing a Dekker memory ordering to guarantee
 * that a pair of loads observe at least one of a pair of stores, which can be
 * shown as:
 *
 *      X = Y = 0
 *
 *      w[X]=1          w[Y]=1
 *      MB              MB
 *      r[Y]=y          r[X]=x
 *
 * Which guarantees that x==0 && y==0 is impossible. But rather than using
 * values 0 and 1, this algorithm cares about specific state transitions of the
 * runqueue current task (as updated by the scheduler context switch), and the
 * per-mm/cpu cid value.
 *
 * Let's introduce task (Y) which has task->mm == mm and task (N) which has
 * task->mm != mm for the rest of the discussion. There are two scheduler state
 * transitions on context switch we care about:
 *
 * (TSA) Store to rq->curr with transition from (N) to (Y)
 *
 * (TSB) Store to rq->curr with transition from (Y) to (N)
 *
 * On the remote-clear side, there is one transition we care about:
 *
 * (TMA) cmpxchg to *pcpu_cid to set the LAZY flag
 *
 * There is also a transition to UNSET state which can be performed from all
 * sides (scheduler, remote-clear). It is always performed with a cmpxchg which
 * guarantees that only a single thread will succeed:
 *
 * (TMB) cmpxchg to *pcpu_cid to mark UNSET
 *
 * Just to be clear, what we do _not_ want to happen is a transition to UNSET
 * when a thread is actively using the cid (property (1)).
 *
 * Let's look at the relevant combinations of TSA/TSB, and TMA transitions.
 *
 * Scenario A) (TSA)+(TMA) (from next task perspective)
 *
 * CPU0                                      CPU1
 *
 * Context switch CS-1                       Remote-clear
 *   - store to rq->curr: (N)->(Y) (TSA)     - cmpxchg to *pcpu_id to LAZY (TMA)
 *                                             (implied barrier after cmpxchg)
 *   - switch_mm_cid()
 *     - memory barrier (see switch_mm_cid()
 *       comment explaining how this barrier
 *       is combined with other scheduler
 *       barriers)
 *     - mm_cid_get (next)
 *       - READ_ONCE(*pcpu_cid)              - rcu_dereference(src_rq->curr)
 *
 * This Dekker ensures that either task (Y) is observed by the
 * rcu_dereference() or the LAZY flag is observed by READ_ONCE(), or both are
 * observed.
 *
 * If task (Y) store is observed by rcu_dereference(), it means that there is
 * still an active task on the cpu. Remote-clear will therefore not transition
 * to UNSET, which fulfills property (1).
 *
 * If task (Y) is not observed, but the lazy flag is observed by READ_ONCE(),
 * it will move its state to UNSET, which clears the percpu cid perhaps
 * uselessly (which is not an issue for correctness). Because task (Y) is not
 * observed, CPU1 can move ahead to set the state to UNSET. Because moving
 * state to UNSET is done with a cmpxchg expecting that the old state has the
 * LAZY flag set, only one thread will successfully UNSET.
 *
 * If both states (LAZY flag and task (Y)) are observed, the thread on CPU0
 * will observe the LAZY flag and transition to UNSET (perhaps uselessly), and
 * CPU1 will observe task (Y) and do nothing more, which is fine.
 *
 * What we are effectively preventing with this Dekker is a scenario where
 * neither LAZY flag nor store (Y) are observed, which would fail property (1)
 * because this would UNSET a cid which is actively used.
 */

void sched_mm_cid_migrate_from(struct task_struct *t)
{
	t->migrate_from_cpu = task_cpu(t);
}

static
int __sched_mm_cid_migrate_from_fetch_cid(struct rq *src_rq,
					  struct task_struct *t,
					  struct mm_cid *src_pcpu_cid)
{
	struct mm_struct *mm = t->mm;
	struct task_struct *src_task;
	int src_cid, last_mm_cid;

	if (!mm)
		return -1;

	last_mm_cid = t->last_mm_cid;
	/*
	 * If the migrated task has no last cid, or if the current
	 * task on src rq uses the cid, it means the source cid does not need
	 * to be moved to the destination cpu.
	 */
	if (last_mm_cid == -1)
		return -1;
	src_cid = READ_ONCE(src_pcpu_cid->cid);
	if (!mm_cid_is_valid(src_cid) || last_mm_cid != src_cid)
		return -1;

	/*
	 * If we observe an active task using the mm on this rq, it means we
	 * are not the last task to be migrated from this cpu for this mm, so
	 * there is no need to move src_cid to the destination cpu.
	 */
	guard(rcu)();
	src_task = rcu_dereference(src_rq->curr);
	if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
		t->last_mm_cid = -1;
		return -1;
	}

	return src_cid;
}

static
int __sched_mm_cid_migrate_from_try_steal_cid(struct rq *src_rq,
					      struct task_struct *t,
					      struct mm_cid *src_pcpu_cid,
					      int src_cid)
{
	struct task_struct *src_task;
	struct mm_struct *mm = t->mm;
	int lazy_cid;

	if (src_cid == -1)
		return -1;

	/*
	 * Attempt to clear the source cpu cid to move it to the destination
	 * cpu.
	 */
	lazy_cid = mm_cid_set_lazy_put(src_cid);
	if (!try_cmpxchg(&src_pcpu_cid->cid, &src_cid, lazy_cid))
		return -1;

	/*
	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
	 * rq->curr->mm matches the scheduler barrier in context_switch()
	 * between store to rq->curr and load of prev and next task's
	 * per-mm/cpu cid.
	 *
	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
	 * rq->curr->mm_cid_active matches the barrier in
	 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
	 * sched_mm_cid_after_execve() between store to t->mm_cid_active and
	 * load of per-mm/cpu cid.
	 */

	/*
	 * If we observe an active task using the mm on this rq after setting
	 * the lazy-put flag, this task will be responsible for transitioning
	 * from lazy-put flag set to MM_CID_UNSET.
	 */
	scoped_guard (rcu) {
		src_task = rcu_dereference(src_rq->curr);
		if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
			/*
			 * We observed an active task for this mm, there is therefore
			 * no point in moving this cid to the destination cpu.
			 */
			t->last_mm_cid = -1;
			return -1;
		}
	}

	/*
	 * The src_cid is unused, so it can be unset.
	 */
	if (!try_cmpxchg(&src_pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
		return -1;
	WRITE_ONCE(src_pcpu_cid->recent_cid, MM_CID_UNSET);
	return src_cid;
}

/*
 * Migration to dst cpu. Called with dst_rq lock held.
 * Interrupts are disabled, which keeps the window of cid ownership without the
 * source rq lock held small.
 */
void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t)
{
	struct mm_cid *src_pcpu_cid, *dst_pcpu_cid;
	struct mm_struct *mm = t->mm;
	int src_cid, src_cpu;
	bool dst_cid_is_set;
	struct rq *src_rq;

	lockdep_assert_rq_held(dst_rq);

	if (!mm)
		return;
	src_cpu = t->migrate_from_cpu;
	if (src_cpu == -1) {
		t->last_mm_cid = -1;
		return;
	}
	/*
	 * Move the src cid if the dst cid is unset. This keeps id
	 * allocation closest to 0 in cases where few threads migrate around
	 * many CPUs.
	 *
	 * If destination cid or recent cid is already set, we may have
	 * to just clear the src cid to ensure compactness in frequent
	 * migrations scenarios.
	 *
	 * It is not useful to clear the src cid when the number of threads is
	 * greater or equal to the number of allowed CPUs, because user-space
	 * can expect that the number of allowed cids can reach the number of
	 * allowed CPUs.
	 */
	dst_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(dst_rq));
	dst_cid_is_set = !mm_cid_is_unset(READ_ONCE(dst_pcpu_cid->cid)) ||
			 !mm_cid_is_unset(READ_ONCE(dst_pcpu_cid->recent_cid));
	if (dst_cid_is_set && atomic_read(&mm->mm_users) >= READ_ONCE(mm->nr_cpus_allowed))
		return;
	src_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, src_cpu);
	src_rq = cpu_rq(src_cpu);
	src_cid = __sched_mm_cid_migrate_from_fetch_cid(src_rq, t, src_pcpu_cid);
	if (src_cid == -1)
		return;
	src_cid = __sched_mm_cid_migrate_from_try_steal_cid(src_rq, t, src_pcpu_cid,
							    src_cid);
	if (src_cid == -1)
		return;
	if (dst_cid_is_set) {
		__mm_cid_put(mm, src_cid);
		return;
	}
	/* Move src_cid to dst cpu. */
	mm_cid_snapshot_time(dst_rq, mm);
	WRITE_ONCE(dst_pcpu_cid->cid, src_cid);
	WRITE_ONCE(dst_pcpu_cid->recent_cid, src_cid);
}

static void sched_mm_cid_remote_clear(struct mm_struct *mm, struct mm_cid *pcpu_cid,
				      int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct task_struct *t;
	int cid, lazy_cid;

	cid = READ_ONCE(pcpu_cid->cid);
	if (!mm_cid_is_valid(cid))
		return;

	/*
	 * Clear the cpu cid if it is set to keep cid allocation compact. If
	 * there happens to be other tasks left on the source cpu using this
	 * mm, the next task using this mm will reallocate its cid on context
	 * switch.
	 */
	lazy_cid = mm_cid_set_lazy_put(cid);
	if (!try_cmpxchg(&pcpu_cid->cid, &cid, lazy_cid))
		return;

	/*
	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
	 * rq->curr->mm matches the scheduler barrier in context_switch()
	 * between store to rq->curr and load of prev and next task's
	 * per-mm/cpu cid.
	 *
	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
	 * rq->curr->mm_cid_active matches the barrier in
	 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
	 * sched_mm_cid_after_execve() between store to t->mm_cid_active and
	 * load of per-mm/cpu cid.
	 */

	/*
	 * If we observe an active task using the mm on this rq after setting
	 * the lazy-put flag, that task will be responsible for transitioning
	 * from lazy-put flag set to MM_CID_UNSET.
	 */
	scoped_guard (rcu) {
		t = rcu_dereference(rq->curr);
		if (READ_ONCE(t->mm_cid_active) && t->mm == mm)
			return;
	}

	/*
	 * The cid is unused, so it can be unset.
	 * Disable interrupts to keep the window of cid ownership without rq
	 * lock small.
	 */
	scoped_guard (irqsave) {
		if (try_cmpxchg(&pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
			__mm_cid_put(mm, cid);
	}
}

static void sched_mm_cid_remote_clear_old(struct mm_struct *mm, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct mm_cid *pcpu_cid;
	struct task_struct *curr;
	u64 rq_clock;

	/*
	 * rq->clock load is racy on 32-bit but one spurious clear once in a
	 * while is irrelevant.
	 */
	rq_clock = READ_ONCE(rq->clock);
	pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);

	/*
	 * In order to take care of infrequently scheduled tasks, bump the time
	 * snapshot associated with this cid if an active task using the mm is
	 * observed on this rq.
	 */
	scoped_guard (rcu) {
		curr = rcu_dereference(rq->curr);
		if (READ_ONCE(curr->mm_cid_active) && curr->mm == mm) {
			WRITE_ONCE(pcpu_cid->time, rq_clock);
			return;
		}
	}

	if (rq_clock < pcpu_cid->time + SCHED_MM_CID_PERIOD_NS)
		return;
	sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
}

static void sched_mm_cid_remote_clear_weight(struct mm_struct *mm, int cpu,
					     int weight)
{
	struct mm_cid *pcpu_cid;
	int cid;

	pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
	cid = READ_ONCE(pcpu_cid->cid);
	if (!mm_cid_is_valid(cid) || cid < weight)
		return;
	sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
}
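
/*
 * Example: if only 3 cids are currently in use, the cidmask weight is 3
 * and any stale per-cpu cid >= 3 (left over from a higher past
 * concurrency level) is lazily cleared, compacting the cid range to
 * [0..2].
 */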

static void task_mm_cid_work(struct callback_head *work)
{
	unsigned long now = jiffies, old_scan, next_scan;
	struct task_struct *t = current;
	struct cpumask *cidmask;
	struct mm_struct *mm;
	int weight, cpu;

	SCHED_WARN_ON(t != container_of(work, struct task_struct, cid_work));

	work->next = work;	/* Prevent double-add */
	if (t->flags & PF_EXITING)
		return;
	mm = t->mm;
	if (!mm)
		return;
	old_scan = READ_ONCE(mm->mm_cid_next_scan);
	next_scan = now + msecs_to_jiffies(MM_CID_SCAN_DELAY);
	if (!old_scan) {
		unsigned long res;

		res = cmpxchg(&mm->mm_cid_next_scan, old_scan, next_scan);
		if (res != old_scan)
			old_scan = res;
		else
			old_scan = next_scan;
	}
	if (time_before(now, old_scan))
		return;
	if (!try_cmpxchg(&mm->mm_cid_next_scan, &old_scan, next_scan))
		return;
	cidmask = mm_cidmask(mm);
	/* Clear cids that were not recently used. */
	for_each_possible_cpu(cpu)
		sched_mm_cid_remote_clear_old(mm, cpu);
	weight = cpumask_weight(cidmask);
	/*
	 * Clear cids that are greater or equal to the cidmask weight to
	 * recompact it.
	 */
	for_each_possible_cpu(cpu)
		sched_mm_cid_remote_clear_weight(mm, cpu, weight);
}
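
/*
 * A sketch of the timing behaviour above: the first thread to run after
 * mm_cid_next_scan expires wins the try_cmpxchg() and performs the scan;
 * concurrent threads lose the cmpxchg and back off, so each mm is
 * scanned at most once per MM_CID_SCAN_DELAY regardless of thread count.
 */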

void init_sched_mm_cid(struct task_struct *t)
{
	struct mm_struct *mm = t->mm;
	int mm_users = 0;

	if (mm) {
		mm_users = atomic_read(&mm->mm_users);
		if (mm_users == 1)
			mm->mm_cid_next_scan = jiffies + msecs_to_jiffies(MM_CID_SCAN_DELAY);
	}
	t->cid_work.next = &t->cid_work;	/* Protect against double add */
	init_task_work(&t->cid_work, task_mm_cid_work);
}

void task_tick_mm_cid(struct rq *rq, struct task_struct *curr)
{
	struct callback_head *work = &curr->cid_work;
	unsigned long now = jiffies;

	if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) ||
	    work->next != work)
		return;
	if (time_before(now, READ_ONCE(curr->mm->mm_cid_next_scan)))
		return;

	/* No page allocation under rq lock */
	task_work_add(curr, work, TWA_RESUME);
}

void sched_mm_cid_exit_signals(struct task_struct *t)
{
	struct mm_struct *mm = t->mm;
	struct rq *rq;

	if (!mm)
		return;

	preempt_disable();
	rq = this_rq();
	guard(rq_lock_irqsave)(rq);
	preempt_enable_no_resched();	/* holding spinlock */
	WRITE_ONCE(t->mm_cid_active, 0);
	/*
	 * Store t->mm_cid_active before loading per-mm/cpu cid.
	 * Matches barrier in sched_mm_cid_remote_clear_old().
	 */
	smp_mb();
	mm_cid_put(mm);
	t->last_mm_cid = t->mm_cid = -1;
}

void sched_mm_cid_before_execve(struct task_struct *t)
{
	struct mm_struct *mm = t->mm;
	struct rq *rq;

	if (!mm)
		return;

	preempt_disable();
	rq = this_rq();
	guard(rq_lock_irqsave)(rq);
	preempt_enable_no_resched();	/* holding spinlock */
	WRITE_ONCE(t->mm_cid_active, 0);
	/*
	 * Store t->mm_cid_active before loading per-mm/cpu cid.
	 * Matches barrier in sched_mm_cid_remote_clear_old().
	 */
	smp_mb();
	mm_cid_put(mm);
	t->last_mm_cid = t->mm_cid = -1;
}

void sched_mm_cid_after_execve(struct task_struct *t)
{
	struct mm_struct *mm = t->mm;
	struct rq *rq;

	if (!mm)
		return;

	preempt_disable();
	rq = this_rq();
	scoped_guard (rq_lock_irqsave, rq) {
		preempt_enable_no_resched();	/* holding spinlock */
		WRITE_ONCE(t->mm_cid_active, 1);
		/*
		 * Store t->mm_cid_active before loading per-mm/cpu cid.
		 * Matches barrier in sched_mm_cid_remote_clear_old().
		 */
		smp_mb();
		t->last_mm_cid = t->mm_cid = mm_cid_get(rq, t, mm);
	}
	rseq_set_notify_resume(t);
}

void sched_mm_cid_fork(struct task_struct *t)
{
	WARN_ON_ONCE(!t->mm || t->mm_cid != -1);
	t->mm_cid_active = 1;
}
#endif

#ifdef CONFIG_SCHED_CLASS_EXT
void sched_deq_and_put_task(struct task_struct *p, int queue_flags,
			    struct sched_enq_and_set_ctx *ctx)
{
	struct rq *rq = task_rq(p);

	lockdep_assert_rq_held(rq);

	*ctx = (struct sched_enq_and_set_ctx){
		.p = p,
		.queue_flags = queue_flags,
		.queued = task_on_rq_queued(p),
		.running = task_current(rq, p),
	};

	update_rq_clock(rq);
	if (ctx->queued)
		dequeue_task(rq, p, queue_flags | DEQUEUE_NOCLOCK);
	if (ctx->running)
		put_prev_task(rq, p);
}

void sched_enq_and_set_task(struct sched_enq_and_set_ctx *ctx)
{
	struct rq *rq = task_rq(ctx->p);

	lockdep_assert_rq_held(rq);

	if (ctx->queued)
		enqueue_task(rq, ctx->p, ctx->queue_flags | ENQUEUE_NOCLOCK);
	if (ctx->running)
		set_next_task(rq, ctx->p);
}
#endif	/* CONFIG_SCHED_CLASS_EXT */