1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * kernel/sched/core.c
4 *
5 * Core kernel CPU scheduler code
6 *
7 * Copyright (C) 1991-2002 Linus Torvalds
8 * Copyright (C) 1998-2024 Ingo Molnar, Red Hat
9 */
10 #include <linux/highmem.h>
11 #include <linux/hrtimer_api.h>
12 #include <linux/ktime_api.h>
13 #include <linux/sched/signal.h>
14 #include <linux/syscalls_api.h>
15 #include <linux/debug_locks.h>
16 #include <linux/prefetch.h>
17 #include <linux/capability.h>
18 #include <linux/pgtable_api.h>
19 #include <linux/wait_bit.h>
20 #include <linux/jiffies.h>
21 #include <linux/spinlock_api.h>
22 #include <linux/cpumask_api.h>
23 #include <linux/lockdep_api.h>
24 #include <linux/hardirq.h>
25 #include <linux/softirq.h>
26 #include <linux/refcount_api.h>
27 #include <linux/topology.h>
28 #include <linux/sched/clock.h>
29 #include <linux/sched/cond_resched.h>
30 #include <linux/sched/cputime.h>
31 #include <linux/sched/debug.h>
32 #include <linux/sched/hotplug.h>
33 #include <linux/sched/init.h>
34 #include <linux/sched/isolation.h>
35 #include <linux/sched/loadavg.h>
36 #include <linux/sched/mm.h>
37 #include <linux/sched/nohz.h>
38 #include <linux/sched/rseq_api.h>
39 #include <linux/sched/rt.h>
40
41 #include <linux/blkdev.h>
42 #include <linux/context_tracking.h>
43 #include <linux/cpuset.h>
44 #include <linux/delayacct.h>
45 #include <linux/init_task.h>
46 #include <linux/interrupt.h>
47 #include <linux/ioprio.h>
48 #include <linux/kallsyms.h>
49 #include <linux/kcov.h>
50 #include <linux/kprobes.h>
51 #include <linux/llist_api.h>
52 #include <linux/mmu_context.h>
53 #include <linux/mmzone.h>
54 #include <linux/mutex_api.h>
55 #include <linux/nmi.h>
56 #include <linux/nospec.h>
57 #include <linux/perf_event_api.h>
58 #include <linux/profile.h>
59 #include <linux/psi.h>
60 #include <linux/rcuwait_api.h>
61 #include <linux/rseq.h>
62 #include <linux/sched/wake_q.h>
63 #include <linux/scs.h>
64 #include <linux/slab.h>
65 #include <linux/syscalls.h>
66 #include <linux/vtime.h>
67 #include <linux/wait_api.h>
68 #include <linux/workqueue_api.h>
69
70 #ifdef CONFIG_PREEMPT_DYNAMIC
71 # ifdef CONFIG_GENERIC_ENTRY
72 # include <linux/entry-common.h>
73 # endif
74 #endif
75
76 #include <uapi/linux/sched/types.h>
77
78 #include <asm/irq_regs.h>
79 #include <asm/switch_to.h>
80 #include <asm/tlb.h>
81
82 #define CREATE_TRACE_POINTS
83 #include <linux/sched/rseq_api.h>
84 #include <trace/events/sched.h>
85 #include <trace/events/ipi.h>
86 #undef CREATE_TRACE_POINTS
87
88 #include "sched.h"
89 #include "stats.h"
90
91 #include "autogroup.h"
92 #include "pelt.h"
93 #include "smp.h"
94 #include "stats.h"
95
96 #include "../workqueue_internal.h"
97 #include "../../io_uring/io-wq.h"
98 #include "../smpboot.h"
99
100 EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
101 EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);
102
103 /*
104 * Export tracepoints that act as a bare tracehook (i.e. have no trace event
105 * associated with them) to allow external modules to probe them.
106 */
107 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp);
108 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
109 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
110 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
111 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
112 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_hw_tp);
113 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp);
114 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
115 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
116 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
117 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
118 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_compute_energy_tp);
119
120 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
121
122 #ifdef CONFIG_SCHED_DEBUG
123 /*
124 * Debugging: various feature bits
125 *
126 * If SCHED_DEBUG is disabled, each compilation unit has its own copy of
127 * sysctl_sched_features, defined in sched.h, to allow constant propagation
128 * at compile time and compiler optimizations based on the feature defaults.
129 */
130 #define SCHED_FEAT(name, enabled) \
131 (1UL << __SCHED_FEAT_##name) * enabled |
132 const_debug unsigned int sysctl_sched_features =
133 #include "features.h"
134 0;
135 #undef SCHED_FEAT
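/*
 * Illustration only (not part of the build): given hypothetical features.h
 * entries such as
 *
 *	SCHED_FEAT(PLACE_LAG, true)
 *	SCHED_FEAT(RUN_TO_PARITY, false)
 *
 * the construct above would expand to roughly:
 *
 *	const_debug unsigned int sysctl_sched_features =
 *		(1UL << __SCHED_FEAT_PLACE_LAG) * true |
 *		(1UL << __SCHED_FEAT_RUN_TO_PARITY) * false |
 *		0;
 *
 * i.e. a bitmask with one bit per feature, initialized from the defaults.
 */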
136
137 /*
138 * Print a warning if need_resched is set for the given duration (if
139 * LATENCY_WARN is enabled).
140 *
141 * If sysctl_resched_latency_warn_once is set, only one warning will be shown
142 * per boot.
143 */
144 __read_mostly int sysctl_resched_latency_warn_ms = 100;
145 __read_mostly int sysctl_resched_latency_warn_once = 1;
146 #endif /* CONFIG_SCHED_DEBUG */
147
148 /*
149 * Number of tasks to iterate in a single balance run.
150 * Limited because this is done with IRQs disabled.
151 */
152 const_debug unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;
153
154 __read_mostly int scheduler_running;
155
156 #ifdef CONFIG_SCHED_CORE
157
158 DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);
159
160 /* kernel prio, less is more */
161 static inline int __task_prio(const struct task_struct *p)
162 {
163 if (p->sched_class == &stop_sched_class) /* trumps deadline */
164 return -2;
165
166 if (p->dl_server)
167 return -1; /* deadline */
168
169 if (rt_or_dl_prio(p->prio))
170 return p->prio; /* [-1, 99] */
171
172 if (p->sched_class == &idle_sched_class)
173 return MAX_RT_PRIO + NICE_WIDTH; /* 140 */
174
175 if (task_on_scx(p))
176 return MAX_RT_PRIO + MAX_NICE + 1; /* 120, squash ext */
177
178 return MAX_RT_PRIO + MAX_NICE; /* 119, squash fair */
179 }
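/*
 * For reference, the resulting scale (smaller value == higher priority):
 * stop = -2, deadline (incl. DL server) = -1, RT = [0..99], fair = 119,
 * ext = 120, idle = 140.
 */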
180
181 /*
182 * l(a,b)
183 * le(a,b) := !l(b,a)
184 * g(a,b) := l(b,a)
185 * ge(a,b) := !l(a,b)
186 */
187
188 /* real prio, less is less */
189 static inline bool prio_less(const struct task_struct *a,
190 const struct task_struct *b, bool in_fi)
191 {
192
193 int pa = __task_prio(a), pb = __task_prio(b);
194
195 if (-pa < -pb)
196 return true;
197
198 if (-pb < -pa)
199 return false;
200
201 if (pa == -1) { /* dl_prio() doesn't work because of stop_class above */
202 const struct sched_dl_entity *a_dl, *b_dl;
203
204 a_dl = &a->dl;
205 /*
206 * Since 'a' and 'b' can be CFS tasks served by a DL server,
207 * __task_prio() can return -1 (for DL) even for those. In that
208 * case, get to the dl_server's DL entity.
209 */
210 if (a->dl_server)
211 a_dl = a->dl_server;
212
213 b_dl = &b->dl;
214 if (b->dl_server)
215 b_dl = b->dl_server;
216
217 return !dl_time_before(a_dl->deadline, b_dl->deadline);
218 }
219
220 if (pa == MAX_RT_PRIO + MAX_NICE) /* fair */
221 return cfs_prio_less(a, b, in_fi);
222
223 #ifdef CONFIG_SCHED_CLASS_EXT
224 if (pa == MAX_RT_PRIO + MAX_NICE + 1) /* ext */
225 return scx_prio_less(a, b, in_fi);
226 #endif
227
228 return false;
229 }
230
231 static inline bool __sched_core_less(const struct task_struct *a,
232 const struct task_struct *b)
233 {
234 if (a->core_cookie < b->core_cookie)
235 return true;
236
237 if (a->core_cookie > b->core_cookie)
238 return false;
239
240 /* flip prio, so high prio is leftmost */
241 if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count))
242 return true;
243
244 return false;
245 }
246
247 #define __node_2_sc(node) rb_entry((node), struct task_struct, core_node)
248
249 static inline bool rb_sched_core_less(struct rb_node *a, const struct rb_node *b)
250 {
251 return __sched_core_less(__node_2_sc(a), __node_2_sc(b));
252 }
253
254 static inline int rb_sched_core_cmp(const void *key, const struct rb_node *node)
255 {
256 const struct task_struct *p = __node_2_sc(node);
257 unsigned long cookie = (unsigned long)key;
258
259 if (cookie < p->core_cookie)
260 return -1;
261
262 if (cookie > p->core_cookie)
263 return 1;
264
265 return 0;
266 }
267
268 void sched_core_enqueue(struct rq *rq, struct task_struct *p)
269 {
270 if (p->se.sched_delayed)
271 return;
272
273 rq->core->core_task_seq++;
274
275 if (!p->core_cookie)
276 return;
277
278 rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);
279 }
280
281 void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags)
282 {
283 if (p->se.sched_delayed)
284 return;
285
286 rq->core->core_task_seq++;
287
288 if (sched_core_enqueued(p)) {
289 rb_erase(&p->core_node, &rq->core_tree);
290 RB_CLEAR_NODE(&p->core_node);
291 }
292
293 /*
294 * Migrating the last task off the cpu, with the cpu in forced idle
295 * state. Reschedule to create an accounting edge for forced idle,
296 * and re-examine whether the core is still in forced idle state.
297 */
298 if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 &&
299 rq->core->core_forceidle_count && rq->curr == rq->idle)
300 resched_curr(rq);
301 }
302
303 static int sched_task_is_throttled(struct task_struct *p, int cpu)
304 {
305 if (p->sched_class->task_is_throttled)
306 return p->sched_class->task_is_throttled(p, cpu);
307
308 return 0;
309 }
310
311 static struct task_struct *sched_core_next(struct task_struct *p, unsigned long cookie)
312 {
313 struct rb_node *node = &p->core_node;
314 int cpu = task_cpu(p);
315
316 do {
317 node = rb_next(node);
318 if (!node)
319 return NULL;
320
321 p = __node_2_sc(node);
322 if (p->core_cookie != cookie)
323 return NULL;
324
325 } while (sched_task_is_throttled(p, cpu));
326
327 return p;
328 }
329
330 /*
331 * Find left-most (aka, highest priority) and unthrottled task matching @cookie.
332 * If no suitable task is found, NULL will be returned.
333 */
334 static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie)
335 {
336 struct task_struct *p;
337 struct rb_node *node;
338
339 node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp);
340 if (!node)
341 return NULL;
342
343 p = __node_2_sc(node);
344 if (!sched_task_is_throttled(p, rq->cpu))
345 return p;
346
347 return sched_core_next(p, cookie);
348 }
349
350 /*
351 * Magic required such that:
352 *
353 * raw_spin_rq_lock(rq);
354 * ...
355 * raw_spin_rq_unlock(rq);
356 *
357 * ends up locking and unlocking the _same_ lock, and all CPUs
358 * always agree on what rq has what lock.
359 *
360 * XXX entirely possible to selectively enable cores, don't bother for now.
361 */
362
363 static DEFINE_MUTEX(sched_core_mutex);
364 static atomic_t sched_core_count;
365 static struct cpumask sched_core_mask;
366
367 static void sched_core_lock(int cpu, unsigned long *flags)
368 {
369 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
370 int t, i = 0;
371
372 local_irq_save(*flags);
373 for_each_cpu(t, smt_mask)
374 raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);
375 }
376
377 static void sched_core_unlock(int cpu, unsigned long *flags)
378 {
379 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
380 int t;
381
382 for_each_cpu(t, smt_mask)
383 raw_spin_unlock(&cpu_rq(t)->__lock);
384 local_irq_restore(*flags);
385 }
386
387 static void __sched_core_flip(bool enabled)
388 {
389 unsigned long flags;
390 int cpu, t;
391
392 cpus_read_lock();
393
394 /*
395 * Toggle the online cores, one by one.
396 */
397 cpumask_copy(&sched_core_mask, cpu_online_mask);
398 for_each_cpu(cpu, &sched_core_mask) {
399 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
400
401 sched_core_lock(cpu, &flags);
402
403 for_each_cpu(t, smt_mask)
404 cpu_rq(t)->core_enabled = enabled;
405
406 cpu_rq(cpu)->core->core_forceidle_start = 0;
407
408 sched_core_unlock(cpu, &flags);
409
410 cpumask_andnot(&sched_core_mask, &sched_core_mask, smt_mask);
411 }
412
413 /*
414 * Toggle the offline CPUs.
415 */
416 for_each_cpu_andnot(cpu, cpu_possible_mask, cpu_online_mask)
417 cpu_rq(cpu)->core_enabled = enabled;
418
419 cpus_read_unlock();
420 }
421
422 static void sched_core_assert_empty(void)
423 {
424 int cpu;
425
426 for_each_possible_cpu(cpu)
427 WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree));
428 }
429
430 static void __sched_core_enable(void)
431 {
432 static_branch_enable(&__sched_core_enabled);
433 /*
434 * Ensure all previous instances of raw_spin_rq_*lock() have finished
435 * and future ones will observe !sched_core_disabled().
436 */
437 synchronize_rcu();
438 __sched_core_flip(true);
439 sched_core_assert_empty();
440 }
441
442 static void __sched_core_disable(void)
443 {
444 sched_core_assert_empty();
445 __sched_core_flip(false);
446 static_branch_disable(&__sched_core_enabled);
447 }
448
449 void sched_core_get(void)
450 {
451 if (atomic_inc_not_zero(&sched_core_count))
452 return;
453
454 mutex_lock(&sched_core_mutex);
455 if (!atomic_read(&sched_core_count))
456 __sched_core_enable();
457
458 smp_mb__before_atomic();
459 atomic_inc(&sched_core_count);
460 mutex_unlock(&sched_core_mutex);
461 }
462
463 static void __sched_core_put(struct work_struct *work)
464 {
465 if (atomic_dec_and_mutex_lock(&sched_core_count, &sched_core_mutex)) {
466 __sched_core_disable();
467 mutex_unlock(&sched_core_mutex);
468 }
469 }
470
471 void sched_core_put(void)
472 {
473 static DECLARE_WORK(_work, __sched_core_put);
474
475 /*
476 * "There can be only one"
477 *
478 * Either this is the last one, or we don't actually need to do any
479 * 'work'. If it is the last *again*, we rely on
480 * WORK_STRUCT_PENDING_BIT.
481 */
482 if (!atomic_add_unless(&sched_core_count, -1, 1))
483 schedule_work(&_work);
484 }
485
486 #else /* !CONFIG_SCHED_CORE */
487
488 static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { }
489 static inline void
490 sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { }
491
492 #endif /* CONFIG_SCHED_CORE */
493
494 /*
495 * Serialization rules:
496 *
497 * Lock order:
498 *
499 * p->pi_lock
500 * rq->lock
501 * hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
502 *
503 * rq1->lock
504 * rq2->lock where: rq1 < rq2
505 *
506 * Regular state:
507 *
508 * Normal scheduling state is serialized by rq->lock. __schedule() takes the
509 * local CPU's rq->lock, it optionally removes the task from the runqueue and
510 * always looks at the local rq data structures to find the most eligible task
511 * to run next.
512 *
513 * Task enqueue is also under rq->lock, possibly taken from another CPU.
514 * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
515 * the local CPU to avoid bouncing the runqueue state around [ see
516 * ttwu_queue_wakelist() ]
517 *
518 * Task wakeup, specifically wakeups that involve migration, are horribly
519 * complicated to avoid having to take two rq->locks.
520 *
521 * Special state:
522 *
523 * System-calls and anything external will use task_rq_lock() which acquires
524 * both p->pi_lock and rq->lock. As a consequence the state they change is
525 * stable while holding either lock:
526 *
527 * - sched_setaffinity()/
528 * set_cpus_allowed_ptr(): p->cpus_ptr, p->nr_cpus_allowed
529 * - set_user_nice(): p->se.load, p->*prio
530 * - __sched_setscheduler(): p->sched_class, p->policy, p->*prio,
531 * p->se.load, p->rt_priority,
532 * p->dl.dl_{runtime, deadline, period, flags, bw, density}
533 * - sched_setnuma(): p->numa_preferred_nid
534 * - sched_move_task(): p->sched_task_group
535 * - uclamp_update_active() p->uclamp*
536 *
537 * p->state <- TASK_*:
538 *
539 * is changed locklessly using set_current_state(), __set_current_state() or
540 * set_special_state(), see their respective comments, or by
541 * try_to_wake_up(). This latter uses p->pi_lock to serialize against
542 * concurrent self.
543 *
544 * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
545 *
546 * is set by activate_task() and cleared by deactivate_task(), under
547 * rq->lock. Non-zero indicates the task is runnable, the special
548 * ON_RQ_MIGRATING state is used for migration without holding both
549 * rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
550 *
551 * Additionally it is possible to be ->on_rq but still be considered not
552 * runnable when p->se.sched_delayed is true. These tasks are on the runqueue
553 * but will be dequeued as soon as they get picked again. See the
554 * task_is_runnable() helper.
555 *
556 * p->on_cpu <- { 0, 1 }:
557 *
558 * is set by prepare_task() and cleared by finish_task() such that it will be
559 * set before p is scheduled-in and cleared after p is scheduled-out, both
560 * under rq->lock. Non-zero indicates the task is running on its CPU.
561 *
562 * [ The astute reader will observe that it is possible for two tasks on one
563 * CPU to have ->on_cpu = 1 at the same time. ]
564 *
565 * task_cpu(p): is changed by set_task_cpu(), the rules are:
566 *
567 * - Don't call set_task_cpu() on a blocked task:
568 *
569 * We don't care what CPU we're not running on; this simplifies hotplug:
570 * the CPU assignment of blocked tasks isn't required to be valid.
571 *
572 * - for try_to_wake_up(), called under p->pi_lock:
573 *
574 * This allows try_to_wake_up() to only take one rq->lock, see its comment.
575 *
576 * - for migration called under rq->lock:
577 * [ see task_on_rq_migrating() in task_rq_lock() ]
578 *
579 * o move_queued_task()
580 * o detach_task()
581 *
582 * - for migration called under double_rq_lock():
583 *
584 * o __migrate_swap_task()
585 * o push_rt_task() / pull_rt_task()
586 * o push_dl_task() / pull_dl_task()
587 * o dl_task_offline_migration()
588 *
589 */
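/*
 * A minimal sketch of the nesting described above, shown only as an
 * illustration (this is essentially what task_rq_lock()/task_rq_unlock()
 * implement):
 *
 *	raw_spin_lock_irqsave(&p->pi_lock, flags);
 *	rq = __task_rq_lock(p, &rf);		// acquires rq->lock
 *	...					// p's scheduling state is stable here
 *	__task_rq_unlock(rq, &rf);
 *	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 */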
590
591 void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
592 {
593 raw_spinlock_t *lock;
594
595 /* Matches synchronize_rcu() in __sched_core_enable() */
596 preempt_disable();
597 if (sched_core_disabled()) {
598 raw_spin_lock_nested(&rq->__lock, subclass);
599 /* preempt_count *MUST* be > 1 */
600 preempt_enable_no_resched();
601 return;
602 }
603
604 for (;;) {
605 lock = __rq_lockp(rq);
606 raw_spin_lock_nested(lock, subclass);
607 if (likely(lock == __rq_lockp(rq))) {
608 /* preempt_count *MUST* be > 1 */
609 preempt_enable_no_resched();
610 return;
611 }
612 raw_spin_unlock(lock);
613 }
614 }
615
616 bool raw_spin_rq_trylock(struct rq *rq)
617 {
618 raw_spinlock_t *lock;
619 bool ret;
620
621 /* Matches synchronize_rcu() in __sched_core_enable() */
622 preempt_disable();
623 if (sched_core_disabled()) {
624 ret = raw_spin_trylock(&rq->__lock);
625 preempt_enable();
626 return ret;
627 }
628
629 for (;;) {
630 lock = __rq_lockp(rq);
631 ret = raw_spin_trylock(lock);
632 if (!ret || (likely(lock == __rq_lockp(rq)))) {
633 preempt_enable();
634 return ret;
635 }
636 raw_spin_unlock(lock);
637 }
638 }
639
640 void raw_spin_rq_unlock(struct rq *rq)
641 {
642 raw_spin_unlock(rq_lockp(rq));
643 }
644
645 #ifdef CONFIG_SMP
646 /*
647 * double_rq_lock - safely lock two runqueues
648 */
649 void double_rq_lock(struct rq *rq1, struct rq *rq2)
650 {
651 lockdep_assert_irqs_disabled();
652
653 if (rq_order_less(rq2, rq1))
654 swap(rq1, rq2);
655
656 raw_spin_rq_lock(rq1);
657 if (__rq_lockp(rq1) != __rq_lockp(rq2))
658 raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);
659
660 double_rq_clock_clear_update(rq1, rq2);
661 }
662 #endif
663
664 /*
665 * __task_rq_lock - lock the rq @p resides on.
666 */
667 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
668 __acquires(rq->lock)
669 {
670 struct rq *rq;
671
672 lockdep_assert_held(&p->pi_lock);
673
674 for (;;) {
675 rq = task_rq(p);
676 raw_spin_rq_lock(rq);
677 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
678 rq_pin_lock(rq, rf);
679 return rq;
680 }
681 raw_spin_rq_unlock(rq);
682
683 while (unlikely(task_on_rq_migrating(p)))
684 cpu_relax();
685 }
686 }
687
688 /*
689 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
690 */
691 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
692 __acquires(p->pi_lock)
693 __acquires(rq->lock)
694 {
695 struct rq *rq;
696
697 for (;;) {
698 raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
699 rq = task_rq(p);
700 raw_spin_rq_lock(rq);
701 /*
702 * move_queued_task() task_rq_lock()
703 *
704 * ACQUIRE (rq->lock)
705 * [S] ->on_rq = MIGRATING [L] rq = task_rq()
706 * WMB (__set_task_cpu()) ACQUIRE (rq->lock);
707 * [S] ->cpu = new_cpu [L] task_rq()
708 * [L] ->on_rq
709 * RELEASE (rq->lock)
710 *
711 * If we observe the old CPU in task_rq_lock(), the acquire of
712 * the old rq->lock will fully serialize against the stores.
713 *
714 * If we observe the new CPU in task_rq_lock(), the address
715 * dependency headed by '[L] rq = task_rq()' and the acquire
716 * will pair with the WMB to ensure we then also see migrating.
717 */
718 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
719 rq_pin_lock(rq, rf);
720 return rq;
721 }
722 raw_spin_rq_unlock(rq);
723 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
724
725 while (unlikely(task_on_rq_migrating(p)))
726 cpu_relax();
727 }
728 }
729
730 /*
731 * RQ-clock updating methods:
732 */
733
734 static void update_rq_clock_task(struct rq *rq, s64 delta)
735 {
736 /*
737 * In theory, the compiler should just see 0 here, and optimize out the call
738 * to sched_rt_avg_update. But I don't trust it...
739 */
740 s64 __maybe_unused steal = 0, irq_delta = 0;
741
742 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
743 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
744
745 /*
746 * Since irq_time is only updated on {soft,}irq_exit, we might run into
747 * this case when a previous update_rq_clock() happened inside a
748 * {soft,}IRQ region.
749 *
750 * When this happens, we stop ->clock_task and only update the
751 * prev_irq_time stamp to account for the part that fit, so that a next
752 * update will consume the rest. This ensures ->clock_task is
753 * monotonic.
754 *
755 * It does however cause some slight misattribution of {soft,}IRQ
756 * time, a more accurate solution would be to update the irq_time using
757 * the current rq->clock timestamp, except that would require using
758 * atomic ops.
759 */
760 if (irq_delta > delta)
761 irq_delta = delta;
762
763 rq->prev_irq_time += irq_delta;
764 delta -= irq_delta;
765 delayacct_irq(rq->curr, irq_delta);
766 #endif
767 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
768 	if (static_key_false((&paravirt_steal_rq_enabled))) {
769 steal = paravirt_steal_clock(cpu_of(rq));
770 steal -= rq->prev_steal_time_rq;
771
772 if (unlikely(steal > delta))
773 steal = delta;
774
775 rq->prev_steal_time_rq += steal;
776 delta -= steal;
777 }
778 #endif
779
780 rq->clock_task += delta;
781
782 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
783 if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
784 update_irq_load_avg(rq, irq_delta + steal);
785 #endif
786 update_rq_clock_pelt(rq, delta);
787 }
788
789 void update_rq_clock(struct rq *rq)
790 {
791 s64 delta;
792
793 lockdep_assert_rq_held(rq);
794
795 if (rq->clock_update_flags & RQCF_ACT_SKIP)
796 return;
797
798 #ifdef CONFIG_SCHED_DEBUG
799 if (sched_feat(WARN_DOUBLE_CLOCK))
800 SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
801 rq->clock_update_flags |= RQCF_UPDATED;
802 #endif
803
804 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
805 if (delta < 0)
806 return;
807 rq->clock += delta;
808 update_rq_clock_task(rq, delta);
809 }
810
811 #ifdef CONFIG_SCHED_HRTICK
812 /*
813 * Use HR-timers to deliver accurate preemption points.
814 */
815
816 static void hrtick_clear(struct rq *rq)
817 {
818 if (hrtimer_active(&rq->hrtick_timer))
819 hrtimer_cancel(&rq->hrtick_timer);
820 }
821
822 /*
823 * High-resolution timer tick.
824 * Runs from hardirq context with interrupts disabled.
825 */
826 static enum hrtimer_restart hrtick(struct hrtimer *timer)
827 {
828 struct rq *rq = container_of(timer, struct rq, hrtick_timer);
829 struct rq_flags rf;
830
831 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
832
833 rq_lock(rq, &rf);
834 update_rq_clock(rq);
835 rq->donor->sched_class->task_tick(rq, rq->curr, 1);
836 rq_unlock(rq, &rf);
837
838 return HRTIMER_NORESTART;
839 }
840
841 #ifdef CONFIG_SMP
842
843 static void __hrtick_restart(struct rq *rq)
844 {
845 struct hrtimer *timer = &rq->hrtick_timer;
846 ktime_t time = rq->hrtick_time;
847
848 hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
849 }
850
851 /*
852 * called from hardirq (IPI) context
853 */
854 static void __hrtick_start(void *arg)
855 {
856 struct rq *rq = arg;
857 struct rq_flags rf;
858
859 rq_lock(rq, &rf);
860 __hrtick_restart(rq);
861 rq_unlock(rq, &rf);
862 }
863
864 /*
865 * Called to set the hrtick timer state.
866 *
867 * called with rq->lock held and IRQs disabled
868 */
869 void hrtick_start(struct rq *rq, u64 delay)
870 {
871 struct hrtimer *timer = &rq->hrtick_timer;
872 s64 delta;
873
874 /*
875 * Don't schedule slices shorter than 10000ns, that just
876 * doesn't make sense and can cause timer DoS.
877 */
878 delta = max_t(s64, delay, 10000LL);
879 rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);
880
881 if (rq == this_rq())
882 __hrtick_restart(rq);
883 else
884 smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
885 }
886
887 #else
888 /*
889 * Called to set the hrtick timer state.
890 *
891 * called with rq->lock held and IRQs disabled
892 */
893 void hrtick_start(struct rq *rq, u64 delay)
894 {
895 /*
896 * Don't schedule slices shorter than 10000ns, that just
897 * doesn't make sense. Rely on vruntime for fairness.
898 */
899 delay = max_t(u64, delay, 10000LL);
900 hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
901 HRTIMER_MODE_REL_PINNED_HARD);
902 }
903
904 #endif /* CONFIG_SMP */
905
906 static void hrtick_rq_init(struct rq *rq)
907 {
908 #ifdef CONFIG_SMP
909 INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
910 #endif
911 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
912 rq->hrtick_timer.function = hrtick;
913 }
914 #else /* CONFIG_SCHED_HRTICK */
915 static inline void hrtick_clear(struct rq *rq)
916 {
917 }
918
919 static inline void hrtick_rq_init(struct rq *rq)
920 {
921 }
922 #endif /* CONFIG_SCHED_HRTICK */
923
924 /*
925 * try_cmpxchg based fetch_or() macro so it works for different integer types:
926 */
927 #define fetch_or(ptr, mask) \
928 ({ \
929 typeof(ptr) _ptr = (ptr); \
930 typeof(mask) _mask = (mask); \
931 typeof(*_ptr) _val = *_ptr; \
932 \
933 do { \
934 } while (!try_cmpxchg(_ptr, &_val, _val | _mask)); \
935 _val; \
936 })
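/*
 * Usage sketch (illustration only): fetch_or() behaves like an atomic
 * fetch-or and returns the value the word held *before* the OR, e.g.:
 *
 *	old = fetch_or(&ti->flags, _TIF_NEED_RESCHED);
 *
 * so the caller can tell from 'old' whether e.g. _TIF_POLLING_NRFLAG was
 * already set at that point, as set_nr_and_not_polling() below does.
 */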
937
938 #if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
939 /*
940 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
941 * this avoids any races wrt polling state changes and thereby avoids
942 * spurious IPIs.
943 */
944 static inline bool set_nr_and_not_polling(struct thread_info *ti, int tif)
945 {
946 return !(fetch_or(&ti->flags, 1 << tif) & _TIF_POLLING_NRFLAG);
947 }
948
949 /*
950 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
951 *
952 * If this returns true, then the idle task promises to call
953 * sched_ttwu_pending() and reschedule soon.
954 */
955 static bool set_nr_if_polling(struct task_struct *p)
956 {
957 struct thread_info *ti = task_thread_info(p);
958 typeof(ti->flags) val = READ_ONCE(ti->flags);
959
960 do {
961 if (!(val & _TIF_POLLING_NRFLAG))
962 return false;
963 if (val & _TIF_NEED_RESCHED)
964 return true;
965 } while (!try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED));
966
967 return true;
968 }
969
970 #else
971 static inline bool set_nr_and_not_polling(struct thread_info *ti, int tif)
972 {
973 set_ti_thread_flag(ti, tif);
974 return true;
975 }
976
977 #ifdef CONFIG_SMP
978 static inline bool set_nr_if_polling(struct task_struct *p)
979 {
980 return false;
981 }
982 #endif
983 #endif
984
985 static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
986 {
987 struct wake_q_node *node = &task->wake_q;
988
989 /*
990 * Atomically grab the task, if ->wake_q is !nil already it means
991 * it's already queued (either by us or someone else) and will get the
992 * wakeup due to that.
993 *
994 * In order to ensure that a pending wakeup will observe our pending
995 * state, even in the failed case, an explicit smp_mb() must be used.
996 */
997 smp_mb__before_atomic();
998 if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
999 return false;
1000
1001 /*
1002 * The head is context local, there can be no concurrency.
1003 */
1004 *head->lastp = node;
1005 head->lastp = &node->next;
1006 return true;
1007 }
1008
1009 /**
1010 * wake_q_add() - queue a wakeup for 'later' waking.
1011 * @head: the wake_q_head to add @task to
1012 * @task: the task to queue for 'later' wakeup
1013 *
1014 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
1015 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
1016 * instantly.
1017 *
1018 * This function must be used as-if it were wake_up_process(); IOW the task
1019 * must be ready to be woken at this location.
1020 */
1021 void wake_q_add(struct wake_q_head *head, struct task_struct *task)
1022 {
1023 if (__wake_q_add(head, task))
1024 get_task_struct(task);
1025 }
1026
1027 /**
1028 * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
1029 * @head: the wake_q_head to add @task to
1030 * @task: the task to queue for 'later' wakeup
1031 *
1032 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
1033 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
1034 * instantly.
1035 *
1036 * This function must be used as-if it were wake_up_process(); IOW the task
1037 * must be ready to be woken at this location.
1038 *
1039 * This function is essentially a task-safe equivalent to wake_q_add(). Callers
1040 * that already hold reference to @task can call the 'safe' version and trust
1041 * wake_q to do the right thing depending on whether or not the @task is already
1042 * queued for wakeup.
1043 */
1044 void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
1045 {
1046 if (!__wake_q_add(head, task))
1047 put_task_struct(task);
1048 }
1049
1050 void wake_up_q(struct wake_q_head *head)
1051 {
1052 struct wake_q_node *node = head->first;
1053
1054 while (node != WAKE_Q_TAIL) {
1055 struct task_struct *task;
1056
1057 task = container_of(node, struct task_struct, wake_q);
1058 /* Task can safely be re-inserted now: */
1059 node = node->next;
1060 task->wake_q.next = NULL;
1061
1062 /*
1063 * wake_up_process() executes a full barrier, which pairs with
1064 * the queueing in wake_q_add() so as not to miss wakeups.
1065 */
1066 wake_up_process(task);
1067 put_task_struct(task);
1068 }
1069 }
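/*
 * Typical wake_q usage pattern (illustration only; 'some_lock' and 'task'
 * are placeholders): queue wakeups while holding a lock, then issue them
 * after dropping it:
 *
 *	DEFINE_WAKE_Q(wake_q);
 *
 *	spin_lock(&some_lock);
 *	wake_q_add(&wake_q, task);
 *	spin_unlock(&some_lock);
 *
 *	wake_up_q(&wake_q);
 */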
1070
1071 /*
1072 * resched_curr - mark rq's current task 'to be rescheduled now'.
1073 *
1074 * On UP this means the setting of the need_resched flag, on SMP it
1075 * might also involve a cross-CPU call to trigger the scheduler on
1076 * the target CPU.
1077 */
1078 static void __resched_curr(struct rq *rq, int tif)
1079 {
1080 struct task_struct *curr = rq->curr;
1081 struct thread_info *cti = task_thread_info(curr);
1082 int cpu;
1083
1084 lockdep_assert_rq_held(rq);
1085
1086 /*
1087 * Always immediately preempt the idle task; no point in delaying doing
1088 * actual work.
1089 */
1090 if (is_idle_task(curr) && tif == TIF_NEED_RESCHED_LAZY)
1091 tif = TIF_NEED_RESCHED;
1092
1093 if (cti->flags & ((1 << tif) | _TIF_NEED_RESCHED))
1094 return;
1095
1096 cpu = cpu_of(rq);
1097
1098 if (cpu == smp_processor_id()) {
1099 set_ti_thread_flag(cti, tif);
1100 if (tif == TIF_NEED_RESCHED)
1101 set_preempt_need_resched();
1102 return;
1103 }
1104
1105 if (set_nr_and_not_polling(cti, tif)) {
1106 if (tif == TIF_NEED_RESCHED)
1107 smp_send_reschedule(cpu);
1108 } else {
1109 trace_sched_wake_idle_without_ipi(cpu);
1110 }
1111 }
1112
1113 void resched_curr(struct rq *rq)
1114 {
1115 __resched_curr(rq, TIF_NEED_RESCHED);
1116 }
1117
1118 #ifdef CONFIG_PREEMPT_DYNAMIC
1119 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_preempt_lazy);
1120 static __always_inline bool dynamic_preempt_lazy(void)
1121 {
1122 return static_branch_unlikely(&sk_dynamic_preempt_lazy);
1123 }
1124 #else
1125 static __always_inline bool dynamic_preempt_lazy(void)
1126 {
1127 return IS_ENABLED(CONFIG_PREEMPT_LAZY);
1128 }
1129 #endif
1130
1131 static __always_inline int get_lazy_tif_bit(void)
1132 {
1133 if (dynamic_preempt_lazy())
1134 return TIF_NEED_RESCHED_LAZY;
1135
1136 return TIF_NEED_RESCHED;
1137 }
1138
1139 void resched_curr_lazy(struct rq *rq)
1140 {
1141 __resched_curr(rq, get_lazy_tif_bit());
1142 }
1143
1144 void resched_cpu(int cpu)
1145 {
1146 struct rq *rq = cpu_rq(cpu);
1147 unsigned long flags;
1148
1149 raw_spin_rq_lock_irqsave(rq, flags);
1150 if (cpu_online(cpu) || cpu == smp_processor_id())
1151 resched_curr(rq);
1152 raw_spin_rq_unlock_irqrestore(rq, flags);
1153 }
1154
1155 #ifdef CONFIG_SMP
1156 #ifdef CONFIG_NO_HZ_COMMON
1157 /*
1158 * In the semi idle case, use the nearest busy CPU for migrating timers
1159 * from an idle CPU. This is good for power-savings.
1160 *
1161 * We don't do a similar optimization for a completely idle system, as
1162 * selecting an idle CPU will add more delays to the timers than intended
1163 * (as that CPU's timer base may not be up to date wrt jiffies etc).
1164 */
1165 int get_nohz_timer_target(void)
1166 {
1167 int i, cpu = smp_processor_id(), default_cpu = -1;
1168 struct sched_domain *sd;
1169 const struct cpumask *hk_mask;
1170
1171 if (housekeeping_cpu(cpu, HK_TYPE_TIMER)) {
1172 if (!idle_cpu(cpu))
1173 return cpu;
1174 default_cpu = cpu;
1175 }
1176
1177 hk_mask = housekeeping_cpumask(HK_TYPE_TIMER);
1178
1179 guard(rcu)();
1180
1181 for_each_domain(cpu, sd) {
1182 for_each_cpu_and(i, sched_domain_span(sd), hk_mask) {
1183 if (cpu == i)
1184 continue;
1185
1186 if (!idle_cpu(i))
1187 return i;
1188 }
1189 }
1190
1191 if (default_cpu == -1)
1192 default_cpu = housekeeping_any_cpu(HK_TYPE_TIMER);
1193
1194 return default_cpu;
1195 }
1196
1197 /*
1198 * When add_timer_on() enqueues a timer into the timer wheel of an
1199 * idle CPU then this timer might expire before the next timer event
1200 * which is scheduled to wake up that CPU. In case of a completely
1201 * idle system the next event might even be infinite time into the
1202 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
1203 * leaves the inner idle loop so the newly added timer is taken into
1204 * account when the CPU goes back to idle and evaluates the timer
1205 * wheel for the next timer event.
1206 */
1207 static void wake_up_idle_cpu(int cpu)
1208 {
1209 struct rq *rq = cpu_rq(cpu);
1210
1211 if (cpu == smp_processor_id())
1212 return;
1213
1214 /*
1215 * Set TIF_NEED_RESCHED and send an IPI if in the non-polling
1216 * part of the idle loop. This forces an exit from the idle loop
1217 * and a round trip to schedule(). Now this could be optimized
1218 * because a simple new idle loop iteration is enough to
1219 * re-evaluate the next tick. Provided some re-ordering of tick
1220 * nohz functions that would need to follow TIF_POLLING_NRFLAG
1221 * clearing:
1222 *
1223 * - On most architectures, a simple fetch_or on ti::flags with a
1224 * "0" value would be enough to know if an IPI needs to be sent.
1225 *
1226 * - x86 needs to perform a last need_resched() check between
1227 * monitor and mwait which doesn't take timers into account.
1228 * There a dedicated TIF_TIMER flag would be required to
1229 * fetch_or here and be checked along with TIF_NEED_RESCHED
1230 * before mwait().
1231 *
1232 * However, remote timer enqueue is not such a frequent event
1233 * and testing of the above solutions didn't appear to report
1234 * much benefit.
1235 */
1236 if (set_nr_and_not_polling(task_thread_info(rq->idle), TIF_NEED_RESCHED))
1237 smp_send_reschedule(cpu);
1238 else
1239 trace_sched_wake_idle_without_ipi(cpu);
1240 }
1241
1242 static bool wake_up_full_nohz_cpu(int cpu)
1243 {
1244 /*
1245 * We just need the target to call irq_exit() and re-evaluate
1246 * the next tick. The nohz full kick at least implies that.
1247 * If needed we can still optimize that later with an
1248 * empty IRQ.
1249 */
1250 if (cpu_is_offline(cpu))
1251 return true; /* Don't try to wake offline CPUs. */
1252 if (tick_nohz_full_cpu(cpu)) {
1253 if (cpu != smp_processor_id() ||
1254 tick_nohz_tick_stopped())
1255 tick_nohz_full_kick_cpu(cpu);
1256 return true;
1257 }
1258
1259 return false;
1260 }
1261
1262 /*
1263 * Wake up the specified CPU. If the CPU is going offline, it is the
1264 * caller's responsibility to deal with the lost wakeup, for example,
1265 * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
1266 */
1267 void wake_up_nohz_cpu(int cpu)
1268 {
1269 if (!wake_up_full_nohz_cpu(cpu))
1270 wake_up_idle_cpu(cpu);
1271 }
1272
1273 static void nohz_csd_func(void *info)
1274 {
1275 struct rq *rq = info;
1276 int cpu = cpu_of(rq);
1277 unsigned int flags;
1278
1279 /*
1280 * Release the rq::nohz_csd.
1281 */
1282 flags = atomic_fetch_andnot(NOHZ_KICK_MASK | NOHZ_NEWILB_KICK, nohz_flags(cpu));
1283 WARN_ON(!(flags & NOHZ_KICK_MASK));
1284
1285 rq->idle_balance = idle_cpu(cpu);
1286 if (rq->idle_balance) {
1287 rq->nohz_idle_balance = flags;
1288 __raise_softirq_irqoff(SCHED_SOFTIRQ);
1289 }
1290 }
1291
1292 #endif /* CONFIG_NO_HZ_COMMON */
1293
1294 #ifdef CONFIG_NO_HZ_FULL
1295 static inline bool __need_bw_check(struct rq *rq, struct task_struct *p)
1296 {
1297 if (rq->nr_running != 1)
1298 return false;
1299
1300 if (p->sched_class != &fair_sched_class)
1301 return false;
1302
1303 if (!task_on_rq_queued(p))
1304 return false;
1305
1306 return true;
1307 }
1308
1309 bool sched_can_stop_tick(struct rq *rq)
1310 {
1311 int fifo_nr_running;
1312
1313 /* Deadline tasks, even if single, need the tick */
1314 if (rq->dl.dl_nr_running)
1315 return false;
1316
1317 /*
1318 * If there is more than one RR task, we need the tick to affect the
1319 * actual RR behaviour.
1320 */
1321 if (rq->rt.rr_nr_running) {
1322 if (rq->rt.rr_nr_running == 1)
1323 return true;
1324 else
1325 return false;
1326 }
1327
1328 /*
1329 * If there's no RR tasks, but FIFO tasks, we can skip the tick, no
1330 * forced preemption between FIFO tasks.
1331 */
1332 fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
1333 if (fifo_nr_running)
1334 return true;
1335
1336 /*
1337 * If there are no DL, RR, or FIFO tasks, there must only be CFS or SCX tasks
1338 * left. For CFS, if there's more than one we need the tick for
1339 * involuntary preemption. For SCX, ask.
1340 */
1341 if (scx_enabled() && !scx_can_stop_tick(rq))
1342 return false;
1343
1344 if (rq->cfs.h_nr_running > 1)
1345 return false;
1346
1347 /*
1348 * If there is one task and it has CFS runtime bandwidth constraints
1349 * and it's on the cpu now we don't want to stop the tick.
1350 * This check prevents clearing the bit if a newly enqueued task here is
1351 * dequeued by migrating while the constrained task continues to run.
1352 * E.g. going from 2->1 without going through pick_next_task().
1353 */
1354 if (__need_bw_check(rq, rq->curr)) {
1355 if (cfs_task_bw_constrained(rq->curr))
1356 return false;
1357 }
1358
1359 return true;
1360 }
1361 #endif /* CONFIG_NO_HZ_FULL */
1362 #endif /* CONFIG_SMP */
1363
1364 #if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
1365 (defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
1366 /*
1367 * Iterate task_group tree rooted at *from, calling @down when first entering a
1368 * node and @up when leaving it for the final time.
1369 *
1370 * Caller must hold rcu_lock or sufficient equivalent.
1371 */
1372 int walk_tg_tree_from(struct task_group *from,
1373 tg_visitor down, tg_visitor up, void *data)
1374 {
1375 struct task_group *parent, *child;
1376 int ret;
1377
1378 parent = from;
1379
1380 down:
1381 ret = (*down)(parent, data);
1382 if (ret)
1383 goto out;
1384 list_for_each_entry_rcu(child, &parent->children, siblings) {
1385 parent = child;
1386 goto down;
1387
1388 up:
1389 continue;
1390 }
1391 ret = (*up)(parent, data);
1392 if (ret || parent == from)
1393 goto out;
1394
1395 child = parent;
1396 parent = parent->parent;
1397 if (parent)
1398 goto up;
1399 out:
1400 return ret;
1401 }
1402
1403 int tg_nop(struct task_group *tg, void *data)
1404 {
1405 return 0;
1406 }
1407 #endif
1408
1409 void set_load_weight(struct task_struct *p, bool update_load)
1410 {
1411 int prio = p->static_prio - MAX_RT_PRIO;
1412 struct load_weight lw;
1413
1414 if (task_has_idle_policy(p)) {
1415 lw.weight = scale_load(WEIGHT_IDLEPRIO);
1416 lw.inv_weight = WMULT_IDLEPRIO;
1417 } else {
1418 lw.weight = scale_load(sched_prio_to_weight[prio]);
1419 lw.inv_weight = sched_prio_to_wmult[prio];
1420 }
1421
1422 /*
1423 * SCHED_OTHER tasks have to update their load when changing their
1424 * weight
1425 */
1426 if (update_load && p->sched_class->reweight_task)
1427 p->sched_class->reweight_task(task_rq(p), p, &lw);
1428 else
1429 p->se.load = lw;
1430 }
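/*
 * Worked example (for reference): a nice-0 SCHED_OTHER task has
 * static_prio == 120 (MAX_RT_PRIO + 20), so prio == 20 above and its
 * weight becomes scale_load(sched_prio_to_weight[20]) ==
 * scale_load(1024) == NICE_0_LOAD, with the matching inverse weight
 * taken from sched_prio_to_wmult[20].
 */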
1431
1432 #ifdef CONFIG_UCLAMP_TASK
1433 /*
1434 * Serializes updates of utilization clamp values
1435 *
1436 * The (slow-path) user-space triggers utilization clamp value updates which
1437 * can require updates on (fast-path) scheduler's data structures used to
1438 * support enqueue/dequeue operations.
1439 * While the per-CPU rq lock protects fast-path update operations, user-space
1440 * requests are serialized using a mutex to reduce the risk of conflicting
1441 * updates or API abuses.
1442 */
1443 static __maybe_unused DEFINE_MUTEX(uclamp_mutex);
1444
1445 /* Max allowed minimum utilization */
1446 static unsigned int __maybe_unused sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;
1447
1448 /* Max allowed maximum utilization */
1449 static unsigned int __maybe_unused sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;
1450
1451 /*
1452 * By default RT tasks run at the maximum performance point/capacity of the
1453 * system. Uclamp enforces this by always setting UCLAMP_MIN of RT tasks to
1454 * SCHED_CAPACITY_SCALE.
1455 *
1456 * This knob allows admins to change the default behavior when uclamp is being
1457 * used. In battery powered devices, particularly, running at the maximum
1458 * capacity and frequency will increase energy consumption and shorten the
1459 * battery life.
1460 *
1461 * This knob only affects RT tasks whose uclamp_se->user_defined == false.
1462 *
1463 * This knob will not override the system default sched_util_clamp_min defined
1464 * above.
1465 */
1466 unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE;
1467
1468 /* All clamps are required to be less or equal than these values */
1469 static struct uclamp_se uclamp_default[UCLAMP_CNT];
1470
1471 /*
1472 * This static key is used to reduce the uclamp overhead in the fast path. It
1473 * primarily disables the call to uclamp_rq_{inc, dec}() in
1474 * enqueue/dequeue_task().
1475 *
1476 * This allows users to continue to enable uclamp in their kernel config with
1477 * minimum uclamp overhead in the fast path.
1478 *
1479 * As soon as userspace modifies any of the uclamp knobs, the static key is
1480 * enabled, since we have actual users that make use of uclamp
1481 * functionality.
1482 *
1483 * The knobs that would enable this static key are:
1484 *
1485 * * A task modifying its uclamp value with sched_setattr().
1486 * * An admin modifying the sysctl_sched_uclamp_{min, max} via procfs.
1487 * * An admin modifying the cgroup cpu.uclamp.{min, max}
1488 */
1489 DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);
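/*
 * Userspace-side illustration (hedged sketch, not part of this file): one
 * way the key above gets enabled is a task requesting a clamp via
 * sched_setattr(2), roughly:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_NORMAL,
 *		.sched_flags	= SCHED_FLAG_UTIL_CLAMP_MIN,
 *		.sched_util_min	= 512,
 *	};
 *	syscall(__NR_sched_setattr, 0, &attr, 0);
 */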
1490
1491 static inline unsigned int
1492 uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
1493 unsigned int clamp_value)
1494 {
1495 /*
1496 * Avoid blocked utilization pushing up the frequency when we go
1497 * idle (which drops the max-clamp) by retaining the last known
1498 * max-clamp.
1499 */
1500 if (clamp_id == UCLAMP_MAX) {
1501 rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
1502 return clamp_value;
1503 }
1504
1505 return uclamp_none(UCLAMP_MIN);
1506 }
1507
1508 static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
1509 unsigned int clamp_value)
1510 {
1511 /* Reset max-clamp retention only on idle exit */
1512 if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
1513 return;
1514
1515 uclamp_rq_set(rq, clamp_id, clamp_value);
1516 }
1517
1518 static inline
1519 unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
1520 unsigned int clamp_value)
1521 {
1522 struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
1523 int bucket_id = UCLAMP_BUCKETS - 1;
1524
1525 /*
1526 * Since both min and max clamps are max aggregated, find the
1527 * top most bucket with tasks in.
1528 */
1529 for ( ; bucket_id >= 0; bucket_id--) {
1530 if (!bucket[bucket_id].tasks)
1531 continue;
1532 return bucket[bucket_id].value;
1533 }
1534
1535 /* No tasks -- default clamp values */
1536 return uclamp_idle_value(rq, clamp_id, clamp_value);
1537 }
1538
1539 static void __uclamp_update_util_min_rt_default(struct task_struct *p)
1540 {
1541 unsigned int default_util_min;
1542 struct uclamp_se *uc_se;
1543
1544 lockdep_assert_held(&p->pi_lock);
1545
1546 uc_se = &p->uclamp_req[UCLAMP_MIN];
1547
1548 /* Only sync if user didn't override the default */
1549 if (uc_se->user_defined)
1550 return;
1551
1552 default_util_min = sysctl_sched_uclamp_util_min_rt_default;
1553 uclamp_se_set(uc_se, default_util_min, false);
1554 }
1555
1556 static void uclamp_update_util_min_rt_default(struct task_struct *p)
1557 {
1558 if (!rt_task(p))
1559 return;
1560
1561 /* Protect updates to p->uclamp_* */
1562 guard(task_rq_lock)(p);
1563 __uclamp_update_util_min_rt_default(p);
1564 }
1565
1566 static inline struct uclamp_se
1567 uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
1568 {
1569 /* Copy by value as we could modify it */
1570 struct uclamp_se uc_req = p->uclamp_req[clamp_id];
1571 #ifdef CONFIG_UCLAMP_TASK_GROUP
1572 unsigned int tg_min, tg_max, value;
1573
1574 /*
1575 * Tasks in autogroups or root task group will be
1576 * restricted by system defaults.
1577 */
1578 if (task_group_is_autogroup(task_group(p)))
1579 return uc_req;
1580 if (task_group(p) == &root_task_group)
1581 return uc_req;
1582
1583 tg_min = task_group(p)->uclamp[UCLAMP_MIN].value;
1584 tg_max = task_group(p)->uclamp[UCLAMP_MAX].value;
1585 value = uc_req.value;
1586 value = clamp(value, tg_min, tg_max);
1587 uclamp_se_set(&uc_req, value, false);
1588 #endif
1589
1590 return uc_req;
1591 }
1592
1593 /*
1594 * The effective clamp bucket index of a task depends on, by increasing
1595 * priority:
1596 * - the task specific clamp value, when explicitly requested from userspace
1597 * - the task group effective clamp value, for tasks neither in the root
1598 *   group nor in an autogroup
1599 * - the system default clamp value, defined by the sysadmin
1600 */
1601 static inline struct uclamp_se
1602 uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
1603 {
1604 struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id);
1605 struct uclamp_se uc_max = uclamp_default[clamp_id];
1606
1607 /* System default restrictions always apply */
1608 if (unlikely(uc_req.value > uc_max.value))
1609 return uc_max;
1610
1611 return uc_req;
1612 }
1613
1614 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
1615 {
1616 struct uclamp_se uc_eff;
1617
1618 /* Task currently refcounted: use back-annotated (effective) value */
1619 if (p->uclamp[clamp_id].active)
1620 return (unsigned long)p->uclamp[clamp_id].value;
1621
1622 uc_eff = uclamp_eff_get(p, clamp_id);
1623
1624 return (unsigned long)uc_eff.value;
1625 }
1626
1627 /*
1628 * When a task is enqueued on a rq, the clamp bucket currently defined by the
1629 * task's uclamp::bucket_id is refcounted on that rq. This also immediately
1630 * updates the rq's clamp value if required.
1631 *
1632 * Tasks can have a task-specific value requested from user-space, track
1633 * within each bucket the maximum value for tasks refcounted in it.
1634 * This "local max aggregation" allows to track the exact "requested" value
1635 * for each bucket when all its RUNNABLE tasks require the same clamp.
1636 */
1637 static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
1638 enum uclamp_id clamp_id)
1639 {
1640 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1641 struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1642 struct uclamp_bucket *bucket;
1643
1644 lockdep_assert_rq_held(rq);
1645
1646 /* Update task effective clamp */
1647 p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);
1648
1649 bucket = &uc_rq->bucket[uc_se->bucket_id];
1650 bucket->tasks++;
1651 uc_se->active = true;
1652
1653 uclamp_idle_reset(rq, clamp_id, uc_se->value);
1654
1655 /*
1656 * Local max aggregation: rq buckets always track the max
1657 * "requested" clamp value of its RUNNABLE tasks.
1658 */
1659 if (bucket->tasks == 1 || uc_se->value > bucket->value)
1660 bucket->value = uc_se->value;
1661
1662 if (uc_se->value > uclamp_rq_get(rq, clamp_id))
1663 uclamp_rq_set(rq, clamp_id, uc_se->value);
1664 }
1665
1666 /*
1667 * When a task is dequeued from a rq, the clamp bucket refcounted by the task
1668 * is released. If this is the last task reference counting the rq's max
1669 * active clamp value, then the rq's clamp value is updated.
1670 *
1671 * Both refcounted tasks and rq's cached clamp values are expected to be
1672 * always valid. If it's detected they are not, as defensive programming,
1673 * enforce the expected state and warn.
1674 */
1675 static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
1676 enum uclamp_id clamp_id)
1677 {
1678 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1679 struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1680 struct uclamp_bucket *bucket;
1681 unsigned int bkt_clamp;
1682 unsigned int rq_clamp;
1683
1684 lockdep_assert_rq_held(rq);
1685
1686 /*
1687 * If sched_uclamp_used was enabled after task @p was enqueued,
1688 * we could end up with unbalanced call to uclamp_rq_dec_id().
1689 *
1690 * In this case the uc_se->active flag should be false since no uclamp
1691 * accounting was performed at enqueue time and we can just return
1692 * here.
1693 *
1694 * Need to be careful of the following enqueue/dequeue ordering
1695 * problem too
1696 *
1697 * enqueue(taskA)
1698 * // sched_uclamp_used gets enabled
1699 * enqueue(taskB)
1700 * dequeue(taskA)
1701 * // Must not decrement bucket->tasks here
1702 * dequeue(taskB)
1703 *
1704 * where we could end up with stale data in uc_se and
1705 * bucket[uc_se->bucket_id].
1706 *
1707 * The following check here eliminates the possibility of such race.
1708 */
1709 if (unlikely(!uc_se->active))
1710 return;
1711
1712 bucket = &uc_rq->bucket[uc_se->bucket_id];
1713
1714 SCHED_WARN_ON(!bucket->tasks);
1715 if (likely(bucket->tasks))
1716 bucket->tasks--;
1717
1718 uc_se->active = false;
1719
1720 /*
1721 * Keep "local max aggregation" simple and accept to (possibly)
1722 * overboost some RUNNABLE tasks in the same bucket.
1723 * The rq clamp bucket value is reset to its base value whenever
1724 * there are no more RUNNABLE tasks refcounting it.
1725 */
1726 if (likely(bucket->tasks))
1727 return;
1728
1729 rq_clamp = uclamp_rq_get(rq, clamp_id);
1730 /*
1731 * Defensive programming: this should never happen. If it happens,
1732 * e.g. due to future modification, warn and fix up the expected value.
1733 */
1734 SCHED_WARN_ON(bucket->value > rq_clamp);
1735 if (bucket->value >= rq_clamp) {
1736 bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
1737 uclamp_rq_set(rq, clamp_id, bkt_clamp);
1738 }
1739 }
1740
1741 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
1742 {
1743 enum uclamp_id clamp_id;
1744
1745 /*
1746 * Avoid any overhead until uclamp is actually used by the userspace.
1747 *
1748 * The condition is constructed such that a NOP is generated when
1749 * sched_uclamp_used is disabled.
1750 */
1751 if (!static_branch_unlikely(&sched_uclamp_used))
1752 return;
1753
1754 if (unlikely(!p->sched_class->uclamp_enabled))
1755 return;
1756
1757 if (p->se.sched_delayed)
1758 return;
1759
1760 for_each_clamp_id(clamp_id)
1761 uclamp_rq_inc_id(rq, p, clamp_id);
1762
1763 /* Reset clamp idle holding when there is one RUNNABLE task */
1764 if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
1765 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
1766 }
1767
1768 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
1769 {
1770 enum uclamp_id clamp_id;
1771
1772 /*
1773 * Avoid any overhead until uclamp is actually used by the userspace.
1774 *
1775 * The condition is constructed such that a NOP is generated when
1776 * sched_uclamp_used is disabled.
1777 */
1778 if (!static_branch_unlikely(&sched_uclamp_used))
1779 return;
1780
1781 if (unlikely(!p->sched_class->uclamp_enabled))
1782 return;
1783
1784 if (p->se.sched_delayed)
1785 return;
1786
1787 for_each_clamp_id(clamp_id)
1788 uclamp_rq_dec_id(rq, p, clamp_id);
1789 }
1790
1791 static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p,
1792 enum uclamp_id clamp_id)
1793 {
1794 if (!p->uclamp[clamp_id].active)
1795 return;
1796
1797 uclamp_rq_dec_id(rq, p, clamp_id);
1798 uclamp_rq_inc_id(rq, p, clamp_id);
1799
1800 /*
1801 * Make sure to clear the idle flag if we've transiently reached 0
1802 * active tasks on rq.
1803 */
1804 if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE))
1805 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
1806 }
1807
1808 static inline void
1809 uclamp_update_active(struct task_struct *p)
1810 {
1811 enum uclamp_id clamp_id;
1812 struct rq_flags rf;
1813 struct rq *rq;
1814
1815 /*
1816 * Lock the task and the rq where the task is (or was) queued.
1817 *
1818 * We might lock the (previous) rq of a !RUNNABLE task, but that's the
1819 * price to pay to safely serialize util_{min,max} updates with
1820 * enqueues, dequeues and migration operations.
1821 * This is the same locking schema used by __set_cpus_allowed_ptr().
1822 */
1823 rq = task_rq_lock(p, &rf);
1824
1825 /*
1826 * Setting the clamp bucket is serialized by task_rq_lock().
1827 * If the task is not yet RUNNABLE and its task_struct is not
1828 * affecting a valid clamp bucket, the next time it's enqueued,
1829 * it will already see the updated clamp bucket value.
1830 */
1831 for_each_clamp_id(clamp_id)
1832 uclamp_rq_reinc_id(rq, p, clamp_id);
1833
1834 task_rq_unlock(rq, p, &rf);
1835 }
1836
1837 #ifdef CONFIG_UCLAMP_TASK_GROUP
1838 static inline void
1839 uclamp_update_active_tasks(struct cgroup_subsys_state *css)
1840 {
1841 struct css_task_iter it;
1842 struct task_struct *p;
1843
1844 css_task_iter_start(css, 0, &it);
1845 while ((p = css_task_iter_next(&it)))
1846 uclamp_update_active(p);
1847 css_task_iter_end(&it);
1848 }
1849
1850 static void cpu_util_update_eff(struct cgroup_subsys_state *css);
1851 #endif
1852
1853 #ifdef CONFIG_SYSCTL
1854 #ifdef CONFIG_UCLAMP_TASK_GROUP
1855 static void uclamp_update_root_tg(void)
1856 {
1857 struct task_group *tg = &root_task_group;
1858
1859 uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN],
1860 sysctl_sched_uclamp_util_min, false);
1861 uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
1862 sysctl_sched_uclamp_util_max, false);
1863
1864 guard(rcu)();
1865 cpu_util_update_eff(&root_task_group.css);
1866 }
1867 #else
1868 static void uclamp_update_root_tg(void) { }
1869 #endif
1870
1871 static void uclamp_sync_util_min_rt_default(void)
1872 {
1873 struct task_struct *g, *p;
1874
1875 /*
1876 * copy_process() sysctl_uclamp
1877 * uclamp_min_rt = X;
1878 * write_lock(&tasklist_lock) read_lock(&tasklist_lock)
1879 * // link thread smp_mb__after_spinlock()
1880 * write_unlock(&tasklist_lock) read_unlock(&tasklist_lock);
1881 * sched_post_fork() for_each_process_thread()
1882 * __uclamp_sync_rt() __uclamp_sync_rt()
1883 *
1884 * Ensures that either sched_post_fork() will observe the new
1885 * uclamp_min_rt or for_each_process_thread() will observe the new
1886 * task.
1887 */
1888 read_lock(&tasklist_lock);
1889 smp_mb__after_spinlock();
1890 read_unlock(&tasklist_lock);
1891
1892 guard(rcu)();
1893 for_each_process_thread(g, p)
1894 uclamp_update_util_min_rt_default(p);
1895 }
1896
1897 static int sysctl_sched_uclamp_handler(const struct ctl_table *table, int write,
1898 void *buffer, size_t *lenp, loff_t *ppos)
1899 {
1900 bool update_root_tg = false;
1901 int old_min, old_max, old_min_rt;
1902 int result;
1903
1904 guard(mutex)(&uclamp_mutex);
1905
1906 old_min = sysctl_sched_uclamp_util_min;
1907 old_max = sysctl_sched_uclamp_util_max;
1908 old_min_rt = sysctl_sched_uclamp_util_min_rt_default;
1909
1910 result = proc_dointvec(table, write, buffer, lenp, ppos);
1911 if (result)
1912 goto undo;
1913 if (!write)
1914 return 0;
1915
1916 if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
1917 sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE ||
1918 sysctl_sched_uclamp_util_min_rt_default > SCHED_CAPACITY_SCALE) {
1919
1920 result = -EINVAL;
1921 goto undo;
1922 }
1923
1924 if (old_min != sysctl_sched_uclamp_util_min) {
1925 uclamp_se_set(&uclamp_default[UCLAMP_MIN],
1926 sysctl_sched_uclamp_util_min, false);
1927 update_root_tg = true;
1928 }
1929 if (old_max != sysctl_sched_uclamp_util_max) {
1930 uclamp_se_set(&uclamp_default[UCLAMP_MAX],
1931 sysctl_sched_uclamp_util_max, false);
1932 update_root_tg = true;
1933 }
1934
1935 if (update_root_tg) {
1936 static_branch_enable(&sched_uclamp_used);
1937 uclamp_update_root_tg();
1938 }
1939
1940 if (old_min_rt != sysctl_sched_uclamp_util_min_rt_default) {
1941 static_branch_enable(&sched_uclamp_used);
1942 uclamp_sync_util_min_rt_default();
1943 }
1944
1945 /*
1946 * We update all RUNNABLE tasks only when task groups are in use.
1947 * Otherwise, keep it simple and do just a lazy update at each next
1948 * task enqueue time.
1949 */
1950 return 0;
1951
1952 undo:
1953 sysctl_sched_uclamp_util_min = old_min;
1954 sysctl_sched_uclamp_util_max = old_max;
1955 sysctl_sched_uclamp_util_min_rt_default = old_min_rt;
1956 return result;
1957 }
1958 #endif
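
/*
 * Worked example (illustrative only, not used by the handler above): with
 * SCHED_CAPACITY_SCALE == 1024, a write requesting util_max = 2048 is
 * rejected (util_max > SCHED_CAPACITY_SCALE), as is util_min = 800 together
 * with util_max = 600 (min > max); in both cases the undo: path above
 * restores the previous sysctl values and -EINVAL is returned.
 */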
1959
1960 static void uclamp_fork(struct task_struct *p)
1961 {
1962 enum uclamp_id clamp_id;
1963
1964 /*
1965 * We don't need to hold task_rq_lock() when updating p->uclamp_* here
1966 * as the task is still at its early fork stages.
1967 */
1968 for_each_clamp_id(clamp_id)
1969 p->uclamp[clamp_id].active = false;
1970
1971 if (likely(!p->sched_reset_on_fork))
1972 return;
1973
1974 for_each_clamp_id(clamp_id) {
1975 uclamp_se_set(&p->uclamp_req[clamp_id],
1976 uclamp_none(clamp_id), false);
1977 }
1978 }
1979
1980 static void uclamp_post_fork(struct task_struct *p)
1981 {
1982 uclamp_update_util_min_rt_default(p);
1983 }
1984
1985 static void __init init_uclamp_rq(struct rq *rq)
1986 {
1987 enum uclamp_id clamp_id;
1988 struct uclamp_rq *uc_rq = rq->uclamp;
1989
1990 for_each_clamp_id(clamp_id) {
1991 uc_rq[clamp_id] = (struct uclamp_rq) {
1992 .value = uclamp_none(clamp_id)
1993 };
1994 }
1995
1996 rq->uclamp_flags = UCLAMP_FLAG_IDLE;
1997 }
1998
1999 static void __init init_uclamp(void)
2000 {
2001 struct uclamp_se uc_max = {};
2002 enum uclamp_id clamp_id;
2003 int cpu;
2004
2005 for_each_possible_cpu(cpu)
2006 init_uclamp_rq(cpu_rq(cpu));
2007
2008 for_each_clamp_id(clamp_id) {
2009 uclamp_se_set(&init_task.uclamp_req[clamp_id],
2010 uclamp_none(clamp_id), false);
2011 }
2012
2013 /* System defaults allow max clamp values for both indexes */
2014 uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false);
2015 for_each_clamp_id(clamp_id) {
2016 uclamp_default[clamp_id] = uc_max;
2017 #ifdef CONFIG_UCLAMP_TASK_GROUP
2018 root_task_group.uclamp_req[clamp_id] = uc_max;
2019 root_task_group.uclamp[clamp_id] = uc_max;
2020 #endif
2021 }
2022 }
2023
2024 #else /* !CONFIG_UCLAMP_TASK */
2025 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { }
2026 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
2027 static inline void uclamp_fork(struct task_struct *p) { }
2028 static inline void uclamp_post_fork(struct task_struct *p) { }
2029 static inline void init_uclamp(void) { }
2030 #endif /* CONFIG_UCLAMP_TASK */
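
/*
 * Illustrative userspace sketch (hypothetical, not part of this file): the
 * per-task clamp requests handled above are normally set via
 * sched_setattr(2). For example, asking for a minimum utilization of 256
 * (25% of SCHED_CAPACITY_SCALE) for the current task, keeping its policy
 * and parameters:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_flags	= SCHED_FLAG_KEEP_ALL | SCHED_FLAG_UTIL_CLAMP_MIN,
 *		.sched_util_min	= 256,
 *	};
 *
 *	syscall(__NR_sched_setattr, 0, &attr, 0);
 *
 * This is only a sketch of the uclamp uAPI, not a recommendation for any
 * particular clamp value.
 */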
2031
2032 bool sched_task_on_rq(struct task_struct *p)
2033 {
2034 return task_on_rq_queued(p);
2035 }
2036
2037 unsigned long get_wchan(struct task_struct *p)
2038 {
2039 unsigned long ip = 0;
2040 unsigned int state;
2041
2042 if (!p || p == current)
2043 return 0;
2044
2045 /* Only get wchan if task is blocked and we can keep it that way. */
2046 raw_spin_lock_irq(&p->pi_lock);
2047 state = READ_ONCE(p->__state);
2048 smp_rmb(); /* see try_to_wake_up() */
2049 if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
2050 ip = __get_wchan(p);
2051 raw_spin_unlock_irq(&p->pi_lock);
2052
2053 return ip;
2054 }
2055
2056 void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
2057 {
2058 if (!(flags & ENQUEUE_NOCLOCK))
2059 update_rq_clock(rq);
2060
2061 p->sched_class->enqueue_task(rq, p, flags);
2062 /*
2063 * Must be after ->enqueue_task() because ENQUEUE_DELAYED can clear
2064 * ->sched_delayed.
2065 */
2066 uclamp_rq_inc(rq, p);
2067
2068 psi_enqueue(p, flags);
2069
2070 if (!(flags & ENQUEUE_RESTORE))
2071 sched_info_enqueue(rq, p);
2072
2073 if (sched_core_enabled(rq))
2074 sched_core_enqueue(rq, p);
2075 }
2076
2077 /*
2078 * Must only return false when DEQUEUE_SLEEP.
2079 */
2080 inline bool dequeue_task(struct rq *rq, struct task_struct *p, int flags)
2081 {
2082 if (sched_core_enabled(rq))
2083 sched_core_dequeue(rq, p, flags);
2084
2085 if (!(flags & DEQUEUE_NOCLOCK))
2086 update_rq_clock(rq);
2087
2088 if (!(flags & DEQUEUE_SAVE))
2089 sched_info_dequeue(rq, p);
2090
2091 psi_dequeue(p, flags);
2092
2093 /*
2094 * Must be before ->dequeue_task() because ->dequeue_task() can 'fail'
2095 * and mark the task ->sched_delayed.
2096 */
2097 uclamp_rq_dec(rq, p);
2098 return p->sched_class->dequeue_task(rq, p, flags);
2099 }
2100
2101 void activate_task(struct rq *rq, struct task_struct *p, int flags)
2102 {
2103 if (task_on_rq_migrating(p))
2104 flags |= ENQUEUE_MIGRATED;
2105 if (flags & ENQUEUE_MIGRATED)
2106 sched_mm_cid_migrate_to(rq, p);
2107
2108 enqueue_task(rq, p, flags);
2109
2110 WRITE_ONCE(p->on_rq, TASK_ON_RQ_QUEUED);
2111 ASSERT_EXCLUSIVE_WRITER(p->on_rq);
2112 }
2113
2114 void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
2115 {
2116 SCHED_WARN_ON(flags & DEQUEUE_SLEEP);
2117
2118 WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
2119 ASSERT_EXCLUSIVE_WRITER(p->on_rq);
2120
2121 /*
2122 * Code explicitly relies on TASK_ON_RQ_MIGRATING being set *before*
2123 * dequeue_task() and cleared *after* enqueue_task().
2124 */
2125
2126 dequeue_task(rq, p, flags);
2127 }
2128
2129 static void block_task(struct rq *rq, struct task_struct *p, int flags)
2130 {
2131 if (dequeue_task(rq, p, DEQUEUE_SLEEP | flags))
2132 __block_task(rq, p);
2133 }
2134
2135 /**
2136 * task_curr - is this task currently executing on a CPU?
2137 * @p: the task in question.
2138 *
2139 * Return: 1 if the task is currently executing. 0 otherwise.
2140 */
2141 inline int task_curr(const struct task_struct *p)
2142 {
2143 return cpu_curr(task_cpu(p)) == p;
2144 }
2145
2146 /*
2147 * ->switching_to() is called with the pi_lock and rq_lock held and must not
2148 * mess with locking.
2149 */
2150 void check_class_changing(struct rq *rq, struct task_struct *p,
2151 const struct sched_class *prev_class)
2152 {
2153 if (prev_class != p->sched_class && p->sched_class->switching_to)
2154 p->sched_class->switching_to(rq, p);
2155 }
2156
2157 /*
2158 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
2159 * use the balance_callback list if you want balancing.
2160 *
2161 * this means any call to check_class_changed() must be followed by a call to
2162 * balance_callback().
2163 */
2164 void check_class_changed(struct rq *rq, struct task_struct *p,
2165 const struct sched_class *prev_class,
2166 int oldprio)
2167 {
2168 if (prev_class != p->sched_class) {
2169 if (prev_class->switched_from)
2170 prev_class->switched_from(rq, p);
2171
2172 p->sched_class->switched_to(rq, p);
2173 } else if (oldprio != p->prio || dl_task(p))
2174 p->sched_class->prio_changed(rq, p, oldprio);
2175 }
2176
2177 void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags)
2178 {
2179 struct task_struct *donor = rq->donor;
2180
2181 if (p->sched_class == donor->sched_class)
2182 donor->sched_class->wakeup_preempt(rq, p, flags);
2183 else if (sched_class_above(p->sched_class, donor->sched_class))
2184 resched_curr(rq);
2185
2186 /*
2187 * A queue event has occurred, and we're going to schedule. In
2188 * this case, we can save a useless back to back clock update.
2189 */
2190 if (task_on_rq_queued(donor) && test_tsk_need_resched(rq->curr))
2191 rq_clock_skip_update(rq);
2192 }
2193
2194 static __always_inline
2195 int __task_state_match(struct task_struct *p, unsigned int state)
2196 {
2197 if (READ_ONCE(p->__state) & state)
2198 return 1;
2199
2200 if (READ_ONCE(p->saved_state) & state)
2201 return -1;
2202
2203 return 0;
2204 }
2205
2206 static __always_inline
2207 int task_state_match(struct task_struct *p, unsigned int state)
2208 {
2209 /*
2210 * Serialize against current_save_and_set_rtlock_wait_state(),
2211 * current_restore_rtlock_saved_state(), and __refrigerator().
2212 */
2213 guard(raw_spinlock_irq)(&p->pi_lock);
2214 return __task_state_match(p, state);
2215 }
2216
2217 /*
2218 * wait_task_inactive - wait for a thread to unschedule.
2219 *
2220 * Wait for the thread to block in any of the states set in @match_state.
2221 * If it changes, i.e. @p might have woken up, then return zero. When we
2222 * succeed in waiting for @p to be off its CPU, we return a positive number
2223 * (its total switch count). If a second call a short while later returns the
2224 * same number, the caller can be sure that @p has remained unscheduled the
2225 * whole time.
2226 *
2227 * The caller must ensure that the task *will* unschedule sometime soon,
2228 * else this function might spin for a *long* time. This function can't
2229 * be called with interrupts off, or it may introduce deadlock with
2230 * smp_call_function() if an IPI is sent by the same process we are
2231 * waiting to become inactive.
2232 */
2233 unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
2234 {
2235 int running, queued, match;
2236 struct rq_flags rf;
2237 unsigned long ncsw;
2238 struct rq *rq;
2239
2240 for (;;) {
2241 /*
2242 * We do the initial early heuristics without holding
2243 * any task-queue locks at all. We'll only try to get
2244 * the runqueue lock when things look like they will
2245 * work out!
2246 */
2247 rq = task_rq(p);
2248
2249 /*
2250 * If the task is actively running on another CPU
2251 * still, just relax and busy-wait without holding
2252 * any locks.
2253 *
2254 * NOTE! Since we don't hold any locks, it's not
2255 * even sure that "rq" stays as the right runqueue!
2256 * But we don't care, since "task_on_cpu()" will
2257 * return false if the runqueue has changed and p
2258 * is actually now running somewhere else!
2259 */
2260 while (task_on_cpu(rq, p)) {
2261 if (!task_state_match(p, match_state))
2262 return 0;
2263 cpu_relax();
2264 }
2265
2266 /*
2267 * Ok, time to look more closely! We need the rq
2268 * lock now, to be *sure*. If we're wrong, we'll
2269 * just go back and repeat.
2270 */
2271 rq = task_rq_lock(p, &rf);
2272 trace_sched_wait_task(p);
2273 running = task_on_cpu(rq, p);
2274 queued = task_on_rq_queued(p);
2275 ncsw = 0;
2276 if ((match = __task_state_match(p, match_state))) {
2277 /*
2278 * When matching on p->saved_state, consider this task
2279 * still queued so it will wait.
2280 */
2281 if (match < 0)
2282 queued = 1;
2283 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
2284 }
2285 task_rq_unlock(rq, p, &rf);
2286
2287 /*
2288 * If it changed from the expected state, bail out now.
2289 */
2290 if (unlikely(!ncsw))
2291 break;
2292
2293 /*
2294 * Was it really running after all now that we
2295 * checked with the proper locks actually held?
2296 *
2297 * Oops. Go back and try again..
2298 */
2299 if (unlikely(running)) {
2300 cpu_relax();
2301 continue;
2302 }
2303
2304 /*
2305 * It's not enough that it's not actively running,
2306 * it must be off the runqueue _entirely_, and not
2307 * preempted!
2308 *
2309 * So if it was still runnable (but just not actively
2310 * running right now), it's preempted, and we should
2311 * yield - it could be a while.
2312 */
2313 if (unlikely(queued)) {
2314 ktime_t to = NSEC_PER_SEC / HZ;
2315
2316 set_current_state(TASK_UNINTERRUPTIBLE);
2317 schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD);
2318 continue;
2319 }
2320
2321 /*
2322 * Ahh, all good. It wasn't running, and it wasn't
2323 * runnable, which means that it will never become
2324 * running in the future either. We're all done!
2325 */
2326 break;
2327 }
2328
2329 return ncsw;
2330 }
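
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * callers typically sample the switch count twice and compare, to confirm
 * the target stayed off the CPU in between:
 *
 *	unsigned long ncsw = wait_task_inactive(p, TASK_TRACED);
 *
 *	if (!ncsw)
 *		return -ESRCH;			// state changed, e.g. @p woke up
 *	inspect_blocked_task(p);		// placeholder for the real work
 *	if (wait_task_inactive(p, TASK_TRACED) != ncsw)
 *		return -EAGAIN;			// @p ran in the meantime
 *
 * inspect_blocked_task() is a made-up helper and the error codes are only
 * an example of how a caller might react.
 */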
2331
2332 #ifdef CONFIG_SMP
2333
2334 static void
2335 __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx);
2336
2337 static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
2338 {
2339 struct affinity_context ac = {
2340 .new_mask = cpumask_of(rq->cpu),
2341 .flags = SCA_MIGRATE_DISABLE,
2342 };
2343
2344 if (likely(!p->migration_disabled))
2345 return;
2346
2347 if (p->cpus_ptr != &p->cpus_mask)
2348 return;
2349
2350 /*
2351 * Violates locking rules! See comment in __do_set_cpus_allowed().
2352 */
2353 __do_set_cpus_allowed(p, &ac);
2354 }
2355
2356 void migrate_disable(void)
2357 {
2358 struct task_struct *p = current;
2359
2360 if (p->migration_disabled) {
2361 #ifdef CONFIG_DEBUG_PREEMPT
2362 /*
2363 * Warn about overflow half-way through the range.
2364 */
2365 WARN_ON_ONCE((s16)p->migration_disabled < 0);
2366 #endif
2367 p->migration_disabled++;
2368 return;
2369 }
2370
2371 guard(preempt)();
2372 this_rq()->nr_pinned++;
2373 p->migration_disabled = 1;
2374 }
2375 EXPORT_SYMBOL_GPL(migrate_disable);
2376
2377 void migrate_enable(void)
2378 {
2379 struct task_struct *p = current;
2380 struct affinity_context ac = {
2381 .new_mask = &p->cpus_mask,
2382 .flags = SCA_MIGRATE_ENABLE,
2383 };
2384
2385 #ifdef CONFIG_DEBUG_PREEMPT
2386 /*
2387 * Check both overflow from migrate_disable() and superfluous
2388 * migrate_enable().
2389 */
2390 if (WARN_ON_ONCE((s16)p->migration_disabled <= 0))
2391 return;
2392 #endif
2393
2394 if (p->migration_disabled > 1) {
2395 p->migration_disabled--;
2396 return;
2397 }
2398
2399 /*
2400 * Ensure stop_task runs either before or after this, and that
2401 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
2402 */
2403 guard(preempt)();
2404 if (p->cpus_ptr != &p->cpus_mask)
2405 __set_cpus_allowed_ptr(p, &ac);
2406 /*
2407 * Mustn't clear migration_disabled() until cpus_ptr points back at the
2408 * regular cpus_mask, otherwise things that race (eg.
2409 * select_fallback_rq) get confused.
2410 */
2411 barrier();
2412 p->migration_disabled = 0;
2413 this_rq()->nr_pinned--;
2414 }
2415 EXPORT_SYMBOL_GPL(migrate_enable);
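
/*
 * Illustrative pairing sketch (hypothetical caller, not part of this file):
 * migrate_disable()/migrate_enable() nest, pin the task to its current CPU
 * for the duration of the section, and - unlike preempt_disable() - leave
 * preemption enabled:
 *
 *	migrate_disable();
 *	do_per_cpu_work();	// stays on this CPU, but may be preempted
 *	migrate_enable();
 *
 * do_per_cpu_work() is a placeholder for whatever per-CPU access the caller
 * wants to keep on a single CPU.
 */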
2416
2417 static inline bool rq_has_pinned_tasks(struct rq *rq)
2418 {
2419 return rq->nr_pinned;
2420 }
2421
2422 /*
2423 * Per-CPU kthreads are allowed to run on !active && online CPUs, see
2424 * __set_cpus_allowed_ptr() and select_fallback_rq().
2425 */
2426 static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
2427 {
2428 /* When not in the task's cpumask, no point in looking further. */
2429 if (!task_allowed_on_cpu(p, cpu))
2430 return false;
2431
2432 /* migrate_disabled() must be allowed to finish. */
2433 if (is_migration_disabled(p))
2434 return cpu_online(cpu);
2435
2436 /* Non-kernel threads are not allowed during either CPU online or offline. */
2437 if (!(p->flags & PF_KTHREAD))
2438 return cpu_active(cpu);
2439
2440 /* KTHREAD_IS_PER_CPU is always allowed. */
2441 if (kthread_is_per_cpu(p))
2442 return cpu_online(cpu);
2443
2444 /* Regular kernel threads don't get to stay during offline. */
2445 if (cpu_dying(cpu))
2446 return false;
2447
2448 /* But are allowed during online. */
2449 return cpu_online(cpu);
2450 }
2451
2452 /*
2453 * This is how migration works:
2454 *
2455 * 1) we invoke migration_cpu_stop() on the target CPU using
2456 * stop_one_cpu().
2457 * 2) stopper starts to run (implicitly forcing the migrated thread
2458 * off the CPU)
2459 * 3) it checks whether the migrated task is still in the wrong runqueue.
2460 * 4) if it's in the wrong runqueue then the migration thread removes
2461 * it and puts it into the right queue.
2462 * 5) stopper completes and stop_one_cpu() returns and the migration
2463 * is done.
2464 */
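
/*
 * Illustrative sketch of step 1) above (hypothetical caller, not part of
 * this file), roughly what sched_exec() does when rebalancing on exec:
 *
 *	struct migration_arg arg = { .task = p, .dest_cpu = dest_cpu };
 *
 *	stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
 *
 * struct migration_arg and migration_cpu_stop() are defined below; dest_cpu
 * is assumed to have already been chosen by the caller.
 */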
2465
2466 /*
2467 * move_queued_task - move a queued task to new rq.
2468 *
2469 * Returns (locked) new rq. Old rq's lock is released.
2470 */
2471 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
2472 struct task_struct *p, int new_cpu)
2473 {
2474 lockdep_assert_rq_held(rq);
2475
2476 deactivate_task(rq, p, DEQUEUE_NOCLOCK);
2477 set_task_cpu(p, new_cpu);
2478 rq_unlock(rq, rf);
2479
2480 rq = cpu_rq(new_cpu);
2481
2482 rq_lock(rq, rf);
2483 WARN_ON_ONCE(task_cpu(p) != new_cpu);
2484 activate_task(rq, p, 0);
2485 wakeup_preempt(rq, p, 0);
2486
2487 return rq;
2488 }
2489
2490 struct migration_arg {
2491 struct task_struct *task;
2492 int dest_cpu;
2493 struct set_affinity_pending *pending;
2494 };
2495
2496 /*
2497 * @refs: number of wait_for_completion()
2498 * @stop_pending: is @stop_work in use
2499 */
2500 struct set_affinity_pending {
2501 refcount_t refs;
2502 unsigned int stop_pending;
2503 struct completion done;
2504 struct cpu_stop_work stop_work;
2505 struct migration_arg arg;
2506 };
2507
2508 /*
2509 * Move (not current) task off this CPU, onto the destination CPU. We're doing
2510 * this because either it can't run here any more (set_cpus_allowed()
2511 * away from this CPU, or CPU going down), or because we're
2512 * attempting to rebalance this task on exec (sched_exec).
2513 *
2514 * So we race with normal scheduler movements, but that's OK, as long
2515 * as the task is no longer on this CPU.
2516 */
2517 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
2518 struct task_struct *p, int dest_cpu)
2519 {
2520 /* Affinity changed (again). */
2521 if (!is_cpu_allowed(p, dest_cpu))
2522 return rq;
2523
2524 rq = move_queued_task(rq, rf, p, dest_cpu);
2525
2526 return rq;
2527 }
2528
2529 /*
2530 * migration_cpu_stop - this will be executed by a high-prio stopper thread
2531 * and performs thread migration by bumping thread off CPU then
2532 * 'pushing' onto another runqueue.
2533 */
2534 static int migration_cpu_stop(void *data)
2535 {
2536 struct migration_arg *arg = data;
2537 struct set_affinity_pending *pending = arg->pending;
2538 struct task_struct *p = arg->task;
2539 struct rq *rq = this_rq();
2540 bool complete = false;
2541 struct rq_flags rf;
2542
2543 /*
2544 * The original target CPU might have gone down and we might
2545 * be on another CPU but it doesn't matter.
2546 */
2547 local_irq_save(rf.flags);
2548 /*
2549 * We need to explicitly wake pending tasks before running
2550 * __migrate_task() such that we will not miss enforcing cpus_ptr
2551 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
2552 */
2553 flush_smp_call_function_queue();
2554
2555 raw_spin_lock(&p->pi_lock);
2556 rq_lock(rq, &rf);
2557
2558 /*
2559 * If we were passed a pending, then ->stop_pending was set, thus
2560 * p->migration_pending must have remained stable.
2561 */
2562 WARN_ON_ONCE(pending && pending != p->migration_pending);
2563
2564 /*
2565 * If task_rq(p) != rq, it cannot be migrated here, because we're
2566 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
2567 * we're holding p->pi_lock.
2568 */
2569 if (task_rq(p) == rq) {
2570 if (is_migration_disabled(p))
2571 goto out;
2572
2573 if (pending) {
2574 p->migration_pending = NULL;
2575 complete = true;
2576
2577 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask))
2578 goto out;
2579 }
2580
2581 if (task_on_rq_queued(p)) {
2582 update_rq_clock(rq);
2583 rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
2584 } else {
2585 p->wake_cpu = arg->dest_cpu;
2586 }
2587
2588 /*
2589 * XXX __migrate_task() can fail, at which point we might end
2590 * up running on a dodgy CPU, AFAICT this can only happen
2591 * during CPU hotplug, at which point we'll get pushed out
2592 * anyway, so it's probably not a big deal.
2593 */
2594
2595 } else if (pending) {
2596 /*
2597 * This happens when we get migrated between migrate_enable()'s
2598 * preempt_enable() and scheduling the stopper task. At that
2599 * point we're a regular task again and not current anymore.
2600 *
2601 * A !PREEMPT kernel has a giant hole here, which makes it far
2602 * more likely.
2603 */
2604
2605 /*
2606 * The task moved before the stopper got to run. We're holding
2607 * ->pi_lock, so the allowed mask is stable - if it got
2608 * somewhere allowed, we're done.
2609 */
2610 if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) {
2611 p->migration_pending = NULL;
2612 complete = true;
2613 goto out;
2614 }
2615
2616 /*
2617 * When migrate_enable() hits a rq mis-match we can't reliably
2618 * determine is_migration_disabled() and so have to chase after
2619 * it.
2620 */
2621 WARN_ON_ONCE(!pending->stop_pending);
2622 preempt_disable();
2623 task_rq_unlock(rq, p, &rf);
2624 stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
2625 &pending->arg, &pending->stop_work);
2626 preempt_enable();
2627 return 0;
2628 }
2629 out:
2630 if (pending)
2631 pending->stop_pending = false;
2632 task_rq_unlock(rq, p, &rf);
2633
2634 if (complete)
2635 complete_all(&pending->done);
2636
2637 return 0;
2638 }
2639
2640 int push_cpu_stop(void *arg)
2641 {
2642 struct rq *lowest_rq = NULL, *rq = this_rq();
2643 struct task_struct *p = arg;
2644
2645 raw_spin_lock_irq(&p->pi_lock);
2646 raw_spin_rq_lock(rq);
2647
2648 if (task_rq(p) != rq)
2649 goto out_unlock;
2650
2651 if (is_migration_disabled(p)) {
2652 p->migration_flags |= MDF_PUSH;
2653 goto out_unlock;
2654 }
2655
2656 p->migration_flags &= ~MDF_PUSH;
2657
2658 if (p->sched_class->find_lock_rq)
2659 lowest_rq = p->sched_class->find_lock_rq(p, rq);
2660
2661 if (!lowest_rq)
2662 goto out_unlock;
2663
2664 // XXX validate p is still the highest prio task
2665 if (task_rq(p) == rq) {
2666 move_queued_task_locked(rq, lowest_rq, p);
2667 resched_curr(lowest_rq);
2668 }
2669
2670 double_unlock_balance(rq, lowest_rq);
2671
2672 out_unlock:
2673 rq->push_busy = false;
2674 raw_spin_rq_unlock(rq);
2675 raw_spin_unlock_irq(&p->pi_lock);
2676
2677 put_task_struct(p);
2678 return 0;
2679 }
2680
2681 /*
2682 * sched_class::set_cpus_allowed must do the below, but is not required to
2683 * actually call this function.
2684 */
2685 void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx)
2686 {
2687 if (ctx->flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) {
2688 p->cpus_ptr = ctx->new_mask;
2689 return;
2690 }
2691
2692 cpumask_copy(&p->cpus_mask, ctx->new_mask);
2693 p->nr_cpus_allowed = cpumask_weight(ctx->new_mask);
2694
2695 /*
2696 * Swap in a new user_cpus_ptr if SCA_USER flag set
2697 */
2698 if (ctx->flags & SCA_USER)
2699 swap(p->user_cpus_ptr, ctx->user_mask);
2700 }
2701
2702 static void
2703 __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
2704 {
2705 struct rq *rq = task_rq(p);
2706 bool queued, running;
2707
2708 /*
2709 * This here violates the locking rules for affinity, since we're only
2710 * supposed to change these variables while holding both rq->lock and
2711 * p->pi_lock.
2712 *
2713 * HOWEVER, it magically works, because ttwu() is the only code that
2714 * accesses these variables under p->pi_lock and only does so after
2715 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
2716 * before finish_task().
2717 *
2718 * XXX do further audits, this smells like something putrid.
2719 */
2720 if (ctx->flags & SCA_MIGRATE_DISABLE)
2721 SCHED_WARN_ON(!p->on_cpu);
2722 else
2723 lockdep_assert_held(&p->pi_lock);
2724
2725 queued = task_on_rq_queued(p);
2726 running = task_current_donor(rq, p);
2727
2728 if (queued) {
2729 /*
2730 * Because __kthread_bind() calls this on blocked tasks without
2731 * holding rq->lock.
2732 */
2733 lockdep_assert_rq_held(rq);
2734 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
2735 }
2736 if (running)
2737 put_prev_task(rq, p);
2738
2739 p->sched_class->set_cpus_allowed(p, ctx);
2740 mm_set_cpus_allowed(p->mm, ctx->new_mask);
2741
2742 if (queued)
2743 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
2744 if (running)
2745 set_next_task(rq, p);
2746 }
2747
2748 /*
2749 * Used for kthread_bind() and select_fallback_rq(), in both cases the user
2750 * affinity (if any) should be destroyed too.
2751 */
2752 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
2753 {
2754 struct affinity_context ac = {
2755 .new_mask = new_mask,
2756 .user_mask = NULL,
2757 .flags = SCA_USER, /* clear the user requested mask */
2758 };
2759 union cpumask_rcuhead {
2760 cpumask_t cpumask;
2761 struct rcu_head rcu;
2762 };
2763
2764 __do_set_cpus_allowed(p, &ac);
2765
2766 /*
2767 * Because this is called with p->pi_lock held, it is not possible
2768 * to use kfree() here (when PREEMPT_RT=y), therefore punt to using
2769 * kfree_rcu().
2770 */
2771 kfree_rcu((union cpumask_rcuhead *)ac.user_mask, rcu);
2772 }
2773
2774 int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
2775 int node)
2776 {
2777 cpumask_t *user_mask;
2778 unsigned long flags;
2779
2780 /*
2781 * Always clear dst->user_cpus_ptr first as their user_cpus_ptr's
2782 * may differ by now due to racing.
2783 */
2784 dst->user_cpus_ptr = NULL;
2785
2786 /*
2787 * This check is racy and losing the race is a valid situation.
2788 * It is not worth the extra overhead of taking the pi_lock on
2789 * every fork/clone.
2790 */
2791 if (data_race(!src->user_cpus_ptr))
2792 return 0;
2793
2794 user_mask = alloc_user_cpus_ptr(node);
2795 if (!user_mask)
2796 return -ENOMEM;
2797
2798 /*
2799 * Use pi_lock to protect content of user_cpus_ptr
2800 *
2801 * Though unlikely, user_cpus_ptr can be reset to NULL by a concurrent
2802 * do_set_cpus_allowed().
2803 */
2804 raw_spin_lock_irqsave(&src->pi_lock, flags);
2805 if (src->user_cpus_ptr) {
2806 swap(dst->user_cpus_ptr, user_mask);
2807 cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
2808 }
2809 raw_spin_unlock_irqrestore(&src->pi_lock, flags);
2810
2811 if (unlikely(user_mask))
2812 kfree(user_mask);
2813
2814 return 0;
2815 }
2816
2817 static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p)
2818 {
2819 struct cpumask *user_mask = NULL;
2820
2821 swap(p->user_cpus_ptr, user_mask);
2822
2823 return user_mask;
2824 }
2825
2826 void release_user_cpus_ptr(struct task_struct *p)
2827 {
2828 kfree(clear_user_cpus_ptr(p));
2829 }
2830
2831 /*
2832 * This function is wildly self concurrent; here be dragons.
2833 *
2834 *
2835 * When given a valid mask, __set_cpus_allowed_ptr() must block until the
2836 * designated task is enqueued on an allowed CPU. If that task is currently
2837 * running, we have to kick it out using the CPU stopper.
2838 *
2839 * Migrate-Disable comes along and tramples all over our nice sandcastle.
2840 * Consider:
2841 *
2842 * Initial conditions: P0->cpus_mask = [0, 1]
2843 *
2844 * P0@CPU0 P1
2845 *
2846 * migrate_disable();
2847 * <preempted>
2848 * set_cpus_allowed_ptr(P0, [1]);
2849 *
2850 * P1 *cannot* return from this set_cpus_allowed_ptr() call until P0 executes
2851 * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region).
2852 * This means we need the following scheme:
2853 *
2854 * P0@CPU0 P1
2855 *
2856 * migrate_disable();
2857 * <preempted>
2858 * set_cpus_allowed_ptr(P0, [1]);
2859 * <blocks>
2860 * <resumes>
2861 * migrate_enable();
2862 * __set_cpus_allowed_ptr();
2863 * <wakes local stopper>
2864 * `--> <woken on migration completion>
2865 *
2866 * Now the fun stuff: there may be several P1-like tasks, i.e. multiple
2867 * concurrent set_cpus_allowed_ptr(P0, [*]) calls. CPU affinity changes of any
2868 * task p are serialized by p->pi_lock, which we can leverage: the one that
2869 * should come into effect at the end of the Migrate-Disable region is the last
2870 * one. This means we only need to track a single cpumask (i.e. p->cpus_mask),
2871 * but we still need to properly signal those waiting tasks at the appropriate
2872 * moment.
2873 *
2874 * This is implemented using struct set_affinity_pending. The first
2875 * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will
2876 * setup an instance of that struct and install it on the targeted task_struct.
2877 * Any and all further callers will reuse that instance. Those then wait for
2878 * a completion signaled at the tail of the CPU stopper callback (1), triggered
2879 * on the end of the Migrate-Disable region (i.e. outermost migrate_enable()).
2880 *
2881 *
2882 * (1) In the cases covered above. There is one more where the completion is
2883 * signaled within affine_move_task() itself: when a subsequent affinity request
2884 * occurs after the stopper bailed out due to the targeted task still being
2885 * Migrate-Disabled. Consider:
2886 *
2887 * Initial conditions: P0->cpus_mask = [0, 1]
2888 *
2889 * CPU0 P1 P2
2890 * <P0>
2891 * migrate_disable();
2892 * <preempted>
2893 * set_cpus_allowed_ptr(P0, [1]);
2894 * <blocks>
2895 * <migration/0>
2896 * migration_cpu_stop()
2897 * is_migration_disabled()
2898 * <bails>
2899 * set_cpus_allowed_ptr(P0, [0, 1]);
2900 * <signal completion>
2901 * <awakes>
2902 *
2903 * Note that the above is safe vs a concurrent migrate_enable(), as any
2904 * pending affinity completion is preceded by an uninstallation of
2905 * p->migration_pending done with p->pi_lock held.
2906 */
2907 static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf,
2908 int dest_cpu, unsigned int flags)
2909 __releases(rq->lock)
2910 __releases(p->pi_lock)
2911 {
2912 struct set_affinity_pending my_pending = { }, *pending = NULL;
2913 bool stop_pending, complete = false;
2914
2915 /* Can the task run on the task's current CPU? If so, we're done */
2916 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
2917 struct task_struct *push_task = NULL;
2918
2919 if ((flags & SCA_MIGRATE_ENABLE) &&
2920 (p->migration_flags & MDF_PUSH) && !rq->push_busy) {
2921 rq->push_busy = true;
2922 push_task = get_task_struct(p);
2923 }
2924
2925 /*
2926 * If there are pending waiters, but no pending stop_work,
2927 * then complete now.
2928 */
2929 pending = p->migration_pending;
2930 if (pending && !pending->stop_pending) {
2931 p->migration_pending = NULL;
2932 complete = true;
2933 }
2934
2935 preempt_disable();
2936 task_rq_unlock(rq, p, rf);
2937 if (push_task) {
2938 stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
2939 p, &rq->push_work);
2940 }
2941 preempt_enable();
2942
2943 if (complete)
2944 complete_all(&pending->done);
2945
2946 return 0;
2947 }
2948
2949 if (!(flags & SCA_MIGRATE_ENABLE)) {
2950 /* serialized by p->pi_lock */
2951 if (!p->migration_pending) {
2952 /* Install the request */
2953 refcount_set(&my_pending.refs, 1);
2954 init_completion(&my_pending.done);
2955 my_pending.arg = (struct migration_arg) {
2956 .task = p,
2957 .dest_cpu = dest_cpu,
2958 .pending = &my_pending,
2959 };
2960
2961 p->migration_pending = &my_pending;
2962 } else {
2963 pending = p->migration_pending;
2964 refcount_inc(&pending->refs);
2965 /*
2966 * Affinity has changed, but we've already installed a
2967 * pending. migration_cpu_stop() *must* see this, else
2968 * we risk a completion of the pending despite having a
2969 * task on a disallowed CPU.
2970 *
2971 * Serialized by p->pi_lock, so this is safe.
2972 */
2973 pending->arg.dest_cpu = dest_cpu;
2974 }
2975 }
2976 pending = p->migration_pending;
2977 /*
2978 * - !MIGRATE_ENABLE:
2979 * we'll have installed a pending if there wasn't one already.
2980 *
2981 * - MIGRATE_ENABLE:
2982 * we're here because the current CPU isn't matching anymore,
2983 * the only way that can happen is because of a concurrent
2984 * set_cpus_allowed_ptr() call, which should then still be
2985 * pending completion.
2986 *
2987 * Either way, we really should have a @pending here.
2988 */
2989 if (WARN_ON_ONCE(!pending)) {
2990 task_rq_unlock(rq, p, rf);
2991 return -EINVAL;
2992 }
2993
2994 if (task_on_cpu(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) {
2995 /*
2996 * MIGRATE_ENABLE gets here because 'p == current', but for
2997 * anything else we cannot do is_migration_disabled(), punt
2998 * and have the stopper function handle it all race-free.
2999 */
3000 stop_pending = pending->stop_pending;
3001 if (!stop_pending)
3002 pending->stop_pending = true;
3003
3004 if (flags & SCA_MIGRATE_ENABLE)
3005 p->migration_flags &= ~MDF_PUSH;
3006
3007 preempt_disable();
3008 task_rq_unlock(rq, p, rf);
3009 if (!stop_pending) {
3010 stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
3011 &pending->arg, &pending->stop_work);
3012 }
3013 preempt_enable();
3014
3015 if (flags & SCA_MIGRATE_ENABLE)
3016 return 0;
3017 } else {
3018
3019 if (!is_migration_disabled(p)) {
3020 if (task_on_rq_queued(p))
3021 rq = move_queued_task(rq, rf, p, dest_cpu);
3022
3023 if (!pending->stop_pending) {
3024 p->migration_pending = NULL;
3025 complete = true;
3026 }
3027 }
3028 task_rq_unlock(rq, p, rf);
3029
3030 if (complete)
3031 complete_all(&pending->done);
3032 }
3033
3034 wait_for_completion(&pending->done);
3035
3036 if (refcount_dec_and_test(&pending->refs))
3037 wake_up_var(&pending->refs); /* No UaF, just an address */
3038
3039 /*
3040 * Block the original owner of &pending until all subsequent callers
3041 * have seen the completion and decremented the refcount
3042 */
3043 wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs));
3044
3045 /* ARGH */
3046 WARN_ON_ONCE(my_pending.stop_pending);
3047
3048 return 0;
3049 }
3050
3051 /*
3052 * Called with both p->pi_lock and rq->lock held; drops both before returning.
3053 */
3054 static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
3055 struct affinity_context *ctx,
3056 struct rq *rq,
3057 struct rq_flags *rf)
3058 __releases(rq->lock)
3059 __releases(p->pi_lock)
3060 {
3061 const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
3062 const struct cpumask *cpu_valid_mask = cpu_active_mask;
3063 bool kthread = p->flags & PF_KTHREAD;
3064 unsigned int dest_cpu;
3065 int ret = 0;
3066
3067 update_rq_clock(rq);
3068
3069 if (kthread || is_migration_disabled(p)) {
3070 /*
3071 * Kernel threads are allowed on online && !active CPUs,
3072 * however, during cpu-hot-unplug, even these might get pushed
3073 * away if not KTHREAD_IS_PER_CPU.
3074 *
3075 * Specifically, migration_disabled() tasks must not fail the
3076 * cpumask_any_and_distribute() pick below, esp. so on
3077 * SCA_MIGRATE_ENABLE, otherwise we'll not call
3078 * set_cpus_allowed_common() and actually reset p->cpus_ptr.
3079 */
3080 cpu_valid_mask = cpu_online_mask;
3081 }
3082
3083 if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) {
3084 ret = -EINVAL;
3085 goto out;
3086 }
3087
3088 /*
3089 * Must re-check here, to close a race against __kthread_bind(),
3090 * sched_setaffinity() is not guaranteed to observe the flag.
3091 */
3092 if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
3093 ret = -EINVAL;
3094 goto out;
3095 }
3096
3097 if (!(ctx->flags & SCA_MIGRATE_ENABLE)) {
3098 if (cpumask_equal(&p->cpus_mask, ctx->new_mask)) {
3099 if (ctx->flags & SCA_USER)
3100 swap(p->user_cpus_ptr, ctx->user_mask);
3101 goto out;
3102 }
3103
3104 if (WARN_ON_ONCE(p == current &&
3105 is_migration_disabled(p) &&
3106 !cpumask_test_cpu(task_cpu(p), ctx->new_mask))) {
3107 ret = -EBUSY;
3108 goto out;
3109 }
3110 }
3111
3112 /*
3113 * Picking a ~random cpu helps in cases where we are changing affinity
3114 * for groups of tasks (ie. cpuset), so that load balancing is not
3115 * immediately required to distribute the tasks within their new mask.
3116 */
3117 dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, ctx->new_mask);
3118 if (dest_cpu >= nr_cpu_ids) {
3119 ret = -EINVAL;
3120 goto out;
3121 }
3122
3123 __do_set_cpus_allowed(p, ctx);
3124
3125 return affine_move_task(rq, p, rf, dest_cpu, ctx->flags);
3126
3127 out:
3128 task_rq_unlock(rq, p, rf);
3129
3130 return ret;
3131 }
3132
3133 /*
3134 * Change a given task's CPU affinity. Migrate the thread to a
3135 * proper CPU and schedule it away if the CPU it's executing on
3136 * is removed from the allowed bitmask.
3137 *
3138 * NOTE: the caller must have a valid reference to the task, the
3139 * task must not exit() & deallocate itself prematurely. The
3140 * call is not atomic; no spinlocks may be held.
3141 */
3142 int __set_cpus_allowed_ptr(struct task_struct *p, struct affinity_context *ctx)
3143 {
3144 struct rq_flags rf;
3145 struct rq *rq;
3146
3147 rq = task_rq_lock(p, &rf);
3148 /*
3149 * Masking should be skipped if SCA_USER or any of the SCA_MIGRATE_*
3150 * flags are set.
3151 */
3152 if (p->user_cpus_ptr &&
3153 !(ctx->flags & (SCA_USER | SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) &&
3154 cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr))
3155 ctx->new_mask = rq->scratch_mask;
3156
3157 return __set_cpus_allowed_ptr_locked(p, ctx, rq, &rf);
3158 }
3159
3160 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
3161 {
3162 struct affinity_context ac = {
3163 .new_mask = new_mask,
3164 .flags = 0,
3165 };
3166
3167 return __set_cpus_allowed_ptr(p, &ac);
3168 }
3169 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
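
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * a module-created kthread can be confined to a single CPU before it is
 * first woken:
 *
 *	struct task_struct *tsk = kthread_create(worker_fn, NULL, "worker/%u", cpu);
 *
 *	if (!IS_ERR(tsk)) {
 *		set_cpus_allowed_ptr(tsk, cpumask_of(cpu));
 *		wake_up_process(tsk);
 *	}
 *
 * worker_fn and the thread name are placeholders; strictly per-CPU threads
 * normally use kthread_bind() or the smpboot helpers instead.
 */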
3170
3171 /*
3172 * Change a given task's CPU affinity to the intersection of its current
3173 * affinity mask and @subset_mask, writing the resulting mask to @new_mask.
3174 * If user_cpus_ptr is defined, use it as the basis for restricting CPU
3175 * affinity or use cpu_online_mask instead.
3176 *
3177 * If the resulting mask is empty, leave the affinity unchanged and return
3178 * -EINVAL.
3179 */
3180 static int restrict_cpus_allowed_ptr(struct task_struct *p,
3181 struct cpumask *new_mask,
3182 const struct cpumask *subset_mask)
3183 {
3184 struct affinity_context ac = {
3185 .new_mask = new_mask,
3186 .flags = 0,
3187 };
3188 struct rq_flags rf;
3189 struct rq *rq;
3190 int err;
3191
3192 rq = task_rq_lock(p, &rf);
3193
3194 /*
3195 * Forcefully restricting the affinity of a deadline task is
3196 * likely to cause problems, so fail and noisily override the
3197 * mask entirely.
3198 */
3199 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
3200 err = -EPERM;
3201 goto err_unlock;
3202 }
3203
3204 if (!cpumask_and(new_mask, task_user_cpus(p), subset_mask)) {
3205 err = -EINVAL;
3206 goto err_unlock;
3207 }
3208
3209 return __set_cpus_allowed_ptr_locked(p, &ac, rq, &rf);
3210
3211 err_unlock:
3212 task_rq_unlock(rq, p, &rf);
3213 return err;
3214 }
3215
3216 /*
3217 * Restrict the CPU affinity of task @p so that it is a subset of
3218 * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
3219 * old affinity mask. If the resulting mask is empty, we warn and walk
3220 * up the cpuset hierarchy until we find a suitable mask.
3221 */
3222 void force_compatible_cpus_allowed_ptr(struct task_struct *p)
3223 {
3224 cpumask_var_t new_mask;
3225 const struct cpumask *override_mask = task_cpu_possible_mask(p);
3226
3227 alloc_cpumask_var(&new_mask, GFP_KERNEL);
3228
3229 /*
3230 * __migrate_task() can fail silently in the face of concurrent
3231 * offlining of the chosen destination CPU, so take the hotplug
3232 * lock to ensure that the migration succeeds.
3233 */
3234 cpus_read_lock();
3235 if (!cpumask_available(new_mask))
3236 goto out_set_mask;
3237
3238 if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask))
3239 goto out_free_mask;
3240
3241 /*
3242 * We failed to find a valid subset of the affinity mask for the
3243 * task, so override it based on its cpuset hierarchy.
3244 */
3245 cpuset_cpus_allowed(p, new_mask);
3246 override_mask = new_mask;
3247
3248 out_set_mask:
3249 if (printk_ratelimit()) {
3250 printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
3251 task_pid_nr(p), p->comm,
3252 cpumask_pr_args(override_mask));
3253 }
3254
3255 WARN_ON(set_cpus_allowed_ptr(p, override_mask));
3256 out_free_mask:
3257 cpus_read_unlock();
3258 free_cpumask_var(new_mask);
3259 }
3260
3261 /*
3262 * Restore the affinity of a task @p which was previously restricted by a
3263 * call to force_compatible_cpus_allowed_ptr().
3264 *
3265 * It is the caller's responsibility to serialise this with any calls to
3266 * force_compatible_cpus_allowed_ptr(@p).
3267 */
3268 void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
3269 {
3270 struct affinity_context ac = {
3271 .new_mask = task_user_cpus(p),
3272 .flags = 0,
3273 };
3274 int ret;
3275
3276 /*
3277 * Try to restore the old affinity mask with __sched_setaffinity().
3278 * Cpuset masking will be done there too.
3279 */
3280 ret = __sched_setaffinity(p, &ac);
3281 WARN_ON_ONCE(ret);
3282 }
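
/*
 * Illustrative pairing sketch (hypothetical caller, not part of this file):
 * an architecture that cannot run a task on every CPU (e.g. a 32-bit image
 * on an asymmetric AArch64 system) brackets the incompatible window:
 *
 *	force_compatible_cpus_allowed_ptr(p);	// clamp to task_cpu_possible_mask()
 *	...					// run while restricted
 *	relax_compatible_cpus_allowed_ptr(p);	// restore the saved user mask
 *
 * As noted above, the two calls must be serialized by the caller.
 */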
3283
3284 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
3285 {
3286 #ifdef CONFIG_SCHED_DEBUG
3287 unsigned int state = READ_ONCE(p->__state);
3288
3289 /*
3290 * We should never call set_task_cpu() on a blocked task,
3291 * ttwu() will sort out the placement.
3292 */
3293 WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq);
3294
3295 /*
3296 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
3297 * because schedstat_wait_{start,end} rebase migrating task's wait_start
3298 * time relying on p->on_rq.
3299 */
3300 WARN_ON_ONCE(state == TASK_RUNNING &&
3301 p->sched_class == &fair_sched_class &&
3302 (p->on_rq && !task_on_rq_migrating(p)));
3303
3304 #ifdef CONFIG_LOCKDEP
3305 /*
3306 * The caller should hold either p->pi_lock or rq->lock, when changing
3307 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
3308 *
3309 * sched_move_task() holds both and thus holding either pins the cgroup,
3310 * see task_group().
3311 *
3312 * Furthermore, all task_rq users should acquire both locks, see
3313 * task_rq_lock().
3314 */
3315 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
3316 lockdep_is_held(__rq_lockp(task_rq(p)))));
3317 #endif
3318 /*
3319 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
3320 */
3321 WARN_ON_ONCE(!cpu_online(new_cpu));
3322
3323 WARN_ON_ONCE(is_migration_disabled(p));
3324 #endif
3325
3326 trace_sched_migrate_task(p, new_cpu);
3327
3328 if (task_cpu(p) != new_cpu) {
3329 if (p->sched_class->migrate_task_rq)
3330 p->sched_class->migrate_task_rq(p, new_cpu);
3331 p->se.nr_migrations++;
3332 rseq_migrate(p);
3333 sched_mm_cid_migrate_from(p);
3334 perf_event_task_migrate(p);
3335 }
3336
3337 __set_task_cpu(p, new_cpu);
3338 }
3339
3340 #ifdef CONFIG_NUMA_BALANCING
3341 static void __migrate_swap_task(struct task_struct *p, int cpu)
3342 {
3343 if (task_on_rq_queued(p)) {
3344 struct rq *src_rq, *dst_rq;
3345 struct rq_flags srf, drf;
3346
3347 src_rq = task_rq(p);
3348 dst_rq = cpu_rq(cpu);
3349
3350 rq_pin_lock(src_rq, &srf);
3351 rq_pin_lock(dst_rq, &drf);
3352
3353 move_queued_task_locked(src_rq, dst_rq, p);
3354 wakeup_preempt(dst_rq, p, 0);
3355
3356 rq_unpin_lock(dst_rq, &drf);
3357 rq_unpin_lock(src_rq, &srf);
3358
3359 } else {
3360 /*
3361 * Task isn't running anymore; make it appear like we migrated
3362 * it before it went to sleep. This means on wakeup we make the
3363 * previous CPU our target instead of where it really is.
3364 */
3365 p->wake_cpu = cpu;
3366 }
3367 }
3368
3369 struct migration_swap_arg {
3370 struct task_struct *src_task, *dst_task;
3371 int src_cpu, dst_cpu;
3372 };
3373
3374 static int migrate_swap_stop(void *data)
3375 {
3376 struct migration_swap_arg *arg = data;
3377 struct rq *src_rq, *dst_rq;
3378
3379 if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
3380 return -EAGAIN;
3381
3382 src_rq = cpu_rq(arg->src_cpu);
3383 dst_rq = cpu_rq(arg->dst_cpu);
3384
3385 guard(double_raw_spinlock)(&arg->src_task->pi_lock, &arg->dst_task->pi_lock);
3386 guard(double_rq_lock)(src_rq, dst_rq);
3387
3388 if (task_cpu(arg->dst_task) != arg->dst_cpu)
3389 return -EAGAIN;
3390
3391 if (task_cpu(arg->src_task) != arg->src_cpu)
3392 return -EAGAIN;
3393
3394 if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr))
3395 return -EAGAIN;
3396
3397 if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr))
3398 return -EAGAIN;
3399
3400 __migrate_swap_task(arg->src_task, arg->dst_cpu);
3401 __migrate_swap_task(arg->dst_task, arg->src_cpu);
3402
3403 return 0;
3404 }
3405
3406 /*
3407 * Cross migrate two tasks
3408 */
3409 int migrate_swap(struct task_struct *cur, struct task_struct *p,
3410 int target_cpu, int curr_cpu)
3411 {
3412 struct migration_swap_arg arg;
3413 int ret = -EINVAL;
3414
3415 arg = (struct migration_swap_arg){
3416 .src_task = cur,
3417 .src_cpu = curr_cpu,
3418 .dst_task = p,
3419 .dst_cpu = target_cpu,
3420 };
3421
3422 if (arg.src_cpu == arg.dst_cpu)
3423 goto out;
3424
3425 /*
3426 * These three tests are all lockless; this is OK since all of them
3427 * will be re-checked with proper locks held further down the line.
3428 */
3429 if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
3430 goto out;
3431
3432 if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr))
3433 goto out;
3434
3435 if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr))
3436 goto out;
3437
3438 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
3439 ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);
3440
3441 out:
3442 return ret;
3443 }
3444 #endif /* CONFIG_NUMA_BALANCING */
3445
3446 /***
3447 * kick_process - kick a running thread to enter/exit the kernel
3448 * @p: the to-be-kicked thread
3449 *
3450 * Cause a process which is running on another CPU to enter
3451 * kernel-mode, without any delay. (to get signals handled.)
3452 *
3453 * NOTE: this function doesn't have to take the runqueue lock,
3454 * because all it wants to ensure is that the remote task enters
3455 * the kernel. If the IPI races and the task has been migrated
3456 * to another CPU then no harm is done and the purpose has been
3457 * achieved as well.
3458 */
3459 void kick_process(struct task_struct *p)
3460 {
3461 guard(preempt)();
3462 int cpu = task_cpu(p);
3463
3464 if ((cpu != smp_processor_id()) && task_curr(p))
3465 smp_send_reschedule(cpu);
3466 }
3467 EXPORT_SYMBOL_GPL(kick_process);
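
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * signal-delivery style code first publishes the state the target must act
 * on, then forces it through the kernel:
 *
 *	set_tsk_thread_flag(p, TIF_SIGPENDING);
 *	kick_process(p);	// IPI the CPU currently running @p, if any
 *
 * If @p is not running, the flag is simply noticed the next time it is
 * scheduled in.
 */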
3468
3469 /*
3470 * ->cpus_ptr is protected by both rq->lock and p->pi_lock
3471 *
3472 * A few notes on cpu_active vs cpu_online:
3473 *
3474 * - cpu_active must be a subset of cpu_online
3475 *
3476 * - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
3477 * see __set_cpus_allowed_ptr(). At this point the newly online
3478 * CPU isn't yet part of the sched domains, and balancing will not
3479 * see it.
3480 *
3481 * - on CPU-down we clear cpu_active() to mask the sched domains and
3482 * avoid the load balancer to place new tasks on the to be removed
3483 * CPU. Existing tasks will remain running there and will be taken
3484 * off.
3485 *
3486 * This means that fallback selection must not select !active CPUs.
3487 * And can assume that any active CPU must be online. Conversely
3488 * select_task_rq() below may allow selection of !active CPUs in order
3489 * to satisfy the above rules.
3490 */
3491 static int select_fallback_rq(int cpu, struct task_struct *p)
3492 {
3493 int nid = cpu_to_node(cpu);
3494 const struct cpumask *nodemask = NULL;
3495 enum { cpuset, possible, fail } state = cpuset;
3496 int dest_cpu;
3497
3498 /*
3499 * If the node that the CPU is on has been offlined, cpu_to_node()
3500 * will return -1. There is no CPU on the node, and we should
3501 * select a CPU on another node.
3502 */
3503 if (nid != -1) {
3504 nodemask = cpumask_of_node(nid);
3505
3506 /* Look for allowed, online CPU in same node. */
3507 for_each_cpu(dest_cpu, nodemask) {
3508 if (is_cpu_allowed(p, dest_cpu))
3509 return dest_cpu;
3510 }
3511 }
3512
3513 for (;;) {
3514 /* Any allowed, online CPU? */
3515 for_each_cpu(dest_cpu, p->cpus_ptr) {
3516 if (!is_cpu_allowed(p, dest_cpu))
3517 continue;
3518
3519 goto out;
3520 }
3521
3522 /* No more Mr. Nice Guy. */
3523 switch (state) {
3524 case cpuset:
3525 if (cpuset_cpus_allowed_fallback(p)) {
3526 state = possible;
3527 break;
3528 }
3529 fallthrough;
3530 case possible:
3531 /*
3532 * XXX When called from select_task_rq() we only
3533 * hold p->pi_lock and again violate locking order.
3534 *
3535 * More yuck to audit.
3536 */
3537 do_set_cpus_allowed(p, task_cpu_possible_mask(p));
3538 state = fail;
3539 break;
3540 case fail:
3541 BUG();
3542 break;
3543 }
3544 }
3545
3546 out:
3547 if (state != cpuset) {
3548 /*
3549 * Don't tell them about moving exiting tasks or
3550 * kernel threads (both mm NULL), since they never
3551 * leave kernel.
3552 */
3553 if (p->mm && printk_ratelimit()) {
3554 printk_deferred("process %d (%s) no longer affine to cpu%d\n",
3555 task_pid_nr(p), p->comm, cpu);
3556 }
3557 }
3558
3559 return dest_cpu;
3560 }
3561
3562 /*
3563 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
3564 */
3565 static inline
3566 int select_task_rq(struct task_struct *p, int cpu, int *wake_flags)
3567 {
3568 lockdep_assert_held(&p->pi_lock);
3569
3570 if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p)) {
3571 cpu = p->sched_class->select_task_rq(p, cpu, *wake_flags);
3572 *wake_flags |= WF_RQ_SELECTED;
3573 } else {
3574 cpu = cpumask_any(p->cpus_ptr);
3575 }
3576
3577 /*
3578 * In order not to call set_task_cpu() on a blocking task we need
3579 * to rely on ttwu() to place the task on a valid ->cpus_ptr
3580 * CPU.
3581 *
3582 * Since this is common to all placement strategies, this lives here.
3583 *
3584 * [ this allows ->select_task() to simply return task_cpu(p) and
3585 * not worry about this generic constraint ]
3586 */
3587 if (unlikely(!is_cpu_allowed(p, cpu)))
3588 cpu = select_fallback_rq(task_cpu(p), p);
3589
3590 return cpu;
3591 }
3592
3593 void sched_set_stop_task(int cpu, struct task_struct *stop)
3594 {
3595 static struct lock_class_key stop_pi_lock;
3596 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
3597 struct task_struct *old_stop = cpu_rq(cpu)->stop;
3598
3599 if (stop) {
3600 /*
3601 * Make it appear like a SCHED_FIFO task, it's something
3602 * userspace knows about and won't get confused about.
3603 *
3604 * Also, it will make PI more or less work without too
3605 * much confusion -- but then, stop work should not
3606 * rely on PI working anyway.
3607 */
3608 sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
3609
3610 stop->sched_class = &stop_sched_class;
3611
3612 /*
3613 * The PI code calls rt_mutex_setprio() with ->pi_lock held to
3614 * adjust the effective priority of a task. As a result,
3615 * rt_mutex_setprio() can trigger (RT) balancing operations,
3616 * which can then trigger wakeups of the stop thread to push
3617 * around the current task.
3618 *
3619 * The stop task itself will never be part of the PI-chain, it
3620 * never blocks, therefore that ->pi_lock recursion is safe.
3621 * Tell lockdep about this by placing the stop->pi_lock in its
3622 * own class.
3623 */
3624 lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
3625 }
3626
3627 cpu_rq(cpu)->stop = stop;
3628
3629 if (old_stop) {
3630 /*
3631 * Reset it back to a normal scheduling class so that
3632 * it can die in pieces.
3633 */
3634 old_stop->sched_class = &rt_sched_class;
3635 }
3636 }
3637
3638 #else /* CONFIG_SMP */
3639
3640 static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { }
3641 
3642 static inline bool rq_has_pinned_tasks(struct rq *rq)
3643 {
3644 return false;
3645 }
3646
3647 #endif /* !CONFIG_SMP */
3648
3649 static void
3650 ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
3651 {
3652 struct rq *rq;
3653
3654 if (!schedstat_enabled())
3655 return;
3656
3657 rq = this_rq();
3658
3659 #ifdef CONFIG_SMP
3660 if (cpu == rq->cpu) {
3661 __schedstat_inc(rq->ttwu_local);
3662 __schedstat_inc(p->stats.nr_wakeups_local);
3663 } else {
3664 struct sched_domain *sd;
3665
3666 __schedstat_inc(p->stats.nr_wakeups_remote);
3667
3668 guard(rcu)();
3669 for_each_domain(rq->cpu, sd) {
3670 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
3671 __schedstat_inc(sd->ttwu_wake_remote);
3672 break;
3673 }
3674 }
3675 }
3676
3677 if (wake_flags & WF_MIGRATED)
3678 __schedstat_inc(p->stats.nr_wakeups_migrate);
3679 #endif /* CONFIG_SMP */
3680
3681 __schedstat_inc(rq->ttwu_count);
3682 __schedstat_inc(p->stats.nr_wakeups);
3683
3684 if (wake_flags & WF_SYNC)
3685 __schedstat_inc(p->stats.nr_wakeups_sync);
3686 }
3687
3688 /*
3689 * Mark the task runnable.
3690 */
3691 static inline void ttwu_do_wakeup(struct task_struct *p)
3692 {
3693 WRITE_ONCE(p->__state, TASK_RUNNING);
3694 trace_sched_wakeup(p);
3695 }
3696
3697 static void
3698 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
3699 struct rq_flags *rf)
3700 {
3701 int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
3702
3703 lockdep_assert_rq_held(rq);
3704
3705 if (p->sched_contributes_to_load)
3706 rq->nr_uninterruptible--;
3707
3708 #ifdef CONFIG_SMP
3709 if (wake_flags & WF_RQ_SELECTED)
3710 en_flags |= ENQUEUE_RQ_SELECTED;
3711 if (wake_flags & WF_MIGRATED)
3712 en_flags |= ENQUEUE_MIGRATED;
3713 else
3714 #endif
3715 if (p->in_iowait) {
3716 delayacct_blkio_end(p);
3717 atomic_dec(&task_rq(p)->nr_iowait);
3718 }
3719
3720 activate_task(rq, p, en_flags);
3721 wakeup_preempt(rq, p, wake_flags);
3722
3723 ttwu_do_wakeup(p);
3724
3725 #ifdef CONFIG_SMP
3726 if (p->sched_class->task_woken) {
3727 /*
3728 * Our task @p is fully woken up and running; so it's safe to
3729 * drop the rq->lock, hereafter rq is only used for statistics.
3730 */
3731 rq_unpin_lock(rq, rf);
3732 p->sched_class->task_woken(rq, p);
3733 rq_repin_lock(rq, rf);
3734 }
3735
3736 if (rq->idle_stamp) {
3737 u64 delta = rq_clock(rq) - rq->idle_stamp;
3738 u64 max = 2*rq->max_idle_balance_cost;
3739
3740 update_avg(&rq->avg_idle, delta);
3741
3742 if (rq->avg_idle > max)
3743 rq->avg_idle = max;
3744
3745 rq->idle_stamp = 0;
3746 }
3747 #endif
3748 }
3749
3750 /*
3751 * Consider @p being inside a wait loop:
3752 *
3753 * for (;;) {
3754 * set_current_state(TASK_UNINTERRUPTIBLE);
3755 *
3756 * if (CONDITION)
3757 * break;
3758 *
3759 * schedule();
3760 * }
3761 * __set_current_state(TASK_RUNNING);
3762 *
3763 * between set_current_state() and schedule(). In this case @p is still
3764 * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
3765 * an atomic manner.
3766 *
3767 * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
3768 * then schedule() must still happen and p->state can be changed to
3769 * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
3770 * need to do a full wakeup with enqueue.
3771 *
3772 * Returns: %true when the wakeup is done,
3773 * %false otherwise.
3774 */
3775 static int ttwu_runnable(struct task_struct *p, int wake_flags)
3776 {
3777 struct rq_flags rf;
3778 struct rq *rq;
3779 int ret = 0;
3780
3781 rq = __task_rq_lock(p, &rf);
3782 if (task_on_rq_queued(p)) {
3783 update_rq_clock(rq);
3784 if (p->se.sched_delayed)
3785 enqueue_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_DELAYED);
3786 if (!task_on_cpu(rq, p)) {
3787 /*
3788 * When on_rq && !on_cpu the task is preempted, see if
3789 * it should preempt the task that is current now.
3790 */
3791 wakeup_preempt(rq, p, wake_flags);
3792 }
3793 ttwu_do_wakeup(p);
3794 ret = 1;
3795 }
3796 __task_rq_unlock(rq, &rf);
3797
3798 return ret;
3799 }
3800
3801 #ifdef CONFIG_SMP
3802 void sched_ttwu_pending(void *arg)
3803 {
3804 struct llist_node *llist = arg;
3805 struct rq *rq = this_rq();
3806 struct task_struct *p, *t;
3807 struct rq_flags rf;
3808
3809 if (!llist)
3810 return;
3811
3812 rq_lock_irqsave(rq, &rf);
3813 update_rq_clock(rq);
3814
3815 llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
3816 if (WARN_ON_ONCE(p->on_cpu))
3817 smp_cond_load_acquire(&p->on_cpu, !VAL);
3818
3819 if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
3820 set_task_cpu(p, cpu_of(rq));
3821
3822 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf);
3823 }
3824
3825 /*
3826 * Must be after enqueueing at least one task such that
3827 * idle_cpu() does not observe a false-negative -- if it does,
3828 * it is possible for select_idle_siblings() to stack a number
3829 * of tasks on this CPU during that window.
3830 *
3831 * It is OK to clear ttwu_pending when another task is pending.
3832 * We will receive an IPI after local IRQs are enabled and then enqueue it.
3833 * Since nr_running > 0 now, idle_cpu() will always get the correct result.
3834 */
3835 WRITE_ONCE(rq->ttwu_pending, 0);
3836 rq_unlock_irqrestore(rq, &rf);
3837 }
3838
3839 /*
3840 * Prepare the scene for sending an IPI for a remote smp_call
3841 *
3842 * Returns true if the caller can proceed with sending the IPI.
3843 * Returns false otherwise.
3844 */
3845 bool call_function_single_prep_ipi(int cpu)
3846 {
3847 if (set_nr_if_polling(cpu_rq(cpu)->idle)) {
3848 trace_sched_wake_idle_without_ipi(cpu);
3849 return false;
3850 }
3851
3852 return true;
3853 }
3854
3855 /*
3856 * Queue a task on the target CPUs wake_list and wake the CPU via IPI if
3857 * necessary. The wakee CPU on receipt of the IPI will queue the task
3858 * via sched_ttwu_wakeup() for activation so the wakee incurs the cost
3859 * of the wakeup instead of the waker.
3860 */
3861 static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3862 {
3863 struct rq *rq = cpu_rq(cpu);
3864
3865 p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
3866
3867 WRITE_ONCE(rq->ttwu_pending, 1);
3868 __smp_call_single_queue(cpu, &p->wake_entry.llist);
3869 }
3870
3871 void wake_up_if_idle(int cpu)
3872 {
3873 struct rq *rq = cpu_rq(cpu);
3874
3875 guard(rcu)();
3876 if (is_idle_task(rcu_dereference(rq->curr))) {
3877 guard(rq_lock_irqsave)(rq);
3878 if (is_idle_task(rq->curr))
3879 resched_curr(rq);
3880 }
3881 }
3882
3883 bool cpus_equal_capacity(int this_cpu, int that_cpu)
3884 {
3885 if (!sched_asym_cpucap_active())
3886 return true;
3887
3888 if (this_cpu == that_cpu)
3889 return true;
3890
3891 return arch_scale_cpu_capacity(this_cpu) == arch_scale_cpu_capacity(that_cpu);
3892 }
3893
3894 bool cpus_share_cache(int this_cpu, int that_cpu)
3895 {
3896 if (this_cpu == that_cpu)
3897 return true;
3898
3899 return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
3900 }
3901
3902 /*
3903 * Whether CPUs share cache resources, which means the LLC on non-cluster
3904 * machines and the LLC tag or L2 on machines with clusters.
3905 */
3906 bool cpus_share_resources(int this_cpu, int that_cpu)
3907 {
3908 if (this_cpu == that_cpu)
3909 return true;
3910
3911 return per_cpu(sd_share_id, this_cpu) == per_cpu(sd_share_id, that_cpu);
3912 }
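/*
 * Illustrative example (a sketch, not taken from any real topology): on a
 * hypothetical machine with two clusters sharing one LLC, where CPU0/CPU1
 * form cluster A and CPU2/CPU3 form cluster B, the helpers above would
 * report:
 *
 *	cpus_share_cache(0, 2)     == true	(same LLC)
 *	cpus_share_resources(0, 1) == true	(same cluster, same sd_share_id)
 *	cpus_share_resources(0, 2) == false	(different clusters)
 *
 * ttwu_queue_cond() below, for instance, consults cpus_share_cache() to
 * decide whether a cross-CPU wakeup should go through the remote wakelist.
 */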
3913
3914 static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
3915 {
3916 /*
3917 * The BPF scheduler may depend on select_task_rq() being invoked during
3918 * wakeups. In addition, @p may end up executing on a different CPU
3919 * regardless of what happens in the wakeup path, making the ttwu_queue
3920 * optimization less meaningful. Skip if on SCX.
3921 */
3922 if (task_on_scx(p))
3923 return false;
3924
3925 /*
3926 * Do not complicate things with the async wake_list while the CPU is
3927 * in hotplug state.
3928 */
3929 if (!cpu_active(cpu))
3930 return false;
3931
3932 /* Ensure the task will still be allowed to run on the CPU. */
3933 if (!cpumask_test_cpu(cpu, p->cpus_ptr))
3934 return false;
3935
3936 /*
3937 * If the CPU does not share cache, then queue the task on the
3938 * remote rqs wakelist to avoid accessing remote data.
3939 */
3940 if (!cpus_share_cache(smp_processor_id(), cpu))
3941 return true;
3942
3943 if (cpu == smp_processor_id())
3944 return false;
3945
3946 /*
3947 * If the wakee cpu is idle, or the task is descheduling and the
3948 * only running task on the CPU, then use the wakelist to offload
3949 * the task activation to the idle (or soon-to-be-idle) CPU as
3950 * the current CPU is likely busy. nr_running is checked to
3951 * avoid unnecessary task stacking.
3952 *
3953 * Note that we can only get here with (wakee) p->on_rq=0,
3954 * p->on_cpu can be whatever, we've done the dequeue, so
3955 * the wakee has been accounted out of ->nr_running.
3956 */
3957 if (!cpu_rq(cpu)->nr_running)
3958 return true;
3959
3960 return false;
3961 }
3962
3963 static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3964 {
3965 if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) {
3966 sched_clock_cpu(cpu); /* Sync clocks across CPUs */
3967 __ttwu_queue_wakelist(p, cpu, wake_flags);
3968 return true;
3969 }
3970
3971 return false;
3972 }
3973
3974 #else /* !CONFIG_SMP */
3975
3976 static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3977 {
3978 return false;
3979 }
3980
3981 #endif /* CONFIG_SMP */
3982
3983 static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
3984 {
3985 struct rq *rq = cpu_rq(cpu);
3986 struct rq_flags rf;
3987
3988 if (ttwu_queue_wakelist(p, cpu, wake_flags))
3989 return;
3990
3991 rq_lock(rq, &rf);
3992 update_rq_clock(rq);
3993 ttwu_do_activate(rq, p, wake_flags, &rf);
3994 rq_unlock(rq, &rf);
3995 }
3996
3997 /*
3998 * Invoked from try_to_wake_up() to check whether the task can be woken up.
3999 *
4000 * The caller holds p::pi_lock if p != current or has preemption
4001 * disabled when p == current.
4002 *
4003 * The rules of saved_state:
4004 *
4005 * The related locking code always holds p::pi_lock when updating
4006 * p::saved_state, which means the code is fully serialized in both cases.
4007 *
4008 * For PREEMPT_RT, the lock wait and lock wakeups happen via TASK_RTLOCK_WAIT.
4009 * No other bits set. This allows us to distinguish all wakeup scenarios.
4010 *
4011 * For FREEZER, the wakeup happens via TASK_FROZEN. No other bits set. This
4012 * allows us to prevent early wakeup of tasks before they can be run on
4013 * asymmetric ISA architectures (eg ARMv9).
4014 */
4015 static __always_inline
4016 bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
4017 {
4018 int match;
4019
4020 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
4021 WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) &&
4022 state != TASK_RTLOCK_WAIT);
4023 }
4024
4025 *success = !!(match = __task_state_match(p, state));
4026
4027 /*
4028 * Saved state preserves the task state across blocking on
4029 * an RT lock or TASK_FREEZABLE tasks. If the state matches,
4030 * set p::saved_state to TASK_RUNNING, but do not wake the task
4031 * because it waits for a lock wakeup or __thaw_task(). Also
4032 * indicate success because from the regular waker's point of
4033 * view this has succeeded.
4034 *
4035 * After acquiring the lock the task will restore p::__state
4036 * from p::saved_state which ensures that the regular
4037 * wakeup is not lost. The restore will also set
4038 * p::saved_state to TASK_RUNNING so any further tests will
4039 * not result in false positives vs. @success
4040 */
4041 if (match < 0)
4042 p->saved_state = TASK_RUNNING;
4043
4044 return match > 0;
4045 }
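/*
 * Illustrative walk-through (a sketch derived from the comment above, not
 * an authoritative description of every case) of the saved_state handling
 * on a PREEMPT_RT kernel:
 *
 *	p sleeps in TASK_INTERRUPTIBLE and then blocks on a sleeping
 *	spinlock:
 *		p->saved_state = TASK_INTERRUPTIBLE;
 *		p->__state     = TASK_RTLOCK_WAIT;
 *
 *	A regular waker calls try_to_wake_up(p, TASK_INTERRUPTIBLE, 0):
 *		__task_state_match() matches on saved_state (match < 0),
 *		so saved_state is set to TASK_RUNNING and *success = 1,
 *		but p is not actually woken here.
 *
 *	The lock wakeup later restores p->__state from p->saved_state
 *	(now TASK_RUNNING), so the regular wakeup is not lost.
 */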
4046
4047 /*
4048 * Notes on Program-Order guarantees on SMP systems.
4049 *
4050 * MIGRATION
4051 *
4052 * The basic program-order guarantee on SMP systems is that when a task [t]
4053 * migrates, all its activity on its old CPU [c0] happens-before any subsequent
4054 * execution on its new CPU [c1].
4055 *
4056 * For migration (of runnable tasks) this is provided by the following means:
4057 *
4058 * A) UNLOCK of the rq(c0)->lock scheduling out task t
4059 * B) migration for t is required to synchronize *both* rq(c0)->lock and
4060 * rq(c1)->lock (if not at the same time, then in that order).
4061 * C) LOCK of the rq(c1)->lock scheduling in task
4062 *
4063 * Release/acquire chaining guarantees that B happens after A and C after B.
4064 * Note: the CPU doing B need not be c0 or c1
4065 *
4066 * Example:
4067 *
4068 * CPU0 CPU1 CPU2
4069 *
4070 * LOCK rq(0)->lock
4071 * sched-out X
4072 * sched-in Y
4073 * UNLOCK rq(0)->lock
4074 *
4075 * LOCK rq(0)->lock // orders against CPU0
4076 * dequeue X
4077 * UNLOCK rq(0)->lock
4078 *
4079 * LOCK rq(1)->lock
4080 * enqueue X
4081 * UNLOCK rq(1)->lock
4082 *
4083 * LOCK rq(1)->lock // orders against CPU2
4084 * sched-out Z
4085 * sched-in X
4086 * UNLOCK rq(1)->lock
4087 *
4088 *
4089 * BLOCKING -- aka. SLEEP + WAKEUP
4090 *
4091 * For blocking we (obviously) need to provide the same guarantee as for
4092 * migration. However the means are completely different as there is no lock
4093 * chain to provide order. Instead we do:
4094 *
4095 * 1) smp_store_release(X->on_cpu, 0) -- finish_task()
4096 * 2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
4097 *
4098 * Example:
4099 *
4100 * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule)
4101 *
4102 * LOCK rq(0)->lock LOCK X->pi_lock
4103 * dequeue X
4104 * sched-out X
4105 * smp_store_release(X->on_cpu, 0);
4106 *
4107 * smp_cond_load_acquire(&X->on_cpu, !VAL);
4108 * X->state = WAKING
4109 * set_task_cpu(X,2)
4110 *
4111 * LOCK rq(2)->lock
4112 * enqueue X
4113 * X->state = RUNNING
4114 * UNLOCK rq(2)->lock
4115 *
4116 * LOCK rq(2)->lock // orders against CPU1
4117 * sched-out Z
4118 * sched-in X
4119 * UNLOCK rq(2)->lock
4120 *
4121 * UNLOCK X->pi_lock
4122 * UNLOCK rq(0)->lock
4123 *
4124 *
4125 * However, for wakeups there is a second guarantee we must provide, namely we
4126 * must ensure that CONDITION=1 done by the caller can not be reordered with
4127 * accesses to the task state; see try_to_wake_up() and set_current_state().
4128 */
4129
4130 /**
4131 * try_to_wake_up - wake up a thread
4132 * @p: the thread to be awakened
4133 * @state: the mask of task states that can be woken
4134 * @wake_flags: wake modifier flags (WF_*)
4135 *
4136 * Conceptually does:
4137 *
4138 * If (@state & @p->state) @p->state = TASK_RUNNING.
4139 *
4140 * If the task was not queued/runnable, also place it back on a runqueue.
4141 *
4142 * This function is atomic against schedule() which would dequeue the task.
4143 *
4144 * It issues a full memory barrier before accessing @p->state, see the comment
4145 * with set_current_state().
4146 *
4147 * Uses p->pi_lock to serialize against concurrent wake-ups.
4148 *
4149 * Relies on p->pi_lock stabilizing:
4150 * - p->sched_class
4151 * - p->cpus_ptr
4152 * - p->sched_task_group
4153 * in order to do migration, see its use of select_task_rq()/set_task_cpu().
4154 *
4155 * Tries really hard to only take one task_rq(p)->lock for performance.
4156 * Takes rq->lock in:
4157 * - ttwu_runnable() -- old rq, unavoidable, see comment there;
4158 * - ttwu_queue() -- new rq, for enqueue of the task;
4159 * - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
4160 *
4161 * As a consequence we race really badly with just about everything. See the
4162 * many memory barriers and their comments for details.
4163 *
4164 * Return: %true if @p->state changes (an actual wakeup was done),
4165 * %false otherwise.
4166 */
4167 int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
4168 {
4169 guard(preempt)();
4170 int cpu, success = 0;
4171
4172 wake_flags |= WF_TTWU;
4173
4174 if (p == current) {
4175 /*
4176 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
4177 * == smp_processor_id()'. Together this means we can special
4178 * case the whole 'p->on_rq && ttwu_runnable()' case below
4179 * without taking any locks.
4180 *
4181 * Specifically, given current runs ttwu() we must be before
4182 * schedule()'s block_task(), as such this must not observe
4183 * sched_delayed.
4184 *
4185 * In particular:
4186 * - we rely on Program-Order guarantees for all the ordering,
4187 * - we're serialized against set_special_state() by virtue of
4188 * it disabling IRQs (this allows not taking ->pi_lock).
4189 */
4190 SCHED_WARN_ON(p->se.sched_delayed);
4191 if (!ttwu_state_match(p, state, &success))
4192 goto out;
4193
4194 trace_sched_waking(p);
4195 ttwu_do_wakeup(p);
4196 goto out;
4197 }
4198
4199 /*
4200 * If we are going to wake up a thread waiting for CONDITION we
4201 * need to ensure that CONDITION=1 done by the caller can not be
4202 * reordered with p->state check below. This pairs with smp_store_mb()
4203 * in set_current_state() that the waiting thread does.
4204 */
4205 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
4206 smp_mb__after_spinlock();
4207 if (!ttwu_state_match(p, state, &success))
4208 break;
4209
4210 trace_sched_waking(p);
4211
4212 /*
4213 * Ensure we load p->on_rq _after_ p->state, otherwise it would
4214 * be possible to, falsely, observe p->on_rq == 0 and get stuck
4215 * in smp_cond_load_acquire() below.
4216 *
4217 * sched_ttwu_pending() try_to_wake_up()
4218 * STORE p->on_rq = 1 LOAD p->state
4219 * UNLOCK rq->lock
4220 *
4221 * __schedule() (switch to task 'p')
4222 * LOCK rq->lock smp_rmb();
4223 * smp_mb__after_spinlock();
4224 * UNLOCK rq->lock
4225 *
4226 * [task p]
4227 * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq
4228 *
4229 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4230 * __schedule(). See the comment for smp_mb__after_spinlock().
4231 *
4232 * A similar smp_rmb() lives in __task_needs_rq_lock().
4233 */
4234 smp_rmb();
4235 if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
4236 break;
4237
4238 #ifdef CONFIG_SMP
4239 /*
4240 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
4241 * possible to, falsely, observe p->on_cpu == 0.
4242 *
4243 * One must be running (->on_cpu == 1) in order to remove oneself
4244 * from the runqueue.
4245 *
4246 * __schedule() (switch to task 'p') try_to_wake_up()
4247 * STORE p->on_cpu = 1 LOAD p->on_rq
4248 * UNLOCK rq->lock
4249 *
4250 * __schedule() (put 'p' to sleep)
4251 * LOCK rq->lock smp_rmb();
4252 * smp_mb__after_spinlock();
4253 * STORE p->on_rq = 0 LOAD p->on_cpu
4254 *
4255 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4256 * __schedule(). See the comment for smp_mb__after_spinlock().
4257 *
4258 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
4259 * schedule()'s deactivate_task() has 'happened' and p will no longer
4260 * care about its own p->state. See the comment in __schedule().
4261 */
4262 smp_acquire__after_ctrl_dep();
4263
4264 /*
4265 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
4266 * == 0), which means we need to do an enqueue, change p->state to
4267 * TASK_WAKING such that we can unlock p->pi_lock before doing the
4268 * enqueue, such as ttwu_queue_wakelist().
4269 */
4270 WRITE_ONCE(p->__state, TASK_WAKING);
4271
4272 /*
4273 * If the owning (remote) CPU is still in the middle of schedule() with
4274 * this task as prev, consider queueing p on the remote CPU's wake_list,
4275 * which potentially sends an IPI instead of spinning on p->on_cpu to
4276 * let the waker make forward progress. This is safe because IRQs are
4277 * disabled and the IPI will deliver after on_cpu is cleared.
4278 *
4279 * Ensure we load task_cpu(p) after p->on_cpu:
4280 *
4281 * set_task_cpu(p, cpu);
4282 * STORE p->cpu = @cpu
4283 * __schedule() (switch to task 'p')
4284 * LOCK rq->lock
4285 * smp_mb__after_spin_lock() smp_cond_load_acquire(&p->on_cpu)
4286 * STORE p->on_cpu = 1 LOAD p->cpu
4287 *
4288 * to ensure we observe the correct CPU on which the task is currently
4289 * scheduling.
4290 */
4291 if (smp_load_acquire(&p->on_cpu) &&
4292 ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
4293 break;
4294
4295 /*
4296 * If the owning (remote) CPU is still in the middle of schedule() with
4297 * this task as prev, wait until it's done referencing the task.
4298 *
4299 * Pairs with the smp_store_release() in finish_task().
4300 *
4301 * This ensures that tasks getting woken will be fully ordered against
4302 * their previous state and preserve Program Order.
4303 */
4304 smp_cond_load_acquire(&p->on_cpu, !VAL);
4305
4306 cpu = select_task_rq(p, p->wake_cpu, &wake_flags);
4307 if (task_cpu(p) != cpu) {
4308 if (p->in_iowait) {
4309 delayacct_blkio_end(p);
4310 atomic_dec(&task_rq(p)->nr_iowait);
4311 }
4312
4313 wake_flags |= WF_MIGRATED;
4314 psi_ttwu_dequeue(p);
4315 set_task_cpu(p, cpu);
4316 }
4317 #else
4318 cpu = task_cpu(p);
4319 #endif /* CONFIG_SMP */
4320
4321 ttwu_queue(p, cpu, wake_flags);
4322 }
4323 out:
4324 if (success)
4325 ttwu_stat(p, task_cpu(p), wake_flags);
4326
4327 return success;
4328 }
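/*
 * For reference, a minimal sketch of the waker side that the CONDITION
 * ordering above cares about (mirroring the wait loop shown before
 * ttwu_runnable()):
 *
 *	CONDITION = 1;
 *	wake_up_process(p);	// try_to_wake_up(p, TASK_NORMAL, 0)
 *
 * The full barrier try_to_wake_up() issues before reading p->state pairs
 * with the smp_store_mb() in the sleeper's set_current_state(), so either
 * the sleeper observes CONDITION set, or the waker observes the sleeping
 * state and performs the wakeup.
 */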
4329
4330 static bool __task_needs_rq_lock(struct task_struct *p)
4331 {
4332 unsigned int state = READ_ONCE(p->__state);
4333
4334 /*
4335 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when
4336 * the task is blocked. Make sure to check @state since ttwu() can drop
4337 * locks at the end, see ttwu_queue_wakelist().
4338 */
4339 if (state == TASK_RUNNING || state == TASK_WAKING)
4340 return true;
4341
4342 /*
4343 * Ensure we load p->on_rq after p->__state, otherwise it would be
4344 * possible to, falsely, observe p->on_rq == 0.
4345 *
4346 * See try_to_wake_up() for a longer comment.
4347 */
4348 smp_rmb();
4349 if (p->on_rq)
4350 return true;
4351
4352 #ifdef CONFIG_SMP
4353 /*
4354 * Ensure the task has finished __schedule() and will not be referenced
4355 * anymore. Again, see try_to_wake_up() for a longer comment.
4356 */
4357 smp_rmb();
4358 smp_cond_load_acquire(&p->on_cpu, !VAL);
4359 #endif
4360
4361 return false;
4362 }
4363
4364 /**
4365 * task_call_func - Invoke a function on task in fixed state
4366 * @p: Process for which the function is to be invoked, can be @current.
4367 * @func: Function to invoke.
4368 * @arg: Argument to function.
4369 *
4370 * Fix the task in its current state by avoiding wakeups and/or rq operations
4371 * and call @func(@arg) on it. This function can use task_is_runnable() and
4372 * task_curr() to work out what the state is, if required. Given that @func
4373 * can be invoked with a runqueue lock held, it had better be quite
4374 * lightweight.
4375 *
4376 * Returns:
4377 * Whatever @func returns
4378 */
4379 int task_call_func(struct task_struct *p, task_call_f func, void *arg)
4380 {
4381 struct rq *rq = NULL;
4382 struct rq_flags rf;
4383 int ret;
4384
4385 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
4386
4387 if (__task_needs_rq_lock(p))
4388 rq = __task_rq_lock(p, &rf);
4389
4390 /*
4391 * At this point the task is pinned; either:
4392 * - blocked and we're holding off wakeups (pi->lock)
4393 * - woken, and we're holding off enqueue (rq->lock)
4394 * - queued, and we're holding off schedule (rq->lock)
4395 * - running, and we're holding off de-schedule (rq->lock)
4396 *
4397 * The called function (@func) can use: task_curr(), p->on_rq and
4398 * p->__state to differentiate between these states.
4399 */
4400 ret = func(p, arg);
4401
4402 if (rq)
4403 rq_unlock(rq, &rf);
4404
4405 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
4406 return ret;
4407 }
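/*
 * Hedged usage sketch (the callback below is made up for illustration and
 * is not an in-tree user): a task_call_f that inspects the pinned task
 * without risking a wakeup or an enqueue:
 *
 *	static int probe_task_state(struct task_struct *p, void *arg)
 *	{
 *		unsigned int *state = arg;
 *
 *		// Safe: @p is pinned by pi_lock and, if needed, rq->lock.
 *		*state = READ_ONCE(p->__state);
 *		return task_curr(p);
 *	}
 *
 *	unsigned int state;
 *	int was_running = task_call_func(p, probe_task_state, &state);
 *
 * Because @func may run with a runqueue lock held, it must stay this small.
 */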
4408
4409 /**
4410 * cpu_curr_snapshot - Return a snapshot of the currently running task
4411 * @cpu: The CPU on which to snapshot the task.
4412 *
4413 * Returns the task_struct pointer of the task "currently" running on
4414 * the specified CPU.
4415 *
4416 * If the specified CPU was offline, the return value is whatever it
4417 * is, perhaps a pointer to the task_struct structure of that CPU's idle
4418 * task, but there is no guarantee. Callers wishing a useful return
4419 * value must take some action to ensure that the specified CPU remains
4420 * online throughout.
4421 *
4422 * This function executes full memory barriers before and after fetching
4423 * the pointer, which permits the caller to confine this function's fetch
4424 * with respect to the caller's accesses to other shared variables.
4425 */
4426 struct task_struct *cpu_curr_snapshot(int cpu)
4427 {
4428 struct rq *rq = cpu_rq(cpu);
4429 struct task_struct *t;
4430 struct rq_flags rf;
4431
4432 rq_lock_irqsave(rq, &rf);
4433 smp_mb__after_spinlock(); /* Pairing determined by caller's synchronization design. */
4434 t = rcu_dereference(cpu_curr(cpu));
4435 rq_unlock_irqrestore(rq, &rf);
4436 smp_mb(); /* Pairing determined by caller's synchronization design. */
4437
4438 return t;
4439 }
4440
4441 /**
4442 * wake_up_process - Wake up a specific process
4443 * @p: The process to be woken up.
4444 *
4445 * Attempt to wake up the nominated process and move it to the set of runnable
4446 * processes.
4447 *
4448 * Return: 1 if the process was woken up, 0 if it was already running.
4449 *
4450 * This function executes a full memory barrier before accessing the task state.
4451 */
4452 int wake_up_process(struct task_struct *p)
4453 {
4454 return try_to_wake_up(p, TASK_NORMAL, 0);
4455 }
4456 EXPORT_SYMBOL(wake_up_process);
4457
4458 int wake_up_state(struct task_struct *p, unsigned int state)
4459 {
4460 return try_to_wake_up(p, state, 0);
4461 }
4462
4463 /*
4464 * Perform scheduler related setup for a newly forked process p.
4465 * p is forked by current.
4466 *
4467 * __sched_fork() is basic setup which is also used by sched_init() to
4468 * initialize the boot CPU's idle task.
4469 */
4470 static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
4471 {
4472 p->on_rq = 0;
4473
4474 p->se.on_rq = 0;
4475 p->se.exec_start = 0;
4476 p->se.sum_exec_runtime = 0;
4477 p->se.prev_sum_exec_runtime = 0;
4478 p->se.nr_migrations = 0;
4479 p->se.vruntime = 0;
4480 p->se.vlag = 0;
4481 INIT_LIST_HEAD(&p->se.group_node);
4482
4483 /* A delayed task cannot be in clone(). */
4484 SCHED_WARN_ON(p->se.sched_delayed);
4485
4486 #ifdef CONFIG_FAIR_GROUP_SCHED
4487 p->se.cfs_rq = NULL;
4488 #endif
4489
4490 #ifdef CONFIG_SCHEDSTATS
4491 /* Even if schedstat is disabled, there should not be garbage */
4492 memset(&p->stats, 0, sizeof(p->stats));
4493 #endif
4494
4495 init_dl_entity(&p->dl);
4496
4497 INIT_LIST_HEAD(&p->rt.run_list);
4498 p->rt.timeout = 0;
4499 p->rt.time_slice = sched_rr_timeslice;
4500 p->rt.on_rq = 0;
4501 p->rt.on_list = 0;
4502
4503 #ifdef CONFIG_SCHED_CLASS_EXT
4504 init_scx_entity(&p->scx);
4505 #endif
4506
4507 #ifdef CONFIG_PREEMPT_NOTIFIERS
4508 INIT_HLIST_HEAD(&p->preempt_notifiers);
4509 #endif
4510
4511 #ifdef CONFIG_COMPACTION
4512 p->capture_control = NULL;
4513 #endif
4514 init_numa_balancing(clone_flags, p);
4515 #ifdef CONFIG_SMP
4516 p->wake_entry.u_flags = CSD_TYPE_TTWU;
4517 p->migration_pending = NULL;
4518 #endif
4519 init_sched_mm_cid(p);
4520 }
4521
4522 DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);
4523
4524 #ifdef CONFIG_NUMA_BALANCING
4525
4526 int sysctl_numa_balancing_mode;
4527
4528 static void __set_numabalancing_state(bool enabled)
4529 {
4530 if (enabled)
4531 static_branch_enable(&sched_numa_balancing);
4532 else
4533 static_branch_disable(&sched_numa_balancing);
4534 }
4535
4536 void set_numabalancing_state(bool enabled)
4537 {
4538 if (enabled)
4539 sysctl_numa_balancing_mode = NUMA_BALANCING_NORMAL;
4540 else
4541 sysctl_numa_balancing_mode = NUMA_BALANCING_DISABLED;
4542 __set_numabalancing_state(enabled);
4543 }
4544
4545 #ifdef CONFIG_PROC_SYSCTL
4546 static void reset_memory_tiering(void)
4547 {
4548 struct pglist_data *pgdat;
4549
4550 for_each_online_pgdat(pgdat) {
4551 pgdat->nbp_threshold = 0;
4552 pgdat->nbp_th_nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE);
4553 pgdat->nbp_th_start = jiffies_to_msecs(jiffies);
4554 }
4555 }
4556
4557 static int sysctl_numa_balancing(const struct ctl_table *table, int write,
4558 void *buffer, size_t *lenp, loff_t *ppos)
4559 {
4560 struct ctl_table t;
4561 int err;
4562 int state = sysctl_numa_balancing_mode;
4563
4564 if (write && !capable(CAP_SYS_ADMIN))
4565 return -EPERM;
4566
4567 t = *table;
4568 t.data = &state;
4569 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
4570 if (err < 0)
4571 return err;
4572 if (write) {
4573 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) &&
4574 (state & NUMA_BALANCING_MEMORY_TIERING))
4575 reset_memory_tiering();
4576 sysctl_numa_balancing_mode = state;
4577 __set_numabalancing_state(state);
4578 }
4579 return err;
4580 }
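/*
 * Administrative note (a sketch matching the handler above, not extra
 * behaviour): the mode is exposed as /proc/sys/kernel/numa_balancing and
 * takes the NUMA_BALANCING_* mode bits, e.g.:
 *
 *	echo 0 > /proc/sys/kernel/numa_balancing	# DISABLED
 *	echo 1 > /proc/sys/kernel/numa_balancing	# NORMAL
 *	echo 2 > /proc/sys/kernel/numa_balancing	# MEMORY_TIERING
 *
 * Newly enabling MEMORY_TIERING also resets the per-node promotion
 * thresholds via reset_memory_tiering().
 */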
4581 #endif
4582 #endif
4583
4584 #ifdef CONFIG_SCHEDSTATS
4585
4586 DEFINE_STATIC_KEY_FALSE(sched_schedstats);
4587
4588 static void set_schedstats(bool enabled)
4589 {
4590 if (enabled)
4591 static_branch_enable(&sched_schedstats);
4592 else
4593 static_branch_disable(&sched_schedstats);
4594 }
4595
4596 void force_schedstat_enabled(void)
4597 {
4598 if (!schedstat_enabled()) {
4599 pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
4600 static_branch_enable(&sched_schedstats);
4601 }
4602 }
4603
4604 static int __init setup_schedstats(char *str)
4605 {
4606 int ret = 0;
4607 if (!str)
4608 goto out;
4609
4610 if (!strcmp(str, "enable")) {
4611 set_schedstats(true);
4612 ret = 1;
4613 } else if (!strcmp(str, "disable")) {
4614 set_schedstats(false);
4615 ret = 1;
4616 }
4617 out:
4618 if (!ret)
4619 pr_warn("Unable to parse schedstats=\n");
4620
4621 return ret;
4622 }
4623 __setup("schedstats=", setup_schedstats);
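/*
 * Usage note, derived from setup_schedstats() above and the sysctl table
 * below: schedstats can be toggled on the kernel command line or at run
 * time, e.g.:
 *
 *	schedstats=enable				(boot parameter)
 *	echo 1 > /proc/sys/kernel/sched_schedstats	(runtime)
 */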
4624
4625 #ifdef CONFIG_PROC_SYSCTL
4626 static int sysctl_schedstats(const struct ctl_table *table, int write, void *buffer,
4627 size_t *lenp, loff_t *ppos)
4628 {
4629 struct ctl_table t;
4630 int err;
4631 int state = static_branch_likely(&sched_schedstats);
4632
4633 if (write && !capable(CAP_SYS_ADMIN))
4634 return -EPERM;
4635
4636 t = *table;
4637 t.data = &state;
4638 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
4639 if (err < 0)
4640 return err;
4641 if (write)
4642 set_schedstats(state);
4643 return err;
4644 }
4645 #endif /* CONFIG_PROC_SYSCTL */
4646 #endif /* CONFIG_SCHEDSTATS */
4647
4648 #ifdef CONFIG_SYSCTL
4649 static struct ctl_table sched_core_sysctls[] = {
4650 #ifdef CONFIG_SCHEDSTATS
4651 {
4652 .procname = "sched_schedstats",
4653 .data = NULL,
4654 .maxlen = sizeof(unsigned int),
4655 .mode = 0644,
4656 .proc_handler = sysctl_schedstats,
4657 .extra1 = SYSCTL_ZERO,
4658 .extra2 = SYSCTL_ONE,
4659 },
4660 #endif /* CONFIG_SCHEDSTATS */
4661 #ifdef CONFIG_UCLAMP_TASK
4662 {
4663 .procname = "sched_util_clamp_min",
4664 .data = &sysctl_sched_uclamp_util_min,
4665 .maxlen = sizeof(unsigned int),
4666 .mode = 0644,
4667 .proc_handler = sysctl_sched_uclamp_handler,
4668 },
4669 {
4670 .procname = "sched_util_clamp_max",
4671 .data = &sysctl_sched_uclamp_util_max,
4672 .maxlen = sizeof(unsigned int),
4673 .mode = 0644,
4674 .proc_handler = sysctl_sched_uclamp_handler,
4675 },
4676 {
4677 .procname = "sched_util_clamp_min_rt_default",
4678 .data = &sysctl_sched_uclamp_util_min_rt_default,
4679 .maxlen = sizeof(unsigned int),
4680 .mode = 0644,
4681 .proc_handler = sysctl_sched_uclamp_handler,
4682 },
4683 #endif /* CONFIG_UCLAMP_TASK */
4684 #ifdef CONFIG_NUMA_BALANCING
4685 {
4686 .procname = "numa_balancing",
4687 .data = NULL, /* filled in by handler */
4688 .maxlen = sizeof(unsigned int),
4689 .mode = 0644,
4690 .proc_handler = sysctl_numa_balancing,
4691 .extra1 = SYSCTL_ZERO,
4692 .extra2 = SYSCTL_FOUR,
4693 },
4694 #endif /* CONFIG_NUMA_BALANCING */
4695 };
4696 static int __init sched_core_sysctl_init(void)
4697 {
4698 register_sysctl_init("kernel", sched_core_sysctls);
4699 return 0;
4700 }
4701 late_initcall(sched_core_sysctl_init);
4702 #endif /* CONFIG_SYSCTL */
4703
4704 /*
4705 * fork()/clone()-time setup:
4706 */
4707 int sched_fork(unsigned long clone_flags, struct task_struct *p)
4708 {
4709 __sched_fork(clone_flags, p);
4710 /*
4711 * We mark the process as NEW here. This guarantees that
4712 * nobody will actually run it, and a signal or other external
4713 * event cannot wake it up and insert it on the runqueue either.
4714 */
4715 p->__state = TASK_NEW;
4716
4717 /*
4718 * Make sure we do not leak PI boosting priority to the child.
4719 */
4720 p->prio = current->normal_prio;
4721
4722 uclamp_fork(p);
4723
4724 /*
4725 * Revert to default priority/policy on fork if requested.
4726 */
4727 if (unlikely(p->sched_reset_on_fork)) {
4728 if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
4729 p->policy = SCHED_NORMAL;
4730 p->static_prio = NICE_TO_PRIO(0);
4731 p->rt_priority = 0;
4732 } else if (PRIO_TO_NICE(p->static_prio) < 0)
4733 p->static_prio = NICE_TO_PRIO(0);
4734
4735 p->prio = p->normal_prio = p->static_prio;
4736 set_load_weight(p, false);
4737 p->se.custom_slice = 0;
4738 p->se.slice = sysctl_sched_base_slice;
4739
4740 /*
4741 * We don't need the reset flag anymore after the fork. It has
4742 * fulfilled its duty:
4743 */
4744 p->sched_reset_on_fork = 0;
4745 }
4746
4747 if (dl_prio(p->prio))
4748 return -EAGAIN;
4749
4750 scx_pre_fork(p);
4751
4752 if (rt_prio(p->prio)) {
4753 p->sched_class = &rt_sched_class;
4754 #ifdef CONFIG_SCHED_CLASS_EXT
4755 } else if (task_should_scx(p->policy)) {
4756 p->sched_class = &ext_sched_class;
4757 #endif
4758 } else {
4759 p->sched_class = &fair_sched_class;
4760 }
4761
4762 init_entity_runnable_average(&p->se);
4763
4764
4765 #ifdef CONFIG_SCHED_INFO
4766 if (likely(sched_info_on()))
4767 memset(&p->sched_info, 0, sizeof(p->sched_info));
4768 #endif
4769 #if defined(CONFIG_SMP)
4770 p->on_cpu = 0;
4771 #endif
4772 init_task_preempt_count(p);
4773 #ifdef CONFIG_SMP
4774 plist_node_init(&p->pushable_tasks, MAX_PRIO);
4775 RB_CLEAR_NODE(&p->pushable_dl_tasks);
4776 #endif
4777 return 0;
4778 }
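/*
 * For orientation, a sketch of the fork path as seen from this file (the
 * exact call sites live in the fork code, not here): a new task passes
 * through
 *
 *	sched_fork()		// state, priority and scheduling class
 *	sched_cgroup_fork()	// task_group and initial CPU
 *	sched_post_fork()	// uclamp / sched_ext post-fork fixups
 *	wake_up_new_task()	// first activation onto a runqueue
 *
 * with sched_cancel_fork() undoing the sched_ext side if the fork is
 * aborted in between.
 */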
4779
4780 int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
4781 {
4782 unsigned long flags;
4783
4784 /*
4785 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
4786 * required yet, but lockdep gets upset if rules are violated.
4787 */
4788 raw_spin_lock_irqsave(&p->pi_lock, flags);
4789 #ifdef CONFIG_CGROUP_SCHED
4790 if (1) {
4791 struct task_group *tg;
4792 tg = container_of(kargs->cset->subsys[cpu_cgrp_id],
4793 struct task_group, css);
4794 tg = autogroup_task_group(p, tg);
4795 p->sched_task_group = tg;
4796 }
4797 #endif
4798 rseq_migrate(p);
4799 /*
4800 * We're setting the CPU for the first time, we don't migrate,
4801 * so use __set_task_cpu().
4802 */
4803 __set_task_cpu(p, smp_processor_id());
4804 if (p->sched_class->task_fork)
4805 p->sched_class->task_fork(p);
4806 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4807
4808 return scx_fork(p);
4809 }
4810
4811 void sched_cancel_fork(struct task_struct *p)
4812 {
4813 scx_cancel_fork(p);
4814 }
4815
4816 void sched_post_fork(struct task_struct *p)
4817 {
4818 uclamp_post_fork(p);
4819 scx_post_fork(p);
4820 }
4821
4822 unsigned long to_ratio(u64 period, u64 runtime)
4823 {
4824 if (runtime == RUNTIME_INF)
4825 return BW_UNIT;
4826
4827 /*
4828 * Doing this here saves a lot of checks in all
4829 * the calling paths, and returning zero seems
4830 * safe for them anyway.
4831 */
4832 if (period == 0)
4833 return 0;
4834
4835 return div64_u64(runtime << BW_SHIFT, period);
4836 }
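/*
 * Worked example (illustrative numbers only, assuming the usual
 * BW_SHIFT = 20 and therefore BW_UNIT = 1 << 20): a runtime of 950000us
 * out of a period of 1000000us yields
 *
 *	to_ratio(1000000, 950000) = (950000 << 20) / 1000000
 *	                          = 996147 ~= 0.95 * BW_UNIT
 *
 * i.e. the bandwidth is expressed as a fixed-point fraction of BW_UNIT.
 */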
4837
4838 /*
4839 * wake_up_new_task - wake up a newly created task for the first time.
4840 *
4841 * This function will do some initial scheduler statistics housekeeping
4842 * that must be done for every newly created context, then puts the task
4843 * on the runqueue and wakes it.
4844 */
4845 void wake_up_new_task(struct task_struct *p)
4846 {
4847 struct rq_flags rf;
4848 struct rq *rq;
4849 int wake_flags = WF_FORK;
4850
4851 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
4852 WRITE_ONCE(p->__state, TASK_RUNNING);
4853 #ifdef CONFIG_SMP
4854 /*
4855 * Fork balancing, do it here and not earlier because:
4856 * - cpus_ptr can change in the fork path
4857 * - any previously selected CPU might disappear through hotplug
4858 *
4859 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
4860 * as we're not fully set-up yet.
4861 */
4862 p->recent_used_cpu = task_cpu(p);
4863 rseq_migrate(p);
4864 __set_task_cpu(p, select_task_rq(p, task_cpu(p), &wake_flags));
4865 #endif
4866 rq = __task_rq_lock(p, &rf);
4867 update_rq_clock(rq);
4868 post_init_entity_util_avg(p);
4869
4870 activate_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_INITIAL);
4871 trace_sched_wakeup_new(p);
4872 wakeup_preempt(rq, p, wake_flags);
4873 #ifdef CONFIG_SMP
4874 if (p->sched_class->task_woken) {
4875 /*
4876 * Nothing relies on rq->lock after this, so it's fine to
4877 * drop it.
4878 */
4879 rq_unpin_lock(rq, &rf);
4880 p->sched_class->task_woken(rq, p);
4881 rq_repin_lock(rq, &rf);
4882 }
4883 #endif
4884 task_rq_unlock(rq, p, &rf);
4885 }
4886
4887 #ifdef CONFIG_PREEMPT_NOTIFIERS
4888
4889 static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
4890
4891 void preempt_notifier_inc(void)
4892 {
4893 static_branch_inc(&preempt_notifier_key);
4894 }
4895 EXPORT_SYMBOL_GPL(preempt_notifier_inc);
4896
4897 void preempt_notifier_dec(void)
4898 {
4899 static_branch_dec(&preempt_notifier_key);
4900 }
4901 EXPORT_SYMBOL_GPL(preempt_notifier_dec);
4902
4903 /**
4904 * preempt_notifier_register - tell me when current is being preempted & rescheduled
4905 * @notifier: notifier struct to register
4906 */
4907 void preempt_notifier_register(struct preempt_notifier *notifier)
4908 {
4909 if (!static_branch_unlikely(&preempt_notifier_key))
4910 WARN(1, "registering preempt_notifier while notifiers disabled\n");
4911
4912 hlist_add_head(&notifier->link, &current->preempt_notifiers);
4913 }
4914 EXPORT_SYMBOL_GPL(preempt_notifier_register);
4915
4916 /**
4917 * preempt_notifier_unregister - no longer interested in preemption notifications
4918 * @notifier: notifier struct to unregister
4919 *
4920 * This is *not* safe to call from within a preemption notifier.
4921 */
4922 void preempt_notifier_unregister(struct preempt_notifier *notifier)
4923 {
4924 hlist_del(&notifier->link);
4925 }
4926 EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
4927
4928 static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
4929 {
4930 struct preempt_notifier *notifier;
4931
4932 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
4933 notifier->ops->sched_in(notifier, raw_smp_processor_id());
4934 }
4935
4936 static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
4937 {
4938 if (static_branch_unlikely(&preempt_notifier_key))
4939 __fire_sched_in_preempt_notifiers(curr);
4940 }
4941
4942 static void
4943 __fire_sched_out_preempt_notifiers(struct task_struct *curr,
4944 struct task_struct *next)
4945 {
4946 struct preempt_notifier *notifier;
4947
4948 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
4949 notifier->ops->sched_out(notifier, next);
4950 }
4951
4952 static __always_inline void
4953 fire_sched_out_preempt_notifiers(struct task_struct *curr,
4954 struct task_struct *next)
4955 {
4956 if (static_branch_unlikely(&preempt_notifier_key))
4957 __fire_sched_out_preempt_notifiers(curr, next);
4958 }
4959
4960 #else /* !CONFIG_PREEMPT_NOTIFIERS */
4961
4962 static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
4963 {
4964 }
4965
4966 static inline void
4967 fire_sched_out_preempt_notifiers(struct task_struct *curr,
4968 struct task_struct *next)
4969 {
4970 }
4971
4972 #endif /* CONFIG_PREEMPT_NOTIFIERS */
4973
4974 static inline void prepare_task(struct task_struct *next)
4975 {
4976 #ifdef CONFIG_SMP
4977 /*
4978 * Claim the task as running, we do this before switching to it
4979 * such that any running task will have this set.
4980 *
4981 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and
4982 * its ordering comment.
4983 */
4984 WRITE_ONCE(next->on_cpu, 1);
4985 #endif
4986 }
4987
4988 static inline void finish_task(struct task_struct *prev)
4989 {
4990 #ifdef CONFIG_SMP
4991 /*
4992 * This must be the very last reference to @prev from this CPU. After
4993 * p->on_cpu is cleared, the task can be moved to a different CPU. We
4994 * must ensure this doesn't happen until the switch is completely
4995 * finished.
4996 *
4997 * In particular, the load of prev->state in finish_task_switch() must
4998 * happen before this.
4999 *
5000 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
5001 */
5002 smp_store_release(&prev->on_cpu, 0);
5003 #endif
5004 }
5005
5006 #ifdef CONFIG_SMP
5007
5008 static void do_balance_callbacks(struct rq *rq, struct balance_callback *head)
5009 {
5010 void (*func)(struct rq *rq);
5011 struct balance_callback *next;
5012
5013 lockdep_assert_rq_held(rq);
5014
5015 while (head) {
5016 func = (void (*)(struct rq *))head->func;
5017 next = head->next;
5018 head->next = NULL;
5019 head = next;
5020
5021 func(rq);
5022 }
5023 }
5024
5025 static void balance_push(struct rq *rq);
5026
5027 /*
5028 * balance_push_callback is a right abuse of the callback interface and plays
5029 * by significantly different rules.
5030 *
5031 * Where the normal balance_callback's purpose is to be run in the same context
5032 * that queued it (only later, when it's safe to drop rq->lock again),
5033 * balance_push_callback is specifically targeted at __schedule().
5034 *
5035 * This abuse is tolerated because it places all the unlikely/odd cases behind
5036 * a single test, namely: rq->balance_callback == NULL.
5037 */
5038 struct balance_callback balance_push_callback = {
5039 .next = NULL,
5040 .func = balance_push,
5041 };
5042
5043 static inline struct balance_callback *
5044 __splice_balance_callbacks(struct rq *rq, bool split)
5045 {
5046 struct balance_callback *head = rq->balance_callback;
5047
5048 if (likely(!head))
5049 return NULL;
5050
5051 lockdep_assert_rq_held(rq);
5052 /*
5053 * Must not take balance_push_callback off the list when
5054 * splice_balance_callbacks() and balance_callbacks() are not
5055 * in the same rq->lock section.
5056 *
5057 * In that case it would be possible for __schedule() to interleave
5058 * and observe the list empty.
5059 */
5060 if (split && head == &balance_push_callback)
5061 head = NULL;
5062 else
5063 rq->balance_callback = NULL;
5064
5065 return head;
5066 }
5067
5068 struct balance_callback *splice_balance_callbacks(struct rq *rq)
5069 {
5070 return __splice_balance_callbacks(rq, true);
5071 }
5072
5073 static void __balance_callbacks(struct rq *rq)
5074 {
5075 do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
5076 }
5077
5078 void balance_callbacks(struct rq *rq, struct balance_callback *head)
5079 {
5080 unsigned long flags;
5081
5082 if (unlikely(head)) {
5083 raw_spin_rq_lock_irqsave(rq, flags);
5084 do_balance_callbacks(rq, head);
5085 raw_spin_rq_unlock_irqrestore(rq, flags);
5086 }
5087 }
5088
5089 #else
5090
5091 static inline void __balance_callbacks(struct rq *rq)
5092 {
5093 }
5094
5095 #endif
5096
5097 static inline void
5098 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
5099 {
5100 /*
5101 * The runqueue lock will be released by the next
5102 * task (which is an invalid locking op but in the case
5103 * of the scheduler it's an obvious special-case), so we
5104 * do an early lockdep release here:
5105 */
5106 rq_unpin_lock(rq, rf);
5107 spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_);
5108 #ifdef CONFIG_DEBUG_SPINLOCK
5109 /* this is a valid case when another task releases the spinlock */
5110 rq_lockp(rq)->owner = next;
5111 #endif
5112 }
5113
5114 static inline void finish_lock_switch(struct rq *rq)
5115 {
5116 /*
5117 * If we are tracking spinlock dependencies then we have to
5118 * fix up the runqueue lock - which gets 'carried over' from
5119 * prev into current:
5120 */
5121 spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_);
5122 __balance_callbacks(rq);
5123 raw_spin_rq_unlock_irq(rq);
5124 }
5125
5126 /*
5127 * NOP if the arch has not defined these:
5128 */
5129
5130 #ifndef prepare_arch_switch
5131 # define prepare_arch_switch(next) do { } while (0)
5132 #endif
5133
5134 #ifndef finish_arch_post_lock_switch
5135 # define finish_arch_post_lock_switch() do { } while (0)
5136 #endif
5137
5138 static inline void kmap_local_sched_out(void)
5139 {
5140 #ifdef CONFIG_KMAP_LOCAL
5141 if (unlikely(current->kmap_ctrl.idx))
5142 __kmap_local_sched_out();
5143 #endif
5144 }
5145
5146 static inline void kmap_local_sched_in(void)
5147 {
5148 #ifdef CONFIG_KMAP_LOCAL
5149 if (unlikely(current->kmap_ctrl.idx))
5150 __kmap_local_sched_in();
5151 #endif
5152 }
5153
5154 /**
5155 * prepare_task_switch - prepare to switch tasks
5156 * @rq: the runqueue preparing to switch
5157 * @prev: the current task that is being switched out
5158 * @next: the task we are going to switch to.
5159 *
5160 * This is called with the rq lock held and interrupts off. It must
5161 * be paired with a subsequent finish_task_switch after the context
5162 * switch.
5163 *
5164 * prepare_task_switch sets up locking and calls architecture specific
5165 * hooks.
5166 */
5167 static inline void
5168 prepare_task_switch(struct rq *rq, struct task_struct *prev,
5169 struct task_struct *next)
5170 {
5171 kcov_prepare_switch(prev);
5172 sched_info_switch(rq, prev, next);
5173 perf_event_task_sched_out(prev, next);
5174 rseq_preempt(prev);
5175 fire_sched_out_preempt_notifiers(prev, next);
5176 kmap_local_sched_out();
5177 prepare_task(next);
5178 prepare_arch_switch(next);
5179 }
5180
5181 /**
5182 * finish_task_switch - clean up after a task-switch
5183 * @prev: the thread we just switched away from.
5184 *
5185 * finish_task_switch must be called after the context switch, paired
5186 * with a prepare_task_switch call before the context switch.
5187 * finish_task_switch will reconcile locking set up by prepare_task_switch,
5188 * and do any other architecture-specific cleanup actions.
5189 *
5190 * Note that we may have delayed dropping an mm in context_switch(). If
5191 * so, we finish that here outside of the runqueue lock. (Doing it
5192 * with the lock held can cause deadlocks; see schedule() for
5193 * details.)
5194 *
5195 * The context switch has flipped the stack from under us and restored the
5196 * local variables which were saved when this task called schedule() in the
5197 * past. 'prev == current' is still correct but we need to recalculate this_rq
5198 * because prev may have moved to another CPU.
5199 */
5200 static struct rq *finish_task_switch(struct task_struct *prev)
5201 __releases(rq->lock)
5202 {
5203 struct rq *rq = this_rq();
5204 struct mm_struct *mm = rq->prev_mm;
5205 unsigned int prev_state;
5206
5207 /*
5208 * The previous task will have left us with a preempt_count of 2
5209 * because it left us after:
5210 *
5211 * schedule()
5212 * preempt_disable(); // 1
5213 * __schedule()
5214 * raw_spin_lock_irq(&rq->lock) // 2
5215 *
5216 * Also, see FORK_PREEMPT_COUNT.
5217 */
5218 if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
5219 "corrupted preempt_count: %s/%d/0x%x\n",
5220 current->comm, current->pid, preempt_count()))
5221 preempt_count_set(FORK_PREEMPT_COUNT);
5222
5223 rq->prev_mm = NULL;
5224
5225 /*
5226 * A task struct has one reference for the use as "current".
5227 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
5228 * schedule one last time. The schedule call will never return, and
5229 * the scheduled task must drop that reference.
5230 *
5231 * We must observe prev->state before clearing prev->on_cpu (in
5232 * finish_task), otherwise a concurrent wakeup can get prev
5233 * running on another CPU and we could race with its RUNNING -> DEAD
5234 * transition, resulting in a double drop.
5235 */
5236 prev_state = READ_ONCE(prev->__state);
5237 vtime_task_switch(prev);
5238 perf_event_task_sched_in(prev, current);
5239 finish_task(prev);
5240 tick_nohz_task_switch();
5241 finish_lock_switch(rq);
5242 finish_arch_post_lock_switch();
5243 kcov_finish_switch(current);
5244 /*
5245 * kmap_local_sched_out() is invoked with rq::lock held and
5246 * interrupts disabled. There is no requirement for that, but the
5247 * sched out code does not have an interrupt enabled section.
5248 * Restoring the maps on sched in does not require interrupts being
5249 * disabled either.
5250 */
5251 kmap_local_sched_in();
5252
5253 fire_sched_in_preempt_notifiers(current);
5254 /*
5255 * When switching through a kernel thread, the loop in
5256 * membarrier_{private,global}_expedited() may have observed that
5257 * kernel thread and not issued an IPI. It is therefore possible to
5258 * schedule between user->kernel->user threads without passing through
5259 * switch_mm(). Membarrier requires a barrier after storing to
5260 * rq->curr, before returning to userspace, so provide them here:
5261 *
5262 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
5263 * provided by mmdrop_lazy_tlb(),
5264 * - a sync_core for SYNC_CORE.
5265 */
5266 if (mm) {
5267 membarrier_mm_sync_core_before_usermode(mm);
5268 mmdrop_lazy_tlb_sched(mm);
5269 }
5270
5271 if (unlikely(prev_state == TASK_DEAD)) {
5272 if (prev->sched_class->task_dead)
5273 prev->sched_class->task_dead(prev);
5274
5275 /* Task is done with its stack. */
5276 put_task_stack(prev);
5277
5278 put_task_struct_rcu_user(prev);
5279 }
5280
5281 return rq;
5282 }
5283
5284 /**
5285 * schedule_tail - first thing a freshly forked thread must call.
5286 * @prev: the thread we just switched away from.
5287 */
5288 asmlinkage __visible void schedule_tail(struct task_struct *prev)
5289 __releases(rq->lock)
5290 {
5291 /*
5292 * New tasks start with FORK_PREEMPT_COUNT, see there and
5293 * finish_task_switch() for details.
5294 *
5295 * finish_task_switch() will drop rq->lock() and lower preempt_count
5296 * and the preempt_enable() will end up enabling preemption (on
5297 * PREEMPT_COUNT kernels).
5298 */
5299
5300 finish_task_switch(prev);
5301 preempt_enable();
5302
5303 if (current->set_child_tid)
5304 put_user(task_pid_vnr(current), current->set_child_tid);
5305
5306 calculate_sigpending();
5307 }
5308
5309 /*
5310 * context_switch - switch to the new MM and the new thread's register state.
5311 */
5312 static __always_inline struct rq *
5313 context_switch(struct rq *rq, struct task_struct *prev,
5314 struct task_struct *next, struct rq_flags *rf)
5315 {
5316 prepare_task_switch(rq, prev, next);
5317
5318 /*
5319 * For paravirt, this is coupled with an exit in switch_to to
5320 * combine the page table reload and the switch backend into
5321 * one hypercall.
5322 */
5323 arch_start_context_switch(prev);
5324
5325 /*
5326 * kernel -> kernel lazy + transfer active
5327 * user -> kernel lazy + mmgrab_lazy_tlb() active
5328 *
5329 * kernel -> user switch + mmdrop_lazy_tlb() active
5330 * user -> user switch
5331 *
5332 * switch_mm_cid() needs to be updated if the barriers provided
5333 * by context_switch() are modified.
5334 */
5335 if (!next->mm) { // to kernel
5336 enter_lazy_tlb(prev->active_mm, next);
5337
5338 next->active_mm = prev->active_mm;
5339 if (prev->mm) // from user
5340 mmgrab_lazy_tlb(prev->active_mm);
5341 else
5342 prev->active_mm = NULL;
5343 } else { // to user
5344 membarrier_switch_mm(rq, prev->active_mm, next->mm);
5345 /*
5346 * sys_membarrier() requires an smp_mb() between setting
5347 * rq->curr / membarrier_switch_mm() and returning to userspace.
5348 *
5349 * The below provides this either through switch_mm(), or in
5350 * case 'prev->active_mm == next->mm' through
5351 * finish_task_switch()'s mmdrop().
5352 */
5353 switch_mm_irqs_off(prev->active_mm, next->mm, next);
5354 lru_gen_use_mm(next->mm);
5355
5356 if (!prev->mm) { // from kernel
5357 /* will mmdrop_lazy_tlb() in finish_task_switch(). */
5358 rq->prev_mm = prev->active_mm;
5359 prev->active_mm = NULL;
5360 }
5361 }
5362
5363 /* switch_mm_cid() requires the memory barriers above. */
5364 switch_mm_cid(rq, prev, next);
5365
5366 prepare_lock_switch(rq, next, rf);
5367
5368 /* Here we just switch the register state and the stack. */
5369 switch_to(prev, next, prev);
5370 barrier();
5371
5372 return finish_task_switch(prev);
5373 }
5374
5375 /*
5376 * nr_running and nr_context_switches:
5377 *
5378 * externally visible scheduler statistics: current number of runnable
5379 * threads, total number of context switches performed since bootup.
5380 */
5381 unsigned int nr_running(void)
5382 {
5383 unsigned int i, sum = 0;
5384
5385 for_each_online_cpu(i)
5386 sum += cpu_rq(i)->nr_running;
5387
5388 return sum;
5389 }
5390
5391 /*
5392 * Check if only the current task is running on the CPU.
5393 *
5394 * Caution: this function does not check that the caller has disabled
5395 * preemption, thus the result might have a time-of-check-to-time-of-use
5396 * race. The caller is responsible to use it correctly, for example:
5397 *
5398 * - from a non-preemptible section (of course)
5399 *
5400 * - from a thread that is bound to a single CPU
5401 *
5402 * - in a loop with very short iterations (e.g. a polling loop)
5403 */
5404 bool single_task_running(void)
5405 {
5406 return raw_rq()->nr_running == 1;
5407 }
5408 EXPORT_SYMBOL(single_task_running);
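
/*
 * Illustrative sketch (hypothetical caller, not part of this file): per the
 * comment above, a poller bound to a single CPU can use this to back off
 * once it is no longer alone on that CPU:
 *
 *	while (!poll_done()) {			// poll_done() is a made-up predicate
 *		if (!single_task_running())
 *			break;			// another task is runnable here, stop polling
 *		cpu_relax();
 *	}
 */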
5409
5410 unsigned long long nr_context_switches_cpu(int cpu)
5411 {
5412 return cpu_rq(cpu)->nr_switches;
5413 }
5414
5415 unsigned long long nr_context_switches(void)
5416 {
5417 int i;
5418 unsigned long long sum = 0;
5419
5420 for_each_possible_cpu(i)
5421 sum += cpu_rq(i)->nr_switches;
5422
5423 return sum;
5424 }
5425
5426 /*
5427 * Consumers of these two interfaces, like for example the cpuidle menu
5428 * governor, are using nonsensical data: they prefer shallow idle-state selection
5429 * for a CPU that has IO-wait, even though that CPU might not even end up running
5430 * the task when it does become runnable.
5431 */
5432
5433 unsigned int nr_iowait_cpu(int cpu)
5434 {
5435 return atomic_read(&cpu_rq(cpu)->nr_iowait);
5436 }
5437
5438 /*
5439 * IO-wait accounting, and how it's mostly bollocks (on SMP).
5440 *
5441 * The idea behind IO-wait accounting is to account the idle time that we could
5442 * have spent running if it were not for IO. That is, if we were to improve the
5443 * storage performance, we'd have a proportional reduction in IO-wait time.
5444 *
5445 * This all works nicely on UP, where, when a task blocks on IO, we account
5446 * idle time as IO-wait, because if the storage were faster, it could've been
5447 * running and we'd not be idle.
5448 *
5449 * This has been extended to SMP, by doing the same for each CPU. This however
5450 * is broken.
5451 *
5452 * Imagine for instance the case where two tasks block on one CPU, only the one
5453 * CPU will have IO-wait accounted, while the other has regular idle. Even
5454 * though, if the storage were faster, both could've run at the same time,
5455 * utilising both CPUs.
5456 *
5457 * This means that, when looking globally, the current IO-wait accounting on
5458 * SMP is a lower bound, by reason of under-accounting.
5459 *
5460 * Worse, since the numbers are provided per CPU, they are sometimes
5461 * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
5462 * associated with any one particular CPU; it can wake up on another CPU than
5463 * the one it blocked on. This means the per-CPU IO-wait number is meaningless.
5464 *
5465 * Task CPU affinities can make all that even more 'interesting'.
5466 */
5467
5468 unsigned int nr_iowait(void)
5469 {
5470 unsigned int i, sum = 0;
5471
5472 for_each_possible_cpu(i)
5473 sum += nr_iowait_cpu(i);
5474
5475 return sum;
5476 }
5477
5478 #ifdef CONFIG_SMP
5479
5480 /*
5481 * sched_exec - execve() is a valuable balancing opportunity, because at
5482 * this point the task has the smallest effective memory and cache footprint.
5483 */
5484 void sched_exec(void)
5485 {
5486 struct task_struct *p = current;
5487 struct migration_arg arg;
5488 int dest_cpu;
5489
5490 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
5491 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC);
5492 if (dest_cpu == smp_processor_id())
5493 return;
5494
5495 if (unlikely(!cpu_active(dest_cpu)))
5496 return;
5497
5498 arg = (struct migration_arg){ p, dest_cpu };
5499 }
5500 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
5501 }
5502
5503 #endif
5504
5505 DEFINE_PER_CPU(struct kernel_stat, kstat);
5506 DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
5507
5508 EXPORT_PER_CPU_SYMBOL(kstat);
5509 EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
5510
5511 /*
5512 * The function fair_sched_class.update_curr accesses the struct curr
5513 * and its field curr->exec_start; when called from task_sched_runtime(),
5514 * we observe a high rate of cache misses in practice.
5515 * Prefetching this data results in improved performance.
5516 */
5517 static inline void prefetch_curr_exec_start(struct task_struct *p)
5518 {
5519 #ifdef CONFIG_FAIR_GROUP_SCHED
5520 struct sched_entity *curr = p->se.cfs_rq->curr;
5521 #else
5522 struct sched_entity *curr = task_rq(p)->cfs.curr;
5523 #endif
5524 prefetch(curr);
5525 prefetch(&curr->exec_start);
5526 }
5527
5528 /*
5529 * Return accounted runtime for the task.
5530 * In case the task is currently running, return the runtime plus current's
5531 * pending runtime that has not been accounted yet.
5532 */
5533 unsigned long long task_sched_runtime(struct task_struct *p)
5534 {
5535 struct rq_flags rf;
5536 struct rq *rq;
5537 u64 ns;
5538
5539 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
5540 /*
5541 * 64-bit doesn't need locks to atomically read a 64-bit value.
5542 * So we have an optimization opportunity when the task's delta_exec is 0.
5543 * Reading ->on_cpu is racy, but this is OK.
5544 *
5545 * If we race with it leaving CPU, we'll take a lock. So we're correct.
5546 * If we race with it entering CPU, unaccounted time is 0. This is
5547 * indistinguishable from the read occurring a few cycles earlier.
5548 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
5549 * been accounted, so we're correct here as well.
5550 */
5551 if (!p->on_cpu || !task_on_rq_queued(p))
5552 return p->se.sum_exec_runtime;
5553 #endif
5554
5555 rq = task_rq_lock(p, &rf);
5556 /*
5557 * Must be ->curr _and_ ->on_rq. If dequeued, we would
5558 * project cycles that may never be accounted to this
5559 * thread, breaking clock_gettime().
5560 */
5561 if (task_current_donor(rq, p) && task_on_rq_queued(p)) {
5562 prefetch_curr_exec_start(p);
5563 update_rq_clock(rq);
5564 p->sched_class->update_curr(rq);
5565 }
5566 ns = p->se.sum_exec_runtime;
5567 task_rq_unlock(rq, p, &rf);
5568
5569 return ns;
5570 }
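
/*
 * Usage sketch (illustrative only): thread CPU-clock readers, e.g. the
 * clock_gettime() path mentioned above, end up consuming this value. A
 * hypothetical in-kernel consumer holding a reference on @p could do:
 *
 *	u64 ns = task_sched_runtime(p);	// includes the still-unaccounted delta
 *	pr_info("%s/%d ran for %llu ns\n", p->comm, p->pid, ns);
 */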
5571
5572 #ifdef CONFIG_SCHED_DEBUG
5573 static u64 cpu_resched_latency(struct rq *rq)
5574 {
5575 int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms);
5576 u64 resched_latency, now = rq_clock(rq);
5577 static bool warned_once;
5578
5579 if (sysctl_resched_latency_warn_once && warned_once)
5580 return 0;
5581
5582 if (!need_resched() || !latency_warn_ms)
5583 return 0;
5584
5585 if (system_state == SYSTEM_BOOTING)
5586 return 0;
5587
5588 if (!rq->last_seen_need_resched_ns) {
5589 rq->last_seen_need_resched_ns = now;
5590 rq->ticks_without_resched = 0;
5591 return 0;
5592 }
5593
5594 rq->ticks_without_resched++;
5595 resched_latency = now - rq->last_seen_need_resched_ns;
5596 if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC)
5597 return 0;
5598
5599 warned_once = true;
5600
5601 return resched_latency;
5602 }
5603
5604 static int __init setup_resched_latency_warn_ms(char *str)
5605 {
5606 long val;
5607
5608 if ((kstrtol(str, 0, &val))) {
5609 pr_warn("Unable to set resched_latency_warn_ms\n");
5610 return 1;
5611 }
5612
5613 sysctl_resched_latency_warn_ms = val;
5614 return 1;
5615 }
5616 __setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms);
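
/*
 * Example (boot-time configuration): the warning threshold can be set on the
 * kernel command line, e.g.
 *
 *	resched_latency_warn_ms=100
 *
 * A value of 0 disables the check in cpu_resched_latency(), and the warning
 * is only emitted when the LATENCY_WARN scheduler feature is enabled (see
 * sched_tick() below).
 */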
5617 #else
5618 static inline u64 cpu_resched_latency(struct rq *rq) { return 0; }
5619 #endif /* CONFIG_SCHED_DEBUG */
5620
5621 /*
5622 * This function gets called by the timer code, with HZ frequency.
5623 * We call it with interrupts disabled.
5624 */
5625 void sched_tick(void)
5626 {
5627 int cpu = smp_processor_id();
5628 struct rq *rq = cpu_rq(cpu);
5629 /* accounting goes to the donor task */
5630 struct task_struct *donor;
5631 struct rq_flags rf;
5632 unsigned long hw_pressure;
5633 u64 resched_latency;
5634
5635 if (housekeeping_cpu(cpu, HK_TYPE_TICK))
5636 arch_scale_freq_tick();
5637
5638 sched_clock_tick();
5639
5640 rq_lock(rq, &rf);
5641 donor = rq->donor;
5642
5643 psi_account_irqtime(rq, donor, NULL);
5644
5645 update_rq_clock(rq);
5646 hw_pressure = arch_scale_hw_pressure(cpu_of(rq));
5647 update_hw_load_avg(rq_clock_task(rq), rq, hw_pressure);
5648
5649 if (dynamic_preempt_lazy() && tif_test_bit(TIF_NEED_RESCHED_LAZY))
5650 resched_curr(rq);
5651
5652 donor->sched_class->task_tick(rq, donor, 0);
5653 if (sched_feat(LATENCY_WARN))
5654 resched_latency = cpu_resched_latency(rq);
5655 calc_global_load_tick(rq);
5656 sched_core_tick(rq);
5657 task_tick_mm_cid(rq, donor);
5658 scx_tick(rq);
5659
5660 rq_unlock(rq, &rf);
5661
5662 if (sched_feat(LATENCY_WARN) && resched_latency)
5663 resched_latency_warn(cpu, resched_latency);
5664
5665 perf_event_task_tick();
5666
5667 if (donor->flags & PF_WQ_WORKER)
5668 wq_worker_tick(donor);
5669
5670 #ifdef CONFIG_SMP
5671 if (!scx_switched_all()) {
5672 rq->idle_balance = idle_cpu(cpu);
5673 sched_balance_trigger(rq);
5674 }
5675 #endif
5676 }
5677
5678 #ifdef CONFIG_NO_HZ_FULL
5679
5680 struct tick_work {
5681 int cpu;
5682 atomic_t state;
5683 struct delayed_work work;
5684 };
5685 /* Values for ->state, see diagram below. */
5686 #define TICK_SCHED_REMOTE_OFFLINE 0
5687 #define TICK_SCHED_REMOTE_OFFLINING 1
5688 #define TICK_SCHED_REMOTE_RUNNING 2
5689
5690 /*
5691 * State diagram for ->state:
5692 *
5693 *
5694 * TICK_SCHED_REMOTE_OFFLINE
5695 * | ^
5696 * | |
5697 * | | sched_tick_remote()
5698 * | |
5699 * | |
5700 * +--TICK_SCHED_REMOTE_OFFLINING
5701 * | ^
5702 * | |
5703 * sched_tick_start() | | sched_tick_stop()
5704 * | |
5705 * V |
5706 * TICK_SCHED_REMOTE_RUNNING
5707 *
5708 *
5709 * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
5710 * and sched_tick_start() are happy to leave the state in RUNNING.
5711 */
5712
5713 static struct tick_work __percpu *tick_work_cpu;
5714
5715 static void sched_tick_remote(struct work_struct *work)
5716 {
5717 struct delayed_work *dwork = to_delayed_work(work);
5718 struct tick_work *twork = container_of(dwork, struct tick_work, work);
5719 int cpu = twork->cpu;
5720 struct rq *rq = cpu_rq(cpu);
5721 int os;
5722
5723 /*
5724 * Handle the tick only if it appears the remote CPU is running in full
5725 * dynticks mode. The check is racy by nature, but missing a tick or
5726 * having one too much is no big deal because the scheduler tick updates
5727 * statistics and checks timeslices in a time-independent way, regardless
5728 * of when exactly it is running.
5729 */
5730 if (tick_nohz_tick_stopped_cpu(cpu)) {
5731 guard(rq_lock_irq)(rq);
5732 struct task_struct *curr = rq->curr;
5733
5734 if (cpu_online(cpu)) {
5735 /*
5736 * Since this is a remote tick for full dynticks mode,
5737 * we are always sure that there is no proxy (only a
5738 * single task is running).
5739 */
5740 SCHED_WARN_ON(rq->curr != rq->donor);
5741 update_rq_clock(rq);
5742
5743 if (!is_idle_task(curr)) {
5744 /*
5745 * Make sure the next tick runs within a
5746 * reasonable amount of time.
5747 */
5748 u64 delta = rq_clock_task(rq) - curr->se.exec_start;
5749 WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
5750 }
5751 curr->sched_class->task_tick(rq, curr, 0);
5752
5753 calc_load_nohz_remote(rq);
5754 }
5755 }
5756
5757 /*
5758 * Run the remote tick once per second (1Hz). This arbitrary
5759 * period is long enough to avoid overload but short enough
5760 * to keep scheduler-internal stats reasonably up to date. But
5761 * first update state to reflect hotplug activity if required.
5762 */
5763 os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
5764 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
5765 if (os == TICK_SCHED_REMOTE_RUNNING)
5766 queue_delayed_work(system_unbound_wq, dwork, HZ);
5767 }
5768
5769 static void sched_tick_start(int cpu)
5770 {
5771 int os;
5772 struct tick_work *twork;
5773
5774 if (housekeeping_cpu(cpu, HK_TYPE_TICK))
5775 return;
5776
5777 WARN_ON_ONCE(!tick_work_cpu);
5778
5779 twork = per_cpu_ptr(tick_work_cpu, cpu);
5780 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
5781 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
5782 if (os == TICK_SCHED_REMOTE_OFFLINE) {
5783 twork->cpu = cpu;
5784 INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
5785 queue_delayed_work(system_unbound_wq, &twork->work, HZ);
5786 }
5787 }
5788
5789 #ifdef CONFIG_HOTPLUG_CPU
5790 static void sched_tick_stop(int cpu)
5791 {
5792 struct tick_work *twork;
5793 int os;
5794
5795 if (housekeeping_cpu(cpu, HK_TYPE_TICK))
5796 return;
5797
5798 WARN_ON_ONCE(!tick_work_cpu);
5799
5800 twork = per_cpu_ptr(tick_work_cpu, cpu);
5801 /* There cannot be competing actions, but don't rely on stop-machine. */
5802 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
5803 WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
5804 /* Don't cancel, as this would mess up the state machine. */
5805 }
5806 #endif /* CONFIG_HOTPLUG_CPU */
5807
5808 int __init sched_tick_offload_init(void)
5809 {
5810 tick_work_cpu = alloc_percpu(struct tick_work);
5811 BUG_ON(!tick_work_cpu);
5812 return 0;
5813 }
5814
5815 #else /* !CONFIG_NO_HZ_FULL */
5816 static inline void sched_tick_start(int cpu) { }
5817 static inline void sched_tick_stop(int cpu) { }
5818 #endif
5819
5820 #if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
5821 defined(CONFIG_TRACE_PREEMPT_TOGGLE))
5822 /*
5823 * If the value passed in is equal to the current preempt count
5824 * then we just disabled preemption. Start timing the latency.
5825 */
5826 static inline void preempt_latency_start(int val)
5827 {
5828 if (preempt_count() == val) {
5829 unsigned long ip = get_lock_parent_ip();
5830 #ifdef CONFIG_DEBUG_PREEMPT
5831 current->preempt_disable_ip = ip;
5832 #endif
5833 trace_preempt_off(CALLER_ADDR0, ip);
5834 }
5835 }
5836
5837 void preempt_count_add(int val)
5838 {
5839 #ifdef CONFIG_DEBUG_PREEMPT
5840 /*
5841 * Underflow?
5842 */
5843 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
5844 return;
5845 #endif
5846 __preempt_count_add(val);
5847 #ifdef CONFIG_DEBUG_PREEMPT
5848 /*
5849 * Spinlock count overflowing soon?
5850 */
5851 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
5852 PREEMPT_MASK - 10);
5853 #endif
5854 preempt_latency_start(val);
5855 }
5856 EXPORT_SYMBOL(preempt_count_add);
5857 NOKPROBE_SYMBOL(preempt_count_add);
5858
5859 /*
5860 * If the value passed in is equal to the current preempt count
5861 * then we just enabled preemption. Stop timing the latency.
5862 */
5863 static inline void preempt_latency_stop(int val)
5864 {
5865 if (preempt_count() == val)
5866 trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
5867 }
5868
5869 void preempt_count_sub(int val)
5870 {
5871 #ifdef CONFIG_DEBUG_PREEMPT
5872 /*
5873 * Underflow?
5874 */
5875 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
5876 return;
5877 /*
5878 * Is the spinlock portion underflowing?
5879 */
5880 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
5881 !(preempt_count() & PREEMPT_MASK)))
5882 return;
5883 #endif
5884
5885 preempt_latency_stop(val);
5886 __preempt_count_sub(val);
5887 }
5888 EXPORT_SYMBOL(preempt_count_sub);
5889 NOKPROBE_SYMBOL(preempt_count_sub);
5890
5891 #else
5892 static inline void preempt_latency_start(int val) { }
5893 static inline void preempt_latency_stop(int val) { }
5894 #endif
5895
5896 static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
5897 {
5898 #ifdef CONFIG_DEBUG_PREEMPT
5899 return p->preempt_disable_ip;
5900 #else
5901 return 0;
5902 #endif
5903 }
5904
5905 /*
5906 * Print scheduling while atomic bug:
5907 */
5908 static noinline void __schedule_bug(struct task_struct *prev)
5909 {
5910 /* Save this before calling printk(), since that will clobber it */
5911 unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
5912
5913 if (oops_in_progress)
5914 return;
5915
5916 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
5917 prev->comm, prev->pid, preempt_count());
5918
5919 debug_show_held_locks(prev);
5920 print_modules();
5921 if (irqs_disabled())
5922 print_irqtrace_events(prev);
5923 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
5924 pr_err("Preemption disabled at:");
5925 print_ip_sym(KERN_ERR, preempt_disable_ip);
5926 }
5927 check_panic_on_warn("scheduling while atomic");
5928
5929 dump_stack();
5930 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
5931 }
5932
5933 /*
5934 * Various schedule()-time debugging checks and statistics:
5935 */
5936 static inline void schedule_debug(struct task_struct *prev, bool preempt)
5937 {
5938 #ifdef CONFIG_SCHED_STACK_END_CHECK
5939 if (task_stack_end_corrupted(prev))
5940 panic("corrupted stack end detected inside scheduler\n");
5941
5942 if (task_scs_end_corrupted(prev))
5943 panic("corrupted shadow stack detected inside scheduler\n");
5944 #endif
5945
5946 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
5947 if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) {
5948 printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
5949 prev->comm, prev->pid, prev->non_block_count);
5950 dump_stack();
5951 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
5952 }
5953 #endif
5954
5955 if (unlikely(in_atomic_preempt_off())) {
5956 __schedule_bug(prev);
5957 preempt_count_set(PREEMPT_DISABLED);
5958 }
5959 rcu_sleep_check();
5960 SCHED_WARN_ON(ct_state() == CT_STATE_USER);
5961
5962 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
5963
5964 schedstat_inc(this_rq()->sched_count);
5965 }
5966
5967 static void prev_balance(struct rq *rq, struct task_struct *prev,
5968 struct rq_flags *rf)
5969 {
5970 const struct sched_class *start_class = prev->sched_class;
5971 const struct sched_class *class;
5972
5973 #ifdef CONFIG_SCHED_CLASS_EXT
5974 /*
5975 * SCX requires a balance() call before every pick_task() including when
5976 * waking up from SCHED_IDLE. If @start_class is below SCX, start from
5977 * SCX instead. Also, set a flag to detect a missing balance() call.
5978 */
5979 if (scx_enabled()) {
5980 rq->scx.flags |= SCX_RQ_BAL_PENDING;
5981 if (sched_class_above(&ext_sched_class, start_class))
5982 start_class = &ext_sched_class;
5983 }
5984 #endif
5985
5986 /*
5987 * We must do the balancing pass before put_prev_task(), such
5988 * that when we release the rq->lock the task is in the same
5989 * state as before we took rq->lock.
5990 *
5991 * We can terminate the balance pass as soon as we know there is
5992 * a runnable task of @class priority or higher.
5993 */
5994 for_active_class_range(class, start_class, &idle_sched_class) {
5995 if (class->balance && class->balance(rq, prev, rf))
5996 break;
5997 }
5998 }
5999
6000 /*
6001 * Pick up the highest-prio task:
6002 */
6003 static inline struct task_struct *
6004 __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6005 {
6006 const struct sched_class *class;
6007 struct task_struct *p;
6008
6009 rq->dl_server = NULL;
6010
6011 if (scx_enabled())
6012 goto restart;
6013
6014 /*
6015 * Optimization: we know that if all tasks are in the fair class we can
6016 * call that function directly, but only if the @prev task wasn't of a
6017 * higher scheduling class, because otherwise those classes would lose the
6018 * opportunity to pull in more work from other CPUs.
6019 */
6020 if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) &&
6021 rq->nr_running == rq->cfs.h_nr_running)) {
6022
6023 p = pick_next_task_fair(rq, prev, rf);
6024 if (unlikely(p == RETRY_TASK))
6025 goto restart;
6026
6027 /* Assume the next prioritized class is idle_sched_class */
6028 if (!p) {
6029 p = pick_task_idle(rq);
6030 put_prev_set_next_task(rq, prev, p);
6031 }
6032
6033 return p;
6034 }
6035
6036 restart:
6037 prev_balance(rq, prev, rf);
6038
6039 for_each_active_class(class) {
6040 if (class->pick_next_task) {
6041 p = class->pick_next_task(rq, prev);
6042 if (p)
6043 return p;
6044 } else {
6045 p = class->pick_task(rq);
6046 if (p) {
6047 put_prev_set_next_task(rq, prev, p);
6048 return p;
6049 }
6050 }
6051 }
6052
6053 BUG(); /* The idle class should always have a runnable task. */
6054 }
6055
6056 #ifdef CONFIG_SCHED_CORE
6057 static inline bool is_task_rq_idle(struct task_struct *t)
6058 {
6059 return (task_rq(t)->idle == t);
6060 }
6061
6062 static inline bool cookie_equals(struct task_struct *a, unsigned long cookie)
6063 {
6064 return is_task_rq_idle(a) || (a->core_cookie == cookie);
6065 }
6066
6067 static inline bool cookie_match(struct task_struct *a, struct task_struct *b)
6068 {
6069 if (is_task_rq_idle(a) || is_task_rq_idle(b))
6070 return true;
6071
6072 return a->core_cookie == b->core_cookie;
6073 }
6074
6075 static inline struct task_struct *pick_task(struct rq *rq)
6076 {
6077 const struct sched_class *class;
6078 struct task_struct *p;
6079
6080 rq->dl_server = NULL;
6081
6082 for_each_active_class(class) {
6083 p = class->pick_task(rq);
6084 if (p)
6085 return p;
6086 }
6087
6088 BUG(); /* The idle class should always have a runnable task. */
6089 }
6090
6091 extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
6092
6093 static void queue_core_balance(struct rq *rq);
6094
6095 static struct task_struct *
6096 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6097 {
6098 struct task_struct *next, *p, *max = NULL;
6099 const struct cpumask *smt_mask;
6100 bool fi_before = false;
6101 bool core_clock_updated = (rq == rq->core);
6102 unsigned long cookie;
6103 int i, cpu, occ = 0;
6104 struct rq *rq_i;
6105 bool need_sync;
6106
6107 if (!sched_core_enabled(rq))
6108 return __pick_next_task(rq, prev, rf);
6109
6110 cpu = cpu_of(rq);
6111
6112 /* Stopper task is switching into idle, no need for core-wide selection. */
6113 if (cpu_is_offline(cpu)) {
6114 /*
6115 * Reset core_pick so that we don't enter the fastpath when
6116 * coming online. core_pick would already be migrated to
6117 * another cpu during offline.
6118 */
6119 rq->core_pick = NULL;
6120 rq->core_dl_server = NULL;
6121 return __pick_next_task(rq, prev, rf);
6122 }
6123
6124 /*
6125 * If there were no {en,de}queues since we picked (IOW, the task
6126 * pointers are all still valid), and we haven't scheduled the last
6127 * pick yet, do so now.
6128 *
6129 * rq->core_pick can be NULL if no selection was made for a CPU because
6130 * it was either offline or went offline during a sibling's core-wide
6131 * selection. In this case, do a core-wide selection.
6132 */
6133 if (rq->core->core_pick_seq == rq->core->core_task_seq &&
6134 rq->core->core_pick_seq != rq->core_sched_seq &&
6135 rq->core_pick) {
6136 WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq);
6137
6138 next = rq->core_pick;
6139 rq->dl_server = rq->core_dl_server;
6140 rq->core_pick = NULL;
6141 rq->core_dl_server = NULL;
6142 goto out_set_next;
6143 }
6144
6145 prev_balance(rq, prev, rf);
6146
6147 smt_mask = cpu_smt_mask(cpu);
6148 need_sync = !!rq->core->core_cookie;
6149
6150 /* reset state */
6151 rq->core->core_cookie = 0UL;
6152 if (rq->core->core_forceidle_count) {
6153 if (!core_clock_updated) {
6154 update_rq_clock(rq->core);
6155 core_clock_updated = true;
6156 }
6157 sched_core_account_forceidle(rq);
6158 /* reset after accounting force idle */
6159 rq->core->core_forceidle_start = 0;
6160 rq->core->core_forceidle_count = 0;
6161 rq->core->core_forceidle_occupation = 0;
6162 need_sync = true;
6163 fi_before = true;
6164 }
6165
6166 /*
6167 * core->core_task_seq, core->core_pick_seq, rq->core_sched_seq
6168 *
6169 * @task_seq guards the task state ({en,de}queues)
6170 * @pick_seq is the @task_seq we did a selection on
6171 * @sched_seq is the @pick_seq we scheduled
6172 *
6173 * However, preemptions can cause multiple picks on the same task set.
6174 * 'Fix' this by also increasing @task_seq for every pick.
6175 */
6176 rq->core->core_task_seq++;
6177
6178 /*
6179 * Optimize for common case where this CPU has no cookies
6180 * and there are no cookied tasks running on siblings.
6181 */
6182 if (!need_sync) {
6183 next = pick_task(rq);
6184 if (!next->core_cookie) {
6185 rq->core_pick = NULL;
6186 rq->core_dl_server = NULL;
6187 /*
6188 * For robustness, update the min_vruntime_fi for
6189 * unconstrained picks as well.
6190 */
6191 WARN_ON_ONCE(fi_before);
6192 task_vruntime_update(rq, next, false);
6193 goto out_set_next;
6194 }
6195 }
6196
6197 /*
6198 * For each thread: do the regular task pick and find the max prio task
6199 * amongst them.
6200 *
6201 * Tie-break prio towards the current CPU
6202 */
6203 for_each_cpu_wrap(i, smt_mask, cpu) {
6204 rq_i = cpu_rq(i);
6205
6206 /*
6207 * Current cpu always has its clock updated on entrance to
6208 * pick_next_task(). If the current cpu is not the core,
6209 * the core may also have been updated above.
6210 */
6211 if (i != cpu && (rq_i != rq->core || !core_clock_updated))
6212 update_rq_clock(rq_i);
6213
6214 rq_i->core_pick = p = pick_task(rq_i);
6215 rq_i->core_dl_server = rq_i->dl_server;
6216
6217 if (!max || prio_less(max, p, fi_before))
6218 max = p;
6219 }
6220
6221 cookie = rq->core->core_cookie = max->core_cookie;
6222
6223 /*
6224 * For each thread: try and find a runnable task that matches @max or
6225 * force idle.
6226 */
6227 for_each_cpu(i, smt_mask) {
6228 rq_i = cpu_rq(i);
6229 p = rq_i->core_pick;
6230
6231 if (!cookie_equals(p, cookie)) {
6232 p = NULL;
6233 if (cookie)
6234 p = sched_core_find(rq_i, cookie);
6235 if (!p)
6236 p = idle_sched_class.pick_task(rq_i);
6237 }
6238
6239 rq_i->core_pick = p;
6240 rq_i->core_dl_server = NULL;
6241
6242 if (p == rq_i->idle) {
6243 if (rq_i->nr_running) {
6244 rq->core->core_forceidle_count++;
6245 if (!fi_before)
6246 rq->core->core_forceidle_seq++;
6247 }
6248 } else {
6249 occ++;
6250 }
6251 }
6252
6253 if (schedstat_enabled() && rq->core->core_forceidle_count) {
6254 rq->core->core_forceidle_start = rq_clock(rq->core);
6255 rq->core->core_forceidle_occupation = occ;
6256 }
6257
6258 rq->core->core_pick_seq = rq->core->core_task_seq;
6259 next = rq->core_pick;
6260 rq->core_sched_seq = rq->core->core_pick_seq;
6261
6262 /* Something should have been selected for current CPU */
6263 WARN_ON_ONCE(!next);
6264
6265 /*
6266 * Reschedule siblings
6267 *
6268 * NOTE: L1TF -- at this point we're no longer running the old task and
6269 * sending an IPI (below) ensures the sibling will no longer be running
6270 * their task. This ensures there is no inter-sibling overlap between
6271 * non-matching user state.
6272 */
6273 for_each_cpu(i, smt_mask) {
6274 rq_i = cpu_rq(i);
6275
6276 /*
6277 * An online sibling might have gone offline before a task
6278 * could be picked for it, or it might be offline but later
6279 * happen to come online, but it's too late and nothing was
6280 * picked for it. That's Ok - it will pick tasks for itself,
6281 * so ignore it.
6282 */
6283 if (!rq_i->core_pick)
6284 continue;
6285
6286 /*
6287 * Update for new !FI->FI transitions, or if continuing to be in !FI:
6288 * fi_before fi update?
6289 * 0 0 1
6290 * 0 1 1
6291 * 1 0 1
6292 * 1 1 0
6293 */
6294 if (!(fi_before && rq->core->core_forceidle_count))
6295 task_vruntime_update(rq_i, rq_i->core_pick, !!rq->core->core_forceidle_count);
6296
6297 rq_i->core_pick->core_occupation = occ;
6298
6299 if (i == cpu) {
6300 rq_i->core_pick = NULL;
6301 rq_i->core_dl_server = NULL;
6302 continue;
6303 }
6304
6305 /* Did we break L1TF mitigation requirements? */
6306 WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick));
6307
6308 if (rq_i->curr == rq_i->core_pick) {
6309 rq_i->core_pick = NULL;
6310 rq_i->core_dl_server = NULL;
6311 continue;
6312 }
6313
6314 resched_curr(rq_i);
6315 }
6316
6317 out_set_next:
6318 put_prev_set_next_task(rq, prev, next);
6319 if (rq->core->core_forceidle_count && next == rq->idle)
6320 queue_core_balance(rq);
6321
6322 return next;
6323 }
6324
6325 static bool try_steal_cookie(int this, int that)
6326 {
6327 struct rq *dst = cpu_rq(this), *src = cpu_rq(that);
6328 struct task_struct *p;
6329 unsigned long cookie;
6330 bool success = false;
6331
6332 guard(irq)();
6333 guard(double_rq_lock)(dst, src);
6334
6335 cookie = dst->core->core_cookie;
6336 if (!cookie)
6337 return false;
6338
6339 if (dst->curr != dst->idle)
6340 return false;
6341
6342 p = sched_core_find(src, cookie);
6343 if (!p)
6344 return false;
6345
6346 do {
6347 if (p == src->core_pick || p == src->curr)
6348 goto next;
6349
6350 if (!is_cpu_allowed(p, this))
6351 goto next;
6352
6353 if (p->core_occupation > dst->idle->core_occupation)
6354 goto next;
6355 /*
6356 * sched_core_find() and sched_core_next() will ensure
6357 * that task @p is not throttled now, we also need to
6358 * check whether the runqueue of the destination CPU is
6359 * being throttled.
6360 */
6361 if (sched_task_is_throttled(p, this))
6362 goto next;
6363
6364 move_queued_task_locked(src, dst, p);
6365 resched_curr(dst);
6366
6367 success = true;
6368 break;
6369
6370 next:
6371 p = sched_core_next(p, cookie);
6372 } while (p);
6373
6374 return success;
6375 }
6376
6377 static bool steal_cookie_task(int cpu, struct sched_domain *sd)
6378 {
6379 int i;
6380
6381 for_each_cpu_wrap(i, sched_domain_span(sd), cpu + 1) {
6382 if (i == cpu)
6383 continue;
6384
6385 if (need_resched())
6386 break;
6387
6388 if (try_steal_cookie(cpu, i))
6389 return true;
6390 }
6391
6392 return false;
6393 }
6394
6395 static void sched_core_balance(struct rq *rq)
6396 {
6397 struct sched_domain *sd;
6398 int cpu = cpu_of(rq);
6399
6400 guard(preempt)();
6401 guard(rcu)();
6402
6403 raw_spin_rq_unlock_irq(rq);
6404 for_each_domain(cpu, sd) {
6405 if (need_resched())
6406 break;
6407
6408 if (steal_cookie_task(cpu, sd))
6409 break;
6410 }
6411 raw_spin_rq_lock_irq(rq);
6412 }
6413
6414 static DEFINE_PER_CPU(struct balance_callback, core_balance_head);
6415
6416 static void queue_core_balance(struct rq *rq)
6417 {
6418 if (!sched_core_enabled(rq))
6419 return;
6420
6421 if (!rq->core->core_cookie)
6422 return;
6423
6424 if (!rq->nr_running) /* not forced idle */
6425 return;
6426
6427 queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance);
6428 }
6429
6430 DEFINE_LOCK_GUARD_1(core_lock, int,
6431 sched_core_lock(*_T->lock, &_T->flags),
6432 sched_core_unlock(*_T->lock, &_T->flags),
6433 unsigned long flags)
6434
6435 static void sched_core_cpu_starting(unsigned int cpu)
6436 {
6437 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
6438 struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
6439 int t;
6440
6441 guard(core_lock)(&cpu);
6442
6443 WARN_ON_ONCE(rq->core != rq);
6444
6445 /* if we're the first, we'll be our own leader */
6446 if (cpumask_weight(smt_mask) == 1)
6447 return;
6448
6449 /* find the leader */
6450 for_each_cpu(t, smt_mask) {
6451 if (t == cpu)
6452 continue;
6453 rq = cpu_rq(t);
6454 if (rq->core == rq) {
6455 core_rq = rq;
6456 break;
6457 }
6458 }
6459
6460 if (WARN_ON_ONCE(!core_rq)) /* whoopsie */
6461 return;
6462
6463 /* install and validate core_rq */
6464 for_each_cpu(t, smt_mask) {
6465 rq = cpu_rq(t);
6466
6467 if (t == cpu)
6468 rq->core = core_rq;
6469
6470 WARN_ON_ONCE(rq->core != core_rq);
6471 }
6472 }
6473
6474 static void sched_core_cpu_deactivate(unsigned int cpu)
6475 {
6476 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
6477 struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
6478 int t;
6479
6480 guard(core_lock)(&cpu);
6481
6482 /* if we're the last man standing, nothing to do */
6483 if (cpumask_weight(smt_mask) == 1) {
6484 WARN_ON_ONCE(rq->core != rq);
6485 return;
6486 }
6487
6488 /* if we're not the leader, nothing to do */
6489 if (rq->core != rq)
6490 return;
6491
6492 /* find a new leader */
6493 for_each_cpu(t, smt_mask) {
6494 if (t == cpu)
6495 continue;
6496 core_rq = cpu_rq(t);
6497 break;
6498 }
6499
6500 if (WARN_ON_ONCE(!core_rq)) /* impossible */
6501 return;
6502
6503 /* copy the shared state to the new leader */
6504 core_rq->core_task_seq = rq->core_task_seq;
6505 core_rq->core_pick_seq = rq->core_pick_seq;
6506 core_rq->core_cookie = rq->core_cookie;
6507 core_rq->core_forceidle_count = rq->core_forceidle_count;
6508 core_rq->core_forceidle_seq = rq->core_forceidle_seq;
6509 core_rq->core_forceidle_occupation = rq->core_forceidle_occupation;
6510
6511 /*
6512 * Accounting edge for forced idle is handled in pick_next_task().
6513 * Don't need another one here, since the hotplug thread shouldn't
6514 * have a cookie.
6515 */
6516 core_rq->core_forceidle_start = 0;
6517
6518 /* install new leader */
6519 for_each_cpu(t, smt_mask) {
6520 rq = cpu_rq(t);
6521 rq->core = core_rq;
6522 }
6523 }
6524
6525 static inline void sched_core_cpu_dying(unsigned int cpu)
6526 {
6527 struct rq *rq = cpu_rq(cpu);
6528
6529 if (rq->core != rq)
6530 rq->core = rq;
6531 }
6532
6533 #else /* !CONFIG_SCHED_CORE */
6534
6535 static inline void sched_core_cpu_starting(unsigned int cpu) {}
6536 static inline void sched_core_cpu_deactivate(unsigned int cpu) {}
6537 static inline void sched_core_cpu_dying(unsigned int cpu) {}
6538
6539 static struct task_struct *
6540 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6541 {
6542 return __pick_next_task(rq, prev, rf);
6543 }
6544
6545 #endif /* CONFIG_SCHED_CORE */
6546
6547 /*
6548 * Constants for the sched_mode argument of __schedule().
6549 *
6550 * The mode argument allows RT enabled kernels to differentiate a
6551 * preemption from blocking on an 'sleeping' spin/rwlock.
6552 */
6553 #define SM_IDLE (-1)
6554 #define SM_NONE 0
6555 #define SM_PREEMPT 1
6556 #define SM_RTLOCK_WAIT 2
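
/*
 * For reference, the callers defined later in this file select these modes
 * as follows:
 *
 *	schedule()		-> __schedule_loop(SM_NONE)
 *	schedule_idle()		-> __schedule(SM_IDLE)
 *	schedule_rtlock()	-> __schedule_loop(SM_RTLOCK_WAIT)
 *	preempt_schedule*()	-> __schedule(SM_PREEMPT)
 */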
6557
6558 /*
6559 * Helper function for __schedule()
6560 *
6561 * If a task does not have signals pending, deactivate it.
6562 * Otherwise, mark the task's __state as RUNNING.
6563 */
6564 static bool try_to_block_task(struct rq *rq, struct task_struct *p,
6565 unsigned long task_state)
6566 {
6567 int flags = DEQUEUE_NOCLOCK;
6568
6569 if (signal_pending_state(task_state, p)) {
6570 WRITE_ONCE(p->__state, TASK_RUNNING);
6571 return false;
6572 }
6573
6574 p->sched_contributes_to_load =
6575 (task_state & TASK_UNINTERRUPTIBLE) &&
6576 !(task_state & TASK_NOLOAD) &&
6577 !(task_state & TASK_FROZEN);
6578
6579 if (unlikely(is_special_task_state(task_state)))
6580 flags |= DEQUEUE_SPECIAL;
6581
6582 /*
6583 * __schedule() ttwu()
6584 * prev_state = prev->state; if (p->on_rq && ...)
6585 * if (prev_state) goto out;
6586 * p->on_rq = 0; smp_acquire__after_ctrl_dep();
6587 * p->state = TASK_WAKING
6588 *
6589 * Where __schedule() and ttwu() have matching control dependencies.
6590 *
6591 * After this, schedule() must not care about p->state any more.
6592 */
6593 block_task(rq, p, flags);
6594 return true;
6595 }
6596
6597 /*
6598 * __schedule() is the main scheduler function.
6599 *
6600 * The main means of driving the scheduler and thus entering this function are:
6601 *
6602 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc.
6603 *
6604 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
6605 * paths. For example, see arch/x86/entry_64.S.
6606 *
6607 * To drive preemption between tasks, the scheduler sets the flag in timer
6608 * interrupt handler sched_tick().
6609 *
6610 * 3. Wakeups don't really cause entry into schedule(). They add a
6611 * task to the run-queue and that's it.
6612 *
6613 * Now, if the new task added to the run-queue preempts the current
6614 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
6615 * called on the nearest possible occasion:
6616 *
6617 * - If the kernel is preemptible (CONFIG_PREEMPTION=y):
6618 *
6619 * - in syscall or exception context, at the next outermost
6620 * preempt_enable(). (this might be as soon as the wake_up()'s
6621 * spin_unlock()!)
6622 *
6623 * - in IRQ context, return from interrupt-handler to
6624 * preemptible context
6625 *
6626 * - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
6627 * then at the next:
6628 *
6629 * - cond_resched() call
6630 * - explicit schedule() call
6631 * - return from syscall or exception to user-space
6632 * - return from interrupt-handler to user-space
6633 *
6634 * WARNING: must be called with preemption disabled!
6635 */
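
/*
 * Sketch of entry point 1 above (explicit blocking). The wait condition is
 * illustrative, not taken from this file:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	if (!condition)			// 'condition' is a made-up predicate
 *		schedule();		// reaches __schedule(SM_NONE)
 *	__set_current_state(TASK_RUNNING);
 */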
6636 static void __sched notrace __schedule(int sched_mode)
6637 {
6638 struct task_struct *prev, *next;
6639 /*
6640 * On PREEMPT_RT kernel, SM_RTLOCK_WAIT is noted
6641 * as a preemption by schedule_debug() and RCU.
6642 */
6643 bool preempt = sched_mode > SM_NONE;
6644 bool block = false;
6645 unsigned long *switch_count;
6646 unsigned long prev_state;
6647 struct rq_flags rf;
6648 struct rq *rq;
6649 int cpu;
6650
6651 cpu = smp_processor_id();
6652 rq = cpu_rq(cpu);
6653 prev = rq->curr;
6654
6655 schedule_debug(prev, preempt);
6656
6657 if (sched_feat(HRTICK) || sched_feat(HRTICK_DL))
6658 hrtick_clear(rq);
6659
6660 local_irq_disable();
6661 rcu_note_context_switch(preempt);
6662
6663 /*
6664 * Make sure that signal_pending_state()->signal_pending() below
6665 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
6666 * done by the caller to avoid the race with signal_wake_up():
6667 *
6668 * __set_current_state(@state) signal_wake_up()
6669 * schedule() set_tsk_thread_flag(p, TIF_SIGPENDING)
6670 * wake_up_state(p, state)
6671 * LOCK rq->lock LOCK p->pi_state
6672 * smp_mb__after_spinlock() smp_mb__after_spinlock()
6673 * if (signal_pending_state()) if (p->state & @state)
6674 *
6675 * Also, the membarrier system call requires a full memory barrier
6676 * after coming from user-space, before storing to rq->curr; this
6677 * barrier matches a full barrier in the proximity of the membarrier
6678 * system call exit.
6679 */
6680 rq_lock(rq, &rf);
6681 smp_mb__after_spinlock();
6682
6683 /* Promote REQ to ACT */
6684 rq->clock_update_flags <<= 1;
6685 update_rq_clock(rq);
6686 rq->clock_update_flags = RQCF_UPDATED;
6687
6688 switch_count = &prev->nivcsw;
6689
6690 /* Task state changes only considers SM_PREEMPT as preemption */
6691 preempt = sched_mode == SM_PREEMPT;
6692
6693 /*
6694 * We must load prev->state once (task_struct::state is volatile), such
6695 * that we form a control dependency vs deactivate_task() below.
6696 */
6697 prev_state = READ_ONCE(prev->__state);
6698 if (sched_mode == SM_IDLE) {
6699 /* SCX must consult the BPF scheduler to tell if rq is empty */
6700 if (!rq->nr_running && !scx_enabled()) {
6701 next = prev;
6702 goto picked;
6703 }
6704 } else if (!preempt && prev_state) {
6705 block = try_to_block_task(rq, prev, prev_state);
6706 switch_count = &prev->nvcsw;
6707 }
6708
6709 next = pick_next_task(rq, prev, &rf);
6710 rq_set_donor(rq, next);
6711 picked:
6712 clear_tsk_need_resched(prev);
6713 clear_preempt_need_resched();
6714 #ifdef CONFIG_SCHED_DEBUG
6715 rq->last_seen_need_resched_ns = 0;
6716 #endif
6717
6718 if (likely(prev != next)) {
6719 rq->nr_switches++;
6720 /*
6721 * RCU users of rcu_dereference(rq->curr) may not see
6722 * changes to task_struct made by pick_next_task().
6723 */
6724 RCU_INIT_POINTER(rq->curr, next);
6725 /*
6726 * The membarrier system call requires each architecture
6727 * to have a full memory barrier after updating
6728 * rq->curr, before returning to user-space.
6729 *
6730 * Here are the schemes providing that barrier on the
6731 * various architectures:
6732 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC,
6733 * RISC-V. switch_mm() relies on membarrier_arch_switch_mm()
6734 * on PowerPC and on RISC-V.
6735 * - finish_lock_switch() for weakly-ordered
6736 * architectures where spin_unlock is a full barrier,
6737 * - switch_to() for arm64 (weakly-ordered, spin_unlock
6738 * is a RELEASE barrier),
6739 *
6740 * The barrier matches a full barrier in the proximity of
6741 * the membarrier system call entry.
6742 *
6743 * On RISC-V, this barrier pairing is also needed for the
6744 * SYNC_CORE command when switching between processes, cf.
6745 * the inline comments in membarrier_arch_switch_mm().
6746 */
6747 ++*switch_count;
6748
6749 migrate_disable_switch(rq, prev);
6750 psi_account_irqtime(rq, prev, next);
6751 psi_sched_switch(prev, next, block);
6752
6753 trace_sched_switch(preempt, prev, next, prev_state);
6754
6755 /* Also unlocks the rq: */
6756 rq = context_switch(rq, prev, next, &rf);
6757 } else {
6758 rq_unpin_lock(rq, &rf);
6759 __balance_callbacks(rq);
6760 raw_spin_rq_unlock_irq(rq);
6761 }
6762 }
6763
6764 void __noreturn do_task_dead(void)
6765 {
6766 /* Causes final put_task_struct in finish_task_switch(): */
6767 set_special_state(TASK_DEAD);
6768
6769 /* Tell freezer to ignore us: */
6770 current->flags |= PF_NOFREEZE;
6771
6772 __schedule(SM_NONE);
6773 BUG();
6774
6775 /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
6776 for (;;)
6777 cpu_relax();
6778 }
6779
6780 static inline void sched_submit_work(struct task_struct *tsk)
6781 {
6782 static DEFINE_WAIT_OVERRIDE_MAP(sched_map, LD_WAIT_CONFIG);
6783 unsigned int task_flags;
6784
6785 /*
6786 * Establish LD_WAIT_CONFIG context to ensure none of the code called
6787 * will use a blocking primitive -- which would lead to recursion.
6788 */
6789 lock_map_acquire_try(&sched_map);
6790
6791 task_flags = tsk->flags;
6792 /*
6793 * If a worker goes to sleep, notify and ask workqueue whether it
6794 * wants to wake up a task to maintain concurrency.
6795 */
6796 if (task_flags & PF_WQ_WORKER)
6797 wq_worker_sleeping(tsk);
6798 else if (task_flags & PF_IO_WORKER)
6799 io_wq_worker_sleeping(tsk);
6800
6801 /*
6802 * spinlock and rwlock must not flush block requests. This will
6803 * deadlock if the callback attempts to acquire a lock which is
6804 * already acquired.
6805 */
6806 SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT);
6807
6808 /*
6809 * If we are going to sleep and we have plugged IO queued,
6810 * make sure to submit it to avoid deadlocks.
6811 */
6812 blk_flush_plug(tsk->plug, true);
6813
6814 lock_map_release(&sched_map);
6815 }
6816
6817 static void sched_update_worker(struct task_struct *tsk)
6818 {
6819 if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER | PF_BLOCK_TS)) {
6820 if (tsk->flags & PF_BLOCK_TS)
6821 blk_plug_invalidate_ts(tsk);
6822 if (tsk->flags & PF_WQ_WORKER)
6823 wq_worker_running(tsk);
6824 else if (tsk->flags & PF_IO_WORKER)
6825 io_wq_worker_running(tsk);
6826 }
6827 }
6828
6829 static __always_inline void __schedule_loop(int sched_mode)
6830 {
6831 do {
6832 preempt_disable();
6833 __schedule(sched_mode);
6834 sched_preempt_enable_no_resched();
6835 } while (need_resched());
6836 }
6837
6838 asmlinkage __visible void __sched schedule(void)
6839 {
6840 struct task_struct *tsk = current;
6841
6842 #ifdef CONFIG_RT_MUTEXES
6843 lockdep_assert(!tsk->sched_rt_mutex);
6844 #endif
6845
6846 if (!task_is_running(tsk))
6847 sched_submit_work(tsk);
6848 __schedule_loop(SM_NONE);
6849 sched_update_worker(tsk);
6850 }
6851 EXPORT_SYMBOL(schedule);
6852
6853 /*
6854 * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
6855 * state (have scheduled out non-voluntarily) by making sure that all
6856 * tasks have either left the run queue or have gone into user space.
6857 * As idle tasks do not do either, they must not ever be preempted
6858 * (schedule out non-voluntarily).
6859 *
6860 * schedule_idle() is similar to schedule_preempt_disabled() except that it
6861 * never enables preemption because it does not call sched_submit_work().
6862 */
6863 void __sched schedule_idle(void)
6864 {
6865 /*
6866 * As this skips calling sched_submit_work(), which the idle task does
6867 * regardless because that function is a NOP when the task is in a
6868 * TASK_RUNNING state, make sure this isn't used anywhere the
6869 * current task can be in any other state. Note, idle is always in the
6870 * TASK_RUNNING state.
6871 */
6872 WARN_ON_ONCE(current->__state);
6873 do {
6874 __schedule(SM_IDLE);
6875 } while (need_resched());
6876 }
6877
6878 #if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK)
6879 asmlinkage __visible void __sched schedule_user(void)
6880 {
6881 /*
6882 * If we come here after a random call to set_need_resched(),
6883 * or we have been woken up remotely but the IPI has not yet arrived,
6884 * we haven't yet exited the RCU idle mode. Do it here manually until
6885 * we find a better solution.
6886 *
6887 * NB: There are buggy callers of this function. Ideally we
6888 * should warn if prev_state != CT_STATE_USER, but that will trigger
6889 * too frequently to make sense yet.
6890 */
6891 enum ctx_state prev_state = exception_enter();
6892 schedule();
6893 exception_exit(prev_state);
6894 }
6895 #endif
6896
6897 /**
6898 * schedule_preempt_disabled - called with preemption disabled
6899 *
6900 * Returns with preemption disabled. Note: preempt_count must be 1
6901 */
6902 void __sched schedule_preempt_disabled(void)
6903 {
6904 sched_preempt_enable_no_resched();
6905 schedule();
6906 preempt_disable();
6907 }
6908
6909 #ifdef CONFIG_PREEMPT_RT
6910 void __sched notrace schedule_rtlock(void)
6911 {
6912 __schedule_loop(SM_RTLOCK_WAIT);
6913 }
6914 NOKPROBE_SYMBOL(schedule_rtlock);
6915 #endif
6916
6917 static void __sched notrace preempt_schedule_common(void)
6918 {
6919 do {
6920 /*
6921 * Because the function tracer can trace preempt_count_sub()
6922 * and it also uses preempt_enable/disable_notrace(), if
6923 * NEED_RESCHED is set, the preempt_enable_notrace() called
6924 * by the function tracer will call this function again and
6925 * cause infinite recursion.
6926 *
6927 * Preemption must be disabled here before the function
6928 * tracer can trace. Break up preempt_disable() into two
6929 * calls. One to disable preemption without fear of being
6930 * traced. The other to still record the preemption latency,
6931 * which can also be traced by the function tracer.
6932 */
6933 preempt_disable_notrace();
6934 preempt_latency_start(1);
6935 __schedule(SM_PREEMPT);
6936 preempt_latency_stop(1);
6937 preempt_enable_no_resched_notrace();
6938
6939 /*
6940 * Check again in case we missed a preemption opportunity
6941 * between schedule and now.
6942 */
6943 } while (need_resched());
6944 }
6945
6946 #ifdef CONFIG_PREEMPTION
6947 /*
6948 * This is the entry point to schedule() from in-kernel preemption
6949 * off of preempt_enable.
6950 */
6951 asmlinkage __visible void __sched notrace preempt_schedule(void)
6952 {
6953 /*
6954 * If there is a non-zero preempt_count or interrupts are disabled,
6955 * we do not want to preempt the current task. Just return..
6956 */
6957 if (likely(!preemptible()))
6958 return;
6959 preempt_schedule_common();
6960 }
6961 NOKPROBE_SYMBOL(preempt_schedule);
6962 EXPORT_SYMBOL(preempt_schedule);
6963
6964 #ifdef CONFIG_PREEMPT_DYNAMIC
6965 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
6966 #ifndef preempt_schedule_dynamic_enabled
6967 #define preempt_schedule_dynamic_enabled preempt_schedule
6968 #define preempt_schedule_dynamic_disabled NULL
6969 #endif
6970 DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
6971 EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
6972 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
6973 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);
6974 void __sched notrace dynamic_preempt_schedule(void)
6975 {
6976 if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
6977 return;
6978 preempt_schedule();
6979 }
6980 NOKPROBE_SYMBOL(dynamic_preempt_schedule);
6981 EXPORT_SYMBOL(dynamic_preempt_schedule);
6982 #endif
6983 #endif
6984
6985 /**
6986 * preempt_schedule_notrace - preempt_schedule called by tracing
6987 *
6988 * The tracing infrastructure uses preempt_enable_notrace to prevent
6989 * recursion and tracing preempt enabling caused by the tracing
6990 * infrastructure itself. But as tracing can happen in areas coming
6991 * from userspace or just about to enter userspace, a preempt enable
6992 * can occur before user_exit() is called. This will cause the scheduler
6993 * to be called when the system is still in usermode.
6994 *
6995 * To prevent this, preempt_enable_notrace() will use this function
6996 * instead of preempt_schedule() to exit user context if needed before
6997 * calling the scheduler.
6998 */
6999 asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
7000 {
7001 enum ctx_state prev_ctx;
7002
7003 if (likely(!preemptible()))
7004 return;
7005
7006 do {
7007 /*
7008 * Because the function tracer can trace preempt_count_sub()
7009 * and it also uses preempt_enable/disable_notrace(), if
7010 * NEED_RESCHED is set, the preempt_enable_notrace() called
7011 * by the function tracer will call this function again and
7012 * cause infinite recursion.
7013 *
7014 * Preemption must be disabled here before the function
7015 * tracer can trace. Break up preempt_disable() into two
7016 * calls. One to disable preemption without fear of being
7017 * traced. The other to still record the preemption latency,
7018 * which can also be traced by the function tracer.
7019 */
7020 preempt_disable_notrace();
7021 preempt_latency_start(1);
7022 /*
7023 * Needs preempt disabled in case user_exit() is traced
7024 * and the tracer calls preempt_enable_notrace() causing
7025 * an infinite recursion.
7026 */
7027 prev_ctx = exception_enter();
7028 __schedule(SM_PREEMPT);
7029 exception_exit(prev_ctx);
7030
7031 preempt_latency_stop(1);
7032 preempt_enable_no_resched_notrace();
7033 } while (need_resched());
7034 }
7035 EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
7036
7037 #ifdef CONFIG_PREEMPT_DYNAMIC
7038 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
7039 #ifndef preempt_schedule_notrace_dynamic_enabled
7040 #define preempt_schedule_notrace_dynamic_enabled preempt_schedule_notrace
7041 #define preempt_schedule_notrace_dynamic_disabled NULL
7042 #endif
7043 DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
7044 EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
7045 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7046 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace);
7047 void __sched notrace dynamic_preempt_schedule_notrace(void)
7048 {
7049 if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace))
7050 return;
7051 preempt_schedule_notrace();
7052 }
7053 NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace);
7054 EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
7055 #endif
7056 #endif
7057
7058 #endif /* CONFIG_PREEMPTION */
7059
7060 /*
7061 * This is the entry point to schedule() from kernel preemption
7062 * off of IRQ context.
7063 * Note that this is called and returns with IRQs disabled. This will
7064 * protect us against recursive calling from IRQ contexts.
7065 */
7066 asmlinkage __visible void __sched preempt_schedule_irq(void)
7067 {
7068 enum ctx_state prev_state;
7069
7070 /* Catch callers which need to be fixed */
7071 BUG_ON(preempt_count() || !irqs_disabled());
7072
7073 prev_state = exception_enter();
7074
7075 do {
7076 preempt_disable();
7077 local_irq_enable();
7078 __schedule(SM_PREEMPT);
7079 local_irq_disable();
7080 sched_preempt_enable_no_resched();
7081 } while (need_resched());
7082
7083 exception_exit(prev_state);
7084 }
7085
7086 int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
7087 void *key)
7088 {
7089 WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~(WF_SYNC|WF_CURRENT_CPU));
7090 return try_to_wake_up(curr->private, mode, wake_flags);
7091 }
7092 EXPORT_SYMBOL(default_wake_function);
7093
7094 const struct sched_class *__setscheduler_class(int policy, int prio)
7095 {
7096 if (dl_prio(prio))
7097 return &dl_sched_class;
7098
7099 if (rt_prio(prio))
7100 return &rt_sched_class;
7101
7102 #ifdef CONFIG_SCHED_CLASS_EXT
7103 if (task_should_scx(policy))
7104 return &ext_sched_class;
7105 #endif
7106
7107 return &fair_sched_class;
7108 }
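
/*
 * Illustrative mapping (sketch, not a new API): a deadline-range prio picks
 * the DL class, an RT-range prio the RT class, and everything else falls
 * through to fair unless SCX claims the policy. For example:
 *
 *	const struct sched_class *class;
 *
 *	class = __setscheduler_class(SCHED_NORMAL, DEFAULT_PRIO);
 *	// typically &fair_sched_class (or &ext_sched_class when SCX has
 *	// switched all tasks)
 */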
7109
7110 #ifdef CONFIG_RT_MUTEXES
7111
7112 /*
7113 * Would be more useful with typeof()/auto_type but they don't mix with
7114 * bit-fields. Since it's a local thing, use int. Keep the generic-sounding
7115 * name such that if someone were to implement this function we get to compare
7116 * notes.
7117 */
7118 #define fetch_and_set(x, v) ({ int _x = (x); (x) = (v); _x; })
7119
7120 void rt_mutex_pre_schedule(void)
7121 {
7122 lockdep_assert(!fetch_and_set(current->sched_rt_mutex, 1));
7123 sched_submit_work(current);
7124 }
7125
7126 void rt_mutex_schedule(void)
7127 {
7128 lockdep_assert(current->sched_rt_mutex);
7129 __schedule_loop(SM_NONE);
7130 }
7131
7132 void rt_mutex_post_schedule(void)
7133 {
7134 sched_update_worker(current);
7135 lockdep_assert(fetch_and_set(current->sched_rt_mutex, 0));
7136 }
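
/*
 * Pairing sketch (illustrative; the real call sites live in the rt_mutex
 * code, not here): a sleeping-lock slowpath is expected to bracket its
 * blocking section like this, which the lockdep_assert()s above enforce:
 *
 *	rt_mutex_pre_schedule();
 *	...				// enqueue waiter, try the lock
 *	rt_mutex_schedule();		// may block
 *	...
 *	rt_mutex_post_schedule();
 */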
7137
7138 /*
7139 * rt_mutex_setprio - set the current priority of a task
7140 * @p: task to boost
7141 * @pi_task: donor task
7142 *
7143 * This function changes the 'effective' priority of a task. It does
7144 * not touch ->normal_prio like __setscheduler().
7145 *
7146 * Used by the rt_mutex code to implement priority inheritance
7147 * logic. Call site only calls if the priority of the task changed.
7148 */
7149 void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
7150 {
7151 int prio, oldprio, queued, running, queue_flag =
7152 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
7153 const struct sched_class *prev_class, *next_class;
7154 struct rq_flags rf;
7155 struct rq *rq;
7156
7157 /* XXX used to be waiter->prio, not waiter->task->prio */
7158 prio = __rt_effective_prio(pi_task, p->normal_prio);
7159
7160 /*
7161 * If nothing changed; bail early.
7162 */
7163 if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio))
7164 return;
7165
7166 rq = __task_rq_lock(p, &rf);
7167 update_rq_clock(rq);
7168 /*
7169 * Set under pi_lock && rq->lock, such that the value can be used under
7170 * either lock.
7171 *
7172 * Note that there is a load of trickery involved in making this pointer cache work
7173 * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
7174 * ensure a task is de-boosted (pi_task is set to NULL) before the
7175 * task is allowed to run again (and can exit). This ensures the pointer
7176 * points to a blocked task -- which guarantees the task is present.
7177 */
7178 p->pi_top_task = pi_task;
7179
7180 /*
7181 * For FIFO/RR we only need to set prio, if that matches we're done.
7182 */
7183 if (prio == p->prio && !dl_prio(prio))
7184 goto out_unlock;
7185
7186 /*
7187 * Idle task boosting is a no-no in general. There is one
7188 * exception, when PREEMPT_RT and NOHZ is active:
7189 *
7190 * The idle task calls get_next_timer_interrupt() and holds
7191 * the timer wheel base->lock on the CPU and another CPU wants
7192 * to access the timer (probably to cancel it). We can safely
7193 * ignore the boosting request, as the idle CPU runs this code
7194 * with interrupts disabled and will complete the lock
7195 * protected section without being interrupted. So there is no
7196 * real need to boost.
7197 */
7198 if (unlikely(p == rq->idle)) {
7199 WARN_ON(p != rq->curr);
7200 WARN_ON(p->pi_blocked_on);
7201 goto out_unlock;
7202 }
7203
7204 trace_sched_pi_setprio(p, pi_task);
7205 oldprio = p->prio;
7206
7207 if (oldprio == prio)
7208 queue_flag &= ~DEQUEUE_MOVE;
7209
7210 prev_class = p->sched_class;
7211 next_class = __setscheduler_class(p->policy, prio);
7212
7213 if (prev_class != next_class && p->se.sched_delayed)
7214 dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED | DEQUEUE_NOCLOCK);
7215
7216 queued = task_on_rq_queued(p);
7217 running = task_current_donor(rq, p);
7218 if (queued)
7219 dequeue_task(rq, p, queue_flag);
7220 if (running)
7221 put_prev_task(rq, p);
7222
7223 /*
7224 * Boosting conditions are:
7225 * 1. -rt task is running and holds mutex A
7226 * --> -dl task blocks on mutex A
7227 *
7228 * 2. -dl task is running and holds mutex A
7229 * --> -dl task blocks on mutex A and could preempt the
7230 * running task
7231 */
7232 if (dl_prio(prio)) {
7233 if (!dl_prio(p->normal_prio) ||
7234 (pi_task && dl_prio(pi_task->prio) &&
7235 dl_entity_preempt(&pi_task->dl, &p->dl))) {
7236 p->dl.pi_se = pi_task->dl.pi_se;
7237 queue_flag |= ENQUEUE_REPLENISH;
7238 } else {
7239 p->dl.pi_se = &p->dl;
7240 }
7241 } else if (rt_prio(prio)) {
7242 if (dl_prio(oldprio))
7243 p->dl.pi_se = &p->dl;
7244 if (oldprio < prio)
7245 queue_flag |= ENQUEUE_HEAD;
7246 } else {
7247 if (dl_prio(oldprio))
7248 p->dl.pi_se = &p->dl;
7249 if (rt_prio(oldprio))
7250 p->rt.timeout = 0;
7251 }
7252
7253 p->sched_class = next_class;
7254 p->prio = prio;
7255
7256 check_class_changing(rq, p, prev_class);
7257
7258 if (queued)
7259 enqueue_task(rq, p, queue_flag);
7260 if (running)
7261 set_next_task(rq, p);
7262
7263 check_class_changed(rq, p, prev_class, oldprio);
7264 out_unlock:
7265 /* Prevent rq from going away on us: */
7266 preempt_disable();
7267
7268 rq_unpin_lock(rq, &rf);
7269 __balance_callbacks(rq);
7270 raw_spin_rq_unlock(rq);
7271
7272 preempt_enable();
7273 }
7274 #endif
7275
7276 #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
7277 int __sched __cond_resched(void)
7278 {
7279 if (should_resched(0)) {
7280 preempt_schedule_common();
7281 return 1;
7282 }
7283 /*
7284 * In preemptible kernels, ->rcu_read_lock_nesting tells the tick
7285 * whether the current CPU is in an RCU read-side critical section,
7286 * so the tick can report quiescent states even for CPUs looping
7287 * in kernel context. In contrast, in non-preemptible kernels,
7288 * RCU readers leave no in-memory hints, which means that CPU-bound
7289 * processes executing in kernel context might never report an
7290 * RCU quiescent state. Therefore, the following code causes
7291 * cond_resched() to report a quiescent state, but only when RCU
7292 * is in urgent need of one.
7293 */
7294 #ifndef CONFIG_PREEMPT_RCU
7295 rcu_all_qs();
7296 #endif
7297 return 0;
7298 }
7299 EXPORT_SYMBOL(__cond_resched);
7300 #endif
7301
7302 #ifdef CONFIG_PREEMPT_DYNAMIC
7303 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
7304 #define cond_resched_dynamic_enabled __cond_resched
7305 #define cond_resched_dynamic_disabled ((void *)&__static_call_return0)
7306 DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
7307 EXPORT_STATIC_CALL_TRAMP(cond_resched);
7308
7309 #define might_resched_dynamic_enabled __cond_resched
7310 #define might_resched_dynamic_disabled ((void *)&__static_call_return0)
7311 DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
7312 EXPORT_STATIC_CALL_TRAMP(might_resched);
7313 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7314 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
7315 int __sched dynamic_cond_resched(void)
7316 {
7317 klp_sched_try_switch();
7318 if (!static_branch_unlikely(&sk_dynamic_cond_resched))
7319 return 0;
7320 return __cond_resched();
7321 }
7322 EXPORT_SYMBOL(dynamic_cond_resched);
7323
7324 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched);
7325 int __sched dynamic_might_resched(void)
7326 {
7327 if (!static_branch_unlikely(&sk_dynamic_might_resched))
7328 return 0;
7329 return __cond_resched();
7330 }
7331 EXPORT_SYMBOL(dynamic_might_resched);
7332 #endif
7333 #endif
7334
7335 /*
7336 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
7337 * call schedule, and on return reacquire the lock.
7338 *
7339 * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
7340 * operations here to prevent schedule() from being called twice (once via
7341 * spin_unlock(), once by hand).
7342 */
7343 int __cond_resched_lock(spinlock_t *lock)
7344 {
7345 int resched = should_resched(PREEMPT_LOCK_OFFSET);
7346 int ret = 0;
7347
7348 lockdep_assert_held(lock);
7349
7350 if (spin_needbreak(lock) || resched) {
7351 spin_unlock(lock);
7352 if (!_cond_resched())
7353 cpu_relax();
7354 ret = 1;
7355 spin_lock(lock);
7356 }
7357 return ret;
7358 }
7359 EXPORT_SYMBOL(__cond_resched_lock);
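/*
 * Illustrative usage sketch (assumption, not kernel code): breaking up a long
 * spinlock-protected loop via the cond_resched_lock() wrapper around the
 * function above. my_lock, my_more_work() and my_do_one_unit() are
 * hypothetical placeholders.
 *
 *	spin_lock(&my_lock);
 *	while (my_more_work()) {
 *		my_do_one_unit();
 *		cond_resched_lock(&my_lock);	// may drop and re-take my_lock
 *	}
 *	spin_unlock(&my_lock);
 */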
7360
7361 int __cond_resched_rwlock_read(rwlock_t *lock)
7362 {
7363 int resched = should_resched(PREEMPT_LOCK_OFFSET);
7364 int ret = 0;
7365
7366 lockdep_assert_held_read(lock);
7367
7368 if (rwlock_needbreak(lock) || resched) {
7369 read_unlock(lock);
7370 if (!_cond_resched())
7371 cpu_relax();
7372 ret = 1;
7373 read_lock(lock);
7374 }
7375 return ret;
7376 }
7377 EXPORT_SYMBOL(__cond_resched_rwlock_read);
7378
7379 int __cond_resched_rwlock_write(rwlock_t *lock)
7380 {
7381 int resched = should_resched(PREEMPT_LOCK_OFFSET);
7382 int ret = 0;
7383
7384 lockdep_assert_held_write(lock);
7385
7386 if (rwlock_needbreak(lock) || resched) {
7387 write_unlock(lock);
7388 if (!_cond_resched())
7389 cpu_relax();
7390 ret = 1;
7391 write_lock(lock);
7392 }
7393 return ret;
7394 }
7395 EXPORT_SYMBOL(__cond_resched_rwlock_write);
7396
7397 #ifdef CONFIG_PREEMPT_DYNAMIC
7398
7399 #ifdef CONFIG_GENERIC_ENTRY
7400 #include <linux/entry-common.h>
7401 #endif
7402
7403 /*
7404 * SC:cond_resched
7405 * SC:might_resched
7406 * SC:preempt_schedule
7407 * SC:preempt_schedule_notrace
7408 * SC:irqentry_exit_cond_resched
7409 *
7410 *
7411 * NONE:
7412 * cond_resched <- __cond_resched
7413 * might_resched <- RET0
7414 * preempt_schedule <- NOP
7415 * preempt_schedule_notrace <- NOP
7416 * irqentry_exit_cond_resched <- NOP
7417 * dynamic_preempt_lazy <- false
7418 *
7419 * VOLUNTARY:
7420 * cond_resched <- __cond_resched
7421 * might_resched <- __cond_resched
7422 * preempt_schedule <- NOP
7423 * preempt_schedule_notrace <- NOP
7424 * irqentry_exit_cond_resched <- NOP
7425 * dynamic_preempt_lazy <- false
7426 *
7427 * FULL:
7428 * cond_resched <- RET0
7429 * might_resched <- RET0
7430 * preempt_schedule <- preempt_schedule
7431 * preempt_schedule_notrace <- preempt_schedule_notrace
7432 * irqentry_exit_cond_resched <- irqentry_exit_cond_resched
7433 * dynamic_preempt_lazy <- false
7434 *
7435 * LAZY:
7436 * cond_resched <- RET0
7437 * might_resched <- RET0
7438 * preempt_schedule <- preempt_schedule
7439 * preempt_schedule_notrace <- preempt_schedule_notrace
7440 * irqentry_exit_cond_resched <- irqentry_exit_cond_resched
7441 * dynamic_preempt_lazy <- true
7442 */
7443
7444 enum {
7445 preempt_dynamic_undefined = -1,
7446 preempt_dynamic_none,
7447 preempt_dynamic_voluntary,
7448 preempt_dynamic_full,
7449 preempt_dynamic_lazy,
7450 };
7451
7452 int preempt_dynamic_mode = preempt_dynamic_undefined;
7453
7454 int sched_dynamic_mode(const char *str)
7455 {
7456 #ifndef CONFIG_PREEMPT_RT
7457 if (!strcmp(str, "none"))
7458 return preempt_dynamic_none;
7459
7460 if (!strcmp(str, "voluntary"))
7461 return preempt_dynamic_voluntary;
7462 #endif
7463
7464 if (!strcmp(str, "full"))
7465 return preempt_dynamic_full;
7466
7467 #ifdef CONFIG_ARCH_HAS_PREEMPT_LAZY
7468 if (!strcmp(str, "lazy"))
7469 return preempt_dynamic_lazy;
7470 #endif
7471
7472 return -EINVAL;
7473 }
7474
7475 #define preempt_dynamic_key_enable(f) static_key_enable(&sk_dynamic_##f.key)
7476 #define preempt_dynamic_key_disable(f) static_key_disable(&sk_dynamic_##f.key)
7477
7478 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
7479 #define preempt_dynamic_enable(f) static_call_update(f, f##_dynamic_enabled)
7480 #define preempt_dynamic_disable(f) static_call_update(f, f##_dynamic_disabled)
7481 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7482 #define preempt_dynamic_enable(f) preempt_dynamic_key_enable(f)
7483 #define preempt_dynamic_disable(f) preempt_dynamic_key_disable(f)
7484 #else
7485 #error "Unsupported PREEMPT_DYNAMIC mechanism"
7486 #endif
7487
7488 static DEFINE_MUTEX(sched_dynamic_mutex);
7489 static bool klp_override;
7490
7491 static void __sched_dynamic_update(int mode)
7492 {
7493 /*
7494 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
7495 * the ZERO state, which is invalid.
7496 */
7497 if (!klp_override)
7498 preempt_dynamic_enable(cond_resched);
7499 preempt_dynamic_enable(might_resched);
7500 preempt_dynamic_enable(preempt_schedule);
7501 preempt_dynamic_enable(preempt_schedule_notrace);
7502 preempt_dynamic_enable(irqentry_exit_cond_resched);
7503 preempt_dynamic_key_disable(preempt_lazy);
7504
7505 switch (mode) {
7506 case preempt_dynamic_none:
7507 if (!klp_override)
7508 preempt_dynamic_enable(cond_resched);
7509 preempt_dynamic_disable(might_resched);
7510 preempt_dynamic_disable(preempt_schedule);
7511 preempt_dynamic_disable(preempt_schedule_notrace);
7512 preempt_dynamic_disable(irqentry_exit_cond_resched);
7513 preempt_dynamic_key_disable(preempt_lazy);
7514 if (mode != preempt_dynamic_mode)
7515 pr_info("Dynamic Preempt: none\n");
7516 break;
7517
7518 case preempt_dynamic_voluntary:
7519 if (!klp_override)
7520 preempt_dynamic_enable(cond_resched);
7521 preempt_dynamic_enable(might_resched);
7522 preempt_dynamic_disable(preempt_schedule);
7523 preempt_dynamic_disable(preempt_schedule_notrace);
7524 preempt_dynamic_disable(irqentry_exit_cond_resched);
7525 preempt_dynamic_key_disable(preempt_lazy);
7526 if (mode != preempt_dynamic_mode)
7527 pr_info("Dynamic Preempt: voluntary\n");
7528 break;
7529
7530 case preempt_dynamic_full:
7531 if (!klp_override)
7532 preempt_dynamic_disable(cond_resched);
7533 preempt_dynamic_disable(might_resched);
7534 preempt_dynamic_enable(preempt_schedule);
7535 preempt_dynamic_enable(preempt_schedule_notrace);
7536 preempt_dynamic_enable(irqentry_exit_cond_resched);
7537 preempt_dynamic_key_disable(preempt_lazy);
7538 if (mode != preempt_dynamic_mode)
7539 pr_info("Dynamic Preempt: full\n");
7540 break;
7541
7542 case preempt_dynamic_lazy:
7543 if (!klp_override)
7544 preempt_dynamic_disable(cond_resched);
7545 preempt_dynamic_disable(might_resched);
7546 preempt_dynamic_enable(preempt_schedule);
7547 preempt_dynamic_enable(preempt_schedule_notrace);
7548 preempt_dynamic_enable(irqentry_exit_cond_resched);
7549 preempt_dynamic_key_enable(preempt_lazy);
7550 if (mode != preempt_dynamic_mode)
7551 pr_info("Dynamic Preempt: lazy\n");
7552 break;
7553 }
7554
7555 preempt_dynamic_mode = mode;
7556 }
7557
7558 void sched_dynamic_update(int mode)
7559 {
7560 mutex_lock(&sched_dynamic_mutex);
7561 __sched_dynamic_update(mode);
7562 mutex_unlock(&sched_dynamic_mutex);
7563 }
7564
7565 #ifdef CONFIG_HAVE_PREEMPT_DYNAMIC_CALL
7566
7567 static int klp_cond_resched(void)
7568 {
7569 __klp_sched_try_switch();
7570 return __cond_resched();
7571 }
7572
7573 void sched_dynamic_klp_enable(void)
7574 {
7575 mutex_lock(&sched_dynamic_mutex);
7576
7577 klp_override = true;
7578 static_call_update(cond_resched, klp_cond_resched);
7579
7580 mutex_unlock(&sched_dynamic_mutex);
7581 }
7582
7583 void sched_dynamic_klp_disable(void)
7584 {
7585 mutex_lock(&sched_dynamic_mutex);
7586
7587 klp_override = false;
7588 __sched_dynamic_update(preempt_dynamic_mode);
7589
7590 mutex_unlock(&sched_dynamic_mutex);
7591 }
7592
7593 #endif /* CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
7594
7595 static int __init setup_preempt_mode(char *str)
7596 {
7597 int mode = sched_dynamic_mode(str);
7598 if (mode < 0) {
7599 pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
7600 return 0;
7601 }
7602
7603 sched_dynamic_update(mode);
7604 return 1;
7605 }
7606 __setup("preempt=", setup_preempt_mode);
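/*
 * Illustrative usage note (not new code): the preemption model accepted by
 * sched_dynamic_mode() above can be chosen on the kernel command line, e.g.
 *
 *	preempt=none | preempt=voluntary | preempt=full | preempt=lazy
 *
 * and, on kernels with debugfs available, is typically switched at runtime
 * through /sys/kernel/debug/sched/preempt, which ends up calling
 * sched_dynamic_update().
 */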
7607
7608 static void __init preempt_dynamic_init(void)
7609 {
7610 if (preempt_dynamic_mode == preempt_dynamic_undefined) {
7611 if (IS_ENABLED(CONFIG_PREEMPT_NONE)) {
7612 sched_dynamic_update(preempt_dynamic_none);
7613 } else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) {
7614 sched_dynamic_update(preempt_dynamic_voluntary);
7615 } else if (IS_ENABLED(CONFIG_PREEMPT_LAZY)) {
7616 sched_dynamic_update(preempt_dynamic_lazy);
7617 } else {
7618 /* Default static call setting, nothing to do */
7619 WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT));
7620 preempt_dynamic_mode = preempt_dynamic_full;
7621 pr_info("Dynamic Preempt: full\n");
7622 }
7623 }
7624 }
7625
7626 #define PREEMPT_MODEL_ACCESSOR(mode) \
7627 bool preempt_model_##mode(void) \
7628 { \
7629 WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \
7630 return preempt_dynamic_mode == preempt_dynamic_##mode; \
7631 } \
7632 EXPORT_SYMBOL_GPL(preempt_model_##mode)
7633
7634 PREEMPT_MODEL_ACCESSOR(none);
7635 PREEMPT_MODEL_ACCESSOR(voluntary);
7636 PREEMPT_MODEL_ACCESSOR(full);
7637 PREEMPT_MODEL_ACCESSOR(lazy);
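/*
 * For reference, a hand expansion of PREEMPT_MODEL_ACCESSOR(full) -- purely
 * illustrative; the macro above is what actually gets built:
 *
 *	bool preempt_model_full(void)
 *	{
 *		WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined);
 *		return preempt_dynamic_mode == preempt_dynamic_full;
 *	}
 *	EXPORT_SYMBOL_GPL(preempt_model_full);
 */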
7638
7639 #else /* !CONFIG_PREEMPT_DYNAMIC: */
7640
7641 static inline void preempt_dynamic_init(void) { }
7642
7643 #endif /* CONFIG_PREEMPT_DYNAMIC */
7644
7645 int io_schedule_prepare(void)
7646 {
7647 int old_iowait = current->in_iowait;
7648
7649 current->in_iowait = 1;
7650 blk_flush_plug(current->plug, true);
7651 return old_iowait;
7652 }
7653
7654 void io_schedule_finish(int token)
7655 {
7656 current->in_iowait = token;
7657 }
7658
7659 /*
7660 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
7661 * that process accounting knows that this is a task in IO wait state.
7662 */
7663 long __sched io_schedule_timeout(long timeout)
7664 {
7665 int token;
7666 long ret;
7667
7668 token = io_schedule_prepare();
7669 ret = schedule_timeout(timeout);
7670 io_schedule_finish(token);
7671
7672 return ret;
7673 }
7674 EXPORT_SYMBOL(io_schedule_timeout);
7675
7676 void __sched io_schedule(void)
7677 {
7678 int token;
7679
7680 token = io_schedule_prepare();
7681 schedule();
7682 io_schedule_finish(token);
7683 }
7684 EXPORT_SYMBOL(io_schedule);
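/*
 * Illustrative sketch (assumption, not kernel code): the prepare/finish token
 * pattern used by io_schedule() and io_schedule_timeout() above can also be
 * open-coded around a custom sleep that should be accounted as I/O wait.
 * my_wait_for_io() is a hypothetical placeholder.
 *
 *	int tok = io_schedule_prepare();
 *	my_wait_for_io();
 *	io_schedule_finish(tok);
 */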
7685
7686 void sched_show_task(struct task_struct *p)
7687 {
7688 unsigned long free;
7689 int ppid;
7690
7691 if (!try_get_task_stack(p))
7692 return;
7693
7694 pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
7695
7696 if (task_is_running(p))
7697 pr_cont(" running task ");
7698 free = stack_not_used(p);
7699 ppid = 0;
7700 rcu_read_lock();
7701 if (pid_alive(p))
7702 ppid = task_pid_nr(rcu_dereference(p->real_parent));
7703 rcu_read_unlock();
7704 pr_cont(" stack:%-5lu pid:%-5d tgid:%-5d ppid:%-6d flags:0x%08lx\n",
7705 free, task_pid_nr(p), task_tgid_nr(p),
7706 ppid, read_task_thread_flags(p));
7707
7708 print_worker_info(KERN_INFO, p);
7709 print_stop_info(KERN_INFO, p);
7710 print_scx_info(KERN_INFO, p);
7711 show_stack(p, NULL, KERN_INFO);
7712 put_task_stack(p);
7713 }
7714 EXPORT_SYMBOL_GPL(sched_show_task);
7715
7716 static inline bool
7717 state_filter_match(unsigned long state_filter, struct task_struct *p)
7718 {
7719 unsigned int state = READ_ONCE(p->__state);
7720
7721 /* no filter, everything matches */
7722 if (!state_filter)
7723 return true;
7724
7725 /* filter, but doesn't match */
7726 if (!(state & state_filter))
7727 return false;
7728
7729 /*
7730 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
7731 * TASK_KILLABLE).
7732 */
7733 if (state_filter == TASK_UNINTERRUPTIBLE && (state & TASK_NOLOAD))
7734 return false;
7735
7736 return true;
7737 }
7738
7739
7740 void show_state_filter(unsigned int state_filter)
7741 {
7742 struct task_struct *g, *p;
7743
7744 rcu_read_lock();
7745 for_each_process_thread(g, p) {
7746 /*
7747 * reset the NMI-timeout, listing all tasks on a slow
7748 * console might take a lot of time:
7749 * Also, reset softlockup watchdogs on all CPUs, because
7750 * another CPU might be blocked waiting for us to process
7751 * an IPI.
7752 */
7753 touch_nmi_watchdog();
7754 touch_all_softlockup_watchdogs();
7755 if (state_filter_match(state_filter, p))
7756 sched_show_task(p);
7757 }
7758
7759 #ifdef CONFIG_SCHED_DEBUG
7760 if (!state_filter)
7761 sysrq_sched_debug_show();
7762 #endif
7763 rcu_read_unlock();
7764 /*
7765 * Only show locks if all tasks are dumped:
7766 */
7767 if (!state_filter)
7768 debug_show_all_locks();
7769 }
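/*
 * For context (hedged note, not new code): show_state_filter(0) is what the
 * sysrq 't' handler uses to dump every task, while the sysrq 'w' handler
 * passes TASK_UNINTERRUPTIBLE to list only blocked (D state) tasks.
 */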
7770
7771 /**
7772 * init_idle - set up an idle thread for a given CPU
7773 * @idle: task in question
7774 * @cpu: CPU the idle task belongs to
7775 *
7776 * NOTE: this function does not set the idle thread's NEED_RESCHED
7777 * flag, to make booting more robust.
7778 */
7779 void __init init_idle(struct task_struct *idle, int cpu)
7780 {
7781 #ifdef CONFIG_SMP
7782 struct affinity_context ac = (struct affinity_context) {
7783 .new_mask = cpumask_of(cpu),
7784 .flags = 0,
7785 };
7786 #endif
7787 struct rq *rq = cpu_rq(cpu);
7788 unsigned long flags;
7789
7790 raw_spin_lock_irqsave(&idle->pi_lock, flags);
7791 raw_spin_rq_lock(rq);
7792
7793 idle->__state = TASK_RUNNING;
7794 idle->se.exec_start = sched_clock();
7795 /*
7796 * PF_KTHREAD should already be set at this point; regardless, make it
7797 * look like a proper per-CPU kthread.
7798 */
7799 idle->flags |= PF_KTHREAD | PF_NO_SETAFFINITY;
7800 kthread_set_per_cpu(idle, cpu);
7801
7802 #ifdef CONFIG_SMP
7803 /*
7804 * No validation and serialization required at boot time and for
7805 * setting up the idle tasks of not yet online CPUs.
7806 */
7807 set_cpus_allowed_common(idle, &ac);
7808 #endif
7809 /*
7810 * We're having a chicken and egg problem, even though we are
7811 * holding rq->lock, the CPU isn't yet set to this CPU so the
7812 * lockdep check in task_group() will fail.
7813 *
7814 * Similar case to sched_fork(). / Alternatively we could
7815 * use task_rq_lock() here and obtain the other rq->lock.
7816 *
7817 * Silence PROVE_RCU
7818 */
7819 rcu_read_lock();
7820 __set_task_cpu(idle, cpu);
7821 rcu_read_unlock();
7822
7823 rq->idle = idle;
7824 rq_set_donor(rq, idle);
7825 rcu_assign_pointer(rq->curr, idle);
7826 idle->on_rq = TASK_ON_RQ_QUEUED;
7827 #ifdef CONFIG_SMP
7828 idle->on_cpu = 1;
7829 #endif
7830 raw_spin_rq_unlock(rq);
7831 raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
7832
7833 /* Set the preempt count _outside_ the spinlocks! */
7834 init_idle_preempt_count(idle, cpu);
7835
7836 /*
7837 * The idle tasks have their own, simple scheduling class:
7838 */
7839 idle->sched_class = &idle_sched_class;
7840 ftrace_graph_init_idle_task(idle, cpu);
7841 vtime_init_idle(idle, cpu);
7842 #ifdef CONFIG_SMP
7843 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
7844 #endif
7845 }
7846
7847 #ifdef CONFIG_SMP
7848
7849 int cpuset_cpumask_can_shrink(const struct cpumask *cur,
7850 const struct cpumask *trial)
7851 {
7852 int ret = 1;
7853
7854 if (cpumask_empty(cur))
7855 return ret;
7856
7857 ret = dl_cpuset_cpumask_can_shrink(cur, trial);
7858
7859 return ret;
7860 }
7861
7862 int task_can_attach(struct task_struct *p)
7863 {
7864 int ret = 0;
7865
7866 /*
7867 * Kthreads which disallow setaffinity shouldn't be moved
7868 * to a new cpuset; we don't want to change their CPU
7869 * affinity and isolating such threads by their set of
7870 * allowed nodes is unnecessary. Thus, cpusets are not
7871 * applicable for such threads. This prevents checking for
7872 * success of set_cpus_allowed_ptr() on all attached tasks
7873 * before cpus_mask may be changed.
7874 */
7875 if (p->flags & PF_NO_SETAFFINITY)
7876 ret = -EINVAL;
7877
7878 return ret;
7879 }
7880
7881 bool sched_smp_initialized __read_mostly;
7882
7883 #ifdef CONFIG_NUMA_BALANCING
7884 /* Migrate current task p to target_cpu */
7885 int migrate_task_to(struct task_struct *p, int target_cpu)
7886 {
7887 struct migration_arg arg = { p, target_cpu };
7888 int curr_cpu = task_cpu(p);
7889
7890 if (curr_cpu == target_cpu)
7891 return 0;
7892
7893 if (!cpumask_test_cpu(target_cpu, p->cpus_ptr))
7894 return -EINVAL;
7895
7896 /* TODO: This is not properly updating schedstats */
7897
7898 trace_sched_move_numa(p, curr_cpu, target_cpu);
7899 return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
7900 }
7901
7902 /*
7903 * Requeue a task on a given node and accurately track the number of NUMA
7904 * tasks on the runqueues
7905 */
7906 void sched_setnuma(struct task_struct *p, int nid)
7907 {
7908 bool queued, running;
7909 struct rq_flags rf;
7910 struct rq *rq;
7911
7912 rq = task_rq_lock(p, &rf);
7913 queued = task_on_rq_queued(p);
7914 running = task_current_donor(rq, p);
7915
7916 if (queued)
7917 dequeue_task(rq, p, DEQUEUE_SAVE);
7918 if (running)
7919 put_prev_task(rq, p);
7920
7921 p->numa_preferred_nid = nid;
7922
7923 if (queued)
7924 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
7925 if (running)
7926 set_next_task(rq, p);
7927 task_rq_unlock(rq, p, &rf);
7928 }
7929 #endif /* CONFIG_NUMA_BALANCING */
7930
7931 #ifdef CONFIG_HOTPLUG_CPU
7932 /*
7933 * Ensure that the idle task is using init_mm right before its CPU goes
7934 * offline.
7935 */
7936 void idle_task_exit(void)
7937 {
7938 struct mm_struct *mm = current->active_mm;
7939
7940 BUG_ON(cpu_online(smp_processor_id()));
7941 BUG_ON(current != this_rq()->idle);
7942
7943 if (mm != &init_mm) {
7944 switch_mm(mm, &init_mm, current);
7945 finish_arch_post_lock_switch();
7946 }
7947
7948 /* finish_cpu(), as ran on the BP, will clean up the active_mm state */
7949 }
7950
7951 static int __balance_push_cpu_stop(void *arg)
7952 {
7953 struct task_struct *p = arg;
7954 struct rq *rq = this_rq();
7955 struct rq_flags rf;
7956 int cpu;
7957
7958 raw_spin_lock_irq(&p->pi_lock);
7959 rq_lock(rq, &rf);
7960
7961 update_rq_clock(rq);
7962
7963 if (task_rq(p) == rq && task_on_rq_queued(p)) {
7964 cpu = select_fallback_rq(rq->cpu, p);
7965 rq = __migrate_task(rq, &rf, p, cpu);
7966 }
7967
7968 rq_unlock(rq, &rf);
7969 raw_spin_unlock_irq(&p->pi_lock);
7970
7971 put_task_struct(p);
7972
7973 return 0;
7974 }
7975
7976 static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
7977
7978 /*
7979 * Ensure we only run per-cpu kthreads once the CPU goes !active.
7980 *
7981 * This is enabled below SCHED_AP_ACTIVE; when !cpu_active(), but only
7982 * takes effect when the hotplug direction is down, i.e. the CPU is going offline.
7983 */
7984 static void balance_push(struct rq *rq)
7985 {
7986 struct task_struct *push_task = rq->curr;
7987
7988 lockdep_assert_rq_held(rq);
7989
7990 /*
7991 * Ensure the thing is persistent until balance_push_set(.on = false);
7992 */
7993 rq->balance_callback = &balance_push_callback;
7994
7995 /*
7996 * Only active while going offline and when invoked on the outgoing
7997 * CPU.
7998 */
7999 if (!cpu_dying(rq->cpu) || rq != this_rq())
8000 return;
8001
8002 /*
8003 * Both the cpu-hotplug and stop task are in this case and are
8004 * required to complete the hotplug process.
8005 */
8006 if (kthread_is_per_cpu(push_task) ||
8007 is_migration_disabled(push_task)) {
8008
8009 /*
8010 * If this is the idle task on the outgoing CPU try to wake
8011 * up the hotplug control thread which might wait for the
8012 * last task to vanish. The rcuwait_active() check is
8013 * accurate here because the waiter is pinned on this CPU
8014 * and obviously can't be running in parallel.
8015 *
8016 * On RT kernels this also has to check whether there are
8017 * pinned and scheduled out tasks on the runqueue. They
8018 * need to leave the migrate disabled section first.
8019 */
8020 if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
8021 rcuwait_active(&rq->hotplug_wait)) {
8022 raw_spin_rq_unlock(rq);
8023 rcuwait_wake_up(&rq->hotplug_wait);
8024 raw_spin_rq_lock(rq);
8025 }
8026 return;
8027 }
8028
8029 get_task_struct(push_task);
8030 /*
8031 * Temporarily drop rq->lock such that we can wake-up the stop task.
8032 * Both preemption and IRQs are still disabled.
8033 */
8034 preempt_disable();
8035 raw_spin_rq_unlock(rq);
8036 stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
8037 this_cpu_ptr(&push_work));
8038 preempt_enable();
8039 /*
8040 * At this point need_resched() is true and we'll take the loop in
8041 * schedule(). The next pick is obviously going to be the stop task
8042 * which is a per-CPU kthread (kthread_is_per_cpu()) and will push this task away.
8043 */
8044 raw_spin_rq_lock(rq);
8045 }
8046
8047 static void balance_push_set(int cpu, bool on)
8048 {
8049 struct rq *rq = cpu_rq(cpu);
8050 struct rq_flags rf;
8051
8052 rq_lock_irqsave(rq, &rf);
8053 if (on) {
8054 WARN_ON_ONCE(rq->balance_callback);
8055 rq->balance_callback = &balance_push_callback;
8056 } else if (rq->balance_callback == &balance_push_callback) {
8057 rq->balance_callback = NULL;
8058 }
8059 rq_unlock_irqrestore(rq, &rf);
8060 }
8061
8062 /*
8063 * Invoked from a CPU's hotplug control thread after the CPU has been marked
8064 * inactive. All tasks which are not per CPU kernel threads are either
8065 * pushed off this CPU now via balance_push() or placed on a different CPU
8066 * during wakeup. Wait until the CPU is quiescent.
8067 */
8068 static void balance_hotplug_wait(void)
8069 {
8070 struct rq *rq = this_rq();
8071
8072 rcuwait_wait_event(&rq->hotplug_wait,
8073 rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
8074 TASK_UNINTERRUPTIBLE);
8075 }
8076
8077 #else
8078
8079 static inline void balance_push(struct rq *rq)
8080 {
8081 }
8082
8083 static inline void balance_push_set(int cpu, bool on)
8084 {
8085 }
8086
8087 static inline void balance_hotplug_wait(void)
8088 {
8089 }
8090
8091 #endif /* CONFIG_HOTPLUG_CPU */
8092
8093 void set_rq_online(struct rq *rq)
8094 {
8095 if (!rq->online) {
8096 const struct sched_class *class;
8097
8098 cpumask_set_cpu(rq->cpu, rq->rd->online);
8099 rq->online = 1;
8100
8101 for_each_class(class) {
8102 if (class->rq_online)
8103 class->rq_online(rq);
8104 }
8105 }
8106 }
8107
8108 void set_rq_offline(struct rq *rq)
8109 {
8110 if (rq->online) {
8111 const struct sched_class *class;
8112
8113 update_rq_clock(rq);
8114 for_each_class(class) {
8115 if (class->rq_offline)
8116 class->rq_offline(rq);
8117 }
8118
8119 cpumask_clear_cpu(rq->cpu, rq->rd->online);
8120 rq->online = 0;
8121 }
8122 }
8123
8124 static inline void sched_set_rq_online(struct rq *rq, int cpu)
8125 {
8126 struct rq_flags rf;
8127
8128 rq_lock_irqsave(rq, &rf);
8129 if (rq->rd) {
8130 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
8131 set_rq_online(rq);
8132 }
8133 rq_unlock_irqrestore(rq, &rf);
8134 }
8135
8136 static inline void sched_set_rq_offline(struct rq *rq, int cpu)
8137 {
8138 struct rq_flags rf;
8139
8140 rq_lock_irqsave(rq, &rf);
8141 if (rq->rd) {
8142 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
8143 set_rq_offline(rq);
8144 }
8145 rq_unlock_irqrestore(rq, &rf);
8146 }
8147
8148 /*
8149 * used to mark begin/end of suspend/resume:
8150 */
8151 static int num_cpus_frozen;
8152
8153 /*
8154 * Update cpusets according to cpu_active mask. If cpusets are
8155 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
8156 * around partition_sched_domains().
8157 *
8158 * If we come here as part of a suspend/resume, don't touch cpusets because we
8159 * want to restore them to their original state upon resume anyway.
8160 */
8161 static void cpuset_cpu_active(void)
8162 {
8163 if (cpuhp_tasks_frozen) {
8164 /*
8165 * num_cpus_frozen tracks how many CPUs are involved in suspend
8166 * resume sequence. As long as this is not the last online
8167 * operation in the resume sequence, just build a single sched
8168 * domain, ignoring cpusets.
8169 */
8170 partition_sched_domains(1, NULL, NULL);
8171 if (--num_cpus_frozen)
8172 return;
8173 /*
8174 * This is the last CPU online operation. So fall through and
8175 * restore the original sched domains by considering the
8176 * cpuset configurations.
8177 */
8178 cpuset_force_rebuild();
8179 }
8180 cpuset_update_active_cpus();
8181 }
8182
8183 static int cpuset_cpu_inactive(unsigned int cpu)
8184 {
8185 if (!cpuhp_tasks_frozen) {
8186 int ret = dl_bw_check_overflow(cpu);
8187
8188 if (ret)
8189 return ret;
8190 cpuset_update_active_cpus();
8191 } else {
8192 num_cpus_frozen++;
8193 partition_sched_domains(1, NULL, NULL);
8194 }
8195 return 0;
8196 }
8197
8198 static inline void sched_smt_present_inc(int cpu)
8199 {
8200 #ifdef CONFIG_SCHED_SMT
8201 if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
8202 static_branch_inc_cpuslocked(&sched_smt_present);
8203 #endif
8204 }
8205
8206 static inline void sched_smt_present_dec(int cpu)
8207 {
8208 #ifdef CONFIG_SCHED_SMT
8209 if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
8210 static_branch_dec_cpuslocked(&sched_smt_present);
8211 #endif
8212 }
8213
8214 int sched_cpu_activate(unsigned int cpu)
8215 {
8216 struct rq *rq = cpu_rq(cpu);
8217
8218 /*
8219 * Clear the balance_push callback and prepare to schedule
8220 * regular tasks.
8221 */
8222 balance_push_set(cpu, false);
8223
8224 /*
8225 * When going up, increment the number of cores with SMT present.
8226 */
8227 sched_smt_present_inc(cpu);
8228 set_cpu_active(cpu, true);
8229
8230 if (sched_smp_initialized) {
8231 sched_update_numa(cpu, true);
8232 sched_domains_numa_masks_set(cpu);
8233 cpuset_cpu_active();
8234 }
8235
8236 scx_rq_activate(rq);
8237
8238 /*
8239 * Put the rq online, if not already. This happens:
8240 *
8241 * 1) In the early boot process, because we build the real domains
8242 * after all CPUs have been brought up.
8243 *
8244 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
8245 * domains.
8246 */
8247 sched_set_rq_online(rq, cpu);
8248
8249 return 0;
8250 }
8251
8252 int sched_cpu_deactivate(unsigned int cpu)
8253 {
8254 struct rq *rq = cpu_rq(cpu);
8255 int ret;
8256
8257 /*
8258 * Remove CPU from nohz.idle_cpus_mask to prevent participating in
8259 * load balancing when not active
8260 */
8261 nohz_balance_exit_idle(rq);
8262
8263 set_cpu_active(cpu, false);
8264
8265 /*
8266 * From this point forward, this CPU will refuse to run any task that
8267 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
8268 * push those tasks away until this gets cleared, see
8269 * sched_cpu_dying().
8270 */
8271 balance_push_set(cpu, true);
8272
8273 /*
8274 * We've cleared cpu_active_mask / set balance_push, wait for all
8275 * preempt-disabled and RCU users of this state to go away such that
8276 * all new such users will observe it.
8277 *
8278 * Specifically, we rely on ttwu to no longer target this CPU, see
8279 * ttwu_queue_cond() and is_cpu_allowed().
8280 *
8281 * Do the sync before parking the smpboot threads to take care of the RCU boost case.
8282 */
8283 synchronize_rcu();
8284
8285 sched_set_rq_offline(rq, cpu);
8286
8287 scx_rq_deactivate(rq);
8288
8289 /*
8290 * When going down, decrement the number of cores with SMT present.
8291 */
8292 sched_smt_present_dec(cpu);
8293
8294 #ifdef CONFIG_SCHED_SMT
8295 sched_core_cpu_deactivate(cpu);
8296 #endif
8297
8298 if (!sched_smp_initialized)
8299 return 0;
8300
8301 sched_update_numa(cpu, false);
8302 ret = cpuset_cpu_inactive(cpu);
8303 if (ret) {
8304 sched_smt_present_inc(cpu);
8305 sched_set_rq_online(rq, cpu);
8306 balance_push_set(cpu, false);
8307 set_cpu_active(cpu, true);
8308 sched_update_numa(cpu, true);
8309 return ret;
8310 }
8311 sched_domains_numa_masks_clear(cpu);
8312 return 0;
8313 }
8314
8315 static void sched_rq_cpu_starting(unsigned int cpu)
8316 {
8317 struct rq *rq = cpu_rq(cpu);
8318
8319 rq->calc_load_update = calc_load_update;
8320 update_max_interval();
8321 }
8322
8323 int sched_cpu_starting(unsigned int cpu)
8324 {
8325 sched_core_cpu_starting(cpu);
8326 sched_rq_cpu_starting(cpu);
8327 sched_tick_start(cpu);
8328 return 0;
8329 }
8330
8331 #ifdef CONFIG_HOTPLUG_CPU
8332
8333 /*
8334 * Invoked immediately before the stopper thread is invoked to bring the
8335 * CPU down completely. At this point all per CPU kthreads except the
8336 * hotplug thread (current) and the stopper thread (inactive) have been
8337 * either parked or have been unbound from the outgoing CPU. Ensure that
8338 * any of those which might be on the way out are gone.
8339 *
8340 * If after this point a bound task is being woken on this CPU then the
8341 * responsible hotplug callback has failed to do its job.
8342 * sched_cpu_dying() will catch it with the appropriate fireworks.
8343 */
8344 int sched_cpu_wait_empty(unsigned int cpu)
8345 {
8346 balance_hotplug_wait();
8347 return 0;
8348 }
8349
8350 /*
8351 * Since this CPU is going 'away' for a while, fold any nr_active delta we
8352 * might have. Called from the CPU stopper task after ensuring that the
8353 * stopper is the last running task on the CPU, so nr_active count is
8354 * stable. We need to take the tear-down thread which is calling this into
8355 * account, so we hand in adjust = 1 to the load calculation.
8356 *
8357 * Also see the comment "Global load-average calculations".
8358 */
8359 static void calc_load_migrate(struct rq *rq)
8360 {
8361 long delta = calc_load_fold_active(rq, 1);
8362
8363 if (delta)
8364 atomic_long_add(delta, &calc_load_tasks);
8365 }
8366
8367 static void dump_rq_tasks(struct rq *rq, const char *loglvl)
8368 {
8369 struct task_struct *g, *p;
8370 int cpu = cpu_of(rq);
8371
8372 lockdep_assert_rq_held(rq);
8373
8374 printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
8375 for_each_process_thread(g, p) {
8376 if (task_cpu(p) != cpu)
8377 continue;
8378
8379 if (!task_on_rq_queued(p))
8380 continue;
8381
8382 printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
8383 }
8384 }
8385
8386 int sched_cpu_dying(unsigned int cpu)
8387 {
8388 struct rq *rq = cpu_rq(cpu);
8389 struct rq_flags rf;
8390
8391 /* Handle pending wakeups and then migrate everything off */
8392 sched_tick_stop(cpu);
8393
8394 rq_lock_irqsave(rq, &rf);
8395 if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
8396 WARN(true, "Dying CPU not properly vacated!");
8397 dump_rq_tasks(rq, KERN_WARNING);
8398 }
8399 rq_unlock_irqrestore(rq, &rf);
8400
8401 calc_load_migrate(rq);
8402 update_max_interval();
8403 hrtick_clear(rq);
8404 sched_core_cpu_dying(cpu);
8405 return 0;
8406 }
8407 #endif
8408
8409 void __init sched_init_smp(void)
8410 {
8411 sched_init_numa(NUMA_NO_NODE);
8412
8413 /*
8414 * There's no userspace yet to cause hotplug operations; hence all the
8415 * CPU masks are stable and all blatant races in the below code cannot
8416 * happen.
8417 */
8418 mutex_lock(&sched_domains_mutex);
8419 sched_init_domains(cpu_active_mask);
8420 mutex_unlock(&sched_domains_mutex);
8421
8422 /* Move init over to a non-isolated CPU */
8423 if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0)
8424 BUG();
8425 current->flags &= ~PF_NO_SETAFFINITY;
8426 sched_init_granularity();
8427
8428 init_sched_rt_class();
8429 init_sched_dl_class();
8430
8431 sched_smp_initialized = true;
8432 }
8433
8434 static int __init migration_init(void)
8435 {
8436 sched_cpu_starting(smp_processor_id());
8437 return 0;
8438 }
8439 early_initcall(migration_init);
8440
8441 #else
8442 void __init sched_init_smp(void)
8443 {
8444 sched_init_granularity();
8445 }
8446 #endif /* CONFIG_SMP */
8447
8448 int in_sched_functions(unsigned long addr)
8449 {
8450 return in_lock_functions(addr) ||
8451 (addr >= (unsigned long)__sched_text_start
8452 && addr < (unsigned long)__sched_text_end);
8453 }
8454
8455 #ifdef CONFIG_CGROUP_SCHED
8456 /*
8457 * Default task group.
8458 * Every task in system belongs to this group at bootup.
8459 */
8460 struct task_group root_task_group;
8461 LIST_HEAD(task_groups);
8462
8463 /* Cacheline aligned slab cache for task_group */
8464 static struct kmem_cache *task_group_cache __ro_after_init;
8465 #endif
8466
8467 void __init sched_init(void)
8468 {
8469 unsigned long ptr = 0;
8470 int i;
8471
8472 /* Make sure the linker didn't screw up */
8473 #ifdef CONFIG_SMP
8474 BUG_ON(!sched_class_above(&stop_sched_class, &dl_sched_class));
8475 #endif
8476 BUG_ON(!sched_class_above(&dl_sched_class, &rt_sched_class));
8477 BUG_ON(!sched_class_above(&rt_sched_class, &fair_sched_class));
8478 BUG_ON(!sched_class_above(&fair_sched_class, &idle_sched_class));
8479 #ifdef CONFIG_SCHED_CLASS_EXT
8480 BUG_ON(!sched_class_above(&fair_sched_class, &ext_sched_class));
8481 BUG_ON(!sched_class_above(&ext_sched_class, &idle_sched_class));
8482 #endif
8483
8484 wait_bit_init();
8485
8486 #ifdef CONFIG_FAIR_GROUP_SCHED
8487 ptr += 2 * nr_cpu_ids * sizeof(void **);
8488 #endif
8489 #ifdef CONFIG_RT_GROUP_SCHED
8490 ptr += 2 * nr_cpu_ids * sizeof(void **);
8491 #endif
8492 if (ptr) {
8493 ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT);
8494
8495 #ifdef CONFIG_FAIR_GROUP_SCHED
8496 root_task_group.se = (struct sched_entity **)ptr;
8497 ptr += nr_cpu_ids * sizeof(void **);
8498
8499 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
8500 ptr += nr_cpu_ids * sizeof(void **);
8501
8502 root_task_group.shares = ROOT_TASK_GROUP_LOAD;
8503 init_cfs_bandwidth(&root_task_group.cfs_bandwidth, NULL);
8504 #endif /* CONFIG_FAIR_GROUP_SCHED */
8505 #ifdef CONFIG_EXT_GROUP_SCHED
8506 root_task_group.scx_weight = CGROUP_WEIGHT_DFL;
8507 #endif /* CONFIG_EXT_GROUP_SCHED */
8508 #ifdef CONFIG_RT_GROUP_SCHED
8509 root_task_group.rt_se = (struct sched_rt_entity **)ptr;
8510 ptr += nr_cpu_ids * sizeof(void **);
8511
8512 root_task_group.rt_rq = (struct rt_rq **)ptr;
8513 ptr += nr_cpu_ids * sizeof(void **);
8514
8515 #endif /* CONFIG_RT_GROUP_SCHED */
8516 }
8517
8518 #ifdef CONFIG_SMP
8519 init_defrootdomain();
8520 #endif
8521
8522 #ifdef CONFIG_RT_GROUP_SCHED
8523 init_rt_bandwidth(&root_task_group.rt_bandwidth,
8524 global_rt_period(), global_rt_runtime());
8525 #endif /* CONFIG_RT_GROUP_SCHED */
8526
8527 #ifdef CONFIG_CGROUP_SCHED
8528 task_group_cache = KMEM_CACHE(task_group, 0);
8529
8530 list_add(&root_task_group.list, &task_groups);
8531 INIT_LIST_HEAD(&root_task_group.children);
8532 INIT_LIST_HEAD(&root_task_group.siblings);
8533 autogroup_init(&init_task);
8534 #endif /* CONFIG_CGROUP_SCHED */
8535
8536 for_each_possible_cpu(i) {
8537 struct rq *rq;
8538
8539 rq = cpu_rq(i);
8540 raw_spin_lock_init(&rq->__lock);
8541 rq->nr_running = 0;
8542 rq->calc_load_active = 0;
8543 rq->calc_load_update = jiffies + LOAD_FREQ;
8544 init_cfs_rq(&rq->cfs);
8545 init_rt_rq(&rq->rt);
8546 init_dl_rq(&rq->dl);
8547 #ifdef CONFIG_FAIR_GROUP_SCHED
8548 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
8549 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
8550 /*
8551 * How much CPU bandwidth does root_task_group get?
8552 *
8553 * In case of task-groups formed through the cgroup filesystem, it
8554 * gets 100% of the CPU resources in the system. This overall
8555 * system CPU resource is divided among the tasks of
8556 * root_task_group and its child task-groups in a fair manner,
8557 * based on each entity's (task or task-group's) weight
8558 * (se->load.weight).
8559 *
8560 * In other words, if root_task_group has 10 tasks of weight
8561 * 1024 and two child groups A0 and A1 (of weight 1024 each),
8562 * then A0's share of the CPU resource is:
8563 *
8564 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
8565 *
8566 * We achieve this by letting root_task_group's tasks sit
8567 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
8568 */
8569 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
8570 #endif /* CONFIG_FAIR_GROUP_SCHED */
8571
8572 #ifdef CONFIG_RT_GROUP_SCHED
8573 /*
8574 * This is required for init cpu because rt.c:__enable_runtime()
8575 * starts working after scheduler_running, which is not the case
8576 * yet.
8577 */
8578 rq->rt.rt_runtime = global_rt_runtime();
8579 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
8580 #endif
8581 #ifdef CONFIG_SMP
8582 rq->sd = NULL;
8583 rq->rd = NULL;
8584 rq->cpu_capacity = SCHED_CAPACITY_SCALE;
8585 rq->balance_callback = &balance_push_callback;
8586 rq->active_balance = 0;
8587 rq->next_balance = jiffies;
8588 rq->push_cpu = 0;
8589 rq->cpu = i;
8590 rq->online = 0;
8591 rq->idle_stamp = 0;
8592 rq->avg_idle = 2*sysctl_sched_migration_cost;
8593 rq->max_idle_balance_cost = sysctl_sched_migration_cost;
8594
8595 INIT_LIST_HEAD(&rq->cfs_tasks);
8596
8597 rq_attach_root(rq, &def_root_domain);
8598 #ifdef CONFIG_NO_HZ_COMMON
8599 rq->last_blocked_load_update_tick = jiffies;
8600 atomic_set(&rq->nohz_flags, 0);
8601
8602 INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
8603 #endif
8604 #ifdef CONFIG_HOTPLUG_CPU
8605 rcuwait_init(&rq->hotplug_wait);
8606 #endif
8607 #endif /* CONFIG_SMP */
8608 hrtick_rq_init(rq);
8609 atomic_set(&rq->nr_iowait, 0);
8610 fair_server_init(rq);
8611
8612 #ifdef CONFIG_SCHED_CORE
8613 rq->core = rq;
8614 rq->core_pick = NULL;
8615 rq->core_dl_server = NULL;
8616 rq->core_enabled = 0;
8617 rq->core_tree = RB_ROOT;
8618 rq->core_forceidle_count = 0;
8619 rq->core_forceidle_occupation = 0;
8620 rq->core_forceidle_start = 0;
8621
8622 rq->core_cookie = 0UL;
8623 #endif
8624 zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i));
8625 }
8626
8627 set_load_weight(&init_task, false);
8628 init_task.se.slice = sysctl_sched_base_slice;
8629
8630 /*
8631 * The boot idle thread does lazy MMU switching as well:
8632 */
8633 mmgrab_lazy_tlb(&init_mm);
8634 enter_lazy_tlb(&init_mm, current);
8635
8636 /*
8637 * The idle task doesn't need the kthread struct to function, but it
8638 * is dressed up as a per-CPU kthread and thus needs to play the part
8639 * if we want to avoid special-casing it in code that deals with per-CPU
8640 * kthreads.
8641 */
8642 WARN_ON(!set_kthread_struct(current));
8643
8644 /*
8645 * Make us the idle thread. Technically, schedule() should not be
8646 * called from this thread, however somewhere below it might be,
8647 * but because we are the idle thread, we just pick up running again
8648 * when this runqueue becomes "idle".
8649 */
8650 __sched_fork(0, current);
8651 init_idle(current, smp_processor_id());
8652
8653 calc_load_update = jiffies + LOAD_FREQ;
8654
8655 #ifdef CONFIG_SMP
8656 idle_thread_set_boot_cpu();
8657 balance_push_set(smp_processor_id(), false);
8658 #endif
8659 init_sched_fair_class();
8660 init_sched_ext_class();
8661
8662 psi_init();
8663
8664 init_uclamp();
8665
8666 preempt_dynamic_init();
8667
8668 scheduler_running = 1;
8669 }
8670
8671 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
8672
8673 void __might_sleep(const char *file, int line)
8674 {
8675 unsigned int state = get_current_state();
8676 /*
8677 * Blocking primitives will set (and therefore destroy) current->state,
8678 * since we will exit with TASK_RUNNING make sure we enter with it,
8679 * otherwise we will destroy state.
8680 */
8681 WARN_ONCE(state != TASK_RUNNING && current->task_state_change,
8682 "do not call blocking ops when !TASK_RUNNING; "
8683 "state=%x set at [<%p>] %pS\n", state,
8684 (void *)current->task_state_change,
8685 (void *)current->task_state_change);
8686
8687 __might_resched(file, line, 0);
8688 }
8689 EXPORT_SYMBOL(__might_sleep);
8690
8691 static void print_preempt_disable_ip(int preempt_offset, unsigned long ip)
8692 {
8693 if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT))
8694 return;
8695
8696 if (preempt_count() == preempt_offset)
8697 return;
8698
8699 pr_err("Preemption disabled at:");
8700 print_ip_sym(KERN_ERR, ip);
8701 }
8702
8703 static inline bool resched_offsets_ok(unsigned int offsets)
8704 {
8705 unsigned int nested = preempt_count();
8706
8707 nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT;
8708
8709 return nested == offsets;
8710 }
8711
8712 void __might_resched(const char *file, int line, unsigned int offsets)
8713 {
8714 /* Ratelimiting timestamp: */
8715 static unsigned long prev_jiffy;
8716
8717 unsigned long preempt_disable_ip;
8718
8719 /* WARN_ON_ONCE() by default, no rate limit required: */
8720 rcu_sleep_check();
8721
8722 if ((resched_offsets_ok(offsets) && !irqs_disabled() &&
8723 !is_idle_task(current) && !current->non_block_count) ||
8724 system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
8725 oops_in_progress)
8726 return;
8727
8728 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8729 return;
8730 prev_jiffy = jiffies;
8731
8732 /* Save this before calling printk(), since that will clobber it: */
8733 preempt_disable_ip = get_preempt_disable_ip(current);
8734
8735 pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
8736 file, line);
8737 pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
8738 in_atomic(), irqs_disabled(), current->non_block_count,
8739 current->pid, current->comm);
8740 pr_err("preempt_count: %x, expected: %x\n", preempt_count(),
8741 offsets & MIGHT_RESCHED_PREEMPT_MASK);
8742
8743 if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
8744 pr_err("RCU nest depth: %d, expected: %u\n",
8745 rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT);
8746 }
8747
8748 if (task_stack_end_corrupted(current))
8749 pr_emerg("Thread overran stack, or stack corrupted\n");
8750
8751 debug_show_held_locks(current);
8752 if (irqs_disabled())
8753 print_irqtrace_events(current);
8754
8755 print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK,
8756 preempt_disable_ip);
8757
8758 dump_stack();
8759 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
8760 }
8761 EXPORT_SYMBOL(__might_resched);
8762
8763 void __cant_sleep(const char *file, int line, int preempt_offset)
8764 {
8765 static unsigned long prev_jiffy;
8766
8767 if (irqs_disabled())
8768 return;
8769
8770 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
8771 return;
8772
8773 if (preempt_count() > preempt_offset)
8774 return;
8775
8776 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8777 return;
8778 prev_jiffy = jiffies;
8779
8780 printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
8781 printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
8782 in_atomic(), irqs_disabled(),
8783 current->pid, current->comm);
8784
8785 debug_show_held_locks(current);
8786 dump_stack();
8787 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
8788 }
8789 EXPORT_SYMBOL_GPL(__cant_sleep);
8790
8791 #ifdef CONFIG_SMP
8792 void __cant_migrate(const char *file, int line)
8793 {
8794 static unsigned long prev_jiffy;
8795
8796 if (irqs_disabled())
8797 return;
8798
8799 if (is_migration_disabled(current))
8800 return;
8801
8802 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
8803 return;
8804
8805 if (preempt_count() > 0)
8806 return;
8807
8808 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8809 return;
8810 prev_jiffy = jiffies;
8811
8812 pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
8813 pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
8814 in_atomic(), irqs_disabled(), is_migration_disabled(current),
8815 current->pid, current->comm);
8816
8817 debug_show_held_locks(current);
8818 dump_stack();
8819 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
8820 }
8821 EXPORT_SYMBOL_GPL(__cant_migrate);
8822 #endif
8823 #endif
8824
8825 #ifdef CONFIG_MAGIC_SYSRQ
8826 void normalize_rt_tasks(void)
8827 {
8828 struct task_struct *g, *p;
8829 struct sched_attr attr = {
8830 .sched_policy = SCHED_NORMAL,
8831 };
8832
8833 read_lock(&tasklist_lock);
8834 for_each_process_thread(g, p) {
8835 /*
8836 * Only normalize user tasks:
8837 */
8838 if (p->flags & PF_KTHREAD)
8839 continue;
8840
8841 p->se.exec_start = 0;
8842 schedstat_set(p->stats.wait_start, 0);
8843 schedstat_set(p->stats.sleep_start, 0);
8844 schedstat_set(p->stats.block_start, 0);
8845
8846 if (!rt_or_dl_task(p)) {
8847 /*
8848 * Renice negative nice level userspace
8849 * tasks back to 0:
8850 */
8851 if (task_nice(p) < 0)
8852 set_user_nice(p, 0);
8853 continue;
8854 }
8855
8856 __sched_setscheduler(p, &attr, false, false);
8857 }
8858 read_unlock(&tasklist_lock);
8859 }
8860
8861 #endif /* CONFIG_MAGIC_SYSRQ */
8862
8863 #if defined(CONFIG_KGDB_KDB)
8864 /*
8865 * These functions are only useful for KDB.
8866 *
8867 * They can only be called when the whole system has been
8868 * stopped - every CPU needs to be quiescent, and no scheduling
8869 * activity can take place. Using them for anything else would
8870 * be a serious bug, and as a result, they aren't even visible
8871 * under any other configuration.
8872 */
8873
8874 /**
8875 * curr_task - return the current task for a given CPU.
8876 * @cpu: the processor in question.
8877 *
8878 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
8879 *
8880 * Return: The current task for @cpu.
8881 */
8882 struct task_struct *curr_task(int cpu)
8883 {
8884 return cpu_curr(cpu);
8885 }
8886
8887 #endif /* defined(CONFIG_KGDB_KDB) */
8888
8889 #ifdef CONFIG_CGROUP_SCHED
8890 /* task_group_lock serializes the addition/removal of task groups */
8891 static DEFINE_SPINLOCK(task_group_lock);
8892
8893 static inline void alloc_uclamp_sched_group(struct task_group *tg,
8894 struct task_group *parent)
8895 {
8896 #ifdef CONFIG_UCLAMP_TASK_GROUP
8897 enum uclamp_id clamp_id;
8898
8899 for_each_clamp_id(clamp_id) {
8900 uclamp_se_set(&tg->uclamp_req[clamp_id],
8901 uclamp_none(clamp_id), false);
8902 tg->uclamp[clamp_id] = parent->uclamp[clamp_id];
8903 }
8904 #endif
8905 }
8906
8907 static void sched_free_group(struct task_group *tg)
8908 {
8909 free_fair_sched_group(tg);
8910 free_rt_sched_group(tg);
8911 autogroup_free(tg);
8912 kmem_cache_free(task_group_cache, tg);
8913 }
8914
8915 static void sched_free_group_rcu(struct rcu_head *rcu)
8916 {
8917 sched_free_group(container_of(rcu, struct task_group, rcu));
8918 }
8919
8920 static void sched_unregister_group(struct task_group *tg)
8921 {
8922 unregister_fair_sched_group(tg);
8923 unregister_rt_sched_group(tg);
8924 /*
8925 * We have to wait for yet another RCU grace period to expire, as
8926 * print_cfs_stats() might run concurrently.
8927 */
8928 call_rcu(&tg->rcu, sched_free_group_rcu);
8929 }
8930
8931 /* allocate runqueue etc for a new task group */
8932 struct task_group *sched_create_group(struct task_group *parent)
8933 {
8934 struct task_group *tg;
8935
8936 tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
8937 if (!tg)
8938 return ERR_PTR(-ENOMEM);
8939
8940 if (!alloc_fair_sched_group(tg, parent))
8941 goto err;
8942
8943 if (!alloc_rt_sched_group(tg, parent))
8944 goto err;
8945
8946 scx_group_set_weight(tg, CGROUP_WEIGHT_DFL);
8947 alloc_uclamp_sched_group(tg, parent);
8948
8949 return tg;
8950
8951 err:
8952 sched_free_group(tg);
8953 return ERR_PTR(-ENOMEM);
8954 }
8955
8956 void sched_online_group(struct task_group *tg, struct task_group *parent)
8957 {
8958 unsigned long flags;
8959
8960 spin_lock_irqsave(&task_group_lock, flags);
8961 list_add_rcu(&tg->list, &task_groups);
8962
8963 /* Root should already exist: */
8964 WARN_ON(!parent);
8965
8966 tg->parent = parent;
8967 INIT_LIST_HEAD(&tg->children);
8968 list_add_rcu(&tg->siblings, &parent->children);
8969 spin_unlock_irqrestore(&task_group_lock, flags);
8970
8971 online_fair_sched_group(tg);
8972 }
8973
8974 /* RCU callback to free various structures associated with a task group */
8975 static void sched_unregister_group_rcu(struct rcu_head *rhp)
8976 {
8977 /* Now it should be safe to free those cfs_rqs: */
8978 sched_unregister_group(container_of(rhp, struct task_group, rcu));
8979 }
8980
8981 void sched_destroy_group(struct task_group *tg)
8982 {
8983 /* Wait for possible concurrent references to cfs_rqs to complete: */
8984 call_rcu(&tg->rcu, sched_unregister_group_rcu);
8985 }
8986
8987 void sched_release_group(struct task_group *tg)
8988 {
8989 unsigned long flags;
8990
8991 /*
8992 * Unlink first, to avoid walk_tg_tree_from() from finding us (via
8993 * sched_cfs_period_timer()).
8994 *
8995 * For this to be effective, we have to wait for all pending users of
8996 * this task group to leave their RCU critical section to ensure no new
8997 * user will see our dying task group any more. Specifically ensure
8998 * that tg_unthrottle_up() won't add decayed cfs_rq's to it.
8999 *
9000 * We therefore defer calling unregister_fair_sched_group() to
9001 * sched_unregister_group(), which is guaranteed to be called only after the
9002 * current RCU grace period has expired.
9003 */
9004 spin_lock_irqsave(&task_group_lock, flags);
9005 list_del_rcu(&tg->list);
9006 list_del_rcu(&tg->siblings);
9007 spin_unlock_irqrestore(&task_group_lock, flags);
9008 }
9009
9010 static struct task_group *sched_get_task_group(struct task_struct *tsk)
9011 {
9012 struct task_group *tg;
9013
9014 /*
9015 * All callers are synchronized by task_rq_lock(); we do not use RCU
9016 * which is pointless here. Thus, we pass "true" to task_css_check()
9017 * to prevent lockdep warnings.
9018 */
9019 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
9020 struct task_group, css);
9021 tg = autogroup_task_group(tsk, tg);
9022
9023 return tg;
9024 }
9025
9026 static void sched_change_group(struct task_struct *tsk, struct task_group *group)
9027 {
9028 tsk->sched_task_group = group;
9029
9030 #ifdef CONFIG_FAIR_GROUP_SCHED
9031 if (tsk->sched_class->task_change_group)
9032 tsk->sched_class->task_change_group(tsk);
9033 else
9034 #endif
9035 set_task_rq(tsk, task_cpu(tsk));
9036 }
9037
9038 /*
9039 * Change task's runqueue when it moves between groups.
9040 *
9041 * The caller of this function should have put the task in its new group by
9042 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
9043 * its new group.
9044 */
9045 void sched_move_task(struct task_struct *tsk)
9046 {
9047 int queued, running, queue_flags =
9048 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
9049 struct task_group *group;
9050 struct rq *rq;
9051
9052 CLASS(task_rq_lock, rq_guard)(tsk);
9053 rq = rq_guard.rq;
9054
9055 /*
9056 * Esp. with SCHED_AUTOGROUP enabled it is possible to get superfluous
9057 * group changes.
9058 */
9059 group = sched_get_task_group(tsk);
9060 if (group == tsk->sched_task_group)
9061 return;
9062
9063 update_rq_clock(rq);
9064
9065 running = task_current_donor(rq, tsk);
9066 queued = task_on_rq_queued(tsk);
9067
9068 if (queued)
9069 dequeue_task(rq, tsk, queue_flags);
9070 if (running)
9071 put_prev_task(rq, tsk);
9072
9073 sched_change_group(tsk, group);
9074 scx_move_task(tsk);
9075
9076 if (queued)
9077 enqueue_task(rq, tsk, queue_flags);
9078 if (running) {
9079 set_next_task(rq, tsk);
9080 /*
9081 * After changing group, the running task may have joined a
9082 * throttled one but it's still the running task. Trigger a
9083 * resched to make sure that task can still run.
9084 */
9085 resched_curr(rq);
9086 }
9087 }
9088
9089 static struct cgroup_subsys_state *
9090 cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
9091 {
9092 struct task_group *parent = css_tg(parent_css);
9093 struct task_group *tg;
9094
9095 if (!parent) {
9096 /* This is early initialization for the top cgroup */
9097 return &root_task_group.css;
9098 }
9099
9100 tg = sched_create_group(parent);
9101 if (IS_ERR(tg))
9102 return ERR_PTR(-ENOMEM);
9103
9104 return &tg->css;
9105 }
9106
9107 /* Expose task group only after completing cgroup initialization */
9108 static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
9109 {
9110 struct task_group *tg = css_tg(css);
9111 struct task_group *parent = css_tg(css->parent);
9112 int ret;
9113
9114 ret = scx_tg_online(tg);
9115 if (ret)
9116 return ret;
9117
9118 if (parent)
9119 sched_online_group(tg, parent);
9120
9121 #ifdef CONFIG_UCLAMP_TASK_GROUP
9122 /* Propagate the effective uclamp value for the new group */
9123 guard(mutex)(&uclamp_mutex);
9124 guard(rcu)();
9125 cpu_util_update_eff(css);
9126 #endif
9127
9128 return 0;
9129 }
9130
9131 static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
9132 {
9133 struct task_group *tg = css_tg(css);
9134
9135 scx_tg_offline(tg);
9136 }
9137
9138 static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
9139 {
9140 struct task_group *tg = css_tg(css);
9141
9142 sched_release_group(tg);
9143 }
9144
9145 static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
9146 {
9147 struct task_group *tg = css_tg(css);
9148
9149 /*
9150 * Relies on the RCU grace period between css_released() and this.
9151 */
9152 sched_unregister_group(tg);
9153 }
9154
9155 static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
9156 {
9157 #ifdef CONFIG_RT_GROUP_SCHED
9158 struct task_struct *task;
9159 struct cgroup_subsys_state *css;
9160
9161 cgroup_taskset_for_each(task, css, tset) {
9162 if (!sched_rt_can_attach(css_tg(css), task))
9163 return -EINVAL;
9164 }
9165 #endif
9166 return scx_cgroup_can_attach(tset);
9167 }
9168
9169 static void cpu_cgroup_attach(struct cgroup_taskset *tset)
9170 {
9171 struct task_struct *task;
9172 struct cgroup_subsys_state *css;
9173
9174 cgroup_taskset_for_each(task, css, tset)
9175 sched_move_task(task);
9176
9177 scx_cgroup_finish_attach();
9178 }
9179
9180 static void cpu_cgroup_cancel_attach(struct cgroup_taskset *tset)
9181 {
9182 scx_cgroup_cancel_attach(tset);
9183 }
9184
9185 #ifdef CONFIG_UCLAMP_TASK_GROUP
9186 static void cpu_util_update_eff(struct cgroup_subsys_state *css)
9187 {
9188 struct cgroup_subsys_state *top_css = css;
9189 struct uclamp_se *uc_parent = NULL;
9190 struct uclamp_se *uc_se = NULL;
9191 unsigned int eff[UCLAMP_CNT];
9192 enum uclamp_id clamp_id;
9193 unsigned int clamps;
9194
9195 lockdep_assert_held(&uclamp_mutex);
9196 SCHED_WARN_ON(!rcu_read_lock_held());
9197
9198 css_for_each_descendant_pre(css, top_css) {
9199 uc_parent = css_tg(css)->parent
9200 ? css_tg(css)->parent->uclamp : NULL;
9201
9202 for_each_clamp_id(clamp_id) {
9203 /* Assume effective clamps match requested clamps */
9204 eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value;
9205 /* Cap effective clamps with parent's effective clamps */
9206 if (uc_parent &&
9207 eff[clamp_id] > uc_parent[clamp_id].value) {
9208 eff[clamp_id] = uc_parent[clamp_id].value;
9209 }
9210 }
9211 /* Ensure protection is always capped by limit */
9212 eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]);
9213
9214 /* Propagate most restrictive effective clamps */
9215 clamps = 0x0;
9216 uc_se = css_tg(css)->uclamp;
9217 for_each_clamp_id(clamp_id) {
9218 if (eff[clamp_id] == uc_se[clamp_id].value)
9219 continue;
9220 uc_se[clamp_id].value = eff[clamp_id];
9221 uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]);
9222 clamps |= (0x1 << clamp_id);
9223 }
9224 if (!clamps) {
9225 css = css_rightmost_descendant(css);
9226 continue;
9227 }
9228
9229 /* Immediately update descendants RUNNABLE tasks */
9230 uclamp_update_active_tasks(css);
9231 }
9232 }
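/*
 * Illustrative example (not part of the original source): suppose a child
 * group requests uclamp.min = 60% (util ~614) and uclamp.max = 50%
 * (util ~512) while its parent's effective clamps do not restrict it. The
 * walk above keeps the requested values, but the "protection is always
 * capped by limit" step lowers the effective uclamp.min to 512; only
 * groups whose effective clamps actually changed have their RUNNABLE
 * tasks updated, and unchanged subtrees are skipped via
 * css_rightmost_descendant().
 */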
9233
9234 /*
9235 * Integer 10^N with a given N exponent by casting to integer the literal "1eN"
9236 * C expression. Since there is no way to convert a macro argument (N) into a
9237 * character constant, use two levels of macros.
9238 */
9239 #define _POW10(exp) ((unsigned int)1e##exp)
9240 #define POW10(exp) _POW10(exp)
9241
9242 struct uclamp_request {
9243 #define UCLAMP_PERCENT_SHIFT 2
9244 #define UCLAMP_PERCENT_SCALE (100 * POW10(UCLAMP_PERCENT_SHIFT))
9245 s64 percent;
9246 u64 util;
9247 int ret;
9248 };
9249
9250 static inline struct uclamp_request
9251 capacity_from_percent(char *buf)
9252 {
9253 struct uclamp_request req = {
9254 .percent = UCLAMP_PERCENT_SCALE,
9255 .util = SCHED_CAPACITY_SCALE,
9256 .ret = 0,
9257 };
9258
9259 buf = strim(buf);
9260 if (strcmp(buf, "max")) {
9261 req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT,
9262 &req.percent);
9263 if (req.ret)
9264 return req;
9265 if ((u64)req.percent > UCLAMP_PERCENT_SCALE) {
9266 req.ret = -ERANGE;
9267 return req;
9268 }
9269
9270 req.util = req.percent << SCHED_CAPACITY_SHIFT;
9271 req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE);
9272 }
9273
9274 return req;
9275 }
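/*
 * Worked example (illustrative, not from the original source): writing
 * "50" to cpu.uclamp.min is parsed by cgroup_parse_float() with
 * UCLAMP_PERCENT_SHIFT == 2 into req.percent == 5000 (i.e. 50.00%), and
 * the utilization clamp becomes
 *
 *   req.util = (5000 << SCHED_CAPACITY_SHIFT) / UCLAMP_PERCENT_SCALE
 *            = (5000 * 1024) / 10000
 *            = 512
 *
 * while the literal string "max" keeps the SCHED_CAPACITY_SCALE default.
 */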
9276
9277 static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf,
9278 size_t nbytes, loff_t off,
9279 enum uclamp_id clamp_id)
9280 {
9281 struct uclamp_request req;
9282 struct task_group *tg;
9283
9284 req = capacity_from_percent(buf);
9285 if (req.ret)
9286 return req.ret;
9287
9288 static_branch_enable(&sched_uclamp_used);
9289
9290 guard(mutex)(&uclamp_mutex);
9291 guard(rcu)();
9292
9293 tg = css_tg(of_css(of));
9294 if (tg->uclamp_req[clamp_id].value != req.util)
9295 uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false);
9296
9297 /*
9298 * Because the conversion rounding is not recoverable, we keep track
9299 * of the exact requested value.
9300 */
9301 tg->uclamp_pct[clamp_id] = req.percent;
9302
9303 /* Update effective clamps to track the most restrictive value */
9304 cpu_util_update_eff(of_css(of));
9305
9306 return nbytes;
9307 }
9308
9309 static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of,
9310 char *buf, size_t nbytes,
9311 loff_t off)
9312 {
9313 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MIN);
9314 }
9315
9316 static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of,
9317 char *buf, size_t nbytes,
9318 loff_t off)
9319 {
9320 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MAX);
9321 }
9322
9323 static inline void cpu_uclamp_print(struct seq_file *sf,
9324 enum uclamp_id clamp_id)
9325 {
9326 struct task_group *tg;
9327 u64 util_clamp;
9328 u64 percent;
9329 u32 rem;
9330
9331 scoped_guard (rcu) {
9332 tg = css_tg(seq_css(sf));
9333 util_clamp = tg->uclamp_req[clamp_id].value;
9334 }
9335
9336 if (util_clamp == SCHED_CAPACITY_SCALE) {
9337 seq_puts(sf, "max\n");
9338 return;
9339 }
9340
9341 percent = tg->uclamp_pct[clamp_id];
9342 percent = div_u64_rem(percent, POW10(UCLAMP_PERCENT_SHIFT), &rem);
9343 seq_printf(sf, "%llu.%0*u\n", percent, UCLAMP_PERCENT_SHIFT, rem);
9344 }
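/*
 * Example of the output format (illustrative): a stored uclamp_pct value
 * of 1234 is split by div_u64_rem() into percent == 12 and rem == 34 and
 * printed as "12.34"; a request of SCHED_CAPACITY_SCALE is reported as
 * the literal "max" instead.
 */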
9345
9346 static int cpu_uclamp_min_show(struct seq_file *sf, void *v)
9347 {
9348 cpu_uclamp_print(sf, UCLAMP_MIN);
9349 return 0;
9350 }
9351
9352 static int cpu_uclamp_max_show(struct seq_file *sf, void *v)
9353 {
9354 cpu_uclamp_print(sf, UCLAMP_MAX);
9355 return 0;
9356 }
9357 #endif /* CONFIG_UCLAMP_TASK_GROUP */
9358
9359 #ifdef CONFIG_GROUP_SCHED_WEIGHT
9360 static unsigned long tg_weight(struct task_group *tg)
9361 {
9362 #ifdef CONFIG_FAIR_GROUP_SCHED
9363 return scale_load_down(tg->shares);
9364 #else
9365 return sched_weight_from_cgroup(tg->scx_weight);
9366 #endif
9367 }
9368
9369 static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
9370 struct cftype *cftype, u64 shareval)
9371 {
9372 int ret;
9373
9374 if (shareval > scale_load_down(ULONG_MAX))
9375 shareval = MAX_SHARES;
9376 ret = sched_group_set_shares(css_tg(css), scale_load(shareval));
9377 if (!ret)
9378 scx_group_set_weight(css_tg(css),
9379 sched_weight_to_cgroup(shareval));
9380 return ret;
9381 }
9382
9383 static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
9384 struct cftype *cft)
9385 {
9386 return tg_weight(css_tg(css));
9387 }
9388 #endif /* CONFIG_GROUP_SCHED_WEIGHT */
9389
9390 #ifdef CONFIG_CFS_BANDWIDTH
9391 static DEFINE_MUTEX(cfs_constraints_mutex);
9392
9393 const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
9394 static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
9395 /* More than 203 days if BW_SHIFT equals 20. */
9396 static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC;
9397
9398 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
9399
9400 static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota,
9401 u64 burst)
9402 {
9403 int i, ret = 0, runtime_enabled, runtime_was_enabled;
9404 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9405
9406 if (tg == &root_task_group)
9407 return -EINVAL;
9408
9409 /*
9410 * Ensure we have at least some amount of bandwidth every period. This is
9411 * to prevent reaching a state of large arrears when throttled via
9412 * entity_tick() resulting in prolonged exit starvation.
9413 */
9414 if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
9415 return -EINVAL;
9416
9417 /*
9418 * Likewise, bound things on the other side by preventing insane quota
9419 * periods. This also allows us to normalize in computing quota
9420 * feasibility.
9421 */
9422 if (period > max_cfs_quota_period)
9423 return -EINVAL;
9424
9425 /*
9426 * Bound quota to defend quota against overflow during bandwidth shift.
9427 */
9428 if (quota != RUNTIME_INF && quota > max_cfs_runtime)
9429 return -EINVAL;
9430
9431 if (quota != RUNTIME_INF && (burst > quota ||
9432 burst + quota > max_cfs_runtime))
9433 return -EINVAL;
9434
9435 /*
9436 * Prevent race between setting of cfs_rq->runtime_enabled and
9437 * unthrottle_offline_cfs_rqs().
9438 */
9439 guard(cpus_read_lock)();
9440 guard(mutex)(&cfs_constraints_mutex);
9441
9442 ret = __cfs_schedulable(tg, period, quota);
9443 if (ret)
9444 return ret;
9445
9446 runtime_enabled = quota != RUNTIME_INF;
9447 runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
9448 /*
9449 * If we need to toggle cfs_bandwidth_used, off->on must occur
9450 * before making related changes, and on->off must occur afterwards
9451 */
9452 if (runtime_enabled && !runtime_was_enabled)
9453 cfs_bandwidth_usage_inc();
9454
9455 scoped_guard (raw_spinlock_irq, &cfs_b->lock) {
9456 cfs_b->period = ns_to_ktime(period);
9457 cfs_b->quota = quota;
9458 cfs_b->burst = burst;
9459
9460 __refill_cfs_bandwidth_runtime(cfs_b);
9461
9462 /*
9463 * Restart the period timer (if active) to handle new
9464 * period expiry:
9465 */
9466 if (runtime_enabled)
9467 start_cfs_bandwidth(cfs_b);
9468 }
9469
9470 for_each_online_cpu(i) {
9471 struct cfs_rq *cfs_rq = tg->cfs_rq[i];
9472 struct rq *rq = cfs_rq->rq;
9473
9474 guard(rq_lock_irq)(rq);
9475 cfs_rq->runtime_enabled = runtime_enabled;
9476 cfs_rq->runtime_remaining = 0;
9477
9478 if (cfs_rq->throttled)
9479 unthrottle_cfs_rq(cfs_rq);
9480 }
9481
9482 if (runtime_was_enabled && !runtime_enabled)
9483 cfs_bandwidth_usage_dec();
9484
9485 return 0;
9486 }
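/*
 * Usage sketch (illustrative, cgroup v1 interface): limiting a group to
 * half a CPU can be done by giving it 50ms of quota every 100ms period,
 * subject to the bounds checked above (period within [1ms, 1s], quota at
 * least 1ms, burst no larger than quota):
 *
 *   echo 100000 > cpu.cfs_period_us
 *   echo  50000 > cpu.cfs_quota_us
 *
 * Writing -1 to cpu.cfs_quota_us restores RUNTIME_INF, i.e. no limit.
 */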
9487
9488 static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
9489 {
9490 u64 quota, period, burst;
9491
9492 period = ktime_to_ns(tg->cfs_bandwidth.period);
9493 burst = tg->cfs_bandwidth.burst;
9494 if (cfs_quota_us < 0)
9495 quota = RUNTIME_INF;
9496 else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC)
9497 quota = (u64)cfs_quota_us * NSEC_PER_USEC;
9498 else
9499 return -EINVAL;
9500
9501 return tg_set_cfs_bandwidth(tg, period, quota, burst);
9502 }
9503
9504 static long tg_get_cfs_quota(struct task_group *tg)
9505 {
9506 u64 quota_us;
9507
9508 if (tg->cfs_bandwidth.quota == RUNTIME_INF)
9509 return -1;
9510
9511 quota_us = tg->cfs_bandwidth.quota;
9512 do_div(quota_us, NSEC_PER_USEC);
9513
9514 return quota_us;
9515 }
9516
9517 static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
9518 {
9519 u64 quota, period, burst;
9520
9521 if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC)
9522 return -EINVAL;
9523
9524 period = (u64)cfs_period_us * NSEC_PER_USEC;
9525 quota = tg->cfs_bandwidth.quota;
9526 burst = tg->cfs_bandwidth.burst;
9527
9528 return tg_set_cfs_bandwidth(tg, period, quota, burst);
9529 }
9530
9531 static long tg_get_cfs_period(struct task_group *tg)
9532 {
9533 u64 cfs_period_us;
9534
9535 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
9536 do_div(cfs_period_us, NSEC_PER_USEC);
9537
9538 return cfs_period_us;
9539 }
9540
9541 static int tg_set_cfs_burst(struct task_group *tg, long cfs_burst_us)
9542 {
9543 u64 quota, period, burst;
9544
9545 if ((u64)cfs_burst_us > U64_MAX / NSEC_PER_USEC)
9546 return -EINVAL;
9547
9548 burst = (u64)cfs_burst_us * NSEC_PER_USEC;
9549 period = ktime_to_ns(tg->cfs_bandwidth.period);
9550 quota = tg->cfs_bandwidth.quota;
9551
9552 return tg_set_cfs_bandwidth(tg, period, quota, burst);
9553 }
9554
9555 static long tg_get_cfs_burst(struct task_group *tg)
9556 {
9557 u64 burst_us;
9558
9559 burst_us = tg->cfs_bandwidth.burst;
9560 do_div(burst_us, NSEC_PER_USEC);
9561
9562 return burst_us;
9563 }
9564
9565 static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
9566 struct cftype *cft)
9567 {
9568 return tg_get_cfs_quota(css_tg(css));
9569 }
9570
9571 static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
9572 struct cftype *cftype, s64 cfs_quota_us)
9573 {
9574 return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
9575 }
9576
9577 static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
9578 struct cftype *cft)
9579 {
9580 return tg_get_cfs_period(css_tg(css));
9581 }
9582
9583 static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
9584 struct cftype *cftype, u64 cfs_period_us)
9585 {
9586 return tg_set_cfs_period(css_tg(css), cfs_period_us);
9587 }
9588
9589 static u64 cpu_cfs_burst_read_u64(struct cgroup_subsys_state *css,
9590 struct cftype *cft)
9591 {
9592 return tg_get_cfs_burst(css_tg(css));
9593 }
9594
9595 static int cpu_cfs_burst_write_u64(struct cgroup_subsys_state *css,
9596 struct cftype *cftype, u64 cfs_burst_us)
9597 {
9598 return tg_set_cfs_burst(css_tg(css), cfs_burst_us);
9599 }
9600
9601 struct cfs_schedulable_data {
9602 struct task_group *tg;
9603 u64 period, quota;
9604 };
9605
9606 /*
9607 * normalize group quota/period to be quota/max_period
9608 * note: units are usecs
9609 */
9610 static u64 normalize_cfs_quota(struct task_group *tg,
9611 struct cfs_schedulable_data *d)
9612 {
9613 u64 quota, period;
9614
9615 if (tg == d->tg) {
9616 period = d->period;
9617 quota = d->quota;
9618 } else {
9619 period = tg_get_cfs_period(tg);
9620 quota = tg_get_cfs_quota(tg);
9621 }
9622
9623 /* note: these should typically be equivalent */
9624 if (quota == RUNTIME_INF || quota == -1)
9625 return RUNTIME_INF;
9626
9627 return to_ratio(period, quota);
9628 }
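/*
 * Example (illustrative): for a group with cpu.cfs_quota_us == 50000 and
 * cpu.cfs_period_us == 100000, the normalized value is
 * to_ratio(100000, 50000), i.e. roughly half of BW_UNIT, which can then
 * be compared directly against the parent's hierarchical_quota below.
 */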
9629
9630 static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
9631 {
9632 struct cfs_schedulable_data *d = data;
9633 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9634 s64 quota = 0, parent_quota = -1;
9635
9636 if (!tg->parent) {
9637 quota = RUNTIME_INF;
9638 } else {
9639 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
9640
9641 quota = normalize_cfs_quota(tg, d);
9642 parent_quota = parent_b->hierarchical_quota;
9643
9644 /*
9645 * Ensure max(child_quota) <= parent_quota. On cgroup2,
9646 * always take the non-RUNTIME_INF min. On cgroup1, only
9647 * inherit when no limit is set. In both cases this is used
9648 * by the scheduler to determine if a given CFS task has a
9649 * bandwidth constraint at some higher level.
9650 */
9651 if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) {
9652 if (quota == RUNTIME_INF)
9653 quota = parent_quota;
9654 else if (parent_quota != RUNTIME_INF)
9655 quota = min(quota, parent_quota);
9656 } else {
9657 if (quota == RUNTIME_INF)
9658 quota = parent_quota;
9659 else if (parent_quota != RUNTIME_INF && quota > parent_quota)
9660 return -EINVAL;
9661 }
9662 }
9663 cfs_b->hierarchical_quota = quota;
9664
9665 return 0;
9666 }
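/*
 * Example of the hierarchy constraint (illustrative): with a parent quota
 * equivalent to 100ms per 100ms period, a child asking for 150ms/100ms is
 * rejected with -EINVAL on cgroup1, while on cgroup2 the child keeps its
 * request but its hierarchical_quota is clamped down to the parent's
 * value, so the scheduler still sees the tighter constraint.
 */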
9667
9668 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
9669 {
9670 struct cfs_schedulable_data data = {
9671 .tg = tg,
9672 .period = period,
9673 .quota = quota,
9674 };
9675
9676 if (quota != RUNTIME_INF) {
9677 do_div(data.period, NSEC_PER_USEC);
9678 do_div(data.quota, NSEC_PER_USEC);
9679 }
9680
9681 guard(rcu)();
9682 return walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
9683 }
9684
9685 static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
9686 {
9687 struct task_group *tg = css_tg(seq_css(sf));
9688 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9689
9690 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
9691 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
9692 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
9693
9694 if (schedstat_enabled() && tg != &root_task_group) {
9695 struct sched_statistics *stats;
9696 u64 ws = 0;
9697 int i;
9698
9699 for_each_possible_cpu(i) {
9700 stats = __schedstats_from_se(tg->se[i]);
9701 ws += schedstat_val(stats->wait_sum);
9702 }
9703
9704 seq_printf(sf, "wait_sum %llu\n", ws);
9705 }
9706
9707 seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst);
9708 seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time);
9709
9710 return 0;
9711 }
9712
9713 static u64 throttled_time_self(struct task_group *tg)
9714 {
9715 int i;
9716 u64 total = 0;
9717
9718 for_each_possible_cpu(i) {
9719 total += READ_ONCE(tg->cfs_rq[i]->throttled_clock_self_time);
9720 }
9721
9722 return total;
9723 }
9724
9725 static int cpu_cfs_local_stat_show(struct seq_file *sf, void *v)
9726 {
9727 struct task_group *tg = css_tg(seq_css(sf));
9728
9729 seq_printf(sf, "throttled_time %llu\n", throttled_time_self(tg));
9730
9731 return 0;
9732 }
9733 #endif /* CONFIG_CFS_BANDWIDTH */
9734
9735 #ifdef CONFIG_RT_GROUP_SCHED
9736 static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
9737 struct cftype *cft, s64 val)
9738 {
9739 return sched_group_set_rt_runtime(css_tg(css), val);
9740 }
9741
9742 static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
9743 struct cftype *cft)
9744 {
9745 return sched_group_rt_runtime(css_tg(css));
9746 }
9747
9748 static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
9749 struct cftype *cftype, u64 rt_period_us)
9750 {
9751 return sched_group_set_rt_period(css_tg(css), rt_period_us);
9752 }
9753
9754 static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
9755 struct cftype *cft)
9756 {
9757 return sched_group_rt_period(css_tg(css));
9758 }
9759 #endif /* CONFIG_RT_GROUP_SCHED */
9760
9761 #ifdef CONFIG_GROUP_SCHED_WEIGHT
9762 static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css,
9763 struct cftype *cft)
9764 {
9765 return css_tg(css)->idle;
9766 }
9767
9768 static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
9769 struct cftype *cft, s64 idle)
9770 {
9771 int ret;
9772
9773 ret = sched_group_set_idle(css_tg(css), idle);
9774 if (!ret)
9775 scx_group_set_idle(css_tg(css), idle);
9776 return ret;
9777 }
9778 #endif
9779
9780 static struct cftype cpu_legacy_files[] = {
9781 #ifdef CONFIG_GROUP_SCHED_WEIGHT
9782 {
9783 .name = "shares",
9784 .read_u64 = cpu_shares_read_u64,
9785 .write_u64 = cpu_shares_write_u64,
9786 },
9787 {
9788 .name = "idle",
9789 .read_s64 = cpu_idle_read_s64,
9790 .write_s64 = cpu_idle_write_s64,
9791 },
9792 #endif
9793 #ifdef CONFIG_CFS_BANDWIDTH
9794 {
9795 .name = "cfs_quota_us",
9796 .read_s64 = cpu_cfs_quota_read_s64,
9797 .write_s64 = cpu_cfs_quota_write_s64,
9798 },
9799 {
9800 .name = "cfs_period_us",
9801 .read_u64 = cpu_cfs_period_read_u64,
9802 .write_u64 = cpu_cfs_period_write_u64,
9803 },
9804 {
9805 .name = "cfs_burst_us",
9806 .read_u64 = cpu_cfs_burst_read_u64,
9807 .write_u64 = cpu_cfs_burst_write_u64,
9808 },
9809 {
9810 .name = "stat",
9811 .seq_show = cpu_cfs_stat_show,
9812 },
9813 {
9814 .name = "stat.local",
9815 .seq_show = cpu_cfs_local_stat_show,
9816 },
9817 #endif
9818 #ifdef CONFIG_RT_GROUP_SCHED
9819 {
9820 .name = "rt_runtime_us",
9821 .read_s64 = cpu_rt_runtime_read,
9822 .write_s64 = cpu_rt_runtime_write,
9823 },
9824 {
9825 .name = "rt_period_us",
9826 .read_u64 = cpu_rt_period_read_uint,
9827 .write_u64 = cpu_rt_period_write_uint,
9828 },
9829 #endif
9830 #ifdef CONFIG_UCLAMP_TASK_GROUP
9831 {
9832 .name = "uclamp.min",
9833 .flags = CFTYPE_NOT_ON_ROOT,
9834 .seq_show = cpu_uclamp_min_show,
9835 .write = cpu_uclamp_min_write,
9836 },
9837 {
9838 .name = "uclamp.max",
9839 .flags = CFTYPE_NOT_ON_ROOT,
9840 .seq_show = cpu_uclamp_max_show,
9841 .write = cpu_uclamp_max_write,
9842 },
9843 #endif
9844 { } /* Terminate */
9845 };
9846
9847 static int cpu_extra_stat_show(struct seq_file *sf,
9848 struct cgroup_subsys_state *css)
9849 {
9850 #ifdef CONFIG_CFS_BANDWIDTH
9851 {
9852 struct task_group *tg = css_tg(css);
9853 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9854 u64 throttled_usec, burst_usec;
9855
9856 throttled_usec = cfs_b->throttled_time;
9857 do_div(throttled_usec, NSEC_PER_USEC);
9858 burst_usec = cfs_b->burst_time;
9859 do_div(burst_usec, NSEC_PER_USEC);
9860
9861 seq_printf(sf, "nr_periods %d\n"
9862 "nr_throttled %d\n"
9863 "throttled_usec %llu\n"
9864 "nr_bursts %d\n"
9865 "burst_usec %llu\n",
9866 cfs_b->nr_periods, cfs_b->nr_throttled,
9867 throttled_usec, cfs_b->nr_burst, burst_usec);
9868 }
9869 #endif
9870 return 0;
9871 }
9872
9873 static int cpu_local_stat_show(struct seq_file *sf,
9874 struct cgroup_subsys_state *css)
9875 {
9876 #ifdef CONFIG_CFS_BANDWIDTH
9877 {
9878 struct task_group *tg = css_tg(css);
9879 u64 throttled_self_usec;
9880
9881 throttled_self_usec = throttled_time_self(tg);
9882 do_div(throttled_self_usec, NSEC_PER_USEC);
9883
9884 seq_printf(sf, "throttled_usec %llu\n",
9885 throttled_self_usec);
9886 }
9887 #endif
9888 return 0;
9889 }
9890
9891 #ifdef CONFIG_GROUP_SCHED_WEIGHT
9892
9893 static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
9894 struct cftype *cft)
9895 {
9896 return sched_weight_to_cgroup(tg_weight(css_tg(css)));
9897 }
9898
9899 static int cpu_weight_write_u64(struct cgroup_subsys_state *css,
9900 struct cftype *cft, u64 cgrp_weight)
9901 {
9902 unsigned long weight;
9903 int ret;
9904
9905 if (cgrp_weight < CGROUP_WEIGHT_MIN || cgrp_weight > CGROUP_WEIGHT_MAX)
9906 return -ERANGE;
9907
9908 weight = sched_weight_from_cgroup(cgrp_weight);
9909
9910 ret = sched_group_set_shares(css_tg(css), scale_load(weight));
9911 if (!ret)
9912 scx_group_set_weight(css_tg(css), cgrp_weight);
9913 return ret;
9914 }
9915
9916 static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css,
9917 struct cftype *cft)
9918 {
9919 unsigned long weight = tg_weight(css_tg(css));
9920 int last_delta = INT_MAX;
9921 int prio, delta;
9922
9923 /* find the closest nice value to the current weight */
9924 for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) {
9925 delta = abs(sched_prio_to_weight[prio] - weight);
9926 if (delta >= last_delta)
9927 break;
9928 last_delta = delta;
9929 }
9930
9931 return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO);
9932 }
9933
9934 static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
9935 struct cftype *cft, s64 nice)
9936 {
9937 unsigned long weight;
9938 int idx, ret;
9939
9940 if (nice < MIN_NICE || nice > MAX_NICE)
9941 return -ERANGE;
9942
9943 idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO;
9944 idx = array_index_nospec(idx, 40);
9945 weight = sched_prio_to_weight[idx];
9946
9947 ret = sched_group_set_shares(css_tg(css), scale_load(weight));
9948 if (!ret)
9949 scx_group_set_weight(css_tg(css),
9950 sched_weight_to_cgroup(weight));
9951 return ret;
9952 }
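/*
 * Mapping sketch (illustrative, assuming the usual CGROUP_WEIGHT_DFL of
 * 100 corresponding to the nice-0 weight of 1024): writing 100 to
 * cpu.weight keeps the default shares, while cpu.weight.nice accepts a
 * value in [-20, 19] and picks the corresponding entry of
 * sched_prio_to_weight[], e.g. nice 5 selects index 25 and thus a weight
 * of 335.
 */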
9953 #endif /* CONFIG_GROUP_SCHED_WEIGHT */
9954
9955 static void __maybe_unused cpu_period_quota_print(struct seq_file *sf,
9956 long period, long quota)
9957 {
9958 if (quota < 0)
9959 seq_puts(sf, "max");
9960 else
9961 seq_printf(sf, "%ld", quota);
9962
9963 seq_printf(sf, " %ld\n", period);
9964 }
9965
9966 /* caller should put the current value in *@periodp before calling */
9967 static int __maybe_unused cpu_period_quota_parse(char *buf,
9968 u64 *periodp, u64 *quotap)
9969 {
9970 char tok[21]; /* U64_MAX */
9971
9972 if (sscanf(buf, "%20s %llu", tok, periodp) < 1)
9973 return -EINVAL;
9974
9975 *periodp *= NSEC_PER_USEC;
9976
9977 if (sscanf(tok, "%llu", quotap))
9978 *quotap *= NSEC_PER_USEC;
9979 else if (!strcmp(tok, "max"))
9980 *quotap = RUNTIME_INF;
9981 else
9982 return -EINVAL;
9983
9984 return 0;
9985 }
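/*
 * Accepted cpu.max formats (illustrative examples): "max 100000" keeps
 * the quota unlimited with a 100ms period, "50000 100000" requests 50ms
 * of quota per 100ms period, and a single "50000" updates only the quota
 * while the caller-supplied period in *@periodp is left untouched.
 */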
9986
9987 #ifdef CONFIG_CFS_BANDWIDTH
9988 static int cpu_max_show(struct seq_file *sf, void *v)
9989 {
9990 struct task_group *tg = css_tg(seq_css(sf));
9991
9992 cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg));
9993 return 0;
9994 }
9995
9996 static ssize_t cpu_max_write(struct kernfs_open_file *of,
9997 char *buf, size_t nbytes, loff_t off)
9998 {
9999 struct task_group *tg = css_tg(of_css(of));
10000 u64 period = tg_get_cfs_period(tg);
10001 u64 burst = tg->cfs_bandwidth.burst;
10002 u64 quota;
10003 int ret;
10004
10005 ret = cpu_period_quota_parse(buf, &period, "a);
10006 if (!ret)
10007 ret = tg_set_cfs_bandwidth(tg, period, quota, burst);
10008 return ret ?: nbytes;
10009 }
10010 #endif
10011
10012 static struct cftype cpu_files[] = {
10013 #ifdef CONFIG_GROUP_SCHED_WEIGHT
10014 {
10015 .name = "weight",
10016 .flags = CFTYPE_NOT_ON_ROOT,
10017 .read_u64 = cpu_weight_read_u64,
10018 .write_u64 = cpu_weight_write_u64,
10019 },
10020 {
10021 .name = "weight.nice",
10022 .flags = CFTYPE_NOT_ON_ROOT,
10023 .read_s64 = cpu_weight_nice_read_s64,
10024 .write_s64 = cpu_weight_nice_write_s64,
10025 },
10026 {
10027 .name = "idle",
10028 .flags = CFTYPE_NOT_ON_ROOT,
10029 .read_s64 = cpu_idle_read_s64,
10030 .write_s64 = cpu_idle_write_s64,
10031 },
10032 #endif
10033 #ifdef CONFIG_CFS_BANDWIDTH
10034 {
10035 .name = "max",
10036 .flags = CFTYPE_NOT_ON_ROOT,
10037 .seq_show = cpu_max_show,
10038 .write = cpu_max_write,
10039 },
10040 {
10041 .name = "max.burst",
10042 .flags = CFTYPE_NOT_ON_ROOT,
10043 .read_u64 = cpu_cfs_burst_read_u64,
10044 .write_u64 = cpu_cfs_burst_write_u64,
10045 },
10046 #endif
10047 #ifdef CONFIG_UCLAMP_TASK_GROUP
10048 {
10049 .name = "uclamp.min",
10050 .flags = CFTYPE_NOT_ON_ROOT,
10051 .seq_show = cpu_uclamp_min_show,
10052 .write = cpu_uclamp_min_write,
10053 },
10054 {
10055 .name = "uclamp.max",
10056 .flags = CFTYPE_NOT_ON_ROOT,
10057 .seq_show = cpu_uclamp_max_show,
10058 .write = cpu_uclamp_max_write,
10059 },
10060 #endif
10061 { } /* terminate */
10062 };
10063
10064 struct cgroup_subsys cpu_cgrp_subsys = {
10065 .css_alloc = cpu_cgroup_css_alloc,
10066 .css_online = cpu_cgroup_css_online,
10067 .css_offline = cpu_cgroup_css_offline,
10068 .css_released = cpu_cgroup_css_released,
10069 .css_free = cpu_cgroup_css_free,
10070 .css_extra_stat_show = cpu_extra_stat_show,
10071 .css_local_stat_show = cpu_local_stat_show,
10072 .can_attach = cpu_cgroup_can_attach,
10073 .attach = cpu_cgroup_attach,
10074 .cancel_attach = cpu_cgroup_cancel_attach,
10075 .legacy_cftypes = cpu_legacy_files,
10076 .dfl_cftypes = cpu_files,
10077 .early_init = true,
10078 .threaded = true,
10079 };
10080
10081 #endif /* CONFIG_CGROUP_SCHED */
10082
10083 void dump_cpu_task(int cpu)
10084 {
10085 if (in_hardirq() && cpu == smp_processor_id()) {
10086 struct pt_regs *regs;
10087
10088 regs = get_irq_regs();
10089 if (regs) {
10090 show_regs(regs);
10091 return;
10092 }
10093 }
10094
10095 if (trigger_single_cpu_backtrace(cpu))
10096 return;
10097
10098 pr_info("Task dump for CPU %d:\n", cpu);
10099 sched_show_task(cpu_curr(cpu));
10100 }
10101
10102 /*
10103 * Nice levels are multiplicative, with a gentle 10% change for every
10104 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
10105 * nice 1, it will get ~10% less CPU time than another CPU-bound task
10106 * that remained on nice 0.
10107 *
10108 * The "10% effect" is relative and cumulative: from _any_ nice level,
10109 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
10110 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
10111 * If a task goes up by ~10% and another task goes down by ~10% then
10112 * the relative distance between them is ~25%.)
10113 */
10114 const int sched_prio_to_weight[40] = {
10115 /* -20 */ 88761, 71755, 56483, 46273, 36291,
10116 /* -15 */ 29154, 23254, 18705, 14949, 11916,
10117 /* -10 */ 9548, 7620, 6100, 4904, 3906,
10118 /* -5 */ 3121, 2501, 1991, 1586, 1277,
10119 /* 0 */ 1024, 820, 655, 526, 423,
10120 /* 5 */ 335, 272, 215, 172, 137,
10121 /* 10 */ 110, 87, 70, 56, 45,
10122 /* 15 */ 36, 29, 23, 18, 15,
10123 };
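/*
 * Worked example (illustrative): the nice-0 weight is 1024 and the nice-1
 * weight is 820; 1024/820 is roughly 1.25, so two CPU-bound tasks one nice
 * level apart split the CPU approximately 55%/45%, which is the ~10%
 * relative effect described above.
 */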
10124
10125 /*
10126 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, pre-calculated.
10127 *
10128 * In cases where the weight does not change often, we can use the
10129 * pre-calculated inverse to speed up arithmetics by turning divisions
10130 * into multiplications:
10131 */
10132 const u32 sched_prio_to_wmult[40] = {
10133 /* -20 */ 48388, 59856, 76040, 92818, 118348,
10134 /* -15 */ 147320, 184698, 229616, 287308, 360437,
10135 /* -10 */ 449829, 563644, 704093, 875809, 1099582,
10136 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
10137 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
10138 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
10139 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
10140 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
10141 };
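/*
 * Example (illustrative): the nice-0 entry is 2^32 / 1024 == 4194304, so a
 * division by the nice-0 weight can be replaced by a multiplication by
 * 4194304 followed by a 32-bit right shift.
 */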
10142
10143 void call_trace_sched_update_nr_running(struct rq *rq, int count)
10144 {
10145 trace_sched_update_nr_running_tp(rq, count);
10146 }
10147
10148 #ifdef CONFIG_SCHED_MM_CID
10149
10150 /*
10151 * @cid_lock: Guarantee forward-progress of cid allocation.
10152 *
10153 * Concurrency ID allocation within a bitmap is mostly lock-free. The cid_lock
10154 * is only used when contention is detected by the lock-free allocation so
10155 * forward progress can be guaranteed.
10156 */
10157 DEFINE_RAW_SPINLOCK(cid_lock);
10158
10159 /*
10160 * @use_cid_lock: Select cid allocation behavior: lock-free vs spinlock.
10161 *
10162 * When @use_cid_lock is 0, the cid allocation is lock-free. When contention is
10163 * detected, it is set to 1 to ensure that all newly coming allocations are
10164 * serialized by @cid_lock until the allocation which detected contention
10165 * completes and sets @use_cid_lock back to 0. This guarantees forward progress
10166 * of a cid allocation.
10167 */
10168 int use_cid_lock;
10169
10170 /*
10171 * mm_cid remote-clear implements a lock-free algorithm to clear per-mm/cpu cid
10172 * concurrently with respect to the execution of the source runqueue context
10173 * switch.
10174 *
10175 * There is one basic property we want to guarantee here:
10176 *
10177 * (1) Remote-clear should _never_ mark a per-cpu cid UNSET when it is actively
10178 * used by a task. That would lead to concurrent allocation of the cid and
10179 * userspace corruption.
10180 *
10181 * Provide this guarantee by introducing a Dekker memory ordering to guarantee
10182 * that a pair of loads observe at least one of a pair of stores, which can be
10183 * shown as:
10184 *
10185 * X = Y = 0
10186 *
10187 * w[X]=1 w[Y]=1
10188 * MB MB
10189 * r[Y]=y r[X]=x
10190 *
10191 * Which guarantees that x==0 && y==0 is impossible. But rather than using
10192 * values 0 and 1, this algorithm cares about specific state transitions of the
10193 * runqueue current task (as updated by the scheduler context switch), and the
10194 * per-mm/cpu cid value.
10195 *
10196 * Let's introduce task (Y) which has task->mm == mm and task (N) which has
10197 * task->mm != mm for the rest of the discussion. There are two scheduler state
10198 * transitions on context switch we care about:
10199 *
10200 * (TSA) Store to rq->curr with transition from (N) to (Y)
10201 *
10202 * (TSB) Store to rq->curr with transition from (Y) to (N)
10203 *
10204 * On the remote-clear side, there is one transition we care about:
10205 *
10206 * (TMA) cmpxchg to *pcpu_cid to set the LAZY flag
10207 *
10208 * There is also a transition to UNSET state which can be performed from all
10209 * sides (scheduler, remote-clear). It is always performed with a cmpxchg which
10210 * guarantees that only a single thread will succeed:
10211 *
10212 * (TMB) cmpxchg to *pcpu_cid to mark UNSET
10213 *
10214 * Just to be clear, what we do _not_ want to happen is a transition to UNSET
10215 * when a thread is actively using the cid (property (1)).
10216 *
10217 * Let's look at the relevant combinations of TSA/TSB, and TMA transitions.
10218 *
10219 * Scenario A) (TSA)+(TMA) (from next task perspective)
10220 *
10221 * CPU0 CPU1
10222 *
10223 * Context switch CS-1 Remote-clear
10224 * - store to rq->curr: (N)->(Y) (TSA) - cmpxchg to *pcpu_id to LAZY (TMA)
10225 * (implied barrier after cmpxchg)
10226 * - switch_mm_cid()
10227 * - memory barrier (see switch_mm_cid()
10228 * comment explaining how this barrier
10229 * is combined with other scheduler
10230 * barriers)
10231 * - mm_cid_get (next)
10232 * - READ_ONCE(*pcpu_cid) - rcu_dereference(src_rq->curr)
10233 *
10234 * This Dekker ensures that either task (Y) is observed by the
10235 * rcu_dereference() or the LAZY flag is observed by READ_ONCE(), or both are
10236 * observed.
10237 *
10238 * If task (Y) store is observed by rcu_dereference(), it means that there is
10239 * still an active task on the cpu. Remote-clear will therefore not transition
10240 * to UNSET, which fulfills property (1).
10241 *
10242 * If task (Y) is not observed, but the lazy flag is observed by READ_ONCE(),
10243 * it will move its state to UNSET, which clears the percpu cid perhaps
10244 * uselessly (which is not an issue for correctness). Because task (Y) is not
10245 * observed, CPU1 can move ahead to set the state to UNSET. Because moving
10246 * state to UNSET is done with a cmpxchg expecting that the old state has the
10247 * LAZY flag set, only one thread will successfully UNSET.
10248 *
10249 * If both states (LAZY flag and task (Y)) are observed, the thread on CPU0
10250 * will observe the LAZY flag and transition to UNSET (perhaps uselessly), and
10251 * CPU1 will observe task (Y) and do nothing more, which is fine.
10252 *
10253 * What we are effectively preventing with this Dekker is a scenario where
10254 * neither LAZY flag nor store (Y) are observed, which would fail property (1)
10255 * because this would UNSET a cid which is actively used.
10256 */
10257
10258 void sched_mm_cid_migrate_from(struct task_struct *t)
10259 {
10260 t->migrate_from_cpu = task_cpu(t);
10261 }
10262
10263 static
10264 int __sched_mm_cid_migrate_from_fetch_cid(struct rq *src_rq,
10265 struct task_struct *t,
10266 struct mm_cid *src_pcpu_cid)
10267 {
10268 struct mm_struct *mm = t->mm;
10269 struct task_struct *src_task;
10270 int src_cid, last_mm_cid;
10271
10272 if (!mm)
10273 return -1;
10274
10275 last_mm_cid = t->last_mm_cid;
10276 /*
10277 * If the migrated task has no last cid, or if the current
10278 * task on src rq uses the cid, it means the source cid does not need
10279 * to be moved to the destination cpu.
10280 */
10281 if (last_mm_cid == -1)
10282 return -1;
10283 src_cid = READ_ONCE(src_pcpu_cid->cid);
10284 if (!mm_cid_is_valid(src_cid) || last_mm_cid != src_cid)
10285 return -1;
10286
10287 /*
10288 * If we observe an active task using the mm on this rq, it means we
10289 * are not the last task to be migrated from this cpu for this mm, so
10290 * there is no need to move src_cid to the destination cpu.
10291 */
10292 guard(rcu)();
10293 src_task = rcu_dereference(src_rq->curr);
10294 if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
10295 t->last_mm_cid = -1;
10296 return -1;
10297 }
10298
10299 return src_cid;
10300 }
10301
10302 static
10303 int __sched_mm_cid_migrate_from_try_steal_cid(struct rq *src_rq,
10304 struct task_struct *t,
10305 struct mm_cid *src_pcpu_cid,
10306 int src_cid)
10307 {
10308 struct task_struct *src_task;
10309 struct mm_struct *mm = t->mm;
10310 int lazy_cid;
10311
10312 if (src_cid == -1)
10313 return -1;
10314
10315 /*
10316 * Attempt to clear the source cpu cid to move it to the destination
10317 * cpu.
10318 */
10319 lazy_cid = mm_cid_set_lazy_put(src_cid);
10320 if (!try_cmpxchg(&src_pcpu_cid->cid, &src_cid, lazy_cid))
10321 return -1;
10322
10323 /*
10324 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
10325 * rq->curr->mm matches the scheduler barrier in context_switch()
10326 * between store to rq->curr and load of prev and next task's
10327 * per-mm/cpu cid.
10328 *
10329 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
10330 * rq->curr->mm_cid_active matches the barrier in
10331 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
10332 * sched_mm_cid_after_execve() between store to t->mm_cid_active and
10333 * load of per-mm/cpu cid.
10334 */
10335
10336 /*
10337 * If we observe an active task using the mm on this rq after setting
10338 * the lazy-put flag, this task will be responsible for transitioning
10339 * from lazy-put flag set to MM_CID_UNSET.
10340 */
10341 scoped_guard (rcu) {
10342 src_task = rcu_dereference(src_rq->curr);
10343 if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
10344 /*
10345 * We observed an active task for this mm, there is therefore
10346 * no point in moving this cid to the destination cpu.
10347 */
10348 t->last_mm_cid = -1;
10349 return -1;
10350 }
10351 }
10352
10353 /*
10354 * The src_cid is unused, so it can be unset.
10355 */
10356 if (!try_cmpxchg(&src_pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
10357 return -1;
10358 WRITE_ONCE(src_pcpu_cid->recent_cid, MM_CID_UNSET);
10359 return src_cid;
10360 }
10361
10362 /*
10363 * Migration to dst cpu. Called with dst_rq lock held.
10364 * Interrupts are disabled, which keeps the window of cid ownership without the
10365 * source rq lock held small.
10366 */
10367 void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t)
10368 {
10369 struct mm_cid *src_pcpu_cid, *dst_pcpu_cid;
10370 struct mm_struct *mm = t->mm;
10371 int src_cid, src_cpu;
10372 bool dst_cid_is_set;
10373 struct rq *src_rq;
10374
10375 lockdep_assert_rq_held(dst_rq);
10376
10377 if (!mm)
10378 return;
10379 src_cpu = t->migrate_from_cpu;
10380 if (src_cpu == -1) {
10381 t->last_mm_cid = -1;
10382 return;
10383 }
10384 /*
10385 * Move the src cid if the dst cid is unset. This keeps id
10386 * allocation closest to 0 in cases where few threads migrate around
10387 * many CPUs.
10388 *
10389 * If destination cid or recent cid is already set, we may have
10390 * to just clear the src cid to ensure compactness in frequent
10391 * migrations scenarios.
10392 *
10393 * It is not useful to clear the src cid when the number of threads is
10394 * greater or equal to the number of allowed CPUs, because user-space
10395 * can expect that the number of allowed cids can reach the number of
10396 * allowed CPUs.
10397 */
10398 dst_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(dst_rq));
10399 dst_cid_is_set = !mm_cid_is_unset(READ_ONCE(dst_pcpu_cid->cid)) ||
10400 !mm_cid_is_unset(READ_ONCE(dst_pcpu_cid->recent_cid));
10401 if (dst_cid_is_set && atomic_read(&mm->mm_users) >= READ_ONCE(mm->nr_cpus_allowed))
10402 return;
10403 src_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, src_cpu);
10404 src_rq = cpu_rq(src_cpu);
10405 src_cid = __sched_mm_cid_migrate_from_fetch_cid(src_rq, t, src_pcpu_cid);
10406 if (src_cid == -1)
10407 return;
10408 src_cid = __sched_mm_cid_migrate_from_try_steal_cid(src_rq, t, src_pcpu_cid,
10409 src_cid);
10410 if (src_cid == -1)
10411 return;
10412 if (dst_cid_is_set) {
10413 __mm_cid_put(mm, src_cid);
10414 return;
10415 }
10416 /* Move src_cid to dst cpu. */
10417 mm_cid_snapshot_time(dst_rq, mm);
10418 WRITE_ONCE(dst_pcpu_cid->cid, src_cid);
10419 WRITE_ONCE(dst_pcpu_cid->recent_cid, src_cid);
10420 }
10421
10422 static void sched_mm_cid_remote_clear(struct mm_struct *mm, struct mm_cid *pcpu_cid,
10423 int cpu)
10424 {
10425 struct rq *rq = cpu_rq(cpu);
10426 struct task_struct *t;
10427 int cid, lazy_cid;
10428
10429 cid = READ_ONCE(pcpu_cid->cid);
10430 if (!mm_cid_is_valid(cid))
10431 return;
10432
10433 /*
10434 * Clear the cpu cid if it is set to keep cid allocation compact. If
10435 * there happens to be other tasks left on the source cpu using this
10436 * mm, the next task using this mm will reallocate its cid on context
10437 * switch.
10438 */
10439 lazy_cid = mm_cid_set_lazy_put(cid);
10440 if (!try_cmpxchg(&pcpu_cid->cid, &cid, lazy_cid))
10441 return;
10442
10443 /*
10444 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
10445 * rq->curr->mm matches the scheduler barrier in context_switch()
10446 * between store to rq->curr and load of prev and next task's
10447 * per-mm/cpu cid.
10448 *
10449 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
10450 * rq->curr->mm_cid_active matches the barrier in
10451 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
10452 * sched_mm_cid_after_execve() between store to t->mm_cid_active and
10453 * load of per-mm/cpu cid.
10454 */
10455
10456 /*
10457 * If we observe an active task using the mm on this rq after setting
10458 * the lazy-put flag, that task will be responsible for transitioning
10459 * from lazy-put flag set to MM_CID_UNSET.
10460 */
10461 scoped_guard (rcu) {
10462 t = rcu_dereference(rq->curr);
10463 if (READ_ONCE(t->mm_cid_active) && t->mm == mm)
10464 return;
10465 }
10466
10467 /*
10468 * The cid is unused, so it can be unset.
10469 * Disable interrupts to keep the window of cid ownership without rq
10470 * lock small.
10471 */
10472 scoped_guard (irqsave) {
10473 if (try_cmpxchg(&pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
10474 __mm_cid_put(mm, cid);
10475 }
10476 }
10477
10478 static void sched_mm_cid_remote_clear_old(struct mm_struct *mm, int cpu)
10479 {
10480 struct rq *rq = cpu_rq(cpu);
10481 struct mm_cid *pcpu_cid;
10482 struct task_struct *curr;
10483 u64 rq_clock;
10484
10485 /*
10486 * rq->clock load is racy on 32-bit but one spurious clear once in a
10487 * while is irrelevant.
10488 */
10489 rq_clock = READ_ONCE(rq->clock);
10490 pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
10491
10492 /*
10493 * In order to take care of infrequently scheduled tasks, bump the time
10494 * snapshot associated with this cid if an active task using the mm is
10495 * observed on this rq.
10496 */
10497 scoped_guard (rcu) {
10498 curr = rcu_dereference(rq->curr);
10499 if (READ_ONCE(curr->mm_cid_active) && curr->mm == mm) {
10500 WRITE_ONCE(pcpu_cid->time, rq_clock);
10501 return;
10502 }
10503 }
10504
10505 if (rq_clock < pcpu_cid->time + SCHED_MM_CID_PERIOD_NS)
10506 return;
10507 sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
10508 }
10509
10510 static void sched_mm_cid_remote_clear_weight(struct mm_struct *mm, int cpu,
10511 int weight)
10512 {
10513 struct mm_cid *pcpu_cid;
10514 int cid;
10515
10516 pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
10517 cid = READ_ONCE(pcpu_cid->cid);
10518 if (!mm_cid_is_valid(cid) || cid < weight)
10519 return;
10520 sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
10521 }
10522
10523 static void task_mm_cid_work(struct callback_head *work)
10524 {
10525 unsigned long now = jiffies, old_scan, next_scan;
10526 struct task_struct *t = current;
10527 struct cpumask *cidmask;
10528 struct mm_struct *mm;
10529 int weight, cpu;
10530
10531 SCHED_WARN_ON(t != container_of(work, struct task_struct, cid_work));
10532
10533 work->next = work; /* Prevent double-add */
10534 if (t->flags & PF_EXITING)
10535 return;
10536 mm = t->mm;
10537 if (!mm)
10538 return;
10539 old_scan = READ_ONCE(mm->mm_cid_next_scan);
10540 next_scan = now + msecs_to_jiffies(MM_CID_SCAN_DELAY);
10541 if (!old_scan) {
10542 unsigned long res;
10543
10544 res = cmpxchg(&mm->mm_cid_next_scan, old_scan, next_scan);
10545 if (res != old_scan)
10546 old_scan = res;
10547 else
10548 old_scan = next_scan;
10549 }
10550 if (time_before(now, old_scan))
10551 return;
10552 if (!try_cmpxchg(&mm->mm_cid_next_scan, &old_scan, next_scan))
10553 return;
10554 cidmask = mm_cidmask(mm);
10555 /* Clear cids that were not recently used. */
10556 for_each_possible_cpu(cpu)
10557 sched_mm_cid_remote_clear_old(mm, cpu);
10558 weight = cpumask_weight(cidmask);
10559 /*
10560 * Clear cids that are greater or equal to the cidmask weight to
10561 * recompact it.
10562 */
10563 for_each_possible_cpu(cpu)
10564 sched_mm_cid_remote_clear_weight(mm, cpu, weight);
10565 }
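/*
 * Compaction example (illustrative): for an mm with 4 threads running on
 * an 8-CPU system, the scan above first drops cids that have not been used
 * within SCHED_MM_CID_PERIOD_NS and then clears any per-cpu cid greater or
 * equal to the cidmask weight, so userspace tends to observe concurrency
 * IDs packed towards 0 (at most [0, 3] for 4 active threads).
 */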
10566
10567 void init_sched_mm_cid(struct task_struct *t)
10568 {
10569 struct mm_struct *mm = t->mm;
10570 int mm_users = 0;
10571
10572 if (mm) {
10573 mm_users = atomic_read(&mm->mm_users);
10574 if (mm_users == 1)
10575 mm->mm_cid_next_scan = jiffies + msecs_to_jiffies(MM_CID_SCAN_DELAY);
10576 }
10577 t->cid_work.next = &t->cid_work; /* Protect against double add */
10578 init_task_work(&t->cid_work, task_mm_cid_work);
10579 }
10580
10581 void task_tick_mm_cid(struct rq *rq, struct task_struct *curr)
10582 {
10583 struct callback_head *work = &curr->cid_work;
10584 unsigned long now = jiffies;
10585
10586 if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) ||
10587 work->next != work)
10588 return;
10589 if (time_before(now, READ_ONCE(curr->mm->mm_cid_next_scan)))
10590 return;
10591
10592 /* No page allocation under rq lock */
10593 task_work_add(curr, work, TWA_RESUME | TWAF_NO_ALLOC);
10594 }
10595
10596 void sched_mm_cid_exit_signals(struct task_struct *t)
10597 {
10598 struct mm_struct *mm = t->mm;
10599 struct rq *rq;
10600
10601 if (!mm)
10602 return;
10603
10604 preempt_disable();
10605 rq = this_rq();
10606 guard(rq_lock_irqsave)(rq);
10607 preempt_enable_no_resched(); /* holding spinlock */
10608 WRITE_ONCE(t->mm_cid_active, 0);
10609 /*
10610 * Store t->mm_cid_active before loading per-mm/cpu cid.
10611 * Matches barrier in sched_mm_cid_remote_clear_old().
10612 */
10613 smp_mb();
10614 mm_cid_put(mm);
10615 t->last_mm_cid = t->mm_cid = -1;
10616 }
10617
10618 void sched_mm_cid_before_execve(struct task_struct *t)
10619 {
10620 struct mm_struct *mm = t->mm;
10621 struct rq *rq;
10622
10623 if (!mm)
10624 return;
10625
10626 preempt_disable();
10627 rq = this_rq();
10628 guard(rq_lock_irqsave)(rq);
10629 preempt_enable_no_resched(); /* holding spinlock */
10630 WRITE_ONCE(t->mm_cid_active, 0);
10631 /*
10632 * Store t->mm_cid_active before loading per-mm/cpu cid.
10633 * Matches barrier in sched_mm_cid_remote_clear_old().
10634 */
10635 smp_mb();
10636 mm_cid_put(mm);
10637 t->last_mm_cid = t->mm_cid = -1;
10638 }
10639
10640 void sched_mm_cid_after_execve(struct task_struct *t)
10641 {
10642 struct mm_struct *mm = t->mm;
10643 struct rq *rq;
10644
10645 if (!mm)
10646 return;
10647
10648 preempt_disable();
10649 rq = this_rq();
10650 scoped_guard (rq_lock_irqsave, rq) {
10651 preempt_enable_no_resched(); /* holding spinlock */
10652 WRITE_ONCE(t->mm_cid_active, 1);
10653 /*
10654 * Store t->mm_cid_active before loading per-mm/cpu cid.
10655 * Matches barrier in sched_mm_cid_remote_clear_old().
10656 */
10657 smp_mb();
10658 t->last_mm_cid = t->mm_cid = mm_cid_get(rq, t, mm);
10659 }
10660 rseq_set_notify_resume(t);
10661 }
10662
10663 void sched_mm_cid_fork(struct task_struct *t)
10664 {
10665 WARN_ON_ONCE(!t->mm || t->mm_cid != -1);
10666 t->mm_cid_active = 1;
10667 }
10668 #endif
10669
10670 #ifdef CONFIG_SCHED_CLASS_EXT
10671 void sched_deq_and_put_task(struct task_struct *p, int queue_flags,
10672 struct sched_enq_and_set_ctx *ctx)
10673 {
10674 struct rq *rq = task_rq(p);
10675
10676 lockdep_assert_rq_held(rq);
10677
10678 *ctx = (struct sched_enq_and_set_ctx){
10679 .p = p,
10680 .queue_flags = queue_flags,
10681 .queued = task_on_rq_queued(p),
10682 .running = task_current(rq, p),
10683 };
10684
10685 update_rq_clock(rq);
10686 if (ctx->queued)
10687 dequeue_task(rq, p, queue_flags | DEQUEUE_NOCLOCK);
10688 if (ctx->running)
10689 put_prev_task(rq, p);
10690 }
10691
10692 void sched_enq_and_set_task(struct sched_enq_and_set_ctx *ctx)
10693 {
10694 struct rq *rq = task_rq(ctx->p);
10695
10696 lockdep_assert_rq_held(rq);
10697
10698 if (ctx->queued)
10699 enqueue_task(rq, ctx->p, ctx->queue_flags | ENQUEUE_NOCLOCK);
10700 if (ctx->running)
10701 set_next_task(rq, ctx->p);
10702 }
10703 #endif /* CONFIG_SCHED_CLASS_EXT */
10704