// SPDX-License-Identifier: GPL-2.0-only
/*
 *  kernel/sched/core.c
 *
 *  Core kernel CPU scheduler code
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *  Copyright (C) 1998-2024  Ingo Molnar, Red Hat
 */
#define INSTANTIATE_EXPORTED_MIGRATE_DISABLE
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/hrtimer_api.h>
#include <linux/ktime_api.h>
#include <linux/sched/signal.h>
#include <linux/syscalls_api.h>
#include <linux/debug_locks.h>
#include <linux/prefetch.h>
#include <linux/capability.h>
#include <linux/pgtable_api.h>
#include <linux/wait_bit.h>
#include <linux/jiffies.h>
#include <linux/spinlock_api.h>
#include <linux/cpumask_api.h>
#include <linux/lockdep_api.h>
#include <linux/hardirq.h>
#include <linux/softirq.h>
#include <linux/refcount_api.h>
#include <linux/topology.h>
#include <linux/sched/clock.h>
#include <linux/sched/cond_resched.h>
#include <linux/sched/cputime.h>
#include <linux/sched/debug.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/init.h>
#include <linux/sched/isolation.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/mm.h>
#include <linux/sched/nohz.h>
#include <linux/sched/rseq_api.h>
#include <linux/sched/rt.h>

#include <linux/blkdev.h>
#include <linux/context_tracking.h>
#include <linux/cpuset.h>
#include <linux/delayacct.h>
#include <linux/init_task.h>
#include <linux/interrupt.h>
#include <linux/ioprio.h>
#include <linux/kallsyms.h>
#include <linux/kcov.h>
#include <linux/kprobes.h>
#include <linux/llist_api.h>
#include <linux/mmu_context.h>
#include <linux/mmzone.h>
#include <linux/mutex_api.h>
#include <linux/nmi.h>
#include <linux/nospec.h>
#include <linux/perf_event_api.h>
#include <linux/profile.h>
#include <linux/psi.h>
#include <linux/rcuwait_api.h>
#include <linux/rseq.h>
#include <linux/sched/wake_q.h>
#include <linux/scs.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/vtime.h>
#include <linux/wait_api.h>
#include <linux/workqueue_api.h>
#include <linux/livepatch_sched.h>

#ifdef CONFIG_PREEMPT_DYNAMIC
# ifdef CONFIG_GENERIC_IRQ_ENTRY
#  include <linux/irq-entry-common.h>
# endif
#endif

#include <uapi/linux/sched/types.h>

#include <asm/irq_regs.h>
#include <asm/switch_to.h>
#include <asm/tlb.h>

#define CREATE_TRACE_POINTS
#include <linux/sched/rseq_api.h>
#include <trace/events/sched.h>
#include <trace/events/ipi.h>
#undef CREATE_TRACE_POINTS

#include "sched.h"
#include "stats.h"

#include "autogroup.h"
#include "pelt.h"
#include "smp.h"

#include "../workqueue_internal.h"
#include "../../io_uring/io-wq.h"
#include "../smpboot.h"
#include "../locking/mutex.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);

/*
 * Export tracepoints that act as a bare tracehook (i.e. have no trace event
 * associated with them) to allow external modules to probe them.
 */
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_hw_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_compute_energy_tp);

DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#ifdef CONFIG_SCHED_PROXY_EXEC
DEFINE_STATIC_KEY_TRUE(__sched_proxy_exec);
static int __init setup_proxy_exec(char *str)
{
        bool proxy_enable = true;

        if (*str && kstrtobool(str + 1, &proxy_enable)) {
                pr_warn("Unable to parse sched_proxy_exec=\n");
                return 0;
        }

        if (proxy_enable) {
                pr_info("sched_proxy_exec enabled via boot arg\n");
                static_branch_enable(&__sched_proxy_exec);
        } else {
                pr_info("sched_proxy_exec disabled via boot arg\n");
                static_branch_disable(&__sched_proxy_exec);
        }
        return 1;
}
#else
static int __init setup_proxy_exec(char *str)
{
        pr_warn("CONFIG_SCHED_PROXY_EXEC=n, so it cannot be enabled or disabled at boot time\n");
        return 0;
}
#endif
__setup("sched_proxy_exec", setup_proxy_exec);

/*
 * Debugging: various feature bits
 *
 * If SCHED_DEBUG is disabled, each compilation unit has its own copy of
 * sysctl_sched_features, defined in sched.h, to allow constant propagation
 * at compile time and compiler optimization based on the feature defaults.
 */
#define SCHED_FEAT(name, enabled)       \
        (1UL << __SCHED_FEAT_##name) * enabled |
__read_mostly unsigned int sysctl_sched_features =
#include "features.h"
        0;
#undef SCHED_FEAT
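
/*
 * Illustrative sketch (not part of the build): with a features.h containing,
 * say, SCHED_FEAT(PLACE_LAG, true) and SCHED_FEAT(RUN_TO_PARITY, true), the
 * block above expands to roughly:
 *
 *      __read_mostly unsigned int sysctl_sched_features =
 *              (1UL << __SCHED_FEAT_PLACE_LAG) * true |
 *              (1UL << __SCHED_FEAT_RUN_TO_PARITY) * true |
 *              0;
 *
 * i.e. a compile-time constant bitmask of the default-enabled features.
 */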

/*
 * Print a warning if need_resched is set for the given duration (if
 * LATENCY_WARN is enabled).
 *
 * If sysctl_resched_latency_warn_once is set, only one warning will be shown
 * per boot.
 */
__read_mostly int sysctl_resched_latency_warn_ms = 100;
__read_mostly int sysctl_resched_latency_warn_once = 1;

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
__read_mostly unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;

__read_mostly int scheduler_running;

#ifdef CONFIG_SCHED_CORE

DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);

/* kernel prio, less is more */
static inline int __task_prio(const struct task_struct *p)
{
        if (p->sched_class == &stop_sched_class) /* trumps deadline */
                return -2;

        if (p->dl_server)
                return -1; /* deadline */

        if (rt_or_dl_prio(p->prio))
                return p->prio; /* [-1, 99] */

        if (p->sched_class == &idle_sched_class)
                return MAX_RT_PRIO + NICE_WIDTH; /* 140 */

        if (task_on_scx(p))
                return MAX_RT_PRIO + MAX_NICE + 1; /* 120, squash ext */

        return MAX_RT_PRIO + MAX_NICE; /* 119, squash fair */
}

/*
 * l(a,b)
 * le(a,b) := !l(b,a)
 * g(a,b)  := l(b,a)
 * ge(a,b) := !l(a,b)
 */

/* real prio, less is less */
static inline bool prio_less(const struct task_struct *a,
                             const struct task_struct *b, bool in_fi)
{
        int pa = __task_prio(a), pb = __task_prio(b);

        if (-pa < -pb)
                return true;

        if (-pb < -pa)
                return false;

        if (pa == -1) { /* dl_prio() doesn't work because of stop_class above */
                const struct sched_dl_entity *a_dl, *b_dl;

                a_dl = &a->dl;
                /*
                 * Since 'a' and 'b' can be CFS tasks served by the DL server,
                 * __task_prio() can return -1 (for DL) even for those. In that
                 * case, get to the dl_server's DL entity.
                 */
                if (a->dl_server)
                        a_dl = a->dl_server;

                b_dl = &b->dl;
                if (b->dl_server)
                        b_dl = b->dl_server;

                return !dl_time_before(a_dl->deadline, b_dl->deadline);
        }

        if (pa == MAX_RT_PRIO + MAX_NICE) /* fair */
                return cfs_prio_less(a, b, in_fi);

#ifdef CONFIG_SCHED_CLASS_EXT
        if (pa == MAX_RT_PRIO + MAX_NICE + 1) /* ext */
                return scx_prio_less(a, b, in_fi);
#endif

        return false;
}

static inline bool __sched_core_less(const struct task_struct *a,
                                     const struct task_struct *b)
{
        if (a->core_cookie < b->core_cookie)
                return true;

        if (a->core_cookie > b->core_cookie)
                return false;

        /* flip prio, so high prio is leftmost */
        if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count))
                return true;

        return false;
}

#define __node_2_sc(node) rb_entry((node), struct task_struct, core_node)

static inline bool rb_sched_core_less(struct rb_node *a, const struct rb_node *b)
{
        return __sched_core_less(__node_2_sc(a), __node_2_sc(b));
}

static inline int rb_sched_core_cmp(const void *key, const struct rb_node *node)
{
        const struct task_struct *p = __node_2_sc(node);
        unsigned long cookie = (unsigned long)key;

        if (cookie < p->core_cookie)
                return -1;

        if (cookie > p->core_cookie)
                return 1;

        return 0;
}

void sched_core_enqueue(struct rq *rq, struct task_struct *p)
{
        if (p->se.sched_delayed)
                return;

        rq->core->core_task_seq++;

        if (!p->core_cookie)
                return;

        rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);
}

void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags)
{
        if (p->se.sched_delayed)
                return;

        rq->core->core_task_seq++;

        if (sched_core_enqueued(p)) {
                rb_erase(&p->core_node, &rq->core_tree);
                RB_CLEAR_NODE(&p->core_node);
        }

        /*
         * Migrating the last task off the CPU, with the CPU in forced idle
         * state. Reschedule to create an accounting edge for forced idle,
         * and re-examine whether the core is still in forced idle state.
         */
        if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 &&
            rq->core->core_forceidle_count && rq->curr == rq->idle)
                resched_curr(rq);
}

static int sched_task_is_throttled(struct task_struct *p, int cpu)
{
        if (p->sched_class->task_is_throttled)
                return p->sched_class->task_is_throttled(p, cpu);

        return 0;
}

static struct task_struct *sched_core_next(struct task_struct *p, unsigned long cookie)
{
        struct rb_node *node = &p->core_node;
        int cpu = task_cpu(p);

        do {
                node = rb_next(node);
                if (!node)
                        return NULL;

                p = __node_2_sc(node);
                if (p->core_cookie != cookie)
                        return NULL;

        } while (sched_task_is_throttled(p, cpu));

        return p;
}

/*
 * Find the left-most (aka, highest priority) and unthrottled task matching
 * @cookie. If no suitable task is found, NULL will be returned.
 */
static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie)
{
        struct task_struct *p;
        struct rb_node *node;

        node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp);
        if (!node)
                return NULL;

        p = __node_2_sc(node);
        if (!sched_task_is_throttled(p, rq->cpu))
                return p;

        return sched_core_next(p, cookie);
}

/*
 * Magic required such that:
 *
 *      raw_spin_rq_lock(rq);
 *      ...
 *      raw_spin_rq_unlock(rq);
 *
 * ends up locking and unlocking the _same_ lock, and all CPUs
 * always agree on what rq has what lock.
 *
 * XXX entirely possible to selectively enable cores, don't bother for now.
 */

static DEFINE_MUTEX(sched_core_mutex);
static atomic_t sched_core_count;
static struct cpumask sched_core_mask;

static void sched_core_lock(int cpu, unsigned long *flags)
{
        const struct cpumask *smt_mask = cpu_smt_mask(cpu);
        int t, i = 0;

        local_irq_save(*flags);
        for_each_cpu(t, smt_mask)
                raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);
}

static void sched_core_unlock(int cpu, unsigned long *flags)
{
        const struct cpumask *smt_mask = cpu_smt_mask(cpu);
        int t;

        for_each_cpu(t, smt_mask)
                raw_spin_unlock(&cpu_rq(t)->__lock);
        local_irq_restore(*flags);
}

static void __sched_core_flip(bool enabled)
{
        unsigned long flags;
        int cpu, t;

        cpus_read_lock();

        /*
         * Toggle the online cores, one by one.
         */
        cpumask_copy(&sched_core_mask, cpu_online_mask);
        for_each_cpu(cpu, &sched_core_mask) {
                const struct cpumask *smt_mask = cpu_smt_mask(cpu);

                sched_core_lock(cpu, &flags);

                for_each_cpu(t, smt_mask)
                        cpu_rq(t)->core_enabled = enabled;

                cpu_rq(cpu)->core->core_forceidle_start = 0;

                sched_core_unlock(cpu, &flags);

                cpumask_andnot(&sched_core_mask, &sched_core_mask, smt_mask);
        }

        /*
         * Toggle the offline CPUs.
         */
        for_each_cpu_andnot(cpu, cpu_possible_mask, cpu_online_mask)
                cpu_rq(cpu)->core_enabled = enabled;

        cpus_read_unlock();
}

static void sched_core_assert_empty(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree));
}

static void __sched_core_enable(void)
{
        static_branch_enable(&__sched_core_enabled);
        /*
         * Ensure all previous instances of raw_spin_rq_*lock() have finished
         * and future ones will observe !sched_core_disabled().
         */
        synchronize_rcu();
        __sched_core_flip(true);
        sched_core_assert_empty();
}

static void __sched_core_disable(void)
{
        sched_core_assert_empty();
        __sched_core_flip(false);
        static_branch_disable(&__sched_core_enabled);
}

void sched_core_get(void)
{
        if (atomic_inc_not_zero(&sched_core_count))
                return;

        mutex_lock(&sched_core_mutex);
        if (!atomic_read(&sched_core_count))
                __sched_core_enable();

        smp_mb__before_atomic();
        atomic_inc(&sched_core_count);
        mutex_unlock(&sched_core_mutex);
}

static void __sched_core_put(struct work_struct *work)
{
        if (atomic_dec_and_mutex_lock(&sched_core_count, &sched_core_mutex)) {
                __sched_core_disable();
                mutex_unlock(&sched_core_mutex);
        }
}

void sched_core_put(void)
{
        static DECLARE_WORK(_work, __sched_core_put);

        /*
         * "There can be only one"
         *
         * Either this is the last one, or we don't actually need to do any
         * 'work'. If it is the last *again*, we rely on
         * WORK_STRUCT_PENDING_BIT.
         */
        if (!atomic_add_unless(&sched_core_count, -1, 1))
                schedule_work(&_work);
}
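
/*
 * Usage sketch (illustrative): core scheduling is reference counted by its
 * users, e.g. code that creates a core-sched cookie would do:
 *
 *      sched_core_get();       // first user flips the static key on
 *      ...                     // cookie lives here
 *      sched_core_put();       // last user schedules the disable via work
 *
 * so the per-core rq lock redirection is only paid for while at least one
 * cookie exists.
 */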

#else /* !CONFIG_SCHED_CORE: */

static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { }
static inline void
sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { }

#endif /* !CONFIG_SCHED_CORE */

/* need a wrapper since we may need to trace from modules */
EXPORT_TRACEPOINT_SYMBOL(sched_set_state_tp);

/* Call via the helper macro trace_set_current_state. */
void __trace_set_current_state(int state_value)
{
        trace_sched_set_state_tp(current, state_value);
}
EXPORT_SYMBOL(__trace_set_current_state);

/*
 * Serialization rules:
 *
 * Lock order:
 *
 *   p->pi_lock
 *     rq->lock
 *       hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
 *
 *  rq1->lock
 *    rq2->lock  where: rq1 < rq2
 *
 * Regular state:
 *
 * Normal scheduling state is serialized by rq->lock. __schedule() takes the
 * local CPU's rq->lock, it optionally removes the task from the runqueue and
 * always looks at the local rq data structures to find the most eligible task
 * to run next.
 *
 * Task enqueue is also under rq->lock, possibly taken from another CPU.
 * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
 * the local CPU to avoid bouncing the runqueue state around [ see
 * ttwu_queue_wakelist() ]
 *
 * Task wakeup, specifically wakeups that involve migration, are horribly
 * complicated to avoid having to take two rq->locks.
 *
 * Special state:
 *
 * System-calls and anything external will use task_rq_lock() which acquires
 * both p->pi_lock and rq->lock. As a consequence the state they change is
 * stable while holding either lock:
 *
 *  - sched_setaffinity()/
 *    set_cpus_allowed_ptr():   p->cpus_ptr, p->nr_cpus_allowed
 *  - set_user_nice():          p->se.load, p->*prio
 *  - __sched_setscheduler():   p->sched_class, p->policy, p->*prio,
 *                              p->se.load, p->rt_priority,
 *                              p->dl.dl_{runtime, deadline, period, flags, bw, density}
 *  - sched_setnuma():          p->numa_preferred_nid
 *  - sched_move_task():        p->sched_task_group
 *  - uclamp_update_active():   p->uclamp*
 *
 * p->state <- TASK_*:
 *
 *   is changed locklessly using set_current_state(), __set_current_state() or
 *   set_special_state(), see their respective comments, or by
 *   try_to_wake_up(). This latter uses p->pi_lock to serialize against
 *   concurrent self.
 *
 * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
 *
 *   is set by activate_task() and cleared by deactivate_task(), under
 *   rq->lock. Non-zero indicates the task is runnable, the special
 *   ON_RQ_MIGRATING state is used for migration without holding both
 *   rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
 *
 *   Additionally it is possible to be ->on_rq but still be considered not
 *   runnable when p->se.sched_delayed is true. These tasks are on the runqueue
 *   but will be dequeued as soon as they get picked again. See the
 *   task_is_runnable() helper.
 *
 * p->on_cpu <- { 0, 1 }:
 *
 *   is set by prepare_task() and cleared by finish_task() such that it will be
 *   set before p is scheduled-in and cleared after p is scheduled-out, both
 *   under rq->lock. Non-zero indicates the task is running on its CPU.
 *
 *   [ The astute reader will observe that it is possible for two tasks on one
 *     CPU to have ->on_cpu = 1 at the same time. ]
 *
 * task_cpu(p): is changed by set_task_cpu(), the rules are:
 *
 *  - Don't call set_task_cpu() on a blocked task:
 *
 *    We don't care what CPU we're not running on, this simplifies hotplug,
 *    the CPU assignment of blocked tasks isn't required to be valid.
 *
 *  - for try_to_wake_up(), called under p->pi_lock:
 *
 *    This allows try_to_wake_up() to only take one rq->lock, see its comment.
 *
 *  - for migration called under rq->lock:
 *    [ see task_on_rq_migrating() in task_rq_lock() ]
 *
 *    o move_queued_task()
 *    o detach_task()
 *
 *  - for migration called under double_rq_lock():
 *
 *    o __migrate_swap_task()
 *    o push_rt_task() / pull_rt_task()
 *    o push_dl_task() / pull_dl_task()
 *    o dl_task_offline_migration()
 *
 */
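
/*
 * Illustrative sketch of the "special state" pattern described above (not a
 * new API, just how existing callers use it):
 *
 *      struct rq_flags rf;
 *      struct rq *rq;
 *
 *      rq = task_rq_lock(p, &rf);      // p->pi_lock + rq->lock held
 *      ...                             // p's scheduling state is stable here
 *      task_rq_unlock(rq, p, &rf);
 */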

void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
{
        raw_spinlock_t *lock;

        /* Matches synchronize_rcu() in __sched_core_enable() */
        preempt_disable();
        if (sched_core_disabled()) {
                raw_spin_lock_nested(&rq->__lock, subclass);
                /* preempt_count *MUST* be > 1 */
                preempt_enable_no_resched();
                return;
        }

        for (;;) {
                lock = __rq_lockp(rq);
                raw_spin_lock_nested(lock, subclass);
                if (likely(lock == __rq_lockp(rq))) {
                        /* preempt_count *MUST* be > 1 */
                        preempt_enable_no_resched();
                        return;
                }
                raw_spin_unlock(lock);
        }
}

bool raw_spin_rq_trylock(struct rq *rq)
{
        raw_spinlock_t *lock;
        bool ret;

        /* Matches synchronize_rcu() in __sched_core_enable() */
        preempt_disable();
        if (sched_core_disabled()) {
                ret = raw_spin_trylock(&rq->__lock);
                preempt_enable();
                return ret;
        }

        for (;;) {
                lock = __rq_lockp(rq);
                ret = raw_spin_trylock(lock);
                if (!ret || (likely(lock == __rq_lockp(rq)))) {
                        preempt_enable();
                        return ret;
                }
                raw_spin_unlock(lock);
        }
}

void raw_spin_rq_unlock(struct rq *rq)
{
        raw_spin_unlock(rq_lockp(rq));
}

/*
 * double_rq_lock - safely lock two runqueues
 */
void double_rq_lock(struct rq *rq1, struct rq *rq2)
{
        lockdep_assert_irqs_disabled();

        if (rq_order_less(rq2, rq1))
                swap(rq1, rq2);

        raw_spin_rq_lock(rq1);
        if (__rq_lockp(rq1) != __rq_lockp(rq2))
                raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);

        double_rq_clock_clear_update(rq1, rq2);
}

/*
 * __task_rq_lock - lock the rq @p resides on.
 */
struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
        __acquires(rq->lock)
{
        struct rq *rq;

        lockdep_assert_held(&p->pi_lock);

        for (;;) {
                rq = task_rq(p);
                raw_spin_rq_lock(rq);
                if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
                        rq_pin_lock(rq, rf);
                        return rq;
                }
                raw_spin_rq_unlock(rq);

                while (unlikely(task_on_rq_migrating(p)))
                        cpu_relax();
        }
}

/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 */
struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
        __acquires(p->pi_lock)
        __acquires(rq->lock)
{
        struct rq *rq;

        for (;;) {
                raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
                rq = task_rq(p);
                raw_spin_rq_lock(rq);
                /*
                 *      move_queued_task()              task_rq_lock()
                 *
                 *      ACQUIRE (rq->lock)
                 *      [S] ->on_rq = MIGRATING         [L] rq = task_rq()
                 *      WMB (__set_task_cpu())          ACQUIRE (rq->lock);
                 *      [S] ->cpu = new_cpu             [L] task_rq()
                 *                                      [L] ->on_rq
                 *      RELEASE (rq->lock)
                 *
                 * If we observe the old CPU in task_rq_lock(), the acquire of
                 * the old rq->lock will fully serialize against the stores.
                 *
                 * If we observe the new CPU in task_rq_lock(), the address
                 * dependency headed by '[L] rq = task_rq()' and the acquire
                 * will pair with the WMB to ensure we then also see migrating.
                 */
                if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
                        rq_pin_lock(rq, rf);
                        return rq;
                }
                raw_spin_rq_unlock(rq);
                raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);

                while (unlikely(task_on_rq_migrating(p)))
                        cpu_relax();
        }
}

/*
 * RQ-clock updating methods:
 */

static void update_rq_clock_task(struct rq *rq, s64 delta)
{
        /*
         * In theory, the compiler should just see 0 here, and optimize out the
         * call to sched_rt_avg_update. But I don't trust it...
         */
        s64 __maybe_unused steal = 0, irq_delta = 0;

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
        if (irqtime_enabled()) {
                irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

                /*
                 * Since irq_time is only updated on {soft,}irq_exit, we might run into
                 * this case when a previous update_rq_clock() happened inside a
                 * {soft,}IRQ region.
                 *
                 * When this happens, we stop ->clock_task and only update the
                 * prev_irq_time stamp to account for the part that fit, so that a next
                 * update will consume the rest. This ensures ->clock_task is
                 * monotonic.
                 *
                 * It does however cause some slight mis-attribution of {soft,}IRQ
                 * time, a more accurate solution would be to update the irq_time using
                 * the current rq->clock timestamp, except that would require using
                 * atomic ops.
                 */
                if (irq_delta > delta)
                        irq_delta = delta;

                rq->prev_irq_time += irq_delta;
                delta -= irq_delta;
                delayacct_irq(rq->curr, irq_delta);
        }
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
        if (static_key_false((&paravirt_steal_rq_enabled))) {
                u64 prev_steal;

                steal = prev_steal = paravirt_steal_clock(cpu_of(rq));
                steal -= rq->prev_steal_time_rq;

                if (unlikely(steal > delta))
                        steal = delta;

                rq->prev_steal_time_rq = prev_steal;
                delta -= steal;
        }
#endif

        rq->clock_task += delta;

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
        if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
                update_irq_load_avg(rq, irq_delta + steal);
#endif
        update_rq_clock_pelt(rq, delta);
}

void update_rq_clock(struct rq *rq)
{
        s64 delta;
        u64 clock;

        lockdep_assert_rq_held(rq);

        if (rq->clock_update_flags & RQCF_ACT_SKIP)
                return;

        if (sched_feat(WARN_DOUBLE_CLOCK))
                WARN_ON_ONCE(rq->clock_update_flags & RQCF_UPDATED);
        rq->clock_update_flags |= RQCF_UPDATED;

        clock = sched_clock_cpu(cpu_of(rq));
        scx_rq_clock_update(rq, clock);

        delta = clock - rq->clock;
        if (delta < 0)
                return;
        rq->clock += delta;

        update_rq_clock_task(rq, delta);
}

#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 */

static void hrtick_clear(struct rq *rq)
{
        if (hrtimer_active(&rq->hrtick_timer))
                hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
        struct rq *rq = container_of(timer, struct rq, hrtick_timer);
        struct rq_flags rf;

        WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

        rq_lock(rq, &rf);
        update_rq_clock(rq);
        rq->donor->sched_class->task_tick(rq, rq->curr, 1);
        rq_unlock(rq, &rf);

        return HRTIMER_NORESTART;
}

static void __hrtick_restart(struct rq *rq)
{
        struct hrtimer *timer = &rq->hrtick_timer;
        ktime_t time = rq->hrtick_time;

        hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
}

/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
        struct rq *rq = arg;
        struct rq_flags rf;

        rq_lock(rq, &rf);
        __hrtick_restart(rq);
        rq_unlock(rq, &rf);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and IRQs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
        struct hrtimer *timer = &rq->hrtick_timer;
        s64 delta;

        /*
         * Don't schedule slices shorter than 10000ns, that just
         * doesn't make sense and can cause timer DoS.
         */
        delta = max_t(s64, delay, 10000LL);
        rq->hrtick_time = ktime_add_ns(hrtimer_cb_get_time(timer), delta);

        if (rq == this_rq())
                __hrtick_restart(rq);
        else
                smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
}

static void hrtick_rq_init(struct rq *rq)
{
        INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
        hrtimer_setup(&rq->hrtick_timer, hrtick, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
}
#else /* !CONFIG_SCHED_HRTICK: */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void hrtick_rq_init(struct rq *rq)
{
}
#endif /* !CONFIG_SCHED_HRTICK */

/*
 * try_cmpxchg based fetch_or() macro so it works for different integer types:
 */
#define fetch_or(ptr, mask)                                             \
        ({                                                              \
                typeof(ptr) _ptr = (ptr);                               \
                typeof(mask) _mask = (mask);                            \
                typeof(*_ptr) _val = *_ptr;                             \
                                                                        \
                do {                                                    \
                } while (!try_cmpxchg(_ptr, &_val, _val | _mask));      \
        _val;                                                           \
})
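
/*
 * For illustration (a sketch, not part of the build): fetch_or() atomically
 * ORs @mask into *@ptr and returns the *previous* value, e.g.:
 *
 *      unsigned long flags = 0x1;
 *      unsigned long old = fetch_or(&flags, 0x2);
 *      // old == 0x1, flags == 0x3
 *
 * set_nr_and_not_polling() below relies on the returned old value to tell
 * whether TIF_POLLING_NRFLAG was set at the moment the resched bit went in.
 */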

#ifdef TIF_POLLING_NRFLAG
/*
 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
 * this avoids any races wrt polling state changes and thereby avoids
 * spurious IPIs.
 */
static inline bool set_nr_and_not_polling(struct thread_info *ti, int tif)
{
        return !(fetch_or(&ti->flags, 1 << tif) & _TIF_POLLING_NRFLAG);
}

/*
 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
 *
 * If this returns true, then the idle task promises to call
 * sched_ttwu_pending() and reschedule soon.
 */
static bool set_nr_if_polling(struct task_struct *p)
{
        struct thread_info *ti = task_thread_info(p);
        typeof(ti->flags) val = READ_ONCE(ti->flags);

        do {
                if (!(val & _TIF_POLLING_NRFLAG))
                        return false;
                if (val & _TIF_NEED_RESCHED)
                        return true;
        } while (!try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED));

        return true;
}

#else
static inline bool set_nr_and_not_polling(struct thread_info *ti, int tif)
{
        set_ti_thread_flag(ti, tif);
        return true;
}

static inline bool set_nr_if_polling(struct task_struct *p)
{
        return false;
}
#endif

static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
        struct wake_q_node *node = &task->wake_q;

        /*
         * Atomically grab the task, if ->wake_q is !nil already it means
         * it's already queued (either by us or someone else) and will get the
         * wakeup due to that.
         *
         * In order to ensure that a pending wakeup will observe our pending
         * state, even in the failed case, an explicit smp_mb() must be used.
         */
        smp_mb__before_atomic();
        if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
                return false;

        /*
         * The head is context local, there can be no concurrency.
         */
        *head->lastp = node;
        head->lastp = &node->next;
        return true;
}

/**
 * wake_q_add() - queue a wakeup for 'later' waking.
 * @head: the wake_q_head to add @task to
 * @task: the task to queue for 'later' wakeup
 *
 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 * instantly.
 *
 * This function must be used as-if it were wake_up_process(); IOW the task
 * must be ready to be woken at this location.
 */
void wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
        if (__wake_q_add(head, task))
                get_task_struct(task);
}

/**
 * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
 * @head: the wake_q_head to add @task to
 * @task: the task to queue for 'later' wakeup
 *
 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 * instantly.
 *
 * This function must be used as-if it were wake_up_process(); IOW the task
 * must be ready to be woken at this location.
 *
 * This function is essentially a task-safe equivalent to wake_q_add(). Callers
 * that already hold reference to @task can call the 'safe' version and trust
 * wake_q to do the right thing depending whether or not the @task is already
 * queued for wakeup.
 */
void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
{
        if (!__wake_q_add(head, task))
                put_task_struct(task);
}

void wake_up_q(struct wake_q_head *head)
{
        struct wake_q_node *node = head->first;

        while (node != WAKE_Q_TAIL) {
                struct task_struct *task;

                task = container_of(node, struct task_struct, wake_q);
                node = node->next;
                /* pairs with cmpxchg_relaxed() in __wake_q_add() */
                WRITE_ONCE(task->wake_q.next, NULL);
                /* Task can safely be re-inserted now. */

                /*
                 * wake_up_process() executes a full barrier, which pairs with
                 * the queueing in wake_q_add() so as not to miss wakeups.
                 */
                wake_up_process(task);
                put_task_struct(task);
        }
}
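
/*
 * Usage sketch (illustrative, mirrors callers such as the futex and rtmutex
 * code): wakeups are collected under a lock and issued after dropping it:
 *
 *      DEFINE_WAKE_Q(wake_q);
 *
 *      raw_spin_lock(&some_lock);      // 'some_lock' is hypothetical
 *      wake_q_add(&wake_q, task);      // grabs a task reference
 *      raw_spin_unlock(&some_lock);
 *
 *      wake_up_q(&wake_q);             // wakes and drops the references
 */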

/*
 * resched_curr - mark rq's current task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
static void __resched_curr(struct rq *rq, int tif)
{
        struct task_struct *curr = rq->curr;
        struct thread_info *cti = task_thread_info(curr);
        int cpu;

        lockdep_assert_rq_held(rq);

        /*
         * Always immediately preempt the idle task; no point in delaying doing
         * actual work.
         */
        if (is_idle_task(curr) && tif == TIF_NEED_RESCHED_LAZY)
                tif = TIF_NEED_RESCHED;

        if (cti->flags & ((1 << tif) | _TIF_NEED_RESCHED))
                return;

        cpu = cpu_of(rq);

        trace_sched_set_need_resched_tp(curr, cpu, tif);
        if (cpu == smp_processor_id()) {
                set_ti_thread_flag(cti, tif);
                if (tif == TIF_NEED_RESCHED)
                        set_preempt_need_resched();
                return;
        }

        if (set_nr_and_not_polling(cti, tif)) {
                if (tif == TIF_NEED_RESCHED)
                        smp_send_reschedule(cpu);
        } else {
                trace_sched_wake_idle_without_ipi(cpu);
        }
}

void __trace_set_need_resched(struct task_struct *curr, int tif)
{
        trace_sched_set_need_resched_tp(curr, smp_processor_id(), tif);
}

void resched_curr(struct rq *rq)
{
        __resched_curr(rq, TIF_NEED_RESCHED);
}

#ifdef CONFIG_PREEMPT_DYNAMIC
static DEFINE_STATIC_KEY_FALSE(sk_dynamic_preempt_lazy);
static __always_inline bool dynamic_preempt_lazy(void)
{
        return static_branch_unlikely(&sk_dynamic_preempt_lazy);
}
#else
static __always_inline bool dynamic_preempt_lazy(void)
{
        return IS_ENABLED(CONFIG_PREEMPT_LAZY);
}
#endif

static __always_inline int get_lazy_tif_bit(void)
{
        if (dynamic_preempt_lazy())
                return TIF_NEED_RESCHED_LAZY;

        return TIF_NEED_RESCHED;
}

void resched_curr_lazy(struct rq *rq)
{
        __resched_curr(rq, get_lazy_tif_bit());
}

void resched_cpu(int cpu)
{
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;

        raw_spin_rq_lock_irqsave(rq, flags);
        if (cpu_online(cpu) || cpu == smp_processor_id())
                resched_curr(rq);
        raw_spin_rq_unlock_irqrestore(rq, flags);
}

#ifdef CONFIG_NO_HZ_COMMON
/*
 * In the semi idle case, use the nearest busy CPU for migrating timers
 * from an idle CPU. This is good for power-savings.
 *
 * We don't do a similar optimization for a completely idle system, as
 * selecting an idle CPU will add more delays to the timers than intended
 * (as that CPU's timer base may not be up to date wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
        int i, cpu = smp_processor_id(), default_cpu = -1;
        struct sched_domain *sd;
        const struct cpumask *hk_mask;

        if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE)) {
                if (!idle_cpu(cpu))
                        return cpu;
                default_cpu = cpu;
        }

        hk_mask = housekeeping_cpumask(HK_TYPE_KERNEL_NOISE);

        guard(rcu)();

        for_each_domain(cpu, sd) {
                for_each_cpu_and(i, sched_domain_span(sd), hk_mask) {
                        if (cpu == i)
                                continue;

                        if (!idle_cpu(i))
                                return i;
                }
        }

        if (default_cpu == -1)
                default_cpu = housekeeping_any_cpu(HK_TYPE_KERNEL_NOISE);

        return default_cpu;
}

/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
static void wake_up_idle_cpu(int cpu)
{
        struct rq *rq = cpu_rq(cpu);

        if (cpu == smp_processor_id())
                return;

        /*
         * Set TIF_NEED_RESCHED and send an IPI if in the non-polling
         * part of the idle loop. This forces an exit from the idle loop
         * and a round trip to schedule(). Now this could be optimized
         * because a simple new idle loop iteration is enough to
         * re-evaluate the next tick. Provided some re-ordering of tick
         * nohz functions that would need to follow TIF_POLLING_NRFLAG
         * clearing:
         *
         * - On most architectures, a simple fetch_or on ti::flags with a
         *   "0" value would be enough to know if an IPI needs to be sent.
         *
         * - x86 needs to perform a last need_resched() check between
         *   monitor and mwait which doesn't take timers into account.
         *   There a dedicated TIF_TIMER flag would be required to
         *   fetch_or here and be checked along with TIF_NEED_RESCHED
         *   before mwait().
         *
         * However, remote timer enqueue is not such a frequent event
         * and testing of the above solutions didn't appear to report
         * much benefit.
         */
        if (set_nr_and_not_polling(task_thread_info(rq->idle), TIF_NEED_RESCHED))
                smp_send_reschedule(cpu);
        else
                trace_sched_wake_idle_without_ipi(cpu);
}

static bool wake_up_full_nohz_cpu(int cpu)
{
        /*
         * We just need the target to call irq_exit() and re-evaluate
         * the next tick. The nohz full kick at least implies that.
         * If needed we can still optimize that later with an
         * empty IRQ.
         */
        if (cpu_is_offline(cpu))
                return true;  /* Don't try to wake offline CPUs. */
        if (tick_nohz_full_cpu(cpu)) {
                if (cpu != smp_processor_id() ||
                    tick_nohz_tick_stopped())
                        tick_nohz_full_kick_cpu(cpu);
                return true;
        }

        return false;
}

/*
 * Wake up the specified CPU. If the CPU is going offline, it is the
 * caller's responsibility to deal with the lost wakeup, for example,
 * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
 */
void wake_up_nohz_cpu(int cpu)
{
        if (!wake_up_full_nohz_cpu(cpu))
                wake_up_idle_cpu(cpu);
}

static void nohz_csd_func(void *info)
{
        struct rq *rq = info;
        int cpu = cpu_of(rq);
        unsigned int flags;

        /*
         * Release the rq::nohz_csd.
         */
        flags = atomic_fetch_andnot(NOHZ_KICK_MASK | NOHZ_NEWILB_KICK, nohz_flags(cpu));
        WARN_ON(!(flags & NOHZ_KICK_MASK));

        rq->idle_balance = idle_cpu(cpu);
        if (rq->idle_balance) {
                rq->nohz_idle_balance = flags;
                __raise_softirq_irqoff(SCHED_SOFTIRQ);
        }
}

#endif /* CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_NO_HZ_FULL
static inline bool __need_bw_check(struct rq *rq, struct task_struct *p)
{
        if (rq->nr_running != 1)
                return false;

        if (p->sched_class != &fair_sched_class)
                return false;

        if (!task_on_rq_queued(p))
                return false;

        return true;
}

bool sched_can_stop_tick(struct rq *rq)
{
        int fifo_nr_running;

        /* Deadline tasks, even if single, need the tick */
        if (rq->dl.dl_nr_running)
                return false;

        /*
         * If there is more than one RR task, we need the tick to affect the
         * actual RR behaviour.
         */
        if (rq->rt.rr_nr_running) {
                if (rq->rt.rr_nr_running == 1)
                        return true;
                else
                        return false;
        }

        /*
         * If there are no RR tasks, but FIFO tasks, we can skip the tick, no
         * forced preemption between FIFO tasks.
         */
        fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
        if (fifo_nr_running)
                return true;

        /*
         * If there are no DL, RR/FIFO tasks, there must only be CFS or SCX
         * tasks left. For CFS, if there's more than one we need the tick for
         * involuntary preemption. For SCX, ask.
         */
        if (scx_enabled() && !scx_can_stop_tick(rq))
                return false;

        if (rq->cfs.h_nr_queued > 1)
                return false;

        /*
         * If there is one task and it has CFS runtime bandwidth constraints
         * and it's on the CPU now we don't want to stop the tick.
         * This check prevents clearing the bit if a newly enqueued task here is
         * dequeued by migrating while the constrained task continues to run.
         * E.g. going from 2->1 without going through pick_next_task().
         */
        if (__need_bw_check(rq, rq->curr)) {
                if (cfs_task_bw_constrained(rq->curr))
                        return false;
        }

        return true;
}
#endif /* CONFIG_NO_HZ_FULL */

#if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_FAIR_GROUP_SCHED)
/*
 * Iterate task_group tree rooted at *from, calling @down when first entering a
 * node and @up when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
int walk_tg_tree_from(struct task_group *from,
                      tg_visitor down, tg_visitor up, void *data)
{
        struct task_group *parent, *child;
        int ret;

        parent = from;

down:
        ret = (*down)(parent, data);
        if (ret)
                goto out;
        list_for_each_entry_rcu(child, &parent->children, siblings) {
                parent = child;
                goto down;

up:
                continue;
        }
        ret = (*up)(parent, data);
        if (ret || parent == from)
                goto out;

        child = parent;
        parent = parent->parent;
        if (parent)
                goto up;
out:
        return ret;
}

int tg_nop(struct task_group *tg, void *data)
{
        return 0;
}
#endif
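
/*
 * Illustrative sketch (hypothetical visitor, not kernel code): a caller
 * wanting to visit every group below the root top-down would do:
 *
 *      static int tg_visit(struct task_group *tg, void *data)
 *      {
 *              // inspect tg; a non-zero return aborts the walk
 *              return 0;
 *      }
 *
 *      rcu_read_lock();
 *      walk_tg_tree_from(&root_task_group, tg_visit, tg_nop, NULL);
 *      rcu_read_unlock();
 *
 * with tg_nop() serving as the no-op @up callback.
 */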

void set_load_weight(struct task_struct *p, bool update_load)
{
        int prio = p->static_prio - MAX_RT_PRIO;
        struct load_weight lw;

        if (task_has_idle_policy(p)) {
                lw.weight = scale_load(WEIGHT_IDLEPRIO);
                lw.inv_weight = WMULT_IDLEPRIO;
        } else {
                lw.weight = scale_load(sched_prio_to_weight[prio]);
                lw.inv_weight = sched_prio_to_wmult[prio];
        }

        /*
         * SCHED_OTHER tasks have to update their load when changing their
         * weight
         */
        if (update_load && p->sched_class->reweight_task)
                p->sched_class->reweight_task(task_rq(p), p, &lw);
        else
                p->se.load = lw;
}
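
/*
 * For reference (values from sched_prio_to_weight[]): nice 0 maps to a weight
 * of 1024, and each nice step scales the weight by roughly 1.25x, from 88761
 * at nice -20 down to 15 at nice 19. The intended effect is that a CPU-bound
 * task moving from nice 0 to nice 1 gets ~10% less CPU time than a competing
 * task that stays at nice 0.
 */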

#ifdef CONFIG_UCLAMP_TASK
/*
 * Serializes updates of utilization clamp values
 *
 * The (slow-path) user-space triggers utilization clamp value updates which
 * can require updates on (fast-path) scheduler's data structures used to
 * support enqueue/dequeue operations.
 * While the per-CPU rq lock protects fast-path update operations, user-space
 * requests are serialized using a mutex to reduce the risk of conflicting
 * updates or API abuses.
 */
static __maybe_unused DEFINE_MUTEX(uclamp_mutex);

/* Max allowed minimum utilization */
static unsigned int __maybe_unused sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;

/* Max allowed maximum utilization */
static unsigned int __maybe_unused sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;

/*
 * By default RT tasks run at the maximum performance point/capacity of the
 * system. Uclamp enforces this by always setting UCLAMP_MIN of RT tasks to
 * SCHED_CAPACITY_SCALE.
 *
 * This knob allows admins to change the default behavior when uclamp is being
 * used. In battery powered devices, particularly, running at the maximum
 * capacity and frequency will increase energy consumption and shorten the
 * battery life.
 *
 * This knob only affects RT tasks whose uclamp_se->user_defined == false.
 *
 * This knob will not override the system default sched_util_clamp_min defined
 * above.
 */
unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE;

/* All clamps are required to be less or equal than these values */
static struct uclamp_se uclamp_default[UCLAMP_CNT];

/*
 * This static key is used to reduce the uclamp overhead in the fast path. It
 * primarily disables the call to uclamp_rq_{inc, dec}() in
 * enqueue/dequeue_task().
 *
 * This allows users to continue to enable uclamp in their kernel config with
 * minimum uclamp overhead in the fast path.
 *
 * As soon as userspace modifies any of the uclamp knobs, the static key is
 * enabled, since there are then actual users of the uclamp functionality.
 *
 * The knobs that would enable this static key are:
 *
 * * A task modifying its uclamp value with sched_setattr().
 * * An admin modifying the sysctl_sched_uclamp_{min, max} via procfs.
 * * An admin modifying the cgroup cpu.uclamp.{min, max}
 */
DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);

static inline unsigned int
uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
                  unsigned int clamp_value)
{
        /*
         * Avoid blocked utilization pushing up the frequency when we go
         * idle (which drops the max-clamp) by retaining the last known
         * max-clamp.
         */
        if (clamp_id == UCLAMP_MAX) {
                rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
                return clamp_value;
        }

        return uclamp_none(UCLAMP_MIN);
}

static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
                                     unsigned int clamp_value)
{
        /* Reset max-clamp retention only on idle exit */
        if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
                return;

        uclamp_rq_set(rq, clamp_id, clamp_value);
}

static inline
unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
                                 unsigned int clamp_value)
{
        struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
        int bucket_id = UCLAMP_BUCKETS - 1;

        /*
         * Since both min and max clamps are max aggregated, find the
         * top most bucket with tasks in.
         */
        for ( ; bucket_id >= 0; bucket_id--) {
                if (!bucket[bucket_id].tasks)
                        continue;
                return bucket[bucket_id].value;
        }

        /* No tasks -- default clamp values */
        return uclamp_idle_value(rq, clamp_id, clamp_value);
}

static void __uclamp_update_util_min_rt_default(struct task_struct *p)
{
        unsigned int default_util_min;
        struct uclamp_se *uc_se;

        lockdep_assert_held(&p->pi_lock);

        uc_se = &p->uclamp_req[UCLAMP_MIN];

        /* Only sync if user didn't override the default */
        if (uc_se->user_defined)
                return;

        default_util_min = sysctl_sched_uclamp_util_min_rt_default;
        uclamp_se_set(uc_se, default_util_min, false);
}

static void uclamp_update_util_min_rt_default(struct task_struct *p)
{
        if (!rt_task(p))
                return;

        /* Protect updates to p->uclamp_* */
        guard(task_rq_lock)(p);
        __uclamp_update_util_min_rt_default(p);
}

static inline struct uclamp_se
uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
{
        /* Copy by value as we could modify it */
        struct uclamp_se uc_req = p->uclamp_req[clamp_id];
#ifdef CONFIG_UCLAMP_TASK_GROUP
        unsigned int tg_min, tg_max, value;

        /*
         * Tasks in autogroups or root task group will be
         * restricted by system defaults.
         */
        if (task_group_is_autogroup(task_group(p)))
                return uc_req;
        if (task_group(p) == &root_task_group)
                return uc_req;

        tg_min = task_group(p)->uclamp[UCLAMP_MIN].value;
        tg_max = task_group(p)->uclamp[UCLAMP_MAX].value;
        value = uc_req.value;
        value = clamp(value, tg_min, tg_max);
        uclamp_se_set(&uc_req, value, false);
#endif

        return uc_req;
}
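
/*
 * Worked example (illustrative): if a task requests UCLAMP_MIN = 800 but its
 * task group's effective range is [0, 512], the clamp() above yields 512; a
 * request of 300 would pass through unchanged. Root group and autogroup tasks
 * skip this restriction and are limited only by the system defaults.
 */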
1615
1616 /*
1617 * The effective clamp bucket index of a task depends on, by increasing
1618 * priority:
1619 * - the task specific clamp value, when explicitly requested from userspace
1620 * - the task group effective clamp value, for tasks not either in the root
1621 * group or in an autogroup
1622 * - the system default clamp value, defined by the sysadmin
1623 */
1624 static inline struct uclamp_se
uclamp_eff_get(struct task_struct * p,enum uclamp_id clamp_id)1625 uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
1626 {
1627 struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id);
1628 struct uclamp_se uc_max = uclamp_default[clamp_id];
1629
1630 /* System default restrictions always apply */
1631 if (unlikely(uc_req.value > uc_max.value))
1632 return uc_max;
1633
1634 return uc_req;
1635 }
1636
uclamp_eff_value(struct task_struct * p,enum uclamp_id clamp_id)1637 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
1638 {
1639 struct uclamp_se uc_eff;
1640
1641 /* Task currently refcounted: use back-annotated (effective) value */
1642 if (p->uclamp[clamp_id].active)
1643 return (unsigned long)p->uclamp[clamp_id].value;
1644
1645 uc_eff = uclamp_eff_get(p, clamp_id);
1646
1647 return (unsigned long)uc_eff.value;
1648 }
1649
1650 /*
1651 * When a task is enqueued on a rq, the clamp bucket currently defined by the
1652 * task's uclamp::bucket_id is refcounted on that rq. This also immediately
1653 * updates the rq's clamp value if required.
1654 *
1655 * Tasks can have a task-specific value requested from user-space, track
1656 * within each bucket the maximum value for tasks refcounted in it.
1657 * This "local max aggregation" allows to track the exact "requested" value
1658 * for each bucket when all its RUNNABLE tasks require the same clamp.
1659 */
uclamp_rq_inc_id(struct rq * rq,struct task_struct * p,enum uclamp_id clamp_id)1660 static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
1661 enum uclamp_id clamp_id)
1662 {
1663 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1664 struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1665 struct uclamp_bucket *bucket;
1666
1667 lockdep_assert_rq_held(rq);
1668
1669 /* Update task effective clamp */
1670 p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);
1671
1672 bucket = &uc_rq->bucket[uc_se->bucket_id];
1673 bucket->tasks++;
1674 uc_se->active = true;
1675
1676 uclamp_idle_reset(rq, clamp_id, uc_se->value);
1677
1678 /*
1679 * Local max aggregation: rq buckets always track the max
1680 * "requested" clamp value of its RUNNABLE tasks.
1681 */
1682 if (bucket->tasks == 1 || uc_se->value > bucket->value)
1683 bucket->value = uc_se->value;
1684
1685 if (uc_se->value > uclamp_rq_get(rq, clamp_id))
1686 uclamp_rq_set(rq, clamp_id, uc_se->value);
1687 }
1688
1689 /*
1690 * When a task is dequeued from a rq, the clamp bucket refcounted by the task
1691 * is released. If this is the last task reference counting the rq's max
1692 * active clamp value, then the rq's clamp value is updated.
1693 *
1694 * Both refcounted tasks and rq's cached clamp values are expected to be
1695 * always valid. If it's detected they are not, as defensive programming,
1696 * enforce the expected state and warn.
1697 */
uclamp_rq_dec_id(struct rq * rq,struct task_struct * p,enum uclamp_id clamp_id)1698 static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
1699 enum uclamp_id clamp_id)
1700 {
1701 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1702 struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1703 struct uclamp_bucket *bucket;
1704 unsigned int bkt_clamp;
1705 unsigned int rq_clamp;
1706
1707 lockdep_assert_rq_held(rq);
1708
1709 /*
1710 * If sched_uclamp_used was enabled after task @p was enqueued,
1711 * we could end up with unbalanced call to uclamp_rq_dec_id().
1712 *
1713 * In this case the uc_se->active flag should be false since no uclamp
1714 * accounting was performed at enqueue time and we can just return
1715 * here.
1716 *
1717 * Need to be careful of the following enqueue/dequeue ordering
1718 * problem too
1719 *
1720 * enqueue(taskA)
1721 * // sched_uclamp_used gets enabled
1722 * enqueue(taskB)
1723 * dequeue(taskA)
1724 * // Must not decrement bucket->tasks here
1725 * dequeue(taskB)
1726 *
1727 * where we could end up with stale data in uc_se and
1728 * bucket[uc_se->bucket_id].
1729 *
1730 * The following check here eliminates the possibility of such race.
1731 */
1732 if (unlikely(!uc_se->active))
1733 return;
1734
1735 bucket = &uc_rq->bucket[uc_se->bucket_id];
1736
1737 WARN_ON_ONCE(!bucket->tasks);
1738 if (likely(bucket->tasks))
1739 bucket->tasks--;
1740
1741 uc_se->active = false;
1742
1743 /*
1744 * Keep "local max aggregation" simple and accept to (possibly)
1745 * overboost some RUNNABLE tasks in the same bucket.
1746 * The rq clamp bucket value is reset to its base value whenever
1747 * there are no more RUNNABLE tasks refcounting it.
1748 */
1749 if (likely(bucket->tasks))
1750 return;
1751
1752 rq_clamp = uclamp_rq_get(rq, clamp_id);
1753 /*
1754 * Defensive programming: this should never happen. If it happens,
1755 * e.g. due to future modification, warn and fix up the expected value.
1756 */
1757 WARN_ON_ONCE(bucket->value > rq_clamp);
1758 if (bucket->value >= rq_clamp) {
1759 bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
1760 uclamp_rq_set(rq, clamp_id, bkt_clamp);
1761 }
1762 }
1763
uclamp_rq_inc(struct rq * rq,struct task_struct * p,int flags)1764 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p, int flags)
1765 {
1766 enum uclamp_id clamp_id;
1767
1768 /*
1769 * Avoid any overhead until uclamp is actually used by the userspace.
1770 *
1771 * The condition is constructed such that a NOP is generated when
1772 * sched_uclamp_used is disabled.
1773 */
1774 if (!uclamp_is_used())
1775 return;
1776
1777 if (unlikely(!p->sched_class->uclamp_enabled))
1778 return;
1779
1780 /* Only inc the delayed task which being woken up. */
1781 if (p->se.sched_delayed && !(flags & ENQUEUE_DELAYED))
1782 return;
1783
1784 for_each_clamp_id(clamp_id)
1785 uclamp_rq_inc_id(rq, p, clamp_id);
1786
1787 /* Reset clamp idle holding when there is one RUNNABLE task */
1788 if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
1789 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
1790 }
1791
uclamp_rq_dec(struct rq * rq,struct task_struct * p)1792 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
1793 {
1794 enum uclamp_id clamp_id;
1795
1796 /*
1797 * Avoid any overhead until uclamp is actually used by the userspace.
1798 *
1799 * The condition is constructed such that a NOP is generated when
1800 * sched_uclamp_used is disabled.
1801 */
1802 if (!uclamp_is_used())
1803 return;
1804
1805 if (unlikely(!p->sched_class->uclamp_enabled))
1806 return;
1807
1808 if (p->se.sched_delayed)
1809 return;
1810
1811 for_each_clamp_id(clamp_id)
1812 uclamp_rq_dec_id(rq, p, clamp_id);
1813 }
1814
1815 static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p,
1816 enum uclamp_id clamp_id)
1817 {
1818 if (!p->uclamp[clamp_id].active)
1819 return;
1820
1821 uclamp_rq_dec_id(rq, p, clamp_id);
1822 uclamp_rq_inc_id(rq, p, clamp_id);
1823
1824 /*
1825 * Make sure to clear the idle flag if we've transiently reached 0
1826 * active tasks on rq.
1827 */
1828 if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE))
1829 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
1830 }
1831
1832 static inline void
1833 uclamp_update_active(struct task_struct *p)
1834 {
1835 enum uclamp_id clamp_id;
1836 struct rq_flags rf;
1837 struct rq *rq;
1838
1839 /*
1840 * Lock the task and the rq where the task is (or was) queued.
1841 *
1842 * We might lock the (previous) rq of a !RUNNABLE task, but that's the
1843 * price to pay to safely serialize util_{min,max} updates with
1844 * enqueues, dequeues and migration operations.
1845 * This is the same locking schema used by __set_cpus_allowed_ptr().
1846 */
1847 rq = task_rq_lock(p, &rf);
1848
1849 /*
1850 * Setting the clamp bucket is serialized by task_rq_lock().
1851 * If the task is not yet RUNNABLE and its task_struct is not
1852 * affecting a valid clamp bucket, the next time it's enqueued,
1853 * it will already see the updated clamp bucket value.
1854 */
1855 for_each_clamp_id(clamp_id)
1856 uclamp_rq_reinc_id(rq, p, clamp_id);
1857
1858 task_rq_unlock(rq, p, &rf);
1859 }
1860
1861 #ifdef CONFIG_UCLAMP_TASK_GROUP
1862 static inline void
1863 uclamp_update_active_tasks(struct cgroup_subsys_state *css)
1864 {
1865 struct css_task_iter it;
1866 struct task_struct *p;
1867
1868 css_task_iter_start(css, 0, &it);
1869 while ((p = css_task_iter_next(&it)))
1870 uclamp_update_active(p);
1871 css_task_iter_end(&it);
1872 }
1873
1874 static void cpu_util_update_eff(struct cgroup_subsys_state *css);
1875 #endif
1876
1877 #ifdef CONFIG_SYSCTL
1878 #ifdef CONFIG_UCLAMP_TASK_GROUP
1879 static void uclamp_update_root_tg(void)
1880 {
1881 struct task_group *tg = &root_task_group;
1882
1883 uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN],
1884 sysctl_sched_uclamp_util_min, false);
1885 uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
1886 sysctl_sched_uclamp_util_max, false);
1887
1888 guard(rcu)();
1889 cpu_util_update_eff(&root_task_group.css);
1890 }
1891 #else
1892 static void uclamp_update_root_tg(void) { }
1893 #endif
1894
1895 static void uclamp_sync_util_min_rt_default(void)
1896 {
1897 struct task_struct *g, *p;
1898
1899 /*
1900 * copy_process() sysctl_uclamp
1901 * uclamp_min_rt = X;
1902 * write_lock(&tasklist_lock) read_lock(&tasklist_lock)
1903 * // link thread smp_mb__after_spinlock()
1904 * write_unlock(&tasklist_lock) read_unlock(&tasklist_lock);
1905 * sched_post_fork() for_each_process_thread()
1906 * __uclamp_sync_rt() __uclamp_sync_rt()
1907 *
1908 * Ensures that either sched_post_fork() will observe the new
1909 * uclamp_min_rt or for_each_process_thread() will observe the new
1910 * task.
1911 */
1912 read_lock(&tasklist_lock);
1913 smp_mb__after_spinlock();
1914 read_unlock(&tasklist_lock);
1915
1916 guard(rcu)();
1917 for_each_process_thread(g, p)
1918 uclamp_update_util_min_rt_default(p);
1919 }
1920
1921 static int sysctl_sched_uclamp_handler(const struct ctl_table *table, int write,
1922 void *buffer, size_t *lenp, loff_t *ppos)
1923 {
1924 bool update_root_tg = false;
1925 int old_min, old_max, old_min_rt;
1926 int result;
1927
1928 guard(mutex)(&uclamp_mutex);
1929
1930 old_min = sysctl_sched_uclamp_util_min;
1931 old_max = sysctl_sched_uclamp_util_max;
1932 old_min_rt = sysctl_sched_uclamp_util_min_rt_default;
1933
1934 result = proc_dointvec(table, write, buffer, lenp, ppos);
1935 if (result)
1936 goto undo;
1937 if (!write)
1938 return 0;
1939
1940 if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
1941 sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE ||
1942 sysctl_sched_uclamp_util_min_rt_default > SCHED_CAPACITY_SCALE) {
1943
1944 result = -EINVAL;
1945 goto undo;
1946 }
1947
1948 if (old_min != sysctl_sched_uclamp_util_min) {
1949 uclamp_se_set(&uclamp_default[UCLAMP_MIN],
1950 sysctl_sched_uclamp_util_min, false);
1951 update_root_tg = true;
1952 }
1953 if (old_max != sysctl_sched_uclamp_util_max) {
1954 uclamp_se_set(&uclamp_default[UCLAMP_MAX],
1955 sysctl_sched_uclamp_util_max, false);
1956 update_root_tg = true;
1957 }
1958
1959 if (update_root_tg) {
1960 sched_uclamp_enable();
1961 uclamp_update_root_tg();
1962 }
1963
1964 if (old_min_rt != sysctl_sched_uclamp_util_min_rt_default) {
1965 sched_uclamp_enable();
1966 uclamp_sync_util_min_rt_default();
1967 }
1968
1969 /*
1970 * We update all RUNNABLE tasks only when task groups are in use.
1971 * Otherwise, keep it simple and do just a lazy update at each next
1972 * task enqueue time.
1973 */
1974 return 0;
1975
1976 undo:
1977 sysctl_sched_uclamp_util_min = old_min;
1978 sysctl_sched_uclamp_util_max = old_max;
1979 sysctl_sched_uclamp_util_min_rt_default = old_min_rt;
1980 return result;
1981 }
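/*
 * Illustrative usage of the handler above (assuming the usual procfs
 * mount point); values are in SCHED_CAPACITY_SCALE units [0..1024] and
 * min must not exceed max, per the validation above:
 *
 *	echo 512  > /proc/sys/kernel/sched_util_clamp_min
 *	echo 1024 > /proc/sys/kernel/sched_util_clamp_max
 *	echo 1024 > /proc/sys/kernel/sched_util_clamp_min_rt_default
 */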
1982 #endif /* CONFIG_SYSCTL */
1983
1984 static void uclamp_fork(struct task_struct *p)
1985 {
1986 enum uclamp_id clamp_id;
1987
1988 /*
1989 * We don't need to hold task_rq_lock() when updating p->uclamp_* here
1990 * as the task is still at its early fork stages.
1991 */
1992 for_each_clamp_id(clamp_id)
1993 p->uclamp[clamp_id].active = false;
1994
1995 if (likely(!p->sched_reset_on_fork))
1996 return;
1997
1998 for_each_clamp_id(clamp_id) {
1999 uclamp_se_set(&p->uclamp_req[clamp_id],
2000 uclamp_none(clamp_id), false);
2001 }
2002 }
2003
2004 static void uclamp_post_fork(struct task_struct *p)
2005 {
2006 uclamp_update_util_min_rt_default(p);
2007 }
2008
2009 static void __init init_uclamp_rq(struct rq *rq)
2010 {
2011 enum uclamp_id clamp_id;
2012 struct uclamp_rq *uc_rq = rq->uclamp;
2013
2014 for_each_clamp_id(clamp_id) {
2015 uc_rq[clamp_id] = (struct uclamp_rq) {
2016 .value = uclamp_none(clamp_id)
2017 };
2018 }
2019
2020 rq->uclamp_flags = UCLAMP_FLAG_IDLE;
2021 }
2022
2023 static void __init init_uclamp(void)
2024 {
2025 struct uclamp_se uc_max = {};
2026 enum uclamp_id clamp_id;
2027 int cpu;
2028
2029 for_each_possible_cpu(cpu)
2030 init_uclamp_rq(cpu_rq(cpu));
2031
2032 for_each_clamp_id(clamp_id) {
2033 uclamp_se_set(&init_task.uclamp_req[clamp_id],
2034 uclamp_none(clamp_id), false);
2035 }
2036
2037 /* System defaults allow max clamp values for both indexes */
2038 uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false);
2039 for_each_clamp_id(clamp_id) {
2040 uclamp_default[clamp_id] = uc_max;
2041 #ifdef CONFIG_UCLAMP_TASK_GROUP
2042 root_task_group.uclamp_req[clamp_id] = uc_max;
2043 root_task_group.uclamp[clamp_id] = uc_max;
2044 #endif
2045 }
2046 }
2047
2048 #else /* !CONFIG_UCLAMP_TASK: */
2049 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p, int flags) { }
2050 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
2051 static inline void uclamp_fork(struct task_struct *p) { }
2052 static inline void uclamp_post_fork(struct task_struct *p) { }
2053 static inline void init_uclamp(void) { }
2054 #endif /* !CONFIG_UCLAMP_TASK */
2055
2056 bool sched_task_on_rq(struct task_struct *p)
2057 {
2058 return task_on_rq_queued(p);
2059 }
2060
2061 unsigned long get_wchan(struct task_struct *p)
2062 {
2063 unsigned long ip = 0;
2064 unsigned int state;
2065
2066 if (!p || p == current)
2067 return 0;
2068
2069 /* Only get wchan if task is blocked and we can keep it that way. */
2070 raw_spin_lock_irq(&p->pi_lock);
2071 state = READ_ONCE(p->__state);
2072 smp_rmb(); /* see try_to_wake_up() */
2073 if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
2074 ip = __get_wchan(p);
2075 raw_spin_unlock_irq(&p->pi_lock);
2076
2077 return ip;
2078 }
2079
2080 void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
2081 {
2082 if (!(flags & ENQUEUE_NOCLOCK))
2083 update_rq_clock(rq);
2084
2085 /*
2086 * Can be before ->enqueue_task() because uclamp considers the
2087 * ENQUEUE_DELAYED task before its ->sched_delayed gets cleared
2088 * in ->enqueue_task().
2089 */
2090 uclamp_rq_inc(rq, p, flags);
2091
2092 p->sched_class->enqueue_task(rq, p, flags);
2093
2094 psi_enqueue(p, flags);
2095
2096 if (!(flags & ENQUEUE_RESTORE))
2097 sched_info_enqueue(rq, p);
2098
2099 if (sched_core_enabled(rq))
2100 sched_core_enqueue(rq, p);
2101 }
2102
2103 /*
2104 * Must only return false when DEQUEUE_SLEEP.
2105 */
2106 inline bool dequeue_task(struct rq *rq, struct task_struct *p, int flags)
2107 {
2108 if (sched_core_enabled(rq))
2109 sched_core_dequeue(rq, p, flags);
2110
2111 if (!(flags & DEQUEUE_NOCLOCK))
2112 update_rq_clock(rq);
2113
2114 if (!(flags & DEQUEUE_SAVE))
2115 sched_info_dequeue(rq, p);
2116
2117 psi_dequeue(p, flags);
2118
2119 /*
2120 * Must be before ->dequeue_task() because ->dequeue_task() can 'fail'
2121 * and mark the task ->sched_delayed.
2122 */
2123 uclamp_rq_dec(rq, p);
2124 return p->sched_class->dequeue_task(rq, p, flags);
2125 }
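/*
 * Note on flag pairing: a dequeue_task(DEQUEUE_SAVE) is expected to be
 * matched by an enqueue_task(ENQUEUE_RESTORE), as in
 * __do_set_cpus_allowed(), so sched_info accounting is not restarted
 * for a task that merely changed attributes while queued.
 */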
2126
2127 void activate_task(struct rq *rq, struct task_struct *p, int flags)
2128 {
2129 if (task_on_rq_migrating(p))
2130 flags |= ENQUEUE_MIGRATED;
2131 if (flags & ENQUEUE_MIGRATED)
2132 sched_mm_cid_migrate_to(rq, p);
2133
2134 enqueue_task(rq, p, flags);
2135
2136 WRITE_ONCE(p->on_rq, TASK_ON_RQ_QUEUED);
2137 ASSERT_EXCLUSIVE_WRITER(p->on_rq);
2138 }
2139
2140 void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
2141 {
2142 WARN_ON_ONCE(flags & DEQUEUE_SLEEP);
2143
2144 WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
2145 ASSERT_EXCLUSIVE_WRITER(p->on_rq);
2146
2147 /*
2148 * Code explicitly relies on TASK_ON_RQ_MIGRATING being set *before*
2149 * dequeue_task() and cleared *after* enqueue_task().
2150 */
2151
2152 dequeue_task(rq, p, flags);
2153 }
2154
2155 static void block_task(struct rq *rq, struct task_struct *p, int flags)
2156 {
2157 if (dequeue_task(rq, p, DEQUEUE_SLEEP | flags))
2158 __block_task(rq, p);
2159 }
2160
2161 /**
2162 * task_curr - is this task currently executing on a CPU?
2163 * @p: the task in question.
2164 *
2165 * Return: 1 if the task is currently executing. 0 otherwise.
2166 */
2167 inline int task_curr(const struct task_struct *p)
2168 {
2169 return cpu_curr(task_cpu(p)) == p;
2170 }
2171
2172 /*
2173 * ->switching_to() is called with the pi_lock and rq_lock held and must not
2174 * mess with locking.
2175 */
2176 void check_class_changing(struct rq *rq, struct task_struct *p,
2177 const struct sched_class *prev_class)
2178 {
2179 if (prev_class != p->sched_class && p->sched_class->switching_to)
2180 p->sched_class->switching_to(rq, p);
2181 }
2182
2183 /*
2184 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
2185 * use the balance_callback list if you want balancing.
2186 *
2187 * this means any call to check_class_changed() must be followed by a call to
2188 * balance_callback().
2189 */
2190 void check_class_changed(struct rq *rq, struct task_struct *p,
2191 const struct sched_class *prev_class,
2192 int oldprio)
2193 {
2194 if (prev_class != p->sched_class) {
2195 if (prev_class->switched_from)
2196 prev_class->switched_from(rq, p);
2197
2198 p->sched_class->switched_to(rq, p);
2199 } else if (oldprio != p->prio || dl_task(p))
2200 p->sched_class->prio_changed(rq, p, oldprio);
2201 }
2202
2203 void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags)
2204 {
2205 struct task_struct *donor = rq->donor;
2206
2207 if (p->sched_class == donor->sched_class)
2208 donor->sched_class->wakeup_preempt(rq, p, flags);
2209 else if (sched_class_above(p->sched_class, donor->sched_class))
2210 resched_curr(rq);
2211
2212 /*
2213 * A queue event has occurred, and we're going to schedule. In
2214 * this case, we can save a useless back to back clock update.
2215 */
2216 if (task_on_rq_queued(donor) && test_tsk_need_resched(rq->curr))
2217 rq_clock_skip_update(rq);
2218 }
2219
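/*
 * Return value convention for the state matchers below (see the use in
 * wait_task_inactive()):
 *	 1: p->__state matches @state
 *	-1: only p->saved_state matches @state
 *	 0: neither matches
 */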
2220 static __always_inline
2221 int __task_state_match(struct task_struct *p, unsigned int state)
2222 {
2223 if (READ_ONCE(p->__state) & state)
2224 return 1;
2225
2226 if (READ_ONCE(p->saved_state) & state)
2227 return -1;
2228
2229 return 0;
2230 }
2231
2232 static __always_inline
2233 int task_state_match(struct task_struct *p, unsigned int state)
2234 {
2235 /*
2236 * Serialize against current_save_and_set_rtlock_wait_state(),
2237 * current_restore_rtlock_saved_state(), and __refrigerator().
2238 */
2239 guard(raw_spinlock_irq)(&p->pi_lock);
2240 return __task_state_match(p, state);
2241 }
2242
2243 /*
2244 * wait_task_inactive - wait for a thread to unschedule.
2245 *
2246 * Wait for the thread to block in any of the states set in @match_state.
2247 * If it changes, i.e. @p might have woken up, then return zero. When we
2248 * succeed in waiting for @p to be off its CPU, we return a positive number
2249 * (its total switch count). If a second call a short while later returns the
2250 * same number, the caller can be sure that @p has remained unscheduled the
2251 * whole time.
2252 *
2253 * The caller must ensure that the task *will* unschedule sometime soon,
2254 * else this function might spin for a *long* time. This function can't
2255 * be called with interrupts off, or it may introduce deadlock with
2256 * smp_call_function() if an IPI is sent by the same process we are
2257 * waiting to become inactive.
2258 */
2259 unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
2260 {
2261 int running, queued, match;
2262 struct rq_flags rf;
2263 unsigned long ncsw;
2264 struct rq *rq;
2265
2266 for (;;) {
2267 /*
2268 * We do the initial early heuristics without holding
2269 * any task-queue locks at all. We'll only try to get
2270 * the runqueue lock when things look like they will
2271 * work out!
2272 */
2273 rq = task_rq(p);
2274
2275 /*
2276 * If the task is actively running on another CPU
2277 * still, just relax and busy-wait without holding
2278 * any locks.
2279 *
2280 * NOTE! Since we don't hold any locks, it's not
2281 * even sure that "rq" stays as the right runqueue!
2282 * But we don't care, since "task_on_cpu()" will
2283 * return false if the runqueue has changed and p
2284 * is actually now running somewhere else!
2285 */
2286 while (task_on_cpu(rq, p)) {
2287 if (!task_state_match(p, match_state))
2288 return 0;
2289 cpu_relax();
2290 }
2291
2292 /*
2293 * Ok, time to look more closely! We need the rq
2294 * lock now, to be *sure*. If we're wrong, we'll
2295 * just go back and repeat.
2296 */
2297 rq = task_rq_lock(p, &rf);
2298 /*
2299 * If the task is sched_delayed, force-dequeue it to avoid always
2300 * hitting the tick timeout in the queued case.
2301 */
2302 if (p->se.sched_delayed)
2303 dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
2304 trace_sched_wait_task(p);
2305 running = task_on_cpu(rq, p);
2306 queued = task_on_rq_queued(p);
2307 ncsw = 0;
2308 if ((match = __task_state_match(p, match_state))) {
2309 /*
2310 * When matching on p->saved_state, consider this task
2311 * still queued so it will wait.
2312 */
2313 if (match < 0)
2314 queued = 1;
2315 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
2316 }
2317 task_rq_unlock(rq, p, &rf);
2318
2319 /*
2320 * If it changed from the expected state, bail out now.
2321 */
2322 if (unlikely(!ncsw))
2323 break;
2324
2325 /*
2326 * Was it really running after all now that we
2327 * checked with the proper locks actually held?
2328 *
2329 * Oops. Go back and try again..
2330 */
2331 if (unlikely(running)) {
2332 cpu_relax();
2333 continue;
2334 }
2335
2336 /*
2337 * It's not enough that it's not actively running,
2338 * it must be off the runqueue _entirely_, and not
2339 * preempted!
2340 *
2341 * So if it was still runnable (but just not actively
2342 * running right now), it's preempted, and we should
2343 * yield - it could be a while.
2344 */
2345 if (unlikely(queued)) {
2346 ktime_t to = NSEC_PER_SEC / HZ;
2347
2348 set_current_state(TASK_UNINTERRUPTIBLE);
2349 schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD);
2350 continue;
2351 }
2352
2353 /*
2354 * Ahh, all good. It wasn't running, and it wasn't
2355 * runnable, which means that it will never become
2356 * running in the future either. We're all done!
2357 */
2358 break;
2359 }
2360
2361 return ncsw;
2362 }
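/*
 * Typical caller pattern (an illustrative sketch, not a real call site):
 *
 *	ncsw = wait_task_inactive(p, TASK_UNINTERRUPTIBLE);
 *	... inspect @p while it is known to be off its CPU ...
 *	if (wait_task_inactive(p, TASK_UNINTERRUPTIBLE) != ncsw)
 *		... @p was scheduled in between, retry ...
 *
 * The LONG_MIN bit folded into the return value guarantees a non-zero
 * "success" result even when p->nvcsw itself is 0.
 */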
2363
2364 static void
2365 __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx);
2366
2367 static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
2368 {
2369 struct affinity_context ac = {
2370 .new_mask = cpumask_of(rq->cpu),
2371 .flags = SCA_MIGRATE_DISABLE,
2372 };
2373
2374 if (likely(!p->migration_disabled))
2375 return;
2376
2377 if (p->cpus_ptr != &p->cpus_mask)
2378 return;
2379
2380 /*
2381 * Violates locking rules! See comment in __do_set_cpus_allowed().
2382 */
2383 __do_set_cpus_allowed(p, &ac);
2384 }
2385
2386 void ___migrate_enable(void)
2387 {
2388 struct task_struct *p = current;
2389 struct affinity_context ac = {
2390 .new_mask = &p->cpus_mask,
2391 .flags = SCA_MIGRATE_ENABLE,
2392 };
2393
2394 __set_cpus_allowed_ptr(p, &ac);
2395 }
2396 EXPORT_SYMBOL_GPL(___migrate_enable);
2397
2398 void migrate_disable(void)
2399 {
2400 __migrate_disable();
2401 }
2402 EXPORT_SYMBOL_GPL(migrate_disable);
2403
2404 void migrate_enable(void)
2405 {
2406 __migrate_enable();
2407 }
2408 EXPORT_SYMBOL_GPL(migrate_enable);
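/*
 * Usage sketch (illustrative; 'my_percpu_data' is hypothetical): unlike
 * preempt_disable(), migrate_disable() pins the task to its current CPU
 * while leaving it preemptible and able to sleep:
 *
 *	migrate_disable();
 *	p = this_cpu_ptr(&my_percpu_data);
 *	mutex_lock(&p->lock);		// may sleep, CPU stays fixed
 *	...
 *	mutex_unlock(&p->lock);
 *	migrate_enable();
 */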
2409
2410 static inline bool rq_has_pinned_tasks(struct rq *rq)
2411 {
2412 return rq->nr_pinned;
2413 }
2414
2415 /*
2416 * Per-CPU kthreads are allowed to run on !active && online CPUs, see
2417 * __set_cpus_allowed_ptr() and select_fallback_rq().
2418 */
2419 static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
2420 {
2421 /* When not in the task's cpumask, no point in looking further. */
2422 if (!task_allowed_on_cpu(p, cpu))
2423 return false;
2424
2425 /* migrate_disabled() must be allowed to finish. */
2426 if (is_migration_disabled(p))
2427 return cpu_online(cpu);
2428
2429 /* Non-kernel threads are not allowed during either online or offline. */
2430 if (!(p->flags & PF_KTHREAD))
2431 return cpu_active(cpu);
2432
2433 /* KTHREAD_IS_PER_CPU is always allowed. */
2434 if (kthread_is_per_cpu(p))
2435 return cpu_online(cpu);
2436
2437 /* Regular kernel threads don't get to stay during offline. */
2438 if (cpu_dying(cpu))
2439 return false;
2440
2441 /* But are allowed during online. */
2442 return cpu_online(cpu);
2443 }
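/*
 * Summary of the above, per task type:
 *	migration-disabled task:	online CPUs
 *	regular user task:		active CPUs only
 *	KTHREAD_IS_PER_CPU kthread:	online CPUs (incl. !active)
 *	other kthreads:			online && !dying CPUs
 */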
2444
2445 /*
2446 * This is how migration works:
2447 *
2448 * 1) we invoke migration_cpu_stop() on the target CPU using
2449 * stop_one_cpu().
2450 * 2) stopper starts to run (implicitly forcing the migrated thread
2451 * off the CPU)
2452 * 3) it checks whether the migrated task is still in the wrong runqueue.
2453 * 4) if it's in the wrong runqueue then the migration thread removes
2454 * it and puts it into the right queue.
2455 * 5) stopper completes and stop_one_cpu() returns and the migration
2456 * is done.
2457 */
2458
2459 /*
2460 * move_queued_task - move a queued task to new rq.
2461 *
2462 * Returns (locked) new rq. Old rq's lock is released.
2463 */
2464 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
2465 struct task_struct *p, int new_cpu)
2466 {
2467 lockdep_assert_rq_held(rq);
2468
2469 deactivate_task(rq, p, DEQUEUE_NOCLOCK);
2470 set_task_cpu(p, new_cpu);
2471 rq_unlock(rq, rf);
2472
2473 rq = cpu_rq(new_cpu);
2474
2475 rq_lock(rq, rf);
2476 WARN_ON_ONCE(task_cpu(p) != new_cpu);
2477 activate_task(rq, p, 0);
2478 wakeup_preempt(rq, p, 0);
2479
2480 return rq;
2481 }
2482
2483 struct migration_arg {
2484 struct task_struct *task;
2485 int dest_cpu;
2486 struct set_affinity_pending *pending;
2487 };
2488
2489 /*
2490 * @refs: number of wait_for_completion()
2491 * @stop_pending: is @stop_work in use
2492 */
2493 struct set_affinity_pending {
2494 refcount_t refs;
2495 unsigned int stop_pending;
2496 struct completion done;
2497 struct cpu_stop_work stop_work;
2498 struct migration_arg arg;
2499 };
2500
2501 /*
2502 * Move (not current) task off this CPU, onto the destination CPU. We're doing
2503 * this because either it can't run here any more (set_cpus_allowed()
2504 * away from this CPU, or CPU going down), or because we're
2505 * attempting to rebalance this task on exec (sched_exec).
2506 *
2507 * So we race with normal scheduler movements, but that's OK, as long
2508 * as the task is no longer on this CPU.
2509 */
2510 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
2511 struct task_struct *p, int dest_cpu)
2512 {
2513 /* Affinity changed (again). */
2514 if (!is_cpu_allowed(p, dest_cpu))
2515 return rq;
2516
2517 rq = move_queued_task(rq, rf, p, dest_cpu);
2518
2519 return rq;
2520 }
2521
2522 /*
2523 * migration_cpu_stop - this will be executed by a high-prio stopper thread
2524 * and performs thread migration by bumping thread off CPU then
2525 * 'pushing' onto another runqueue.
2526 */
2527 static int migration_cpu_stop(void *data)
2528 {
2529 struct migration_arg *arg = data;
2530 struct set_affinity_pending *pending = arg->pending;
2531 struct task_struct *p = arg->task;
2532 struct rq *rq = this_rq();
2533 bool complete = false;
2534 struct rq_flags rf;
2535
2536 /*
2537 * The original target CPU might have gone down and we might
2538 * be on another CPU but it doesn't matter.
2539 */
2540 local_irq_save(rf.flags);
2541 /*
2542 * We need to explicitly wake pending tasks before running
2543 * __migrate_task() such that we will not miss enforcing cpus_ptr
2544 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
2545 */
2546 flush_smp_call_function_queue();
2547
2548 raw_spin_lock(&p->pi_lock);
2549 rq_lock(rq, &rf);
2550
2551 /*
2552 * If we were passed a pending, then ->stop_pending was set, thus
2553 * p->migration_pending must have remained stable.
2554 */
2555 WARN_ON_ONCE(pending && pending != p->migration_pending);
2556
2557 /*
2558 * If task_rq(p) != rq, it cannot be migrated here, because we're
2559 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
2560 * we're holding p->pi_lock.
2561 */
2562 if (task_rq(p) == rq) {
2563 if (is_migration_disabled(p))
2564 goto out;
2565
2566 if (pending) {
2567 p->migration_pending = NULL;
2568 complete = true;
2569
2570 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask))
2571 goto out;
2572 }
2573
2574 if (task_on_rq_queued(p)) {
2575 update_rq_clock(rq);
2576 rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
2577 } else {
2578 p->wake_cpu = arg->dest_cpu;
2579 }
2580
2581 /*
2582 * XXX __migrate_task() can fail, at which point we might end
2583 * up running on a dodgy CPU, AFAICT this can only happen
2584 * during CPU hotplug, at which point we'll get pushed out
2585 * anyway, so it's probably not a big deal.
2586 */
2587
2588 } else if (pending) {
2589 /*
2590 * This happens when we get migrated between migrate_enable()'s
2591 * preempt_enable() and scheduling the stopper task. At that
2592 * point we're a regular task again and not current anymore.
2593 *
2594 * A !PREEMPT kernel has a giant hole here, which makes it far
2595 * more likely.
2596 */
2597
2598 /*
2599 * The task moved before the stopper got to run. We're holding
2600 * ->pi_lock, so the allowed mask is stable - if it got
2601 * somewhere allowed, we're done.
2602 */
2603 if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) {
2604 p->migration_pending = NULL;
2605 complete = true;
2606 goto out;
2607 }
2608
2609 /*
2610 * When migrate_enable() hits a rq mis-match we can't reliably
2611 * determine is_migration_disabled() and so have to chase after
2612 * it.
2613 */
2614 WARN_ON_ONCE(!pending->stop_pending);
2615 preempt_disable();
2616 task_rq_unlock(rq, p, &rf);
2617 stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
2618 &pending->arg, &pending->stop_work);
2619 preempt_enable();
2620 return 0;
2621 }
2622 out:
2623 if (pending)
2624 pending->stop_pending = false;
2625 task_rq_unlock(rq, p, &rf);
2626
2627 if (complete)
2628 complete_all(&pending->done);
2629
2630 return 0;
2631 }
2632
2633 int push_cpu_stop(void *arg)
2634 {
2635 struct rq *lowest_rq = NULL, *rq = this_rq();
2636 struct task_struct *p = arg;
2637
2638 raw_spin_lock_irq(&p->pi_lock);
2639 raw_spin_rq_lock(rq);
2640
2641 if (task_rq(p) != rq)
2642 goto out_unlock;
2643
2644 if (is_migration_disabled(p)) {
2645 p->migration_flags |= MDF_PUSH;
2646 goto out_unlock;
2647 }
2648
2649 p->migration_flags &= ~MDF_PUSH;
2650
2651 if (p->sched_class->find_lock_rq)
2652 lowest_rq = p->sched_class->find_lock_rq(p, rq);
2653
2654 if (!lowest_rq)
2655 goto out_unlock;
2656
2657 // XXX validate p is still the highest prio task
2658 if (task_rq(p) == rq) {
2659 move_queued_task_locked(rq, lowest_rq, p);
2660 resched_curr(lowest_rq);
2661 }
2662
2663 double_unlock_balance(rq, lowest_rq);
2664
2665 out_unlock:
2666 rq->push_busy = false;
2667 raw_spin_rq_unlock(rq);
2668 raw_spin_unlock_irq(&p->pi_lock);
2669
2670 put_task_struct(p);
2671 return 0;
2672 }
2673
2674 /*
2675 * sched_class::set_cpus_allowed must do the below, but is not required to
2676 * actually call this function.
2677 */
2678 void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx)
2679 {
2680 if (ctx->flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) {
2681 p->cpus_ptr = ctx->new_mask;
2682 return;
2683 }
2684
2685 cpumask_copy(&p->cpus_mask, ctx->new_mask);
2686 p->nr_cpus_allowed = cpumask_weight(ctx->new_mask);
2687
2688 /*
2689 * Swap in a new user_cpus_ptr if SCA_USER flag set
2690 */
2691 if (ctx->flags & SCA_USER)
2692 swap(p->user_cpus_ptr, ctx->user_mask);
2693 }
2694
2695 static void
2696 __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
2697 {
2698 struct rq *rq = task_rq(p);
2699 bool queued, running;
2700
2701 /*
2702 * This here violates the locking rules for affinity, since we're only
2703 * supposed to change these variables while holding both rq->lock and
2704 * p->pi_lock.
2705 *
2706 * HOWEVER, it magically works, because ttwu() is the only code that
2707 * accesses these variables under p->pi_lock and only does so after
2708 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
2709 * before finish_task().
2710 *
2711 * XXX do further audits, this smells like something putrid.
2712 */
2713 if (ctx->flags & SCA_MIGRATE_DISABLE)
2714 WARN_ON_ONCE(!p->on_cpu);
2715 else
2716 lockdep_assert_held(&p->pi_lock);
2717
2718 queued = task_on_rq_queued(p);
2719 running = task_current_donor(rq, p);
2720
2721 if (queued) {
2722 /*
2723 * Because __kthread_bind() calls this on blocked tasks without
2724 * holding rq->lock.
2725 */
2726 lockdep_assert_rq_held(rq);
2727 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
2728 }
2729 if (running)
2730 put_prev_task(rq, p);
2731
2732 p->sched_class->set_cpus_allowed(p, ctx);
2733 mm_set_cpus_allowed(p->mm, ctx->new_mask);
2734
2735 if (queued)
2736 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
2737 if (running)
2738 set_next_task(rq, p);
2739 }
2740
2741 /*
2742 * Used for kthread_bind() and select_fallback_rq(), in both cases the user
2743 * affinity (if any) should be destroyed too.
2744 */
2745 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
2746 {
2747 struct affinity_context ac = {
2748 .new_mask = new_mask,
2749 .user_mask = NULL,
2750 .flags = SCA_USER, /* clear the user requested mask */
2751 };
2752 union cpumask_rcuhead {
2753 cpumask_t cpumask;
2754 struct rcu_head rcu;
2755 };
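	/*
	 * The union above reuses the cpumask allocation itself as the
	 * rcu_head, so deferring the free needs no second allocation.
	 */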
2756
2757 __do_set_cpus_allowed(p, &ac);
2758
2759 /*
2760 * Because this is called with p->pi_lock held, it is not possible
2761 * to use kfree() here (when PREEMPT_RT=y), therefore punt to using
2762 * kfree_rcu().
2763 */
2764 kfree_rcu((union cpumask_rcuhead *)ac.user_mask, rcu);
2765 }
2766
2767 int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
2768 int node)
2769 {
2770 cpumask_t *user_mask;
2771 unsigned long flags;
2772
2773 /*
2774 * Always clear dst->user_cpus_ptr first, as the two tasks'
2775 * user_cpus_ptr values may differ by now due to racing.
2776 */
2777 dst->user_cpus_ptr = NULL;
2778
2779 /*
2780 * This check is racy and losing the race is a valid situation.
2781 * It is not worth the extra overhead of taking the pi_lock on
2782 * every fork/clone.
2783 */
2784 if (data_race(!src->user_cpus_ptr))
2785 return 0;
2786
2787 user_mask = alloc_user_cpus_ptr(node);
2788 if (!user_mask)
2789 return -ENOMEM;
2790
2791 /*
2792 * Use pi_lock to protect content of user_cpus_ptr
2793 *
2794 * Though unlikely, user_cpus_ptr can be reset to NULL by a concurrent
2795 * do_set_cpus_allowed().
2796 */
2797 raw_spin_lock_irqsave(&src->pi_lock, flags);
2798 if (src->user_cpus_ptr) {
2799 swap(dst->user_cpus_ptr, user_mask);
2800 cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
2801 }
2802 raw_spin_unlock_irqrestore(&src->pi_lock, flags);
2803
2804 if (unlikely(user_mask))
2805 kfree(user_mask);
2806
2807 return 0;
2808 }
2809
2810 static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p)
2811 {
2812 struct cpumask *user_mask = NULL;
2813
2814 swap(p->user_cpus_ptr, user_mask);
2815
2816 return user_mask;
2817 }
2818
2819 void release_user_cpus_ptr(struct task_struct *p)
2820 {
2821 kfree(clear_user_cpus_ptr(p));
2822 }
2823
2824 /*
2825 * This function is wildly self concurrent; here be dragons.
2826 *
2827 *
2828 * When given a valid mask, __set_cpus_allowed_ptr() must block until the
2829 * designated task is enqueued on an allowed CPU. If that task is currently
2830 * running, we have to kick it out using the CPU stopper.
2831 *
2832 * Migrate-Disable comes along and tramples all over our nice sandcastle.
2833 * Consider:
2834 *
2835 * Initial conditions: P0->cpus_mask = [0, 1]
2836 *
2837 * P0@CPU0 P1
2838 *
2839 * migrate_disable();
2840 * <preempted>
2841 * set_cpus_allowed_ptr(P0, [1]);
2842 *
2843 * P1 *cannot* return from this set_cpus_allowed_ptr() call until P0 executes
2844 * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region).
2845 * This means we need the following scheme:
2846 *
2847 * P0@CPU0 P1
2848 *
2849 * migrate_disable();
2850 * <preempted>
2851 * set_cpus_allowed_ptr(P0, [1]);
2852 * <blocks>
2853 * <resumes>
2854 * migrate_enable();
2855 * __set_cpus_allowed_ptr();
2856 * <wakes local stopper>
2857 * `--> <woken on migration completion>
2858 *
2859 * Now the fun stuff: there may be several P1-like tasks, i.e. multiple
2860 * concurrent set_cpus_allowed_ptr(P0, [*]) calls. CPU affinity changes of any
2861 * task p are serialized by p->pi_lock, which we can leverage: the one that
2862 * should come into effect at the end of the Migrate-Disable region is the last
2863 * one. This means we only need to track a single cpumask (i.e. p->cpus_mask),
2864 * but we still need to properly signal those waiting tasks at the appropriate
2865 * moment.
2866 *
2867 * This is implemented using struct set_affinity_pending. The first
2868 * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will
2869 * setup an instance of that struct and install it on the targeted task_struct.
2870 * Any and all further callers will reuse that instance. Those then wait for
2871 * a completion signaled at the tail of the CPU stopper callback (1), triggered
2872 * on the end of the Migrate-Disable region (i.e. outermost migrate_enable()).
2873 *
2874 *
2875 * (1) In the cases covered above. There is one more where the completion is
2876 * signaled within affine_move_task() itself: when a subsequent affinity request
2877 * occurs after the stopper bailed out due to the targeted task still being
2878 * Migrate-Disable. Consider:
2879 *
2880 * Initial conditions: P0->cpus_mask = [0, 1]
2881 *
2882 * CPU0 P1 P2
2883 * <P0>
2884 * migrate_disable();
2885 * <preempted>
2886 * set_cpus_allowed_ptr(P0, [1]);
2887 * <blocks>
2888 * <migration/0>
2889 * migration_cpu_stop()
2890 * is_migration_disabled()
2891 * <bails>
2892 * set_cpus_allowed_ptr(P0, [0, 1]);
2893 * <signal completion>
2894 * <awakes>
2895 *
2896 * Note that the above is safe vs a concurrent migrate_enable(), as any
2897 * pending affinity completion is preceded by an uninstallation of
2898 * p->migration_pending done with p->pi_lock held.
2899 */
2900 static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf,
2901 int dest_cpu, unsigned int flags)
2902 __releases(rq->lock)
2903 __releases(p->pi_lock)
2904 {
2905 struct set_affinity_pending my_pending = { }, *pending = NULL;
2906 bool stop_pending, complete = false;
2907
2908 /*
2909 * Can the task run on the task's current CPU? If so, we're done
2910 *
2911 * We are also done if the task is the current donor, boosting a lock-
2912 * holding proxy (and potentially has been migrated outside its
2913 * current or previous affinity mask).
2914 */
2915 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask) ||
2916 (task_current_donor(rq, p) && !task_current(rq, p))) {
2917 struct task_struct *push_task = NULL;
2918
2919 if ((flags & SCA_MIGRATE_ENABLE) &&
2920 (p->migration_flags & MDF_PUSH) && !rq->push_busy) {
2921 rq->push_busy = true;
2922 push_task = get_task_struct(p);
2923 }
2924
2925 /*
2926 * If there are pending waiters, but no pending stop_work,
2927 * then complete now.
2928 */
2929 pending = p->migration_pending;
2930 if (pending && !pending->stop_pending) {
2931 p->migration_pending = NULL;
2932 complete = true;
2933 }
2934
2935 preempt_disable();
2936 task_rq_unlock(rq, p, rf);
2937 if (push_task) {
2938 stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
2939 p, &rq->push_work);
2940 }
2941 preempt_enable();
2942
2943 if (complete)
2944 complete_all(&pending->done);
2945
2946 return 0;
2947 }
2948
2949 if (!(flags & SCA_MIGRATE_ENABLE)) {
2950 /* serialized by p->pi_lock */
2951 if (!p->migration_pending) {
2952 /* Install the request */
2953 refcount_set(&my_pending.refs, 1);
2954 init_completion(&my_pending.done);
2955 my_pending.arg = (struct migration_arg) {
2956 .task = p,
2957 .dest_cpu = dest_cpu,
2958 .pending = &my_pending,
2959 };
2960
2961 p->migration_pending = &my_pending;
2962 } else {
2963 pending = p->migration_pending;
2964 refcount_inc(&pending->refs);
2965 /*
2966 * Affinity has changed, but we've already installed a
2967 * pending. migration_cpu_stop() *must* see this, else
2968 * we risk a completion of the pending despite having a
2969 * task on a disallowed CPU.
2970 *
2971 * Serialized by p->pi_lock, so this is safe.
2972 */
2973 pending->arg.dest_cpu = dest_cpu;
2974 }
2975 }
2976 pending = p->migration_pending;
2977 /*
2978 * - !MIGRATE_ENABLE:
2979 * we'll have installed a pending if there wasn't one already.
2980 *
2981 * - MIGRATE_ENABLE:
2982 * we're here because the current CPU isn't matching anymore,
2983 * the only way that can happen is because of a concurrent
2984 * set_cpus_allowed_ptr() call, which should then still be
2985 * pending completion.
2986 *
2987 * Either way, we really should have a @pending here.
2988 */
2989 if (WARN_ON_ONCE(!pending)) {
2990 task_rq_unlock(rq, p, rf);
2991 return -EINVAL;
2992 }
2993
2994 if (task_on_cpu(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) {
2995 /*
2996 * MIGRATE_ENABLE gets here because 'p == current', but for
2997 * anything else we cannot do is_migration_disabled(), punt
2998 * and have the stopper function handle it all race-free.
2999 */
3000 stop_pending = pending->stop_pending;
3001 if (!stop_pending)
3002 pending->stop_pending = true;
3003
3004 if (flags & SCA_MIGRATE_ENABLE)
3005 p->migration_flags &= ~MDF_PUSH;
3006
3007 preempt_disable();
3008 task_rq_unlock(rq, p, rf);
3009 if (!stop_pending) {
3010 stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
3011 &pending->arg, &pending->stop_work);
3012 }
3013 preempt_enable();
3014
3015 if (flags & SCA_MIGRATE_ENABLE)
3016 return 0;
3017 } else {
3018
3019 if (!is_migration_disabled(p)) {
3020 if (task_on_rq_queued(p))
3021 rq = move_queued_task(rq, rf, p, dest_cpu);
3022
3023 if (!pending->stop_pending) {
3024 p->migration_pending = NULL;
3025 complete = true;
3026 }
3027 }
3028 task_rq_unlock(rq, p, rf);
3029
3030 if (complete)
3031 complete_all(&pending->done);
3032 }
3033
3034 wait_for_completion(&pending->done);
3035
3036 if (refcount_dec_and_test(&pending->refs))
3037 wake_up_var(&pending->refs); /* No UaF, just an address */
3038
3039 /*
3040 * Block the original owner of &pending until all subsequent callers
3041 * have seen the completion and decremented the refcount
3042 */
3043 wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs));
3044
3045 /* ARGH */
3046 WARN_ON_ONCE(my_pending.stop_pending);
3047
3048 return 0;
3049 }
3050
3051 /*
3052 * Called with both p->pi_lock and rq->lock held; drops both before returning.
3053 */
3054 static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
3055 struct affinity_context *ctx,
3056 struct rq *rq,
3057 struct rq_flags *rf)
3058 __releases(rq->lock)
3059 __releases(p->pi_lock)
3060 {
3061 const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
3062 const struct cpumask *cpu_valid_mask = cpu_active_mask;
3063 bool kthread = p->flags & PF_KTHREAD;
3064 unsigned int dest_cpu;
3065 int ret = 0;
3066
3067 update_rq_clock(rq);
3068
3069 if (kthread || is_migration_disabled(p)) {
3070 /*
3071 * Kernel threads are allowed on online && !active CPUs,
3072 * however, during cpu-hot-unplug, even these might get pushed
3073 * away if not KTHREAD_IS_PER_CPU.
3074 *
3075 * Specifically, migration_disabled() tasks must not fail the
3076 * cpumask_any_and_distribute() pick below, esp. so on
3077 * SCA_MIGRATE_ENABLE, otherwise we'll not call
3078 * set_cpus_allowed_common() and actually reset p->cpus_ptr.
3079 */
3080 cpu_valid_mask = cpu_online_mask;
3081 }
3082
3083 if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) {
3084 ret = -EINVAL;
3085 goto out;
3086 }
3087
3088 /*
3089 * Must re-check here, to close a race against __kthread_bind(),
3090 * sched_setaffinity() is not guaranteed to observe the flag.
3091 */
3092 if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
3093 ret = -EINVAL;
3094 goto out;
3095 }
3096
3097 if (!(ctx->flags & SCA_MIGRATE_ENABLE)) {
3098 if (cpumask_equal(&p->cpus_mask, ctx->new_mask)) {
3099 if (ctx->flags & SCA_USER)
3100 swap(p->user_cpus_ptr, ctx->user_mask);
3101 goto out;
3102 }
3103
3104 if (WARN_ON_ONCE(p == current &&
3105 is_migration_disabled(p) &&
3106 !cpumask_test_cpu(task_cpu(p), ctx->new_mask))) {
3107 ret = -EBUSY;
3108 goto out;
3109 }
3110 }
3111
3112 /*
3113 * Picking a ~random cpu helps in cases where we are changing affinity
3114 * for groups of tasks (ie. cpuset), so that load balancing is not
3115 * immediately required to distribute the tasks within their new mask.
3116 */
3117 dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, ctx->new_mask);
3118 if (dest_cpu >= nr_cpu_ids) {
3119 ret = -EINVAL;
3120 goto out;
3121 }
3122
3123 __do_set_cpus_allowed(p, ctx);
3124
3125 return affine_move_task(rq, p, rf, dest_cpu, ctx->flags);
3126
3127 out:
3128 task_rq_unlock(rq, p, rf);
3129
3130 return ret;
3131 }
3132
3133 /*
3134 * Change a given task's CPU affinity. Migrate the thread to a
3135 * proper CPU and schedule it away if the CPU it's executing on
3136 * is removed from the allowed bitmask.
3137 *
3138 * NOTE: the caller must have a valid reference to the task, the
3139 * task must not exit() & deallocate itself prematurely. The
3140 * call is not atomic; no spinlocks may be held.
3141 */
3142 int __set_cpus_allowed_ptr(struct task_struct *p, struct affinity_context *ctx)
3143 {
3144 struct rq_flags rf;
3145 struct rq *rq;
3146
3147 rq = task_rq_lock(p, &rf);
3148 /*
3149 * Masking should be skipped if SCA_USER or any of the SCA_MIGRATE_*
3150 * flags are set.
3151 */
3152 if (p->user_cpus_ptr &&
3153 !(ctx->flags & (SCA_USER | SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) &&
3154 cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr))
3155 ctx->new_mask = rq->scratch_mask;
3156
3157 return __set_cpus_allowed_ptr_locked(p, ctx, rq, &rf);
3158 }
3159
3160 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
3161 {
3162 struct affinity_context ac = {
3163 .new_mask = new_mask,
3164 .flags = 0,
3165 };
3166
3167 return __set_cpus_allowed_ptr(p, &ac);
3168 }
3169 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
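/*
 * Usage sketch for the export above (illustrative; 'my_thread_fn' is a
 * hypothetical kthread function provided by a module):
 *
 *	struct task_struct *tsk = kthread_run(my_thread_fn, NULL, "my_thread");
 *
 *	if (!IS_ERR(tsk))
 *		ret = set_cpus_allowed_ptr(tsk, cpumask_of(1));
 */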
3170
3171 /*
3172 * Change a given task's CPU affinity to the intersection of its current
3173 * affinity mask and @subset_mask, writing the resulting mask to @new_mask.
3174 * If user_cpus_ptr is defined, use it as the basis for restricting CPU
3175 * affinity or use cpu_online_mask instead.
3176 *
3177 * If the resulting mask is empty, leave the affinity unchanged and return
3178 * -EINVAL.
3179 */
3180 static int restrict_cpus_allowed_ptr(struct task_struct *p,
3181 struct cpumask *new_mask,
3182 const struct cpumask *subset_mask)
3183 {
3184 struct affinity_context ac = {
3185 .new_mask = new_mask,
3186 .flags = 0,
3187 };
3188 struct rq_flags rf;
3189 struct rq *rq;
3190 int err;
3191
3192 rq = task_rq_lock(p, &rf);
3193
3194 /*
3195 * Forcefully restricting the affinity of a deadline task is
3196 * likely to cause problems, so fail and noisily override the
3197 * mask entirely.
3198 */
3199 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
3200 err = -EPERM;
3201 goto err_unlock;
3202 }
3203
3204 if (!cpumask_and(new_mask, task_user_cpus(p), subset_mask)) {
3205 err = -EINVAL;
3206 goto err_unlock;
3207 }
3208
3209 return __set_cpus_allowed_ptr_locked(p, &ac, rq, &rf);
3210
3211 err_unlock:
3212 task_rq_unlock(rq, p, &rf);
3213 return err;
3214 }
3215
3216 /*
3217 * Restrict the CPU affinity of task @p so that it is a subset of
3218 * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
3219 * old affinity mask. If the resulting mask is empty, we warn and walk
3220 * up the cpuset hierarchy until we find a suitable mask.
3221 */
3222 void force_compatible_cpus_allowed_ptr(struct task_struct *p)
3223 {
3224 cpumask_var_t new_mask;
3225 const struct cpumask *override_mask = task_cpu_possible_mask(p);
3226
3227 alloc_cpumask_var(&new_mask, GFP_KERNEL);
3228
3229 /*
3230 * __migrate_task() can fail silently in the face of concurrent
3231 * offlining of the chosen destination CPU, so take the hotplug
3232 * lock to ensure that the migration succeeds.
3233 */
3234 cpus_read_lock();
3235 if (!cpumask_available(new_mask))
3236 goto out_set_mask;
3237
3238 if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask))
3239 goto out_free_mask;
3240
3241 /*
3242 * We failed to find a valid subset of the affinity mask for the
3243 * task, so override it based on its cpuset hierarchy.
3244 */
3245 cpuset_cpus_allowed(p, new_mask);
3246 override_mask = new_mask;
3247
3248 out_set_mask:
3249 if (printk_ratelimit()) {
3250 printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
3251 task_pid_nr(p), p->comm,
3252 cpumask_pr_args(override_mask));
3253 }
3254
3255 WARN_ON(set_cpus_allowed_ptr(p, override_mask));
3256 out_free_mask:
3257 cpus_read_unlock();
3258 free_cpumask_var(new_mask);
3259 }
3260
3261 /*
3262 * Restore the affinity of a task @p which was previously restricted by a
3263 * call to force_compatible_cpus_allowed_ptr().
3264 *
3265 * It is the caller's responsibility to serialise this with any calls to
3266 * force_compatible_cpus_allowed_ptr(@p).
3267 */
3268 void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
3269 {
3270 struct affinity_context ac = {
3271 .new_mask = task_user_cpus(p),
3272 .flags = 0,
3273 };
3274 int ret;
3275
3276 /*
3277 * Try to restore the old affinity mask with __sched_setaffinity().
3278 * Cpuset masking will be done there too.
3279 */
3280 ret = __sched_setaffinity(p, &ac);
3281 WARN_ON_ONCE(ret);
3282 }
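/*
 * The force/relax pair above exists mainly for asymmetric systems, e.g.
 * arm64 machines where only some CPUs can run 32-bit tasks: exec of a
 * 32-bit task forces affinity into task_cpu_possible_mask(), saving the
 * old mask in user_cpus_ptr, from which a later call here restores it.
 */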
3283
3284 #ifdef CONFIG_SMP
3285
3286 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
3287 {
3288 unsigned int state = READ_ONCE(p->__state);
3289
3290 /*
3291 * We should never call set_task_cpu() on a blocked task,
3292 * ttwu() will sort out the placement.
3293 */
3294 WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq);
3295
3296 /*
3297 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
3298 * because schedstat_wait_{start,end} rebase migrating task's wait_start
3299 * time relying on p->on_rq.
3300 */
3301 WARN_ON_ONCE(state == TASK_RUNNING &&
3302 p->sched_class == &fair_sched_class &&
3303 (p->on_rq && !task_on_rq_migrating(p)));
3304
3305 #ifdef CONFIG_LOCKDEP
3306 /*
3307 * The caller should hold either p->pi_lock or rq->lock, when changing
3308 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
3309 *
3310 * sched_move_task() holds both and thus holding either pins the cgroup,
3311 * see task_group().
3312 *
3313 * Furthermore, all task_rq users should acquire both locks, see
3314 * task_rq_lock().
3315 */
3316 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
3317 lockdep_is_held(__rq_lockp(task_rq(p)))));
3318 #endif
3319 /*
3320 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
3321 */
3322 WARN_ON_ONCE(!cpu_online(new_cpu));
3323
3324 WARN_ON_ONCE(is_migration_disabled(p));
3325
3326 trace_sched_migrate_task(p, new_cpu);
3327
3328 if (task_cpu(p) != new_cpu) {
3329 if (p->sched_class->migrate_task_rq)
3330 p->sched_class->migrate_task_rq(p, new_cpu);
3331 p->se.nr_migrations++;
3332 rseq_migrate(p);
3333 sched_mm_cid_migrate_from(p);
3334 perf_event_task_migrate(p);
3335 }
3336
3337 __set_task_cpu(p, new_cpu);
3338 }
3339 #endif /* CONFIG_SMP */
3340
3341 #ifdef CONFIG_NUMA_BALANCING
3342 static void __migrate_swap_task(struct task_struct *p, int cpu)
3343 {
3344 if (task_on_rq_queued(p)) {
3345 struct rq *src_rq, *dst_rq;
3346 struct rq_flags srf, drf;
3347
3348 src_rq = task_rq(p);
3349 dst_rq = cpu_rq(cpu);
3350
3351 rq_pin_lock(src_rq, &srf);
3352 rq_pin_lock(dst_rq, &drf);
3353
3354 move_queued_task_locked(src_rq, dst_rq, p);
3355 wakeup_preempt(dst_rq, p, 0);
3356
3357 rq_unpin_lock(dst_rq, &drf);
3358 rq_unpin_lock(src_rq, &srf);
3359
3360 } else {
3361 /*
3362 * Task isn't running anymore; make it appear like we migrated
3363 * it before it went to sleep. This means on wakeup we make the
3364 * previous CPU our target instead of where it really is.
3365 */
3366 p->wake_cpu = cpu;
3367 }
3368 }
3369
3370 struct migration_swap_arg {
3371 struct task_struct *src_task, *dst_task;
3372 int src_cpu, dst_cpu;
3373 };
3374
3375 static int migrate_swap_stop(void *data)
3376 {
3377 struct migration_swap_arg *arg = data;
3378 struct rq *src_rq, *dst_rq;
3379
3380 if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
3381 return -EAGAIN;
3382
3383 src_rq = cpu_rq(arg->src_cpu);
3384 dst_rq = cpu_rq(arg->dst_cpu);
3385
3386 guard(double_raw_spinlock)(&arg->src_task->pi_lock, &arg->dst_task->pi_lock);
3387 guard(double_rq_lock)(src_rq, dst_rq);
3388
3389 if (task_cpu(arg->dst_task) != arg->dst_cpu)
3390 return -EAGAIN;
3391
3392 if (task_cpu(arg->src_task) != arg->src_cpu)
3393 return -EAGAIN;
3394
3395 if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr))
3396 return -EAGAIN;
3397
3398 if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr))
3399 return -EAGAIN;
3400
3401 __migrate_swap_task(arg->src_task, arg->dst_cpu);
3402 __migrate_swap_task(arg->dst_task, arg->src_cpu);
3403
3404 return 0;
3405 }
3406
3407 /*
3408 * Cross migrate two tasks
3409 */
3410 int migrate_swap(struct task_struct *cur, struct task_struct *p,
3411 int target_cpu, int curr_cpu)
3412 {
3413 struct migration_swap_arg arg;
3414 int ret = -EINVAL;
3415
3416 arg = (struct migration_swap_arg){
3417 .src_task = cur,
3418 .src_cpu = curr_cpu,
3419 .dst_task = p,
3420 .dst_cpu = target_cpu,
3421 };
3422
3423 if (arg.src_cpu == arg.dst_cpu)
3424 goto out;
3425
3426 /*
3427 * These three tests are all lockless; this is OK since all of them
3428 * will be re-checked with proper locks held further down the line.
3429 */
3430 if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
3431 goto out;
3432
3433 if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr))
3434 goto out;
3435
3436 if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr))
3437 goto out;
3438
3439 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
3440 ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);
3441
3442 out:
3443 return ret;
3444 }
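/*
 * At the time of writing, the expected caller is the NUMA balancer
 * (task_numa_migrate()), which uses the stop_two_cpus() machinery above
 * to swap two tasks across nodes atomically.
 */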
3445 #endif /* CONFIG_NUMA_BALANCING */
3446
3447 /***
3448 * kick_process - kick a running thread to enter/exit the kernel
3449 * @p: the to-be-kicked thread
3450 *
3451 * Cause a process which is running on another CPU to enter
3452 * kernel-mode without any delay (e.g. to get signals handled).
3453 *
3454 * NOTE: this function doesn't have to take the runqueue lock,
3455 * because all it wants to ensure is that the remote task enters
3456 * the kernel. If the IPI races and the task has been migrated
3457 * to another CPU then no harm is done and the purpose has been
3458 * achieved as well.
3459 */
3460 void kick_process(struct task_struct *p)
3461 {
3462 guard(preempt)();
3463 int cpu = task_cpu(p);
3464
3465 if ((cpu != smp_processor_id()) && task_curr(p))
3466 smp_send_reschedule(cpu);
3467 }
3468 EXPORT_SYMBOL_GPL(kick_process);
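/*
 * Example caller (paraphrased from the signal code): after setting
 * TIF_SIGPENDING on a remote running task, signal_wake_up_state() calls
 * kick_process() so the target re-enters the kernel and notices the
 * pending signal promptly.
 */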
3469
3470 /*
3471 * ->cpus_ptr is protected by both rq->lock and p->pi_lock
3472 *
3473 * A few notes on cpu_active vs cpu_online:
3474 *
3475 * - cpu_active must be a subset of cpu_online
3476 *
3477 * - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
3478 * see __set_cpus_allowed_ptr(). At this point the newly online
3479 * CPU isn't yet part of the sched domains, and balancing will not
3480 * see it.
3481 *
3482 * - on CPU-down we clear cpu_active() to mask the sched domains and
3483 * avoid the load balancer to place new tasks on the to be removed
3484 * CPU. Existing tasks will remain running there and will be taken
3485 * off.
3486 *
3487 * This means that fallback selection must not select !active CPUs,
3488 * and can assume that any active CPU must be online. Conversely
3489 * select_task_rq() below may allow selection of !active CPUs in order
3490 * to satisfy the above rules.
3491 */
3492 static int select_fallback_rq(int cpu, struct task_struct *p)
3493 {
3494 int nid = cpu_to_node(cpu);
3495 const struct cpumask *nodemask = NULL;
3496 enum { cpuset, possible, fail } state = cpuset;
3497 int dest_cpu;
3498
3499 /*
3500 * If the node that the CPU is on has been offlined, cpu_to_node()
3501 * will return -1. There is no CPU on the node, and we should
3502 * select the CPU on the other node.
3503 */
3504 if (nid != -1) {
3505 nodemask = cpumask_of_node(nid);
3506
3507 /* Look for allowed, online CPU in same node. */
3508 for_each_cpu(dest_cpu, nodemask) {
3509 if (is_cpu_allowed(p, dest_cpu))
3510 return dest_cpu;
3511 }
3512 }
3513
3514 for (;;) {
3515 /* Any allowed, online CPU? */
3516 for_each_cpu(dest_cpu, p->cpus_ptr) {
3517 if (!is_cpu_allowed(p, dest_cpu))
3518 continue;
3519
3520 goto out;
3521 }
3522
3523 /* No more Mr. Nice Guy. */
3524 switch (state) {
3525 case cpuset:
3526 if (cpuset_cpus_allowed_fallback(p)) {
3527 state = possible;
3528 break;
3529 }
3530 fallthrough;
3531 case possible:
3532 /*
3533 * XXX When called from select_task_rq() we only
3534 * hold p->pi_lock and again violate locking order.
3535 *
3536 * More yuck to audit.
3537 */
3538 do_set_cpus_allowed(p, task_cpu_fallback_mask(p));
3539 state = fail;
3540 break;
3541 case fail:
3542 BUG();
3543 break;
3544 }
3545 }
3546
3547 out:
3548 if (state != cpuset) {
3549 /*
3550 * Don't tell them about moving exiting tasks or
3551 * kernel threads (both mm NULL), since they never
3552 * leave the kernel.
3553 */
3554 if (p->mm && printk_ratelimit()) {
3555 printk_deferred("process %d (%s) no longer affine to cpu%d\n",
3556 task_pid_nr(p), p->comm, cpu);
3557 }
3558 }
3559
3560 return dest_cpu;
3561 }
3562
3563 /*
3564 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
3565 */
3566 static inline
3567 int select_task_rq(struct task_struct *p, int cpu, int *wake_flags)
3568 {
3569 lockdep_assert_held(&p->pi_lock);
3570
3571 if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p)) {
3572 cpu = p->sched_class->select_task_rq(p, cpu, *wake_flags);
3573 *wake_flags |= WF_RQ_SELECTED;
3574 } else {
3575 cpu = cpumask_any(p->cpus_ptr);
3576 }
3577
3578 /*
3579 * In order not to call set_task_cpu() on a blocking task we need
3580 * to rely on ttwu() to place the task on a valid ->cpus_ptr
3581 * CPU.
3582 *
3583 * Since this is common to all placement strategies, this lives here.
3584 *
3585 * [ this allows ->select_task() to simply return task_cpu(p) and
3586 * not worry about this generic constraint ]
3587 */
3588 if (unlikely(!is_cpu_allowed(p, cpu)))
3589 cpu = select_fallback_rq(task_cpu(p), p);
3590
3591 return cpu;
3592 }
3593
3594 void sched_set_stop_task(int cpu, struct task_struct *stop)
3595 {
3596 static struct lock_class_key stop_pi_lock;
3597 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
3598 struct task_struct *old_stop = cpu_rq(cpu)->stop;
3599
3600 if (stop) {
3601 /*
3602 * Make it appear like a SCHED_FIFO task; it's something
3603 * userspace knows about and won't get confused by.
3604 *
3605 * Also, it will make PI more or less work without too
3606 * much confusion -- but then, stop work should not
3607 * rely on PI working anyway.
3608 */
3609 sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
3610
3611 stop->sched_class = &stop_sched_class;
3612
3613 /*
3614 * The PI code calls rt_mutex_setprio() with ->pi_lock held to
3615 * adjust the effective priority of a task. As a result,
3616 * rt_mutex_setprio() can trigger (RT) balancing operations,
3617 * which can then trigger wakeups of the stop thread to push
3618 * around the current task.
3619 *
3620 * The stop task itself will never be part of the PI-chain, it
3621 * never blocks, therefore that ->pi_lock recursion is safe.
3622 * Tell lockdep about this by placing the stop->pi_lock in its
3623 * own class.
3624 */
3625 lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
3626 }
3627
3628 cpu_rq(cpu)->stop = stop;
3629
3630 if (old_stop) {
3631 /*
3632 * Reset it back to a normal scheduling class so that
3633 * it can die in pieces.
3634 */
3635 old_stop->sched_class = &rt_sched_class;
3636 }
3637 }
3638
3639 static void
3640 ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
3641 {
3642 struct rq *rq;
3643
3644 if (!schedstat_enabled())
3645 return;
3646
3647 rq = this_rq();
3648
3649 if (cpu == rq->cpu) {
3650 __schedstat_inc(rq->ttwu_local);
3651 __schedstat_inc(p->stats.nr_wakeups_local);
3652 } else {
3653 struct sched_domain *sd;
3654
3655 __schedstat_inc(p->stats.nr_wakeups_remote);
3656
3657 guard(rcu)();
3658 for_each_domain(rq->cpu, sd) {
3659 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
3660 __schedstat_inc(sd->ttwu_wake_remote);
3661 break;
3662 }
3663 }
3664 }
3665
3666 if (wake_flags & WF_MIGRATED)
3667 __schedstat_inc(p->stats.nr_wakeups_migrate);
3668
3669 __schedstat_inc(rq->ttwu_count);
3670 __schedstat_inc(p->stats.nr_wakeups);
3671
3672 if (wake_flags & WF_SYNC)
3673 __schedstat_inc(p->stats.nr_wakeups_sync);
3674 }
3675
3676 /*
3677 * Mark the task runnable.
3678 */
3679 static inline void ttwu_do_wakeup(struct task_struct *p)
3680 {
3681 WRITE_ONCE(p->__state, TASK_RUNNING);
3682 trace_sched_wakeup(p);
3683 }
3684
3685 static void
3686 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
3687 struct rq_flags *rf)
3688 {
3689 int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
3690
3691 lockdep_assert_rq_held(rq);
3692
3693 if (p->sched_contributes_to_load)
3694 rq->nr_uninterruptible--;
3695
3696 if (wake_flags & WF_RQ_SELECTED)
3697 en_flags |= ENQUEUE_RQ_SELECTED;
3698 if (wake_flags & WF_MIGRATED)
3699 en_flags |= ENQUEUE_MIGRATED;
3700 else if (p->in_iowait) {
3702 delayacct_blkio_end(p);
3703 atomic_dec(&task_rq(p)->nr_iowait);
3704 }
3705
3706 activate_task(rq, p, en_flags);
3707 wakeup_preempt(rq, p, wake_flags);
3708
3709 ttwu_do_wakeup(p);
3710
3711 if (p->sched_class->task_woken) {
3712 /*
3713 * Our task @p is fully woken up and running; so it's safe to
3714 * drop the rq->lock, hereafter rq is only used for statistics.
3715 */
3716 rq_unpin_lock(rq, rf);
3717 p->sched_class->task_woken(rq, p);
3718 rq_repin_lock(rq, rf);
3719 }
3720
3721 if (rq->idle_stamp) {
3722 u64 delta = rq_clock(rq) - rq->idle_stamp;
3723 u64 max = 2*rq->max_idle_balance_cost;
3724
3725 update_avg(&rq->avg_idle, delta);
3726
3727 if (rq->avg_idle > max)
3728 rq->avg_idle = max;
3729
3730 rq->idle_stamp = 0;
3731 }
3732 }
3733
3734 /*
3735 * Consider @p being inside a wait loop:
3736 *
3737 * for (;;) {
3738 * set_current_state(TASK_UNINTERRUPTIBLE);
3739 *
3740 * if (CONDITION)
3741 * break;
3742 *
3743 * schedule();
3744 * }
3745 * __set_current_state(TASK_RUNNING);
3746 *
3747 * between set_current_state() and schedule(). In this case @p is still
3748 * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
3749 * an atomic manner.
3750 *
3751 * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
3752 * then schedule() must still happen and p->state can be changed to
3753 * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
3754 * need to do a full wakeup with enqueue.
3755 *
3756 * Returns: %true when the wakeup is done,
3757 * %false otherwise.
3758 */
3759 static int ttwu_runnable(struct task_struct *p, int wake_flags)
3760 {
3761 struct rq_flags rf;
3762 struct rq *rq;
3763 int ret = 0;
3764
3765 rq = __task_rq_lock(p, &rf);
3766 if (task_on_rq_queued(p)) {
3767 update_rq_clock(rq);
3768 if (p->se.sched_delayed)
3769 enqueue_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_DELAYED);
3770 if (!task_on_cpu(rq, p)) {
3771 /*
3772 * When on_rq && !on_cpu the task is preempted, see if
3773 * it should preempt the task that is current now.
3774 */
3775 wakeup_preempt(rq, p, wake_flags);
3776 }
3777 ttwu_do_wakeup(p);
3778 ret = 1;
3779 }
3780 __task_rq_unlock(rq, &rf);
3781
3782 return ret;
3783 }
3784
3785 void sched_ttwu_pending(void *arg)
3786 {
3787 struct llist_node *llist = arg;
3788 struct rq *rq = this_rq();
3789 struct task_struct *p, *t;
3790 struct rq_flags rf;
3791
3792 if (!llist)
3793 return;
3794
3795 rq_lock_irqsave(rq, &rf);
3796 update_rq_clock(rq);
3797
3798 llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
3799 if (WARN_ON_ONCE(p->on_cpu))
3800 smp_cond_load_acquire(&p->on_cpu, !VAL);
3801
3802 if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
3803 set_task_cpu(p, cpu_of(rq));
3804
3805 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf);
3806 }
3807
3808 /*
3809 * Must be after enqueueing at least one task such that
3810 * idle_cpu() does not observe a false-negative -- if it does,
3811 * it is possible for select_idle_siblings() to stack a number
3812 * of tasks on this CPU during that window.
3813 *
3814 * It is OK to clear ttwu_pending when another task is pending.
3815 * We will receive an IPI after local IRQs are enabled and then enqueue it.
3816 * Since now nr_running > 0, idle_cpu() will always get correct result.
3817 */
3818 WRITE_ONCE(rq->ttwu_pending, 0);
3819 rq_unlock_irqrestore(rq, &rf);
3820 }
3821
3822 /*
3823 * Prepare the scene for sending an IPI for a remote smp_call
3824 *
3825 * Returns true if the caller can proceed with sending the IPI.
3826 * Returns false otherwise.
3827 */
3828 bool call_function_single_prep_ipi(int cpu)
3829 {
3830 if (set_nr_if_polling(cpu_rq(cpu)->idle)) {
3831 trace_sched_wake_idle_without_ipi(cpu);
3832 return false;
3833 }
3834
3835 return true;
3836 }
3837
3838 /*
3839 * Queue a task on the target CPU's wake_list and wake the CPU via IPI if
3840 * necessary. The wakee CPU on receipt of the IPI will queue the task
3841 * via sched_ttwu_pending() for activation so the wakee incurs the cost
3842 * of the wakeup instead of the waker.
3843 */
3844 static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3845 {
3846 struct rq *rq = cpu_rq(cpu);
3847
3848 p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
3849
3850 WRITE_ONCE(rq->ttwu_pending, 1);
3851 #ifdef CONFIG_SMP
3852 __smp_call_single_queue(cpu, &p->wake_entry.llist);
3853 #endif
3854 }
3855
3856 void wake_up_if_idle(int cpu)
3857 {
3858 struct rq *rq = cpu_rq(cpu);
3859
3860 guard(rcu)();
3861 if (is_idle_task(rcu_dereference(rq->curr))) {
3862 guard(rq_lock_irqsave)(rq);
3863 if (is_idle_task(rq->curr))
3864 resched_curr(rq);
3865 }
3866 }
3867
3868 bool cpus_equal_capacity(int this_cpu, int that_cpu)
3869 {
3870 if (!sched_asym_cpucap_active())
3871 return true;
3872
3873 if (this_cpu == that_cpu)
3874 return true;
3875
3876 return arch_scale_cpu_capacity(this_cpu) == arch_scale_cpu_capacity(that_cpu);
3877 }
3878
3879 bool cpus_share_cache(int this_cpu, int that_cpu)
3880 {
3881 if (this_cpu == that_cpu)
3882 return true;
3883
3884 return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
3885 }
3886
3887 /*
3888 * Whether CPUs share cache resources, which means LLC on non-cluster
3889 * machines and LLC tag or L2 on machines with clusters.
3890 */
3891 bool cpus_share_resources(int this_cpu, int that_cpu)
3892 {
3893 if (this_cpu == that_cpu)
3894 return true;
3895
3896 return per_cpu(sd_share_id, this_cpu) == per_cpu(sd_share_id, that_cpu);
3897 }
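
/*
 * Illustrative (hypothetical) topology: CPUs 0-3 and 4-7 form two L2
 * clusters inside a single 8-CPU LLC:
 *
 *	cpus_share_resources(0, 1) == true	// same cluster (sd_share_id)
 *	cpus_share_resources(0, 5) == false	// different clusters
 *	cpus_share_cache(0, 5)     == true	// same LLC (sd_llc_id)
 */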
3898
3899 static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
3900 {
3901 /* See SCX_OPS_ALLOW_QUEUED_WAKEUP. */
3902 if (!scx_allow_ttwu_queue(p))
3903 return false;
3904
3905 #ifdef CONFIG_SMP
3906 if (p->sched_class == &stop_sched_class)
3907 return false;
3908 #endif
3909
3910 /*
3911 * Do not complicate things with the async wake_list while the CPU is
3912 * in hotplug state.
3913 */
3914 if (!cpu_active(cpu))
3915 return false;
3916
3917 /* Ensure the task will still be allowed to run on the CPU. */
3918 if (!cpumask_test_cpu(cpu, p->cpus_ptr))
3919 return false;
3920
3921 /*
3922 * If the CPU does not share cache, then queue the task on the
3923 * remote rq's wakelist to avoid accessing remote data.
3924 */
3925 if (!cpus_share_cache(smp_processor_id(), cpu))
3926 return true;
3927
3928 if (cpu == smp_processor_id())
3929 return false;
3930
3931 /*
3932 * If the wakee CPU is idle, or the task is descheduling and the
3933 * only running task on the CPU, then use the wakelist to offload
3934 * the task activation to the idle (or soon-to-be-idle) CPU as
3935 * the current CPU is likely busy. nr_running is checked to
3936 * avoid unnecessary task stacking.
3937 *
3938 * Note that we can only get here with (wakee) p->on_rq=0,
3939 * p->on_cpu can be whatever, we've done the dequeue, so
3940 * the wakee has been accounted out of ->nr_running.
3941 */
3942 if (!cpu_rq(cpu)->nr_running)
3943 return true;
3944
3945 return false;
3946 }
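
/*
 * A compact restatement of the conditions above (a sketch, not
 * authoritative; ttwu_queue_wakelist() additionally requires
 * sched_feat(TTWU_QUEUE)):
 *
 *	use the wakelist iff:
 *	   scx_allow_ttwu_queue(p)			&&
 *	   p is not the stop task			&&
 *	   cpu_active(cpu)				&&
 *	   cpumask_test_cpu(cpu, p->cpus_ptr)		&&
 *	   (!cpus_share_cache(smp_processor_id(), cpu) ||
 *	    (cpu != smp_processor_id() && !cpu_rq(cpu)->nr_running))
 */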
3947
3948 static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3949 {
3950 if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) {
3951 sched_clock_cpu(cpu); /* Sync clocks across CPUs */
3952 __ttwu_queue_wakelist(p, cpu, wake_flags);
3953 return true;
3954 }
3955
3956 return false;
3957 }
3958
3959 static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
3960 {
3961 struct rq *rq = cpu_rq(cpu);
3962 struct rq_flags rf;
3963
3964 if (ttwu_queue_wakelist(p, cpu, wake_flags))
3965 return;
3966
3967 rq_lock(rq, &rf);
3968 update_rq_clock(rq);
3969 ttwu_do_activate(rq, p, wake_flags, &rf);
3970 rq_unlock(rq, &rf);
3971 }
3972
3973 /*
3974 * Invoked from try_to_wake_up() to check whether the task can be woken up.
3975 *
3976 * The caller holds p::pi_lock if p != current or has preemption
3977 * disabled when p == current.
3978 *
3979 * The rules of saved_state:
3980 *
3981 * The related locking code always holds p::pi_lock when updating
3982 * p::saved_state, which means the code is fully serialized in both cases.
3983 *
3984 * For PREEMPT_RT, the lock wait and lock wakeups happen via TASK_RTLOCK_WAIT.
3985 * No other bits set. This allows us to distinguish all wakeup scenarios.
3986 *
3987 * For FREEZER, the wakeup happens via TASK_FROZEN. No other bits set. This
3988 * allows us to prevent early wakeup of tasks before they can be run on
3989 * asymmetric ISA architectures (eg ARMv9).
3990 */
3991 static __always_inline
3992 bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
3993 {
3994 int match;
3995
3996 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
3997 WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) &&
3998 state != TASK_RTLOCK_WAIT);
3999 }
4000
4001 *success = !!(match = __task_state_match(p, state));
4002
4003 /*
4004 * Saved state preserves the task state across blocking on
4005 * an RT lock or for TASK_FREEZABLE tasks. If the state matches,
4006 * set p::saved_state to TASK_RUNNING, but do not wake the task
4007 * because it waits for a lock wakeup or __thaw_task(). Also
4008 * indicate success because from the regular waker's point of
4009 * view this has succeeded.
4010 *
4011 * After acquiring the lock the task will restore p::__state
4012 * from p::saved_state which ensures that the regular
4013 * wakeup is not lost. The restore will also set
4014 * p::saved_state to TASK_RUNNING so any further tests will
4015 * not result in false positives vs. @success
4016 */
4017 if (match < 0)
4018 p->saved_state = TASK_RUNNING;
4019
4020 return match > 0;
4021 }
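
/*
 * The tri-state contract of __task_state_match() as consumed above, in
 * sketch form:
 *
 *	match > 0:  p->__state matched     -> *success = 1, do the wakeup;
 *	match < 0:  p->saved_state matched -> *success = 1, but no wakeup;
 *	            p->saved_state is set to TASK_RUNNING instead;
 *	match == 0: no match               -> *success = 0, nothing to do.
 */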
4022
4023 /*
4024 * Notes on Program-Order guarantees on SMP systems.
4025 *
4026 * MIGRATION
4027 *
4028 * The basic program-order guarantee on SMP systems is that when a task [t]
4029 * migrates, all its activity on its old CPU [c0] happens-before any subsequent
4030 * execution on its new CPU [c1].
4031 *
4032 * For migration (of runnable tasks) this is provided by the following means:
4033 *
4034 * A) UNLOCK of the rq(c0)->lock scheduling out task t
4035 * B) migration for t is required to synchronize *both* rq(c0)->lock and
4036 * rq(c1)->lock (if not at the same time, then in that order).
4037 * C) LOCK of the rq(c1)->lock scheduling in task
4038 *
4039 * Release/acquire chaining guarantees that B happens after A and C after B.
4040 * Note: the CPU doing B need not be c0 or c1
4041 *
4042 * Example:
4043 *
4044 * CPU0 CPU1 CPU2
4045 *
4046 * LOCK rq(0)->lock
4047 * sched-out X
4048 * sched-in Y
4049 * UNLOCK rq(0)->lock
4050 *
4051 * LOCK rq(0)->lock // orders against CPU0
4052 * dequeue X
4053 * UNLOCK rq(0)->lock
4054 *
4055 * LOCK rq(1)->lock
4056 * enqueue X
4057 * UNLOCK rq(1)->lock
4058 *
4059 * LOCK rq(1)->lock // orders against CPU2
4060 * sched-out Z
4061 * sched-in X
4062 * UNLOCK rq(1)->lock
4063 *
4064 *
4065 * BLOCKING -- aka. SLEEP + WAKEUP
4066 *
4067 * For blocking we (obviously) need to provide the same guarantee as for
4068 * migration. However the means are completely different as there is no lock
4069 * chain to provide order. Instead we do:
4070 *
4071 * 1) smp_store_release(X->on_cpu, 0) -- finish_task()
4072 * 2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
4073 *
4074 * Example:
4075 *
4076 * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule)
4077 *
4078 * LOCK rq(0)->lock LOCK X->pi_lock
4079 * dequeue X
4080 * sched-out X
4081 * smp_store_release(X->on_cpu, 0);
4082 *
4083 * smp_cond_load_acquire(&X->on_cpu, !VAL);
4084 * X->state = WAKING
4085 * set_task_cpu(X,2)
4086 *
4087 * LOCK rq(2)->lock
4088 * enqueue X
4089 * X->state = RUNNING
4090 * UNLOCK rq(2)->lock
4091 *
4092 * LOCK rq(2)->lock // orders against CPU1
4093 * sched-out Z
4094 * sched-in X
4095 * UNLOCK rq(2)->lock
4096 *
4097 * UNLOCK X->pi_lock
4098 * UNLOCK rq(0)->lock
4099 *
4100 *
4101 * However, for wakeups there is a second guarantee we must provide, namely we
4102 * must ensure that CONDITION=1 done by the caller can not be reordered with
4103 * accesses to the task state; see try_to_wake_up() and set_current_state().
4104 */
4105
4106 /**
4107 * try_to_wake_up - wake up a thread
4108 * @p: the thread to be awakened
4109 * @state: the mask of task states that can be woken
4110 * @wake_flags: wake modifier flags (WF_*)
4111 *
4112 * Conceptually does:
4113 *
4114 * If (@state & @p->state) @p->state = TASK_RUNNING.
4115 *
4116 * If the task was not queued/runnable, also place it back on a runqueue.
4117 *
4118 * This function is atomic against schedule() which would dequeue the task.
4119 *
4120 * It issues a full memory barrier before accessing @p->state, see the comment
4121 * with set_current_state().
4122 *
4123 * Uses p->pi_lock to serialize against concurrent wake-ups.
4124 *
4125 * Relies on p->pi_lock stabilizing:
4126 * - p->sched_class
4127 * - p->cpus_ptr
4128 * - p->sched_task_group
4129 * in order to do migration, see its use of select_task_rq()/set_task_cpu().
4130 *
4131 * Tries really hard to only take one task_rq(p)->lock for performance.
4132 * Takes rq->lock in:
4133 * - ttwu_runnable() -- old rq, unavoidable, see comment there;
4134 * - ttwu_queue() -- new rq, for enqueue of the task;
4135 * - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
4136 *
4137 * As a consequence we race really badly with just about everything. See the
4138 * many memory barriers and their comments for details.
4139 *
4140 * Return: %true if @p->state changes (an actual wakeup was done),
4141 * %false otherwise.
4142 */
4143 int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
4144 {
4145 guard(preempt)();
4146 int cpu, success = 0;
4147
4148 wake_flags |= WF_TTWU;
4149
4150 if (p == current) {
4151 /*
4152 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
4153 * == smp_processor_id()'. Together this means we can special
4154 * case the whole 'p->on_rq && ttwu_runnable()' case below
4155 * without taking any locks.
4156 *
4157 * Specifically, given current runs ttwu() we must be before
4158 * schedule()'s block_task(), as such this must not observe
4159 * sched_delayed.
4160 *
4161 * In particular:
4162 * - we rely on Program-Order guarantees for all the ordering,
4163 * - we're serialized against set_special_state() by virtue of
4164 * it disabling IRQs (this allows not taking ->pi_lock).
4165 */
4166 WARN_ON_ONCE(p->se.sched_delayed);
4167 if (!ttwu_state_match(p, state, &success))
4168 goto out;
4169
4170 trace_sched_waking(p);
4171 ttwu_do_wakeup(p);
4172 goto out;
4173 }
4174
4175 /*
4176 * If we are going to wake up a thread waiting for CONDITION we
4177 * need to ensure that CONDITION=1 done by the caller can not be
4178 * reordered with p->state check below. This pairs with smp_store_mb()
4179 * in set_current_state() that the waiting thread does.
4180 */
4181 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
4182 smp_mb__after_spinlock();
4183 if (!ttwu_state_match(p, state, &success))
4184 break;
4185
4186 trace_sched_waking(p);
4187
4188 /*
4189 * Ensure we load p->on_rq _after_ p->state, otherwise it would
4190 * be possible to, falsely, observe p->on_rq == 0 and get stuck
4191 * in smp_cond_load_acquire() below.
4192 *
4193 * sched_ttwu_pending() try_to_wake_up()
4194 * STORE p->on_rq = 1 LOAD p->state
4195 * UNLOCK rq->lock
4196 *
4197 * __schedule() (switch to task 'p')
4198 * LOCK rq->lock smp_rmb();
4199 * smp_mb__after_spinlock();
4200 * UNLOCK rq->lock
4201 *
4202 * [task p]
4203 * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq
4204 *
4205 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4206 * __schedule(). See the comment for smp_mb__after_spinlock().
4207 *
4208 * A similar smp_rmb() lives in __task_needs_rq_lock().
4209 */
4210 smp_rmb();
4211 if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
4212 break;
4213
4214 /*
4215 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
4216 * possible to, falsely, observe p->on_cpu == 0.
4217 *
4218 * One must be running (->on_cpu == 1) in order to remove oneself
4219 * from the runqueue.
4220 *
4221 * __schedule() (switch to task 'p') try_to_wake_up()
4222 * STORE p->on_cpu = 1 LOAD p->on_rq
4223 * UNLOCK rq->lock
4224 *
4225 * __schedule() (put 'p' to sleep)
4226 * LOCK rq->lock smp_rmb();
4227 * smp_mb__after_spinlock();
4228 * STORE p->on_rq = 0 LOAD p->on_cpu
4229 *
4230 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4231 * __schedule(). See the comment for smp_mb__after_spinlock().
4232 *
4233 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
4234 * schedule()'s deactivate_task() has 'happened' and p will no longer
4235 * care about its own p->state. See the comment in __schedule().
4236 */
4237 smp_acquire__after_ctrl_dep();
4238
4239 /*
4240 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
4241 * == 0), which means we need to do an enqueue, change p->state to
4242 * TASK_WAKING such that we can unlock p->pi_lock before doing the
4243 * enqueue, e.g. in ttwu_queue_wakelist().
4244 */
4245 WRITE_ONCE(p->__state, TASK_WAKING);
4246
4247 /*
4248 * If the owning (remote) CPU is still in the middle of schedule() with
4249 * this task as prev, consider queueing p on the remote CPU's wake_list
4250 * which potentially sends an IPI instead of spinning on p->on_cpu to
4251 * let the waker make forward progress. This is safe because IRQs are
4252 * disabled and the IPI will deliver after on_cpu is cleared.
4253 *
4254 * Ensure we load task_cpu(p) after p->on_cpu:
4255 *
4256 * set_task_cpu(p, cpu);
4257 * STORE p->cpu = @cpu
4258 * __schedule() (switch to task 'p')
4259 * LOCK rq->lock
4260 * smp_mb__after_spin_lock() smp_cond_load_acquire(&p->on_cpu)
4261 * STORE p->on_cpu = 1 LOAD p->cpu
4262 *
4263 * to ensure we observe the correct CPU on which the task is currently
4264 * scheduling.
4265 */
4266 if (smp_load_acquire(&p->on_cpu) &&
4267 ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
4268 break;
4269
4270 /*
4271 * If the owning (remote) CPU is still in the middle of schedule() with
4272 * this task as prev, wait until it's done referencing the task.
4273 *
4274 * Pairs with the smp_store_release() in finish_task().
4275 *
4276 * This ensures that tasks getting woken will be fully ordered against
4277 * their previous state and preserve Program Order.
4278 */
4279 smp_cond_load_acquire(&p->on_cpu, !VAL);
4280
4281 cpu = select_task_rq(p, p->wake_cpu, &wake_flags);
4282 if (task_cpu(p) != cpu) {
4283 if (p->in_iowait) {
4284 delayacct_blkio_end(p);
4285 atomic_dec(&task_rq(p)->nr_iowait);
4286 }
4287
4288 wake_flags |= WF_MIGRATED;
4289 psi_ttwu_dequeue(p);
4290 set_task_cpu(p, cpu);
4291 }
4292
4293 ttwu_queue(p, cpu, wake_flags);
4294 }
4295 out:
4296 if (success)
4297 ttwu_stat(p, task_cpu(p), wake_flags);
4298
4299 return success;
4300 }
4301
4302 static bool __task_needs_rq_lock(struct task_struct *p)
4303 {
4304 unsigned int state = READ_ONCE(p->__state);
4305
4306 /*
4307 * Since p->pi_lock blocks try_to_wake_up(), we don't need rq->lock when
4308 * the task is blocked. Make sure to check @state since ttwu() can drop
4309 * locks at the end, see ttwu_queue_wakelist().
4310 */
4311 if (state == TASK_RUNNING || state == TASK_WAKING)
4312 return true;
4313
4314 /*
4315 * Ensure we load p->on_rq after p->__state, otherwise it would be
4316 * possible to, falsely, observe p->on_rq == 0.
4317 *
4318 * See try_to_wake_up() for a longer comment.
4319 */
4320 smp_rmb();
4321 if (p->on_rq)
4322 return true;
4323
4324 /*
4325 * Ensure the task has finished __schedule() and will not be referenced
4326 * anymore. Again, see try_to_wake_up() for a longer comment.
4327 */
4328 smp_rmb();
4329 smp_cond_load_acquire(&p->on_cpu, !VAL);
4330
4331 return false;
4332 }
4333
4334 /**
4335 * task_call_func - Invoke a function on task in fixed state
4336 * @p: Process for which the function is to be invoked, can be @current.
4337 * @func: Function to invoke.
4338 * @arg: Argument to function.
4339 *
4340 * Fix the task in its current state by avoiding wakeups and/or rq operations
4341 * and call @func(@arg) on it. This function can use task_is_runnable() and
4342 * task_curr() to work out what the state is, if required. Given that @func
4343 * can be invoked with a runqueue lock held, it had better be quite
4344 * lightweight.
4345 *
4346 * Returns:
4347 * Whatever @func returns
4348 */
4349 int task_call_func(struct task_struct *p, task_call_f func, void *arg)
4350 {
4351 struct rq *rq = NULL;
4352 struct rq_flags rf;
4353 int ret;
4354
4355 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
4356
4357 if (__task_needs_rq_lock(p))
4358 rq = __task_rq_lock(p, &rf);
4359
4360 /*
4361 * At this point the task is pinned; either:
4362 * - blocked and we're holding off wakeups (pi->lock)
4363 * - woken, and we're holding off enqueue (rq->lock)
4364 * - queued, and we're holding off schedule (rq->lock)
4365 * - running, and we're holding off de-schedule (rq->lock)
4366 *
4367 * The called function (@func) can use: task_curr(), p->on_rq and
4368 * p->__state to differentiate between these states.
4369 */
4370 ret = func(p, arg);
4371
4372 if (rq)
4373 rq_unlock(rq, &rf);
4374
4375 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
4376 return ret;
4377 }
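
/*
 * Usage sketch with a hypothetical callback (my_get_state() is
 * illustrative, not a kernel function):
 *
 *	static int my_get_state(struct task_struct *p, void *arg)
 *	{
 *		*(unsigned int *)arg = READ_ONCE(p->__state);
 *		return task_curr(p);	// any int the caller finds useful
 *	}
 *
 *	unsigned int state;
 *	int was_running = task_call_func(p, my_get_state, &state);
 */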
4378
4379 /**
4380 * cpu_curr_snapshot - Return a snapshot of the currently running task
4381 * @cpu: The CPU on which to snapshot the task.
4382 *
4383 * Returns the task_struct pointer of the task "currently" running on
4384 * the specified CPU.
4385 *
4386 * If the specified CPU was offline, the return value is whatever it
4387 * is, perhaps a pointer to the task_struct structure of that CPU's idle
4388 * task, but there is no guarantee. Callers wishing a useful return
4389 * value must take some action to ensure that the specified CPU remains
4390 * online throughout.
4391 *
4392 * This function executes full memory barriers before and after fetching
4393 * the pointer, which permits the caller to confine this function's fetch
4394 * with respect to the caller's accesses to other shared variables.
4395 */
4396 struct task_struct *cpu_curr_snapshot(int cpu)
4397 {
4398 struct rq *rq = cpu_rq(cpu);
4399 struct task_struct *t;
4400 struct rq_flags rf;
4401
4402 rq_lock_irqsave(rq, &rf);
4403 smp_mb__after_spinlock(); /* Pairing determined by caller's synchronization design. */
4404 t = rcu_dereference(cpu_curr(cpu));
4405 rq_unlock_irqrestore(rq, &rf);
4406 smp_mb(); /* Pairing determined by caller's synchronization design. */
4407
4408 return t;
4409 }
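
/*
 * Usage sketch: the result is only a snapshot and may be stale by the
 * time it is dereferenced; callers must tolerate that by design:
 *
 *	t = cpu_curr_snapshot(cpu);
 *	if (t != prev_snapshot)		// prev_snapshot: hypothetical
 *		...			// CPU switched tasks at least once
 */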
4410
4411 /**
4412 * wake_up_process - Wake up a specific process
4413 * @p: The process to be woken up.
4414 *
4415 * Attempt to wake up the nominated process and move it to the set of runnable
4416 * processes.
4417 *
4418 * Return: 1 if the process was woken up, 0 if it was already running.
4419 *
4420 * This function executes a full memory barrier before accessing the task state.
4421 */
4422 int wake_up_process(struct task_struct *p)
4423 {
4424 return try_to_wake_up(p, TASK_NORMAL, 0);
4425 }
4426 EXPORT_SYMBOL(wake_up_process);
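
/*
 * Canonical pairing with the sleeper's set_current_state(); a minimal
 * sketch of a hypothetical waker (CONDITION stands for whatever shared
 * flag the sleeper polls in its wait loop, see the comment above
 * ttwu_runnable()):
 *
 *	CONDITION = 1;
 *	wake_up_process(p);
 *
 * The full memory barrier issued by try_to_wake_up() pairs with the one
 * implied by set_current_state(), so the sleeper cannot check CONDITION,
 * miss the store, and then sleep forever.
 */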
4427
4428 int wake_up_state(struct task_struct *p, unsigned int state)
4429 {
4430 return try_to_wake_up(p, state, 0);
4431 }
4432
4433 /*
4434 * Perform scheduler related setup for a newly forked process p.
4435 * p is forked by current.
4436 *
4437 * __sched_fork() is basic setup which is also used by sched_init() to
4438 * initialize the boot CPU's idle task.
4439 */
4440 static void __sched_fork(u64 clone_flags, struct task_struct *p)
4441 {
4442 p->on_rq = 0;
4443
4444 p->se.on_rq = 0;
4445 p->se.exec_start = 0;
4446 p->se.sum_exec_runtime = 0;
4447 p->se.prev_sum_exec_runtime = 0;
4448 p->se.nr_migrations = 0;
4449 p->se.vruntime = 0;
4450 p->se.vlag = 0;
4451 INIT_LIST_HEAD(&p->se.group_node);
4452
4453 /* A delayed task cannot be in clone(). */
4454 WARN_ON_ONCE(p->se.sched_delayed);
4455
4456 #ifdef CONFIG_FAIR_GROUP_SCHED
4457 p->se.cfs_rq = NULL;
4458 #ifdef CONFIG_CFS_BANDWIDTH
4459 init_cfs_throttle_work(p);
4460 #endif
4461 #endif
4462
4463 #ifdef CONFIG_SCHEDSTATS
4464 /* Even if schedstat is disabled, there should not be garbage */
4465 memset(&p->stats, 0, sizeof(p->stats));
4466 #endif
4467
4468 init_dl_entity(&p->dl);
4469
4470 INIT_LIST_HEAD(&p->rt.run_list);
4471 p->rt.timeout = 0;
4472 p->rt.time_slice = sched_rr_timeslice;
4473 p->rt.on_rq = 0;
4474 p->rt.on_list = 0;
4475
4476 #ifdef CONFIG_SCHED_CLASS_EXT
4477 init_scx_entity(&p->scx);
4478 #endif
4479
4480 #ifdef CONFIG_PREEMPT_NOTIFIERS
4481 INIT_HLIST_HEAD(&p->preempt_notifiers);
4482 #endif
4483
4484 #ifdef CONFIG_COMPACTION
4485 p->capture_control = NULL;
4486 #endif
4487 init_numa_balancing(clone_flags, p);
4488 p->wake_entry.u_flags = CSD_TYPE_TTWU;
4489 p->migration_pending = NULL;
4490 init_sched_mm_cid(p);
4491 }
4492
4493 DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);
4494
4495 #ifdef CONFIG_NUMA_BALANCING
4496
4497 int sysctl_numa_balancing_mode;
4498
4499 static void __set_numabalancing_state(bool enabled)
4500 {
4501 if (enabled)
4502 static_branch_enable(&sched_numa_balancing);
4503 else
4504 static_branch_disable(&sched_numa_balancing);
4505 }
4506
4507 void set_numabalancing_state(bool enabled)
4508 {
4509 if (enabled)
4510 sysctl_numa_balancing_mode = NUMA_BALANCING_NORMAL;
4511 else
4512 sysctl_numa_balancing_mode = NUMA_BALANCING_DISABLED;
4513 __set_numabalancing_state(enabled);
4514 }
4515
4516 #ifdef CONFIG_PROC_SYSCTL
4517 static void reset_memory_tiering(void)
4518 {
4519 struct pglist_data *pgdat;
4520
4521 for_each_online_pgdat(pgdat) {
4522 pgdat->nbp_threshold = 0;
4523 pgdat->nbp_th_nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE);
4524 pgdat->nbp_th_start = jiffies_to_msecs(jiffies);
4525 }
4526 }
4527
4528 static int sysctl_numa_balancing(const struct ctl_table *table, int write,
4529 void *buffer, size_t *lenp, loff_t *ppos)
4530 {
4531 struct ctl_table t;
4532 int err;
4533 int state = sysctl_numa_balancing_mode;
4534
4535 if (write && !capable(CAP_SYS_ADMIN))
4536 return -EPERM;
4537
4538 t = *table;
4539 t.data = &state;
4540 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
4541 if (err < 0)
4542 return err;
4543 if (write) {
4544 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) &&
4545 (state & NUMA_BALANCING_MEMORY_TIERING))
4546 reset_memory_tiering();
4547 sysctl_numa_balancing_mode = state;
4548 __set_numabalancing_state(state);
4549 }
4550 return err;
4551 }
4552 #endif /* CONFIG_PROC_SYSCTL */
4553 #endif /* CONFIG_NUMA_BALANCING */
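
/*
 * The numa_balancing sysctl takes a bitmask of NUMA_BALANCING_* modes;
 * an illustrative example of the values accepted by the handler above:
 *
 *	echo 0 > /proc/sys/kernel/numa_balancing	# NUMA_BALANCING_DISABLED
 *	echo 1 > /proc/sys/kernel/numa_balancing	# NUMA_BALANCING_NORMAL
 *	echo 2 > /proc/sys/kernel/numa_balancing	# NUMA_BALANCING_MEMORY_TIERING
 */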
4554
4555 #ifdef CONFIG_SCHEDSTATS
4556
4557 DEFINE_STATIC_KEY_FALSE(sched_schedstats);
4558
4559 static void set_schedstats(bool enabled)
4560 {
4561 if (enabled)
4562 static_branch_enable(&sched_schedstats);
4563 else
4564 static_branch_disable(&sched_schedstats);
4565 }
4566
4567 void force_schedstat_enabled(void)
4568 {
4569 if (!schedstat_enabled()) {
4570 pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
4571 static_branch_enable(&sched_schedstats);
4572 }
4573 }
4574
4575 static int __init setup_schedstats(char *str)
4576 {
4577 int ret = 0;
4578 if (!str)
4579 goto out;
4580
4581 if (!strcmp(str, "enable")) {
4582 set_schedstats(true);
4583 ret = 1;
4584 } else if (!strcmp(str, "disable")) {
4585 set_schedstats(false);
4586 ret = 1;
4587 }
4588 out:
4589 if (!ret)
4590 pr_warn("Unable to parse schedstats=\n");
4591
4592 return ret;
4593 }
4594 __setup("schedstats=", setup_schedstats);
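
/*
 * Example: schedstats can be toggled at boot or at runtime; both forms
 * end up in the handlers above (the sysctl path is wired up by
 * sched_core_sysctls below):
 *
 *	schedstats=enable				# kernel command line
 *	echo 1 > /proc/sys/kernel/sched_schedstats	# runtime sysctl
 */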
4595
4596 #ifdef CONFIG_PROC_SYSCTL
4597 static int sysctl_schedstats(const struct ctl_table *table, int write, void *buffer,
4598 size_t *lenp, loff_t *ppos)
4599 {
4600 struct ctl_table t;
4601 int err;
4602 int state = static_branch_likely(&sched_schedstats);
4603
4604 if (write && !capable(CAP_SYS_ADMIN))
4605 return -EPERM;
4606
4607 t = *table;
4608 t.data = &state;
4609 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
4610 if (err < 0)
4611 return err;
4612 if (write)
4613 set_schedstats(state);
4614 return err;
4615 }
4616 #endif /* CONFIG_PROC_SYSCTL */
4617 #endif /* CONFIG_SCHEDSTATS */
4618
4619 #ifdef CONFIG_SYSCTL
4620 static const struct ctl_table sched_core_sysctls[] = {
4621 #ifdef CONFIG_SCHEDSTATS
4622 {
4623 .procname = "sched_schedstats",
4624 .data = NULL,
4625 .maxlen = sizeof(unsigned int),
4626 .mode = 0644,
4627 .proc_handler = sysctl_schedstats,
4628 .extra1 = SYSCTL_ZERO,
4629 .extra2 = SYSCTL_ONE,
4630 },
4631 #endif /* CONFIG_SCHEDSTATS */
4632 #ifdef CONFIG_UCLAMP_TASK
4633 {
4634 .procname = "sched_util_clamp_min",
4635 .data = &sysctl_sched_uclamp_util_min,
4636 .maxlen = sizeof(unsigned int),
4637 .mode = 0644,
4638 .proc_handler = sysctl_sched_uclamp_handler,
4639 },
4640 {
4641 .procname = "sched_util_clamp_max",
4642 .data = &sysctl_sched_uclamp_util_max,
4643 .maxlen = sizeof(unsigned int),
4644 .mode = 0644,
4645 .proc_handler = sysctl_sched_uclamp_handler,
4646 },
4647 {
4648 .procname = "sched_util_clamp_min_rt_default",
4649 .data = &sysctl_sched_uclamp_util_min_rt_default,
4650 .maxlen = sizeof(unsigned int),
4651 .mode = 0644,
4652 .proc_handler = sysctl_sched_uclamp_handler,
4653 },
4654 #endif /* CONFIG_UCLAMP_TASK */
4655 #ifdef CONFIG_NUMA_BALANCING
4656 {
4657 .procname = "numa_balancing",
4658 .data = NULL, /* filled in by handler */
4659 .maxlen = sizeof(unsigned int),
4660 .mode = 0644,
4661 .proc_handler = sysctl_numa_balancing,
4662 .extra1 = SYSCTL_ZERO,
4663 .extra2 = SYSCTL_FOUR,
4664 },
4665 #endif /* CONFIG_NUMA_BALANCING */
4666 };
4667 static int __init sched_core_sysctl_init(void)
4668 {
4669 register_sysctl_init("kernel", sched_core_sysctls);
4670 return 0;
4671 }
4672 late_initcall(sched_core_sysctl_init);
4673 #endif /* CONFIG_SYSCTL */
4674
4675 /*
4676 * fork()/clone()-time setup:
4677 */
4678 int sched_fork(u64 clone_flags, struct task_struct *p)
4679 {
4680 __sched_fork(clone_flags, p);
4681 /*
4682 * We mark the process as NEW here. This guarantees that
4683 * nobody will actually run it, and a signal or other external
4684 * event cannot wake it up and insert it on the runqueue either.
4685 */
4686 p->__state = TASK_NEW;
4687
4688 /*
4689 * Make sure we do not leak PI boosting priority to the child.
4690 */
4691 p->prio = current->normal_prio;
4692
4693 uclamp_fork(p);
4694
4695 /*
4696 * Revert to default priority/policy on fork if requested.
4697 */
4698 if (unlikely(p->sched_reset_on_fork)) {
4699 if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
4700 p->policy = SCHED_NORMAL;
4701 p->static_prio = NICE_TO_PRIO(0);
4702 p->rt_priority = 0;
4703 } else if (PRIO_TO_NICE(p->static_prio) < 0)
4704 p->static_prio = NICE_TO_PRIO(0);
4705
4706 p->prio = p->normal_prio = p->static_prio;
4707 set_load_weight(p, false);
4708 p->se.custom_slice = 0;
4709 p->se.slice = sysctl_sched_base_slice;
4710
4711 /*
4712 * We don't need the reset flag anymore after the fork. It has
4713 * fulfilled its duty:
4714 */
4715 p->sched_reset_on_fork = 0;
4716 }
4717
4718 if (dl_prio(p->prio))
4719 return -EAGAIN;
4720
4721 scx_pre_fork(p);
4722
4723 if (rt_prio(p->prio)) {
4724 p->sched_class = &rt_sched_class;
4725 #ifdef CONFIG_SCHED_CLASS_EXT
4726 } else if (task_should_scx(p->policy)) {
4727 p->sched_class = &ext_sched_class;
4728 #endif
4729 } else {
4730 p->sched_class = &fair_sched_class;
4731 }
4732
4733 init_entity_runnable_average(&p->se);
4734
4736 #ifdef CONFIG_SCHED_INFO
4737 if (likely(sched_info_on()))
4738 memset(&p->sched_info, 0, sizeof(p->sched_info));
4739 #endif
4740 p->on_cpu = 0;
4741 init_task_preempt_count(p);
4742 plist_node_init(&p->pushable_tasks, MAX_PRIO);
4743 RB_CLEAR_NODE(&p->pushable_dl_tasks);
4744
4745 return 0;
4746 }
4747
4748 int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
4749 {
4750 unsigned long flags;
4751
4752 /*
4753 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
4754 * required yet, but lockdep gets upset if rules are violated.
4755 */
4756 raw_spin_lock_irqsave(&p->pi_lock, flags);
4757 #ifdef CONFIG_CGROUP_SCHED
4758 if (1) {
4759 struct task_group *tg;
4760 tg = container_of(kargs->cset->subsys[cpu_cgrp_id],
4761 struct task_group, css);
4762 tg = autogroup_task_group(p, tg);
4763 p->sched_task_group = tg;
4764 }
4765 #endif
4766 rseq_migrate(p);
4767 /*
4768 * We're setting the CPU for the first time, we don't migrate,
4769 * so use __set_task_cpu().
4770 */
4771 __set_task_cpu(p, smp_processor_id());
4772 if (p->sched_class->task_fork)
4773 p->sched_class->task_fork(p);
4774 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4775
4776 return scx_fork(p);
4777 }
4778
4779 void sched_cancel_fork(struct task_struct *p)
4780 {
4781 scx_cancel_fork(p);
4782 }
4783
4784 void sched_post_fork(struct task_struct *p)
4785 {
4786 uclamp_post_fork(p);
4787 scx_post_fork(p);
4788 }
4789
4790 unsigned long to_ratio(u64 period, u64 runtime)
4791 {
4792 if (runtime == RUNTIME_INF)
4793 return BW_UNIT;
4794
4795 /*
4796 * Doing this here saves a lot of checks in all
4797 * the calling paths, and returning zero seems
4798 * safe for them anyway.
4799 */
4800 if (period == 0)
4801 return 0;
4802
4803 return div64_u64(runtime << BW_SHIFT, period);
4804 }
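
/*
 * Worked example, assuming BW_SHIFT == 20 (so BW_UNIT == 1 << 20):
 *
 *	to_ratio(100 * NSEC_PER_MSEC, 50 * NSEC_PER_MSEC)
 *		= (50ms << 20) / 100ms
 *		= BW_UNIT / 2
 *		= 524288	// a 50% bandwidth ratio
 */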
4805
4806 /*
4807 * wake_up_new_task - wake up a newly created task for the first time.
4808 *
4809 * This function will do some initial scheduler statistics housekeeping
4810 * that must be done for every newly created context, then puts the task
4811 * on the runqueue and wakes it.
4812 */
4813 void wake_up_new_task(struct task_struct *p)
4814 {
4815 struct rq_flags rf;
4816 struct rq *rq;
4817 int wake_flags = WF_FORK;
4818
4819 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
4820 WRITE_ONCE(p->__state, TASK_RUNNING);
4821 /*
4822 * Fork balancing, do it here and not earlier because:
4823 * - cpus_ptr can change in the fork path
4824 * - any previously selected CPU might disappear through hotplug
4825 *
4826 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
4827 * as we're not fully set-up yet.
4828 */
4829 p->recent_used_cpu = task_cpu(p);
4830 rseq_migrate(p);
4831 __set_task_cpu(p, select_task_rq(p, task_cpu(p), &wake_flags));
4832 rq = __task_rq_lock(p, &rf);
4833 update_rq_clock(rq);
4834 post_init_entity_util_avg(p);
4835
4836 activate_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_INITIAL);
4837 trace_sched_wakeup_new(p);
4838 wakeup_preempt(rq, p, wake_flags);
4839 if (p->sched_class->task_woken) {
4840 /*
4841 * Nothing relies on rq->lock after this, so it's fine to
4842 * drop it.
4843 */
4844 rq_unpin_lock(rq, &rf);
4845 p->sched_class->task_woken(rq, p);
4846 rq_repin_lock(rq, &rf);
4847 }
4848 task_rq_unlock(rq, p, &rf);
4849 }
4850
4851 #ifdef CONFIG_PREEMPT_NOTIFIERS
4852
4853 static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
4854
4855 void preempt_notifier_inc(void)
4856 {
4857 static_branch_inc(&preempt_notifier_key);
4858 }
4859 EXPORT_SYMBOL_GPL(preempt_notifier_inc);
4860
4861 void preempt_notifier_dec(void)
4862 {
4863 static_branch_dec(&preempt_notifier_key);
4864 }
4865 EXPORT_SYMBOL_GPL(preempt_notifier_dec);
4866
4867 /**
4868 * preempt_notifier_register - tell me when current is being preempted & rescheduled
4869 * @notifier: notifier struct to register
4870 */
4871 void preempt_notifier_register(struct preempt_notifier *notifier)
4872 {
4873 if (!static_branch_unlikely(&preempt_notifier_key))
4874 WARN(1, "registering preempt_notifier while notifiers disabled\n");
4875
4876 hlist_add_head(&notifier->link, &current->preempt_notifiers);
4877 }
4878 EXPORT_SYMBOL_GPL(preempt_notifier_register);
4879
4880 /**
4881 * preempt_notifier_unregister - no longer interested in preemption notifications
4882 * @notifier: notifier struct to unregister
4883 *
4884 * This is *not* safe to call from within a preemption notifier.
4885 */
4886 void preempt_notifier_unregister(struct preempt_notifier *notifier)
4887 {
4888 hlist_del(&notifier->link);
4889 }
4890 EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
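
/*
 * Registration sketch with hypothetical ops (my_ops, my_sched_in and
 * my_sched_out are illustrative only); note that registration attaches
 * the notifier to current:
 *
 *	static struct preempt_ops my_ops = {
 *		.sched_in	= my_sched_in,
 *		.sched_out	= my_sched_out,
 *	};
 *	static struct preempt_notifier my_notifier;
 *
 *	preempt_notifier_inc();
 *	preempt_notifier_init(&my_notifier, &my_ops);
 *	preempt_notifier_register(&my_notifier);
 */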
4891
4892 static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
4893 {
4894 struct preempt_notifier *notifier;
4895
4896 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
4897 notifier->ops->sched_in(notifier, raw_smp_processor_id());
4898 }
4899
4900 static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
4901 {
4902 if (static_branch_unlikely(&preempt_notifier_key))
4903 __fire_sched_in_preempt_notifiers(curr);
4904 }
4905
4906 static void
4907 __fire_sched_out_preempt_notifiers(struct task_struct *curr,
4908 struct task_struct *next)
4909 {
4910 struct preempt_notifier *notifier;
4911
4912 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
4913 notifier->ops->sched_out(notifier, next);
4914 }
4915
4916 static __always_inline void
4917 fire_sched_out_preempt_notifiers(struct task_struct *curr,
4918 struct task_struct *next)
4919 {
4920 if (static_branch_unlikely(&preempt_notifier_key))
4921 __fire_sched_out_preempt_notifiers(curr, next);
4922 }
4923
4924 #else /* !CONFIG_PREEMPT_NOTIFIERS: */
4925
4926 static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
4927 {
4928 }
4929
4930 static inline void
4931 fire_sched_out_preempt_notifiers(struct task_struct *curr,
4932 struct task_struct *next)
4933 {
4934 }
4935
4936 #endif /* !CONFIG_PREEMPT_NOTIFIERS */
4937
4938 static inline void prepare_task(struct task_struct *next)
4939 {
4940 /*
4941 * Claim the task as running, we do this before switching to it
4942 * such that any running task will have this set.
4943 *
4944 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and
4945 * its ordering comment.
4946 */
4947 WRITE_ONCE(next->on_cpu, 1);
4948 }
4949
4950 static inline void finish_task(struct task_struct *prev)
4951 {
4952 /*
4953 * This must be the very last reference to @prev from this CPU. After
4954 * p->on_cpu is cleared, the task can be moved to a different CPU. We
4955 * must ensure this doesn't happen until the switch is completely
4956 * finished.
4957 *
4958 * In particular, the load of prev->state in finish_task_switch() must
4959 * happen before this.
4960 *
4961 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
4962 */
4963 smp_store_release(&prev->on_cpu, 0);
4964 }
4965
4966 static void do_balance_callbacks(struct rq *rq, struct balance_callback *head)
4967 {
4968 void (*func)(struct rq *rq);
4969 struct balance_callback *next;
4970
4971 lockdep_assert_rq_held(rq);
4972
4973 while (head) {
4974 func = (void (*)(struct rq *))head->func;
4975 next = head->next;
4976 head->next = NULL;
4977 head = next;
4978
4979 func(rq);
4980 }
4981 }
4982
4983 static void balance_push(struct rq *rq);
4984
4985 /*
4986 * balance_push_callback is a right abuse of the callback interface and plays
4987 * by significantly different rules.
4988 *
4989 * Where the normal balance_callback's purpose is to be run in the same context
4990 * that queued it (only later, when it's safe to drop rq->lock again),
4991 * balance_push_callback is specifically targeted at __schedule().
4992 *
4993 * This abuse is tolerated because it places all the unlikely/odd cases behind
4994 * a single test, namely: rq->balance_callback == NULL.
4995 */
4996 struct balance_callback balance_push_callback = {
4997 .next = NULL,
4998 .func = balance_push,
4999 };
5000
5001 static inline struct balance_callback *
5002 __splice_balance_callbacks(struct rq *rq, bool split)
5003 {
5004 struct balance_callback *head = rq->balance_callback;
5005
5006 if (likely(!head))
5007 return NULL;
5008
5009 lockdep_assert_rq_held(rq);
5010 /*
5011 * Must not take balance_push_callback off the list when
5012 * splice_balance_callbacks() and balance_callbacks() are not
5013 * in the same rq->lock section.
5014 *
5015 * In that case it would be possible for __schedule() to interleave
5016 * and observe the list empty.
5017 */
5018 if (split && head == &balance_push_callback)
5019 head = NULL;
5020 else
5021 rq->balance_callback = NULL;
5022
5023 return head;
5024 }
5025
5026 struct balance_callback *splice_balance_callbacks(struct rq *rq)
5027 {
5028 return __splice_balance_callbacks(rq, true);
5029 }
5030
5031 static void __balance_callbacks(struct rq *rq)
5032 {
5033 do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
5034 }
5035
5036 void balance_callbacks(struct rq *rq, struct balance_callback *head)
5037 {
5038 unsigned long flags;
5039
5040 if (unlikely(head)) {
5041 raw_spin_rq_lock_irqsave(rq, flags);
5042 do_balance_callbacks(rq, head);
5043 raw_spin_rq_unlock_irqrestore(rq, flags);
5044 }
5045 }
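
/*
 * Typical split usage, a sketch (see __splice_balance_callbacks() for
 * why balance_push_callback must stay queued across the unlock):
 *
 *	head = splice_balance_callbacks(rq);	// while holding rq->lock
 *	task_rq_unlock(rq, p, &rf);
 *	...
 *	balance_callbacks(rq, head);		// re-takes rq->lock as needed
 */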
5046
5047 static inline void
5048 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
5049 {
5050 /*
5051 * The runqueue lock will be released by the next
5052 * task (which is an invalid locking op but in the case
5053 * of the scheduler it's an obvious special-case), so we
5054 * do an early lockdep release here:
5055 */
5056 rq_unpin_lock(rq, rf);
5057 spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_);
5058 #ifdef CONFIG_DEBUG_SPINLOCK
5059 /* this is a valid case when another task releases the spinlock */
5060 rq_lockp(rq)->owner = next;
5061 #endif
5062 }
5063
5064 static inline void finish_lock_switch(struct rq *rq)
5065 {
5066 /*
5067 * If we are tracking spinlock dependencies then we have to
5068 * fix up the runqueue lock - which gets 'carried over' from
5069 * prev into current:
5070 */
5071 spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_);
5072 __balance_callbacks(rq);
5073 raw_spin_rq_unlock_irq(rq);
5074 }
5075
5076 /*
5077 * NOP if the arch has not defined these:
5078 */
5079
5080 #ifndef prepare_arch_switch
5081 # define prepare_arch_switch(next) do { } while (0)
5082 #endif
5083
5084 #ifndef finish_arch_post_lock_switch
5085 # define finish_arch_post_lock_switch() do { } while (0)
5086 #endif
5087
5088 static inline void kmap_local_sched_out(void)
5089 {
5090 #ifdef CONFIG_KMAP_LOCAL
5091 if (unlikely(current->kmap_ctrl.idx))
5092 __kmap_local_sched_out();
5093 #endif
5094 }
5095
5096 static inline void kmap_local_sched_in(void)
5097 {
5098 #ifdef CONFIG_KMAP_LOCAL
5099 if (unlikely(current->kmap_ctrl.idx))
5100 __kmap_local_sched_in();
5101 #endif
5102 }
5103
5104 /**
5105 * prepare_task_switch - prepare to switch tasks
5106 * @rq: the runqueue preparing to switch
5107 * @prev: the current task that is being switched out
5108 * @next: the task we are going to switch to.
5109 *
5110 * This is called with the rq lock held and interrupts off. It must
5111 * be paired with a subsequent finish_task_switch after the context
5112 * switch.
5113 *
5114 * prepare_task_switch sets up locking and calls architecture specific
5115 * hooks.
5116 */
5117 static inline void
5118 prepare_task_switch(struct rq *rq, struct task_struct *prev,
5119 struct task_struct *next)
5120 {
5121 kcov_prepare_switch(prev);
5122 sched_info_switch(rq, prev, next);
5123 perf_event_task_sched_out(prev, next);
5124 rseq_preempt(prev);
5125 fire_sched_out_preempt_notifiers(prev, next);
5126 kmap_local_sched_out();
5127 prepare_task(next);
5128 prepare_arch_switch(next);
5129 }
5130
5131 /**
5132 * finish_task_switch - clean up after a task-switch
5133 * @prev: the thread we just switched away from.
5134 *
5135 * finish_task_switch must be called after the context switch, paired
5136 * with a prepare_task_switch call before the context switch.
5137 * finish_task_switch will reconcile locking set up by prepare_task_switch,
5138 * and do any other architecture-specific cleanup actions.
5139 *
5140 * Note that we may have delayed dropping an mm in context_switch(). If
5141 * so, we finish that here outside of the runqueue lock. (Doing it
5142 * with the lock held can cause deadlocks; see schedule() for
5143 * details.)
5144 *
5145 * The context switch has flipped the stack from under us and restored the
5146 * local variables which were saved when this task called schedule() in the
5147 * past. 'prev == current' is still correct but we need to recalculate this_rq
5148 * because prev may have moved to another CPU.
5149 */
5150 static struct rq *finish_task_switch(struct task_struct *prev)
5151 __releases(rq->lock)
5152 {
5153 struct rq *rq = this_rq();
5154 struct mm_struct *mm = rq->prev_mm;
5155 unsigned int prev_state;
5156
5157 /*
5158 * The previous task will have left us with a preempt_count of 2
5159 * because it left us after:
5160 *
5161 * schedule()
5162 * preempt_disable(); // 1
5163 * __schedule()
5164 * raw_spin_lock_irq(&rq->lock) // 2
5165 *
5166 * Also, see FORK_PREEMPT_COUNT.
5167 */
5168 if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
5169 "corrupted preempt_count: %s/%d/0x%x\n",
5170 current->comm, current->pid, preempt_count()))
5171 preempt_count_set(FORK_PREEMPT_COUNT);
5172
5173 rq->prev_mm = NULL;
5174
5175 /*
5176 * A task struct has one reference for the use as "current".
5177 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
5178 * schedule one last time. The schedule call will never return, and
5179 * the scheduled task must drop that reference.
5180 *
5181 * We must observe prev->state before clearing prev->on_cpu (in
5182 * finish_task), otherwise a concurrent wakeup can get prev
5183 * running on another CPU and we could race with its RUNNING -> DEAD
5184 * transition, resulting in a double drop.
5185 */
5186 prev_state = READ_ONCE(prev->__state);
5187 vtime_task_switch(prev);
5188 perf_event_task_sched_in(prev, current);
5189 finish_task(prev);
5190 tick_nohz_task_switch();
5191 finish_lock_switch(rq);
5192 finish_arch_post_lock_switch();
5193 kcov_finish_switch(current);
5194 /*
5195 * kmap_local_sched_out() is invoked with rq::lock held and
5196 * interrupts disabled. There is no requirement for that, but the
5197 * sched out code does not have an interrupt enabled section.
5198 * Restoring the maps on sched in does not require interrupts being
5199 * disabled either.
5200 */
5201 kmap_local_sched_in();
5202
5203 fire_sched_in_preempt_notifiers(current);
5204 /*
5205 * When switching through a kernel thread, the loop in
5206 * membarrier_{private,global}_expedited() may have observed that
5207 * kernel thread and not issued an IPI. It is therefore possible to
5208 * schedule between user->kernel->user threads without passing through
5209 * switch_mm(). Membarrier requires a barrier after storing to
5210 * rq->curr, before returning to userspace, so provide them here:
5211 *
5212 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
5213 * provided by mmdrop_lazy_tlb(),
5214 * - a sync_core for SYNC_CORE.
5215 */
5216 if (mm) {
5217 membarrier_mm_sync_core_before_usermode(mm);
5218 mmdrop_lazy_tlb_sched(mm);
5219 }
5220
5221 if (unlikely(prev_state == TASK_DEAD)) {
5222 if (prev->sched_class->task_dead)
5223 prev->sched_class->task_dead(prev);
5224
5225 /* Task is done with its stack. */
5226 put_task_stack(prev);
5227
5228 put_task_struct_rcu_user(prev);
5229 }
5230
5231 return rq;
5232 }
5233
5234 /**
5235 * schedule_tail - first thing a freshly forked thread must call.
5236 * @prev: the thread we just switched away from.
5237 */
5238 asmlinkage __visible void schedule_tail(struct task_struct *prev)
5239 __releases(rq->lock)
5240 {
5241 /*
5242 * New tasks start with FORK_PREEMPT_COUNT, see there and
5243 * finish_task_switch() for details.
5244 *
5245 * finish_task_switch() will drop rq->lock() and lower preempt_count
5246 * and the preempt_enable() will end up enabling preemption (on
5247 * PREEMPT_COUNT kernels).
5248 */
5249
5250 finish_task_switch(prev);
5251 /*
5252 * This is a special case: the newly created task has just
5253 * switched the context for the first time. It is returning from
5254 * schedule() for the first time in this path.
5255 */
5256 trace_sched_exit_tp(true);
5257 preempt_enable();
5258
5259 if (current->set_child_tid)
5260 put_user(task_pid_vnr(current), current->set_child_tid);
5261
5262 calculate_sigpending();
5263 }
5264
5265 /*
5266 * context_switch - switch to the new MM and the new thread's register state.
5267 */
5268 static __always_inline struct rq *
5269 context_switch(struct rq *rq, struct task_struct *prev,
5270 struct task_struct *next, struct rq_flags *rf)
5271 {
5272 prepare_task_switch(rq, prev, next);
5273
5274 /*
5275 * For paravirt, this is coupled with an exit in switch_to to
5276 * combine the page table reload and the switch backend into
5277 * one hypercall.
5278 */
5279 arch_start_context_switch(prev);
5280
5281 /*
5282 * kernel -> kernel lazy + transfer active
5283 * user -> kernel lazy + mmgrab_lazy_tlb() active
5284 *
5285 * kernel -> user switch + mmdrop_lazy_tlb() active
5286 * user -> user switch
5287 *
5288 * switch_mm_cid() needs to be updated if the barriers provided
5289 * by context_switch() are modified.
5290 */
5291 if (!next->mm) { // to kernel
5292 enter_lazy_tlb(prev->active_mm, next);
5293
5294 next->active_mm = prev->active_mm;
5295 if (prev->mm) // from user
5296 mmgrab_lazy_tlb(prev->active_mm);
5297 else
5298 prev->active_mm = NULL;
5299 } else { // to user
5300 membarrier_switch_mm(rq, prev->active_mm, next->mm);
5301 /*
5302 * sys_membarrier() requires an smp_mb() between setting
5303 * rq->curr / membarrier_switch_mm() and returning to userspace.
5304 *
5305 * The below provides this either through switch_mm(), or in
5306 * case 'prev->active_mm == next->mm' through
5307 * finish_task_switch()'s mmdrop().
5308 */
5309 switch_mm_irqs_off(prev->active_mm, next->mm, next);
5310 lru_gen_use_mm(next->mm);
5311
5312 if (!prev->mm) { // from kernel
5313 /* will mmdrop_lazy_tlb() in finish_task_switch(). */
5314 rq->prev_mm = prev->active_mm;
5315 prev->active_mm = NULL;
5316 }
5317 }
5318
5319 /* switch_mm_cid() requires the memory barriers above. */
5320 switch_mm_cid(rq, prev, next);
5321
5322 prepare_lock_switch(rq, next, rf);
5323
5324 /* Here we just switch the register state and the stack. */
5325 switch_to(prev, next, prev);
5326 barrier();
5327
5328 return finish_task_switch(prev);
5329 }
5330
5331 /*
5332 * nr_running and nr_context_switches:
5333 *
5334 * externally visible scheduler statistics: current number of runnable
5335 * threads, total number of context switches performed since bootup.
5336 */
5337 unsigned int nr_running(void)
5338 {
5339 unsigned int i, sum = 0;
5340
5341 for_each_online_cpu(i)
5342 sum += cpu_rq(i)->nr_running;
5343
5344 return sum;
5345 }
5346
5347 /*
5348 * Check if only the current task is running on the CPU.
5349 *
5350 * Caution: this function does not check that the caller has disabled
5351 * preemption, thus the result might have a time-of-check-to-time-of-use
5352 * race. The caller is responsible for using it correctly, for example:
5353 *
5354 * - from a non-preemptible section (of course)
5355 *
5356 * - from a thread that is bound to a single CPU
5357 *
5358 * - in a loop with very short iterations (e.g. a polling loop)
5359 */
5360 bool single_task_running(void)
5361 {
5362 return raw_rq()->nr_running == 1;
5363 }
5364 EXPORT_SYMBOL(single_task_running);
5365
5366 unsigned long long nr_context_switches_cpu(int cpu)
5367 {
5368 return cpu_rq(cpu)->nr_switches;
5369 }
5370
5371 unsigned long long nr_context_switches(void)
5372 {
5373 int i;
5374 unsigned long long sum = 0;
5375
5376 for_each_possible_cpu(i)
5377 sum += cpu_rq(i)->nr_switches;
5378
5379 return sum;
5380 }
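
/*
 * Illustrative sketch (hypothetical debug code): sampling the counter
 * twice yields a context-switch rate; this is the same counter reported
 * on the "ctxt" line of /proc/stat:
 *
 *	unsigned long long before = nr_context_switches();
 *
 *	msleep(1000);
 *	pr_info("context switches/sec: %llu\n",
 *		nr_context_switches() - before);
 */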
5381
5382 /*
5383 * Consumers of these two interfaces, like for example the cpuidle menu
5384 * governor, are using nonsensical data: they prefer a shallow idle state for
5385 * a CPU with pending IO-wait, even though that CPU might not even end up
5386 * running the task once it does become runnable.
5387 */
5388
5389 unsigned int nr_iowait_cpu(int cpu)
5390 {
5391 return atomic_read(&cpu_rq(cpu)->nr_iowait);
5392 }
5393
5394 /*
5395 * IO-wait accounting, and how it's mostly bollocks (on SMP).
5396 *
5397 * The idea behind IO-wait accounting is to account the idle time that we could
5398 * have spent running if it were not for IO. That is, if we were to improve the
5399 * storage performance, we'd have a proportional reduction in IO-wait time.
5400 *
5401 * This all works nicely on UP, where, when a task blocks on IO, we account
5402 * idle time as IO-wait, because if the storage were faster, it could've been
5403 * running and we'd not be idle.
5404 *
5405 * This has been extended to SMP, by doing the same for each CPU. This however
5406 * is broken.
5407 *
5408 * Imagine for instance the case where two tasks block on one CPU; only that
5409 * one CPU will have IO-wait accounted, while the other has regular idle. Even
5410 * though, if the storage were faster, both could've run at the same time,
5411 * utilising both CPUs.
5412 *
5413 * This means that, when looking globally, the current IO-wait accounting on
5414 * SMP is a lower bound, due to under-accounting.
5415 *
5416 * Worse, since the numbers are provided per CPU, they are sometimes
5417 * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
5418 * associated with any one particular CPU; it can wake up on a different CPU
5419 * than the one it blocked on. This means the per-CPU IO-wait number is meaningless.
5420 *
5421 * Task CPU affinities can make all that even more 'interesting'.
5422 */
5423
5424 unsigned int nr_iowait(void)
5425 {
5426 unsigned int i, sum = 0;
5427
5428 for_each_possible_cpu(i)
5429 sum += nr_iowait_cpu(i);
5430
5431 return sum;
5432 }
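
/*
 * Illustrative sketch (not kernel code): given the caveats above, the
 * iowait column of /proc/stat (fifth field, in USER_HZ ticks) is best
 * read as a rough global lower bound, never as a per-CPU truth:
 *
 *	#include <stdio.h>
 *
 *	unsigned long long iowait_ticks(void)
 *	{
 *		unsigned long long user, nice, sys, idle, iowait = 0;
 *		FILE *f = fopen("/proc/stat", "r");
 *
 *		if (f) {
 *			fscanf(f, "cpu %llu %llu %llu %llu %llu",
 *			       &user, &nice, &sys, &idle, &iowait);
 *			fclose(f);
 *		}
 *		return iowait;	// lower bound at best, see above
 *	}
 */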
5433
5434 /*
5435 * sched_exec - execve() is a valuable balancing opportunity, because at
5436 * this point the task has the smallest effective memory and cache footprint.
5437 */
5438 void sched_exec(void)
5439 {
5440 struct task_struct *p = current;
5441 struct migration_arg arg;
5442 int dest_cpu;
5443
5444 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
5445 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC);
5446 if (dest_cpu == smp_processor_id())
5447 return;
5448
5449 if (unlikely(!cpu_active(dest_cpu)))
5450 return;
5451
5452 arg = (struct migration_arg){ p, dest_cpu };
5453 }
5454 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
5455 }
5456
5457 DEFINE_PER_CPU(struct kernel_stat, kstat);
5458 DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
5459
5460 EXPORT_PER_CPU_SYMBOL(kstat);
5461 EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
5462
5463 /*
5464 * The function fair_sched_class.update_curr accesses the struct curr
5465 * and its field curr->exec_start; when called from task_sched_runtime(),
5466 * we observe a high rate of cache misses in practice.
5467 * Prefetching this data results in improved performance.
5468 */
5469 static inline void prefetch_curr_exec_start(struct task_struct *p)
5470 {
5471 #ifdef CONFIG_FAIR_GROUP_SCHED
5472 struct sched_entity *curr = p->se.cfs_rq->curr;
5473 #else
5474 struct sched_entity *curr = task_rq(p)->cfs.curr;
5475 #endif
5476 prefetch(curr);
5477 prefetch(&curr->exec_start);
5478 }
5479
5480 /*
5481 * Return accounted runtime for the task.
5482 * In case the task is currently running, return the runtime plus current's
5483 * pending runtime that has not been accounted yet.
5484 */
5485 unsigned long long task_sched_runtime(struct task_struct *p)
5486 {
5487 struct rq_flags rf;
5488 struct rq *rq;
5489 u64 ns;
5490
5491 #ifdef CONFIG_64BIT
5492 /*
5493 * 64-bit doesn't need locks to atomically read a 64-bit value.
5494 * So we have an optimization opportunity when the task's delta_exec is 0.
5495 * Reading ->on_cpu is racy, but this is OK.
5496 *
5497 * If we race with it leaving CPU, we'll take a lock. So we're correct.
5498 * If we race with it entering CPU, unaccounted time is 0. This is
5499 * indistinguishable from the read occurring a few cycles earlier.
5500 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
5501 * been accounted, so we're correct here as well.
5502 */
5503 if (!p->on_cpu || !task_on_rq_queued(p))
5504 return p->se.sum_exec_runtime;
5505 #endif
5506
5507 rq = task_rq_lock(p, &rf);
5508 /*
5509 * Must be ->curr _and_ ->on_rq. If dequeued, we would
5510 * project cycles that may never be accounted to this
5511 * thread, breaking clock_gettime().
5512 */
5513 if (task_current_donor(rq, p) && task_on_rq_queued(p)) {
5514 prefetch_curr_exec_start(p);
5515 update_rq_clock(rq);
5516 p->sched_class->update_curr(rq);
5517 }
5518 ns = p->se.sum_exec_runtime;
5519 task_rq_unlock(rq, p, &rf);
5520
5521 return ns;
5522 }
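
/*
 * Illustrative sketch (not kernel code): this value ultimately backs the
 * POSIX per-thread CPU clock, so the typical userspace view of it is:
 *
 *	#include <time.h>
 *
 *	unsigned long long thread_runtime_ns(void)
 *	{
 *		struct timespec ts;
 *
 *		clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
 *		return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
 *	}
 */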
5523
5524 static u64 cpu_resched_latency(struct rq *rq)
5525 {
5526 int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms);
5527 u64 resched_latency, now = rq_clock(rq);
5528 static bool warned_once;
5529
5530 if (sysctl_resched_latency_warn_once && warned_once)
5531 return 0;
5532
5533 if (!need_resched() || !latency_warn_ms)
5534 return 0;
5535
5536 if (system_state == SYSTEM_BOOTING)
5537 return 0;
5538
5539 if (!rq->last_seen_need_resched_ns) {
5540 rq->last_seen_need_resched_ns = now;
5541 rq->ticks_without_resched = 0;
5542 return 0;
5543 }
5544
5545 rq->ticks_without_resched++;
5546 resched_latency = now - rq->last_seen_need_resched_ns;
5547 if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC)
5548 return 0;
5549
5550 warned_once = true;
5551
5552 return resched_latency;
5553 }
5554
5555 static int __init setup_resched_latency_warn_ms(char *str)
5556 {
5557 long val;
5558
5559 if (kstrtol(str, 0, &val)) {
5560 pr_warn("Unable to set resched_latency_warn_ms\n");
5561 return 1;
5562 }
5563
5564 sysctl_resched_latency_warn_ms = val;
5565 return 1;
5566 }
5567 __setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms);
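
/*
 * Illustrative usage (assuming a build with the LATENCY_WARN scheduler
 * feature available): booting with e.g.
 *
 *	resched_latency_warn_ms=50
 *
 * on the kernel command line arms the check above, and with LATENCY_WARN
 * enabled (typically via the debugfs sched 'features' file) a warning
 * fires once need_resched() has been set for more than 50ms without a
 * reschedule.
 */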
5568
5569 /*
5570 * This function gets called by the timer code, with HZ frequency.
5571 * We call it with interrupts disabled.
5572 */
5573 void sched_tick(void)
5574 {
5575 int cpu = smp_processor_id();
5576 struct rq *rq = cpu_rq(cpu);
5577 /* accounting goes to the donor task */
5578 struct task_struct *donor;
5579 struct rq_flags rf;
5580 unsigned long hw_pressure;
5581 u64 resched_latency;
5582
5583 if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE))
5584 arch_scale_freq_tick();
5585
5586 sched_clock_tick();
5587
5588 rq_lock(rq, &rf);
5589 donor = rq->donor;
5590
5591 psi_account_irqtime(rq, donor, NULL);
5592
5593 update_rq_clock(rq);
5594 hw_pressure = arch_scale_hw_pressure(cpu_of(rq));
5595 update_hw_load_avg(rq_clock_task(rq), rq, hw_pressure);
5596
5597 if (dynamic_preempt_lazy() && tif_test_bit(TIF_NEED_RESCHED_LAZY))
5598 resched_curr(rq);
5599
5600 donor->sched_class->task_tick(rq, donor, 0);
5601 if (sched_feat(LATENCY_WARN))
5602 resched_latency = cpu_resched_latency(rq);
5603 calc_global_load_tick(rq);
5604 sched_core_tick(rq);
5605 task_tick_mm_cid(rq, donor);
5606 scx_tick(rq);
5607
5608 rq_unlock(rq, &rf);
5609
5610 if (sched_feat(LATENCY_WARN) && resched_latency)
5611 resched_latency_warn(cpu, resched_latency);
5612
5613 perf_event_task_tick();
5614
5615 if (donor->flags & PF_WQ_WORKER)
5616 wq_worker_tick(donor);
5617
5618 if (!scx_switched_all()) {
5619 rq->idle_balance = idle_cpu(cpu);
5620 sched_balance_trigger(rq);
5621 }
5622 }
5623
5624 #ifdef CONFIG_NO_HZ_FULL
5625
5626 struct tick_work {
5627 int cpu;
5628 atomic_t state;
5629 struct delayed_work work;
5630 };
5631 /* Values for ->state, see diagram below. */
5632 #define TICK_SCHED_REMOTE_OFFLINE 0
5633 #define TICK_SCHED_REMOTE_OFFLINING 1
5634 #define TICK_SCHED_REMOTE_RUNNING 2
5635
5636 /*
5637 * State diagram for ->state:
5638 *
5639 *
5640 * TICK_SCHED_REMOTE_OFFLINE
5641 * | ^
5642 * | |
5643 * | | sched_tick_remote()
5644 * | |
5645 * | |
5646 * +--TICK_SCHED_REMOTE_OFFLINING
5647 * | ^
5648 * | |
5649 * sched_tick_start() | | sched_tick_stop()
5650 * | |
5651 * V |
5652 * TICK_SCHED_REMOTE_RUNNING
5653 *
5654 *
5655 * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
5656 * and sched_tick_start() are happy to leave the state in RUNNING.
5657 */
5658
5659 static struct tick_work __percpu *tick_work_cpu;
5660
5661 static void sched_tick_remote(struct work_struct *work)
5662 {
5663 struct delayed_work *dwork = to_delayed_work(work);
5664 struct tick_work *twork = container_of(dwork, struct tick_work, work);
5665 int cpu = twork->cpu;
5666 struct rq *rq = cpu_rq(cpu);
5667 int os;
5668
5669 /*
5670 * Handle the tick only if it appears the remote CPU is running in full
5671 * dynticks mode. The check is racy by nature, but missing a tick or
5672 * having one too many is no big deal because the scheduler tick updates
5673 * statistics and checks timeslices in a time-independent way, regardless
5674 * of when exactly it is running.
5675 */
5676 if (tick_nohz_tick_stopped_cpu(cpu)) {
5677 guard(rq_lock_irq)(rq);
5678 struct task_struct *curr = rq->curr;
5679
5680 if (cpu_online(cpu)) {
5681 /*
5682 * Since this is a remote tick for full dynticks mode,
5683 * we are always sure that there is no proxy (only a
5684 * single task is running).
5685 */
5686 WARN_ON_ONCE(rq->curr != rq->donor);
5687 update_rq_clock(rq);
5688
5689 if (!is_idle_task(curr)) {
5690 /*
5691 * Make sure the next tick runs within a
5692 * reasonable amount of time.
5693 */
5694 u64 delta = rq_clock_task(rq) - curr->se.exec_start;
5695 WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
5696 }
5697 curr->sched_class->task_tick(rq, curr, 0);
5698
5699 calc_load_nohz_remote(rq);
5700 }
5701 }
5702
5703 /*
5704 * Run the remote tick once per second (1Hz). This arbitrary
5705 * period is long enough to avoid overload but short enough
5706 * to keep scheduler internal stats reasonably up to date. But
5707 * first update state to reflect hotplug activity if required.
5708 */
5709 os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
5710 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
5711 if (os == TICK_SCHED_REMOTE_RUNNING)
5712 queue_delayed_work(system_unbound_wq, dwork, HZ);
5713 }
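
/*
 * Note on the atomic above (worked out for clarity):
 * atomic_fetch_add_unless(state, -1, RUNNING) decrements the state only
 * when it is not RUNNING, so:
 *
 *	RUNNING (2)   -> unchanged, and the work re-queues itself
 *	OFFLINING (1) -> OFFLINE (0), and the work is not re-queued
 *
 * which implements exactly the sched_tick_stop() edge of the state
 * diagram above.
 */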
5714
5715 static void sched_tick_start(int cpu)
5716 {
5717 int os;
5718 struct tick_work *twork;
5719
5720 if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE))
5721 return;
5722
5723 WARN_ON_ONCE(!tick_work_cpu);
5724
5725 twork = per_cpu_ptr(tick_work_cpu, cpu);
5726 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
5727 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
5728 if (os == TICK_SCHED_REMOTE_OFFLINE) {
5729 twork->cpu = cpu;
5730 INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
5731 queue_delayed_work(system_unbound_wq, &twork->work, HZ);
5732 }
5733 }
5734
5735 #ifdef CONFIG_HOTPLUG_CPU
5736 static void sched_tick_stop(int cpu)
5737 {
5738 struct tick_work *twork;
5739 int os;
5740
5741 if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE))
5742 return;
5743
5744 WARN_ON_ONCE(!tick_work_cpu);
5745
5746 twork = per_cpu_ptr(tick_work_cpu, cpu);
5747 /* There cannot be competing actions, but don't rely on stop-machine. */
5748 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
5749 WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
5750 /* Don't cancel, as this would mess up the state machine. */
5751 }
5752 #endif /* CONFIG_HOTPLUG_CPU */
5753
5754 int __init sched_tick_offload_init(void)
5755 {
5756 tick_work_cpu = alloc_percpu(struct tick_work);
5757 BUG_ON(!tick_work_cpu);
5758 return 0;
5759 }
5760
5761 #else /* !CONFIG_NO_HZ_FULL: */
5762 static inline void sched_tick_start(int cpu) { }
5763 static inline void sched_tick_stop(int cpu) { }
5764 #endif /* !CONFIG_NO_HZ_FULL */
5765
5766 #if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
5767 defined(CONFIG_TRACE_PREEMPT_TOGGLE))
5768 /*
5769 * If the value passed in is equal to the current preempt count
5770 * then we just disabled preemption. Start timing the latency.
5771 */
5772 static inline void preempt_latency_start(int val)
5773 {
5774 if (preempt_count() == val) {
5775 unsigned long ip = get_lock_parent_ip();
5776 #ifdef CONFIG_DEBUG_PREEMPT
5777 current->preempt_disable_ip = ip;
5778 #endif
5779 trace_preempt_off(CALLER_ADDR0, ip);
5780 }
5781 }
5782
5783 void preempt_count_add(int val)
5784 {
5785 #ifdef CONFIG_DEBUG_PREEMPT
5786 /*
5787 * Underflow?
5788 */
5789 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
5790 return;
5791 #endif
5792 __preempt_count_add(val);
5793 #ifdef CONFIG_DEBUG_PREEMPT
5794 /*
5795 * Spinlock count overflowing soon?
5796 */
5797 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
5798 PREEMPT_MASK - 10);
5799 #endif
5800 preempt_latency_start(val);
5801 }
5802 EXPORT_SYMBOL(preempt_count_add);
5803 NOKPROBE_SYMBOL(preempt_count_add);
5804
5805 /*
5806 * If the value passed in equals the current preempt count
5807 * then we just enabled preemption. Stop timing the latency.
5808 */
5809 static inline void preempt_latency_stop(int val)
5810 {
5811 if (preempt_count() == val)
5812 trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
5813 }
5814
5815 void preempt_count_sub(int val)
5816 {
5817 #ifdef CONFIG_DEBUG_PREEMPT
5818 /*
5819 * Underflow?
5820 */
5821 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
5822 return;
5823 /*
5824 * Is the spinlock portion underflowing?
5825 */
5826 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
5827 !(preempt_count() & PREEMPT_MASK)))
5828 return;
5829 #endif
5830
5831 preempt_latency_stop(val);
5832 __preempt_count_sub(val);
5833 }
5834 EXPORT_SYMBOL(preempt_count_sub);
5835 NOKPROBE_SYMBOL(preempt_count_sub);
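
/*
 * Illustrative sketch: these out-of-line implementations back the usual
 * preempt_disable()/preempt_enable() pair, which (modulo config-dependent
 * details in include/linux/preempt.h) expand roughly to:
 *
 *	#define preempt_disable() \
 *	do { \
 *		preempt_count_inc(); \
 *		barrier(); \
 *	} while (0)
 *
 *	#define preempt_enable() \
 *	do { \
 *		barrier(); \
 *		if (unlikely(preempt_count_dec_and_test())) \
 *			__preempt_schedule(); \
 *	} while (0)
 */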
5836
5837 #else
5838 static inline void preempt_latency_start(int val) { }
5839 static inline void preempt_latency_stop(int val) { }
5840 #endif
5841
5842 static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
5843 {
5844 #ifdef CONFIG_DEBUG_PREEMPT
5845 return p->preempt_disable_ip;
5846 #else
5847 return 0;
5848 #endif
5849 }
5850
5851 /*
5852 * Print scheduling while atomic bug:
5853 */
5854 static noinline void __schedule_bug(struct task_struct *prev)
5855 {
5856 /* Save this before calling printk(), since that will clobber it */
5857 unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
5858
5859 if (oops_in_progress)
5860 return;
5861
5862 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
5863 prev->comm, prev->pid, preempt_count());
5864
5865 debug_show_held_locks(prev);
5866 print_modules();
5867 if (irqs_disabled())
5868 print_irqtrace_events(prev);
5869 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
5870 pr_err("Preemption disabled at:");
5871 print_ip_sym(KERN_ERR, preempt_disable_ip);
5872 }
5873 check_panic_on_warn("scheduling while atomic");
5874
5875 dump_stack();
5876 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
5877 }
5878
5879 /*
5880 * Various schedule()-time debugging checks and statistics:
5881 */
5882 static inline void schedule_debug(struct task_struct *prev, bool preempt)
5883 {
5884 #ifdef CONFIG_SCHED_STACK_END_CHECK
5885 if (task_stack_end_corrupted(prev))
5886 panic("corrupted stack end detected inside scheduler\n");
5887
5888 if (task_scs_end_corrupted(prev))
5889 panic("corrupted shadow stack detected inside scheduler\n");
5890 #endif
5891
5892 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
5893 if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) {
5894 printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
5895 prev->comm, prev->pid, prev->non_block_count);
5896 dump_stack();
5897 add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
5898 }
5899 #endif
5900
5901 if (unlikely(in_atomic_preempt_off())) {
5902 __schedule_bug(prev);
5903 preempt_count_set(PREEMPT_DISABLED);
5904 }
5905 rcu_sleep_check();
5906 WARN_ON_ONCE(ct_state() == CT_STATE_USER);
5907
5908 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
5909
5910 schedstat_inc(this_rq()->sched_count);
5911 }
5912
5913 static void prev_balance(struct rq *rq, struct task_struct *prev,
5914 struct rq_flags *rf)
5915 {
5916 const struct sched_class *start_class = prev->sched_class;
5917 const struct sched_class *class;
5918
5919 #ifdef CONFIG_SCHED_CLASS_EXT
5920 /*
5921 * SCX requires a balance() call before every pick_task() including when
5922 * waking up from SCHED_IDLE. If @start_class is below SCX, start from
5923 * SCX instead. Also, set a flag to detect missing balance() call.
5924 */
5925 if (scx_enabled()) {
5926 rq->scx.flags |= SCX_RQ_BAL_PENDING;
5927 if (sched_class_above(&ext_sched_class, start_class))
5928 start_class = &ext_sched_class;
5929 }
5930 #endif
5931
5932 /*
5933 * We must do the balancing pass before put_prev_task(), such
5934 * that when we release the rq->lock the task is in the same
5935 * state as before we took rq->lock.
5936 *
5937 * We can terminate the balance pass as soon as we know there is
5938 * a runnable task of @class priority or higher.
5939 */
5940 for_active_class_range(class, start_class, &idle_sched_class) {
5941 if (class->balance && class->balance(rq, prev, rf))
5942 break;
5943 }
5944 }
5945
5946 /*
5947 * Pick up the highest-prio task:
5948 */
5949 static inline struct task_struct *
5950 __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
5951 {
5952 const struct sched_class *class;
5953 struct task_struct *p;
5954
5955 rq->dl_server = NULL;
5956
5957 if (scx_enabled())
5958 goto restart;
5959
5960 /*
5961 * Optimization: we know that if all tasks are in the fair class we can
5962 * call that function directly, but only if the @prev task wasn't of a
5963 * higher scheduling class, because otherwise those classes lose the
5964 * opportunity to pull in more work from other CPUs.
5965 */
5966 if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) &&
5967 rq->nr_running == rq->cfs.h_nr_queued)) {
5968
5969 p = pick_next_task_fair(rq, prev, rf);
5970 if (unlikely(p == RETRY_TASK))
5971 goto restart;
5972
5973 /* Assume the next prioritized class is idle_sched_class */
5974 if (!p) {
5975 p = pick_task_idle(rq);
5976 put_prev_set_next_task(rq, prev, p);
5977 }
5978
5979 return p;
5980 }
5981
5982 restart:
5983 prev_balance(rq, prev, rf);
5984
5985 for_each_active_class(class) {
5986 if (class->pick_next_task) {
5987 p = class->pick_next_task(rq, prev);
5988 if (p)
5989 return p;
5990 } else {
5991 p = class->pick_task(rq);
5992 if (p) {
5993 put_prev_set_next_task(rq, prev, p);
5994 return p;
5995 }
5996 }
5997 }
5998
5999 BUG(); /* The idle class should always have a runnable task. */
6000 }
6001
6002 #ifdef CONFIG_SCHED_CORE
6003 static inline bool is_task_rq_idle(struct task_struct *t)
6004 {
6005 return (task_rq(t)->idle == t);
6006 }
6007
6008 static inline bool cookie_equals(struct task_struct *a, unsigned long cookie)
6009 {
6010 return is_task_rq_idle(a) || (a->core_cookie == cookie);
6011 }
6012
6013 static inline bool cookie_match(struct task_struct *a, struct task_struct *b)
6014 {
6015 if (is_task_rq_idle(a) || is_task_rq_idle(b))
6016 return true;
6017
6018 return a->core_cookie == b->core_cookie;
6019 }
6020
6021 static inline struct task_struct *pick_task(struct rq *rq)
6022 {
6023 const struct sched_class *class;
6024 struct task_struct *p;
6025
6026 rq->dl_server = NULL;
6027
6028 for_each_active_class(class) {
6029 p = class->pick_task(rq);
6030 if (p)
6031 return p;
6032 }
6033
6034 BUG(); /* The idle class should always have a runnable task. */
6035 }
6036
6037 extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
6038
6039 static void queue_core_balance(struct rq *rq);
6040
6041 static struct task_struct *
6042 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6043 {
6044 struct task_struct *next, *p, *max = NULL;
6045 const struct cpumask *smt_mask;
6046 bool fi_before = false;
6047 bool core_clock_updated = (rq == rq->core);
6048 unsigned long cookie;
6049 int i, cpu, occ = 0;
6050 struct rq *rq_i;
6051 bool need_sync;
6052
6053 if (!sched_core_enabled(rq))
6054 return __pick_next_task(rq, prev, rf);
6055
6056 cpu = cpu_of(rq);
6057
6058 /* Stopper task is switching into idle, no need for core-wide selection. */
6059 if (cpu_is_offline(cpu)) {
6060 /*
6061 * Reset core_pick so that we don't enter the fastpath when
6062 * coming online. core_pick would already be migrated to
6063 * another cpu during offline.
6064 */
6065 rq->core_pick = NULL;
6066 rq->core_dl_server = NULL;
6067 return __pick_next_task(rq, prev, rf);
6068 }
6069
6070 /*
6071 * If there were no {en,de}queues since we picked (IOW, the task
6072 * pointers are all still valid), and we haven't scheduled the last
6073 * pick yet, do so now.
6074 *
6075 * rq->core_pick can be NULL if no selection was made for a CPU because
6076 * it was either offline or went offline during a sibling's core-wide
6077 * selection. In this case, do a core-wide selection.
6078 */
6079 if (rq->core->core_pick_seq == rq->core->core_task_seq &&
6080 rq->core->core_pick_seq != rq->core_sched_seq &&
6081 rq->core_pick) {
6082 WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq);
6083
6084 next = rq->core_pick;
6085 rq->dl_server = rq->core_dl_server;
6086 rq->core_pick = NULL;
6087 rq->core_dl_server = NULL;
6088 goto out_set_next;
6089 }
6090
6091 prev_balance(rq, prev, rf);
6092
6093 smt_mask = cpu_smt_mask(cpu);
6094 need_sync = !!rq->core->core_cookie;
6095
6096 /* reset state */
6097 rq->core->core_cookie = 0UL;
6098 if (rq->core->core_forceidle_count) {
6099 if (!core_clock_updated) {
6100 update_rq_clock(rq->core);
6101 core_clock_updated = true;
6102 }
6103 sched_core_account_forceidle(rq);
6104 /* reset after accounting force idle */
6105 rq->core->core_forceidle_start = 0;
6106 rq->core->core_forceidle_count = 0;
6107 rq->core->core_forceidle_occupation = 0;
6108 need_sync = true;
6109 fi_before = true;
6110 }
6111
6112 /*
6113 * core->core_task_seq, core->core_pick_seq, rq->core_sched_seq
6114 *
6115 * @task_seq guards the task state ({en,de}queues)
6116 * @pick_seq is the @task_seq we did a selection on
6117 * @sched_seq is the @pick_seq we scheduled
6118 *
6119 * However, preemptions can cause multiple picks on the same task set.
6120 * 'Fix' this by also increasing @task_seq for every pick.
6121 */
6122 rq->core->core_task_seq++;
6123
6124 /*
6125 * Optimize for common case where this CPU has no cookies
6126 * and there are no cookied tasks running on siblings.
6127 */
6128 if (!need_sync) {
6129 next = pick_task(rq);
6130 if (!next->core_cookie) {
6131 rq->core_pick = NULL;
6132 rq->core_dl_server = NULL;
6133 /*
6134 * For robustness, update the min_vruntime_fi for
6135 * unconstrained picks as well.
6136 */
6137 WARN_ON_ONCE(fi_before);
6138 task_vruntime_update(rq, next, false);
6139 goto out_set_next;
6140 }
6141 }
6142
6143 /*
6144 * For each thread: do the regular task pick and find the max prio task
6145 * amongst them.
6146 *
6147 * Tie-break prio towards the current CPU
6148 */
6149 for_each_cpu_wrap(i, smt_mask, cpu) {
6150 rq_i = cpu_rq(i);
6151
6152 /*
6153 * Current cpu always has its clock updated on entrance to
6154 * pick_next_task(). If the current cpu is not the core,
6155 * the core may also have been updated above.
6156 */
6157 if (i != cpu && (rq_i != rq->core || !core_clock_updated))
6158 update_rq_clock(rq_i);
6159
6160 rq_i->core_pick = p = pick_task(rq_i);
6161 rq_i->core_dl_server = rq_i->dl_server;
6162
6163 if (!max || prio_less(max, p, fi_before))
6164 max = p;
6165 }
6166
6167 cookie = rq->core->core_cookie = max->core_cookie;
6168
6169 /*
6170 * For each thread: try and find a runnable task that matches @max or
6171 * force idle.
6172 */
6173 for_each_cpu(i, smt_mask) {
6174 rq_i = cpu_rq(i);
6175 p = rq_i->core_pick;
6176
6177 if (!cookie_equals(p, cookie)) {
6178 p = NULL;
6179 if (cookie)
6180 p = sched_core_find(rq_i, cookie);
6181 if (!p)
6182 p = idle_sched_class.pick_task(rq_i);
6183 }
6184
6185 rq_i->core_pick = p;
6186 rq_i->core_dl_server = NULL;
6187
6188 if (p == rq_i->idle) {
6189 if (rq_i->nr_running) {
6190 rq->core->core_forceidle_count++;
6191 if (!fi_before)
6192 rq->core->core_forceidle_seq++;
6193 }
6194 } else {
6195 occ++;
6196 }
6197 }
6198
6199 if (schedstat_enabled() && rq->core->core_forceidle_count) {
6200 rq->core->core_forceidle_start = rq_clock(rq->core);
6201 rq->core->core_forceidle_occupation = occ;
6202 }
6203
6204 rq->core->core_pick_seq = rq->core->core_task_seq;
6205 next = rq->core_pick;
6206 rq->core_sched_seq = rq->core->core_pick_seq;
6207
6208 /* Something should have been selected for current CPU */
6209 WARN_ON_ONCE(!next);
6210
6211 /*
6212 * Reschedule siblings
6213 *
6214 * NOTE: L1TF -- at this point we're no longer running the old task and
6215 * sending an IPI (below) ensures the sibling will no longer be running
6216 * their task. This ensures there is no inter-sibling overlap between
6217 * non-matching user state.
6218 */
6219 for_each_cpu(i, smt_mask) {
6220 rq_i = cpu_rq(i);
6221
6222 /*
6223 * An online sibling might have gone offline before a task
6224 * could be picked for it, or it might be offline but later
6225 * happen to come online, but it's too late and nothing was
6226 * picked for it. That's OK - it will pick tasks for itself,
6227 * so ignore it.
6228 */
6229 if (!rq_i->core_pick)
6230 continue;
6231
6232 /*
6233 * Update for new !FI->FI transitions, or if continuing to be in !FI:
6234 * fi_before fi update?
6235 * 0 0 1
6236 * 0 1 1
6237 * 1 0 1
6238 * 1 1 0
6239 */
6240 if (!(fi_before && rq->core->core_forceidle_count))
6241 task_vruntime_update(rq_i, rq_i->core_pick, !!rq->core->core_forceidle_count);
6242
6243 rq_i->core_pick->core_occupation = occ;
6244
6245 if (i == cpu) {
6246 rq_i->core_pick = NULL;
6247 rq_i->core_dl_server = NULL;
6248 continue;
6249 }
6250
6251 /* Did we break L1TF mitigation requirements? */
6252 WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick));
6253
6254 if (rq_i->curr == rq_i->core_pick) {
6255 rq_i->core_pick = NULL;
6256 rq_i->core_dl_server = NULL;
6257 continue;
6258 }
6259
6260 resched_curr(rq_i);
6261 }
6262
6263 out_set_next:
6264 put_prev_set_next_task(rq, prev, next);
6265 if (rq->core->core_forceidle_count && next == rq->idle)
6266 queue_core_balance(rq);
6267
6268 return next;
6269 }
6270
6271 static bool try_steal_cookie(int this, int that)
6272 {
6273 struct rq *dst = cpu_rq(this), *src = cpu_rq(that);
6274 struct task_struct *p;
6275 unsigned long cookie;
6276 bool success = false;
6277
6278 guard(irq)();
6279 guard(double_rq_lock)(dst, src);
6280
6281 cookie = dst->core->core_cookie;
6282 if (!cookie)
6283 return false;
6284
6285 if (dst->curr != dst->idle)
6286 return false;
6287
6288 p = sched_core_find(src, cookie);
6289 if (!p)
6290 return false;
6291
6292 do {
6293 if (p == src->core_pick || p == src->curr)
6294 goto next;
6295
6296 if (!is_cpu_allowed(p, this))
6297 goto next;
6298
6299 if (p->core_occupation > dst->idle->core_occupation)
6300 goto next;
6301 /*
6302 * sched_core_find() and sched_core_next() will ensure
6303 * that task @p is not throttled now; we also need to
6304 * check whether the runqueue of the destination CPU is
6305 * being throttled.
6306 */
6307 if (sched_task_is_throttled(p, this))
6308 goto next;
6309
6310 move_queued_task_locked(src, dst, p);
6311 resched_curr(dst);
6312
6313 success = true;
6314 break;
6315
6316 next:
6317 p = sched_core_next(p, cookie);
6318 } while (p);
6319
6320 return success;
6321 }
6322
6323 static bool steal_cookie_task(int cpu, struct sched_domain *sd)
6324 {
6325 int i;
6326
6327 for_each_cpu_wrap(i, sched_domain_span(sd), cpu + 1) {
6328 if (i == cpu)
6329 continue;
6330
6331 if (need_resched())
6332 break;
6333
6334 if (try_steal_cookie(cpu, i))
6335 return true;
6336 }
6337
6338 return false;
6339 }
6340
6341 static void sched_core_balance(struct rq *rq)
6342 {
6343 struct sched_domain *sd;
6344 int cpu = cpu_of(rq);
6345
6346 guard(preempt)();
6347 guard(rcu)();
6348
6349 raw_spin_rq_unlock_irq(rq);
6350 for_each_domain(cpu, sd) {
6351 if (need_resched())
6352 break;
6353
6354 if (steal_cookie_task(cpu, sd))
6355 break;
6356 }
6357 raw_spin_rq_lock_irq(rq);
6358 }
6359
6360 static DEFINE_PER_CPU(struct balance_callback, core_balance_head);
6361
6362 static void queue_core_balance(struct rq *rq)
6363 {
6364 if (!sched_core_enabled(rq))
6365 return;
6366
6367 if (!rq->core->core_cookie)
6368 return;
6369
6370 if (!rq->nr_running) /* not forced idle */
6371 return;
6372
6373 queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance);
6374 }
6375
6376 DEFINE_LOCK_GUARD_1(core_lock, int,
6377 sched_core_lock(*_T->lock, &_T->flags),
6378 sched_core_unlock(*_T->lock, &_T->flags),
6379 unsigned long flags)
6380
6381 static void sched_core_cpu_starting(unsigned int cpu)
6382 {
6383 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
6384 struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
6385 int t;
6386
6387 guard(core_lock)(&cpu);
6388
6389 WARN_ON_ONCE(rq->core != rq);
6390
6391 /* if we're the first, we'll be our own leader */
6392 if (cpumask_weight(smt_mask) == 1)
6393 return;
6394
6395 /* find the leader */
6396 for_each_cpu(t, smt_mask) {
6397 if (t == cpu)
6398 continue;
6399 rq = cpu_rq(t);
6400 if (rq->core == rq) {
6401 core_rq = rq;
6402 break;
6403 }
6404 }
6405
6406 if (WARN_ON_ONCE(!core_rq)) /* whoopsie */
6407 return;
6408
6409 /* install and validate core_rq */
6410 for_each_cpu(t, smt_mask) {
6411 rq = cpu_rq(t);
6412
6413 if (t == cpu)
6414 rq->core = core_rq;
6415
6416 WARN_ON_ONCE(rq->core != core_rq);
6417 }
6418 }
6419
6420 static void sched_core_cpu_deactivate(unsigned int cpu)
6421 {
6422 const struct cpumask *smt_mask = cpu_smt_mask(cpu);
6423 struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
6424 int t;
6425
6426 guard(core_lock)(&cpu);
6427
6428 /* if we're the last man standing, nothing to do */
6429 if (cpumask_weight(smt_mask) == 1) {
6430 WARN_ON_ONCE(rq->core != rq);
6431 return;
6432 }
6433
6434 /* if we're not the leader, nothing to do */
6435 if (rq->core != rq)
6436 return;
6437
6438 /* find a new leader */
6439 for_each_cpu(t, smt_mask) {
6440 if (t == cpu)
6441 continue;
6442 core_rq = cpu_rq(t);
6443 break;
6444 }
6445
6446 if (WARN_ON_ONCE(!core_rq)) /* impossible */
6447 return;
6448
6449 /* copy the shared state to the new leader */
6450 core_rq->core_task_seq = rq->core_task_seq;
6451 core_rq->core_pick_seq = rq->core_pick_seq;
6452 core_rq->core_cookie = rq->core_cookie;
6453 core_rq->core_forceidle_count = rq->core_forceidle_count;
6454 core_rq->core_forceidle_seq = rq->core_forceidle_seq;
6455 core_rq->core_forceidle_occupation = rq->core_forceidle_occupation;
6456
6457 /*
6458 * Accounting edge for forced idle is handled in pick_next_task().
6459 * Don't need another one here, since the hotplug thread shouldn't
6460 * have a cookie.
6461 */
6462 core_rq->core_forceidle_start = 0;
6463
6464 /* install new leader */
6465 for_each_cpu(t, smt_mask) {
6466 rq = cpu_rq(t);
6467 rq->core = core_rq;
6468 }
6469 }
6470
6471 static inline void sched_core_cpu_dying(unsigned int cpu)
6472 {
6473 struct rq *rq = cpu_rq(cpu);
6474
6475 if (rq->core != rq)
6476 rq->core = rq;
6477 }
6478
6479 #else /* !CONFIG_SCHED_CORE: */
6480
6481 static inline void sched_core_cpu_starting(unsigned int cpu) {}
6482 static inline void sched_core_cpu_deactivate(unsigned int cpu) {}
6483 static inline void sched_core_cpu_dying(unsigned int cpu) {}
6484
6485 static struct task_struct *
6486 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6487 {
6488 return __pick_next_task(rq, prev, rf);
6489 }
6490
6491 #endif /* !CONFIG_SCHED_CORE */
6492
6493 /*
6494 * Constants for the sched_mode argument of __schedule().
6495 *
6496 * The mode argument allows RT enabled kernels to differentiate a
6497 * preemption from blocking on an 'sleeping' spin/rwlock.
6498 */
6499 #define SM_IDLE (-1)
6500 #define SM_NONE 0
6501 #define SM_PREEMPT 1
6502 #define SM_RTLOCK_WAIT 2
6503
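/*
 * For orientation, the mapping from the entry points in this file:
 *
 *	schedule() / __schedule_loop(SM_NONE)	-> SM_NONE
 *	schedule_idle()				-> SM_IDLE
 *	preempt_schedule*()			-> SM_PREEMPT
 *	schedule_rtlock()			-> SM_RTLOCK_WAIT
 */
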
6504 /*
6505 * Helper function for __schedule()
6506 *
6507 * Tries to deactivate the task, unless the should_block arg
6508 * is false or a signal is pending. In case a signal is
6509 * pending, it marks the task's __state as RUNNING (and clears
6510 * blocked_on).
6511 */
6512 static bool try_to_block_task(struct rq *rq, struct task_struct *p,
6513 unsigned long *task_state_p, bool should_block)
6514 {
6515 unsigned long task_state = *task_state_p;
6516 int flags = DEQUEUE_NOCLOCK;
6517
6518 if (signal_pending_state(task_state, p)) {
6519 WRITE_ONCE(p->__state, TASK_RUNNING);
6520 *task_state_p = TASK_RUNNING;
6521 return false;
6522 }
6523
6524 /*
6525 * We check should_block after signal_pending because we
6526 * will want to wake the task in that case. But if
6527 * should_block is false, it's likely due to the task being
6528 * blocked on a mutex, and we want to keep it on the runqueue
6529 * to be selectable for proxy-execution.
6530 */
6531 if (!should_block)
6532 return false;
6533
6534 p->sched_contributes_to_load =
6535 (task_state & TASK_UNINTERRUPTIBLE) &&
6536 !(task_state & TASK_NOLOAD) &&
6537 !(task_state & TASK_FROZEN);
6538
6539 if (unlikely(is_special_task_state(task_state)))
6540 flags |= DEQUEUE_SPECIAL;
6541
6542 /*
6543 * __schedule() ttwu()
6544 * prev_state = prev->state; if (p->on_rq && ...)
6545 * if (prev_state) goto out;
6546 * p->on_rq = 0; smp_acquire__after_ctrl_dep();
6547 * p->state = TASK_WAKING
6548 *
6549 * Where __schedule() and ttwu() have matching control dependencies.
6550 *
6551 * After this, schedule() must not care about p->state any more.
6552 */
6553 block_task(rq, p, flags);
6554 return true;
6555 }
6556
6557 #ifdef CONFIG_SCHED_PROXY_EXEC
6558 static inline struct task_struct *proxy_resched_idle(struct rq *rq)
6559 {
6560 put_prev_set_next_task(rq, rq->donor, rq->idle);
6561 rq_set_donor(rq, rq->idle);
6562 set_tsk_need_resched(rq->idle);
6563 return rq->idle;
6564 }
6565
6566 static bool __proxy_deactivate(struct rq *rq, struct task_struct *donor)
6567 {
6568 unsigned long state = READ_ONCE(donor->__state);
6569
6570 /* Don't deactivate if the state has been changed to TASK_RUNNING */
6571 if (state == TASK_RUNNING)
6572 return false;
6573 /*
6574 * Because we got donor from pick_next_task(), it is *crucial*
6575 * that we call proxy_resched_idle() before we deactivate it.
6576 * Once we deactivate donor, donor->on_rq is set to zero,
6577 * which allows ttwu() to immediately try to wake the task on
6578 * another rq, so we cannot use *any* references to donor
6579 * after that point. Things like cfs_rq->curr and rq->donor
6580 * therefore need to be changed away from donor *before* we deactivate.
6581 */
6582 proxy_resched_idle(rq);
6583 return try_to_block_task(rq, donor, &state, true);
6584 }
6585
6586 static struct task_struct *proxy_deactivate(struct rq *rq, struct task_struct *donor)
6587 {
6588 if (!__proxy_deactivate(rq, donor)) {
6589 /*
6590 * XXX: For now, if deactivation failed, set donor
6591 * as unblocked, as we aren't doing proxy-migrations
6592 * yet (more logic will be needed then).
6593 */
6594 donor->blocked_on = NULL;
6595 }
6596 return NULL;
6597 }
6598
6599 /*
6600 * Find runnable lock owner to proxy for mutex blocked donor
6601 *
6602 * Follow the blocked-on relation:
6603 * task->blocked_on -> mutex->owner -> task...
6604 *
6605 * Lock order:
6606 *
6607 * p->pi_lock
6608 * rq->lock
6609 * mutex->wait_lock
6610 *
6611 * Returns the task that is going to be used as execution context (the one
6612 * that is actually going to be run on cpu_of(rq)).
6613 */
6614 static struct task_struct *
6615 find_proxy_task(struct rq *rq, struct task_struct *donor, struct rq_flags *rf)
6616 {
6617 struct task_struct *owner = NULL;
6618 int this_cpu = cpu_of(rq);
6619 struct task_struct *p;
6620 struct mutex *mutex;
6621
6622 /* Follow blocked_on chain. */
6623 for (p = donor; task_is_blocked(p); p = owner) {
6624 mutex = p->blocked_on;
6625 /* Something changed in the chain, so pick again */
6626 if (!mutex)
6627 return NULL;
6628 /*
6629 * By taking mutex->wait_lock we hold off concurrent mutex_unlock()
6630 * and ensure @owner sticks around.
6631 */
6632 guard(raw_spinlock)(&mutex->wait_lock);
6633
6634 /* Check again that p is blocked with wait_lock held */
6635 if (mutex != __get_task_blocked_on(p)) {
6636 /*
6637 * Something changed in the blocked_on chain and
6638 * we don't know if only at this level. So, let's
6639 * just bail out completely and let __schedule()
6640 * figure things out (pick_again loop).
6641 */
6642 return NULL;
6643 }
6644
6645 owner = __mutex_owner(mutex);
6646 if (!owner) {
6647 __clear_task_blocked_on(p, mutex);
6648 return p;
6649 }
6650
6651 if (!READ_ONCE(owner->on_rq) || owner->se.sched_delayed) {
6652 /* XXX Don't handle blocked owners/delayed dequeue yet */
6653 return proxy_deactivate(rq, donor);
6654 }
6655
6656 if (task_cpu(owner) != this_cpu) {
6657 /* XXX Don't handle migrations yet */
6658 return proxy_deactivate(rq, donor);
6659 }
6660
6661 if (task_on_rq_migrating(owner)) {
6662 /*
6663 * One of the chain of mutex owners is currently migrating to this
6664 * CPU, but has not yet been enqueued because we are holding the
6665 * rq lock. As a simple solution, just schedule rq->idle to give
6666 * the migration a chance to complete. Much like the migrate_task
6667 * case we should end up back in find_proxy_task(), this time
6668 * hopefully with all relevant tasks already enqueued.
6669 */
6670 return proxy_resched_idle(rq);
6671 }
6672
6673 /*
6674 * It's possible to race where after we check owner->on_rq
6675 * but before we check (owner_cpu != this_cpu) that the
6676 * task on another cpu was migrated back to this cpu. In
6677 * that case it could slip by our checks. So double check
6678 * we are still on this cpu and not migrating. If we get
6679 * inconsistent results, try again.
6680 */
6681 if (!task_on_rq_queued(owner) || task_cpu(owner) != this_cpu)
6682 return NULL;
6683
6684 if (owner == p) {
6685 /*
6686 * It's possible we interleave with mutex_unlock like:
6687 *
6688 * lock(&rq->lock);
6689 * find_proxy_task()
6690 * mutex_unlock()
6691 * lock(&wait_lock);
6692 * donor(owner) = current->blocked_donor;
6693 * unlock(&wait_lock);
6694 *
6695 * wake_up_q();
6696 * ...
6697 * ttwu_runnable()
6698 * __task_rq_lock()
6699 * lock(&wait_lock);
6700 * owner == p
6701 *
6702 * Which leaves us to finish the ttwu_runnable() and make it go.
6703 *
6704 * So schedule rq->idle so that ttwu_runnable() can get the rq
6705 * lock and mark owner as running.
6706 */
6707 return proxy_resched_idle(rq);
6708 }
6709 /*
6710 * OK, now we're absolutely sure @owner is on this
6711 * rq, therefore holding @rq->lock is sufficient to
6712 * guarantee its existence, as per ttwu_remote().
6713 */
6714 }
6715
6716 WARN_ON_ONCE(owner && !owner->on_rq);
6717 return owner;
6718 }
6719 #else /* SCHED_PROXY_EXEC */
6720 static struct task_struct *
6721 find_proxy_task(struct rq *rq, struct task_struct *donor, struct rq_flags *rf)
6722 {
6723 WARN_ONCE(1, "This should never be called in the !SCHED_PROXY_EXEC case\n");
6724 return donor;
6725 }
6726 #endif /* SCHED_PROXY_EXEC */
6727
6728 static inline void proxy_tag_curr(struct rq *rq, struct task_struct *owner)
6729 {
6730 if (!sched_proxy_exec())
6731 return;
6732 /*
6733 * pick_next_task() calls set_next_task() on the chosen task
6734 * at some point, which ensures it is not push/pullable.
6735 * However, the chosen/donor task *and* the mutex owner form an
6736 * atomic pair wrt push/pull.
6737 *
6738 * Make sure owner we run is not pushable. Unfortunately we can
6739 * only deal with that by means of a dequeue/enqueue cycle. :-/
6740 */
6741 dequeue_task(rq, owner, DEQUEUE_NOCLOCK | DEQUEUE_SAVE);
6742 enqueue_task(rq, owner, ENQUEUE_NOCLOCK | ENQUEUE_RESTORE);
6743 }
6744
6745 /*
6746 * __schedule() is the main scheduler function.
6747 *
6748 * The main means of driving the scheduler and thus entering this function are:
6749 *
6750 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc.
6751 *
6752 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
6753 * paths. For example, see arch/x86/entry_64.S.
6754 *
6755 * To drive preemption between tasks, the scheduler sets the flag in timer
6756 * interrupt handler sched_tick().
6757 *
6758 * 3. Wakeups don't really cause entry into schedule(). They add a
6759 * task to the run-queue and that's it.
6760 *
6761 * Now, if the new task added to the run-queue preempts the current
6762 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
6763 * called on the nearest possible occasion:
6764 *
6765 * - If the kernel is preemptible (CONFIG_PREEMPTION=y):
6766 *
6767 * - in syscall or exception context, at the next outermost
6768 * preempt_enable(). (this might be as soon as the wake_up()'s
6769 * spin_unlock()!)
6770 *
6771 * - in IRQ context, return from interrupt-handler to
6772 * preemptible context
6773 *
6774 * - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
6775 * then at the next:
6776 *
6777 * - cond_resched() call
6778 * - explicit schedule() call
6779 * - return from syscall or exception to user-space
6780 * - return from interrupt-handler to user-space
6781 *
6782 * WARNING: must be called with preemption disabled!
6783 */
6784 static void __sched notrace __schedule(int sched_mode)
6785 {
6786 struct task_struct *prev, *next;
6787 /*
6788 * On PREEMPT_RT kernel, SM_RTLOCK_WAIT is noted
6789 * as a preemption by schedule_debug() and RCU.
6790 */
6791 bool preempt = sched_mode > SM_NONE;
6792 bool is_switch = false;
6793 unsigned long *switch_count;
6794 unsigned long prev_state;
6795 struct rq_flags rf;
6796 struct rq *rq;
6797 int cpu;
6798
6799 /* Trace preemptions consistently with task switches */
6800 trace_sched_entry_tp(sched_mode == SM_PREEMPT);
6801
6802 cpu = smp_processor_id();
6803 rq = cpu_rq(cpu);
6804 prev = rq->curr;
6805
6806 schedule_debug(prev, preempt);
6807
6808 if (sched_feat(HRTICK) || sched_feat(HRTICK_DL))
6809 hrtick_clear(rq);
6810
6811 klp_sched_try_switch(prev);
6812
6813 local_irq_disable();
6814 rcu_note_context_switch(preempt);
6815
6816 /*
6817 * Make sure that signal_pending_state()->signal_pending() below
6818 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
6819 * done by the caller to avoid the race with signal_wake_up():
6820 *
6821 * __set_current_state(@state) signal_wake_up()
6822 * schedule() set_tsk_thread_flag(p, TIF_SIGPENDING)
6823 * wake_up_state(p, state)
6824 * LOCK rq->lock LOCK p->pi_state
6825 * smp_mb__after_spinlock() smp_mb__after_spinlock()
6826 * if (signal_pending_state()) if (p->state & @state)
6827 *
6828 * Also, the membarrier system call requires a full memory barrier
6829 * after coming from user-space, before storing to rq->curr; this
6830 * barrier matches a full barrier in the proximity of the membarrier
6831 * system call exit.
6832 */
6833 rq_lock(rq, &rf);
6834 smp_mb__after_spinlock();
6835
6836 /* Promote REQ to ACT */
6837 rq->clock_update_flags <<= 1;
6838 update_rq_clock(rq);
6839 rq->clock_update_flags = RQCF_UPDATED;
6840
6841 switch_count = &prev->nivcsw;
6842
6843 /* Task state changes only consider SM_PREEMPT as a preemption */
6844 preempt = sched_mode == SM_PREEMPT;
6845
6846 /*
6847 * We must load prev->state once (task_struct::state is volatile), such
6848 * that we form a control dependency vs deactivate_task() below.
6849 */
6850 prev_state = READ_ONCE(prev->__state);
6851 if (sched_mode == SM_IDLE) {
6852 /* SCX must consult the BPF scheduler to tell if rq is empty */
6853 if (!rq->nr_running && !scx_enabled()) {
6854 next = prev;
6855 goto picked;
6856 }
6857 } else if (!preempt && prev_state) {
6858 /*
6859 * We pass !task_is_blocked(prev) as the should_block arg
6860 * in order to keep mutex-blocked tasks on the runqueue
6861 * for selection with proxy-exec (without proxy-exec
6862 * task_is_blocked() will always be false).
6863 */
6864 try_to_block_task(rq, prev, &prev_state,
6865 !task_is_blocked(prev));
6866 switch_count = &prev->nvcsw;
6867 }
6868
6869 pick_again:
6870 next = pick_next_task(rq, rq->donor, &rf);
6871 rq_set_donor(rq, next);
6872 if (unlikely(task_is_blocked(next))) {
6873 next = find_proxy_task(rq, next, &rf);
6874 if (!next)
6875 goto pick_again;
6876 if (next == rq->idle)
6877 goto keep_resched;
6878 }
6879 picked:
6880 clear_tsk_need_resched(prev);
6881 clear_preempt_need_resched();
6882 keep_resched:
6883 rq->last_seen_need_resched_ns = 0;
6884
6885 is_switch = prev != next;
6886 if (likely(is_switch)) {
6887 rq->nr_switches++;
6888 /*
6889 * RCU users of rcu_dereference(rq->curr) may not see
6890 * changes to task_struct made by pick_next_task().
6891 */
6892 RCU_INIT_POINTER(rq->curr, next);
6893
6894 if (!task_current_donor(rq, next))
6895 proxy_tag_curr(rq, next);
6896
6897 /*
6898 * The membarrier system call requires each architecture
6899 * to have a full memory barrier after updating
6900 * rq->curr, before returning to user-space.
6901 *
6902 * Here are the schemes providing that barrier on the
6903 * various architectures:
6904 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC,
6905 * RISC-V. switch_mm() relies on membarrier_arch_switch_mm()
6906 * on PowerPC and on RISC-V.
6907 * - finish_lock_switch() for weakly-ordered
6908 * architectures where spin_unlock is a full barrier,
6909 * - switch_to() for arm64 (weakly-ordered, spin_unlock
6910 * is a RELEASE barrier),
6911 *
6912 * The barrier matches a full barrier in the proximity of
6913 * the membarrier system call entry.
6914 *
6915 * On RISC-V, this barrier pairing is also needed for the
6916 * SYNC_CORE command when switching between processes, cf.
6917 * the inline comments in membarrier_arch_switch_mm().
6918 */
6919 ++*switch_count;
6920
6921 migrate_disable_switch(rq, prev);
6922 psi_account_irqtime(rq, prev, next);
6923 psi_sched_switch(prev, next, !task_on_rq_queued(prev) ||
6924 prev->se.sched_delayed);
6925
6926 trace_sched_switch(preempt, prev, next, prev_state);
6927
6928 /* Also unlocks the rq: */
6929 rq = context_switch(rq, prev, next, &rf);
6930 } else {
6931 /* In case next was already curr but just got blocked_donor */
6932 if (!task_current_donor(rq, next))
6933 proxy_tag_curr(rq, next);
6934
6935 rq_unpin_lock(rq, &rf);
6936 __balance_callbacks(rq);
6937 raw_spin_rq_unlock_irq(rq);
6938 }
6939 trace_sched_exit_tp(is_switch);
6940 }
6941
6942 void __noreturn do_task_dead(void)
6943 {
6944 /* Causes final put_task_struct in finish_task_switch(): */
6945 set_special_state(TASK_DEAD);
6946
6947 /* Tell freezer to ignore us: */
6948 current->flags |= PF_NOFREEZE;
6949
6950 __schedule(SM_NONE);
6951 BUG();
6952
6953 /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
6954 for (;;)
6955 cpu_relax();
6956 }
6957
6958 static inline void sched_submit_work(struct task_struct *tsk)
6959 {
6960 static DEFINE_WAIT_OVERRIDE_MAP(sched_map, LD_WAIT_CONFIG);
6961 unsigned int task_flags;
6962
6963 /*
6964 * Establish LD_WAIT_CONFIG context to ensure none of the code called
6965 * will use a blocking primitive -- which would lead to recursion.
6966 */
6967 lock_map_acquire_try(&sched_map);
6968
6969 task_flags = tsk->flags;
6970 /*
6971 * If a worker goes to sleep, notify and ask workqueue whether it
6972 * wants to wake up a task to maintain concurrency.
6973 */
6974 if (task_flags & PF_WQ_WORKER)
6975 wq_worker_sleeping(tsk);
6976 else if (task_flags & PF_IO_WORKER)
6977 io_wq_worker_sleeping(tsk);
6978
6979 /*
6980 * spinlock and rwlock must not flush block requests. This will
6981 * deadlock if the callback attempts to acquire a lock which is
6982 * already acquired.
6983 */
6984 WARN_ON_ONCE(current->__state & TASK_RTLOCK_WAIT);
6985
6986 /*
6987 * If we are going to sleep and we have plugged IO queued,
6988 * make sure to submit it to avoid deadlocks.
6989 */
6990 blk_flush_plug(tsk->plug, true);
6991
6992 lock_map_release(&sched_map);
6993 }
6994
6995 static void sched_update_worker(struct task_struct *tsk)
6996 {
6997 if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER | PF_BLOCK_TS)) {
6998 if (tsk->flags & PF_BLOCK_TS)
6999 blk_plug_invalidate_ts(tsk);
7000 if (tsk->flags & PF_WQ_WORKER)
7001 wq_worker_running(tsk);
7002 else if (tsk->flags & PF_IO_WORKER)
7003 io_wq_worker_running(tsk);
7004 }
7005 }
7006
7007 static __always_inline void __schedule_loop(int sched_mode)
7008 {
7009 do {
7010 preempt_disable();
7011 __schedule(sched_mode);
7012 sched_preempt_enable_no_resched();
7013 } while (need_resched());
7014 }
7015
7016 asmlinkage __visible void __sched schedule(void)
7017 {
7018 struct task_struct *tsk = current;
7019
7020 #ifdef CONFIG_RT_MUTEXES
7021 lockdep_assert(!tsk->sched_rt_mutex);
7022 #endif
7023
7024 if (!task_is_running(tsk))
7025 sched_submit_work(tsk);
7026 __schedule_loop(SM_NONE);
7027 sched_update_worker(tsk);
7028 }
7029 EXPORT_SYMBOL(schedule);
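
/*
 * Illustrative sketch (hypothetical caller): the canonical blocking
 * pattern around schedule(), which is what the signal_pending_state()
 * handling in __schedule() pairs with:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	while (!condition) {
 *		if (signal_pending(current))
 *			break;			// woken by a signal
 *		schedule();
 *		set_current_state(TASK_INTERRUPTIBLE);
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * where 'condition' is whatever the waker sets before wake_up_process().
 */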
7030
7031 /*
7032 * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
7033 * state (have scheduled out non-voluntarily) by making sure that all
7034 * tasks have either left the run queue or have gone into user space.
7035 * As idle tasks do not do either, they must not ever be preempted
7036 * (schedule out non-voluntarily).
7037 *
7038 * schedule_idle() is similar to schedule_preempt_disabled() except that it
7039 * never enables preemption because it does not call sched_submit_work().
7040 */
7041 void __sched schedule_idle(void)
7042 {
7043 /*
7044 * As this skips calling sched_submit_work(), which would be a NOP for
7045 * the idle task anyway because that function is a NOP when the task is
7046 * in the TASK_RUNNING state, make sure this isn't used anywhere the
7047 * current task can be in any other state. Note, idle is always in the
7048 * TASK_RUNNING state.
7049 */
7050 WARN_ON_ONCE(current->__state);
7051 do {
7052 __schedule(SM_IDLE);
7053 } while (need_resched());
7054 }
7055
7056 #if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK)
7057 asmlinkage __visible void __sched schedule_user(void)
7058 {
7059 /*
7060 * If we come here after a random call to set_need_resched(),
7061 * or we have been woken up remotely but the IPI has not yet arrived,
7062 * we haven't yet exited the RCU idle mode. Do it here manually until
7063 * we find a better solution.
7064 *
7065 * NB: There are buggy callers of this function. Ideally we
7066 * should warn if prev_state != CT_STATE_USER, but that will trigger
7067 * too frequently to make sense yet.
7068 */
7069 enum ctx_state prev_state = exception_enter();
7070 schedule();
7071 exception_exit(prev_state);
7072 }
7073 #endif
7074
7075 /**
7076 * schedule_preempt_disabled - called with preemption disabled
7077 *
7078 * Returns with preemption disabled. Note: preempt_count must be 1
7079 */
7080 void __sched schedule_preempt_disabled(void)
7081 {
7082 sched_preempt_enable_no_resched();
7083 schedule();
7084 preempt_disable();
7085 }
7086
7087 #ifdef CONFIG_PREEMPT_RT
7088 void __sched notrace schedule_rtlock(void)
7089 {
7090 __schedule_loop(SM_RTLOCK_WAIT);
7091 }
7092 NOKPROBE_SYMBOL(schedule_rtlock);
7093 #endif
7094
7095 static void __sched notrace preempt_schedule_common(void)
7096 {
7097 do {
7098 /*
7099 * Because the function tracer can trace preempt_count_sub()
7100 * and it also uses preempt_enable/disable_notrace(), if
7101 * NEED_RESCHED is set, the preempt_enable_notrace() called
7102 * by the function tracer will call this function again and
7103 * cause infinite recursion.
7104 *
7105 * Preemption must be disabled here before the function
7106 * tracer can trace. Break up preempt_disable() into two
7107 * calls. One to disable preemption without fear of being
7108 * traced. The other to still record the preemption latency,
7109 * which can also be traced by the function tracer.
7110 */
7111 preempt_disable_notrace();
7112 preempt_latency_start(1);
7113 __schedule(SM_PREEMPT);
7114 preempt_latency_stop(1);
7115 preempt_enable_no_resched_notrace();
7116
7117 /*
7118 * Check again in case we missed a preemption opportunity
7119 * between schedule and now.
7120 */
7121 } while (need_resched());
7122 }
7123
7124 #ifdef CONFIG_PREEMPTION
7125 /*
7126 * This is the entry point to schedule() from in-kernel preemption
7127 * off of preempt_enable.
7128 */
7129 asmlinkage __visible void __sched notrace preempt_schedule(void)
7130 {
7131 /*
7132 * If there is a non-zero preempt_count or interrupts are disabled,
7133 * we do not want to preempt the current task. Just return..
7134 */
7135 if (likely(!preemptible()))
7136 return;
7137 preempt_schedule_common();
7138 }
7139 NOKPROBE_SYMBOL(preempt_schedule);
7140 EXPORT_SYMBOL(preempt_schedule);
7141
7142 #ifdef CONFIG_PREEMPT_DYNAMIC
7143 # ifdef CONFIG_HAVE_PREEMPT_DYNAMIC_CALL
7144 # ifndef preempt_schedule_dynamic_enabled
7145 # define preempt_schedule_dynamic_enabled preempt_schedule
7146 # define preempt_schedule_dynamic_disabled NULL
7147 # endif
7148 DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
7149 EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
7150 # elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7151 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);
7152 void __sched notrace dynamic_preempt_schedule(void)
7153 {
7154 if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
7155 return;
7156 preempt_schedule();
7157 }
7158 NOKPROBE_SYMBOL(dynamic_preempt_schedule);
7159 EXPORT_SYMBOL(dynamic_preempt_schedule);
7160 # endif
7161 #endif /* CONFIG_PREEMPT_DYNAMIC */

/**
 * preempt_schedule_notrace - preempt_schedule called by tracing
 *
 * The tracing infrastructure uses preempt_enable_notrace to prevent
 * recursion and tracing preempt enabling caused by the tracing
 * infrastructure itself. But as tracing can happen in areas coming
 * from userspace or just about to enter userspace, a preempt enable
 * can occur before user_exit() is called. This will cause the scheduler
 * to be called when the system is still in usermode.
 *
 * To prevent this, the preempt_enable_notrace will use this function
 * instead of preempt_schedule() to exit user context if needed before
 * calling the scheduler.
 */
asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
{
        enum ctx_state prev_ctx;

        if (likely(!preemptible()))
                return;

        do {
                /*
                 * Because the function tracer can trace preempt_count_sub()
                 * and it also uses preempt_enable/disable_notrace(), if
                 * NEED_RESCHED is set, the preempt_enable_notrace() called
                 * by the function tracer will call this function again and
                 * cause infinite recursion.
                 *
                 * Preemption must be disabled here before the function
                 * tracer can trace. Break up preempt_disable() into two
                 * calls. One to disable preemption without fear of being
                 * traced. The other to still record the preemption latency,
                 * which can also be traced by the function tracer.
                 */
                preempt_disable_notrace();
                preempt_latency_start(1);
                /*
                 * Needs preempt disabled in case user_exit() is traced
                 * and the tracer calls preempt_enable_notrace() causing
                 * an infinite recursion.
                 */
                prev_ctx = exception_enter();
                __schedule(SM_PREEMPT);
                exception_exit(prev_ctx);

                preempt_latency_stop(1);
                preempt_enable_no_resched_notrace();
        } while (need_resched());
}
EXPORT_SYMBOL_GPL(preempt_schedule_notrace);

#ifdef CONFIG_PREEMPT_DYNAMIC
# if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
# ifndef preempt_schedule_notrace_dynamic_enabled
# define preempt_schedule_notrace_dynamic_enabled       preempt_schedule_notrace
# define preempt_schedule_notrace_dynamic_disabled      NULL
# endif
DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
# elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace);
void __sched notrace dynamic_preempt_schedule_notrace(void)
{
        if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace))
                return;
        preempt_schedule_notrace();
}
NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace);
EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
# endif
#endif

#endif /* CONFIG_PREEMPTION */

/*
 * This is the entry point to schedule() from kernel preemption
 * off of IRQ context.
 * Note that this is called and returns with IRQs disabled. This will
 * protect us against recursive calling from IRQ contexts.
 */
asmlinkage __visible void __sched preempt_schedule_irq(void)
{
        enum ctx_state prev_state;

        /* Catch callers which need to be fixed */
        BUG_ON(preempt_count() || !irqs_disabled());

        prev_state = exception_enter();

        do {
                preempt_disable();
                local_irq_enable();
                __schedule(SM_PREEMPT);
                local_irq_disable();
                sched_preempt_enable_no_resched();
        } while (need_resched());

        exception_exit(prev_state);
}
7263
default_wake_function(wait_queue_entry_t * curr,unsigned mode,int wake_flags,void * key)7264 int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
7265 void *key)
7266 {
7267 WARN_ON_ONCE(wake_flags & ~(WF_SYNC|WF_CURRENT_CPU));
7268 return try_to_wake_up(curr->private, mode, wake_flags);
7269 }
7270 EXPORT_SYMBOL(default_wake_function);
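
/*
 * Illustrative sketch (editor addition): default_wake_function() is the
 * ->func callback that init_waitqueue_entry() installs, which is how a
 * plain wake_up() ends up in try_to_wake_up(). Simplified from
 * include/linux/wait.h:
 */
#if 0
static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry,
                                        struct task_struct *p)
{
        wq_entry->flags         = 0;
        wq_entry->private       = p;
        wq_entry->func          = default_wake_function;
}
#endif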

const struct sched_class *__setscheduler_class(int policy, int prio)
{
        if (dl_prio(prio))
                return &dl_sched_class;

        if (rt_prio(prio))
                return &rt_sched_class;

#ifdef CONFIG_SCHED_CLASS_EXT
        if (task_should_scx(policy))
                return &ext_sched_class;
#endif

        return &fair_sched_class;
}

#ifdef CONFIG_RT_MUTEXES

/*
 * Would be more useful with typeof()/auto_type but they don't mix with
 * bit-fields. Since it's a local thing, use int. Keep the generic sounding
 * name such that if someone were to implement this function we get to compare
 * notes.
 */
#define fetch_and_set(x, v) ({ int _x = (x); (x) = (v); _x; })

void rt_mutex_pre_schedule(void)
{
        lockdep_assert(!fetch_and_set(current->sched_rt_mutex, 1));
        sched_submit_work(current);
}

void rt_mutex_schedule(void)
{
        lockdep_assert(current->sched_rt_mutex);
        __schedule_loop(SM_NONE);
}

void rt_mutex_post_schedule(void)
{
        sched_update_worker(current);
        lockdep_assert(fetch_and_set(current->sched_rt_mutex, 0));
}
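
/*
 * Illustrative sketch (editor addition): the calling sequence these three
 * hooks are designed for, loosely modeled on the rt_mutex slowpath in
 * kernel/locking/rtmutex.c -- names simplified, not the real slowpath code:
 */
#if 0
static void example_rt_mutex_block(struct rt_mutex_base *lock)
{
        rt_mutex_pre_schedule();                /* once, before blocking */
        raw_spin_lock_irq(&lock->wait_lock);
        while (!try_to_take_rt_mutex(lock, current, NULL)) {
                raw_spin_unlock_irq(&lock->wait_lock);
                rt_mutex_schedule();            /* may block */
                raw_spin_lock_irq(&lock->wait_lock);
        }
        raw_spin_unlock_irq(&lock->wait_lock);
        rt_mutex_post_schedule();               /* restore worker state */
}
#endif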

/*
 * rt_mutex_setprio - set the current priority of a task
 * @p: task to boost
 * @pi_task: donor task
 *
 * This function changes the 'effective' priority of a task. It does
 * not touch ->normal_prio like __setscheduler().
 *
 * Used by the rt_mutex code to implement priority inheritance
 * logic. Call site only calls if the priority of the task changed.
 */
void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
{
        int prio, oldprio, queued, running, queue_flag =
                DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
        const struct sched_class *prev_class, *next_class;
        struct rq_flags rf;
        struct rq *rq;

        /* XXX used to be waiter->prio, not waiter->task->prio */
        prio = __rt_effective_prio(pi_task, p->normal_prio);

        /*
         * If nothing changed; bail early.
         */
        if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio))
                return;

        rq = __task_rq_lock(p, &rf);
        update_rq_clock(rq);
        /*
         * Set under pi_lock && rq->lock, such that the value can be used under
         * either lock.
         *
         * Note that a lot of trickiness is required to make this pointer
         * cache work right. rt_mutex_slowunlock()+rt_mutex_postunlock() work
         * together to ensure a task is de-boosted (pi_task is set to NULL)
         * before the task is allowed to run again (and can exit). This
         * ensures the pointer points to a blocked task -- which guarantees
         * the task is present.
         */
        p->pi_top_task = pi_task;

        /*
         * For FIFO/RR we only need to set prio, if that matches we're done.
         */
        if (prio == p->prio && !dl_prio(prio))
                goto out_unlock;

        /*
         * Idle task boosting is a no-no in general. There is one
         * exception, when PREEMPT_RT and NOHZ is active:
         *
         * The idle task calls get_next_timer_interrupt() and holds
         * the timer wheel base->lock on the CPU and another CPU wants
         * to access the timer (probably to cancel it). We can safely
         * ignore the boosting request, as the idle CPU runs this code
         * with interrupts disabled and will complete the lock
         * protected section without being interrupted. So there is no
         * real need to boost.
         */
        if (unlikely(p == rq->idle)) {
                WARN_ON(p != rq->curr);
                WARN_ON(p->pi_blocked_on);
                goto out_unlock;
        }

        trace_sched_pi_setprio(p, pi_task);
        oldprio = p->prio;

        if (oldprio == prio)
                queue_flag &= ~DEQUEUE_MOVE;

        prev_class = p->sched_class;
        next_class = __setscheduler_class(p->policy, prio);

        if (prev_class != next_class && p->se.sched_delayed)
                dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED | DEQUEUE_NOCLOCK);

        queued = task_on_rq_queued(p);
        running = task_current_donor(rq, p);
        if (queued)
                dequeue_task(rq, p, queue_flag);
        if (running)
                put_prev_task(rq, p);

        /*
         * Boosting conditions are:
         * 1. -rt task is running and holds mutex A
         *      --> -dl task blocks on mutex A
         *
         * 2. -dl task is running and holds mutex A
         *      --> -dl task blocks on mutex A and could preempt the
         *          running task
         */
        if (dl_prio(prio)) {
                if (!dl_prio(p->normal_prio) ||
                    (pi_task && dl_prio(pi_task->prio) &&
                     dl_entity_preempt(&pi_task->dl, &p->dl))) {
                        p->dl.pi_se = pi_task->dl.pi_se;
                        queue_flag |= ENQUEUE_REPLENISH;
                } else {
                        p->dl.pi_se = &p->dl;
                }
        } else if (rt_prio(prio)) {
                if (dl_prio(oldprio))
                        p->dl.pi_se = &p->dl;
                if (oldprio < prio)
                        queue_flag |= ENQUEUE_HEAD;
        } else {
                if (dl_prio(oldprio))
                        p->dl.pi_se = &p->dl;
                if (rt_prio(oldprio))
                        p->rt.timeout = 0;
        }

        p->sched_class = next_class;
        p->prio = prio;

        check_class_changing(rq, p, prev_class);

        if (queued)
                enqueue_task(rq, p, queue_flag);
        if (running)
                set_next_task(rq, p);

        check_class_changed(rq, p, prev_class, oldprio);
out_unlock:
        /* Avoid rq from going away on us: */
        preempt_disable();

        rq_unpin_lock(rq, &rf);
        __balance_callbacks(rq);
        raw_spin_rq_unlock(rq);

        preempt_enable();
}
#endif /* CONFIG_RT_MUTEXES */

#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
int __sched __cond_resched(void)
{
        if (should_resched(0) && !irqs_disabled()) {
                preempt_schedule_common();
                return 1;
        }
        /*
         * In PREEMPT_RCU kernels, ->rcu_read_lock_nesting tells the tick
         * whether the current CPU is in an RCU read-side critical section,
         * so the tick can report quiescent states even for CPUs looping
         * in kernel context. In contrast, in non-preemptible kernels,
         * RCU readers leave no in-memory hints, which means that CPU-bound
         * processes executing in kernel context might never report an
         * RCU quiescent state. Therefore, the following code causes
         * cond_resched() to report a quiescent state, but only when RCU
         * is in urgent need of one.
         * A third case, preemptible, but non-PREEMPT_RCU provides for
         * urgently needed quiescent states via rcu_flavor_sched_clock_irq().
         */
#ifndef CONFIG_PREEMPT_RCU
        rcu_all_qs();
#endif
        return 0;
}
EXPORT_SYMBOL(__cond_resched);
#endif
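
/*
 * Illustrative sketch (editor addition): the canonical use of cond_resched()
 * is breaking up a long-running kernel loop so it cannot hog the CPU on
 * non-preemptible kernels. The item list and process_one() helper below are
 * hypothetical.
 */
#if 0
static void process_all(struct list_head *items)
{
        struct work_item *it;

        list_for_each_entry(it, items, node) {
                process_one(it);        /* hypothetical unit of work */
                cond_resched();         /* reschedule if NEED_RESCHED is set */
        }
}
#endif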

#ifdef CONFIG_PREEMPT_DYNAMIC
# ifdef CONFIG_HAVE_PREEMPT_DYNAMIC_CALL
# define cond_resched_dynamic_enabled   __cond_resched
# define cond_resched_dynamic_disabled  ((void *)&__static_call_return0)
DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
EXPORT_STATIC_CALL_TRAMP(cond_resched);

# define might_resched_dynamic_enabled  __cond_resched
# define might_resched_dynamic_disabled ((void *)&__static_call_return0)
DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
EXPORT_STATIC_CALL_TRAMP(might_resched);
# elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
int __sched dynamic_cond_resched(void)
{
        if (!static_branch_unlikely(&sk_dynamic_cond_resched))
                return 0;
        return __cond_resched();
}
EXPORT_SYMBOL(dynamic_cond_resched);

static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched);
int __sched dynamic_might_resched(void)
{
        if (!static_branch_unlikely(&sk_dynamic_might_resched))
                return 0;
        return __cond_resched();
}
EXPORT_SYMBOL(dynamic_might_resched);
# endif
#endif /* CONFIG_PREEMPT_DYNAMIC */

/*
 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
 * call schedule, and on return reacquire the lock.
 *
 * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
 * operations here to prevent schedule() from being called twice (once via
 * spin_unlock(), once by hand).
 */
int __cond_resched_lock(spinlock_t *lock)
{
        int resched = should_resched(PREEMPT_LOCK_OFFSET);
        int ret = 0;

        lockdep_assert_held(lock);

        if (spin_needbreak(lock) || resched) {
                spin_unlock(lock);
                if (!_cond_resched())
                        cpu_relax();
                ret = 1;
                spin_lock(lock);
        }
        return ret;
}
EXPORT_SYMBOL(__cond_resched_lock);
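
/*
 * Illustrative sketch (editor addition, a fragment only): a lock-holding
 * loop that stays scheduler-friendly via cond_resched_lock(). 'my_lock',
 * 'my_list' and handle_one() are hypothetical. Note that a nonzero return
 * means the lock was dropped and reacquired, so state derived under the
 * lock must be revalidated -- here by re-walking the list.
 */
#if 0
        spin_lock(&my_lock);
restart:
        list_for_each_entry(it, &my_list, node) {
                handle_one(it);
                if (cond_resched_lock(&my_lock))
                        goto restart;   /* lock was dropped: re-walk the list */
        }
        spin_unlock(&my_lock);
#endif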

int __cond_resched_rwlock_read(rwlock_t *lock)
{
        int resched = should_resched(PREEMPT_LOCK_OFFSET);
        int ret = 0;

        lockdep_assert_held_read(lock);

        if (rwlock_needbreak(lock) || resched) {
                read_unlock(lock);
                if (!_cond_resched())
                        cpu_relax();
                ret = 1;
                read_lock(lock);
        }
        return ret;
}
EXPORT_SYMBOL(__cond_resched_rwlock_read);

int __cond_resched_rwlock_write(rwlock_t *lock)
{
        int resched = should_resched(PREEMPT_LOCK_OFFSET);
        int ret = 0;

        lockdep_assert_held_write(lock);

        if (rwlock_needbreak(lock) || resched) {
                write_unlock(lock);
                if (!_cond_resched())
                        cpu_relax();
                ret = 1;
                write_lock(lock);
        }
        return ret;
}
EXPORT_SYMBOL(__cond_resched_rwlock_write);

#ifdef CONFIG_PREEMPT_DYNAMIC

# ifdef CONFIG_GENERIC_IRQ_ENTRY
# include <linux/irq-entry-common.h>
# endif

/*
 * SC:cond_resched
 * SC:might_resched
 * SC:preempt_schedule
 * SC:preempt_schedule_notrace
 * SC:irqentry_exit_cond_resched
 *
 *
 * NONE:
 *   cond_resched               <- __cond_resched
 *   might_resched              <- RET0
 *   preempt_schedule           <- NOP
 *   preempt_schedule_notrace   <- NOP
 *   irqentry_exit_cond_resched <- NOP
 *   dynamic_preempt_lazy       <- false
 *
 * VOLUNTARY:
 *   cond_resched               <- __cond_resched
 *   might_resched              <- __cond_resched
 *   preempt_schedule           <- NOP
 *   preempt_schedule_notrace   <- NOP
 *   irqentry_exit_cond_resched <- NOP
 *   dynamic_preempt_lazy       <- false
 *
 * FULL:
 *   cond_resched               <- RET0
 *   might_resched              <- RET0
 *   preempt_schedule           <- preempt_schedule
 *   preempt_schedule_notrace   <- preempt_schedule_notrace
 *   irqentry_exit_cond_resched <- irqentry_exit_cond_resched
 *   dynamic_preempt_lazy       <- false
 *
 * LAZY:
 *   cond_resched               <- RET0
 *   might_resched              <- RET0
 *   preempt_schedule           <- preempt_schedule
 *   preempt_schedule_notrace   <- preempt_schedule_notrace
 *   irqentry_exit_cond_resched <- irqentry_exit_cond_resched
 *   dynamic_preempt_lazy       <- true
 */

enum {
        preempt_dynamic_undefined = -1,
        preempt_dynamic_none,
        preempt_dynamic_voluntary,
        preempt_dynamic_full,
        preempt_dynamic_lazy,
};

int preempt_dynamic_mode = preempt_dynamic_undefined;

int sched_dynamic_mode(const char *str)
{
# ifndef CONFIG_PREEMPT_RT
        if (!strcmp(str, "none"))
                return preempt_dynamic_none;

        if (!strcmp(str, "voluntary"))
                return preempt_dynamic_voluntary;
# endif

        if (!strcmp(str, "full"))
                return preempt_dynamic_full;

# ifdef CONFIG_ARCH_HAS_PREEMPT_LAZY
        if (!strcmp(str, "lazy"))
                return preempt_dynamic_lazy;
# endif

        return -EINVAL;
}

# define preempt_dynamic_key_enable(f)  static_key_enable(&sk_dynamic_##f.key)
# define preempt_dynamic_key_disable(f) static_key_disable(&sk_dynamic_##f.key)

# if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
# define preempt_dynamic_enable(f)      static_call_update(f, f##_dynamic_enabled)
# define preempt_dynamic_disable(f)     static_call_update(f, f##_dynamic_disabled)
# elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
# define preempt_dynamic_enable(f)      preempt_dynamic_key_enable(f)
# define preempt_dynamic_disable(f)     preempt_dynamic_key_disable(f)
# else
# error "Unsupported PREEMPT_DYNAMIC mechanism"
# endif

static DEFINE_MUTEX(sched_dynamic_mutex);

static void __sched_dynamic_update(int mode)
{
        /*
         * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
         * the ZERO state, which is invalid.
         */
        preempt_dynamic_enable(cond_resched);
        preempt_dynamic_enable(might_resched);
        preempt_dynamic_enable(preempt_schedule);
        preempt_dynamic_enable(preempt_schedule_notrace);
        preempt_dynamic_enable(irqentry_exit_cond_resched);
        preempt_dynamic_key_disable(preempt_lazy);

        switch (mode) {
        case preempt_dynamic_none:
                preempt_dynamic_enable(cond_resched);
                preempt_dynamic_disable(might_resched);
                preempt_dynamic_disable(preempt_schedule);
                preempt_dynamic_disable(preempt_schedule_notrace);
                preempt_dynamic_disable(irqentry_exit_cond_resched);
                preempt_dynamic_key_disable(preempt_lazy);
                if (mode != preempt_dynamic_mode)
                        pr_info("Dynamic Preempt: none\n");
                break;

        case preempt_dynamic_voluntary:
                preempt_dynamic_enable(cond_resched);
                preempt_dynamic_enable(might_resched);
                preempt_dynamic_disable(preempt_schedule);
                preempt_dynamic_disable(preempt_schedule_notrace);
                preempt_dynamic_disable(irqentry_exit_cond_resched);
                preempt_dynamic_key_disable(preempt_lazy);
                if (mode != preempt_dynamic_mode)
                        pr_info("Dynamic Preempt: voluntary\n");
                break;

        case preempt_dynamic_full:
                preempt_dynamic_disable(cond_resched);
                preempt_dynamic_disable(might_resched);
                preempt_dynamic_enable(preempt_schedule);
                preempt_dynamic_enable(preempt_schedule_notrace);
                preempt_dynamic_enable(irqentry_exit_cond_resched);
                preempt_dynamic_key_disable(preempt_lazy);
                if (mode != preempt_dynamic_mode)
                        pr_info("Dynamic Preempt: full\n");
                break;

        case preempt_dynamic_lazy:
                preempt_dynamic_disable(cond_resched);
                preempt_dynamic_disable(might_resched);
                preempt_dynamic_enable(preempt_schedule);
                preempt_dynamic_enable(preempt_schedule_notrace);
                preempt_dynamic_enable(irqentry_exit_cond_resched);
                preempt_dynamic_key_enable(preempt_lazy);
                if (mode != preempt_dynamic_mode)
                        pr_info("Dynamic Preempt: lazy\n");
                break;
        }

        preempt_dynamic_mode = mode;
}

void sched_dynamic_update(int mode)
{
        mutex_lock(&sched_dynamic_mutex);
        __sched_dynamic_update(mode);
        mutex_unlock(&sched_dynamic_mutex);
}

static int __init setup_preempt_mode(char *str)
{
        int mode = sched_dynamic_mode(str);
        if (mode < 0) {
                pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
                return 0;
        }

        sched_dynamic_update(mode);
        return 1;
}
__setup("preempt=", setup_preempt_mode);
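
/*
 * Editor note (illustrative, not kernel source): the model can thus be
 * chosen on the kernel command line, e.g. "preempt=full" or "preempt=lazy".
 * At runtime the same switch is believed to be exposed via a debugfs file
 * created in kernel/sched/debug.c, roughly:
 *
 *   # cat /sys/kernel/debug/sched/preempt
 *   none voluntary (full) lazy
 *   # echo lazy > /sys/kernel/debug/sched/preempt
 */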

static void __init preempt_dynamic_init(void)
{
        if (preempt_dynamic_mode == preempt_dynamic_undefined) {
                if (IS_ENABLED(CONFIG_PREEMPT_NONE)) {
                        sched_dynamic_update(preempt_dynamic_none);
                } else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) {
                        sched_dynamic_update(preempt_dynamic_voluntary);
                } else if (IS_ENABLED(CONFIG_PREEMPT_LAZY)) {
                        sched_dynamic_update(preempt_dynamic_lazy);
                } else {
                        /* Default static call setting, nothing to do */
                        WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT));
                        preempt_dynamic_mode = preempt_dynamic_full;
                        pr_info("Dynamic Preempt: full\n");
                }
        }
}

# define PREEMPT_MODEL_ACCESSOR(mode) \
        bool preempt_model_##mode(void)                                          \
        {                                                                        \
                WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \
                return preempt_dynamic_mode == preempt_dynamic_##mode;           \
        }                                                                        \
        EXPORT_SYMBOL_GPL(preempt_model_##mode)

PREEMPT_MODEL_ACCESSOR(none);
PREEMPT_MODEL_ACCESSOR(voluntary);
PREEMPT_MODEL_ACCESSOR(full);
PREEMPT_MODEL_ACCESSOR(lazy);
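
/*
 * Illustrative sketch (editor addition): what one accessor expands to,
 * e.g. PREEMPT_MODEL_ACCESSOR(full), with whitespace added:
 */
#if 0
bool preempt_model_full(void)
{
        WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined);
        return preempt_dynamic_mode == preempt_dynamic_full;
}
EXPORT_SYMBOL_GPL(preempt_model_full);
#endif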

#else /* !CONFIG_PREEMPT_DYNAMIC: */

#define preempt_dynamic_mode -1

static inline void preempt_dynamic_init(void) { }

#endif /* CONFIG_PREEMPT_DYNAMIC */

const char *preempt_modes[] = {
        "none", "voluntary", "full", "lazy", NULL,
};

const char *preempt_model_str(void)
{
        bool brace = IS_ENABLED(CONFIG_PREEMPT_RT) &&
                (IS_ENABLED(CONFIG_PREEMPT_DYNAMIC) ||
                 IS_ENABLED(CONFIG_PREEMPT_LAZY));
        static char buf[128];

        if (IS_ENABLED(CONFIG_PREEMPT_BUILD)) {
                struct seq_buf s;

                seq_buf_init(&s, buf, sizeof(buf));
                seq_buf_puts(&s, "PREEMPT");

                if (IS_ENABLED(CONFIG_PREEMPT_RT))
                        seq_buf_printf(&s, "%sRT%s",
                                       brace ? "_{" : "_",
                                       brace ? "," : "");

                if (IS_ENABLED(CONFIG_PREEMPT_DYNAMIC)) {
                        seq_buf_printf(&s, "(%s)%s",
                                       preempt_dynamic_mode >= 0 ?
                                       preempt_modes[preempt_dynamic_mode] : "undef",
                                       brace ? "}" : "");
                        return seq_buf_str(&s);
                }

                if (IS_ENABLED(CONFIG_PREEMPT_LAZY)) {
                        seq_buf_printf(&s, "LAZY%s",
                                       brace ? "}" : "");
                        return seq_buf_str(&s);
                }

                return seq_buf_str(&s);
        }

        if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY_BUILD))
                return "VOLUNTARY";

        return "NONE";
}
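
/*
 * Editor note (illustrative; derived from the format logic above, not an
 * exhaustive list): example results include "NONE", "VOLUNTARY", "PREEMPT",
 * "PREEMPT(full)", "PREEMPT_RT" and, with RT plus PREEMPT_DYNAMIC,
 * "PREEMPT_{RT,(lazy)}".
 */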

int io_schedule_prepare(void)
{
        int old_iowait = current->in_iowait;

        current->in_iowait = 1;
        blk_flush_plug(current->plug, true);
        return old_iowait;
}

void io_schedule_finish(int token)
{
        current->in_iowait = token;
}

/*
 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
 * that process accounting knows that this is a task in IO wait state.
 */
long __sched io_schedule_timeout(long timeout)
{
        int token;
        long ret;

        token = io_schedule_prepare();
        ret = schedule_timeout(timeout);
        io_schedule_finish(token);

        return ret;
}
EXPORT_SYMBOL(io_schedule_timeout);

void __sched io_schedule(void)
{
        int token;

        token = io_schedule_prepare();
        schedule();
        io_schedule_finish(token);
}
EXPORT_SYMBOL(io_schedule);
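
/*
 * Illustrative sketch (editor addition, a fragment only): the prepare/finish
 * pair lets a caller mark an arbitrary blocking primitive as IO wait without
 * calling io_schedule() itself. The 'done' completion is hypothetical.
 */
#if 0
        int tok = io_schedule_prepare();        /* sets current->in_iowait */
        wait_for_completion(&done);             /* any blocking primitive */
        io_schedule_finish(tok);                /* restores the old value */
#endif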

void sched_show_task(struct task_struct *p)
{
        unsigned long free;
        int ppid;

        if (!try_get_task_stack(p))
                return;

        pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));

        if (task_is_running(p))
                pr_cont(" running task ");
        free = stack_not_used(p);
        ppid = 0;
        rcu_read_lock();
        if (pid_alive(p))
                ppid = task_pid_nr(rcu_dereference(p->real_parent));
        rcu_read_unlock();
        pr_cont(" stack:%-5lu pid:%-5d tgid:%-5d ppid:%-6d task_flags:0x%04x flags:0x%08lx\n",
                free, task_pid_nr(p), task_tgid_nr(p),
                ppid, p->flags, read_task_thread_flags(p));

        print_worker_info(KERN_INFO, p);
        print_stop_info(KERN_INFO, p);
        print_scx_info(KERN_INFO, p);
        show_stack(p, NULL, KERN_INFO);
        put_task_stack(p);
}
EXPORT_SYMBOL_GPL(sched_show_task);

static inline bool
state_filter_match(unsigned long state_filter, struct task_struct *p)
{
        unsigned int state = READ_ONCE(p->__state);

        /* no filter, everything matches */
        if (!state_filter)
                return true;

        /* filter, but doesn't match */
        if (!(state & state_filter))
                return false;

        /*
         * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
         * TASK_KILLABLE).
         */
        if (state_filter == TASK_UNINTERRUPTIBLE && (state & TASK_NOLOAD))
                return false;

        return true;
}

void show_state_filter(unsigned int state_filter)
{
        struct task_struct *g, *p;

        rcu_read_lock();
        for_each_process_thread(g, p) {
                /*
                 * Reset the NMI watchdog timeout: listing all tasks on a
                 * slow console might take a lot of time.
                 * Also, reset softlockup watchdogs on all CPUs, because
                 * another CPU might be blocked waiting for us to process
                 * an IPI.
                 */
                touch_nmi_watchdog();
                touch_all_softlockup_watchdogs();
                if (state_filter_match(state_filter, p))
                        sched_show_task(p);
        }

        if (!state_filter)
                sysrq_sched_debug_show();

        rcu_read_unlock();
        /*
         * Only show locks if all tasks are dumped:
         */
        if (!state_filter)
                debug_show_all_locks();
}

/**
 * init_idle - set up an idle thread for a given CPU
 * @idle: task in question
 * @cpu: CPU the idle task belongs to
 *
 * NOTE: this function does not set the idle thread's NEED_RESCHED
 * flag, to make booting more robust.
 */
void __init init_idle(struct task_struct *idle, int cpu)
{
        struct affinity_context ac = (struct affinity_context) {
                .new_mask  = cpumask_of(cpu),
                .flags     = 0,
        };
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;

        raw_spin_lock_irqsave(&idle->pi_lock, flags);
        raw_spin_rq_lock(rq);

        idle->__state = TASK_RUNNING;
        idle->se.exec_start = sched_clock();
        /*
         * PF_KTHREAD should already be set at this point; regardless, make it
         * look like a proper per-CPU kthread.
         */
        idle->flags |= PF_KTHREAD | PF_NO_SETAFFINITY;
        kthread_set_per_cpu(idle, cpu);

        /*
         * No validation and serialization required at boot time and for
         * setting up the idle tasks of not yet online CPUs.
         */
        set_cpus_allowed_common(idle, &ac);
        /*
         * We're having a chicken and egg problem, even though we are
         * holding rq->lock, the CPU isn't yet set to this CPU so the
         * lockdep check in task_group() will fail.
         *
         * Similar case to sched_fork(). Alternatively we could
         * use task_rq_lock() here and obtain the other rq->lock.
         *
         * Silence PROVE_RCU
         */
        rcu_read_lock();
        __set_task_cpu(idle, cpu);
        rcu_read_unlock();

        rq->idle = idle;
        rq_set_donor(rq, idle);
        rcu_assign_pointer(rq->curr, idle);
        idle->on_rq = TASK_ON_RQ_QUEUED;
        idle->on_cpu = 1;
        raw_spin_rq_unlock(rq);
        raw_spin_unlock_irqrestore(&idle->pi_lock, flags);

        /* Set the preempt count _outside_ the spinlocks! */
        init_idle_preempt_count(idle, cpu);

        /*
         * The idle tasks have their own, simple scheduling class:
         */
        idle->sched_class = &idle_sched_class;
        ftrace_graph_init_idle_task(idle, cpu);
        vtime_init_idle(idle, cpu);
        sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
}

int cpuset_cpumask_can_shrink(const struct cpumask *cur,
                              const struct cpumask *trial)
{
        int ret = 1;

        if (cpumask_empty(cur))
                return ret;

        ret = dl_cpuset_cpumask_can_shrink(cur, trial);

        return ret;
}

int task_can_attach(struct task_struct *p)
{
        int ret = 0;

        /*
         * Kthreads which disallow setaffinity shouldn't be moved
         * to a new cpuset; we don't want to change their CPU
         * affinity and isolating such threads by their set of
         * allowed nodes is unnecessary. Thus, cpusets are not
         * applicable for such threads. This prevents checking for
         * success of set_cpus_allowed_ptr() on all attached tasks
         * before cpus_mask may be changed.
         */
        if (p->flags & PF_NO_SETAFFINITY)
                ret = -EINVAL;

        return ret;
}

bool sched_smp_initialized __read_mostly;

#ifdef CONFIG_NUMA_BALANCING
/* Migrate current task p to target_cpu */
int migrate_task_to(struct task_struct *p, int target_cpu)
{
        struct migration_arg arg = { p, target_cpu };
        int curr_cpu = task_cpu(p);

        if (curr_cpu == target_cpu)
                return 0;

        if (!cpumask_test_cpu(target_cpu, p->cpus_ptr))
                return -EINVAL;

        /* TODO: This is not properly updating schedstats */

        trace_sched_move_numa(p, curr_cpu, target_cpu);
        return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
}

/*
 * Requeue a task on a given node and accurately track the number of NUMA
 * tasks on the runqueues
 */
void sched_setnuma(struct task_struct *p, int nid)
{
        bool queued, running;
        struct rq_flags rf;
        struct rq *rq;

        rq = task_rq_lock(p, &rf);
        queued = task_on_rq_queued(p);
        running = task_current_donor(rq, p);

        if (queued)
                dequeue_task(rq, p, DEQUEUE_SAVE);
        if (running)
                put_prev_task(rq, p);

        p->numa_preferred_nid = nid;

        if (queued)
                enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
        if (running)
                set_next_task(rq, p);
        task_rq_unlock(rq, p, &rf);
}
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Invoked on the outgoing CPU in context of the CPU hotplug thread
 * after ensuring that there are no user space tasks left on the CPU.
 *
 * If there is a lazy mm in use on the hotplug thread, drop it and
 * switch to init_mm.
 *
 * The reference count on init_mm is dropped in finish_cpu().
 */
static void sched_force_init_mm(void)
{
        struct mm_struct *mm = current->active_mm;

        if (mm != &init_mm) {
                mmgrab_lazy_tlb(&init_mm);
                local_irq_disable();
                current->active_mm = &init_mm;
                switch_mm_irqs_off(mm, &init_mm, current);
                local_irq_enable();
                finish_arch_post_lock_switch();
                mmdrop_lazy_tlb(mm);
        }

        /* finish_cpu(), as run on the BP, will clean up the active_mm state */
}

static int __balance_push_cpu_stop(void *arg)
{
        struct task_struct *p = arg;
        struct rq *rq = this_rq();
        struct rq_flags rf;
        int cpu;

        raw_spin_lock_irq(&p->pi_lock);
        rq_lock(rq, &rf);

        update_rq_clock(rq);

        if (task_rq(p) == rq && task_on_rq_queued(p)) {
                cpu = select_fallback_rq(rq->cpu, p);
                rq = __migrate_task(rq, &rf, p, cpu);
        }

        rq_unlock(rq, &rf);
        raw_spin_unlock_irq(&p->pi_lock);

        put_task_struct(p);

        return 0;
}

static DEFINE_PER_CPU(struct cpu_stop_work, push_work);

/*
 * Ensure we only run per-cpu kthreads once the CPU goes !active.
 *
 * This is enabled below SCHED_AP_ACTIVE, i.e. when !cpu_active(), but it is
 * only effective while the CPU is going down.
 */
static void balance_push(struct rq *rq)
{
        struct task_struct *push_task = rq->curr;

        lockdep_assert_rq_held(rq);

        /*
         * Ensure the thing is persistent until balance_push_set(.on = false);
         */
        rq->balance_callback = &balance_push_callback;

        /*
         * Only active while going offline and when invoked on the outgoing
         * CPU.
         */
        if (!cpu_dying(rq->cpu) || rq != this_rq())
                return;

        /*
         * Both the cpu-hotplug and stop task are in this case and are
         * required to complete the hotplug process.
         */
        if (kthread_is_per_cpu(push_task) ||
            is_migration_disabled(push_task)) {

                /*
                 * If this is the idle task on the outgoing CPU try to wake
                 * up the hotplug control thread which might wait for the
                 * last task to vanish. The rcuwait_active() check is
                 * accurate here because the waiter is pinned on this CPU
                 * and obviously can't be running in parallel.
                 *
                 * On RT kernels this also has to check whether there are
                 * pinned and scheduled out tasks on the runqueue. They
                 * need to leave the migrate disabled section first.
                 */
                if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
                    rcuwait_active(&rq->hotplug_wait)) {
                        raw_spin_rq_unlock(rq);
                        rcuwait_wake_up(&rq->hotplug_wait);
                        raw_spin_rq_lock(rq);
                }
                return;
        }

        get_task_struct(push_task);
        /*
         * Temporarily drop rq->lock such that we can wake-up the stop task.
         * Both preemption and IRQs are still disabled.
         */
        preempt_disable();
        raw_spin_rq_unlock(rq);
        stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
                            this_cpu_ptr(&push_work));
        preempt_enable();
        /*
         * At this point need_resched() is true and we'll take the loop in
         * schedule(). The next pick is obviously going to be the stop task
         * which kthread_is_per_cpu() and will push this task away.
         */
        raw_spin_rq_lock(rq);
}

static void balance_push_set(int cpu, bool on)
{
        struct rq *rq = cpu_rq(cpu);
        struct rq_flags rf;

        rq_lock_irqsave(rq, &rf);
        if (on) {
                WARN_ON_ONCE(rq->balance_callback);
                rq->balance_callback = &balance_push_callback;
        } else if (rq->balance_callback == &balance_push_callback) {
                rq->balance_callback = NULL;
        }
        rq_unlock_irqrestore(rq, &rf);
}

/*
 * Invoked from a CPU's hotplug control thread after the CPU has been marked
 * inactive. All tasks which are not per CPU kernel threads are either
 * pushed off this CPU now via balance_push() or placed on a different CPU
 * during wakeup. Wait until the CPU is quiescent.
 */
static void balance_hotplug_wait(void)
{
        struct rq *rq = this_rq();

        rcuwait_wait_event(&rq->hotplug_wait,
                           rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
                           TASK_UNINTERRUPTIBLE);
}

#else /* !CONFIG_HOTPLUG_CPU: */

static inline void balance_push(struct rq *rq)
{
}

static inline void balance_push_set(int cpu, bool on)
{
}

static inline void balance_hotplug_wait(void)
{
}

#endif /* !CONFIG_HOTPLUG_CPU */

void set_rq_online(struct rq *rq)
{
        if (!rq->online) {
                const struct sched_class *class;

                cpumask_set_cpu(rq->cpu, rq->rd->online);
                rq->online = 1;

                for_each_class(class) {
                        if (class->rq_online)
                                class->rq_online(rq);
                }
        }
}

void set_rq_offline(struct rq *rq)
{
        if (rq->online) {
                const struct sched_class *class;

                update_rq_clock(rq);
                for_each_class(class) {
                        if (class->rq_offline)
                                class->rq_offline(rq);
                }

                cpumask_clear_cpu(rq->cpu, rq->rd->online);
                rq->online = 0;
        }
}

static inline void sched_set_rq_online(struct rq *rq, int cpu)
{
        struct rq_flags rf;

        rq_lock_irqsave(rq, &rf);
        if (rq->rd) {
                BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
                set_rq_online(rq);
        }
        rq_unlock_irqrestore(rq, &rf);
}

static inline void sched_set_rq_offline(struct rq *rq, int cpu)
{
        struct rq_flags rf;

        rq_lock_irqsave(rq, &rf);
        if (rq->rd) {
                BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
                set_rq_offline(rq);
        }
        rq_unlock_irqrestore(rq, &rf);
}

/*
 * Used to mark begin/end of suspend/resume:
 */
static int num_cpus_frozen;

/*
 * Update cpusets according to cpu_active mask. If cpusets are
 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
 * around partition_sched_domains().
 *
 * If we come here as part of a suspend/resume, don't touch cpusets because we
 * want to restore them to their original state upon resume anyway.
 */
static void cpuset_cpu_active(void)
{
        if (cpuhp_tasks_frozen) {
                /*
                 * num_cpus_frozen tracks how many CPUs are involved in the
                 * suspend/resume sequence. As long as this is not the last
                 * online operation in the resume sequence, just build a single
                 * sched domain, ignoring cpusets.
                 */
                cpuset_reset_sched_domains();
                if (--num_cpus_frozen)
                        return;
                /*
                 * This is the last CPU online operation. So fall through and
                 * restore the original sched domains by considering the
                 * cpuset configurations.
                 */
                cpuset_force_rebuild();
        }
        cpuset_update_active_cpus();
}

static void cpuset_cpu_inactive(unsigned int cpu)
{
        if (!cpuhp_tasks_frozen) {
                cpuset_update_active_cpus();
        } else {
                num_cpus_frozen++;
                cpuset_reset_sched_domains();
        }
}

static inline void sched_smt_present_inc(int cpu)
{
#ifdef CONFIG_SCHED_SMT
        if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
                static_branch_inc_cpuslocked(&sched_smt_present);
#endif
}

static inline void sched_smt_present_dec(int cpu)
{
#ifdef CONFIG_SCHED_SMT
        if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
                static_branch_dec_cpuslocked(&sched_smt_present);
#endif
}

int sched_cpu_activate(unsigned int cpu)
{
        struct rq *rq = cpu_rq(cpu);

        /*
         * Clear the balance_push callback and prepare to schedule
         * regular tasks.
         */
        balance_push_set(cpu, false);

        /*
         * When going up, increment the number of cores with SMT present.
         */
        sched_smt_present_inc(cpu);
        set_cpu_active(cpu, true);

        if (sched_smp_initialized) {
                sched_update_numa(cpu, true);
                sched_domains_numa_masks_set(cpu);
                cpuset_cpu_active();
        }

        scx_rq_activate(rq);

        /*
         * Put the rq online, if not already. This happens:
         *
         * 1) In the early boot process, because we build the real domains
         *    after all CPUs have been brought up.
         *
         * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
         *    domains.
         */
        sched_set_rq_online(rq, cpu);

        return 0;
}

int sched_cpu_deactivate(unsigned int cpu)
{
        struct rq *rq = cpu_rq(cpu);
        int ret;

        ret = dl_bw_deactivate(cpu);

        if (ret)
                return ret;

        /*
         * Remove CPU from nohz.idle_cpus_mask to prevent participating in
         * load balancing when not active
         */
        nohz_balance_exit_idle(rq);

        set_cpu_active(cpu, false);

        /*
         * From this point forward, this CPU will refuse to run any task that
         * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
         * push those tasks away until this gets cleared, see
         * sched_cpu_dying().
         */
        balance_push_set(cpu, true);

        /*
         * We've cleared cpu_active_mask / set balance_push, wait for all
         * preempt-disabled and RCU users of this state to go away such that
         * all new such users will observe it.
         *
         * Specifically, we rely on ttwu to no longer target this CPU, see
         * ttwu_queue_cond() and is_cpu_allowed().
         *
         * Do sync before parking the smpboot threads to handle the RCU boost
         * case.
         */
        synchronize_rcu();

        sched_set_rq_offline(rq, cpu);

        scx_rq_deactivate(rq);

        /*
         * When going down, decrement the number of cores with SMT present.
         */
        sched_smt_present_dec(cpu);

#ifdef CONFIG_SCHED_SMT
        sched_core_cpu_deactivate(cpu);
#endif

        if (!sched_smp_initialized)
                return 0;

        sched_update_numa(cpu, false);
        cpuset_cpu_inactive(cpu);
        sched_domains_numa_masks_clear(cpu);
        return 0;
}
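
/*
 * Editor note (illustrative summary, derived from these callbacks; see the
 * cpuhp state table in kernel/cpu.c for the authoritative ordering):
 *
 *   bring-up:  sched_cpu_starting() ... sched_cpu_activate()
 *   teardown:  sched_cpu_deactivate() ... sched_cpu_wait_empty() ...
 *              sched_cpu_dying()
 */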

static void sched_rq_cpu_starting(unsigned int cpu)
{
        struct rq *rq = cpu_rq(cpu);

        rq->calc_load_update = calc_load_update;
        update_max_interval();
}

int sched_cpu_starting(unsigned int cpu)
{
        sched_core_cpu_starting(cpu);
        sched_rq_cpu_starting(cpu);
        sched_tick_start(cpu);
        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Invoked immediately before the stopper thread is invoked to bring the
 * CPU down completely. At this point all per CPU kthreads except the
 * hotplug thread (current) and the stopper thread (inactive) have been
 * either parked or have been unbound from the outgoing CPU. Ensure that
 * any of those which might be on the way out are gone.
 *
 * If after this point a bound task is being woken on this CPU then the
 * responsible hotplug callback has failed to do its job.
 * sched_cpu_dying() will catch it with the appropriate fireworks.
 */
int sched_cpu_wait_empty(unsigned int cpu)
{
        balance_hotplug_wait();
        sched_force_init_mm();
        return 0;
}

/*
 * Since this CPU is going 'away' for a while, fold any nr_active delta we
 * might have. Called from the CPU stopper task after ensuring that the
 * stopper is the last running task on the CPU, so nr_active count is
 * stable. We need to take the tear-down thread which is calling this into
 * account, so we hand in adjust = 1 to the load calculation.
 *
 * Also see the comment "Global load-average calculations".
 */
static void calc_load_migrate(struct rq *rq)
{
        long delta = calc_load_fold_active(rq, 1);

        if (delta)
                atomic_long_add(delta, &calc_load_tasks);
}

static void dump_rq_tasks(struct rq *rq, const char *loglvl)
{
        struct task_struct *g, *p;
        int cpu = cpu_of(rq);

        lockdep_assert_rq_held(rq);

        printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
        for_each_process_thread(g, p) {
                if (task_cpu(p) != cpu)
                        continue;

                if (!task_on_rq_queued(p))
                        continue;

                printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
        }
}

int sched_cpu_dying(unsigned int cpu)
{
        struct rq *rq = cpu_rq(cpu);
        struct rq_flags rf;

        /* Handle pending wakeups and then migrate everything off */
        sched_tick_stop(cpu);

        rq_lock_irqsave(rq, &rf);
        if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
                WARN(true, "Dying CPU not properly vacated!");
                dump_rq_tasks(rq, KERN_WARNING);
        }
        rq_unlock_irqrestore(rq, &rf);

        calc_load_migrate(rq);
        update_max_interval();
        hrtick_clear(rq);
        sched_core_cpu_dying(cpu);
        return 0;
}
#endif /* CONFIG_HOTPLUG_CPU */

void __init sched_init_smp(void)
{
        sched_init_numa(NUMA_NO_NODE);

        /*
         * There's no userspace yet to cause hotplug operations; hence all the
         * CPU masks are stable and all blatant races in the below code cannot
         * happen.
         */
        sched_domains_mutex_lock();
        sched_init_domains(cpu_active_mask);
        sched_domains_mutex_unlock();

        /* Move init over to a non-isolated CPU */
        if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0)
                BUG();
        current->flags &= ~PF_NO_SETAFFINITY;
        sched_init_granularity();

        init_sched_rt_class();
        init_sched_dl_class();

        sched_init_dl_servers();

        sched_smp_initialized = true;
}

static int __init migration_init(void)
{
        sched_cpu_starting(smp_processor_id());
        return 0;
}
early_initcall(migration_init);

int in_sched_functions(unsigned long addr)
{
        return in_lock_functions(addr) ||
                (addr >= (unsigned long)__sched_text_start
                && addr < (unsigned long)__sched_text_end);
}

#ifdef CONFIG_CGROUP_SCHED
/*
 * Default task group.
 * Every task in the system belongs to this group at bootup.
 */
struct task_group root_task_group;
LIST_HEAD(task_groups);

/* Cacheline aligned slab cache for task_group */
static struct kmem_cache *task_group_cache __ro_after_init;
#endif

void __init sched_init(void)
{
        unsigned long ptr = 0;
        int i;

        /* Make sure the linker didn't screw up */
        BUG_ON(!sched_class_above(&stop_sched_class, &dl_sched_class));
        BUG_ON(!sched_class_above(&dl_sched_class, &rt_sched_class));
        BUG_ON(!sched_class_above(&rt_sched_class, &fair_sched_class));
        BUG_ON(!sched_class_above(&fair_sched_class, &idle_sched_class));
#ifdef CONFIG_SCHED_CLASS_EXT
        BUG_ON(!sched_class_above(&fair_sched_class, &ext_sched_class));
        BUG_ON(!sched_class_above(&ext_sched_class, &idle_sched_class));
#endif

        wait_bit_init();

#ifdef CONFIG_FAIR_GROUP_SCHED
        ptr += 2 * nr_cpu_ids * sizeof(void **);
#endif
#ifdef CONFIG_RT_GROUP_SCHED
        ptr += 2 * nr_cpu_ids * sizeof(void **);
#endif
        if (ptr) {
                ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT);

#ifdef CONFIG_FAIR_GROUP_SCHED
                root_task_group.se = (struct sched_entity **)ptr;
                ptr += nr_cpu_ids * sizeof(void **);

                root_task_group.cfs_rq = (struct cfs_rq **)ptr;
                ptr += nr_cpu_ids * sizeof(void **);

                root_task_group.shares = ROOT_TASK_GROUP_LOAD;
                init_cfs_bandwidth(&root_task_group.cfs_bandwidth, NULL);
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_EXT_GROUP_SCHED
                scx_tg_init(&root_task_group);
#endif /* CONFIG_EXT_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
                root_task_group.rt_se = (struct sched_rt_entity **)ptr;
                ptr += nr_cpu_ids * sizeof(void **);

                root_task_group.rt_rq = (struct rt_rq **)ptr;
                ptr += nr_cpu_ids * sizeof(void **);

#endif /* CONFIG_RT_GROUP_SCHED */
        }

        init_defrootdomain();

#ifdef CONFIG_RT_GROUP_SCHED
        init_rt_bandwidth(&root_task_group.rt_bandwidth,
                          global_rt_period(), global_rt_runtime());
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_CGROUP_SCHED
        task_group_cache = KMEM_CACHE(task_group, 0);

        list_add(&root_task_group.list, &task_groups);
        INIT_LIST_HEAD(&root_task_group.children);
        INIT_LIST_HEAD(&root_task_group.siblings);
        autogroup_init(&init_task);
#endif /* CONFIG_CGROUP_SCHED */

        for_each_possible_cpu(i) {
                struct rq *rq;

                rq = cpu_rq(i);
                raw_spin_lock_init(&rq->__lock);
                rq->nr_running = 0;
                rq->calc_load_active = 0;
                rq->calc_load_update = jiffies + LOAD_FREQ;
                init_cfs_rq(&rq->cfs);
                init_rt_rq(&rq->rt);
                init_dl_rq(&rq->dl);
#ifdef CONFIG_FAIR_GROUP_SCHED
                INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
                rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
                /*
                 * How much CPU bandwidth does root_task_group get?
                 *
                 * In case of task-groups formed through the cgroup filesystem, it
                 * gets 100% of the CPU resources in the system. This overall
                 * system CPU resource is divided among the tasks of
                 * root_task_group and its child task-groups in a fair manner,
                 * based on each entity's (task or task-group's) weight
                 * (se->load.weight).
                 *
                 * In other words, if root_task_group has 10 tasks (of weight
                 * 1024 each) and two child groups A0 and A1 (of weight 1024
                 * each), then A0's share of the CPU resource is:
                 *
                 *      A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
                 *
                 * We achieve this by letting root_task_group's tasks sit
                 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
                 */
                init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
#endif /* CONFIG_FAIR_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED
                /*
                 * This is required for init cpu because rt.c:__enable_runtime()
                 * starts working after scheduler_running, which is not the case
                 * yet.
                 */
                rq->rt.rt_runtime = global_rt_runtime();
                init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
#endif
                rq->sd = NULL;
                rq->rd = NULL;
                rq->cpu_capacity = SCHED_CAPACITY_SCALE;
                rq->balance_callback = &balance_push_callback;
                rq->active_balance = 0;
                rq->next_balance = jiffies;
                rq->push_cpu = 0;
                rq->cpu = i;
                rq->online = 0;
                rq->idle_stamp = 0;
                rq->avg_idle = 2*sysctl_sched_migration_cost;
                rq->max_idle_balance_cost = sysctl_sched_migration_cost;

                INIT_LIST_HEAD(&rq->cfs_tasks);

                rq_attach_root(rq, &def_root_domain);
#ifdef CONFIG_NO_HZ_COMMON
                rq->last_blocked_load_update_tick = jiffies;
                atomic_set(&rq->nohz_flags, 0);

                INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
#endif
#ifdef CONFIG_HOTPLUG_CPU
                rcuwait_init(&rq->hotplug_wait);
#endif
                hrtick_rq_init(rq);
                atomic_set(&rq->nr_iowait, 0);
                fair_server_init(rq);

#ifdef CONFIG_SCHED_CORE
                rq->core = rq;
                rq->core_pick = NULL;
                rq->core_dl_server = NULL;
                rq->core_enabled = 0;
                rq->core_tree = RB_ROOT;
                rq->core_forceidle_count = 0;
                rq->core_forceidle_occupation = 0;
                rq->core_forceidle_start = 0;

                rq->core_cookie = 0UL;
#endif
                zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i));
        }

        set_load_weight(&init_task, false);
        init_task.se.slice = sysctl_sched_base_slice;

        /*
         * The boot idle thread does lazy MMU switching as well:
         */
        mmgrab_lazy_tlb(&init_mm);
        enter_lazy_tlb(&init_mm, current);

        /*
         * The idle task doesn't need the kthread struct to function, but it
         * is dressed up as a per-CPU kthread and thus needs to play the part
         * if we want to avoid special-casing it in code that deals with per-CPU
         * kthreads.
         */
        WARN_ON(!set_kthread_struct(current));

        /*
         * Make us the idle thread. Technically, schedule() should not be
         * called from this thread, however somewhere below it might be,
         * but because we are the idle thread, we just pick up running again
         * when this runqueue becomes "idle".
         */
        __sched_fork(0, current);
        init_idle(current, smp_processor_id());

        calc_load_update = jiffies + LOAD_FREQ;

        idle_thread_set_boot_cpu();

        balance_push_set(smp_processor_id(), false);
        init_sched_fair_class();
        init_sched_ext_class();

        psi_init();

        init_uclamp();

        preempt_dynamic_init();

        scheduler_running = 1;
}

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP

void __might_sleep(const char *file, int line)
{
        unsigned int state = get_current_state();
        /*
         * Blocking primitives will set (and therefore destroy) current->state,
         * since we will exit with TASK_RUNNING, make sure we enter with it;
         * otherwise we will destroy state.
         */
        WARN_ONCE(state != TASK_RUNNING && current->task_state_change,
                  "do not call blocking ops when !TASK_RUNNING; "
                  "state=%x set at [<%p>] %pS\n", state,
                  (void *)current->task_state_change,
                  (void *)current->task_state_change);

        __might_resched(file, line, 0);
}
EXPORT_SYMBOL(__might_sleep);
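
/*
 * Illustrative sketch (editor addition, a fragment only): the bug pattern
 * the warning above catches -- blocking after setting a non-running task
 * state clobbers that state and can cause missed wakeups. 'cond' and 'm'
 * are hypothetical.
 */
#if 0
        set_current_state(TASK_UNINTERRUPTIBLE);
        if (!cond)
                mutex_lock(&m);         /* may sleep: triggers __might_sleep() */
        schedule();                     /* may now return immediately */
#endif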

static void print_preempt_disable_ip(int preempt_offset, unsigned long ip)
{
        if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT))
                return;

        if (preempt_count() == preempt_offset)
                return;

        pr_err("Preemption disabled at:");
        print_ip_sym(KERN_ERR, ip);
}

static inline bool resched_offsets_ok(unsigned int offsets)
{
        unsigned int nested = preempt_count();

        nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT;

        return nested == offsets;
}
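
/*
 * Illustrative sketch (editor addition, a fragment only): the 'offsets'
 * encoding implied by the check above -- expected preempt_count() in the
 * low bits, expected RCU read-side nesting depth shifted up by
 * MIGHT_RESCHED_RCU_SHIFT:
 */
#if 0
        /* "may resched with one spinlock-level count plus one RCU read lock" */
        __might_resched(__FILE__, __LINE__,
                        PREEMPT_LOCK_OFFSET | (1U << MIGHT_RESCHED_RCU_SHIFT));
#endif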

void __might_resched(const char *file, int line, unsigned int offsets)
{
        /* Ratelimiting timestamp: */
        static unsigned long prev_jiffy;

        unsigned long preempt_disable_ip;

        /* WARN_ON_ONCE() by default, no rate limit required: */
        rcu_sleep_check();

        if ((resched_offsets_ok(offsets) && !irqs_disabled() &&
             !is_idle_task(current) && !current->non_block_count) ||
            system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
            oops_in_progress)
                return;

        if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
                return;
        prev_jiffy = jiffies;

        /* Save this before calling printk(), since that will clobber it: */
        preempt_disable_ip = get_preempt_disable_ip(current);

        pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
               file, line);
        pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
               in_atomic(), irqs_disabled(), current->non_block_count,
               current->pid, current->comm);
        pr_err("preempt_count: %x, expected: %x\n", preempt_count(),
               offsets & MIGHT_RESCHED_PREEMPT_MASK);

        if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
                pr_err("RCU nest depth: %d, expected: %u\n",
                       rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT);
        }

        if (task_stack_end_corrupted(current))
                pr_emerg("Thread overran stack, or stack corrupted\n");

        debug_show_held_locks(current);
        if (irqs_disabled())
                print_irqtrace_events(current);

        print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK,
                                 preempt_disable_ip);

        dump_stack();
        add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}
EXPORT_SYMBOL(__might_resched);

void __cant_sleep(const char *file, int line, int preempt_offset)
{
        static unsigned long prev_jiffy;

        if (irqs_disabled())
                return;

        if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
                return;

        if (preempt_count() > preempt_offset)
                return;

        if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
                return;
        prev_jiffy = jiffies;

        printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
        printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
               in_atomic(), irqs_disabled(),
               current->pid, current->comm);

        debug_show_held_locks(current);
        dump_stack();
        add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}
EXPORT_SYMBOL_GPL(__cant_sleep);

# ifdef CONFIG_SMP
void __cant_migrate(const char *file, int line)
{
        static unsigned long prev_jiffy;

        if (irqs_disabled())
                return;

        if (is_migration_disabled(current))
                return;

        if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
                return;

        if (preempt_count() > 0)
                return;

        if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
                return;
        prev_jiffy = jiffies;

        pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
        pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
               in_atomic(), irqs_disabled(), is_migration_disabled(current),
               current->pid, current->comm);

        debug_show_held_locks(current);
        dump_stack();
        add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}
EXPORT_SYMBOL_GPL(__cant_migrate);
# endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_ATOMIC_SLEEP */

#ifdef CONFIG_MAGIC_SYSRQ
void normalize_rt_tasks(void)
{
        struct task_struct *g, *p;
        struct sched_attr attr = {
                .sched_policy = SCHED_NORMAL,
        };

        read_lock(&tasklist_lock);
        for_each_process_thread(g, p) {
                /*
                 * Only normalize user tasks:
                 */
                if (p->flags & PF_KTHREAD)
                        continue;

                p->se.exec_start = 0;
                schedstat_set(p->stats.wait_start, 0);
                schedstat_set(p->stats.sleep_start, 0);
                schedstat_set(p->stats.block_start, 0);

                if (!rt_or_dl_task(p)) {
                        /*
                         * Renice negative nice level userspace
                         * tasks back to 0:
                         */
                        if (task_nice(p) < 0)
                                set_user_nice(p, 0);
                        continue;
                }

                __sched_setscheduler(p, &attr, false, false);
        }
        read_unlock(&tasklist_lock);
}

#endif /* CONFIG_MAGIC_SYSRQ */
9029
9030 #ifdef CONFIG_KGDB_KDB
9031 /*
9032 * These functions are only useful for KDB.
9033 *
9034 * They can only be called when the whole system has been
9035 * stopped - every CPU needs to be quiescent, and no scheduling
9036 * activity can take place. Using them for anything else would
9037 * be a serious bug, and as a result, they aren't even visible
9038 * under any other configuration.
9039 */
9040
9041 /**
9042 * curr_task - return the current task for a given CPU.
9043 * @cpu: the processor in question.
9044 *
9045 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
9046 *
9047 * Return: The current task for @cpu.
9048 */
9049 struct task_struct *curr_task(int cpu)
9050 {
9051 return cpu_curr(cpu);
9052 }
9053
9054 #endif /* CONFIG_KGDB_KDB */
9055
9056 #ifdef CONFIG_CGROUP_SCHED
9057 /* task_group_lock serializes the addition/removal of task groups */
9058 static DEFINE_SPINLOCK(task_group_lock);
9059
9060 static inline void alloc_uclamp_sched_group(struct task_group *tg,
9061 struct task_group *parent)
9062 {
9063 #ifdef CONFIG_UCLAMP_TASK_GROUP
9064 enum uclamp_id clamp_id;
9065
9066 for_each_clamp_id(clamp_id) {
9067 uclamp_se_set(&tg->uclamp_req[clamp_id],
9068 uclamp_none(clamp_id), false);
9069 tg->uclamp[clamp_id] = parent->uclamp[clamp_id];
9070 }
9071 #endif
9072 }
9073
9074 static void sched_free_group(struct task_group *tg)
9075 {
9076 free_fair_sched_group(tg);
9077 free_rt_sched_group(tg);
9078 autogroup_free(tg);
9079 kmem_cache_free(task_group_cache, tg);
9080 }
9081
9082 static void sched_free_group_rcu(struct rcu_head *rcu)
9083 {
9084 sched_free_group(container_of(rcu, struct task_group, rcu));
9085 }
9086
9087 static void sched_unregister_group(struct task_group *tg)
9088 {
9089 unregister_fair_sched_group(tg);
9090 unregister_rt_sched_group(tg);
9091 /*
9092 * We have to wait for yet another RCU grace period to expire, as
9093 * print_cfs_stats() might run concurrently.
9094 */
9095 call_rcu(&tg->rcu, sched_free_group_rcu);
9096 }
9097
9098 /* allocate runqueue etc for a new task group */
9099 struct task_group *sched_create_group(struct task_group *parent)
9100 {
9101 struct task_group *tg;
9102
9103 tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
9104 if (!tg)
9105 return ERR_PTR(-ENOMEM);
9106
9107 if (!alloc_fair_sched_group(tg, parent))
9108 goto err;
9109
9110 if (!alloc_rt_sched_group(tg, parent))
9111 goto err;
9112
9113 scx_tg_init(tg);
9114 alloc_uclamp_sched_group(tg, parent);
9115
9116 return tg;
9117
9118 err:
9119 sched_free_group(tg);
9120 return ERR_PTR(-ENOMEM);
9121 }
9122
9123 void sched_online_group(struct task_group *tg, struct task_group *parent)
9124 {
9125 unsigned long flags;
9126
9127 spin_lock_irqsave(&task_group_lock, flags);
9128 list_add_tail_rcu(&tg->list, &task_groups);
9129
9130 /* Root should already exist: */
9131 WARN_ON(!parent);
9132
9133 tg->parent = parent;
9134 INIT_LIST_HEAD(&tg->children);
9135 list_add_rcu(&tg->siblings, &parent->children);
9136 spin_unlock_irqrestore(&task_group_lock, flags);
9137
9138 online_fair_sched_group(tg);
9139 }
9140
9141 /* RCU callback to free various structures associated with a task group */
9142 static void sched_unregister_group_rcu(struct rcu_head *rhp)
9143 {
9144 /* Now it should be safe to free those cfs_rqs: */
9145 sched_unregister_group(container_of(rhp, struct task_group, rcu));
9146 }
9147
9148 void sched_destroy_group(struct task_group *tg)
9149 {
9150 /* Wait for possible concurrent references to cfs_rqs to complete: */
9151 call_rcu(&tg->rcu, sched_unregister_group_rcu);
9152 }
9153
9154 void sched_release_group(struct task_group *tg)
9155 {
9156 unsigned long flags;
9157
9158 /*
9159 * Unlink first, to prevent walk_tg_tree_from() from finding us (via
9160 * sched_cfs_period_timer()).
9161 *
9162 * For this to be effective, we have to wait for all pending users of
9163 * this task group to leave their RCU critical section to ensure no new
9164 * user will see our dying task group any more. Specifically ensure
9165 * that tg_unthrottle_up() won't add decayed cfs_rq's to it.
9166 *
9167 * We therefore defer calling unregister_fair_sched_group() to
9168 * sched_unregister_group(), which is guaranteed to get called only after the
9169 * current RCU grace period has expired.
9170 */
9171 spin_lock_irqsave(&task_group_lock, flags);
9172 list_del_rcu(&tg->list);
9173 list_del_rcu(&tg->siblings);
9174 spin_unlock_irqrestore(&task_group_lock, flags);
9175 }
9176
9177 static void sched_change_group(struct task_struct *tsk)
9178 {
9179 struct task_group *tg;
9180
9181 /*
9182 * All callers are synchronized by task_rq_lock(); we do not use RCU,
9183 * which would be pointless here. Thus, we pass "true" to task_css_check()
9184 * to prevent lockdep warnings.
9185 */
9186 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
9187 struct task_group, css);
9188 tg = autogroup_task_group(tsk, tg);
9189 tsk->sched_task_group = tg;
9190
9191 #ifdef CONFIG_FAIR_GROUP_SCHED
9192 if (tsk->sched_class->task_change_group)
9193 tsk->sched_class->task_change_group(tsk);
9194 else
9195 #endif
9196 set_task_rq(tsk, task_cpu(tsk));
9197 }
9198
9199 /*
9200 * Change task's runqueue when it moves between groups.
9201 *
9202 * The caller of this function should have put the task in its new group by
9203 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
9204 * its new group.
9205 */
9206 void sched_move_task(struct task_struct *tsk, bool for_autogroup)
9207 {
9208 int queued, running, queue_flags =
9209 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
9210 struct rq *rq;
9211
9212 CLASS(task_rq_lock, rq_guard)(tsk);
9213 rq = rq_guard.rq;
9214
9215 update_rq_clock(rq);
9216
9217 running = task_current_donor(rq, tsk);
9218 queued = task_on_rq_queued(tsk);
9219
9220 if (queued)
9221 dequeue_task(rq, tsk, queue_flags);
9222 if (running)
9223 put_prev_task(rq, tsk);
9224
9225 sched_change_group(tsk);
9226 if (!for_autogroup)
9227 scx_cgroup_move_task(tsk);
9228
9229 if (queued)
9230 enqueue_task(rq, tsk, queue_flags);
9231 if (running) {
9232 set_next_task(rq, tsk);
9233 /*
9234 * After changing group, the running task may have joined a
9235 * throttled one but it's still the running task. Trigger a
9236 * resched to make sure that task can still run.
9237 */
9238 resched_curr(rq);
9239 }
9240 }
9241
9242 static struct cgroup_subsys_state *
9243 cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
9244 {
9245 struct task_group *parent = css_tg(parent_css);
9246 struct task_group *tg;
9247
9248 if (!parent) {
9249 /* This is early initialization for the top cgroup */
9250 return &root_task_group.css;
9251 }
9252
9253 tg = sched_create_group(parent);
9254 if (IS_ERR(tg))
9255 return ERR_PTR(-ENOMEM);
9256
9257 return &tg->css;
9258 }
9259
9260 /* Expose task group only after completing cgroup initialization */
9261 static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
9262 {
9263 struct task_group *tg = css_tg(css);
9264 struct task_group *parent = css_tg(css->parent);
9265 int ret;
9266
9267 ret = scx_tg_online(tg);
9268 if (ret)
9269 return ret;
9270
9271 if (parent)
9272 sched_online_group(tg, parent);
9273
9274 #ifdef CONFIG_UCLAMP_TASK_GROUP
9275 /* Propagate the effective uclamp value for the new group */
9276 guard(mutex)(&uclamp_mutex);
9277 guard(rcu)();
9278 cpu_util_update_eff(css);
9279 #endif
9280
9281 return 0;
9282 }
9283
9284 static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
9285 {
9286 struct task_group *tg = css_tg(css);
9287
9288 scx_tg_offline(tg);
9289 }
9290
9291 static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
9292 {
9293 struct task_group *tg = css_tg(css);
9294
9295 sched_release_group(tg);
9296 }
9297
9298 static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
9299 {
9300 struct task_group *tg = css_tg(css);
9301
9302 /*
9303 * Relies on the RCU grace period between css_released() and this.
9304 */
9305 sched_unregister_group(tg);
9306 }
9307
9308 static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
9309 {
9310 #ifdef CONFIG_RT_GROUP_SCHED
9311 struct task_struct *task;
9312 struct cgroup_subsys_state *css;
9313
9314 if (!rt_group_sched_enabled())
9315 goto scx_check;
9316
9317 cgroup_taskset_for_each(task, css, tset) {
9318 if (!sched_rt_can_attach(css_tg(css), task))
9319 return -EINVAL;
9320 }
9321 scx_check:
9322 #endif /* CONFIG_RT_GROUP_SCHED */
9323 return scx_cgroup_can_attach(tset);
9324 }
9325
9326 static void cpu_cgroup_attach(struct cgroup_taskset *tset)
9327 {
9328 struct task_struct *task;
9329 struct cgroup_subsys_state *css;
9330
9331 cgroup_taskset_for_each(task, css, tset)
9332 sched_move_task(task, false);
9333 }
9334
9335 static void cpu_cgroup_cancel_attach(struct cgroup_taskset *tset)
9336 {
9337 scx_cgroup_cancel_attach(tset);
9338 }
9339
9340 #ifdef CONFIG_UCLAMP_TASK_GROUP
9341 static void cpu_util_update_eff(struct cgroup_subsys_state *css)
9342 {
9343 struct cgroup_subsys_state *top_css = css;
9344 struct uclamp_se *uc_parent = NULL;
9345 struct uclamp_se *uc_se = NULL;
9346 unsigned int eff[UCLAMP_CNT];
9347 enum uclamp_id clamp_id;
9348 unsigned int clamps;
9349
9350 lockdep_assert_held(&uclamp_mutex);
9351 WARN_ON_ONCE(!rcu_read_lock_held());
9352
9353 css_for_each_descendant_pre(css, top_css) {
9354 uc_parent = css_tg(css)->parent
9355 ? css_tg(css)->parent->uclamp : NULL;
9356
9357 for_each_clamp_id(clamp_id) {
9358 /* Assume effective clamps match requested clamps */
9359 eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value;
9360 /* Cap effective clamps with parent's effective clamps */
9361 if (uc_parent &&
9362 eff[clamp_id] > uc_parent[clamp_id].value) {
9363 eff[clamp_id] = uc_parent[clamp_id].value;
9364 }
9365 }
9366 /* Ensure protection is always capped by limit */
9367 eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]);
9368
9369 /* Propagate most restrictive effective clamps */
9370 clamps = 0x0;
9371 uc_se = css_tg(css)->uclamp;
9372 for_each_clamp_id(clamp_id) {
9373 if (eff[clamp_id] == uc_se[clamp_id].value)
9374 continue;
9375 uc_se[clamp_id].value = eff[clamp_id];
9376 uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]);
9377 clamps |= (0x1 << clamp_id);
9378 }
9379 if (!clamps) {
9380 css = css_rightmost_descendant(css);
9381 continue;
9382 }
9383
9384 /* Immediately update descendants' RUNNABLE tasks */
9385 uclamp_update_active_tasks(css);
9386 }
9387 }
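
/*
 * A small worked example of the propagation above (values are assumptions
 * for illustration): if a child requests uclamp.min == 600 and
 * uclamp.max == 800 under a parent whose effective clamps are 256/512,
 * the child's effective values become:
 *
 *	eff[UCLAMP_MAX] = min(800, 512) == 512
 *	eff[UCLAMP_MIN] = min(600, 256) == 256
 *
 * The final eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]) step
 * would lower the protection further only if it exceeded the limit.
 * Subtrees whose effective values did not change are skipped wholesale
 * via css_rightmost_descendant().
 */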
9388
9389 /*
9390 * Compute the integer 10^N for a given exponent N by casting the literal
9391 * C expression "1eN" to an integer. Since there is no way to paste a macro
9392 * argument (N) directly into such a literal, use two levels of macros.
9393 */
9394 #define _POW10(exp) ((unsigned int)1e##exp)
9395 #define POW10(exp) _POW10(exp)
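
/*
 * Illustrative expansion (not part of the logic itself): POW10(2) first
 * expands its argument, then _POW10(2) pastes it into the literal "1e2":
 *
 *	POW10(2) == (unsigned int)1e2 == 100
 *
 * With UCLAMP_PERCENT_SHIFT == 2 below, UCLAMP_PERCENT_SCALE is therefore
 * 100 * 100 == 10000, i.e. percentages carry two fractional decimal
 * digits and the value 10000 represents 100.00%.
 */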
9396
9397 struct uclamp_request {
9398 #define UCLAMP_PERCENT_SHIFT 2
9399 #define UCLAMP_PERCENT_SCALE (100 * POW10(UCLAMP_PERCENT_SHIFT))
9400 s64 percent;
9401 u64 util;
9402 int ret;
9403 };
9404
9405 static inline struct uclamp_request
9406 capacity_from_percent(char *buf)
9407 {
9408 struct uclamp_request req = {
9409 .percent = UCLAMP_PERCENT_SCALE,
9410 .util = SCHED_CAPACITY_SCALE,
9411 .ret = 0,
9412 };
9413
9414 buf = strim(buf);
9415 if (strcmp(buf, "max")) {
9416 req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT,
9417 &req.percent);
9418 if (req.ret)
9419 return req;
9420 if ((u64)req.percent > UCLAMP_PERCENT_SCALE) {
9421 req.ret = -ERANGE;
9422 return req;
9423 }
9424
9425 req.util = req.percent << SCHED_CAPACITY_SHIFT;
9426 req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE);
9427 }
9428
9429 return req;
9430 }
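
/*
 * Worked example (assumed input, for illustration only): writing "20.5"
 * makes cgroup_parse_float() return req.percent == 2050 on the
 * UCLAMP_PERCENT_SCALE == 10000 scale. The utilization clamp becomes:
 *
 *	req.util = 2050 << SCHED_CAPACITY_SHIFT;	   -> 2050 * 1024 == 2099200
 *	req.util = DIV_ROUND_CLOSEST_ULL(2099200, 10000);  -> 210
 *
 * so a 20.5% request maps to ~210 on the 1024-wide capacity scale.
 */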
9431
9432 static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf,
9433 size_t nbytes, loff_t off,
9434 enum uclamp_id clamp_id)
9435 {
9436 struct uclamp_request req;
9437 struct task_group *tg;
9438
9439 req = capacity_from_percent(buf);
9440 if (req.ret)
9441 return req.ret;
9442
9443 sched_uclamp_enable();
9444
9445 guard(mutex)(&uclamp_mutex);
9446 guard(rcu)();
9447
9448 tg = css_tg(of_css(of));
9449 if (tg->uclamp_req[clamp_id].value != req.util)
9450 uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false);
9451
9452 /*
9453 * Because the conversion rounding is not recoverable, keep track of the
9454 * exact requested value:
9455 */
9456 tg->uclamp_pct[clamp_id] = req.percent;
9457
9458 /* Update effective clamps to track the most restrictive value */
9459 cpu_util_update_eff(of_css(of));
9460
9461 return nbytes;
9462 }
9463
9464 static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of,
9465 char *buf, size_t nbytes,
9466 loff_t off)
9467 {
9468 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MIN);
9469 }
9470
9471 static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of,
9472 char *buf, size_t nbytes,
9473 loff_t off)
9474 {
9475 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MAX);
9476 }
9477
9478 static inline void cpu_uclamp_print(struct seq_file *sf,
9479 enum uclamp_id clamp_id)
9480 {
9481 struct task_group *tg;
9482 u64 util_clamp;
9483 u64 percent;
9484 u32 rem;
9485
9486 scoped_guard (rcu) {
9487 tg = css_tg(seq_css(sf));
9488 util_clamp = tg->uclamp_req[clamp_id].value;
9489 }
9490
9491 if (util_clamp == SCHED_CAPACITY_SCALE) {
9492 seq_puts(sf, "max\n");
9493 return;
9494 }
9495
9496 percent = tg->uclamp_pct[clamp_id];
9497 percent = div_u64_rem(percent, POW10(UCLAMP_PERCENT_SHIFT), &rem);
9498 seq_printf(sf, "%llu.%0*u\n", percent, UCLAMP_PERCENT_SHIFT, rem);
9499 }
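
/*
 * Output example (illustrative): with tg->uclamp_pct[clamp_id] == 2050
 * and UCLAMP_PERCENT_SHIFT == 2, div_u64_rem(2050, 100, &rem) yields
 * percent == 20 and rem == 50, which seq_printf() renders as "20.50".
 * A request of SCHED_CAPACITY_SCALE is printed as "max" instead.
 */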
9500
9501 static int cpu_uclamp_min_show(struct seq_file *sf, void *v)
9502 {
9503 cpu_uclamp_print(sf, UCLAMP_MIN);
9504 return 0;
9505 }
9506
9507 static int cpu_uclamp_max_show(struct seq_file *sf, void *v)
9508 {
9509 cpu_uclamp_print(sf, UCLAMP_MAX);
9510 return 0;
9511 }
9512 #endif /* CONFIG_UCLAMP_TASK_GROUP */
9513
9514 #ifdef CONFIG_GROUP_SCHED_WEIGHT
9515 static unsigned long tg_weight(struct task_group *tg)
9516 {
9517 #ifdef CONFIG_FAIR_GROUP_SCHED
9518 return scale_load_down(tg->shares);
9519 #else
9520 return sched_weight_from_cgroup(tg->scx.weight);
9521 #endif
9522 }
9523
9524 static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
9525 struct cftype *cftype, u64 shareval)
9526 {
9527 int ret;
9528
9529 if (shareval > scale_load_down(ULONG_MAX))
9530 shareval = MAX_SHARES;
9531 ret = sched_group_set_shares(css_tg(css), scale_load(shareval));
9532 if (!ret)
9533 scx_group_set_weight(css_tg(css),
9534 sched_weight_to_cgroup(shareval));
9535 return ret;
9536 }
9537
9538 static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
9539 struct cftype *cft)
9540 {
9541 return tg_weight(css_tg(css));
9542 }
9543 #endif /* CONFIG_GROUP_SCHED_WEIGHT */
9544
9545 #ifdef CONFIG_CFS_BANDWIDTH
9546 static DEFINE_MUTEX(cfs_constraints_mutex);
9547
9548 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
9549
9550 static int tg_set_cfs_bandwidth(struct task_group *tg,
9551 u64 period_us, u64 quota_us, u64 burst_us)
9552 {
9553 int i, ret = 0, runtime_enabled, runtime_was_enabled;
9554 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9555 u64 period, quota, burst;
9556
9557 period = (u64)period_us * NSEC_PER_USEC;
9558
9559 if (quota_us == RUNTIME_INF)
9560 quota = RUNTIME_INF;
9561 else
9562 quota = (u64)quota_us * NSEC_PER_USEC;
9563
9564 burst = (u64)burst_us * NSEC_PER_USEC;
9565
9566 /*
9567 * Prevent race between setting of cfs_rq->runtime_enabled and
9568 * unthrottle_offline_cfs_rqs().
9569 */
9570 guard(cpus_read_lock)();
9571 guard(mutex)(&cfs_constraints_mutex);
9572
9573 ret = __cfs_schedulable(tg, period, quota);
9574 if (ret)
9575 return ret;
9576
9577 runtime_enabled = quota != RUNTIME_INF;
9578 runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
9579 /*
9580 * If we need to toggle cfs_bandwidth_used, off->on must occur
9581 * before making related changes, and on->off must occur afterwards
9582 */
9583 if (runtime_enabled && !runtime_was_enabled)
9584 cfs_bandwidth_usage_inc();
9585
9586 scoped_guard (raw_spinlock_irq, &cfs_b->lock) {
9587 cfs_b->period = ns_to_ktime(period);
9588 cfs_b->quota = quota;
9589 cfs_b->burst = burst;
9590
9591 __refill_cfs_bandwidth_runtime(cfs_b);
9592
9593 /*
9594 * Restart the period timer (if active) to handle new
9595 * period expiry:
9596 */
9597 if (runtime_enabled)
9598 start_cfs_bandwidth(cfs_b);
9599 }
9600
9601 for_each_online_cpu(i) {
9602 struct cfs_rq *cfs_rq = tg->cfs_rq[i];
9603 struct rq *rq = cfs_rq->rq;
9604
9605 guard(rq_lock_irq)(rq);
9606 cfs_rq->runtime_enabled = runtime_enabled;
9607 cfs_rq->runtime_remaining = 0;
9608
9609 if (cfs_rq->throttled)
9610 unthrottle_cfs_rq(cfs_rq);
9611 }
9612
9613 if (runtime_was_enabled && !runtime_enabled)
9614 cfs_bandwidth_usage_dec();
9615
9616 return 0;
9617 }
9618
9619 static u64 tg_get_cfs_period(struct task_group *tg)
9620 {
9621 u64 cfs_period_us;
9622
9623 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
9624 do_div(cfs_period_us, NSEC_PER_USEC);
9625
9626 return cfs_period_us;
9627 }
9628
9629 static u64 tg_get_cfs_quota(struct task_group *tg)
9630 {
9631 u64 quota_us;
9632
9633 if (tg->cfs_bandwidth.quota == RUNTIME_INF)
9634 return RUNTIME_INF;
9635
9636 quota_us = tg->cfs_bandwidth.quota;
9637 do_div(quota_us, NSEC_PER_USEC);
9638
9639 return quota_us;
9640 }
9641
9642 static u64 tg_get_cfs_burst(struct task_group *tg)
9643 {
9644 u64 burst_us;
9645
9646 burst_us = tg->cfs_bandwidth.burst;
9647 do_div(burst_us, NSEC_PER_USEC);
9648
9649 return burst_us;
9650 }
9651
9652 struct cfs_schedulable_data {
9653 struct task_group *tg;
9654 u64 period, quota;
9655 };
9656
9657 /*
9658 * normalize group quota/period to be quota/max_period
9659 * note: units are usecs
9660 */
9661 static u64 normalize_cfs_quota(struct task_group *tg,
9662 struct cfs_schedulable_data *d)
9663 {
9664 u64 quota, period;
9665
9666 if (tg == d->tg) {
9667 period = d->period;
9668 quota = d->quota;
9669 } else {
9670 period = tg_get_cfs_period(tg);
9671 quota = tg_get_cfs_quota(tg);
9672 }
9673
9674 /* note: these should typically be equivalent */
9675 if (quota == RUNTIME_INF || quota == -1)
9676 return RUNTIME_INF;
9677
9678 return to_ratio(period, quota);
9679 }
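
/*
 * Example (illustrative): with period == 100000us and quota == 50000us,
 * to_ratio() (see sched.h) returns (50000 << BW_SHIFT) / 100000 == 524288,
 * i.e. exactly half of BW_UNIT (1 << 20), representing half a CPU.
 */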
9680
9681 static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
9682 {
9683 struct cfs_schedulable_data *d = data;
9684 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9685 s64 quota = 0, parent_quota = -1;
9686
9687 if (!tg->parent) {
9688 quota = RUNTIME_INF;
9689 } else {
9690 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
9691
9692 quota = normalize_cfs_quota(tg, d);
9693 parent_quota = parent_b->hierarchical_quota;
9694
9695 /*
9696 * Ensure max(child_quota) <= parent_quota. On cgroup2,
9697 * always take the non-RUNTIME_INF min. On cgroup1, only
9698 * inherit when no limit is set. In both cases this is used
9699 * by the scheduler to determine if a given CFS task has a
9700 * bandwidth constraint at some higher level.
9701 */
9702 if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) {
9703 if (quota == RUNTIME_INF)
9704 quota = parent_quota;
9705 else if (parent_quota != RUNTIME_INF)
9706 quota = min(quota, parent_quota);
9707 } else {
9708 if (quota == RUNTIME_INF)
9709 quota = parent_quota;
9710 else if (parent_quota != RUNTIME_INF && quota > parent_quota)
9711 return -EINVAL;
9712 }
9713 }
9714 cfs_b->hierarchical_quota = quota;
9715
9716 return 0;
9717 }
9718
9719 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
9720 {
9721 struct cfs_schedulable_data data = {
9722 .tg = tg,
9723 .period = period,
9724 .quota = quota,
9725 };
9726
9727 if (quota != RUNTIME_INF) {
9728 do_div(data.period, NSEC_PER_USEC);
9729 do_div(data.quota, NSEC_PER_USEC);
9730 }
9731
9732 guard(rcu)();
9733 return walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
9734 }
9735
9736 static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
9737 {
9738 struct task_group *tg = css_tg(seq_css(sf));
9739 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9740
9741 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
9742 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
9743 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
9744
9745 if (schedstat_enabled() && tg != &root_task_group) {
9746 struct sched_statistics *stats;
9747 u64 ws = 0;
9748 int i;
9749
9750 for_each_possible_cpu(i) {
9751 stats = __schedstats_from_se(tg->se[i]);
9752 ws += schedstat_val(stats->wait_sum);
9753 }
9754
9755 seq_printf(sf, "wait_sum %llu\n", ws);
9756 }
9757
9758 seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst);
9759 seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time);
9760
9761 return 0;
9762 }
9763
9764 static u64 throttled_time_self(struct task_group *tg)
9765 {
9766 int i;
9767 u64 total = 0;
9768
9769 for_each_possible_cpu(i) {
9770 total += READ_ONCE(tg->cfs_rq[i]->throttled_clock_self_time);
9771 }
9772
9773 return total;
9774 }
9775
9776 static int cpu_cfs_local_stat_show(struct seq_file *sf, void *v)
9777 {
9778 struct task_group *tg = css_tg(seq_css(sf));
9779
9780 seq_printf(sf, "throttled_time %llu\n", throttled_time_self(tg));
9781
9782 return 0;
9783 }
9784 #endif /* CONFIG_CFS_BANDWIDTH */
9785
9786 #ifdef CONFIG_GROUP_SCHED_BANDWIDTH
9787 const u64 max_bw_quota_period_us = 1 * USEC_PER_SEC; /* 1s */
9788 static const u64 min_bw_quota_period_us = 1 * USEC_PER_MSEC; /* 1ms */
9789 /* More than 203 days if BW_SHIFT equals 20. */
9790 static const u64 max_bw_runtime_us = MAX_BW;
9791
9792 static void tg_bandwidth(struct task_group *tg,
9793 u64 *period_us_p, u64 *quota_us_p, u64 *burst_us_p)
9794 {
9795 #ifdef CONFIG_CFS_BANDWIDTH
9796 if (period_us_p)
9797 *period_us_p = tg_get_cfs_period(tg);
9798 if (quota_us_p)
9799 *quota_us_p = tg_get_cfs_quota(tg);
9800 if (burst_us_p)
9801 *burst_us_p = tg_get_cfs_burst(tg);
9802 #else /* !CONFIG_CFS_BANDWIDTH */
9803 if (period_us_p)
9804 *period_us_p = tg->scx.bw_period_us;
9805 if (quota_us_p)
9806 *quota_us_p = tg->scx.bw_quota_us;
9807 if (burst_us_p)
9808 *burst_us_p = tg->scx.bw_burst_us;
9809 #endif /* CONFIG_CFS_BANDWIDTH */
9810 }
9811
9812 static u64 cpu_period_read_u64(struct cgroup_subsys_state *css,
9813 struct cftype *cft)
9814 {
9815 u64 period_us;
9816
9817 tg_bandwidth(css_tg(css), &period_us, NULL, NULL);
9818 return period_us;
9819 }
9820
9821 static int tg_set_bandwidth(struct task_group *tg,
9822 u64 period_us, u64 quota_us, u64 burst_us)
9823 {
9824 const u64 max_usec = U64_MAX / NSEC_PER_USEC;
9825 int ret = 0;
9826
9827 if (tg == &root_task_group)
9828 return -EINVAL;
9829
9830 /* Values should survive translation to nsec */
9831 if (period_us > max_usec ||
9832 (quota_us != RUNTIME_INF && quota_us > max_usec) ||
9833 burst_us > max_usec)
9834 return -EINVAL;
9835
9836 /*
9837 * Ensure we have some amount of bandwidth every period. This is to
9838 * prevent reaching a state of large arrears when throttled via
9839 * entity_tick(), which would result in prolonged exit starvation.
9840 */
9841 if (quota_us < min_bw_quota_period_us ||
9842 period_us < min_bw_quota_period_us)
9843 return -EINVAL;
9844
9845 /*
9846 * Likewise, bound things on the other side by preventing insane quota
9847 * periods. This also allows us to normalize in computing quota
9848 * feasibility.
9849 */
9850 if (period_us > max_bw_quota_period_us)
9851 return -EINVAL;
9852
9853 /*
9854 * Bound quota to defend quota against overflow during bandwidth shift.
9855 */
9856 if (quota_us != RUNTIME_INF && quota_us > max_bw_runtime_us)
9857 return -EINVAL;
9858
9859 if (quota_us != RUNTIME_INF && (burst_us > quota_us ||
9860 burst_us + quota_us > max_bw_runtime_us))
9861 return -EINVAL;
9862
9863 #ifdef CONFIG_CFS_BANDWIDTH
9864 ret = tg_set_cfs_bandwidth(tg, period_us, quota_us, burst_us);
9865 #endif /* CONFIG_CFS_BANDWIDTH */
9866 if (!ret)
9867 scx_group_set_bandwidth(tg, period_us, quota_us, burst_us);
9868 return ret;
9869 }
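
/*
 * Worked example of the checks above (values are illustrative): writing
 * period_us == 100000 and quota_us == 50000 requests 50ms of runtime every
 * 100ms, i.e. half a CPU. Both values lie inside
 * [min_bw_quota_period_us, max_bw_quota_period_us] == [1000, 1000000], so
 * the request is accepted; quota_us == 500 would be rejected with -EINVAL.
 */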
9870
9871 static s64 cpu_quota_read_s64(struct cgroup_subsys_state *css,
9872 struct cftype *cft)
9873 {
9874 u64 quota_us;
9875
9876 tg_bandwidth(css_tg(css), NULL, "a_us, NULL);
9877 return quota_us; /* (s64)RUNTIME_INF becomes -1 */
9878 }
9879
9880 static u64 cpu_burst_read_u64(struct cgroup_subsys_state *css,
9881 struct cftype *cft)
9882 {
9883 u64 burst_us;
9884
9885 tg_bandwidth(css_tg(css), NULL, NULL, &burst_us);
9886 return burst_us;
9887 }
9888
9889 static int cpu_period_write_u64(struct cgroup_subsys_state *css,
9890 struct cftype *cftype, u64 period_us)
9891 {
9892 struct task_group *tg = css_tg(css);
9893 u64 quota_us, burst_us;
9894
9895 tg_bandwidth(tg, NULL, "a_us, &burst_us);
9896 return tg_set_bandwidth(tg, period_us, quota_us, burst_us);
9897 }
9898
9899 static int cpu_quota_write_s64(struct cgroup_subsys_state *css,
9900 struct cftype *cftype, s64 quota_us)
9901 {
9902 struct task_group *tg = css_tg(css);
9903 u64 period_us, burst_us;
9904
9905 if (quota_us < 0)
9906 quota_us = RUNTIME_INF;
9907
9908 tg_bandwidth(tg, &period_us, NULL, &burst_us);
9909 return tg_set_bandwidth(tg, period_us, quota_us, burst_us);
9910 }
9911
9912 static int cpu_burst_write_u64(struct cgroup_subsys_state *css,
9913 struct cftype *cftype, u64 burst_us)
9914 {
9915 struct task_group *tg = css_tg(css);
9916 u64 period_us, quota_us;
9917
9918 tg_bandwidth(tg, &period_us, "a_us, NULL);
9919 return tg_set_bandwidth(tg, period_us, quota_us, burst_us);
9920 }
9921 #endif /* CONFIG_GROUP_SCHED_BANDWIDTH */
9922
9923 #ifdef CONFIG_RT_GROUP_SCHED
9924 static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
9925 struct cftype *cft, s64 val)
9926 {
9927 return sched_group_set_rt_runtime(css_tg(css), val);
9928 }
9929
9930 static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
9931 struct cftype *cft)
9932 {
9933 return sched_group_rt_runtime(css_tg(css));
9934 }
9935
9936 static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
9937 struct cftype *cftype, u64 rt_period_us)
9938 {
9939 return sched_group_set_rt_period(css_tg(css), rt_period_us);
9940 }
9941
9942 static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
9943 struct cftype *cft)
9944 {
9945 return sched_group_rt_period(css_tg(css));
9946 }
9947 #endif /* CONFIG_RT_GROUP_SCHED */
9948
9949 #ifdef CONFIG_GROUP_SCHED_WEIGHT
9950 static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css,
9951 struct cftype *cft)
9952 {
9953 return css_tg(css)->idle;
9954 }
9955
9956 static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
9957 struct cftype *cft, s64 idle)
9958 {
9959 int ret;
9960
9961 ret = sched_group_set_idle(css_tg(css), idle);
9962 if (!ret)
9963 scx_group_set_idle(css_tg(css), idle);
9964 return ret;
9965 }
9966 #endif /* CONFIG_GROUP_SCHED_WEIGHT */
9967
9968 static struct cftype cpu_legacy_files[] = {
9969 #ifdef CONFIG_GROUP_SCHED_WEIGHT
9970 {
9971 .name = "shares",
9972 .read_u64 = cpu_shares_read_u64,
9973 .write_u64 = cpu_shares_write_u64,
9974 },
9975 {
9976 .name = "idle",
9977 .read_s64 = cpu_idle_read_s64,
9978 .write_s64 = cpu_idle_write_s64,
9979 },
9980 #endif
9981 #ifdef CONFIG_GROUP_SCHED_BANDWIDTH
9982 {
9983 .name = "cfs_period_us",
9984 .read_u64 = cpu_period_read_u64,
9985 .write_u64 = cpu_period_write_u64,
9986 },
9987 {
9988 .name = "cfs_quota_us",
9989 .read_s64 = cpu_quota_read_s64,
9990 .write_s64 = cpu_quota_write_s64,
9991 },
9992 {
9993 .name = "cfs_burst_us",
9994 .read_u64 = cpu_burst_read_u64,
9995 .write_u64 = cpu_burst_write_u64,
9996 },
9997 #endif
9998 #ifdef CONFIG_CFS_BANDWIDTH
9999 {
10000 .name = "stat",
10001 .seq_show = cpu_cfs_stat_show,
10002 },
10003 {
10004 .name = "stat.local",
10005 .seq_show = cpu_cfs_local_stat_show,
10006 },
10007 #endif
10008 #ifdef CONFIG_UCLAMP_TASK_GROUP
10009 {
10010 .name = "uclamp.min",
10011 .flags = CFTYPE_NOT_ON_ROOT,
10012 .seq_show = cpu_uclamp_min_show,
10013 .write = cpu_uclamp_min_write,
10014 },
10015 {
10016 .name = "uclamp.max",
10017 .flags = CFTYPE_NOT_ON_ROOT,
10018 .seq_show = cpu_uclamp_max_show,
10019 .write = cpu_uclamp_max_write,
10020 },
10021 #endif
10022 { } /* Terminate */
10023 };
10024
10025 #ifdef CONFIG_RT_GROUP_SCHED
10026 static struct cftype rt_group_files[] = {
10027 {
10028 .name = "rt_runtime_us",
10029 .read_s64 = cpu_rt_runtime_read,
10030 .write_s64 = cpu_rt_runtime_write,
10031 },
10032 {
10033 .name = "rt_period_us",
10034 .read_u64 = cpu_rt_period_read_uint,
10035 .write_u64 = cpu_rt_period_write_uint,
10036 },
10037 { } /* Terminate */
10038 };
10039
10040 # ifdef CONFIG_RT_GROUP_SCHED_DEFAULT_DISABLED
10041 DEFINE_STATIC_KEY_FALSE(rt_group_sched);
10042 # else
10043 DEFINE_STATIC_KEY_TRUE(rt_group_sched);
10044 # endif
10045
10046 static int __init setup_rt_group_sched(char *str)
10047 {
10048 long val;
10049
10050 if (kstrtol(str, 0, &val) || val < 0 || val > 1) {
10051 pr_warn("Unable to set rt_group_sched\n");
10052 return 1;
10053 }
10054 if (val)
10055 static_branch_enable(&rt_group_sched);
10056 else
10057 static_branch_disable(&rt_group_sched);
10058
10059 return 1;
10060 }
10061 __setup("rt_group_sched=", setup_rt_group_sched);
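
/*
 * Usage example: booting with "rt_group_sched=1" on the kernel command
 * line force-enables the static key even when
 * CONFIG_RT_GROUP_SCHED_DEFAULT_DISABLED selects the off-by-default
 * variant, while "rt_group_sched=0" disables it. Any other value logs a
 * warning and leaves the build-time default untouched.
 */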
10062
10063 static int __init cpu_rt_group_init(void)
10064 {
10065 if (!rt_group_sched_enabled())
10066 return 0;
10067
10068 WARN_ON(cgroup_add_legacy_cftypes(&cpu_cgrp_subsys, rt_group_files));
10069 return 0;
10070 }
10071 subsys_initcall(cpu_rt_group_init);
10072 #endif /* CONFIG_RT_GROUP_SCHED */
10073
10074 static int cpu_extra_stat_show(struct seq_file *sf,
10075 struct cgroup_subsys_state *css)
10076 {
10077 #ifdef CONFIG_CFS_BANDWIDTH
10078 {
10079 struct task_group *tg = css_tg(css);
10080 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
10081 u64 throttled_usec, burst_usec;
10082
10083 throttled_usec = cfs_b->throttled_time;
10084 do_div(throttled_usec, NSEC_PER_USEC);
10085 burst_usec = cfs_b->burst_time;
10086 do_div(burst_usec, NSEC_PER_USEC);
10087
10088 seq_printf(sf, "nr_periods %d\n"
10089 "nr_throttled %d\n"
10090 "throttled_usec %llu\n"
10091 "nr_bursts %d\n"
10092 "burst_usec %llu\n",
10093 cfs_b->nr_periods, cfs_b->nr_throttled,
10094 throttled_usec, cfs_b->nr_burst, burst_usec);
10095 }
10096 #endif /* CONFIG_CFS_BANDWIDTH */
10097 return 0;
10098 }
10099
10100 static int cpu_local_stat_show(struct seq_file *sf,
10101 struct cgroup_subsys_state *css)
10102 {
10103 #ifdef CONFIG_CFS_BANDWIDTH
10104 {
10105 struct task_group *tg = css_tg(css);
10106 u64 throttled_self_usec;
10107
10108 throttled_self_usec = throttled_time_self(tg);
10109 do_div(throttled_self_usec, NSEC_PER_USEC);
10110
10111 seq_printf(sf, "throttled_usec %llu\n",
10112 throttled_self_usec);
10113 }
10114 #endif
10115 return 0;
10116 }
10117
10118 #ifdef CONFIG_GROUP_SCHED_WEIGHT
10119
10120 static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
10121 struct cftype *cft)
10122 {
10123 return sched_weight_to_cgroup(tg_weight(css_tg(css)));
10124 }
10125
10126 static int cpu_weight_write_u64(struct cgroup_subsys_state *css,
10127 struct cftype *cft, u64 cgrp_weight)
10128 {
10129 unsigned long weight;
10130 int ret;
10131
10132 if (cgrp_weight < CGROUP_WEIGHT_MIN || cgrp_weight > CGROUP_WEIGHT_MAX)
10133 return -ERANGE;
10134
10135 weight = sched_weight_from_cgroup(cgrp_weight);
10136
10137 ret = sched_group_set_shares(css_tg(css), scale_load(weight));
10138 if (!ret)
10139 scx_group_set_weight(css_tg(css), cgrp_weight);
10140 return ret;
10141 }
10142
10143 static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css,
10144 struct cftype *cft)
10145 {
10146 unsigned long weight = tg_weight(css_tg(css));
10147 int last_delta = INT_MAX;
10148 int prio, delta;
10149
10150 /* find the closest nice value to the current weight */
10151 for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) {
10152 delta = abs(sched_prio_to_weight[prio] - weight);
10153 if (delta >= last_delta)
10154 break;
10155 last_delta = delta;
10156 }
10157
10158 return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO);
10159 }
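
/*
 * Example (illustrative): for a weight of 900 the scan above keeps going
 * while the distance shrinks: |1024 - 900| == 124 at prio 20, then
 * |820 - 900| == 80 at prio 21, then |655 - 900| == 245 at prio 22, where
 * it breaks. The result is PRIO_TO_NICE(22 - 1 + MAX_RT_PRIO) == 1, i.e.
 * weight 820 (nice 1) is the closest table entry to 900.
 */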
10160
10161 static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
10162 struct cftype *cft, s64 nice)
10163 {
10164 unsigned long weight;
10165 int idx, ret;
10166
10167 if (nice < MIN_NICE || nice > MAX_NICE)
10168 return -ERANGE;
10169
10170 idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO;
10171 idx = array_index_nospec(idx, 40);
10172 weight = sched_prio_to_weight[idx];
10173
10174 ret = sched_group_set_shares(css_tg(css), scale_load(weight));
10175 if (!ret)
10176 scx_group_set_weight(css_tg(css),
10177 sched_weight_to_cgroup(weight));
10178 return ret;
10179 }
10180 #endif /* CONFIG_GROUP_SCHED_WEIGHT */
10181
10182 static void __maybe_unused cpu_period_quota_print(struct seq_file *sf,
10183 long period, long quota)
10184 {
10185 if (quota < 0)
10186 seq_puts(sf, "max");
10187 else
10188 seq_printf(sf, "%ld", quota);
10189
10190 seq_printf(sf, " %ld\n", period);
10191 }
10192
10193 /* caller should put the current value in *@period_us_p before calling */
10194 static int __maybe_unused cpu_period_quota_parse(char *buf, u64 *period_us_p,
10195 u64 *quota_us_p)
10196 {
10197 char tok[21]; /* U64_MAX */
10198
10199 if (sscanf(buf, "%20s %llu", tok, period_us_p) < 1)
10200 return -EINVAL;
10201
10202 if (sscanf(tok, "%llu", quota_us_p) < 1) {
10203 if (!strcmp(tok, "max"))
10204 *quota_us_p = RUNTIME_INF;
10205 else
10206 return -EINVAL;
10207 }
10208
10209 return 0;
10210 }
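
/*
 * Example inputs accepted by the parser above (illustrative):
 *
 *	"max 100000"	-> *quota_us_p == RUNTIME_INF, *period_us_p == 100000
 *	"50000 100000"	-> *quota_us_p == 50000,       *period_us_p == 100000
 *	"50000"		-> *quota_us_p == 50000,       period left as pre-loaded
 *
 * The single-token form works because the caller pre-loads *@period_us_p
 * with the current period, per the comment above.
 */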
10211
10212 #ifdef CONFIG_GROUP_SCHED_BANDWIDTH
10213 static int cpu_max_show(struct seq_file *sf, void *v)
10214 {
10215 struct task_group *tg = css_tg(seq_css(sf));
10216 u64 period_us, quota_us;
10217
10218 tg_bandwidth(tg, &period_us, "a_us, NULL);
10219 cpu_period_quota_print(sf, period_us, quota_us);
10220 return 0;
10221 }
10222
10223 static ssize_t cpu_max_write(struct kernfs_open_file *of,
10224 char *buf, size_t nbytes, loff_t off)
10225 {
10226 struct task_group *tg = css_tg(of_css(of));
10227 u64 period_us, quota_us, burst_us;
10228 int ret;
10229
10230 tg_bandwidth(tg, &period_us, NULL, &burst_us);
10231 ret = cpu_period_quota_parse(buf, &period_us, "a_us);
10232 if (!ret)
10233 ret = tg_set_bandwidth(tg, period_us, quota_us, burst_us);
10234 return ret ?: nbytes;
10235 }
10236 #endif /* CONFIG_GROUP_SCHED_BANDWIDTH */
10237
10238 static struct cftype cpu_files[] = {
10239 #ifdef CONFIG_GROUP_SCHED_WEIGHT
10240 {
10241 .name = "weight",
10242 .flags = CFTYPE_NOT_ON_ROOT,
10243 .read_u64 = cpu_weight_read_u64,
10244 .write_u64 = cpu_weight_write_u64,
10245 },
10246 {
10247 .name = "weight.nice",
10248 .flags = CFTYPE_NOT_ON_ROOT,
10249 .read_s64 = cpu_weight_nice_read_s64,
10250 .write_s64 = cpu_weight_nice_write_s64,
10251 },
10252 {
10253 .name = "idle",
10254 .flags = CFTYPE_NOT_ON_ROOT,
10255 .read_s64 = cpu_idle_read_s64,
10256 .write_s64 = cpu_idle_write_s64,
10257 },
10258 #endif
10259 #ifdef CONFIG_GROUP_SCHED_BANDWIDTH
10260 {
10261 .name = "max",
10262 .flags = CFTYPE_NOT_ON_ROOT,
10263 .seq_show = cpu_max_show,
10264 .write = cpu_max_write,
10265 },
10266 {
10267 .name = "max.burst",
10268 .flags = CFTYPE_NOT_ON_ROOT,
10269 .read_u64 = cpu_burst_read_u64,
10270 .write_u64 = cpu_burst_write_u64,
10271 },
10272 #endif /* CONFIG_GROUP_SCHED_BANDWIDTH */
10273 #ifdef CONFIG_UCLAMP_TASK_GROUP
10274 {
10275 .name = "uclamp.min",
10276 .flags = CFTYPE_NOT_ON_ROOT,
10277 .seq_show = cpu_uclamp_min_show,
10278 .write = cpu_uclamp_min_write,
10279 },
10280 {
10281 .name = "uclamp.max",
10282 .flags = CFTYPE_NOT_ON_ROOT,
10283 .seq_show = cpu_uclamp_max_show,
10284 .write = cpu_uclamp_max_write,
10285 },
10286 #endif /* CONFIG_UCLAMP_TASK_GROUP */
10287 { } /* terminate */
10288 };
10289
10290 struct cgroup_subsys cpu_cgrp_subsys = {
10291 .css_alloc = cpu_cgroup_css_alloc,
10292 .css_online = cpu_cgroup_css_online,
10293 .css_offline = cpu_cgroup_css_offline,
10294 .css_released = cpu_cgroup_css_released,
10295 .css_free = cpu_cgroup_css_free,
10296 .css_extra_stat_show = cpu_extra_stat_show,
10297 .css_local_stat_show = cpu_local_stat_show,
10298 .can_attach = cpu_cgroup_can_attach,
10299 .attach = cpu_cgroup_attach,
10300 .cancel_attach = cpu_cgroup_cancel_attach,
10301 .legacy_cftypes = cpu_legacy_files,
10302 .dfl_cftypes = cpu_files,
10303 .early_init = true,
10304 .threaded = true,
10305 };
10306
10307 #endif /* CONFIG_CGROUP_SCHED */
10308
10309 void dump_cpu_task(int cpu)
10310 {
10311 if (in_hardirq() && cpu == smp_processor_id()) {
10312 struct pt_regs *regs;
10313
10314 regs = get_irq_regs();
10315 if (regs) {
10316 show_regs(regs);
10317 return;
10318 }
10319 }
10320
10321 if (trigger_single_cpu_backtrace(cpu))
10322 return;
10323
10324 pr_info("Task dump for CPU %d:\n", cpu);
10325 sched_show_task(cpu_curr(cpu));
10326 }
10327
10328 /*
10329 * Nice levels are multiplicative, with a gentle 10% change for every
10330 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
10331 * nice 1, it will get ~10% less CPU time than another CPU-bound task
10332 * that remained on nice 0.
10333 *
10334 * The "10% effect" is relative and cumulative: from _any_ nice level,
10335 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
10336 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
10337 * If a task goes up by ~10% and another task goes down by ~10% then
10338 * the relative distance between them is ~25%.)
10339 */
10340 const int sched_prio_to_weight[40] = {
10341 /* -20 */ 88761, 71755, 56483, 46273, 36291,
10342 /* -15 */ 29154, 23254, 18705, 14949, 11916,
10343 /* -10 */ 9548, 7620, 6100, 4904, 3906,
10344 /* -5 */ 3121, 2501, 1991, 1586, 1277,
10345 /* 0 */ 1024, 820, 655, 526, 423,
10346 /* 5 */ 335, 272, 215, 172, 137,
10347 /* 10 */ 110, 87, 70, 56, 45,
10348 /* 15 */ 36, 29, 23, 18, 15,
10349 };
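
/*
 * Worked example of the "10% effect" (illustrative): two CPU-bound tasks
 * at nice 0 weigh 1024 each and split the CPU 50%/50%. Renicing one of
 * them to nice 1 drops its weight to 820, leaving it with
 * 820 / (820 + 1024) ~= 44.5% of the CPU and its competitor with ~55.5%,
 * roughly the documented 10% swing. Each table entry is ~1.25x the next
 * one, e.g. 1024 / 820 ~= 1.25.
 */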
10350
10351 /*
10352 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, pre-calculated.
10353 *
10354 * In cases where the weight does not change often, we can use the
10355 * pre-calculated inverse to speed up arithmetic by turning divisions
10356 * into multiplications:
10357 */
10358 const u32 sched_prio_to_wmult[40] = {
10359 /* -20 */ 48388, 59856, 76040, 92818, 118348,
10360 /* -15 */ 147320, 184698, 229616, 287308, 360437,
10361 /* -10 */ 449829, 563644, 704093, 875809, 1099582,
10362 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
10363 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
10364 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
10365 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
10366 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
10367 };
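
/*
 * Illustrative use of the inverse table: dividing by a weight becomes a
 * multiply and a shift. For the nice-0 weight 1024:
 *
 *	delta / 1024 ~= (delta * sched_prio_to_wmult[20]) >> 32
 *
 * since sched_prio_to_wmult[20] == 4194304 == 2^32 / 1024. This mirrors
 * the fixed-point trick used by __calc_delta() in fair.c.
 */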
10368
10369 void call_trace_sched_update_nr_running(struct rq *rq, int count)
10370 {
10371 trace_sched_update_nr_running_tp(rq, count);
10372 }
10373
10374 #ifdef CONFIG_SCHED_MM_CID
10375
10376 /*
10377 * @cid_lock: Guarantee forward-progress of cid allocation.
10378 *
10379 * Concurrency ID allocation within a bitmap is mostly lock-free. The cid_lock
10380 * is only used when contention is detected by the lock-free allocation,
10381 * so that forward progress can be guaranteed.
10382 */
10383 DEFINE_RAW_SPINLOCK(cid_lock);
10384
10385 /*
10386 * @use_cid_lock: Select cid allocation behavior: lock-free vs spinlock.
10387 *
10388 * When @use_cid_lock is 0, the cid allocation is lock-free. When contention is
10389 * detected, it is set to 1 to ensure that all newly coming allocations are
10390 * serialized by @cid_lock until the allocation which detected contention
10391 * completes and sets @use_cid_lock back to 0. This guarantees forward progress
10392 * of a cid allocation.
10393 */
10394 int use_cid_lock;
10395
10396 /*
10397 * mm_cid remote-clear implements a lock-free algorithm to clear per-mm/cpu cid
10398 * concurrently with respect to the execution of the source runqueue context
10399 * switch.
10400 *
10401 * There is one basic property we want to guarantee here:
10402 *
10403 * (1) Remote-clear should _never_ mark a per-cpu cid UNSET when it is actively
10404 * used by a task. That would lead to concurrent allocation of the cid and
10405 * userspace corruption.
10406 *
10407 * Provide this guarantee by introducing a Dekker memory ordering to guarantee
10408 * that a pair of loads observe at least one of a pair of stores, which can be
10409 * shown as:
10410 *
10411 * X = Y = 0
10412 *
10413 * w[X]=1 w[Y]=1
10414 * MB MB
10415 * r[Y]=y r[X]=x
10416 *
10417 * Which guarantees that x==0 && y==0 is impossible. But rather than using
10418 * values 0 and 1, this algorithm cares about specific state transitions of the
10419 * runqueue current task (as updated by the scheduler context switch), and the
10420 * per-mm/cpu cid value.
10421 *
10422 * Let's introduce task (Y) which has task->mm == mm and task (N) which has
10423 * task->mm != mm for the rest of the discussion. There are two scheduler state
10424 * transitions on context switch we care about:
10425 *
10426 * (TSA) Store to rq->curr with transition from (N) to (Y)
10427 *
10428 * (TSB) Store to rq->curr with transition from (Y) to (N)
10429 *
10430 * On the remote-clear side, there is one transition we care about:
10431 *
10432 * (TMA) cmpxchg to *pcpu_cid to set the LAZY flag
10433 *
10434 * There is also a transition to UNSET state which can be performed from all
10435 * sides (scheduler, remote-clear). It is always performed with a cmpxchg which
10436 * guarantees that only a single thread will succeed:
10437 *
10438 * (TMB) cmpxchg to *pcpu_cid to mark UNSET
10439 *
10440 * Just to be clear, what we do _not_ want to happen is a transition to UNSET
10441 * when a thread is actively using the cid (property (1)).
10442 *
10443 * Let's look at the relevant combinations of TSA/TSB and TMA transitions.
10444 *
10445 * Scenario A) (TSA)+(TMA) (from next task perspective)
10446 *
10447 * CPU0 CPU1
10448 *
10449 * Context switch CS-1 Remote-clear
10450 * - store to rq->curr: (N)->(Y) (TSA) - cmpxchg to *pcpu_id to LAZY (TMA)
10451 * (implied barrier after cmpxchg)
10452 * - switch_mm_cid()
10453 * - memory barrier (see switch_mm_cid()
10454 * comment explaining how this barrier
10455 * is combined with other scheduler
10456 * barriers)
10457 * - mm_cid_get (next)
10458 * - READ_ONCE(*pcpu_cid) - rcu_dereference(src_rq->curr)
10459 *
10460 * This Dekker ensures that either task (Y) is observed by the
10461 * rcu_dereference() or the LAZY flag is observed by READ_ONCE(), or both are
10462 * observed.
10463 *
10464 * If task (Y) store is observed by rcu_dereference(), it means that there is
10465 * still an active task on the cpu. Remote-clear will therefore not transition
10466 * to UNSET, which fulfills property (1).
10467 *
10468 * If task (Y) is not observed, but the lazy flag is observed by READ_ONCE(),
10469 * it will move its state to UNSET, which clears the percpu cid perhaps
10470 * uselessly (which is not an issue for correctness). Because task (Y) is not
10471 * observed, CPU1 can move ahead to set the state to UNSET. Because moving
10472 * state to UNSET is done with a cmpxchg expecting that the old state has the
10473 * LAZY flag set, only one thread will successfully UNSET.
10474 *
10475 * If both states (LAZY flag and task (Y)) are observed, the thread on CPU0
10476 * will observe the LAZY flag and transition to UNSET (perhaps uselessly), and
10477 * CPU1 will observe task (Y) and do nothing more, which is fine.
10478 *
10479 * What we are effectively preventing with this Dekker is a scenario where
10480 * neither LAZY flag nor store (Y) are observed, which would fail property (1)
10481 * because this would UNSET a cid which is actively used.
10482 */
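
/*
 * A minimal stand-alone sketch of the Dekker pattern above (illustration
 * only, with made-up variables; the real algorithm orders rq->curr
 * stores against per-mm/cpu cid updates):
 *
 *	int X = 0, Y = 0, r0, r1;
 *
 *	void cpu0(void)				void cpu1(void)
 *	{					{
 *		WRITE_ONCE(X, 1);			WRITE_ONCE(Y, 1);
 *		smp_mb();				smp_mb();
 *		r0 = READ_ONCE(Y);			r1 = READ_ONCE(X);
 *	}					}
 *
 * After both run, r0 == 0 && r1 == 0 is forbidden: at least one load
 * observes the other side's store. Here X stands in for the rq->curr
 * update (TSA) and Y for the LAZY-flag cmpxchg (TMA).
 */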
10483
10484 void sched_mm_cid_migrate_from(struct task_struct *t)
10485 {
10486 t->migrate_from_cpu = task_cpu(t);
10487 }
10488
10489 static
10490 int __sched_mm_cid_migrate_from_fetch_cid(struct rq *src_rq,
10491 struct task_struct *t,
10492 struct mm_cid *src_pcpu_cid)
10493 {
10494 struct mm_struct *mm = t->mm;
10495 struct task_struct *src_task;
10496 int src_cid, last_mm_cid;
10497
10498 if (!mm)
10499 return -1;
10500
10501 last_mm_cid = t->last_mm_cid;
10502 /*
10503 * If the migrated task has no last cid, or if the current
10504 * task on src rq uses the cid, it means the source cid does not need
10505 * to be moved to the destination cpu.
10506 */
10507 if (last_mm_cid == -1)
10508 return -1;
10509 src_cid = READ_ONCE(src_pcpu_cid->cid);
10510 if (!mm_cid_is_valid(src_cid) || last_mm_cid != src_cid)
10511 return -1;
10512
10513 /*
10514 * If we observe an active task using the mm on this rq, it means we
10515 * are not the last task to be migrated from this cpu for this mm, so
10516 * there is no need to move src_cid to the destination cpu.
10517 */
10518 guard(rcu)();
10519 src_task = rcu_dereference(src_rq->curr);
10520 if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
10521 t->last_mm_cid = -1;
10522 return -1;
10523 }
10524
10525 return src_cid;
10526 }
10527
10528 static
10529 int __sched_mm_cid_migrate_from_try_steal_cid(struct rq *src_rq,
10530 struct task_struct *t,
10531 struct mm_cid *src_pcpu_cid,
10532 int src_cid)
10533 {
10534 struct task_struct *src_task;
10535 struct mm_struct *mm = t->mm;
10536 int lazy_cid;
10537
10538 if (src_cid == -1)
10539 return -1;
10540
10541 /*
10542 * Attempt to clear the source cpu cid to move it to the destination
10543 * cpu.
10544 */
10545 lazy_cid = mm_cid_set_lazy_put(src_cid);
10546 if (!try_cmpxchg(&src_pcpu_cid->cid, &src_cid, lazy_cid))
10547 return -1;
10548
10549 /*
10550 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
10551 * rq->curr->mm matches the scheduler barrier in context_switch()
10552 * between store to rq->curr and load of prev and next task's
10553 * per-mm/cpu cid.
10554 *
10555 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
10556 * rq->curr->mm_cid_active matches the barrier in
10557 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
10558 * sched_mm_cid_after_execve() between store to t->mm_cid_active and
10559 * load of per-mm/cpu cid.
10560 */
10561
10562 /*
10563 * If we observe an active task using the mm on this rq after setting
10564 * the lazy-put flag, this task will be responsible for transitioning
10565 * from lazy-put flag set to MM_CID_UNSET.
10566 */
10567 scoped_guard (rcu) {
10568 src_task = rcu_dereference(src_rq->curr);
10569 if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
10570 /*
10571 * We observed an active task for this mm; there is therefore
10572 * no point in moving this cid to the destination cpu.
10573 */
10574 t->last_mm_cid = -1;
10575 return -1;
10576 }
10577 }
10578
10579 /*
10580 * The src_cid is unused, so it can be unset.
10581 */
10582 if (!try_cmpxchg(&src_pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
10583 return -1;
10584 WRITE_ONCE(src_pcpu_cid->recent_cid, MM_CID_UNSET);
10585 return src_cid;
10586 }
10587
10588 /*
10589 * Migration to dst cpu. Called with dst_rq lock held.
10590 * Interrupts are disabled, which keeps the window of cid ownership without the
10591 * source rq lock held small.
10592 */
10593 void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t)
10594 {
10595 struct mm_cid *src_pcpu_cid, *dst_pcpu_cid;
10596 struct mm_struct *mm = t->mm;
10597 int src_cid, src_cpu;
10598 bool dst_cid_is_set;
10599 struct rq *src_rq;
10600
10601 lockdep_assert_rq_held(dst_rq);
10602
10603 if (!mm)
10604 return;
10605 src_cpu = t->migrate_from_cpu;
10606 if (src_cpu == -1) {
10607 t->last_mm_cid = -1;
10608 return;
10609 }
10610 /*
10611 * Move the src cid if the dst cid is unset. This keeps id
10612 * allocation closest to 0 in cases where few threads migrate around
10613 * many CPUs.
10614 *
10615 * If destination cid or recent cid is already set, we may have
10616 * to just clear the src cid to ensure compactness in frequent
10617 * migration scenarios.
10618 *
10619 * It is not useful to clear the src cid when the number of threads is
10620 * greater or equal to the number of allowed CPUs, because user-space
10621 * can expect that the number of allowed cids can reach the number of
10622 * allowed CPUs.
10623 */
10624 dst_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(dst_rq));
10625 dst_cid_is_set = !mm_cid_is_unset(READ_ONCE(dst_pcpu_cid->cid)) ||
10626 !mm_cid_is_unset(READ_ONCE(dst_pcpu_cid->recent_cid));
10627 if (dst_cid_is_set && atomic_read(&mm->mm_users) >= READ_ONCE(mm->nr_cpus_allowed))
10628 return;
10629 src_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, src_cpu);
10630 src_rq = cpu_rq(src_cpu);
10631 src_cid = __sched_mm_cid_migrate_from_fetch_cid(src_rq, t, src_pcpu_cid);
10632 if (src_cid == -1)
10633 return;
10634 src_cid = __sched_mm_cid_migrate_from_try_steal_cid(src_rq, t, src_pcpu_cid,
10635 src_cid);
10636 if (src_cid == -1)
10637 return;
10638 if (dst_cid_is_set) {
10639 __mm_cid_put(mm, src_cid);
10640 return;
10641 }
10642 /* Move src_cid to dst cpu. */
10643 mm_cid_snapshot_time(dst_rq, mm);
10644 WRITE_ONCE(dst_pcpu_cid->cid, src_cid);
10645 WRITE_ONCE(dst_pcpu_cid->recent_cid, src_cid);
10646 }
10647
10648 static void sched_mm_cid_remote_clear(struct mm_struct *mm, struct mm_cid *pcpu_cid,
10649 int cpu)
10650 {
10651 struct rq *rq = cpu_rq(cpu);
10652 struct task_struct *t;
10653 int cid, lazy_cid;
10654
10655 cid = READ_ONCE(pcpu_cid->cid);
10656 if (!mm_cid_is_valid(cid))
10657 return;
10658
10659 /*
10660 * Clear the cpu cid if it is set to keep cid allocation compact. If
10661 * there happens to be other tasks left on the source cpu using this
10662 * mm, the next task using this mm will reallocate its cid on context
10663 * switch.
10664 */
10665 lazy_cid = mm_cid_set_lazy_put(cid);
10666 if (!try_cmpxchg(&pcpu_cid->cid, &cid, lazy_cid))
10667 return;
10668
10669 /*
10670 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
10671 * rq->curr->mm matches the scheduler barrier in context_switch()
10672 * between store to rq->curr and load of prev and next task's
10673 * per-mm/cpu cid.
10674 *
10675 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
10676 * rq->curr->mm_cid_active matches the barrier in
10677 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
10678 * sched_mm_cid_after_execve() between store to t->mm_cid_active and
10679 * load of per-mm/cpu cid.
10680 */
10681
10682 /*
10683 * If we observe an active task using the mm on this rq after setting
10684 * the lazy-put flag, that task will be responsible for transitioning
10685 * from lazy-put flag set to MM_CID_UNSET.
10686 */
10687 scoped_guard (rcu) {
10688 t = rcu_dereference(rq->curr);
10689 if (READ_ONCE(t->mm_cid_active) && t->mm == mm)
10690 return;
10691 }
10692
10693 /*
10694 * The cid is unused, so it can be unset.
10695 * Disable interrupts to keep the window of cid ownership without rq
10696 * lock small.
10697 */
10698 scoped_guard (irqsave) {
10699 if (try_cmpxchg(&pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
10700 __mm_cid_put(mm, cid);
10701 }
10702 }
10703
10704 static void sched_mm_cid_remote_clear_old(struct mm_struct *mm, int cpu)
10705 {
10706 struct rq *rq = cpu_rq(cpu);
10707 struct mm_cid *pcpu_cid;
10708 struct task_struct *curr;
10709 u64 rq_clock;
10710
10711 /*
10712 * rq->clock load is racy on 32-bit but one spurious clear once in a
10713 * while is irrelevant.
10714 */
10715 rq_clock = READ_ONCE(rq->clock);
10716 pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
10717
10718 /*
10719 * In order to take care of infrequently scheduled tasks, bump the time
10720 * snapshot associated with this cid if an active task using the mm is
10721 * observed on this rq.
10722 */
10723 scoped_guard (rcu) {
10724 curr = rcu_dereference(rq->curr);
10725 if (READ_ONCE(curr->mm_cid_active) && curr->mm == mm) {
10726 WRITE_ONCE(pcpu_cid->time, rq_clock);
10727 return;
10728 }
10729 }
10730
10731 if (rq_clock < pcpu_cid->time + SCHED_MM_CID_PERIOD_NS)
10732 return;
10733 sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
10734 }
10735
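/*
 * Compaction helper: once the cidmask weight is known, a compact
 * allocation only needs cids in [0, weight). A per-cpu cid greater than
 * or equal to the weight is therefore cleared, and a lower cid gets
 * reallocated on the next context switch of a task using the mm.
 */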
static void sched_mm_cid_remote_clear_weight(struct mm_struct *mm, int cpu,
					     int weight)
{
	struct mm_cid *pcpu_cid;
	int cid;

	pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
	cid = READ_ONCE(pcpu_cid->cid);
	if (!mm_cid_is_valid(cid) || cid < weight)
		return;
	sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
}

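/*
 * Task work scanning all possible cpus for stale cids of current->mm.
 * Throttled to at most one scan per mm every MM_CID_SCAN_DELAY: of all
 * threads sharing the mm that race past the time_before() check, the
 * try_cmpxchg() on mm->mm_cid_next_scan lets exactly one through. The
 * scan first drops cids unused for a full period, then recompacts the
 * remaining ones against the cidmask weight.
 */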
static void task_mm_cid_work(struct callback_head *work)
{
	unsigned long now = jiffies, old_scan, next_scan;
	struct task_struct *t = current;
	struct cpumask *cidmask;
	struct mm_struct *mm;
	int weight, cpu;

	WARN_ON_ONCE(t != container_of(work, struct task_struct, cid_work));

	work->next = work;	/* Prevent double-add */
	if (t->flags & PF_EXITING)
		return;
	mm = t->mm;
	if (!mm)
		return;
	old_scan = READ_ONCE(mm->mm_cid_next_scan);
	next_scan = now + msecs_to_jiffies(MM_CID_SCAN_DELAY);
	if (!old_scan) {
		unsigned long res;

		res = cmpxchg(&mm->mm_cid_next_scan, old_scan, next_scan);
		if (res != old_scan)
			old_scan = res;
		else
			old_scan = next_scan;
	}
	if (time_before(now, old_scan))
		return;
	if (!try_cmpxchg(&mm->mm_cid_next_scan, &old_scan, next_scan))
		return;
	cidmask = mm_cidmask(mm);
	/* Clear cids that were not recently used. */
	for_each_possible_cpu(cpu)
		sched_mm_cid_remote_clear_old(mm, cpu);
	weight = cpumask_weight(cidmask);
	/*
	 * Clear cids that are greater than or equal to the cidmask weight to
	 * recompact it.
	 */
	for_each_possible_cpu(cpu)
		sched_mm_cid_remote_clear_weight(mm, cpu, weight);
}

void init_sched_mm_cid(struct task_struct *t)
{
	struct mm_struct *mm = t->mm;
	int mm_users = 0;

	if (mm) {
		mm_users = atomic_read(&mm->mm_users);
		if (mm_users == 1)
			mm->mm_cid_next_scan = jiffies + msecs_to_jiffies(MM_CID_SCAN_DELAY);
	}
	t->cid_work.next = &t->cid_work;	/* Protect against double add */
	init_task_work(&t->cid_work, task_mm_cid_work);
}

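/*
 * Scheduler tick hook, called with the rq lock held: only test whether
 * a scan is due and, if so, queue task_mm_cid_work() to run on return
 * to userspace. work->next == work is the "not queued" marker set up in
 * init_sched_mm_cid() and restored by task_mm_cid_work() itself.
 */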
void task_tick_mm_cid(struct rq *rq, struct task_struct *curr)
{
	struct callback_head *work = &curr->cid_work;
	unsigned long now = jiffies;

	if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) ||
	    work->next != work)
		return;
	if (time_before(now, READ_ONCE(curr->mm->mm_cid_next_scan)))
		return;

	/* No page allocation under rq lock */
	task_work_add(curr, work, TWA_RESUME);
}

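/*
 * The helpers below flip mm_cid_active for the current task under its
 * rq lock. this_rq() is only meaningful with preemption disabled; once
 * the irqsave rq lock is held it keeps the section atomic on its own,
 * so the extra preempt count can be dropped without a resched point, as
 * the inline comments note.
 */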
void sched_mm_cid_exit_signals(struct task_struct *t)
{
	struct mm_struct *mm = t->mm;
	struct rq *rq;

	if (!mm)
		return;

	preempt_disable();
	rq = this_rq();
	guard(rq_lock_irqsave)(rq);
	preempt_enable_no_resched();	/* holding spinlock */
	WRITE_ONCE(t->mm_cid_active, 0);
	/*
	 * Store t->mm_cid_active before loading per-mm/cpu cid.
	 * Matches barrier in sched_mm_cid_remote_clear_old().
	 */
	smp_mb();
	mm_cid_put(mm);
	t->last_mm_cid = t->mm_cid = -1;
}

void sched_mm_cid_before_execve(struct task_struct *t)
{
	struct mm_struct *mm = t->mm;
	struct rq *rq;

	if (!mm)
		return;

	preempt_disable();
	rq = this_rq();
	guard(rq_lock_irqsave)(rq);
	preempt_enable_no_resched();	/* holding spinlock */
	WRITE_ONCE(t->mm_cid_active, 0);
	/*
	 * Store t->mm_cid_active before loading per-mm/cpu cid.
	 * Matches barrier in sched_mm_cid_remote_clear_old().
	 */
	smp_mb();
	mm_cid_put(mm);
	t->last_mm_cid = t->mm_cid = -1;
}

void sched_mm_cid_after_execve(struct task_struct *t)
{
	struct mm_struct *mm = t->mm;
	struct rq *rq;

	if (!mm)
		return;

	preempt_disable();
	rq = this_rq();
	scoped_guard (rq_lock_irqsave, rq) {
		preempt_enable_no_resched();	/* holding spinlock */
		WRITE_ONCE(t->mm_cid_active, 1);
		/*
		 * Store t->mm_cid_active before loading per-mm/cpu cid.
		 * Matches barrier in sched_mm_cid_remote_clear_old().
		 */
		smp_mb();
		t->last_mm_cid = t->mm_cid = mm_cid_get(rq, t, mm);
	}
}

void sched_mm_cid_fork(struct task_struct *t)
{
	WARN_ON_ONCE(!t->mm || t->mm_cid != -1);
	t->mm_cid_active = 1;
}
#endif /* CONFIG_SCHED_MM_CID */

#ifdef CONFIG_SCHED_CLASS_EXT
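/*
 * Helpers for sched_ext to modify a task's scheduling properties while
 * it is off the runqueue: dequeue and put the task first, apply the
 * change, then re-enqueue and set it from the saved ctx. A typical
 * caller looks roughly like this (queue flags are illustrative), with
 * task_rq(p)->lock held across all three steps:
 *
 *	struct sched_enq_and_set_ctx ctx;
 *
 *	sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
 *	...update p's scheduling properties...
 *	sched_enq_and_set_task(&ctx);
 */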
void sched_deq_and_put_task(struct task_struct *p, int queue_flags,
			    struct sched_enq_and_set_ctx *ctx)
{
	struct rq *rq = task_rq(p);

	lockdep_assert_rq_held(rq);

	*ctx = (struct sched_enq_and_set_ctx){
		.p = p,
		.queue_flags = queue_flags,
		.queued = task_on_rq_queued(p),
		.running = task_current(rq, p),
	};

	update_rq_clock(rq);
	if (ctx->queued)
		dequeue_task(rq, p, queue_flags | DEQUEUE_NOCLOCK);
	if (ctx->running)
		put_prev_task(rq, p);
}

void sched_enq_and_set_task(struct sched_enq_and_set_ctx *ctx)
{
	struct rq *rq = task_rq(ctx->p);

	lockdep_assert_rq_held(rq);

	if (ctx->queued)
		enqueue_task(rq, ctx->p, ctx->queue_flags | ENQUEUE_NOCLOCK);
	if (ctx->running)
		set_next_task(rq, ctx->p);
}
#endif /* CONFIG_SCHED_CLASS_EXT */