xref: /linux/kernel/sched/core.c (revision c234c6534040b1c1f8adcaf44702fc3e584cb1fe)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  kernel/sched/core.c
4  *
5  *  Core kernel CPU scheduler code
6  *
7  *  Copyright (C) 1991-2002  Linus Torvalds
8  *  Copyright (C) 1998-2024  Ingo Molnar, Red Hat
9  */
10 #include <linux/highmem.h>
11 #include <linux/hrtimer_api.h>
12 #include <linux/ktime_api.h>
13 #include <linux/sched/signal.h>
14 #include <linux/syscalls_api.h>
15 #include <linux/debug_locks.h>
16 #include <linux/prefetch.h>
17 #include <linux/capability.h>
18 #include <linux/pgtable_api.h>
19 #include <linux/wait_bit.h>
20 #include <linux/jiffies.h>
21 #include <linux/spinlock_api.h>
22 #include <linux/cpumask_api.h>
23 #include <linux/lockdep_api.h>
24 #include <linux/hardirq.h>
25 #include <linux/softirq.h>
26 #include <linux/refcount_api.h>
27 #include <linux/topology.h>
28 #include <linux/sched/clock.h>
29 #include <linux/sched/cond_resched.h>
30 #include <linux/sched/cputime.h>
31 #include <linux/sched/debug.h>
32 #include <linux/sched/hotplug.h>
33 #include <linux/sched/init.h>
34 #include <linux/sched/isolation.h>
35 #include <linux/sched/loadavg.h>
36 #include <linux/sched/mm.h>
37 #include <linux/sched/nohz.h>
38 #include <linux/sched/rseq_api.h>
39 #include <linux/sched/rt.h>
40 
41 #include <linux/blkdev.h>
42 #include <linux/context_tracking.h>
43 #include <linux/cpuset.h>
44 #include <linux/delayacct.h>
45 #include <linux/init_task.h>
46 #include <linux/interrupt.h>
47 #include <linux/ioprio.h>
48 #include <linux/kallsyms.h>
49 #include <linux/kcov.h>
50 #include <linux/kprobes.h>
51 #include <linux/llist_api.h>
52 #include <linux/mmu_context.h>
53 #include <linux/mmzone.h>
54 #include <linux/mutex_api.h>
55 #include <linux/nmi.h>
56 #include <linux/nospec.h>
57 #include <linux/perf_event_api.h>
58 #include <linux/profile.h>
59 #include <linux/psi.h>
60 #include <linux/rcuwait_api.h>
61 #include <linux/rseq.h>
62 #include <linux/sched/wake_q.h>
63 #include <linux/scs.h>
64 #include <linux/slab.h>
65 #include <linux/syscalls.h>
66 #include <linux/vtime.h>
67 #include <linux/wait_api.h>
68 #include <linux/workqueue_api.h>
69 
70 #ifdef CONFIG_PREEMPT_DYNAMIC
71 # ifdef CONFIG_GENERIC_ENTRY
72 #  include <linux/entry-common.h>
73 # endif
74 #endif
75 
76 #include <uapi/linux/sched/types.h>
77 
78 #include <asm/irq_regs.h>
79 #include <asm/switch_to.h>
80 #include <asm/tlb.h>
81 
82 #define CREATE_TRACE_POINTS
83 #include <linux/sched/rseq_api.h>
84 #include <trace/events/sched.h>
85 #include <trace/events/ipi.h>
86 #undef CREATE_TRACE_POINTS
87 
88 #include "sched.h"
89 #include "stats.h"
90 
91 #include "autogroup.h"
92 #include "pelt.h"
93 #include "smp.h"
95 
96 #include "../workqueue_internal.h"
97 #include "../../io_uring/io-wq.h"
98 #include "../smpboot.h"
99 
100 EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
101 EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);
102 
103 /*
104  * Export tracepoints that act as a bare tracehook (i.e., have no trace
105  * event associated with them) to allow external modules to probe them.
106  */
107 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp);
108 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
109 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
110 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
111 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
112 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_hw_tp);
113 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp);
114 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
115 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
116 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
117 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
118 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_compute_energy_tp);
119 
120 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
121 
122 #ifdef CONFIG_SCHED_DEBUG
123 /*
124  * Debugging: various feature bits
125  *
126  * If SCHED_DEBUG is disabled, each compilation unit has its own copy of
127  * sysctl_sched_features, defined in sched.h, to allow constant propagation
128  * at compile time and compiler optimizations based on the feature defaults.
129  */
130 #define SCHED_FEAT(name, enabled)	\
131 	(1UL << __SCHED_FEAT_##name) * enabled |
132 const_debug unsigned int sysctl_sched_features =
133 #include "features.h"
134 	0;
135 #undef SCHED_FEAT
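/*
 * Illustrative sketch (not part of this file): how the SCHED_FEAT x-macro
 * above builds the bitmask. features.h is a list of SCHED_FEAT(name,
 * enabled) invocations; each one expands to
 * "(1UL << __SCHED_FEAT_name) * enabled |", and the trailing 0 terminates
 * the OR-expression. The __SCHED_FEAT_* enumerators are generated from the
 * same list in sched.h. A stand-alone analogue with made-up feature names:
 */
#if 0
enum { __FEAT_ALPHA, __FEAT_BETA, __FEAT_GAMMA };

#define FEAT(name, enabled)	(1UL << __FEAT_##name) * (enabled) |
static const unsigned long demo_features =
	FEAT(ALPHA, 1)
	FEAT(BETA,  0)
	FEAT(GAMMA, 1)
	0;	/* == 0b101: ALPHA and GAMMA set, BETA clear */
#undef FEAT
#endif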
136 
137 /*
138  * Print a warning if need_resched is set for the given duration (if
139  * LATENCY_WARN is enabled).
140  *
141  * If sysctl_resched_latency_warn_once is set, only one warning will be shown
142  * per boot.
143  */
144 __read_mostly int sysctl_resched_latency_warn_ms = 100;
145 __read_mostly int sysctl_resched_latency_warn_once = 1;
146 #endif /* CONFIG_SCHED_DEBUG */
147 
148 /*
149  * Number of tasks to iterate in a single balance run.
150  * Limited because this is done with IRQs disabled.
151  */
152 const_debug unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;
153 
154 __read_mostly int scheduler_running;
155 
156 #ifdef CONFIG_SCHED_CORE
157 
158 DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);
159 
160 /* kernel prio, less is more */
161 static inline int __task_prio(const struct task_struct *p)
162 {
163 	if (p->sched_class == &stop_sched_class) /* trumps deadline */
164 		return -2;
165 
166 	if (p->dl_server)
167 		return -1; /* deadline */
168 
169 	if (rt_or_dl_prio(p->prio))
170 		return p->prio; /* [-1, 99] */
171 
172 	if (p->sched_class == &idle_sched_class)
173 		return MAX_RT_PRIO + NICE_WIDTH; /* 140 */
174 
175 	if (task_on_scx(p))
176 		return MAX_RT_PRIO + MAX_NICE + 1; /* 120, squash ext */
177 
178 	return MAX_RT_PRIO + MAX_NICE; /* 119, squash fair */
179 }
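/*
 * Worked example of the mapping above (values derived from the code;
 * MAX_RT_PRIO == 100, MAX_NICE == 19, NICE_WIDTH == 40):
 *
 *	stop task			->  -2
 *	task served by a DL server	->  -1
 *	DL task (p->prio == -1)		->  -1
 *	RT task, rtprio 98 (p->prio 1)	->   1
 *	SCX task			-> 120
 *	CFS (fair) task			-> 119
 *	idle task			-> 140
 */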
180 
181 /*
182  * l(a,b)
183  * le(a,b) := !l(b,a)
184  * g(a,b)  := l(b,a)
185  * ge(a,b) := !l(a,b)
186  */
187 
188 /* real prio, less is less */
189 static inline bool prio_less(const struct task_struct *a,
190 			     const struct task_struct *b, bool in_fi)
191 {
193 	int pa = __task_prio(a), pb = __task_prio(b);
194 
195 	if (-pa < -pb)
196 		return true;
197 
198 	if (-pb < -pa)
199 		return false;
200 
201 	if (pa == -1) { /* dl_prio() doesn't work because of stop_class above */
202 		const struct sched_dl_entity *a_dl, *b_dl;
203 
204 		a_dl = &a->dl;
205 		/*
206 		 * Since 'a' and 'b' can be CFS tasks served by a DL server,
207 		 * __task_prio() can return -1 (for DL) even for those. In that
208 		 * case, get to the dl_server's DL entity.
209 		 */
210 		if (a->dl_server)
211 			a_dl = a->dl_server;
212 
213 		b_dl = &b->dl;
214 		if (b->dl_server)
215 			b_dl = b->dl_server;
216 
217 		return !dl_time_before(a_dl->deadline, b_dl->deadline);
218 	}
219 
220 	if (pa == MAX_RT_PRIO + MAX_NICE)	/* fair */
221 		return cfs_prio_less(a, b, in_fi);
222 
223 #ifdef CONFIG_SCHED_CLASS_EXT
224 	if (pa == MAX_RT_PRIO + MAX_NICE + 1)	/* ext */
225 		return scx_prio_less(a, b, in_fi);
226 #endif
227 
228 	return false;
229 }
230 
231 static inline bool __sched_core_less(const struct task_struct *a,
232 				     const struct task_struct *b)
233 {
234 	if (a->core_cookie < b->core_cookie)
235 		return true;
236 
237 	if (a->core_cookie > b->core_cookie)
238 		return false;
239 
240 	/* flip prio, so high prio is leftmost */
241 	if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count))
242 		return true;
243 
244 	return false;
245 }
246 
247 #define __node_2_sc(node) rb_entry((node), struct task_struct, core_node)
248 
249 static inline bool rb_sched_core_less(struct rb_node *a, const struct rb_node *b)
250 {
251 	return __sched_core_less(__node_2_sc(a), __node_2_sc(b));
252 }
253 
254 static inline int rb_sched_core_cmp(const void *key, const struct rb_node *node)
255 {
256 	const struct task_struct *p = __node_2_sc(node);
257 	unsigned long cookie = (unsigned long)key;
258 
259 	if (cookie < p->core_cookie)
260 		return -1;
261 
262 	if (cookie > p->core_cookie)
263 		return 1;
264 
265 	return 0;
266 }
267 
268 void sched_core_enqueue(struct rq *rq, struct task_struct *p)
269 {
270 	if (p->se.sched_delayed)
271 		return;
272 
273 	rq->core->core_task_seq++;
274 
275 	if (!p->core_cookie)
276 		return;
277 
278 	rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);
279 }
280 
281 void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags)
282 {
283 	if (p->se.sched_delayed)
284 		return;
285 
286 	rq->core->core_task_seq++;
287 
288 	if (sched_core_enqueued(p)) {
289 		rb_erase(&p->core_node, &rq->core_tree);
290 		RB_CLEAR_NODE(&p->core_node);
291 	}
292 
293 	/*
294 	 * Migrating the last task off the CPU, with the CPU in forced idle
295 	 * state. Reschedule to create an accounting edge for forced idle,
296 	 * and re-examine whether the core is still in forced idle state.
297 	 */
298 	if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 &&
299 	    rq->core->core_forceidle_count && rq->curr == rq->idle)
300 		resched_curr(rq);
301 }
302 
303 static int sched_task_is_throttled(struct task_struct *p, int cpu)
304 {
305 	if (p->sched_class->task_is_throttled)
306 		return p->sched_class->task_is_throttled(p, cpu);
307 
308 	return 0;
309 }
310 
311 static struct task_struct *sched_core_next(struct task_struct *p, unsigned long cookie)
312 {
313 	struct rb_node *node = &p->core_node;
314 	int cpu = task_cpu(p);
315 
316 	do {
317 		node = rb_next(node);
318 		if (!node)
319 			return NULL;
320 
321 		p = __node_2_sc(node);
322 		if (p->core_cookie != cookie)
323 			return NULL;
324 
325 	} while (sched_task_is_throttled(p, cpu));
326 
327 	return p;
328 }
329 
330 /*
331  * Find the left-most (i.e., highest-priority) unthrottled task matching @cookie.
332  * If no suitable task is found, NULL will be returned.
333  */
334 static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie)
335 {
336 	struct task_struct *p;
337 	struct rb_node *node;
338 
339 	node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp);
340 	if (!node)
341 		return NULL;
342 
343 	p = __node_2_sc(node);
344 	if (!sched_task_is_throttled(p, rq->cpu))
345 		return p;
346 
347 	return sched_core_next(p, cookie);
348 }
349 
350 /*
351  * Magic required such that:
352  *
353  *	raw_spin_rq_lock(rq);
354  *	...
355  *	raw_spin_rq_unlock(rq);
356  *
357  * ends up locking and unlocking the _same_ lock, and all CPUs
358  * always agree on what rq has what lock.
359  *
360  * XXX entirely possible to selectively enable cores, don't bother for now.
361  */
362 
363 static DEFINE_MUTEX(sched_core_mutex);
364 static atomic_t sched_core_count;
365 static struct cpumask sched_core_mask;
366 
367 static void sched_core_lock(int cpu, unsigned long *flags)
368 {
369 	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
370 	int t, i = 0;
371 
372 	local_irq_save(*flags);
373 	for_each_cpu(t, smt_mask)
374 		raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);
375 }
376 
377 static void sched_core_unlock(int cpu, unsigned long *flags)
378 {
379 	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
380 	int t;
381 
382 	for_each_cpu(t, smt_mask)
383 		raw_spin_unlock(&cpu_rq(t)->__lock);
384 	local_irq_restore(*flags);
385 }
386 
387 static void __sched_core_flip(bool enabled)
388 {
389 	unsigned long flags;
390 	int cpu, t;
391 
392 	cpus_read_lock();
393 
394 	/*
395 	 * Toggle the online cores, one by one.
396 	 */
397 	cpumask_copy(&sched_core_mask, cpu_online_mask);
398 	for_each_cpu(cpu, &sched_core_mask) {
399 		const struct cpumask *smt_mask = cpu_smt_mask(cpu);
400 
401 		sched_core_lock(cpu, &flags);
402 
403 		for_each_cpu(t, smt_mask)
404 			cpu_rq(t)->core_enabled = enabled;
405 
406 		cpu_rq(cpu)->core->core_forceidle_start = 0;
407 
408 		sched_core_unlock(cpu, &flags);
409 
410 		cpumask_andnot(&sched_core_mask, &sched_core_mask, smt_mask);
411 	}
412 
413 	/*
414 	 * Toggle the offline CPUs.
415 	 */
416 	for_each_cpu_andnot(cpu, cpu_possible_mask, cpu_online_mask)
417 		cpu_rq(cpu)->core_enabled = enabled;
418 
419 	cpus_read_unlock();
420 }
421 
422 static void sched_core_assert_empty(void)
423 {
424 	int cpu;
425 
426 	for_each_possible_cpu(cpu)
427 		WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree));
428 }
429 
430 static void __sched_core_enable(void)
431 {
432 	static_branch_enable(&__sched_core_enabled);
433 	/*
434 	 * Ensure all previous instances of raw_spin_rq_*lock() have finished
435 	 * and future ones will observe !sched_core_disabled().
436 	 */
437 	synchronize_rcu();
438 	__sched_core_flip(true);
439 	sched_core_assert_empty();
440 }
441 
442 static void __sched_core_disable(void)
443 {
444 	sched_core_assert_empty();
445 	__sched_core_flip(false);
446 	static_branch_disable(&__sched_core_enabled);
447 }
448 
449 void sched_core_get(void)
450 {
451 	if (atomic_inc_not_zero(&sched_core_count))
452 		return;
453 
454 	mutex_lock(&sched_core_mutex);
455 	if (!atomic_read(&sched_core_count))
456 		__sched_core_enable();
457 
458 	smp_mb__before_atomic();
459 	atomic_inc(&sched_core_count);
460 	mutex_unlock(&sched_core_mutex);
461 }
462 
463 static void __sched_core_put(struct work_struct *work)
464 {
465 	if (atomic_dec_and_mutex_lock(&sched_core_count, &sched_core_mutex)) {
466 		__sched_core_disable();
467 		mutex_unlock(&sched_core_mutex);
468 	}
469 }
470 
471 void sched_core_put(void)
472 {
473 	static DECLARE_WORK(_work, __sched_core_put);
474 
475 	/*
476 	 * "There can be only one"
477 	 *
478 	 * Either this is the last one, or we don't actually need to do any
479 	 * 'work'. If it is the last *again*, we rely on
480 	 * WORK_STRUCT_PENDING_BIT.
481 	 */
482 	if (!atomic_add_unless(&sched_core_count, -1, 1))
483 		schedule_work(&_work);
484 }
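/*
 * Illustrative sketch (not part of this file): the refcounting pattern
 * used by sched_core_get()/sched_core_put() above, reduced to its bones.
 * The mutex serializes only the 0 <-> 1 transitions, so the common case
 * is a single atomic op; the real sched_core_put() additionally defers
 * the slow path to a workqueue so it stays callable from atomic context.
 * The demo_* names, including demo_enable()/demo_disable(), are
 * hypothetical.
 */
#if 0
static DEFINE_MUTEX(demo_mutex);
static atomic_t demo_count;

static void demo_get(void)
{
	if (atomic_inc_not_zero(&demo_count))	/* fast path: already enabled */
		return;

	mutex_lock(&demo_mutex);
	if (!atomic_read(&demo_count))
		demo_enable();			/* slow path: 0 -> 1 */
	atomic_inc(&demo_count);
	mutex_unlock(&demo_mutex);
}

static void demo_put(void)
{
	/* Takes the mutex only when the count drops to 0. */
	if (atomic_dec_and_mutex_lock(&demo_count, &demo_mutex)) {
		demo_disable();			/* slow path: 1 -> 0 */
		mutex_unlock(&demo_mutex);
	}
}
#endif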
485 
486 #else /* !CONFIG_SCHED_CORE */
487 
488 static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { }
489 static inline void
490 sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { }
491 
492 #endif /* CONFIG_SCHED_CORE */
493 
494 /*
495  * Serialization rules:
496  *
497  * Lock order:
498  *
499  *   p->pi_lock
500  *     rq->lock
501  *       hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
502  *
503  *  rq1->lock
504  *    rq2->lock  where: rq1 < rq2
505  *
506  * Regular state:
507  *
508  * Normal scheduling state is serialized by rq->lock. __schedule() takes the
509  * local CPU's rq->lock, it optionally removes the task from the runqueue and
510  * always looks at the local rq data structures to find the most eligible task
511  * to run next.
512  *
513  * Task enqueue is also under rq->lock, possibly taken from another CPU.
514  * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
515  * the local CPU to avoid bouncing the runqueue state around [ see
516  * ttwu_queue_wakelist() ].
517  *
518  * Task wakeups, specifically wakeups that involve migration, are horribly
519  * complicated to avoid having to take two rq->locks.
520  *
521  * Special state:
522  *
523  * System-calls and anything external will use task_rq_lock() which acquires
524  * both p->pi_lock and rq->lock. As a consequence the state they change is
525  * stable while holding either lock:
526  *
527  *  - sched_setaffinity()/
528  *    set_cpus_allowed_ptr():	p->cpus_ptr, p->nr_cpus_allowed
529  *  - set_user_nice():		p->se.load, p->*prio
530  *  - __sched_setscheduler():	p->sched_class, p->policy, p->*prio,
531  *				p->se.load, p->rt_priority,
532  *				p->dl.dl_{runtime, deadline, period, flags, bw, density}
533  *  - sched_setnuma():		p->numa_preferred_nid
534  *  - sched_move_task():	p->sched_task_group
535  *  - uclamp_update_active()	p->uclamp*
536  *
537  * p->state <- TASK_*:
538  *
539  *   is changed locklessly using set_current_state(), __set_current_state() or
540  *   set_special_state(), see their respective comments, or by
541  *   try_to_wake_up(). The latter uses p->pi_lock to serialize against
542  *   concurrent state changes by the task itself.
543  *
544  * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
545  *
546  *   is set by activate_task() and cleared by deactivate_task(), under
547  *   rq->lock. Non-zero indicates the task is runnable, the special
548  *   ON_RQ_MIGRATING state is used for migration without holding both
549  *   rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
550  *
551  * p->on_cpu <- { 0, 1 }:
552  *
553  *   is set by prepare_task() and cleared by finish_task() such that it will be
554  *   set before p is scheduled-in and cleared after p is scheduled-out, both
555  *   under rq->lock. Non-zero indicates the task is running on its CPU.
556  *
557  *   [ The astute reader will observe that it is possible for two tasks on one
558  *     CPU to have ->on_cpu = 1 at the same time. ]
559  *
560  * task_cpu(p): is changed by set_task_cpu(), the rules are:
561  *
562  *  - Don't call set_task_cpu() on a blocked task:
563  *
564  *    We don't care what CPU we're not running on; this simplifies hotplug:
565  *    the CPU assignment of blocked tasks isn't required to be valid.
566  *
567  *  - for try_to_wake_up(), called under p->pi_lock:
568  *
569  *    This allows try_to_wake_up() to only take one rq->lock, see its comment.
570  *
571  *  - for migration called under rq->lock:
572  *    [ see task_on_rq_migrating() in task_rq_lock() ]
573  *
574  *    o move_queued_task()
575  *    o detach_task()
576  *
577  *  - for migration called under double_rq_lock():
578  *
579  *    o __migrate_swap_task()
580  *    o push_rt_task() / pull_rt_task()
581  *    o push_dl_task() / pull_dl_task()
582  *    o dl_task_offline_migration()
583  *
584  */
585 
586 void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
587 {
588 	raw_spinlock_t *lock;
589 
590 	/* Matches synchronize_rcu() in __sched_core_enable() */
591 	preempt_disable();
592 	if (sched_core_disabled()) {
593 		raw_spin_lock_nested(&rq->__lock, subclass);
594 		/* preempt_count *MUST* be > 1 */
595 		preempt_enable_no_resched();
596 		return;
597 	}
598 
599 	for (;;) {
600 		lock = __rq_lockp(rq);
601 		raw_spin_lock_nested(lock, subclass);
602 		if (likely(lock == __rq_lockp(rq))) {
603 			/* preempt_count *MUST* be > 1 */
604 			preempt_enable_no_resched();
605 			return;
606 		}
607 		raw_spin_unlock(lock);
608 	}
609 }
610 
611 bool raw_spin_rq_trylock(struct rq *rq)
612 {
613 	raw_spinlock_t *lock;
614 	bool ret;
615 
616 	/* Matches synchronize_rcu() in __sched_core_enable() */
617 	preempt_disable();
618 	if (sched_core_disabled()) {
619 		ret = raw_spin_trylock(&rq->__lock);
620 		preempt_enable();
621 		return ret;
622 	}
623 
624 	for (;;) {
625 		lock = __rq_lockp(rq);
626 		ret = raw_spin_trylock(lock);
627 		if (!ret || (likely(lock == __rq_lockp(rq)))) {
628 			preempt_enable();
629 			return ret;
630 		}
631 		raw_spin_unlock(lock);
632 	}
633 }
634 
635 void raw_spin_rq_unlock(struct rq *rq)
636 {
637 	raw_spin_unlock(rq_lockp(rq));
638 }
639 
640 #ifdef CONFIG_SMP
641 /*
642  * double_rq_lock - safely lock two runqueues
643  */
644 void double_rq_lock(struct rq *rq1, struct rq *rq2)
645 {
646 	lockdep_assert_irqs_disabled();
647 
648 	if (rq_order_less(rq2, rq1))
649 		swap(rq1, rq2);
650 
651 	raw_spin_rq_lock(rq1);
652 	if (__rq_lockp(rq1) != __rq_lockp(rq2))
653 		raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);
654 
655 	double_rq_clock_clear_update(rq1, rq2);
656 }
657 #endif
658 
659 /*
660  * __task_rq_lock - lock the rq @p resides on.
661  */
662 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
663 	__acquires(rq->lock)
664 {
665 	struct rq *rq;
666 
667 	lockdep_assert_held(&p->pi_lock);
668 
669 	for (;;) {
670 		rq = task_rq(p);
671 		raw_spin_rq_lock(rq);
672 		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
673 			rq_pin_lock(rq, rf);
674 			return rq;
675 		}
676 		raw_spin_rq_unlock(rq);
677 
678 		while (unlikely(task_on_rq_migrating(p)))
679 			cpu_relax();
680 	}
681 }
682 
683 /*
684  * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
685  */
686 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
687 	__acquires(p->pi_lock)
688 	__acquires(rq->lock)
689 {
690 	struct rq *rq;
691 
692 	for (;;) {
693 		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
694 		rq = task_rq(p);
695 		raw_spin_rq_lock(rq);
696 		/*
697 		 *	move_queued_task()		task_rq_lock()
698 		 *
699 		 *	ACQUIRE (rq->lock)
700 		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
701 		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
702 		 *	[S] ->cpu = new_cpu		[L] task_rq()
703 		 *					[L] ->on_rq
704 		 *	RELEASE (rq->lock)
705 		 *
706 		 * If we observe the old CPU in task_rq_lock(), the acquire of
707 		 * the old rq->lock will fully serialize against the stores.
708 		 *
709 		 * If we observe the new CPU in task_rq_lock(), the address
710 		 * dependency headed by '[L] rq = task_rq()' and the acquire
711 		 * will pair with the WMB to ensure we then also see migrating.
712 		 */
713 		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
714 			rq_pin_lock(rq, rf);
715 			return rq;
716 		}
717 		raw_spin_rq_unlock(rq);
718 		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
719 
720 		while (unlikely(task_on_rq_migrating(p)))
721 			cpu_relax();
722 	}
723 }
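/*
 * Illustrative usage sketch (not part of this file): the canonical
 * task_rq_lock() pattern. While both p->pi_lock and rq->lock are held,
 * the task's scheduling state (affinity, priority, placement) is stable,
 * per the "Serialization rules" above. demo_inspect_task() is
 * hypothetical.
 */
#if 0
static void demo_inspect_task(struct task_struct *p)
{
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);
	/* p cannot migrate or change its scheduling state here. */
	pr_info("task %d is on CPU %d\n", p->pid, cpu_of(rq));
	task_rq_unlock(rq, p, &rf);
}
#endif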
724 
725 /*
726  * RQ-clock updating methods:
727  */
728 
729 static void update_rq_clock_task(struct rq *rq, s64 delta)
730 {
731 /*
732  * In theory, the compiler should just see 0 here, and optimize out the call
733  * to sched_rt_avg_update. But I don't trust it...
734  */
735 	s64 __maybe_unused steal = 0, irq_delta = 0;
736 
737 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
738 	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
739 
740 	/*
741 	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
742 	 * this case when a previous update_rq_clock() happened inside a
743 	 * {soft,}IRQ region.
744 	 *
745 	 * When this happens, we stop ->clock_task and only update the
746 	 * prev_irq_time stamp to account for the part that fit, so that a next
747 	 * update will consume the rest. This ensures ->clock_task is
748 	 * monotonic.
749 	 *
750 	 * It does, however, cause some slight misattribution of {soft,}IRQ
751 	 * time; a more accurate solution would be to update the irq_time using
752 	 * the current rq->clock timestamp, except that would require using
753 	 * atomic ops.
754 	 */
755 	if (irq_delta > delta)
756 		irq_delta = delta;
757 
758 	rq->prev_irq_time += irq_delta;
759 	delta -= irq_delta;
760 	delayacct_irq(rq->curr, irq_delta);
761 #endif
762 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
763 	if (static_key_false((&paravirt_steal_rq_enabled))) {
764 		steal = paravirt_steal_clock(cpu_of(rq));
765 		steal -= rq->prev_steal_time_rq;
766 
767 		if (unlikely(steal > delta))
768 			steal = delta;
769 
770 		rq->prev_steal_time_rq += steal;
771 		delta -= steal;
772 	}
773 #endif
774 
775 	rq->clock_task += delta;
776 
777 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
778 	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
779 		update_irq_load_avg(rq, irq_delta + steal);
780 #endif
781 	update_rq_clock_pelt(rq, delta);
782 }
783 
784 void update_rq_clock(struct rq *rq)
785 {
786 	s64 delta;
787 
788 	lockdep_assert_rq_held(rq);
789 
790 	if (rq->clock_update_flags & RQCF_ACT_SKIP)
791 		return;
792 
793 #ifdef CONFIG_SCHED_DEBUG
794 	if (sched_feat(WARN_DOUBLE_CLOCK))
795 		SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
796 	rq->clock_update_flags |= RQCF_UPDATED;
797 #endif
798 
799 	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
800 	if (delta < 0)
801 		return;
802 	rq->clock += delta;
803 	update_rq_clock_task(rq, delta);
804 }
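/*
 * In short, the clock hierarchy maintained by the two functions above is
 * (a summary of the code, not new behaviour):
 *
 *	rq->clock      += delta;                        (raw sched_clock time)
 *	rq->clock_task += delta - irq_delta - steal;    (time seen by tasks)
 *
 * so ->clock_task only advances by the time actually available to tasks,
 * with IRQ time and paravirt steal time carved out when the corresponding
 * config options are enabled.
 */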
805 
806 #ifdef CONFIG_SCHED_HRTICK
807 /*
808  * Use HR-timers to deliver accurate preemption points.
809  */
810 
811 static void hrtick_clear(struct rq *rq)
812 {
813 	if (hrtimer_active(&rq->hrtick_timer))
814 		hrtimer_cancel(&rq->hrtick_timer);
815 }
816 
817 /*
818  * High-resolution timer tick.
819  * Runs from hardirq context with interrupts disabled.
820  */
821 static enum hrtimer_restart hrtick(struct hrtimer *timer)
822 {
823 	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
824 	struct rq_flags rf;
825 
826 	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
827 
828 	rq_lock(rq, &rf);
829 	update_rq_clock(rq);
830 	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
831 	rq_unlock(rq, &rf);
832 
833 	return HRTIMER_NORESTART;
834 }
835 
836 #ifdef CONFIG_SMP
837 
838 static void __hrtick_restart(struct rq *rq)
839 {
840 	struct hrtimer *timer = &rq->hrtick_timer;
841 	ktime_t time = rq->hrtick_time;
842 
843 	hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
844 }
845 
846 /*
847  * called from hardirq (IPI) context
848  */
849 static void __hrtick_start(void *arg)
850 {
851 	struct rq *rq = arg;
852 	struct rq_flags rf;
853 
854 	rq_lock(rq, &rf);
855 	__hrtick_restart(rq);
856 	rq_unlock(rq, &rf);
857 }
858 
859 /*
860  * Called to set the hrtick timer state.
861  *
862  * called with rq->lock held and IRQs disabled
863  */
864 void hrtick_start(struct rq *rq, u64 delay)
865 {
866 	struct hrtimer *timer = &rq->hrtick_timer;
867 	s64 delta;
868 
869 	/*
870 	 * Don't schedule slices shorter than 10000ns; that just
871 	 * doesn't make sense and can cause timer DoS.
872 	 */
873 	delta = max_t(s64, delay, 10000LL);
874 	rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);
875 
876 	if (rq == this_rq())
877 		__hrtick_restart(rq);
878 	else
879 		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
880 }
881 
882 #else
883 /*
884  * Called to set the hrtick timer state.
885  *
886  * called with rq->lock held and IRQs disabled
887  */
888 void hrtick_start(struct rq *rq, u64 delay)
889 {
890 	/*
891 	 * Don't schedule slices shorter than 10000ns; that just
892 	 * doesn't make sense. Rely on vruntime for fairness.
893 	 */
894 	delay = max_t(u64, delay, 10000LL);
895 	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
896 		      HRTIMER_MODE_REL_PINNED_HARD);
897 }
898 
899 #endif /* CONFIG_SMP */
900 
901 static void hrtick_rq_init(struct rq *rq)
902 {
903 #ifdef CONFIG_SMP
904 	INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
905 #endif
906 	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
907 	rq->hrtick_timer.function = hrtick;
908 }
909 #else	/* CONFIG_SCHED_HRTICK */
910 static inline void hrtick_clear(struct rq *rq)
911 {
912 }
913 
914 static inline void hrtick_rq_init(struct rq *rq)
915 {
916 }
917 #endif	/* CONFIG_SCHED_HRTICK */
918 
919 /*
920  * A try_cmpxchg()-based fetch_or() macro, so it works for different integer types:
921  */
922 #define fetch_or(ptr, mask)						\
923 	({								\
924 		typeof(ptr) _ptr = (ptr);				\
925 		typeof(mask) _mask = (mask);				\
926 		typeof(*_ptr) _val = *_ptr;				\
927 									\
928 		do {							\
929 		} while (!try_cmpxchg(_ptr, &_val, _val | _mask));	\
930 	_val;								\
931 })
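/*
 * Illustrative sketch (not part of this file): the same fetch_or()
 * semantics expressed with C11 atomics in userspace. Like try_cmpxchg(),
 * atomic_compare_exchange_weak() reloads the expected value on failure,
 * so the loop body stays empty. The demo_* name is hypothetical.
 */
#if 0
#include <stdatomic.h>

static unsigned long demo_fetch_or(_Atomic unsigned long *ptr,
				   unsigned long mask)
{
	unsigned long val = atomic_load(ptr);

	while (!atomic_compare_exchange_weak(ptr, &val, val | mask))
		;	/* 'val' was refreshed with the current value */

	return val;	/* old value, as fetch_or() must return */
}
#endif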
932 
933 #if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
934 /*
935  * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
936  * this avoids any races wrt polling state changes and thereby avoids
937  * spurious IPIs.
938  */
939 static inline bool set_nr_and_not_polling(struct task_struct *p)
940 {
941 	struct thread_info *ti = task_thread_info(p);
942 	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
943 }
944 
945 /*
946  * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
947  *
948  * If this returns true, then the idle task promises to call
949  * sched_ttwu_pending() and reschedule soon.
950  */
951 static bool set_nr_if_polling(struct task_struct *p)
952 {
953 	struct thread_info *ti = task_thread_info(p);
954 	typeof(ti->flags) val = READ_ONCE(ti->flags);
955 
956 	do {
957 		if (!(val & _TIF_POLLING_NRFLAG))
958 			return false;
959 		if (val & _TIF_NEED_RESCHED)
960 			return true;
961 	} while (!try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED));
962 
963 	return true;
964 }
965 
966 #else
967 static inline bool set_nr_and_not_polling(struct task_struct *p)
968 {
969 	set_tsk_need_resched(p);
970 	return true;
971 }
972 
973 #ifdef CONFIG_SMP
974 static inline bool set_nr_if_polling(struct task_struct *p)
975 {
976 	return false;
977 }
978 #endif
979 #endif
980 
981 static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
982 {
983 	struct wake_q_node *node = &task->wake_q;
984 
985 	/*
986 	 * Atomically grab the task; if ->wake_q is non-NULL already, it means
987 	 * it's already queued (either by us or someone else) and will get the
988 	 * wakeup due to that.
989 	 *
990 	 * In order to ensure that a pending wakeup will observe our pending
991 	 * state, even in the failed case, an explicit smp_mb() must be used.
992 	 */
993 	smp_mb__before_atomic();
994 	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
995 		return false;
996 
997 	/*
998 	 * The head is context local, there can be no concurrency.
999 	 */
1000 	*head->lastp = node;
1001 	head->lastp = &node->next;
1002 	return true;
1003 }
1004 
1005 /**
1006  * wake_q_add() - queue a wakeup for 'later' waking.
1007  * @head: the wake_q_head to add @task to
1008  * @task: the task to queue for 'later' wakeup
1009  *
1010  * Queue a task for later wakeup, most likely by the wake_up_q() call in the
1011  * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
1012  * instantly.
1013  *
1014  * This function must be used as-if it were wake_up_process(); IOW the task
1015  * must be ready to be woken at this location.
1016  */
1017 void wake_q_add(struct wake_q_head *head, struct task_struct *task)
1018 {
1019 	if (__wake_q_add(head, task))
1020 		get_task_struct(task);
1021 }
1022 
1023 /**
1024  * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
1025  * @head: the wake_q_head to add @task to
1026  * @task: the task to queue for 'later' wakeup
1027  *
1028  * Queue a task for later wakeup, most likely by the wake_up_q() call in the
1029  * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
1030  * instantly.
1031  *
1032  * This function must be used as-if it were wake_up_process(); IOW the task
1033  * must be ready to be woken at this location.
1034  *
1035  * This function is essentially a task-safe equivalent to wake_q_add(). Callers
1036  * that already hold reference to @task can call the 'safe' version and trust
1037  * wake_q to do the right thing depending whether or not the @task is already
1038  * queued for wakeup.
1039  */
1040 void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
1041 {
1042 	if (!__wake_q_add(head, task))
1043 		put_task_struct(task);
1044 }
1045 
1046 void wake_up_q(struct wake_q_head *head)
1047 {
1048 	struct wake_q_node *node = head->first;
1049 
1050 	while (node != WAKE_Q_TAIL) {
1051 		struct task_struct *task;
1052 
1053 		task = container_of(node, struct task_struct, wake_q);
1054 		/* Task can safely be re-inserted now: */
1055 		node = node->next;
1056 		task->wake_q.next = NULL;
1057 
1058 		/*
1059 		 * wake_up_process() executes a full barrier, which pairs with
1060 		 * the queueing in wake_q_add() so as not to miss wakeups.
1061 		 */
1062 		wake_up_process(task);
1063 		put_task_struct(task);
1064 	}
1065 }
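/*
 * Illustrative usage sketch (not part of this file): the typical wake_q
 * pattern of collecting wakeups under a lock and issuing them after the
 * lock is dropped, so woken tasks never contend on it. The waiter list
 * and the demo_* names are hypothetical.
 */
#if 0
struct demo_waiter {
	struct list_head	node;
	struct task_struct	*task;
};

static void demo_wake_all(spinlock_t *lock, struct list_head *waiters)
{
	DEFINE_WAKE_Q(wake_q);
	struct demo_waiter *w;

	spin_lock(lock);
	list_for_each_entry(w, waiters, node)
		wake_q_add(&wake_q, w->task);	/* grabs a task reference */
	spin_unlock(lock);

	wake_up_q(&wake_q);	/* actual wakeups, outside the lock */
}
#endif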
1066 
1067 /*
1068  * resched_curr - mark rq's current task 'to be rescheduled now'.
1069  *
1070  * On UP this means the setting of the need_resched flag, on SMP it
1071  * might also involve a cross-CPU call to trigger the scheduler on
1072  * the target CPU.
1073  */
1074 void resched_curr(struct rq *rq)
1075 {
1076 	struct task_struct *curr = rq->curr;
1077 	int cpu;
1078 
1079 	lockdep_assert_rq_held(rq);
1080 
1081 	if (test_tsk_need_resched(curr))
1082 		return;
1083 
1084 	cpu = cpu_of(rq);
1085 
1086 	if (cpu == smp_processor_id()) {
1087 		set_tsk_need_resched(curr);
1088 		set_preempt_need_resched();
1089 		return;
1090 	}
1091 
1092 	if (set_nr_and_not_polling(curr))
1093 		smp_send_reschedule(cpu);
1094 	else
1095 		trace_sched_wake_idle_without_ipi(cpu);
1096 }
1097 
1098 void resched_cpu(int cpu)
1099 {
1100 	struct rq *rq = cpu_rq(cpu);
1101 	unsigned long flags;
1102 
1103 	raw_spin_rq_lock_irqsave(rq, flags);
1104 	if (cpu_online(cpu) || cpu == smp_processor_id())
1105 		resched_curr(rq);
1106 	raw_spin_rq_unlock_irqrestore(rq, flags);
1107 }
1108 
1109 #ifdef CONFIG_SMP
1110 #ifdef CONFIG_NO_HZ_COMMON
1111 /*
1112  * In the semi-idle case, use the nearest busy CPU for migrating timers
1113  * from an idle CPU. This is good for power savings.
1114  *
1115  * We don't do a similar optimization for a completely idle system, as
1116  * selecting an idle CPU would add more delay to the timers than intended
1117  * (as that CPU's timer base may not be up to date wrt jiffies etc.).
1118  */
1119 int get_nohz_timer_target(void)
1120 {
1121 	int i, cpu = smp_processor_id(), default_cpu = -1;
1122 	struct sched_domain *sd;
1123 	const struct cpumask *hk_mask;
1124 
1125 	if (housekeeping_cpu(cpu, HK_TYPE_TIMER)) {
1126 		if (!idle_cpu(cpu))
1127 			return cpu;
1128 		default_cpu = cpu;
1129 	}
1130 
1131 	hk_mask = housekeeping_cpumask(HK_TYPE_TIMER);
1132 
1133 	guard(rcu)();
1134 
1135 	for_each_domain(cpu, sd) {
1136 		for_each_cpu_and(i, sched_domain_span(sd), hk_mask) {
1137 			if (cpu == i)
1138 				continue;
1139 
1140 			if (!idle_cpu(i))
1141 				return i;
1142 		}
1143 	}
1144 
1145 	if (default_cpu == -1)
1146 		default_cpu = housekeeping_any_cpu(HK_TYPE_TIMER);
1147 
1148 	return default_cpu;
1149 }
1150 
1151 /*
1152  * When add_timer_on() enqueues a timer into the timer wheel of an
1153  * idle CPU then this timer might expire before the next timer event
1154  * which is scheduled to wake up that CPU. In case of a completely
1155  * idle system the next event might even be infinitely far into the
1156  * future. wake_up_idle_cpu() ensures that the CPU is woken up and
1157  * leaves the inner idle loop so the newly added timer is taken into
1158  * account when the CPU goes back to idle and evaluates the timer
1159  * wheel for the next timer event.
1160  */
1161 static void wake_up_idle_cpu(int cpu)
1162 {
1163 	struct rq *rq = cpu_rq(cpu);
1164 
1165 	if (cpu == smp_processor_id())
1166 		return;
1167 
1168 	/*
1169 	 * Set TIF_NEED_RESCHED and send an IPI if in the non-polling
1170 	 * part of the idle loop. This forces an exit from the idle loop
1171 	 * and a round trip to schedule(). Now this could be optimized
1172 	 * because a simple new idle loop iteration is enough to
1173 	 * re-evaluate the next tick. Provided some re-ordering of tick
1174 	 * nohz functions that would need to follow TIF_NR_POLLING
1175 	 * nohz functions that would need to follow TIF_POLLING_NRFLAG
1176 	 *
1177 	 * - On most architectures, a simple fetch_or on ti::flags with a
1178 	 *   "0" value would be enough to know if an IPI needs to be sent.
1179 	 *
1180 	 * - x86 needs to perform a last need_resched() check between
1181 	 *   monitor and mwait which doesn't take timers into account.
1182 	 *   There a dedicated TIF_TIMER flag would be required to
1183 	 *   fetch_or here and be checked along with TIF_NEED_RESCHED
1184 	 *   before mwait().
1185 	 *
1186 	 * However, remote timer enqueue is not such a frequent event
1187 	 * and testing of the above solutions didn't appear to yield
1188 	 * much benefit.
1189 	 */
1190 	if (set_nr_and_not_polling(rq->idle))
1191 		smp_send_reschedule(cpu);
1192 	else
1193 		trace_sched_wake_idle_without_ipi(cpu);
1194 }
1195 
1196 static bool wake_up_full_nohz_cpu(int cpu)
1197 {
1198 	/*
1199 	 * We just need the target to call irq_exit() and re-evaluate
1200 	 * the next tick. The nohz full kick at least implies that.
1201 	 * If needed, we can still optimize that later with an
1202 	 * empty IRQ.
1203 	 */
1204 	if (cpu_is_offline(cpu))
1205 		return true;  /* Don't try to wake offline CPUs. */
1206 	if (tick_nohz_full_cpu(cpu)) {
1207 		if (cpu != smp_processor_id() ||
1208 		    tick_nohz_tick_stopped())
1209 			tick_nohz_full_kick_cpu(cpu);
1210 		return true;
1211 	}
1212 
1213 	return false;
1214 }
1215 
1216 /*
1217  * Wake up the specified CPU.  If the CPU is going offline, it is the
1218  * caller's responsibility to deal with the lost wakeup, for example,
1219  * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
1220  */
1221 void wake_up_nohz_cpu(int cpu)
1222 {
1223 	if (!wake_up_full_nohz_cpu(cpu))
1224 		wake_up_idle_cpu(cpu);
1225 }
1226 
1227 static void nohz_csd_func(void *info)
1228 {
1229 	struct rq *rq = info;
1230 	int cpu = cpu_of(rq);
1231 	unsigned int flags;
1232 
1233 	/*
1234 	 * Release the rq::nohz_csd.
1235 	 */
1236 	flags = atomic_fetch_andnot(NOHZ_KICK_MASK | NOHZ_NEWILB_KICK, nohz_flags(cpu));
1237 	WARN_ON(!(flags & NOHZ_KICK_MASK));
1238 
1239 	rq->idle_balance = idle_cpu(cpu);
1240 	if (rq->idle_balance && !need_resched()) {
1241 		rq->nohz_idle_balance = flags;
1242 		raise_softirq_irqoff(SCHED_SOFTIRQ);
1243 	}
1244 }
1245 
1246 #endif /* CONFIG_NO_HZ_COMMON */
1247 
1248 #ifdef CONFIG_NO_HZ_FULL
1249 static inline bool __need_bw_check(struct rq *rq, struct task_struct *p)
1250 {
1251 	if (rq->nr_running != 1)
1252 		return false;
1253 
1254 	if (p->sched_class != &fair_sched_class)
1255 		return false;
1256 
1257 	if (!task_on_rq_queued(p))
1258 		return false;
1259 
1260 	return true;
1261 }
1262 
1263 bool sched_can_stop_tick(struct rq *rq)
1264 {
1265 	int fifo_nr_running;
1266 
1267 	/* Deadline tasks, even if single, need the tick */
1268 	if (rq->dl.dl_nr_running)
1269 		return false;
1270 
1271 	/*
1272 	 * If there is more than one RR task, we need the tick to affect the
1273 	 * actual RR behaviour.
1274 	 */
1275 	if (rq->rt.rr_nr_running) {
1276 		if (rq->rt.rr_nr_running == 1)
1277 			return true;
1278 		else
1279 			return false;
1280 	}
1281 
1282 	/*
1283 	 * If there are no RR tasks, but there are FIFO tasks, we can skip the
1284 	 * tick: there is no forced preemption between FIFO tasks.
1285 	 */
1286 	fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
1287 	if (fifo_nr_running)
1288 		return true;
1289 
1290 	/*
1291 	 * If there are no DL, RR, or FIFO tasks, there must only be CFS or SCX tasks
1292 	 * left. For CFS, if there's more than one we need the tick for
1293 	 * involuntary preemption. For SCX, ask.
1294 	 */
1295 	if (scx_enabled() && !scx_can_stop_tick(rq))
1296 		return false;
1297 
1298 	if (rq->cfs.nr_running > 1)
1299 		return false;
1300 
1301 	/*
1302 	 * If there is one task and it has CFS runtime bandwidth constraints
1303 	 * and it's on the CPU now, we don't want to stop the tick.
1304 	 * This check prevents clearing the bit if a newly enqueued task here is
1305 	 * dequeued by migrating while the constrained task continues to run.
1306 	 * E.g. going from 2->1 without going through pick_next_task().
1307 	 */
1308 	if (__need_bw_check(rq, rq->curr)) {
1309 		if (cfs_task_bw_constrained(rq->curr))
1310 			return false;
1311 	}
1312 
1313 	return true;
1314 }
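/*
 * A few worked examples of the decision above (derived from the code):
 *
 *	1 DL task			-> keep the tick
 *	2 RR tasks			-> keep the tick (RR rotation)
 *	1 RR task			-> tick can stop
 *	3 FIFO tasks			-> tick can stop (no forced preemption)
 *	2 CFS tasks			-> keep the tick (involuntary preemption)
 *	1 CFS task, bw-constrained	-> keep the tick
 */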
1315 #endif /* CONFIG_NO_HZ_FULL */
1316 #endif /* CONFIG_SMP */
1317 
1318 #if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
1319 			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
1320 /*
1321  * Iterate task_group tree rooted at *from, calling @down when first entering a
1322  * node and @up when leaving it for the final time.
1323  *
1324  * Caller must hold rcu_lock or sufficient equivalent.
1325  */
1326 int walk_tg_tree_from(struct task_group *from,
1327 			     tg_visitor down, tg_visitor up, void *data)
1328 {
1329 	struct task_group *parent, *child;
1330 	int ret;
1331 
1332 	parent = from;
1333 
1334 down:
1335 	ret = (*down)(parent, data);
1336 	if (ret)
1337 		goto out;
1338 	list_for_each_entry_rcu(child, &parent->children, siblings) {
1339 		parent = child;
1340 		goto down;
1341 
1342 up:
1343 		continue;
1344 	}
1345 	ret = (*up)(parent, data);
1346 	if (ret || parent == from)
1347 		goto out;
1348 
1349 	child = parent;
1350 	parent = parent->parent;
1351 	if (parent)
1352 		goto up;
1353 out:
1354 	return ret;
1355 }
1356 
1357 int tg_nop(struct task_group *tg, void *data)
1358 {
1359 	return 0;
1360 }
1361 #endif
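/*
 * Illustrative usage sketch (not part of this file): walking the whole
 * task_group tree from the root, calling a visitor on the way down and
 * tg_nop() on the way up. The demo_* names are hypothetical.
 */
#if 0
static int demo_tg_visit(struct task_group *tg, void *data)
{
	(*(int *)data)++;	/* e.g. count the groups */
	return 0;		/* non-zero would abort the walk */
}

static int demo_count_task_groups(void)
{
	int count = 0;

	rcu_read_lock();	/* caller must hold rcu_lock */
	walk_tg_tree_from(&root_task_group, demo_tg_visit, tg_nop, &count);
	rcu_read_unlock();

	return count;
}
#endif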
1362 
1363 void set_load_weight(struct task_struct *p, bool update_load)
1364 {
1365 	int prio = p->static_prio - MAX_RT_PRIO;
1366 	struct load_weight lw;
1367 
1368 	if (task_has_idle_policy(p)) {
1369 		lw.weight = scale_load(WEIGHT_IDLEPRIO);
1370 		lw.inv_weight = WMULT_IDLEPRIO;
1371 	} else {
1372 		lw.weight = scale_load(sched_prio_to_weight[prio]);
1373 		lw.inv_weight = sched_prio_to_wmult[prio];
1374 	}
1375 
1376 	/*
1377 	 * SCHED_OTHER tasks have to update their load when changing their
1378 	 * weight.
1379 	 */
1380 	if (update_load && p->sched_class->reweight_task)
1381 		p->sched_class->reweight_task(task_rq(p), p, &lw);
1382 	else
1383 		p->se.load = lw;
1384 }
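/*
 * Worked example for the weight lookup above: for a SCHED_OTHER task,
 * prio indexes sched_prio_to_weight[] by nice value, with nice 0 in the
 * middle. From that table, nice 0 -> 1024, nice -1 -> 1277, nice 1 -> 820:
 * each nice step changes the weight by roughly 1.25x, so a nice -1 task
 * competing with a nice 0 task receives about 25% more CPU time.
 */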
1385 
1386 #ifdef CONFIG_UCLAMP_TASK
1387 /*
1388  * Serializes updates of utilization clamp values
1389  *
1390  * The (slow-path) user-space triggers utilization clamp value updates which
1391  * can require updates on (fast-path) scheduler's data structures used to
1392  * support enqueue/dequeue operations.
1393  * While the per-CPU rq lock protects fast-path update operations, user-space
1394  * requests are serialized using a mutex to reduce the risk of conflicting
1395  * updates or API abuses.
1396  */
1397 static DEFINE_MUTEX(uclamp_mutex);
1398 
1399 /* Max allowed minimum utilization */
1400 static unsigned int __maybe_unused sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;
1401 
1402 /* Max allowed maximum utilization */
1403 static unsigned int __maybe_unused sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;
1404 
1405 /*
1406  * By default RT tasks run at the maximum performance point/capacity of the
1407  * system. Uclamp enforces this by always setting UCLAMP_MIN of RT tasks to
1408  * SCHED_CAPACITY_SCALE.
1409  *
1410  * This knob allows admins to change the default behavior when uclamp is being
1411  * used. In battery powered devices, particularly, running at the maximum
1412  * capacity and frequency will increase energy consumption and shorten the
1413  * battery life.
1414  *
1415  * This knob only affects RT tasks whose uclamp_se->user_defined == false.
1416  *
1417  * This knob will not override the system default sched_util_clamp_min defined
1418  * above.
1419  */
1420 unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE;
1421 
1422 /* All clamps are required to be less or equal than these values */
1423 static struct uclamp_se uclamp_default[UCLAMP_CNT];
1424 
1425 /*
1426  * This static key is used to reduce the uclamp overhead in the fast path. It
1427  * primarily disables the call to uclamp_rq_{inc, dec}() in
1428  * enqueue/dequeue_task().
1429  *
1430  * This allows users to continue to enable uclamp in their kernel config with
1431  * minimum uclamp overhead in the fast path.
1432  *
1433  * As soon as userspace modifies any of the uclamp knobs, the static key is
1434  * enabled, since we then have actual users that make use of uclamp
1435  * functionality.
1436  *
1437  * The knobs that would enable this static key are:
1438  *
1439  *   * A task modifying its uclamp value with sched_setattr().
1440  *   * An admin modifying the sysctl_sched_uclamp_{min, max} via procfs.
1441  *   * An admin modifying the cgroup cpu.uclamp.{min, max}
1442  */
1443 DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);
1444 
1445 static inline unsigned int
1446 uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
1447 		  unsigned int clamp_value)
1448 {
1449 	/*
1450 	 * Avoid blocked utilization pushing up the frequency when we go
1451 	 * idle (which drops the max-clamp) by retaining the last known
1452 	 * max-clamp.
1453 	 */
1454 	if (clamp_id == UCLAMP_MAX) {
1455 		rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
1456 		return clamp_value;
1457 	}
1458 
1459 	return uclamp_none(UCLAMP_MIN);
1460 }
1461 
1462 static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
1463 				     unsigned int clamp_value)
1464 {
1465 	/* Reset max-clamp retention only on idle exit */
1466 	if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
1467 		return;
1468 
1469 	uclamp_rq_set(rq, clamp_id, clamp_value);
1470 }
1471 
1472 static inline
1473 unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
1474 				   unsigned int clamp_value)
1475 {
1476 	struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
1477 	int bucket_id = UCLAMP_BUCKETS - 1;
1478 
1479 	/*
1480 	 * Since both min and max clamps are max aggregated, find the
1481 	 * topmost bucket with tasks in it.
1482 	 */
1483 	for ( ; bucket_id >= 0; bucket_id--) {
1484 		if (!bucket[bucket_id].tasks)
1485 			continue;
1486 		return bucket[bucket_id].value;
1487 	}
1488 
1489 	/* No tasks -- default clamp values */
1490 	return uclamp_idle_value(rq, clamp_id, clamp_value);
1491 }
1492 
1493 static void __uclamp_update_util_min_rt_default(struct task_struct *p)
1494 {
1495 	unsigned int default_util_min;
1496 	struct uclamp_se *uc_se;
1497 
1498 	lockdep_assert_held(&p->pi_lock);
1499 
1500 	uc_se = &p->uclamp_req[UCLAMP_MIN];
1501 
1502 	/* Only sync if user didn't override the default */
1503 	if (uc_se->user_defined)
1504 		return;
1505 
1506 	default_util_min = sysctl_sched_uclamp_util_min_rt_default;
1507 	uclamp_se_set(uc_se, default_util_min, false);
1508 }
1509 
1510 static void uclamp_update_util_min_rt_default(struct task_struct *p)
1511 {
1512 	if (!rt_task(p))
1513 		return;
1514 
1515 	/* Protect updates to p->uclamp_* */
1516 	guard(task_rq_lock)(p);
1517 	__uclamp_update_util_min_rt_default(p);
1518 }
1519 
1520 static inline struct uclamp_se
1521 uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
1522 {
1523 	/* Copy by value as we could modify it */
1524 	struct uclamp_se uc_req = p->uclamp_req[clamp_id];
1525 #ifdef CONFIG_UCLAMP_TASK_GROUP
1526 	unsigned int tg_min, tg_max, value;
1527 
1528 	/*
1529 	 * Tasks in autogroups or root task group will be
1530 	 * restricted by system defaults.
1531 	 */
1532 	if (task_group_is_autogroup(task_group(p)))
1533 		return uc_req;
1534 	if (task_group(p) == &root_task_group)
1535 		return uc_req;
1536 
1537 	tg_min = task_group(p)->uclamp[UCLAMP_MIN].value;
1538 	tg_max = task_group(p)->uclamp[UCLAMP_MAX].value;
1539 	value = uc_req.value;
1540 	value = clamp(value, tg_min, tg_max);
1541 	uclamp_se_set(&uc_req, value, false);
1542 #endif
1543 
1544 	return uc_req;
1545 }
1546 
1547 /*
1548  * The effective clamp bucket index of a task depends on, by increasing
1549  * priority:
1550  * - the task specific clamp value, when explicitly requested from userspace
1551  * - the task group effective clamp value, for tasks neither in the root
1552  *   group nor in an autogroup
1553  * - the system default clamp value, defined by the sysadmin
1554  */
1555 static inline struct uclamp_se
1556 uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
1557 {
1558 	struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id);
1559 	struct uclamp_se uc_max = uclamp_default[clamp_id];
1560 
1561 	/* System default restrictions always apply */
1562 	if (unlikely(uc_req.value > uc_max.value))
1563 		return uc_max;
1564 
1565 	return uc_req;
1566 }
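/*
 * Worked example of the composition above (values are illustrative): a
 * task requesting uclamp_min = 512 inside a group whose effective range
 * is [0, 300] is first restricted to 300 by uclamp_tg_restrict(); if the
 * admin had also lowered sysctl_sched_uclamp_util_min to 200, the system
 * default wins and the effective value becomes 200.
 */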
1567 
1568 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
1569 {
1570 	struct uclamp_se uc_eff;
1571 
1572 	/* Task currently refcounted: use back-annotated (effective) value */
1573 	if (p->uclamp[clamp_id].active)
1574 		return (unsigned long)p->uclamp[clamp_id].value;
1575 
1576 	uc_eff = uclamp_eff_get(p, clamp_id);
1577 
1578 	return (unsigned long)uc_eff.value;
1579 }
1580 
1581 /*
1582  * When a task is enqueued on a rq, the clamp bucket currently defined by the
1583  * task's uclamp::bucket_id is refcounted on that rq. This also immediately
1584  * updates the rq's clamp value if required.
1585  *
1586  * Tasks can have a task-specific value requested from user-space; track
1587  * within each bucket the maximum value for the tasks refcounted in it.
1588  * This "local max aggregation" allows tracking the exact "requested" value
1589  * for each bucket when all its RUNNABLE tasks require the same clamp.
1590  */
1591 static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
1592 				    enum uclamp_id clamp_id)
1593 {
1594 	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1595 	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1596 	struct uclamp_bucket *bucket;
1597 
1598 	lockdep_assert_rq_held(rq);
1599 
1600 	/* Update task effective clamp */
1601 	p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);
1602 
1603 	bucket = &uc_rq->bucket[uc_se->bucket_id];
1604 	bucket->tasks++;
1605 	uc_se->active = true;
1606 
1607 	uclamp_idle_reset(rq, clamp_id, uc_se->value);
1608 
1609 	/*
1610 	 * Local max aggregation: rq buckets always track the max
1611 	 * "requested" clamp value of its RUNNABLE tasks.
1612 	 */
1613 	if (bucket->tasks == 1 || uc_se->value > bucket->value)
1614 		bucket->value = uc_se->value;
1615 
1616 	if (uc_se->value > uclamp_rq_get(rq, clamp_id))
1617 		uclamp_rq_set(rq, clamp_id, uc_se->value);
1618 }
1619 
1620 /*
1621  * When a task is dequeued from a rq, the clamp bucket refcounted by the task
1622  * is released. If this is the last task reference counting the rq's max
1623  * active clamp value, then the rq's clamp value is updated.
1624  *
1625  * Both refcounted tasks and rq's cached clamp values are expected to be
1626  * always valid. If it's detected they are not, as defensive programming,
1627  * enforce the expected state and warn.
1628  */
1629 static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
1630 				    enum uclamp_id clamp_id)
1631 {
1632 	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1633 	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1634 	struct uclamp_bucket *bucket;
1635 	unsigned int bkt_clamp;
1636 	unsigned int rq_clamp;
1637 
1638 	lockdep_assert_rq_held(rq);
1639 
1640 	/*
1641 	 * If sched_uclamp_used was enabled after task @p was enqueued,
1642 	 * we could end up with unbalanced call to uclamp_rq_dec_id().
1643 	 *
1644 	 * In this case the uc_se->active flag should be false since no uclamp
1645 	 * accounting was performed at enqueue time and we can just return
1646 	 * here.
1647 	 *
1648 	 * Need to be careful of the following enqueue/dequeue ordering
1649 	 * problem too:
1650 	 *
1651 	 *	enqueue(taskA)
1652 	 *	// sched_uclamp_used gets enabled
1653 	 *	enqueue(taskB)
1654 	 *	dequeue(taskA)
1655 	 *	// Must not decrement bucket->tasks here
1656 	 *	dequeue(taskB)
1657 	 *
1658 	 * where we could end up with stale data in uc_se and
1659 	 * bucket[uc_se->bucket_id].
1660 	 *
1661 	 * The following check here eliminates the possibility of such race.
1662 	 */
1663 	if (unlikely(!uc_se->active))
1664 		return;
1665 
1666 	bucket = &uc_rq->bucket[uc_se->bucket_id];
1667 
1668 	SCHED_WARN_ON(!bucket->tasks);
1669 	if (likely(bucket->tasks))
1670 		bucket->tasks--;
1671 
1672 	uc_se->active = false;
1673 
1674 	/*
1675 	 * Keep "local max aggregation" simple and accept to (possibly)
1676 	 * overboost some RUNNABLE tasks in the same bucket.
1677 	 * The rq clamp bucket value is reset to its base value whenever
1678 	 * there are no more RUNNABLE tasks refcounting it.
1679 	 */
1680 	if (likely(bucket->tasks))
1681 		return;
1682 
1683 	rq_clamp = uclamp_rq_get(rq, clamp_id);
1684 	/*
1685 	 * Defensive programming: this should never happen. If it happens,
1686 	 * e.g. due to future modification, warn and fix up the expected value.
1687 	 */
1688 	SCHED_WARN_ON(bucket->value > rq_clamp);
1689 	if (bucket->value >= rq_clamp) {
1690 		bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
1691 		uclamp_rq_set(rq, clamp_id, bkt_clamp);
1692 	}
1693 }
1694 
1695 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
1696 {
1697 	enum uclamp_id clamp_id;
1698 
1699 	/*
1700 	 * Avoid any overhead until uclamp is actually used by userspace.
1701 	 *
1702 	 * The condition is constructed such that a NOP is generated when
1703 	 * sched_uclamp_used is disabled.
1704 	 */
1705 	if (!static_branch_unlikely(&sched_uclamp_used))
1706 		return;
1707 
1708 	if (unlikely(!p->sched_class->uclamp_enabled))
1709 		return;
1710 
1711 	if (p->se.sched_delayed)
1712 		return;
1713 
1714 	for_each_clamp_id(clamp_id)
1715 		uclamp_rq_inc_id(rq, p, clamp_id);
1716 
1717 	/* Reset clamp idle holding when there is one RUNNABLE task */
1718 	if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
1719 		rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
1720 }
1721 
1722 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
1723 {
1724 	enum uclamp_id clamp_id;
1725 
1726 	/*
1727 	 * Avoid any overhead until uclamp is actually used by userspace.
1728 	 *
1729 	 * The condition is constructed such that a NOP is generated when
1730 	 * sched_uclamp_used is disabled.
1731 	 */
1732 	if (!static_branch_unlikely(&sched_uclamp_used))
1733 		return;
1734 
1735 	if (unlikely(!p->sched_class->uclamp_enabled))
1736 		return;
1737 
1738 	if (p->se.sched_delayed)
1739 		return;
1740 
1741 	for_each_clamp_id(clamp_id)
1742 		uclamp_rq_dec_id(rq, p, clamp_id);
1743 }
1744 
1745 static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p,
1746 				      enum uclamp_id clamp_id)
1747 {
1748 	if (!p->uclamp[clamp_id].active)
1749 		return;
1750 
1751 	uclamp_rq_dec_id(rq, p, clamp_id);
1752 	uclamp_rq_inc_id(rq, p, clamp_id);
1753 
1754 	/*
1755 	 * Make sure to clear the idle flag if we've transiently reached 0
1756 	 * active tasks on rq.
1757 	 */
1758 	if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE))
1759 		rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
1760 }
1761 
1762 static inline void
1763 uclamp_update_active(struct task_struct *p)
1764 {
1765 	enum uclamp_id clamp_id;
1766 	struct rq_flags rf;
1767 	struct rq *rq;
1768 
1769 	/*
1770 	 * Lock the task and the rq where the task is (or was) queued.
1771 	 *
1772 	 * We might lock the (previous) rq of a !RUNNABLE task, but that's the
1773 	 * price to pay to safely serialize util_{min,max} updates with
1774 	 * enqueues, dequeues and migration operations.
1775 	 * This is the same locking schema used by __set_cpus_allowed_ptr().
1776 	 */
1777 	rq = task_rq_lock(p, &rf);
1778 
1779 	/*
1780 	 * Setting the clamp bucket is serialized by task_rq_lock().
1781 	 * If the task is not yet RUNNABLE and its task_struct is not
1782 	 * affecting a valid clamp bucket, the next time it's enqueued,
1783 	 * it will already see the updated clamp bucket value.
1784 	 */
1785 	for_each_clamp_id(clamp_id)
1786 		uclamp_rq_reinc_id(rq, p, clamp_id);
1787 
1788 	task_rq_unlock(rq, p, &rf);
1789 }
1790 
1791 #ifdef CONFIG_UCLAMP_TASK_GROUP
1792 static inline void
1793 uclamp_update_active_tasks(struct cgroup_subsys_state *css)
1794 {
1795 	struct css_task_iter it;
1796 	struct task_struct *p;
1797 
1798 	css_task_iter_start(css, 0, &it);
1799 	while ((p = css_task_iter_next(&it)))
1800 		uclamp_update_active(p);
1801 	css_task_iter_end(&it);
1802 }
1803 
1804 static void cpu_util_update_eff(struct cgroup_subsys_state *css);
1805 #endif
1806 
1807 #ifdef CONFIG_SYSCTL
1808 #ifdef CONFIG_UCLAMP_TASK_GROUP
1809 static void uclamp_update_root_tg(void)
1810 {
1811 	struct task_group *tg = &root_task_group;
1812 
1813 	uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN],
1814 		      sysctl_sched_uclamp_util_min, false);
1815 	uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
1816 		      sysctl_sched_uclamp_util_max, false);
1817 
1818 	guard(rcu)();
1819 	cpu_util_update_eff(&root_task_group.css);
1820 }
1821 #else
1822 static void uclamp_update_root_tg(void) { }
1823 #endif
1824 
1825 static void uclamp_sync_util_min_rt_default(void)
1826 {
1827 	struct task_struct *g, *p;
1828 
1829 	/*
1830 	 * copy_process()			sysctl_uclamp
1831 	 *					  uclamp_min_rt = X;
1832 	 *   write_lock(&tasklist_lock)		  read_lock(&tasklist_lock)
1833 	 *   // link thread			  smp_mb__after_spinlock()
1834 	 *   write_unlock(&tasklist_lock)	  read_unlock(&tasklist_lock);
1835 	 *   sched_post_fork()			  for_each_process_thread()
1836 	 *     __uclamp_sync_rt()		    __uclamp_sync_rt()
1837 	 *
1838 	 * Ensures that either sched_post_fork() will observe the new
1839 	 * uclamp_min_rt or for_each_process_thread() will observe the new
1840 	 * task.
1841 	 */
1842 	read_lock(&tasklist_lock);
1843 	smp_mb__after_spinlock();
1844 	read_unlock(&tasklist_lock);
1845 
1846 	guard(rcu)();
1847 	for_each_process_thread(g, p)
1848 		uclamp_update_util_min_rt_default(p);
1849 }
1850 
1851 static int sysctl_sched_uclamp_handler(const struct ctl_table *table, int write,
1852 				void *buffer, size_t *lenp, loff_t *ppos)
1853 {
1854 	bool update_root_tg = false;
1855 	int old_min, old_max, old_min_rt;
1856 	int result;
1857 
1858 	guard(mutex)(&uclamp_mutex);
1859 
1860 	old_min = sysctl_sched_uclamp_util_min;
1861 	old_max = sysctl_sched_uclamp_util_max;
1862 	old_min_rt = sysctl_sched_uclamp_util_min_rt_default;
1863 
1864 	result = proc_dointvec(table, write, buffer, lenp, ppos);
1865 	if (result)
1866 		goto undo;
1867 	if (!write)
1868 		return 0;
1869 
1870 	if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
1871 	    sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE	||
1872 	    sysctl_sched_uclamp_util_min_rt_default > SCHED_CAPACITY_SCALE) {
1873 
1874 		result = -EINVAL;
1875 		goto undo;
1876 	}
1877 
1878 	if (old_min != sysctl_sched_uclamp_util_min) {
1879 		uclamp_se_set(&uclamp_default[UCLAMP_MIN],
1880 			      sysctl_sched_uclamp_util_min, false);
1881 		update_root_tg = true;
1882 	}
1883 	if (old_max != sysctl_sched_uclamp_util_max) {
1884 		uclamp_se_set(&uclamp_default[UCLAMP_MAX],
1885 			      sysctl_sched_uclamp_util_max, false);
1886 		update_root_tg = true;
1887 	}
1888 
1889 	if (update_root_tg) {
1890 		static_branch_enable(&sched_uclamp_used);
1891 		uclamp_update_root_tg();
1892 	}
1893 
1894 	if (old_min_rt != sysctl_sched_uclamp_util_min_rt_default) {
1895 		static_branch_enable(&sched_uclamp_used);
1896 		uclamp_sync_util_min_rt_default();
1897 	}
1898 
1899 	/*
1900 	 * We update all RUNNABLE tasks only when task groups are in use.
1901 	 * Otherwise, keep it simple and do just a lazy update at each next
1902 	 * task enqueue time.
1903 	 */
1904 	return 0;
1905 
1906 undo:
1907 	sysctl_sched_uclamp_util_min = old_min;
1908 	sysctl_sched_uclamp_util_max = old_max;
1909 	sysctl_sched_uclamp_util_min_rt_default = old_min_rt;
1910 	return result;
1911 }
1912 #endif
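
/*
 * Validation sketch (illustrative; not part of this file, and the helper
 * name below is hypothetical): a write through the sysctl handler above is
 * only accepted when the three knobs satisfy
 * util_min <= util_max <= SCHED_CAPACITY_SCALE, with the RT default bounded
 * by SCHED_CAPACITY_SCALE as well. Expressed as a predicate:
 */
static inline bool uclamp_sysctl_values_valid(int min, int max, int min_rt)
{
	return min <= max &&
	       max <= SCHED_CAPACITY_SCALE &&
	       min_rt <= SCHED_CAPACITY_SCALE;
}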
1913 
1914 static void uclamp_fork(struct task_struct *p)
1915 {
1916 	enum uclamp_id clamp_id;
1917 
1918 	/*
1919 	 * We don't need to hold task_rq_lock() when updating p->uclamp_* here
1920 	 * as the task is still in its early fork stages.
1921 	 */
1922 	for_each_clamp_id(clamp_id)
1923 		p->uclamp[clamp_id].active = false;
1924 
1925 	if (likely(!p->sched_reset_on_fork))
1926 		return;
1927 
1928 	for_each_clamp_id(clamp_id) {
1929 		uclamp_se_set(&p->uclamp_req[clamp_id],
1930 			      uclamp_none(clamp_id), false);
1931 	}
1932 }
1933 
1934 static void uclamp_post_fork(struct task_struct *p)
1935 {
1936 	uclamp_update_util_min_rt_default(p);
1937 }
1938 
1939 static void __init init_uclamp_rq(struct rq *rq)
1940 {
1941 	enum uclamp_id clamp_id;
1942 	struct uclamp_rq *uc_rq = rq->uclamp;
1943 
1944 	for_each_clamp_id(clamp_id) {
1945 		uc_rq[clamp_id] = (struct uclamp_rq) {
1946 			.value = uclamp_none(clamp_id)
1947 		};
1948 	}
1949 
1950 	rq->uclamp_flags = UCLAMP_FLAG_IDLE;
1951 }
1952 
1953 static void __init init_uclamp(void)
1954 {
1955 	struct uclamp_se uc_max = {};
1956 	enum uclamp_id clamp_id;
1957 	int cpu;
1958 
1959 	for_each_possible_cpu(cpu)
1960 		init_uclamp_rq(cpu_rq(cpu));
1961 
1962 	for_each_clamp_id(clamp_id) {
1963 		uclamp_se_set(&init_task.uclamp_req[clamp_id],
1964 			      uclamp_none(clamp_id), false);
1965 	}
1966 
1967 	/* System defaults allow max clamp values for both indexes */
1968 	uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false);
1969 	for_each_clamp_id(clamp_id) {
1970 		uclamp_default[clamp_id] = uc_max;
1971 #ifdef CONFIG_UCLAMP_TASK_GROUP
1972 		root_task_group.uclamp_req[clamp_id] = uc_max;
1973 		root_task_group.uclamp[clamp_id] = uc_max;
1974 #endif
1975 	}
1976 }
1977 
1978 #else /* !CONFIG_UCLAMP_TASK */
1979 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { }
1980 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
1981 static inline void uclamp_fork(struct task_struct *p) { }
1982 static inline void uclamp_post_fork(struct task_struct *p) { }
1983 static inline void init_uclamp(void) { }
1984 #endif /* CONFIG_UCLAMP_TASK */
1985 
1986 bool sched_task_on_rq(struct task_struct *p)
1987 {
1988 	return task_on_rq_queued(p);
1989 }
1990 
1991 unsigned long get_wchan(struct task_struct *p)
1992 {
1993 	unsigned long ip = 0;
1994 	unsigned int state;
1995 
1996 	if (!p || p == current)
1997 		return 0;
1998 
1999 	/* Only get wchan if task is blocked and we can keep it that way. */
2000 	raw_spin_lock_irq(&p->pi_lock);
2001 	state = READ_ONCE(p->__state);
2002 	smp_rmb(); /* see try_to_wake_up() */
2003 	if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
2004 		ip = __get_wchan(p);
2005 	raw_spin_unlock_irq(&p->pi_lock);
2006 
2007 	return ip;
2008 }
2009 
2010 void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
2011 {
2012 	if (!(flags & ENQUEUE_NOCLOCK))
2013 		update_rq_clock(rq);
2014 
2015 	if (!(flags & ENQUEUE_RESTORE)) {
2016 		sched_info_enqueue(rq, p);
2017 		psi_enqueue(p, (flags & ENQUEUE_WAKEUP) && !(flags & ENQUEUE_MIGRATED));
2018 	}
2019 
2020 	p->sched_class->enqueue_task(rq, p, flags);
2021 	/*
2022 	 * Must be after ->enqueue_task() because ENQUEUE_DELAYED can clear
2023 	 * ->sched_delayed.
2024 	 */
2025 	uclamp_rq_inc(rq, p);
2026 
2027 	if (sched_core_enabled(rq))
2028 		sched_core_enqueue(rq, p);
2029 }
2030 
2031 /*
2032  * Must only return false when called with DEQUEUE_SLEEP.
2033  */
2034 inline bool dequeue_task(struct rq *rq, struct task_struct *p, int flags)
2035 {
2036 	if (sched_core_enabled(rq))
2037 		sched_core_dequeue(rq, p, flags);
2038 
2039 	if (!(flags & DEQUEUE_NOCLOCK))
2040 		update_rq_clock(rq);
2041 
2042 	if (!(flags & DEQUEUE_SAVE)) {
2043 		sched_info_dequeue(rq, p);
2044 		psi_dequeue(p, flags & DEQUEUE_SLEEP);
2045 	}
2046 
2047 	/*
2048 	 * Must be before ->dequeue_task() because ->dequeue_task() can 'fail'
2049 	 * and mark the task ->sched_delayed.
2050 	 */
2051 	uclamp_rq_dec(rq, p);
2052 	return p->sched_class->dequeue_task(rq, p, flags);
2053 }
2054 
2055 void activate_task(struct rq *rq, struct task_struct *p, int flags)
2056 {
2057 	if (task_on_rq_migrating(p))
2058 		flags |= ENQUEUE_MIGRATED;
2059 	if (flags & ENQUEUE_MIGRATED)
2060 		sched_mm_cid_migrate_to(rq, p);
2061 
2062 	enqueue_task(rq, p, flags);
2063 
2064 	WRITE_ONCE(p->on_rq, TASK_ON_RQ_QUEUED);
2065 	ASSERT_EXCLUSIVE_WRITER(p->on_rq);
2066 }
2067 
2068 void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
2069 {
2070 	SCHED_WARN_ON(flags & DEQUEUE_SLEEP);
2071 
2072 	WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
2073 	ASSERT_EXCLUSIVE_WRITER(p->on_rq);
2074 
2075 	/*
2076 	 * Code explicitly relies on TASK_ON_RQ_MIGRATING being set *before*
2077 	 * dequeue_task() and cleared *after* enqueue_task().
2078 	 */
2079 
2080 	dequeue_task(rq, p, flags);
2081 }
2082 
2083 static void block_task(struct rq *rq, struct task_struct *p, int flags)
2084 {
2085 	if (dequeue_task(rq, p, DEQUEUE_SLEEP | flags))
2086 		__block_task(rq, p);
2087 }
2088 
2089 /**
2090  * task_curr - is this task currently executing on a CPU?
2091  * @p: the task in question.
2092  *
2093  * Return: 1 if the task is currently executing. 0 otherwise.
2094  */
2095 inline int task_curr(const struct task_struct *p)
2096 {
2097 	return cpu_curr(task_cpu(p)) == p;
2098 }
2099 
2100 /*
2101  * ->switching_to() is called with the pi_lock and rq_lock held and must not
2102  * mess with locking.
2103  */
2104 void check_class_changing(struct rq *rq, struct task_struct *p,
2105 			  const struct sched_class *prev_class)
2106 {
2107 	if (prev_class != p->sched_class && p->sched_class->switching_to)
2108 		p->sched_class->switching_to(rq, p);
2109 }
2110 
2111 /*
2112  * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
2113  * use the balance_callback list if you want balancing.
2114  *
2115  * This means any call to check_class_changed() must be followed by a call to
2116  * balance_callback().
2117  */
2118 void check_class_changed(struct rq *rq, struct task_struct *p,
2119 			 const struct sched_class *prev_class,
2120 			 int oldprio)
2121 {
2122 	if (prev_class != p->sched_class) {
2123 		if (prev_class->switched_from)
2124 			prev_class->switched_from(rq, p);
2125 
2126 		p->sched_class->switched_to(rq, p);
2127 	} else if (oldprio != p->prio || dl_task(p))
2128 		p->sched_class->prio_changed(rq, p, oldprio);
2129 }
2130 
2131 void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags)
2132 {
2133 	if (p->sched_class == rq->curr->sched_class)
2134 		rq->curr->sched_class->wakeup_preempt(rq, p, flags);
2135 	else if (sched_class_above(p->sched_class, rq->curr->sched_class))
2136 		resched_curr(rq);
2137 
2138 	/*
2139 	 * A queue event has occurred, and we're going to schedule.  In
2140 	 * this case, we can save a useless back-to-back clock update.
2141 	 */
2142 	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
2143 		rq_clock_skip_update(rq);
2144 }
2145 
2146 static __always_inline
2147 int __task_state_match(struct task_struct *p, unsigned int state)
2148 {
2149 	if (READ_ONCE(p->__state) & state)
2150 		return 1;
2151 
2152 	if (READ_ONCE(p->saved_state) & state)
2153 		return -1;
2154 
2155 	return 0;
2156 }
2157 
2158 static __always_inline
2159 int task_state_match(struct task_struct *p, unsigned int state)
2160 {
2161 	/*
2162 	 * Serialize against current_save_and_set_rtlock_wait_state(),
2163 	 * current_restore_rtlock_saved_state(), and __refrigerator().
2164 	 */
2165 	guard(raw_spinlock_irq)(&p->pi_lock);
2166 	return __task_state_match(p, state);
2167 }
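
/*
 * Interpretation sketch (illustrative; the wrapper below is hypothetical,
 * not part of this file): the tri-state above reads as "matched p->__state"
 * (1), "matched only p->saved_state" (-1, e.g. a task sleeping on a
 * PREEMPT_RT lock), or "no match" (0). Callers that only care whether some
 * match occurred at all can collapse it to a boolean:
 */
static inline bool task_state_matches_any(struct task_struct *p, unsigned int state)
{
	return task_state_match(p, state) != 0;
}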
2168 
2169 /*
2170  * wait_task_inactive - wait for a thread to unschedule.
2171  *
2172  * Wait for the thread to block in any of the states set in @match_state.
2173  * If it changes, i.e. @p might have woken up, then return zero.  When we
2174  * succeed in waiting for @p to be off its CPU, we return a positive number
2175  * (its total switch count).  If a second call a short while later returns the
2176  * same number, the caller can be sure that @p has remained unscheduled the
2177  * whole time.
2178  *
2179  * The caller must ensure that the task *will* unschedule sometime soon,
2180  * else this function might spin for a *long* time. This function can't
2181  * be called with interrupts off, or it may introduce deadlock with
2182  * smp_call_function() if an IPI is sent by the same process we are
2183  * waiting to become inactive.
2184  */
2185 unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
2186 {
2187 	int running, queued, match;
2188 	struct rq_flags rf;
2189 	unsigned long ncsw;
2190 	struct rq *rq;
2191 
2192 	for (;;) {
2193 		/*
2194 		 * We do the initial early heuristics without holding
2195 		 * any task-queue locks at all. We'll only try to get
2196 		 * the runqueue lock when things look like they will
2197 		 * work out!
2198 		 */
2199 		rq = task_rq(p);
2200 
2201 		/*
2202 		 * If the task is actively running on another CPU
2203 		 * still, just relax and busy-wait without holding
2204 		 * any locks.
2205 		 *
2206 		 * NOTE! Since we don't hold any locks, it's not
2207 		 * even sure that "rq" stays as the right runqueue!
2208 		 * But we don't care, since "task_on_cpu()" will
2209 		 * return false if the runqueue has changed and p
2210 		 * is actually now running somewhere else!
2211 		 */
2212 		while (task_on_cpu(rq, p)) {
2213 			if (!task_state_match(p, match_state))
2214 				return 0;
2215 			cpu_relax();
2216 		}
2217 
2218 		/*
2219 		 * Ok, time to look more closely! We need the rq
2220 		 * lock now, to be *sure*. If we're wrong, we'll
2221 		 * just go back and repeat.
2222 		 */
2223 		rq = task_rq_lock(p, &rf);
2224 		trace_sched_wait_task(p);
2225 		running = task_on_cpu(rq, p);
2226 		queued = task_on_rq_queued(p);
2227 		ncsw = 0;
2228 		if ((match = __task_state_match(p, match_state))) {
2229 			/*
2230 			 * When matching on p->saved_state, consider this task
2231 			 * still queued so it will wait.
2232 			 */
2233 			if (match < 0)
2234 				queued = 1;
2235 			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
2236 		}
2237 		task_rq_unlock(rq, p, &rf);
2238 
2239 		/*
2240 		 * If it changed from the expected state, bail out now.
2241 		 */
2242 		if (unlikely(!ncsw))
2243 			break;
2244 
2245 		/*
2246 		 * Was it really running after all now that we
2247 		 * checked with the proper locks actually held?
2248 		 *
2249 		 * Oops. Go back and try again..
2250 		 */
2251 		if (unlikely(running)) {
2252 			cpu_relax();
2253 			continue;
2254 		}
2255 
2256 		/*
2257 		 * It's not enough that it's not actively running,
2258 		 * it must be off the runqueue _entirely_, and not
2259 		 * preempted!
2260 		 *
2261 		 * So if it was still runnable (but just not actively
2262 		 * running right now), it's preempted, and we should
2263 		 * yield - it could be a while.
2264 		 */
2265 		if (unlikely(queued)) {
2266 			ktime_t to = NSEC_PER_SEC / HZ;
2267 
2268 			set_current_state(TASK_UNINTERRUPTIBLE);
2269 			schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD);
2270 			continue;
2271 		}
2272 
2273 		/*
2274 		 * Ahh, all good. It wasn't running, and it wasn't
2275 		 * runnable, which means that it will never become
2276 		 * running in the future either. We're all done!
2277 		 */
2278 		break;
2279 	}
2280 
2281 	return ncsw;
2282 }
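
/*
 * Usage sketch (illustrative; the example_*() helper is hypothetical): per
 * the kerneldoc above, two calls returning the same non-zero switch count
 * prove that @p never scheduled in between. TASK_UNINTERRUPTIBLE is just an
 * assumed match state here:
 */
static bool example_task_stayed_inactive(struct task_struct *p)
{
	unsigned long ncsw;

	ncsw = wait_task_inactive(p, TASK_UNINTERRUPTIBLE);
	if (!ncsw)
		return false;	/* @p changed state and may have woken up */

	/* ... inspect @p while it is known to be off its CPU ... */

	return wait_task_inactive(p, TASK_UNINTERRUPTIBLE) == ncsw;
}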
2283 
2284 #ifdef CONFIG_SMP
2285 
2286 static void
2287 __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx);
2288 
2289 static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
2290 {
2291 	struct affinity_context ac = {
2292 		.new_mask  = cpumask_of(rq->cpu),
2293 		.flags     = SCA_MIGRATE_DISABLE,
2294 	};
2295 
2296 	if (likely(!p->migration_disabled))
2297 		return;
2298 
2299 	if (p->cpus_ptr != &p->cpus_mask)
2300 		return;
2301 
2302 	/*
2303 	 * Violates locking rules! See comment in __do_set_cpus_allowed().
2304 	 */
2305 	__do_set_cpus_allowed(p, &ac);
2306 }
2307 
2308 void migrate_disable(void)
2309 {
2310 	struct task_struct *p = current;
2311 
2312 	if (p->migration_disabled) {
2313 #ifdef CONFIG_DEBUG_PREEMPT
2314 		/*
2315 		 * Warn about overflow half-way through the range.
2316 		 */
2317 		WARN_ON_ONCE((s16)p->migration_disabled < 0);
2318 #endif
2319 		p->migration_disabled++;
2320 		return;
2321 	}
2322 
2323 	guard(preempt)();
2324 	this_rq()->nr_pinned++;
2325 	p->migration_disabled = 1;
2326 }
2327 EXPORT_SYMBOL_GPL(migrate_disable);
2328 
2329 void migrate_enable(void)
2330 {
2331 	struct task_struct *p = current;
2332 	struct affinity_context ac = {
2333 		.new_mask  = &p->cpus_mask,
2334 		.flags     = SCA_MIGRATE_ENABLE,
2335 	};
2336 
2337 #ifdef CONFIG_DEBUG_PREEMPT
2338 	/*
2339 	 * Check both overflow from migrate_disable() and superfluous
2340 	 * migrate_enable().
2341 	 */
2342 	if (WARN_ON_ONCE((s16)p->migration_disabled <= 0))
2343 		return;
2344 #endif
2345 
2346 	if (p->migration_disabled > 1) {
2347 		p->migration_disabled--;
2348 		return;
2349 	}
2350 
2351 	/*
2352 	 * Ensure stop_task runs either before or after this, and that
2353 	 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
2354 	 */
2355 	guard(preempt)();
2356 	if (p->cpus_ptr != &p->cpus_mask)
2357 		__set_cpus_allowed_ptr(p, &ac);
2358 	/*
2359 	 * Mustn't clear migration_disabled() until cpus_ptr points back at the
2360 	 * regular cpus_mask, otherwise things that race (e.g.
2361 	 * select_fallback_rq) get confused.
2362 	 */
2363 	barrier();
2364 	p->migration_disabled = 0;
2365 	this_rq()->nr_pinned--;
2366 }
2367 EXPORT_SYMBOL_GPL(migrate_enable);
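
/*
 * Usage sketch (illustrative; example_*() is hypothetical): a
 * migrate_disable() section pins the current task to its CPU while staying
 * preemptible, so smp_processor_id() remains stable throughout:
 */
static int example_read_this_cpu(void)
{
	int cpu;

	migrate_disable();
	/* We can still be preempted here, but we cannot change CPU. */
	cpu = smp_processor_id();
	/* ... access per-CPU state belonging to @cpu ... */
	migrate_enable();

	return cpu;
}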
2368 
2369 static inline bool rq_has_pinned_tasks(struct rq *rq)
2370 {
2371 	return rq->nr_pinned;
2372 }
2373 
2374 /*
2375  * Per-CPU kthreads are allowed to run on !active && online CPUs, see
2376  * __set_cpus_allowed_ptr() and select_fallback_rq().
2377  */
2378 static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
2379 {
2380 	/* When not in the task's cpumask, no point in looking further. */
2381 	if (!task_allowed_on_cpu(p, cpu))
2382 		return false;
2383 
2384 	/* migrate_disabled() must be allowed to finish. */
2385 	if (is_migration_disabled(p))
2386 		return cpu_online(cpu);
2387 
2388 	/* Non-kernel threads are not allowed during either online or offline. */
2389 	if (!(p->flags & PF_KTHREAD))
2390 		return cpu_active(cpu);
2391 
2392 	/* KTHREAD_IS_PER_CPU is always allowed. */
2393 	if (kthread_is_per_cpu(p))
2394 		return cpu_online(cpu);
2395 
2396 	/* Regular kernel threads don't get to stay during offline. */
2397 	if (cpu_dying(cpu))
2398 		return false;
2399 
2400 	/* But are allowed during online. */
2401 	return cpu_online(cpu);
2402 }
2403 
2404 /*
2405  * This is how migration works:
2406  *
2407  * 1) we invoke migration_cpu_stop() on the target CPU using
2408  *    stop_one_cpu().
2409  * 2) stopper starts to run (implicitly forcing the migrated thread
2410  *    off the CPU)
2411  * 3) it checks whether the migrated task is still in the wrong runqueue.
2412  * 4) if it's in the wrong runqueue then the migration thread removes
2413  *    it and puts it into the right queue.
2414  * 5) stopper completes and stop_one_cpu() returns and the migration
2415  *    is done.
2416  */
2417 
2418 /*
2419  * move_queued_task - move a queued task to new rq.
2420  *
2421  * Returns (locked) new rq. Old rq's lock is released.
2422  */
2423 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
2424 				   struct task_struct *p, int new_cpu)
2425 {
2426 	lockdep_assert_rq_held(rq);
2427 
2428 	deactivate_task(rq, p, DEQUEUE_NOCLOCK);
2429 	set_task_cpu(p, new_cpu);
2430 	rq_unlock(rq, rf);
2431 
2432 	rq = cpu_rq(new_cpu);
2433 
2434 	rq_lock(rq, rf);
2435 	WARN_ON_ONCE(task_cpu(p) != new_cpu);
2436 	activate_task(rq, p, 0);
2437 	wakeup_preempt(rq, p, 0);
2438 
2439 	return rq;
2440 }
2441 
2442 struct migration_arg {
2443 	struct task_struct		*task;
2444 	int				dest_cpu;
2445 	struct set_affinity_pending	*pending;
2446 };
2447 
2448 /*
2449  * @refs: number of wait_for_completion()
2450  * @stop_pending: is @stop_work in use
2451  */
2452 struct set_affinity_pending {
2453 	refcount_t		refs;
2454 	unsigned int		stop_pending;
2455 	struct completion	done;
2456 	struct cpu_stop_work	stop_work;
2457 	struct migration_arg	arg;
2458 };
2459 
2460 /*
2461  * Move (not current) task off this CPU, onto the destination CPU. We're doing
2462  * this because either it can't run here any more (set_cpus_allowed()
2463  * away from this CPU, or CPU going down), or because we're
2464  * attempting to rebalance this task on exec (sched_exec).
2465  *
2466  * So we race with normal scheduler movements, but that's OK, as long
2467  * as the task is no longer on this CPU.
2468  */
2469 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
2470 				 struct task_struct *p, int dest_cpu)
2471 {
2472 	/* Affinity changed (again). */
2473 	if (!is_cpu_allowed(p, dest_cpu))
2474 		return rq;
2475 
2476 	rq = move_queued_task(rq, rf, p, dest_cpu);
2477 
2478 	return rq;
2479 }
2480 
2481 /*
2482  * migration_cpu_stop - this will be executed by a high-prio stopper thread
2483  * and performs thread migration by bumping the thread off its CPU and
2484  * then 'pushing' it onto another runqueue.
2485  */
2486 static int migration_cpu_stop(void *data)
2487 {
2488 	struct migration_arg *arg = data;
2489 	struct set_affinity_pending *pending = arg->pending;
2490 	struct task_struct *p = arg->task;
2491 	struct rq *rq = this_rq();
2492 	bool complete = false;
2493 	struct rq_flags rf;
2494 
2495 	/*
2496 	 * The original target CPU might have gone down and we might
2497 	 * be on another CPU but it doesn't matter.
2498 	 */
2499 	local_irq_save(rf.flags);
2500 	/*
2501 	 * We need to explicitly wake pending tasks before running
2502 	 * __migrate_task() such that we will not miss enforcing cpus_ptr
2503 	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
2504 	 */
2505 	flush_smp_call_function_queue();
2506 
2507 	raw_spin_lock(&p->pi_lock);
2508 	rq_lock(rq, &rf);
2509 
2510 	/*
2511 	 * If we were passed a pending, then ->stop_pending was set, thus
2512 	 * p->migration_pending must have remained stable.
2513 	 */
2514 	WARN_ON_ONCE(pending && pending != p->migration_pending);
2515 
2516 	/*
2517 	 * If task_rq(p) != rq, it cannot be migrated here, because we're
2518 	 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
2519 	 * we're holding p->pi_lock.
2520 	 */
2521 	if (task_rq(p) == rq) {
2522 		if (is_migration_disabled(p))
2523 			goto out;
2524 
2525 		if (pending) {
2526 			p->migration_pending = NULL;
2527 			complete = true;
2528 
2529 			if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask))
2530 				goto out;
2531 		}
2532 
2533 		if (task_on_rq_queued(p)) {
2534 			update_rq_clock(rq);
2535 			rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
2536 		} else {
2537 			p->wake_cpu = arg->dest_cpu;
2538 		}
2539 
2540 		/*
2541 		 * XXX __migrate_task() can fail, at which point we might end
2542 		 * up running on a dodgy CPU, AFAICT this can only happen
2543 		 * during CPU hotplug, at which point we'll get pushed out
2544 		 * anyway, so it's probably not a big deal.
2545 		 */
2546 
2547 	} else if (pending) {
2548 		/*
2549 		 * This happens when we get migrated between migrate_enable()'s
2550 		 * preempt_enable() and scheduling the stopper task. At that
2551 		 * point we're a regular task again and not current anymore.
2552 		 *
2553 		 * A !PREEMPT kernel has a giant hole here, which makes it far
2554 		 * more likely.
2555 		 */
2556 
2557 		/*
2558 		 * The task moved before the stopper got to run. We're holding
2559 		 * ->pi_lock, so the allowed mask is stable - if it got
2560 		 * somewhere allowed, we're done.
2561 		 */
2562 		if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) {
2563 			p->migration_pending = NULL;
2564 			complete = true;
2565 			goto out;
2566 		}
2567 
2568 		/*
2569 		 * When migrate_enable() hits a rq mis-match we can't reliably
2570 		 * determine is_migration_disabled() and so have to chase after
2571 		 * it.
2572 		 */
2573 		WARN_ON_ONCE(!pending->stop_pending);
2574 		preempt_disable();
2575 		task_rq_unlock(rq, p, &rf);
2576 		stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
2577 				    &pending->arg, &pending->stop_work);
2578 		preempt_enable();
2579 		return 0;
2580 	}
2581 out:
2582 	if (pending)
2583 		pending->stop_pending = false;
2584 	task_rq_unlock(rq, p, &rf);
2585 
2586 	if (complete)
2587 		complete_all(&pending->done);
2588 
2589 	return 0;
2590 }
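
/*
 * Invocation sketch (illustrative; example_*() is hypothetical): callers
 * like the sched_exec() rebalance path drive the protocol documented above
 * by handing a struct migration_arg to the stopper on the task's CPU:
 */
static int example_migrate_task_to(struct task_struct *p, int dest_cpu)
{
	struct migration_arg arg = { .task = p, .dest_cpu = dest_cpu };

	/* Runs migration_cpu_stop() on @p's CPU and waits for completion. */
	return stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
}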
2591 
2592 int push_cpu_stop(void *arg)
2593 {
2594 	struct rq *lowest_rq = NULL, *rq = this_rq();
2595 	struct task_struct *p = arg;
2596 
2597 	raw_spin_lock_irq(&p->pi_lock);
2598 	raw_spin_rq_lock(rq);
2599 
2600 	if (task_rq(p) != rq)
2601 		goto out_unlock;
2602 
2603 	if (is_migration_disabled(p)) {
2604 		p->migration_flags |= MDF_PUSH;
2605 		goto out_unlock;
2606 	}
2607 
2608 	p->migration_flags &= ~MDF_PUSH;
2609 
2610 	if (p->sched_class->find_lock_rq)
2611 		lowest_rq = p->sched_class->find_lock_rq(p, rq);
2612 
2613 	if (!lowest_rq)
2614 		goto out_unlock;
2615 
2616 	// XXX validate p is still the highest prio task
2617 	if (task_rq(p) == rq) {
2618 		deactivate_task(rq, p, 0);
2619 		set_task_cpu(p, lowest_rq->cpu);
2620 		activate_task(lowest_rq, p, 0);
2621 		resched_curr(lowest_rq);
2622 	}
2623 
2624 	double_unlock_balance(rq, lowest_rq);
2625 
2626 out_unlock:
2627 	rq->push_busy = false;
2628 	raw_spin_rq_unlock(rq);
2629 	raw_spin_unlock_irq(&p->pi_lock);
2630 
2631 	put_task_struct(p);
2632 	return 0;
2633 }
2634 
2635 /*
2636  * sched_class::set_cpus_allowed must do the below, but is not required to
2637  * actually call this function.
2638  */
2639 void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx)
2640 {
2641 	if (ctx->flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) {
2642 		p->cpus_ptr = ctx->new_mask;
2643 		return;
2644 	}
2645 
2646 	cpumask_copy(&p->cpus_mask, ctx->new_mask);
2647 	p->nr_cpus_allowed = cpumask_weight(ctx->new_mask);
2648 
2649 	/*
2650 	 * Swap in a new user_cpus_ptr if the SCA_USER flag is set.
2651 	 */
2652 	if (ctx->flags & SCA_USER)
2653 		swap(p->user_cpus_ptr, ctx->user_mask);
2654 }
2655 
2656 static void
2657 __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
2658 {
2659 	struct rq *rq = task_rq(p);
2660 	bool queued, running;
2661 
2662 	/*
2663 	 * This here violates the locking rules for affinity, since we're only
2664 	 * supposed to change these variables while holding both rq->lock and
2665 	 * p->pi_lock.
2666 	 *
2667 	 * HOWEVER, it magically works, because ttwu() is the only code that
2668 	 * accesses these variables under p->pi_lock and only does so after
2669 	 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
2670 	 * before finish_task().
2671 	 *
2672 	 * XXX do further audits, this smells like something putrid.
2673 	 */
2674 	if (ctx->flags & SCA_MIGRATE_DISABLE)
2675 		SCHED_WARN_ON(!p->on_cpu);
2676 	else
2677 		lockdep_assert_held(&p->pi_lock);
2678 
2679 	queued = task_on_rq_queued(p);
2680 	running = task_current(rq, p);
2681 
2682 	if (queued) {
2683 		/*
2684 		 * Because __kthread_bind() calls this on blocked tasks without
2685 		 * holding rq->lock.
2686 		 */
2687 		lockdep_assert_rq_held(rq);
2688 		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
2689 	}
2690 	if (running)
2691 		put_prev_task(rq, p);
2692 
2693 	p->sched_class->set_cpus_allowed(p, ctx);
2694 
2695 	if (queued)
2696 		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
2697 	if (running)
2698 		set_next_task(rq, p);
2699 }
2700 
2701 /*
2702  * Used for kthread_bind() and select_fallback_rq(); in both cases the user
2703  * affinity (if any) should be destroyed too.
2704  */
2705 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
2706 {
2707 	struct affinity_context ac = {
2708 		.new_mask  = new_mask,
2709 		.user_mask = NULL,
2710 		.flags     = SCA_USER,	/* clear the user requested mask */
2711 	};
2712 	union cpumask_rcuhead {
2713 		cpumask_t cpumask;
2714 		struct rcu_head rcu;
2715 	};
2716 
2717 	__do_set_cpus_allowed(p, &ac);
2718 
2719 	/*
2720 	 * Because this is called with p->pi_lock held, it is not possible
2721 	 * to use kfree() here (when PREEMPT_RT=y), therefore punt to using
2722 	 * kfree_rcu().
2723 	 */
2724 	kfree_rcu((union cpumask_rcuhead *)ac.user_mask, rcu);
2725 }
2726 
2727 int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
2728 		      int node)
2729 {
2730 	cpumask_t *user_mask;
2731 	unsigned long flags;
2732 
2733 	/*
2734 	 * Always clear dst->user_cpus_ptr first, as the two tasks' user_cpus_ptr
2735 	 * values may differ by now due to racing.
2736 	 */
2737 	dst->user_cpus_ptr = NULL;
2738 
2739 	/*
2740 	 * This check is racy and losing the race is a valid situation.
2741 	 * It is not worth the extra overhead of taking the pi_lock on
2742 	 * every fork/clone.
2743 	 */
2744 	if (data_race(!src->user_cpus_ptr))
2745 		return 0;
2746 
2747 	user_mask = alloc_user_cpus_ptr(node);
2748 	if (!user_mask)
2749 		return -ENOMEM;
2750 
2751 	/*
2752 	 * Use pi_lock to protect content of user_cpus_ptr
2753 	 *
2754 	 * Though unlikely, user_cpus_ptr can be reset to NULL by a concurrent
2755 	 * do_set_cpus_allowed().
2756 	 */
2757 	raw_spin_lock_irqsave(&src->pi_lock, flags);
2758 	if (src->user_cpus_ptr) {
2759 		swap(dst->user_cpus_ptr, user_mask);
2760 		cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
2761 	}
2762 	raw_spin_unlock_irqrestore(&src->pi_lock, flags);
2763 
2764 	if (unlikely(user_mask))
2765 		kfree(user_mask);
2766 
2767 	return 0;
2768 }
2769 
2770 static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p)
2771 {
2772 	struct cpumask *user_mask = NULL;
2773 
2774 	swap(p->user_cpus_ptr, user_mask);
2775 
2776 	return user_mask;
2777 }
2778 
2779 void release_user_cpus_ptr(struct task_struct *p)
2780 {
2781 	kfree(clear_user_cpus_ptr(p));
2782 }
2783 
2784 /*
2785  * This function is wildly self concurrent; here be dragons.
2786  *
2787  *
2788  * When given a valid mask, __set_cpus_allowed_ptr() must block until the
2789  * designated task is enqueued on an allowed CPU. If that task is currently
2790  * running, we have to kick it out using the CPU stopper.
2791  *
2792  * Migrate-Disable comes along and tramples all over our nice sandcastle.
2793  * Consider:
2794  *
2795  *     Initial conditions: P0->cpus_mask = [0, 1]
2796  *
2797  *     P0@CPU0                  P1
2798  *
2799  *     migrate_disable();
2800  *     <preempted>
2801  *                              set_cpus_allowed_ptr(P0, [1]);
2802  *
2803  * P1 *cannot* return from this set_cpus_allowed_ptr() call until P0 executes
2804  * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region).
2805  * This means we need the following scheme:
2806  *
2807  *     P0@CPU0                  P1
2808  *
2809  *     migrate_disable();
2810  *     <preempted>
2811  *                              set_cpus_allowed_ptr(P0, [1]);
2812  *                                <blocks>
2813  *     <resumes>
2814  *     migrate_enable();
2815  *       __set_cpus_allowed_ptr();
2816  *       <wakes local stopper>
2817  *                         `--> <woken on migration completion>
2818  *
2819  * Now the fun stuff: there may be several P1-like tasks, i.e. multiple
2820  * concurrent set_cpus_allowed_ptr(P0, [*]) calls. CPU affinity changes of any
2821  * task p are serialized by p->pi_lock, which we can leverage: the one that
2822  * should come into effect at the end of the Migrate-Disable region is the last
2823  * one. This means we only need to track a single cpumask (i.e. p->cpus_mask),
2824  * but we still need to properly signal those waiting tasks at the appropriate
2825  * moment.
2826  *
2827  * This is implemented using struct set_affinity_pending. The first
2828  * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will
2829  * setup an instance of that struct and install it on the targeted task_struct.
2830  * Any and all further callers will reuse that instance. Those then wait for
2831  * a completion signaled at the tail of the CPU stopper callback (1), triggered
2832  * on the end of the Migrate-Disable region (i.e. outermost migrate_enable()).
2833  *
2834  *
2835  * (1) In the cases covered above. There is one more where the completion is
2836  * signaled within affine_move_task() itself: when a subsequent affinity request
2837  * occurs after the stopper bailed out due to the targeted task still being
2838  * Migrate-Disable. Consider:
2839  *
2840  *     Initial conditions: P0->cpus_mask = [0, 1]
2841  *
2842  *     CPU0		  P1				P2
2843  *     <P0>
2844  *       migrate_disable();
2845  *       <preempted>
2846  *                        set_cpus_allowed_ptr(P0, [1]);
2847  *                          <blocks>
2848  *     <migration/0>
2849  *       migration_cpu_stop()
2850  *         is_migration_disabled()
2851  *           <bails>
2852  *                                                       set_cpus_allowed_ptr(P0, [0, 1]);
2853  *                                                         <signal completion>
2854  *                          <awakes>
2855  *
2856  * Note that the above is safe vs a concurrent migrate_enable(), as any
2857  * pending affinity completion is preceded by an uninstallation of
2858  * p->migration_pending done with p->pi_lock held.
2859  */
2860 static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf,
2861 			    int dest_cpu, unsigned int flags)
2862 	__releases(rq->lock)
2863 	__releases(p->pi_lock)
2864 {
2865 	struct set_affinity_pending my_pending = { }, *pending = NULL;
2866 	bool stop_pending, complete = false;
2867 
2868 	/* Can the task run on the task's current CPU? If so, we're done */
2869 	if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
2870 		struct task_struct *push_task = NULL;
2871 
2872 		if ((flags & SCA_MIGRATE_ENABLE) &&
2873 		    (p->migration_flags & MDF_PUSH) && !rq->push_busy) {
2874 			rq->push_busy = true;
2875 			push_task = get_task_struct(p);
2876 		}
2877 
2878 		/*
2879 		 * If there are pending waiters, but no pending stop_work,
2880 		 * then complete now.
2881 		 */
2882 		pending = p->migration_pending;
2883 		if (pending && !pending->stop_pending) {
2884 			p->migration_pending = NULL;
2885 			complete = true;
2886 		}
2887 
2888 		preempt_disable();
2889 		task_rq_unlock(rq, p, rf);
2890 		if (push_task) {
2891 			stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
2892 					    p, &rq->push_work);
2893 		}
2894 		preempt_enable();
2895 
2896 		if (complete)
2897 			complete_all(&pending->done);
2898 
2899 		return 0;
2900 	}
2901 
2902 	if (!(flags & SCA_MIGRATE_ENABLE)) {
2903 		/* serialized by p->pi_lock */
2904 		if (!p->migration_pending) {
2905 			/* Install the request */
2906 			refcount_set(&my_pending.refs, 1);
2907 			init_completion(&my_pending.done);
2908 			my_pending.arg = (struct migration_arg) {
2909 				.task = p,
2910 				.dest_cpu = dest_cpu,
2911 				.pending = &my_pending,
2912 			};
2913 
2914 			p->migration_pending = &my_pending;
2915 		} else {
2916 			pending = p->migration_pending;
2917 			refcount_inc(&pending->refs);
2918 			/*
2919 			 * Affinity has changed, but we've already installed a
2920 			 * pending. migration_cpu_stop() *must* see this, else
2921 			 * we risk a completion of the pending despite having a
2922 			 * task on a disallowed CPU.
2923 			 *
2924 			 * Serialized by p->pi_lock, so this is safe.
2925 			 */
2926 			pending->arg.dest_cpu = dest_cpu;
2927 		}
2928 	}
2929 	pending = p->migration_pending;
2930 	/*
2931 	 * - !MIGRATE_ENABLE:
2932 	 *   we'll have installed a pending if there wasn't one already.
2933 	 *
2934 	 * - MIGRATE_ENABLE:
2935 	 *   we're here because the current CPU isn't matching anymore,
2936 	 *   the only way that can happen is because of a concurrent
2937 	 *   set_cpus_allowed_ptr() call, which should then still be
2938 	 *   pending completion.
2939 	 *
2940 	 * Either way, we really should have a @pending here.
2941 	 */
2942 	if (WARN_ON_ONCE(!pending)) {
2943 		task_rq_unlock(rq, p, rf);
2944 		return -EINVAL;
2945 	}
2946 
2947 	if (task_on_cpu(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) {
2948 		/*
2949 		 * MIGRATE_ENABLE gets here because 'p == current', but for
2950 		 * anything else we cannot do is_migration_disabled(), punt
2951 		 * and have the stopper function handle it all race-free.
2952 		 */
2953 		stop_pending = pending->stop_pending;
2954 		if (!stop_pending)
2955 			pending->stop_pending = true;
2956 
2957 		if (flags & SCA_MIGRATE_ENABLE)
2958 			p->migration_flags &= ~MDF_PUSH;
2959 
2960 		preempt_disable();
2961 		task_rq_unlock(rq, p, rf);
2962 		if (!stop_pending) {
2963 			stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
2964 					    &pending->arg, &pending->stop_work);
2965 		}
2966 		preempt_enable();
2967 
2968 		if (flags & SCA_MIGRATE_ENABLE)
2969 			return 0;
2970 	} else {
2971 
2972 		if (!is_migration_disabled(p)) {
2973 			if (task_on_rq_queued(p))
2974 				rq = move_queued_task(rq, rf, p, dest_cpu);
2975 
2976 			if (!pending->stop_pending) {
2977 				p->migration_pending = NULL;
2978 				complete = true;
2979 			}
2980 		}
2981 		task_rq_unlock(rq, p, rf);
2982 
2983 		if (complete)
2984 			complete_all(&pending->done);
2985 	}
2986 
2987 	wait_for_completion(&pending->done);
2988 
2989 	if (refcount_dec_and_test(&pending->refs))
2990 		wake_up_var(&pending->refs); /* No UaF, just an address */
2991 
2992 	/*
2993 	 * Block the original owner of &pending until all subsequent callers
2994 	 * have seen the completion and decremented the refcount.
2995 	 */
2996 	wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs));
2997 
2998 	/* ARGH */
2999 	WARN_ON_ONCE(my_pending.stop_pending);
3000 
3001 	return 0;
3002 }
3003 
3004 /*
3005  * Called with both p->pi_lock and rq->lock held; drops both before returning.
3006  */
3007 static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
3008 					 struct affinity_context *ctx,
3009 					 struct rq *rq,
3010 					 struct rq_flags *rf)
3011 	__releases(rq->lock)
3012 	__releases(p->pi_lock)
3013 {
3014 	const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
3015 	const struct cpumask *cpu_valid_mask = cpu_active_mask;
3016 	bool kthread = p->flags & PF_KTHREAD;
3017 	unsigned int dest_cpu;
3018 	int ret = 0;
3019 
3020 	update_rq_clock(rq);
3021 
3022 	if (kthread || is_migration_disabled(p)) {
3023 		/*
3024 		 * Kernel threads are allowed on online && !active CPUs,
3025 		 * however, during cpu-hot-unplug, even these might get pushed
3026 		 * away if not KTHREAD_IS_PER_CPU.
3027 		 *
3028 		 * Specifically, migration_disabled() tasks must not fail the
3029 		 * cpumask_any_and_distribute() pick below, esp. so on
3030 		 * SCA_MIGRATE_ENABLE, otherwise we'll not call
3031 		 * set_cpus_allowed_common() and actually reset p->cpus_ptr.
3032 		 */
3033 		cpu_valid_mask = cpu_online_mask;
3034 	}
3035 
3036 	if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) {
3037 		ret = -EINVAL;
3038 		goto out;
3039 	}
3040 
3041 	/*
3042 	 * Must re-check here, to close a race against __kthread_bind():
3043 	 * sched_setaffinity() is not guaranteed to observe the flag.
3044 	 */
3045 	if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
3046 		ret = -EINVAL;
3047 		goto out;
3048 	}
3049 
3050 	if (!(ctx->flags & SCA_MIGRATE_ENABLE)) {
3051 		if (cpumask_equal(&p->cpus_mask, ctx->new_mask)) {
3052 			if (ctx->flags & SCA_USER)
3053 				swap(p->user_cpus_ptr, ctx->user_mask);
3054 			goto out;
3055 		}
3056 
3057 		if (WARN_ON_ONCE(p == current &&
3058 				 is_migration_disabled(p) &&
3059 				 !cpumask_test_cpu(task_cpu(p), ctx->new_mask))) {
3060 			ret = -EBUSY;
3061 			goto out;
3062 		}
3063 	}
3064 
3065 	/*
3066 	 * Picking a ~random cpu helps in cases where we are changing affinity
3067  * for groups of tasks (i.e. cpuset), so that load balancing is not
3068 	 * immediately required to distribute the tasks within their new mask.
3069 	 */
3070 	dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, ctx->new_mask);
3071 	if (dest_cpu >= nr_cpu_ids) {
3072 		ret = -EINVAL;
3073 		goto out;
3074 	}
3075 
3076 	__do_set_cpus_allowed(p, ctx);
3077 
3078 	return affine_move_task(rq, p, rf, dest_cpu, ctx->flags);
3079 
3080 out:
3081 	task_rq_unlock(rq, p, rf);
3082 
3083 	return ret;
3084 }
3085 
3086 /*
3087  * Change a given task's CPU affinity. Migrate the thread to a
3088  * proper CPU and schedule it away if the CPU it's executing on
3089  * is removed from the allowed bitmask.
3090  *
3091  * NOTE: the caller must have a valid reference to the task, the
3092  * task must not exit() & deallocate itself prematurely. The
3093  * call is not atomic; no spinlocks may be held.
3094  */
3095 int __set_cpus_allowed_ptr(struct task_struct *p, struct affinity_context *ctx)
3096 {
3097 	struct rq_flags rf;
3098 	struct rq *rq;
3099 
3100 	rq = task_rq_lock(p, &rf);
3101 	/*
3102 	 * Masking should be skipped if SCA_USER or any of the SCA_MIGRATE_*
3103 	 * flags are set.
3104 	 */
3105 	if (p->user_cpus_ptr &&
3106 	    !(ctx->flags & (SCA_USER | SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) &&
3107 	    cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr))
3108 		ctx->new_mask = rq->scratch_mask;
3109 
3110 	return __set_cpus_allowed_ptr_locked(p, ctx, rq, &rf);
3111 }
3112 
3113 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
3114 {
3115 	struct affinity_context ac = {
3116 		.new_mask  = new_mask,
3117 		.flags     = 0,
3118 	};
3119 
3120 	return __set_cpus_allowed_ptr(p, &ac);
3121 }
3122 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
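
/*
 * Usage sketch (illustrative; the helper name is hypothetical): a typical
 * modular user of the export above pins a kthread it owns to a single CPU:
 */
static int example_pin_worker(struct task_struct *worker, int cpu)
{
	return set_cpus_allowed_ptr(worker, cpumask_of(cpu));
}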
3123 
3124 /*
3125  * Change a given task's CPU affinity to the intersection of its current
3126  * affinity mask and @subset_mask, writing the resulting mask to @new_mask.
3127  * If user_cpus_ptr is defined, use it as the basis for restricting CPU
3128  * affinity or use cpu_online_mask instead.
3129  *
3130  * If the resulting mask is empty, leave the affinity unchanged and return
3131  * -EINVAL.
3132  */
3133 static int restrict_cpus_allowed_ptr(struct task_struct *p,
3134 				     struct cpumask *new_mask,
3135 				     const struct cpumask *subset_mask)
3136 {
3137 	struct affinity_context ac = {
3138 		.new_mask  = new_mask,
3139 		.flags     = 0,
3140 	};
3141 	struct rq_flags rf;
3142 	struct rq *rq;
3143 	int err;
3144 
3145 	rq = task_rq_lock(p, &rf);
3146 
3147 	/*
3148 	 * Forcefully restricting the affinity of a deadline task is
3149 	 * likely to cause problems, so fail and noisily override the
3150 	 * mask entirely.
3151 	 */
3152 	if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
3153 		err = -EPERM;
3154 		goto err_unlock;
3155 	}
3156 
3157 	if (!cpumask_and(new_mask, task_user_cpus(p), subset_mask)) {
3158 		err = -EINVAL;
3159 		goto err_unlock;
3160 	}
3161 
3162 	return __set_cpus_allowed_ptr_locked(p, &ac, rq, &rf);
3163 
3164 err_unlock:
3165 	task_rq_unlock(rq, p, &rf);
3166 	return err;
3167 }
3168 
3169 /*
3170  * Restrict the CPU affinity of task @p so that it is a subset of
3171  * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
3172  * old affinity mask. If the resulting mask is empty, we warn and walk
3173  * up the cpuset hierarchy until we find a suitable mask.
3174  */
3175 void force_compatible_cpus_allowed_ptr(struct task_struct *p)
3176 {
3177 	cpumask_var_t new_mask;
3178 	const struct cpumask *override_mask = task_cpu_possible_mask(p);
3179 
3180 	alloc_cpumask_var(&new_mask, GFP_KERNEL);
3181 
3182 	/*
3183 	 * __migrate_task() can fail silently in the face of concurrent
3184 	 * offlining of the chosen destination CPU, so take the hotplug
3185 	 * lock to ensure that the migration succeeds.
3186 	 */
3187 	cpus_read_lock();
3188 	if (!cpumask_available(new_mask))
3189 		goto out_set_mask;
3190 
3191 	if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask))
3192 		goto out_free_mask;
3193 
3194 	/*
3195 	 * We failed to find a valid subset of the affinity mask for the
3196 	 * task, so override it based on its cpuset hierarchy.
3197 	 */
3198 	cpuset_cpus_allowed(p, new_mask);
3199 	override_mask = new_mask;
3200 
3201 out_set_mask:
3202 	if (printk_ratelimit()) {
3203 		printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
3204 				task_pid_nr(p), p->comm,
3205 				cpumask_pr_args(override_mask));
3206 	}
3207 
3208 	WARN_ON(set_cpus_allowed_ptr(p, override_mask));
3209 out_free_mask:
3210 	cpus_read_unlock();
3211 	free_cpumask_var(new_mask);
3212 }
3213 
3214 /*
3215  * Restore the affinity of a task @p which was previously restricted by a
3216  * call to force_compatible_cpus_allowed_ptr().
3217  *
3218  * It is the caller's responsibility to serialise this with any calls to
3219  * force_compatible_cpus_allowed_ptr(@p).
3220  */
3221 void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
3222 {
3223 	struct affinity_context ac = {
3224 		.new_mask  = task_user_cpus(p),
3225 		.flags     = 0,
3226 	};
3227 	int ret;
3228 
3229 	/*
3230 	 * Try to restore the old affinity mask with __sched_setaffinity().
3231 	 * Cpuset masking will be done there too.
3232 	 */
3233 	ret = __sched_setaffinity(p, &ac);
3234 	WARN_ON_ONCE(ret);
3235 }
3236 
3237 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
3238 {
3239 #ifdef CONFIG_SCHED_DEBUG
3240 	unsigned int state = READ_ONCE(p->__state);
3241 
3242 	/*
3243 	 * We should never call set_task_cpu() on a blocked task,
3244 	 * ttwu() will sort out the placement.
3245 	 */
3246 	WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq);
3247 
3248 	/*
3249 	 * A migrating fair-class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
3250 	 * because schedstat_wait_{start,end} rebase the migrating task's
3251 	 * wait_start time relying on p->on_rq.
3252 	 */
3253 	WARN_ON_ONCE(state == TASK_RUNNING &&
3254 		     p->sched_class == &fair_sched_class &&
3255 		     (p->on_rq && !task_on_rq_migrating(p)));
3256 
3257 #ifdef CONFIG_LOCKDEP
3258 	/*
3259 	 * The caller should hold either p->pi_lock or rq->lock, when changing
3260 	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
3261 	 *
3262 	 * sched_move_task() holds both and thus holding either pins the cgroup,
3263 	 * see task_group().
3264 	 *
3265 	 * Furthermore, all task_rq users should acquire both locks, see
3266 	 * task_rq_lock().
3267 	 */
3268 	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
3269 				      lockdep_is_held(__rq_lockp(task_rq(p)))));
3270 #endif
3271 	/*
3272 	 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
3273 	 */
3274 	WARN_ON_ONCE(!cpu_online(new_cpu));
3275 
3276 	WARN_ON_ONCE(is_migration_disabled(p));
3277 #endif
3278 
3279 	trace_sched_migrate_task(p, new_cpu);
3280 
3281 	if (task_cpu(p) != new_cpu) {
3282 		if (p->sched_class->migrate_task_rq)
3283 			p->sched_class->migrate_task_rq(p, new_cpu);
3284 		p->se.nr_migrations++;
3285 		rseq_migrate(p);
3286 		sched_mm_cid_migrate_from(p);
3287 		perf_event_task_migrate(p);
3288 	}
3289 
3290 	__set_task_cpu(p, new_cpu);
3291 }
3292 
3293 #ifdef CONFIG_NUMA_BALANCING
3294 static void __migrate_swap_task(struct task_struct *p, int cpu)
3295 {
3296 	if (task_on_rq_queued(p)) {
3297 		struct rq *src_rq, *dst_rq;
3298 		struct rq_flags srf, drf;
3299 
3300 		src_rq = task_rq(p);
3301 		dst_rq = cpu_rq(cpu);
3302 
3303 		rq_pin_lock(src_rq, &srf);
3304 		rq_pin_lock(dst_rq, &drf);
3305 
3306 		deactivate_task(src_rq, p, 0);
3307 		set_task_cpu(p, cpu);
3308 		activate_task(dst_rq, p, 0);
3309 		wakeup_preempt(dst_rq, p, 0);
3310 
3311 		rq_unpin_lock(dst_rq, &drf);
3312 		rq_unpin_lock(src_rq, &srf);
3313 
3314 	} else {
3315 		/*
3316 		 * Task isn't running anymore; make it appear like we migrated
3317 		 * it before it went to sleep. This means on wakeup we make the
3318 		 * previous CPU our target instead of where it really is.
3319 		 */
3320 		p->wake_cpu = cpu;
3321 	}
3322 }
3323 
3324 struct migration_swap_arg {
3325 	struct task_struct *src_task, *dst_task;
3326 	int src_cpu, dst_cpu;
3327 };
3328 
3329 static int migrate_swap_stop(void *data)
3330 {
3331 	struct migration_swap_arg *arg = data;
3332 	struct rq *src_rq, *dst_rq;
3333 
3334 	if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
3335 		return -EAGAIN;
3336 
3337 	src_rq = cpu_rq(arg->src_cpu);
3338 	dst_rq = cpu_rq(arg->dst_cpu);
3339 
3340 	guard(double_raw_spinlock)(&arg->src_task->pi_lock, &arg->dst_task->pi_lock);
3341 	guard(double_rq_lock)(src_rq, dst_rq);
3342 
3343 	if (task_cpu(arg->dst_task) != arg->dst_cpu)
3344 		return -EAGAIN;
3345 
3346 	if (task_cpu(arg->src_task) != arg->src_cpu)
3347 		return -EAGAIN;
3348 
3349 	if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr))
3350 		return -EAGAIN;
3351 
3352 	if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr))
3353 		return -EAGAIN;
3354 
3355 	__migrate_swap_task(arg->src_task, arg->dst_cpu);
3356 	__migrate_swap_task(arg->dst_task, arg->src_cpu);
3357 
3358 	return 0;
3359 }
3360 
3361 /*
3362  * Cross migrate two tasks
3363  */
3364 int migrate_swap(struct task_struct *cur, struct task_struct *p,
3365 		int target_cpu, int curr_cpu)
3366 {
3367 	struct migration_swap_arg arg;
3368 	int ret = -EINVAL;
3369 
3370 	arg = (struct migration_swap_arg){
3371 		.src_task = cur,
3372 		.src_cpu = curr_cpu,
3373 		.dst_task = p,
3374 		.dst_cpu = target_cpu,
3375 	};
3376 
3377 	if (arg.src_cpu == arg.dst_cpu)
3378 		goto out;
3379 
3380 	/*
3381 	 * These three tests are all lockless; this is OK since all of them
3382 	 * will be re-checked with proper locks held further down the line.
3383 	 */
3384 	if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
3385 		goto out;
3386 
3387 	if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr))
3388 		goto out;
3389 
3390 	if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr))
3391 		goto out;
3392 
3393 	trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
3394 	ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);
3395 
3396 out:
3397 	return ret;
3398 }
3399 #endif /* CONFIG_NUMA_BALANCING */
3400 
3401 /***
3402  * kick_process - kick a running thread to enter/exit the kernel
3403  * @p: the to-be-kicked thread
3404  *
3405  * Cause a process which is running on another CPU to enter
3406  * kernel-mode without any delay (to get signals handled).
3407  *
3408  * NOTE: this function doesn't have to take the runqueue lock,
3409  * because all it wants to ensure is that the remote task enters
3410  * the kernel. If the IPI races and the task has been migrated
3411  * to another CPU then no harm is done and the purpose has been
3412  * achieved as well.
3413  */
3414 void kick_process(struct task_struct *p)
3415 {
3416 	guard(preempt)();
3417 	int cpu = task_cpu(p);
3418 
3419 	if ((cpu != smp_processor_id()) && task_curr(p))
3420 		smp_send_reschedule(cpu);
3421 }
3422 EXPORT_SYMBOL_GPL(kick_process);
3423 
3424 /*
3425  * ->cpus_ptr is protected by both rq->lock and p->pi_lock
3426  *
3427  * A few notes on cpu_active vs cpu_online:
3428  *
3429  *  - cpu_active must be a subset of cpu_online
3430  *
3431  *  - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
3432  *    see __set_cpus_allowed_ptr(). At this point the newly online
3433  *    CPU isn't yet part of the sched domains, and balancing will not
3434  *    see it.
3435  *
3436  *  - on CPU-down we clear cpu_active() to mask the sched domains and
3437  *    prevent the load balancer from placing new tasks on the to-be-removed
3438  *    CPU. Existing tasks will remain running there and will be taken
3439  *    off.
3440  *
3441  * This means that fallback selection must not select !active CPUs,
3442  * and can assume that any active CPU must be online. Conversely,
3443  * select_task_rq() below may allow selection of !active CPUs in order
3444  * to satisfy the above rules.
3445  */
3446 static int select_fallback_rq(int cpu, struct task_struct *p)
3447 {
3448 	int nid = cpu_to_node(cpu);
3449 	const struct cpumask *nodemask = NULL;
3450 	enum { cpuset, possible, fail } state = cpuset;
3451 	int dest_cpu;
3452 
3453 	/*
3454 	 * If the node that the CPU is on has been offlined, cpu_to_node()
3455 	 * will return -1. There is no CPU on the node, and we should
3456 	 * select a CPU on another node.
3457 	 */
3458 	if (nid != -1) {
3459 		nodemask = cpumask_of_node(nid);
3460 
3461 		/* Look for allowed, online CPU in same node. */
3462 		for_each_cpu(dest_cpu, nodemask) {
3463 			if (is_cpu_allowed(p, dest_cpu))
3464 				return dest_cpu;
3465 		}
3466 	}
3467 
3468 	for (;;) {
3469 		/* Any allowed, online CPU? */
3470 		for_each_cpu(dest_cpu, p->cpus_ptr) {
3471 			if (!is_cpu_allowed(p, dest_cpu))
3472 				continue;
3473 
3474 			goto out;
3475 		}
3476 
3477 		/* No more Mr. Nice Guy. */
3478 		switch (state) {
3479 		case cpuset:
3480 			if (cpuset_cpus_allowed_fallback(p)) {
3481 				state = possible;
3482 				break;
3483 			}
3484 			fallthrough;
3485 		case possible:
3486 			/*
3487 			 * XXX When called from select_task_rq() we only
3488 			 * hold p->pi_lock and again violate locking order.
3489 			 *
3490 			 * More yuck to audit.
3491 			 */
3492 			do_set_cpus_allowed(p, task_cpu_possible_mask(p));
3493 			state = fail;
3494 			break;
3495 		case fail:
3496 			BUG();
3497 			break;
3498 		}
3499 	}
3500 
3501 out:
3502 	if (state != cpuset) {
3503 		/*
3504 		 * Don't tell them about moving exiting tasks or
3505 		 * kernel threads (both mm NULL), since they never
3506 		 * leave the kernel.
3507 		 */
3508 		if (p->mm && printk_ratelimit()) {
3509 			printk_deferred("process %d (%s) no longer affine to cpu%d\n",
3510 					task_pid_nr(p), p->comm, cpu);
3511 		}
3512 	}
3513 
3514 	return dest_cpu;
3515 }
3516 
3517 /*
3518  * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
3519  */
3520 static inline
3521 int select_task_rq(struct task_struct *p, int cpu, int wake_flags)
3522 {
3523 	lockdep_assert_held(&p->pi_lock);
3524 
3525 	if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p))
3526 		cpu = p->sched_class->select_task_rq(p, cpu, wake_flags);
3527 	else
3528 		cpu = cpumask_any(p->cpus_ptr);
3529 
3530 	/*
3531 	 * In order not to call set_task_cpu() on a blocking task we need
3532 	 * to rely on ttwu() to place the task on a valid ->cpus_ptr
3533 	 * CPU.
3534 	 *
3535 	 * Since this is common to all placement strategies, this lives here.
3536 	 *
3537 	 * [ this allows ->select_task() to simply return task_cpu(p) and
3538 	 *   not worry about this generic constraint ]
3539 	 */
3540 	if (unlikely(!is_cpu_allowed(p, cpu)))
3541 		cpu = select_fallback_rq(task_cpu(p), p);
3542 
3543 	return cpu;
3544 }
3545 
3546 void sched_set_stop_task(int cpu, struct task_struct *stop)
3547 {
3548 	static struct lock_class_key stop_pi_lock;
3549 	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
3550 	struct task_struct *old_stop = cpu_rq(cpu)->stop;
3551 
3552 	if (stop) {
3553 		/*
3554 		 * Make it appear like a SCHED_FIFO task, it's something
3555 		 * userspace knows about and won't get confused about.
3556 		 *
3557 		 * Also, it will make PI more or less work without too
3558 		 * much confusion -- but then, stop work should not
3559 		 * rely on PI working anyway.
3560 		 */
3561 		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
3562 
3563 		stop->sched_class = &stop_sched_class;
3564 
3565 		/*
3566 		 * The PI code calls rt_mutex_setprio() with ->pi_lock held to
3567 		 * adjust the effective priority of a task. As a result,
3568 		 * rt_mutex_setprio() can trigger (RT) balancing operations,
3569 		 * which can then trigger wakeups of the stop thread to push
3570 		 * around the current task.
3571 		 *
3572 		 * The stop task itself will never be part of the PI-chain, it
3573 		 * never blocks, therefore that ->pi_lock recursion is safe.
3574 		 * Tell lockdep about this by placing the stop->pi_lock in its
3575 		 * own class.
3576 		 */
3577 		lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
3578 	}
3579 
3580 	cpu_rq(cpu)->stop = stop;
3581 
3582 	if (old_stop) {
3583 		/*
3584 		 * Reset it back to a normal scheduling class so that
3585 		 * it can die in pieces.
3586 		 */
3587 		old_stop->sched_class = &rt_sched_class;
3588 	}
3589 }
3590 
3591 #else /* CONFIG_SMP */
3592 
3593 static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { }
3594 
3595 static inline bool rq_has_pinned_tasks(struct rq *rq)
3596 {
3597 	return false;
3598 }
3599 
3600 #endif /* !CONFIG_SMP */
3601 
3602 static void
3603 ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
3604 {
3605 	struct rq *rq;
3606 
3607 	if (!schedstat_enabled())
3608 		return;
3609 
3610 	rq = this_rq();
3611 
3612 #ifdef CONFIG_SMP
3613 	if (cpu == rq->cpu) {
3614 		__schedstat_inc(rq->ttwu_local);
3615 		__schedstat_inc(p->stats.nr_wakeups_local);
3616 	} else {
3617 		struct sched_domain *sd;
3618 
3619 		__schedstat_inc(p->stats.nr_wakeups_remote);
3620 
3621 		guard(rcu)();
3622 		for_each_domain(rq->cpu, sd) {
3623 			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
3624 				__schedstat_inc(sd->ttwu_wake_remote);
3625 				break;
3626 			}
3627 		}
3628 	}
3629 
3630 	if (wake_flags & WF_MIGRATED)
3631 		__schedstat_inc(p->stats.nr_wakeups_migrate);
3632 #endif /* CONFIG_SMP */
3633 
3634 	__schedstat_inc(rq->ttwu_count);
3635 	__schedstat_inc(p->stats.nr_wakeups);
3636 
3637 	if (wake_flags & WF_SYNC)
3638 		__schedstat_inc(p->stats.nr_wakeups_sync);
3639 }
3640 
3641 /*
3642  * Mark the task runnable.
3643  */
3644 static inline void ttwu_do_wakeup(struct task_struct *p)
3645 {
3646 	WRITE_ONCE(p->__state, TASK_RUNNING);
3647 	trace_sched_wakeup(p);
3648 }
3649 
3650 static void
3651 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
3652 		 struct rq_flags *rf)
3653 {
3654 	int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
3655 
3656 	lockdep_assert_rq_held(rq);
3657 
3658 	if (p->sched_contributes_to_load)
3659 		rq->nr_uninterruptible--;
3660 
3661 #ifdef CONFIG_SMP
3662 	if (wake_flags & WF_MIGRATED)
3663 		en_flags |= ENQUEUE_MIGRATED;
3664 	else
3665 #endif
3666 	if (p->in_iowait) {
3667 		delayacct_blkio_end(p);
3668 		atomic_dec(&task_rq(p)->nr_iowait);
3669 	}
3670 
3671 	activate_task(rq, p, en_flags);
3672 	wakeup_preempt(rq, p, wake_flags);
3673 
3674 	ttwu_do_wakeup(p);
3675 
3676 #ifdef CONFIG_SMP
3677 	if (p->sched_class->task_woken) {
3678 		/*
3679 		 * Our task @p is fully woken up and running; so it's safe to
3680 		 * drop the rq->lock, hereafter rq is only used for statistics.
3681 		 */
3682 		rq_unpin_lock(rq, rf);
3683 		p->sched_class->task_woken(rq, p);
3684 		rq_repin_lock(rq, rf);
3685 	}
3686 
3687 	if (rq->idle_stamp) {
3688 		u64 delta = rq_clock(rq) - rq->idle_stamp;
3689 		u64 max = 2*rq->max_idle_balance_cost;
3690 
3691 		update_avg(&rq->avg_idle, delta);
3692 
3693 		if (rq->avg_idle > max)
3694 			rq->avg_idle = max;
3695 
3696 		rq->idle_stamp = 0;
3697 	}
3698 #endif
3699 }
3700 
3701 /*
3702  * Consider @p being inside a wait loop:
3703  *
3704  *   for (;;) {
3705  *      set_current_state(TASK_UNINTERRUPTIBLE);
3706  *
3707  *      if (CONDITION)
3708  *         break;
3709  *
3710  *      schedule();
3711  *   }
3712  *   __set_current_state(TASK_RUNNING);
3713  *
3714  * between set_current_state() and schedule(). In this case @p is still
3715  * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
3716  * an atomic manner.
3717  *
3718  * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
3719  * then schedule() must still happen and p->state can be changed to
3720  * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
3721  * need to do a full wakeup with enqueue.
3722  *
3723  * Returns: %true when the wakeup is done,
3724  *          %false otherwise.
3725  */
3726 static int ttwu_runnable(struct task_struct *p, int wake_flags)
3727 {
3728 	struct rq_flags rf;
3729 	struct rq *rq;
3730 	int ret = 0;
3731 
3732 	rq = __task_rq_lock(p, &rf);
3733 	if (task_on_rq_queued(p)) {
3734 		update_rq_clock(rq);
3735 		if (p->se.sched_delayed)
3736 			enqueue_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_DELAYED);
3737 		if (!task_on_cpu(rq, p)) {
3738 			/*
3739 			 * When on_rq && !on_cpu the task is preempted, see if
3740 			 * it should preempt the task that is current now.
3741 			 */
3742 			wakeup_preempt(rq, p, wake_flags);
3743 		}
3744 		ttwu_do_wakeup(p);
3745 		ret = 1;
3746 	}
3747 	__task_rq_unlock(rq, &rf);
3748 
3749 	return ret;
3750 }
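
/*
 * The waker's side of the wait loop above, schematically (CONDITION is
 * whatever the sleeper checks):
 *
 *   CONDITION = 1;
 *   wake_up_process(p);
 *
 * wake_up_process() implies a full barrier between the CONDITION store
 * and the p->state load, so either ttwu_runnable() flips p->state back
 * to TASK_RUNNING before schedule() dequeues @p, or a full wakeup with
 * enqueue is done; the wakeup cannot be lost.
 */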
3751 
3752 #ifdef CONFIG_SMP
3753 void sched_ttwu_pending(void *arg)
3754 {
3755 	struct llist_node *llist = arg;
3756 	struct rq *rq = this_rq();
3757 	struct task_struct *p, *t;
3758 	struct rq_flags rf;
3759 
3760 	if (!llist)
3761 		return;
3762 
3763 	rq_lock_irqsave(rq, &rf);
3764 	update_rq_clock(rq);
3765 
3766 	llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
3767 		if (WARN_ON_ONCE(p->on_cpu))
3768 			smp_cond_load_acquire(&p->on_cpu, !VAL);
3769 
3770 		if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
3771 			set_task_cpu(p, cpu_of(rq));
3772 
3773 		ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf);
3774 	}
3775 
3776 	 * Must be after enqueueing at least one task such that
3777 	 * Must be after enqueueing at least once task such that
3778 	 * idle_cpu() does not observe a false-negative -- if it does,
3779 	 * it is possible for select_idle_siblings() to stack a number
3780 	 * of tasks on this CPU during that window.
3781 	 *
3782 	 * It is OK to clear ttwu_pending while another task is still pending:
3783 	 * we will receive an IPI once local IRQs are re-enabled and enqueue it
3784 	 * then. Since nr_running is now > 0, idle_cpu() stays correct.
3785 	 */
3786 	WRITE_ONCE(rq->ttwu_pending, 0);
3787 	rq_unlock_irqrestore(rq, &rf);
3788 }
3789 
3790 /*
3791  * Prepare the scene for sending an IPI for a remote smp_call
3792  *
3793  * Returns true if the caller can proceed with sending the IPI.
3794  * Returns false otherwise.
3795  */
3796 bool call_function_single_prep_ipi(int cpu)
3797 {
3798 	if (set_nr_if_polling(cpu_rq(cpu)->idle)) {
3799 		trace_sched_wake_idle_without_ipi(cpu);
3800 		return false;
3801 	}
3802 
3803 	return true;
3804 }
3805 
3806 /*
3807  * Queue a task on the target CPU's wake_list and wake the CPU via IPI if
3808  * necessary. The wakee CPU on receipt of the IPI will queue the task
3809  * via sched_ttwu_pending() for activation so the wakee incurs the cost
3810  * of the wakeup instead of the waker.
3811  */
3812 static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3813 {
3814 	struct rq *rq = cpu_rq(cpu);
3815 
3816 	p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
3817 
3818 	WRITE_ONCE(rq->ttwu_pending, 1);
3819 	__smp_call_single_queue(cpu, &p->wake_entry.llist);
3820 }
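
/*
 * Schematically, the remote-wakeup round trip (waker on CPU0, wakee rq
 * on CPU1):
 *
 *   CPU0                                     CPU1
 *
 *   __ttwu_queue_wakelist()
 *     WRITE_ONCE(rq1->ttwu_pending, 1)
 *     __smp_call_single_queue()  ---IPI--->  sched_ttwu_pending()
 *                                              ttwu_do_activate() each task
 *                                              WRITE_ONCE(rq->ttwu_pending, 0)
 */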
3821 
3822 void wake_up_if_idle(int cpu)
3823 {
3824 	struct rq *rq = cpu_rq(cpu);
3825 
3826 	guard(rcu)();
3827 	if (is_idle_task(rcu_dereference(rq->curr))) {
3828 		guard(rq_lock_irqsave)(rq);
3829 		if (is_idle_task(rq->curr))
3830 			resched_curr(rq);
3831 	}
3832 }
3833 
3834 bool cpus_equal_capacity(int this_cpu, int that_cpu)
3835 {
3836 	if (!sched_asym_cpucap_active())
3837 		return true;
3838 
3839 	if (this_cpu == that_cpu)
3840 		return true;
3841 
3842 	return arch_scale_cpu_capacity(this_cpu) == arch_scale_cpu_capacity(that_cpu);
3843 }
3844 
3845 bool cpus_share_cache(int this_cpu, int that_cpu)
3846 {
3847 	if (this_cpu == that_cpu)
3848 		return true;
3849 
3850 	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
3851 }
3852 
3853 /*
3854  * Whether CPUs share cache resources, meaning the LLC on non-cluster
3855  * machines and the LLC tag or L2 on machines with clusters.
3856  */
3857 bool cpus_share_resources(int this_cpu, int that_cpu)
3858 {
3859 	if (this_cpu == that_cpu)
3860 		return true;
3861 
3862 	return per_cpu(sd_share_id, this_cpu) == per_cpu(sd_share_id, that_cpu);
3863 }
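
/*
 * On a hypothetical machine with two clusters of 4 CPUs each sharing an
 * L2, and all 8 CPUs sharing one LLC:
 *
 *   cpus_share_resources(0, 3) == true	// same cluster (L2)
 *   cpus_share_resources(0, 4) == false	// different cluster
 *   cpus_share_cache(0, 4)     == true	// same LLC
 */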
3864 
3865 static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
3866 {
3867 	/*
3868 	 * The BPF scheduler may depend on select_task_rq() being invoked during
3869 	 * wakeups. In addition, @p may end up executing on a different CPU
3870 	 * regardless of what happens in the wakeup path making the ttwu_queue
3871 	 * optimization less meaningful. Skip if on SCX.
3872 	 */
3873 	if (task_on_scx(p))
3874 		return false;
3875 
3876 	/*
3877 	 * Do not complicate things with the async wake_list while the CPU is
3878 	 * in hotplug state.
3879 	 */
3880 	if (!cpu_active(cpu))
3881 		return false;
3882 
3883 	/* Ensure the task will still be allowed to run on the CPU. */
3884 	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
3885 		return false;
3886 
3887 	/*
3888 	 * If the CPU does not share cache, then queue the task on the
3889 	 * remote rq's wakelist to avoid accessing remote data.
3890 	 */
3891 	if (!cpus_share_cache(smp_processor_id(), cpu))
3892 		return true;
3893 
3894 	if (cpu == smp_processor_id())
3895 		return false;
3896 
3897 	/*
3898 	 * If the wakee CPU is idle, or the task is descheduling and is the
3899 	 * only running task on the CPU, then use the wakelist to offload
3900 	 * the task activation to the idle (or soon-to-be-idle) CPU as
3901 	 * the current CPU is likely busy. nr_running is checked to
3902 	 * avoid unnecessary task stacking.
3903 	 *
3904 	 * Note that we can only get here with (wakee) p->on_rq == 0;
3905 	 * p->on_cpu can be whatever. Since we've done the dequeue,
3906 	 * the wakee has already been accounted out of ->nr_running.
3907 	 */
3908 	if (!cpu_rq(cpu)->nr_running)
3909 		return true;
3910 
3911 	return false;
3912 }
3913 
3914 static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3915 {
3916 	if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) {
3917 		sched_clock_cpu(cpu); /* Sync clocks across CPUs */
3918 		__ttwu_queue_wakelist(p, cpu, wake_flags);
3919 		return true;
3920 	}
3921 
3922 	return false;
3923 }
3924 
3925 #else /* !CONFIG_SMP */
3926 
3927 static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3928 {
3929 	return false;
3930 }
3931 
3932 #endif /* CONFIG_SMP */
3933 
3934 static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
3935 {
3936 	struct rq *rq = cpu_rq(cpu);
3937 	struct rq_flags rf;
3938 
3939 	if (ttwu_queue_wakelist(p, cpu, wake_flags))
3940 		return;
3941 
3942 	rq_lock(rq, &rf);
3943 	update_rq_clock(rq);
3944 	ttwu_do_activate(rq, p, wake_flags, &rf);
3945 	rq_unlock(rq, &rf);
3946 }
3947 
3948 /*
3949  * Invoked from try_to_wake_up() to check whether the task can be woken up.
3950  *
3951  * The caller holds p::pi_lock if p != current or has preemption
3952  * disabled when p == current.
3953  *
3954  * The rules of saved_state:
3955  *
3956  *   The related locking code always holds p::pi_lock when updating
3957  *   p::saved_state, which means the code is fully serialized in both cases.
3958  *
3959  *   For PREEMPT_RT, the lock wait and lock wakeups happen via TASK_RTLOCK_WAIT.
3960  *   No other bits are set. This allows us to distinguish all wakeup scenarios.
3961  *
3962  *   For FREEZER, the wakeup happens via TASK_FROZEN. No other bits are set.
3963  *   This allows us to prevent early wakeup of tasks before they can be run
3964  *   on asymmetric ISA architectures (e.g. ARMv9).
3965  */
3966 static __always_inline
3967 bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
3968 {
3969 	int match;
3970 
3971 	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
3972 		WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) &&
3973 			     state != TASK_RTLOCK_WAIT);
3974 	}
3975 
3976 	*success = !!(match = __task_state_match(p, state));
3977 
3978 	/*
3979 	 * Saved state preserves the task state across blocking on
3980 	 * an RT lock or TASK_FREEZABLE tasks.  If the state matches,
3981 	 * set p::saved_state to TASK_RUNNING, but do not wake the task
3982 	 * because it waits for a lock wakeup or __thaw_task(). Also
3983 	 * indicate success because from the regular waker's point of
3984 	 * view this has succeeded.
3985 	 *
3986 	 * After acquiring the lock the task will restore p::__state
3987 	 * from p::saved_state which ensures that the regular
3988 	 * wakeup is not lost. The restore will also set
3989 	 * p::saved_state to TASK_RUNNING so any further tests will
3990  * not result in false positives vs. @success.
3991 	 */
3992 	if (match < 0)
3993 		p->saved_state = TASK_RUNNING;
3994 
3995 	return match > 0;
3996 }
3997 
3998 /*
3999  * Notes on Program-Order guarantees on SMP systems.
4000  *
4001  *  MIGRATION
4002  *
4003  * The basic program-order guarantee on SMP systems is that when a task [t]
4004  * migrates, all its activity on its old CPU [c0] happens-before any subsequent
4005  * execution on its new CPU [c1].
4006  *
4007  * For migration (of runnable tasks) this is provided by the following means:
4008  *
4009  *  A) UNLOCK of the rq(c0)->lock scheduling out task t
4010  *  B) migration for t is required to synchronize *both* rq(c0)->lock and
4011  *     rq(c1)->lock (if not at the same time, then in that order).
4012  *  C) LOCK of the rq(c1)->lock scheduling in task
4013  *
4014  * Release/acquire chaining guarantees that B happens after A and C after B.
4015  * Note: the CPU doing B need not be c0 or c1
4016  *
4017  * Example:
4018  *
4019  *   CPU0            CPU1            CPU2
4020  *
4021  *   LOCK rq(0)->lock
4022  *   sched-out X
4023  *   sched-in Y
4024  *   UNLOCK rq(0)->lock
4025  *
4026  *                                   LOCK rq(0)->lock // orders against CPU0
4027  *                                   dequeue X
4028  *                                   UNLOCK rq(0)->lock
4029  *
4030  *                                   LOCK rq(1)->lock
4031  *                                   enqueue X
4032  *                                   UNLOCK rq(1)->lock
4033  *
4034  *                   LOCK rq(1)->lock // orders against CPU2
4035  *                   sched-out Z
4036  *                   sched-in X
4037  *                   UNLOCK rq(1)->lock
4038  *
4039  *
4040  *  BLOCKING -- aka. SLEEP + WAKEUP
4041  *
4042  * For blocking we (obviously) need to provide the same guarantee as for
4043  * migration. However the means are completely different as there is no lock
4044  * chain to provide order. Instead we do:
4045  *
4046  *   1) smp_store_release(X->on_cpu, 0)   -- finish_task()
4047  *   2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
4048  *
4049  * Example:
4050  *
4051  *   CPU0 (schedule)  CPU1 (try_to_wake_up) CPU2 (schedule)
4052  *
4053  *   LOCK rq(0)->lock LOCK X->pi_lock
4054  *   dequeue X
4055  *   sched-out X
4056  *   smp_store_release(X->on_cpu, 0);
4057  *
4058  *                    smp_cond_load_acquire(&X->on_cpu, !VAL);
4059  *                    X->state = WAKING
4060  *                    set_task_cpu(X,2)
4061  *
4062  *                    LOCK rq(2)->lock
4063  *                    enqueue X
4064  *                    X->state = RUNNING
4065  *                    UNLOCK rq(2)->lock
4066  *
4067  *                                          LOCK rq(2)->lock // orders against CPU1
4068  *                                          sched-out Z
4069  *                                          sched-in X
4070  *                                          UNLOCK rq(2)->lock
4071  *
4072  *                    UNLOCK X->pi_lock
4073  *   UNLOCK rq(0)->lock
4074  *
4075  *
4076  * However, for wakeups there is a second guarantee we must provide, namely we
4077  * must ensure that CONDITION=1 done by the caller can not be reordered with
4078  * accesses to the task state; see try_to_wake_up() and set_current_state().
4079  */
4080 
4081 /**
4082  * try_to_wake_up - wake up a thread
4083  * @p: the thread to be awakened
4084  * @state: the mask of task states that can be woken
4085  * @wake_flags: wake modifier flags (WF_*)
4086  *
4087  * Conceptually does:
4088  *
4089  *   If (@state & @p->state) @p->state = TASK_RUNNING.
4090  *
4091  * If the task was not queued/runnable, also place it back on a runqueue.
4092  *
4093  * This function is atomic against schedule() which would dequeue the task.
4094  *
4095  * It issues a full memory barrier before accessing @p->state, see the comment
4096  * with set_current_state().
4097  *
4098  * Uses p->pi_lock to serialize against concurrent wake-ups.
4099  *
4100  * Relies on p->pi_lock stabilizing:
4101  *  - p->sched_class
4102  *  - p->cpus_ptr
4103  *  - p->sched_task_group
4104  * in order to do migration, see its use of select_task_rq()/set_task_cpu().
4105  *
4106  * Tries really hard to only take one task_rq(p)->lock for performance.
4107  * Takes rq->lock in:
4108  *  - ttwu_runnable()    -- old rq, unavoidable, see comment there;
4109  *  - ttwu_queue()       -- new rq, for enqueue of the task;
4110  *  - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
4111  *
4112  * As a consequence we race really badly with just about everything. See the
4113  * many memory barriers and their comments for details.
4114  *
4115  * Return: %true if @p->state changes (an actual wakeup was done),
4116  *	   %false otherwise.
4117  */
4118 int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
4119 {
4120 	guard(preempt)();
4121 	int cpu, success = 0;
4122 
4123 	if (p == current) {
4124 		/*
4125 		 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
4126 		 * == smp_processor_id()'. Together this means we can special
4127 		 * case the whole 'p->on_rq && ttwu_runnable()' case below
4128 		 * without taking any locks.
4129 		 *
4130 		 * Specifically, given that current runs ttwu(), we must be before
4131 		 * schedule()'s block_task(); as such, this must not observe
4132 		 * sched_delayed.
4133 		 *
4134 		 * In particular:
4135 		 *  - we rely on Program-Order guarantees for all the ordering,
4136 		 *  - we're serialized against set_special_state() by virtue of
4137 		 *    it disabling IRQs (this allows not taking ->pi_lock).
4138 		 */
4139 		SCHED_WARN_ON(p->se.sched_delayed);
4140 		if (!ttwu_state_match(p, state, &success))
4141 			goto out;
4142 
4143 		trace_sched_waking(p);
4144 		ttwu_do_wakeup(p);
4145 		goto out;
4146 	}
4147 
4148 	/*
4149 	 * If we are going to wake up a thread waiting for CONDITION we
4150 	 * need to ensure that CONDITION=1 done by the caller can not be
4151 	 * reordered with p->state check below. This pairs with smp_store_mb()
4152 	 * in set_current_state() that the waiting thread does.
4153 	 */
4154 	scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
4155 		smp_mb__after_spinlock();
4156 		if (!ttwu_state_match(p, state, &success))
4157 			break;
4158 
4159 		trace_sched_waking(p);
4160 
4161 		/*
4162 		 * Ensure we load p->on_rq _after_ p->state, otherwise it would
4163 		 * be possible to, falsely, observe p->on_rq == 0 and get stuck
4164 		 * in smp_cond_load_acquire() below.
4165 		 *
4166 		 * sched_ttwu_pending()			try_to_wake_up()
4167 		 *   STORE p->on_rq = 1			  LOAD p->state
4168 		 *   UNLOCK rq->lock
4169 		 *
4170 		 * __schedule() (switch to task 'p')
4171 		 *   LOCK rq->lock			  smp_rmb();
4172 		 *   smp_mb__after_spinlock();
4173 		 *   UNLOCK rq->lock
4174 		 *
4175 		 * [task p]
4176 		 *   STORE p->state = UNINTERRUPTIBLE	  LOAD p->on_rq
4177 		 *
4178 		 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4179 		 * __schedule().  See the comment for smp_mb__after_spinlock().
4180 		 *
4181 		 * A similar smp_rmb() lives in __task_needs_rq_lock().
4182 		 */
4183 		smp_rmb();
4184 		if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
4185 			break;
4186 
4187 #ifdef CONFIG_SMP
4188 		/*
4189 		 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
4190 		 * possible to, falsely, observe p->on_cpu == 0.
4191 		 *
4192 		 * One must be running (->on_cpu == 1) in order to remove oneself
4193 		 * from the runqueue.
4194 		 *
4195 		 * __schedule() (switch to task 'p')	try_to_wake_up()
4196 		 *   STORE p->on_cpu = 1		  LOAD p->on_rq
4197 		 *   UNLOCK rq->lock
4198 		 *
4199 		 * __schedule() (put 'p' to sleep)
4200 		 *   LOCK rq->lock			  smp_rmb();
4201 		 *   smp_mb__after_spinlock();
4202 		 *   STORE p->on_rq = 0			  LOAD p->on_cpu
4203 		 *
4204 		 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4205 		 * __schedule().  See the comment for smp_mb__after_spinlock().
4206 		 *
4207 		 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
4208 		 * schedule()'s deactivate_task() has 'happened' and p will no longer
4209 		 * care about its own p->state. See the comment in __schedule().
4210 		 */
4211 		smp_acquire__after_ctrl_dep();
4212 
4213 		/*
4214 		 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
4215 		 * == 0), which means we need to do an enqueue, change p->state to
4216 		 * TASK_WAKING such that we can unlock p->pi_lock before doing the
4217 		 * enqueue, such as ttwu_queue_wakelist().
4218 		 */
4219 		WRITE_ONCE(p->__state, TASK_WAKING);
4220 
4221 		/*
4222 		 * If the owning (remote) CPU is still in the middle of schedule() with
4223 		 * this task as prev, consider queueing p on the remote CPU's wake_list
4224 		 * which potentially sends an IPI instead of spinning on p->on_cpu to
4225 		 * let the waker make forward progress. This is safe because IRQs are
4226 		 * disabled and the IPI will deliver after on_cpu is cleared.
4227 		 *
4228 		 * Ensure we load task_cpu(p) after p->on_cpu:
4229 		 *
4230 		 * set_task_cpu(p, cpu);
4231 		 *   STORE p->cpu = @cpu
4232 		 * __schedule() (switch to task 'p')
4233 		 *   LOCK rq->lock
4234 		 *   smp_mb__after_spin_lock()		smp_cond_load_acquire(&p->on_cpu)
4235 		 *   STORE p->on_cpu = 1		LOAD p->cpu
4236 		 *
4237 		 * to ensure we observe the correct CPU on which the task is currently
4238 		 * scheduling.
4239 		 */
4240 		if (smp_load_acquire(&p->on_cpu) &&
4241 		    ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
4242 			break;
4243 
4244 		/*
4245 		 * If the owning (remote) CPU is still in the middle of schedule() with
4246 		 * this task as prev, wait until it's done referencing the task.
4247 		 *
4248 		 * Pairs with the smp_store_release() in finish_task().
4249 		 *
4250 		 * This ensures that tasks getting woken will be fully ordered against
4251 		 * their previous state and preserve Program Order.
4252 		 */
4253 		smp_cond_load_acquire(&p->on_cpu, !VAL);
4254 
4255 		cpu = select_task_rq(p, p->wake_cpu, wake_flags | WF_TTWU);
4256 		if (task_cpu(p) != cpu) {
4257 			if (p->in_iowait) {
4258 				delayacct_blkio_end(p);
4259 				atomic_dec(&task_rq(p)->nr_iowait);
4260 			}
4261 
4262 			wake_flags |= WF_MIGRATED;
4263 			psi_ttwu_dequeue(p);
4264 			set_task_cpu(p, cpu);
4265 		}
4266 #else
4267 		cpu = task_cpu(p);
4268 #endif /* CONFIG_SMP */
4269 
4270 		ttwu_queue(p, cpu, wake_flags);
4271 	}
4272 out:
4273 	if (success)
4274 		ttwu_stat(p, task_cpu(p), wake_flags);
4275 
4276 	return success;
4277 }
4278 
4279 static bool __task_needs_rq_lock(struct task_struct *p)
4280 {
4281 	unsigned int state = READ_ONCE(p->__state);
4282 
4283 	/*
4284 	 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when
4285 	 * the task is blocked. Make sure to check @state since ttwu() can drop
4286 	 * locks at the end, see ttwu_queue_wakelist().
4287 	 */
4288 	if (state == TASK_RUNNING || state == TASK_WAKING)
4289 		return true;
4290 
4291 	/*
4292 	 * Ensure we load p->on_rq after p->__state, otherwise it would be
4293 	 * possible to, falsely, observe p->on_rq == 0.
4294 	 *
4295 	 * See try_to_wake_up() for a longer comment.
4296 	 */
4297 	smp_rmb();
4298 	if (p->on_rq)
4299 		return true;
4300 
4301 #ifdef CONFIG_SMP
4302 	/*
4303 	 * Ensure the task has finished __schedule() and will not be referenced
4304 	 * anymore. Again, see try_to_wake_up() for a longer comment.
4305 	 */
4306 	smp_rmb();
4307 	smp_cond_load_acquire(&p->on_cpu, !VAL);
4308 #endif
4309 
4310 	return false;
4311 }
4312 
4313 /**
4314  * task_call_func - Invoke a function on task in fixed state
4315  * @p: Process for which the function is to be invoked, can be @current.
4316  * @func: Function to invoke.
4317  * @arg: Argument to function.
4318  *
4319  * Fix the task in its current state by avoiding wakeups and/or rq operations
4320  * and call @func(@arg) on it.  This function can use ->on_rq and task_curr()
4321  * to work out what the state is, if required.  Given that @func can be invoked
4322  * with a runqueue lock held, it had better be quite lightweight.
4323  *
4324  * Returns:
4325  *   Whatever @func returns
4326  */
4327 int task_call_func(struct task_struct *p, task_call_f func, void *arg)
4328 {
4329 	struct rq *rq = NULL;
4330 	struct rq_flags rf;
4331 	int ret;
4332 
4333 	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
4334 
4335 	if (__task_needs_rq_lock(p))
4336 		rq = __task_rq_lock(p, &rf);
4337 
4338 	/*
4339 	 * At this point the task is pinned; either:
4340 	 *  - blocked and we're holding off wakeups	 (pi->lock)
4341 	 *  - woken, and we're holding off enqueue	 (rq->lock)
4342 	 *  - queued, and we're holding off schedule	 (rq->lock)
4343 	 *  - running, and we're holding off de-schedule (rq->lock)
4344 	 *
4345 	 * The called function (@func) can use: task_curr(), p->on_rq and
4346 	 * p->__state to differentiate between these states.
4347 	 */
4348 	ret = func(p, arg);
4349 
4350 	if (rq)
4351 		rq_unlock(rq, &rf);
4352 
4353 	raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
4354 	return ret;
4355 }
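
/*
 * A minimal usage sketch; probe_task_running() is a hypothetical
 * callback, kept lightweight because it can run under rq->lock:
 *
 *   static int probe_task_running(struct task_struct *p, void *arg)
 *   {
 *           return task_curr(p);
 *   }
 *
 *   int running = task_call_func(p, probe_task_running, NULL);
 */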
4356 
4357 /**
4358  * cpu_curr_snapshot - Return a snapshot of the currently running task
4359  * @cpu: The CPU on which to snapshot the task.
4360  *
4361  * Returns the task_struct pointer of the task "currently" running on
4362  * the specified CPU.
4363  *
4364  * If the specified CPU was offline, the return value is whatever it
4365  * is, perhaps a pointer to the task_struct structure of that CPU's idle
4366  * task, but there is no guarantee.  Callers wishing a useful return
4367  * value must take some action to ensure that the specified CPU remains
4368  * online throughout.
4369  *
4370  * This function executes full memory barriers before and after fetching
4371  * the pointer, which permits the caller to confine this function's fetch
4372  * with respect to the caller's accesses to other shared variables.
4373  */
4374 struct task_struct *cpu_curr_snapshot(int cpu)
4375 {
4376 	struct rq *rq = cpu_rq(cpu);
4377 	struct task_struct *t;
4378 	struct rq_flags rf;
4379 
4380 	rq_lock_irqsave(rq, &rf);
4381 	smp_mb__after_spinlock(); /* Pairing determined by caller's synchronization design. */
4382 	t = rcu_dereference(cpu_curr(cpu));
4383 	rq_unlock_irqrestore(rq, &rf);
4384 	smp_mb(); /* Pairing determined by caller's synchronization design. */
4385 
4386 	return t;
4387 }
4388 
4389 /**
4390  * wake_up_process - Wake up a specific process
4391  * @p: The process to be woken up.
4392  *
4393  * Attempt to wake up the nominated process and move it to the set of runnable
4394  * processes.
4395  *
4396  * Return: 1 if the process was woken up, 0 if it was already running.
4397  *
4398  * This function executes a full memory barrier before accessing the task state.
4399  */
4400 int wake_up_process(struct task_struct *p)
4401 {
4402 	return try_to_wake_up(p, TASK_NORMAL, 0);
4403 }
4404 EXPORT_SYMBOL(wake_up_process);
4405 
4406 int wake_up_state(struct task_struct *p, unsigned int state)
4407 {
4408 	return try_to_wake_up(p, state, 0);
4409 }
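
/*
 * Usage sketch: @state selects which sleepers may be woken. Signal
 * delivery, for instance, wakes interruptible sleepers only (see
 * signal_wake_up_state()), while TASK_NORMAL covers both interruptible
 * and uninterruptible sleeps:
 *
 *   wake_up_state(p, TASK_INTERRUPTIBLE);	// signal-style wakeup
 *   wake_up_state(p, TASK_NORMAL);		// same as wake_up_process(p)
 */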
4410 
4411 /*
4412  * Perform scheduler related setup for a newly forked process p.
4413  * p is forked by current.
4414  *
4415  * __sched_fork() is basic setup used by init_idle() too:
4416  */
4417 static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
4418 {
4419 	p->on_rq			= 0;
4420 
4421 	p->se.on_rq			= 0;
4422 	p->se.exec_start		= 0;
4423 	p->se.sum_exec_runtime		= 0;
4424 	p->se.prev_sum_exec_runtime	= 0;
4425 	p->se.nr_migrations		= 0;
4426 	p->se.vruntime			= 0;
4427 	p->se.vlag			= 0;
4428 	INIT_LIST_HEAD(&p->se.group_node);
4429 
4430 	/* A delayed task cannot be in clone(). */
4431 	SCHED_WARN_ON(p->se.sched_delayed);
4432 
4433 #ifdef CONFIG_FAIR_GROUP_SCHED
4434 	p->se.cfs_rq			= NULL;
4435 #endif
4436 
4437 #ifdef CONFIG_SCHEDSTATS
4438 	/* Even if schedstat is disabled, there should not be garbage */
4439 	memset(&p->stats, 0, sizeof(p->stats));
4440 #endif
4441 
4442 	init_dl_entity(&p->dl);
4443 
4444 	INIT_LIST_HEAD(&p->rt.run_list);
4445 	p->rt.timeout		= 0;
4446 	p->rt.time_slice	= sched_rr_timeslice;
4447 	p->rt.on_rq		= 0;
4448 	p->rt.on_list		= 0;
4449 
4450 #ifdef CONFIG_SCHED_CLASS_EXT
4451 	init_scx_entity(&p->scx);
4452 #endif
4453 
4454 #ifdef CONFIG_PREEMPT_NOTIFIERS
4455 	INIT_HLIST_HEAD(&p->preempt_notifiers);
4456 #endif
4457 
4458 #ifdef CONFIG_COMPACTION
4459 	p->capture_control = NULL;
4460 #endif
4461 	init_numa_balancing(clone_flags, p);
4462 #ifdef CONFIG_SMP
4463 	p->wake_entry.u_flags = CSD_TYPE_TTWU;
4464 	p->migration_pending = NULL;
4465 #endif
4466 	init_sched_mm_cid(p);
4467 }
4468 
4469 DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);
4470 
4471 #ifdef CONFIG_NUMA_BALANCING
4472 
4473 int sysctl_numa_balancing_mode;
4474 
4475 static void __set_numabalancing_state(bool enabled)
4476 {
4477 	if (enabled)
4478 		static_branch_enable(&sched_numa_balancing);
4479 	else
4480 		static_branch_disable(&sched_numa_balancing);
4481 }
4482 
4483 void set_numabalancing_state(bool enabled)
4484 {
4485 	if (enabled)
4486 		sysctl_numa_balancing_mode = NUMA_BALANCING_NORMAL;
4487 	else
4488 		sysctl_numa_balancing_mode = NUMA_BALANCING_DISABLED;
4489 	__set_numabalancing_state(enabled);
4490 }
4491 
4492 #ifdef CONFIG_PROC_SYSCTL
4493 static void reset_memory_tiering(void)
4494 {
4495 	struct pglist_data *pgdat;
4496 
4497 	for_each_online_pgdat(pgdat) {
4498 		pgdat->nbp_threshold = 0;
4499 		pgdat->nbp_th_nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE);
4500 		pgdat->nbp_th_start = jiffies_to_msecs(jiffies);
4501 	}
4502 }
4503 
4504 static int sysctl_numa_balancing(const struct ctl_table *table, int write,
4505 			  void *buffer, size_t *lenp, loff_t *ppos)
4506 {
4507 	struct ctl_table t;
4508 	int err;
4509 	int state = sysctl_numa_balancing_mode;
4510 
4511 	if (write && !capable(CAP_SYS_ADMIN))
4512 		return -EPERM;
4513 
4514 	t = *table;
4515 	t.data = &state;
4516 	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
4517 	if (err < 0)
4518 		return err;
4519 	if (write) {
4520 		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) &&
4521 		    (state & NUMA_BALANCING_MEMORY_TIERING))
4522 			reset_memory_tiering();
4523 		sysctl_numa_balancing_mode = state;
4524 		__set_numabalancing_state(state);
4525 	}
4526 	return err;
4527 }
4528 #endif
4529 #endif
4530 
4531 #ifdef CONFIG_SCHEDSTATS
4532 
4533 DEFINE_STATIC_KEY_FALSE(sched_schedstats);
4534 
4535 static void set_schedstats(bool enabled)
4536 {
4537 	if (enabled)
4538 		static_branch_enable(&sched_schedstats);
4539 	else
4540 		static_branch_disable(&sched_schedstats);
4541 }
4542 
4543 void force_schedstat_enabled(void)
4544 {
4545 	if (!schedstat_enabled()) {
4546 		pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
4547 		static_branch_enable(&sched_schedstats);
4548 	}
4549 }
4550 
4551 static int __init setup_schedstats(char *str)
4552 {
4553 	int ret = 0;
4554 	if (!str)
4555 		goto out;
4556 
4557 	if (!strcmp(str, "enable")) {
4558 		set_schedstats(true);
4559 		ret = 1;
4560 	} else if (!strcmp(str, "disable")) {
4561 		set_schedstats(false);
4562 		ret = 1;
4563 	}
4564 out:
4565 	if (!ret)
4566 		pr_warn("Unable to parse schedstats=\n");
4567 
4568 	return ret;
4569 }
4570 __setup("schedstats=", setup_schedstats);
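
/* e.g. booting with "schedstats=enable" turns the static key on early. */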
4571 
4572 #ifdef CONFIG_PROC_SYSCTL
4573 static int sysctl_schedstats(const struct ctl_table *table, int write, void *buffer,
4574 		size_t *lenp, loff_t *ppos)
4575 {
4576 	struct ctl_table t;
4577 	int err;
4578 	int state = static_branch_likely(&sched_schedstats);
4579 
4580 	if (write && !capable(CAP_SYS_ADMIN))
4581 		return -EPERM;
4582 
4583 	t = *table;
4584 	t.data = &state;
4585 	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
4586 	if (err < 0)
4587 		return err;
4588 	if (write)
4589 		set_schedstats(state);
4590 	return err;
4591 }
4592 #endif /* CONFIG_PROC_SYSCTL */
4593 #endif /* CONFIG_SCHEDSTATS */
4594 
4595 #ifdef CONFIG_SYSCTL
4596 static struct ctl_table sched_core_sysctls[] = {
4597 #ifdef CONFIG_SCHEDSTATS
4598 	{
4599 		.procname       = "sched_schedstats",
4600 		.data           = NULL,
4601 		.maxlen         = sizeof(unsigned int),
4602 		.mode           = 0644,
4603 		.proc_handler   = sysctl_schedstats,
4604 		.extra1         = SYSCTL_ZERO,
4605 		.extra2         = SYSCTL_ONE,
4606 	},
4607 #endif /* CONFIG_SCHEDSTATS */
4608 #ifdef CONFIG_UCLAMP_TASK
4609 	{
4610 		.procname       = "sched_util_clamp_min",
4611 		.data           = &sysctl_sched_uclamp_util_min,
4612 		.maxlen         = sizeof(unsigned int),
4613 		.mode           = 0644,
4614 		.proc_handler   = sysctl_sched_uclamp_handler,
4615 	},
4616 	{
4617 		.procname       = "sched_util_clamp_max",
4618 		.data           = &sysctl_sched_uclamp_util_max,
4619 		.maxlen         = sizeof(unsigned int),
4620 		.mode           = 0644,
4621 		.proc_handler   = sysctl_sched_uclamp_handler,
4622 	},
4623 	{
4624 		.procname       = "sched_util_clamp_min_rt_default",
4625 		.data           = &sysctl_sched_uclamp_util_min_rt_default,
4626 		.maxlen         = sizeof(unsigned int),
4627 		.mode           = 0644,
4628 		.proc_handler   = sysctl_sched_uclamp_handler,
4629 	},
4630 #endif /* CONFIG_UCLAMP_TASK */
4631 #ifdef CONFIG_NUMA_BALANCING
4632 	{
4633 		.procname	= "numa_balancing",
4634 		.data		= NULL, /* filled in by handler */
4635 		.maxlen		= sizeof(unsigned int),
4636 		.mode		= 0644,
4637 		.proc_handler	= sysctl_numa_balancing,
4638 		.extra1		= SYSCTL_ZERO,
4639 		.extra2		= SYSCTL_FOUR,
4640 	},
4641 #endif /* CONFIG_NUMA_BALANCING */
4642 };
4643 static int __init sched_core_sysctl_init(void)
4644 {
4645 	register_sysctl_init("kernel", sched_core_sysctls);
4646 	return 0;
4647 }
4648 late_initcall(sched_core_sysctl_init);
4649 #endif /* CONFIG_SYSCTL */
4650 
4651 /*
4652  * fork()/clone()-time setup:
4653  */
4654 int sched_fork(unsigned long clone_flags, struct task_struct *p)
4655 {
4656 	__sched_fork(clone_flags, p);
4657 	/*
4658 	 * We mark the process as NEW here. This guarantees that
4659 	 * nobody will actually run it, and a signal or other external
4660 	 * event cannot wake it up and insert it on the runqueue either.
4661 	 */
4662 	p->__state = TASK_NEW;
4663 
4664 	/*
4665 	 * Make sure we do not leak PI boosting priority to the child.
4666 	 */
4667 	p->prio = current->normal_prio;
4668 
4669 	uclamp_fork(p);
4670 
4671 	/*
4672 	 * Revert to default priority/policy on fork if requested.
4673 	 */
4674 	if (unlikely(p->sched_reset_on_fork)) {
4675 		if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
4676 			p->policy = SCHED_NORMAL;
4677 			p->static_prio = NICE_TO_PRIO(0);
4678 			p->rt_priority = 0;
4679 		} else if (PRIO_TO_NICE(p->static_prio) < 0)
4680 			p->static_prio = NICE_TO_PRIO(0);
4681 
4682 		p->prio = p->normal_prio = p->static_prio;
4683 		set_load_weight(p, false);
4684 		p->se.custom_slice = 0;
4685 		p->se.slice = sysctl_sched_base_slice;
4686 
4687 		/*
4688 		 * We don't need the reset flag anymore after the fork. It has
4689 		 * fulfilled its duty:
4690 		 */
4691 		p->sched_reset_on_fork = 0;
4692 	}
4693 
4694 	if (dl_prio(p->prio))
4695 		return -EAGAIN;
4696 
4697 	scx_pre_fork(p);
4698 
4699 	if (rt_prio(p->prio)) {
4700 		p->sched_class = &rt_sched_class;
4701 #ifdef CONFIG_SCHED_CLASS_EXT
4702 	} else if (task_should_scx(p)) {
4703 		p->sched_class = &ext_sched_class;
4704 #endif
4705 	} else {
4706 		p->sched_class = &fair_sched_class;
4707 	}
4708 
4709 	init_entity_runnable_average(&p->se);
4710 
4712 #ifdef CONFIG_SCHED_INFO
4713 	if (likely(sched_info_on()))
4714 		memset(&p->sched_info, 0, sizeof(p->sched_info));
4715 #endif
4716 #ifdef CONFIG_SMP
4717 	p->on_cpu = 0;
4718 #endif
4719 	init_task_preempt_count(p);
4720 #ifdef CONFIG_SMP
4721 	plist_node_init(&p->pushable_tasks, MAX_PRIO);
4722 	RB_CLEAR_NODE(&p->pushable_dl_tasks);
4723 #endif
4724 	return 0;
4725 }
4726 
4727 int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
4728 {
4729 	unsigned long flags;
4730 
4731 	/*
4732 	 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
4733 	 * required yet, but lockdep gets upset if rules are violated.
4734 	 */
4735 	raw_spin_lock_irqsave(&p->pi_lock, flags);
4736 #ifdef CONFIG_CGROUP_SCHED
4737 	if (1) {
4738 		struct task_group *tg;
4739 		tg = container_of(kargs->cset->subsys[cpu_cgrp_id],
4740 				  struct task_group, css);
4741 		tg = autogroup_task_group(p, tg);
4742 		p->sched_task_group = tg;
4743 	}
4744 #endif
4745 	rseq_migrate(p);
4746 	/*
4747 	 * We're setting the CPU for the first time, we don't migrate,
4748 	 * so use __set_task_cpu().
4749 	 */
4750 	__set_task_cpu(p, smp_processor_id());
4751 	if (p->sched_class->task_fork)
4752 		p->sched_class->task_fork(p);
4753 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4754 
4755 	return scx_fork(p);
4756 }
4757 
4758 void sched_cancel_fork(struct task_struct *p)
4759 {
4760 	scx_cancel_fork(p);
4761 }
4762 
4763 void sched_post_fork(struct task_struct *p)
4764 {
4765 	uclamp_post_fork(p);
4766 	scx_post_fork(p);
4767 }
4768 
4769 unsigned long to_ratio(u64 period, u64 runtime)
4770 {
4771 	if (runtime == RUNTIME_INF)
4772 		return BW_UNIT;
4773 
4774 	/*
4775 	 * Doing this here saves a lot of checks in all
4776 	 * the calling paths, and returning zero seems
4777 	 * safe for them anyway.
4778 	 */
4779 	if (period == 0)
4780 		return 0;
4781 
4782 	return div64_u64(runtime << BW_SHIFT, period);
4783 }
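
/*
 * Worked example, assuming BW_SHIFT == 20 (so BW_UNIT == 1 << 20) as
 * defined in sched.h: a runtime of 250us out of a 1ms period gives
 *
 *   to_ratio(1000000, 250000) == (250000 << 20) / 1000000 == BW_UNIT / 4
 *
 * i.e. 25% utilization in fixed point.
 */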
4784 
4785 /*
4786  * wake_up_new_task - wake up a newly created task for the first time.
4787  *
4788  * This function will do some initial scheduler statistics housekeeping
4789  * that must be done for every newly created context, then puts the task
4790  * on the runqueue and wakes it.
4791  */
4792 void wake_up_new_task(struct task_struct *p)
4793 {
4794 	struct rq_flags rf;
4795 	struct rq *rq;
4796 
4797 	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
4798 	WRITE_ONCE(p->__state, TASK_RUNNING);
4799 #ifdef CONFIG_SMP
4800 	/*
4801 	 * Fork balancing, do it here and not earlier because:
4802 	 *  - cpus_ptr can change in the fork path
4803 	 *  - any previously selected CPU might disappear through hotplug
4804 	 *
4805 	 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
4806 	 * as we're not fully set-up yet.
4807 	 */
4808 	p->recent_used_cpu = task_cpu(p);
4809 	rseq_migrate(p);
4810 	__set_task_cpu(p, select_task_rq(p, task_cpu(p), WF_FORK));
4811 #endif
4812 	rq = __task_rq_lock(p, &rf);
4813 	update_rq_clock(rq);
4814 	post_init_entity_util_avg(p);
4815 
4816 	activate_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_INITIAL);
4817 	trace_sched_wakeup_new(p);
4818 	wakeup_preempt(rq, p, WF_FORK);
4819 #ifdef CONFIG_SMP
4820 	if (p->sched_class->task_woken) {
4821 		/*
4822 		 * Nothing relies on rq->lock after this, so it's fine to
4823 		 * drop it.
4824 		 */
4825 		rq_unpin_lock(rq, &rf);
4826 		p->sched_class->task_woken(rq, p);
4827 		rq_repin_lock(rq, &rf);
4828 	}
4829 #endif
4830 	task_rq_unlock(rq, p, &rf);
4831 }
4832 
4833 #ifdef CONFIG_PREEMPT_NOTIFIERS
4834 
4835 static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
4836 
4837 void preempt_notifier_inc(void)
4838 {
4839 	static_branch_inc(&preempt_notifier_key);
4840 }
4841 EXPORT_SYMBOL_GPL(preempt_notifier_inc);
4842 
4843 void preempt_notifier_dec(void)
4844 {
4845 	static_branch_dec(&preempt_notifier_key);
4846 }
4847 EXPORT_SYMBOL_GPL(preempt_notifier_dec);
4848 
4849 /**
4850  * preempt_notifier_register - tell me when current is being preempted & rescheduled
4851  * @notifier: notifier struct to register
4852  */
4853 void preempt_notifier_register(struct preempt_notifier *notifier)
4854 {
4855 	if (!static_branch_unlikely(&preempt_notifier_key))
4856 		WARN(1, "registering preempt_notifier while notifiers disabled\n");
4857 
4858 	hlist_add_head(&notifier->link, &current->preempt_notifiers);
4859 }
4860 EXPORT_SYMBOL_GPL(preempt_notifier_register);
4861 
4862 /**
4863  * preempt_notifier_unregister - no longer interested in preemption notifications
4864  * @notifier: notifier struct to unregister
4865  *
4866  * This is *not* safe to call from within a preemption notifier.
4867  */
4868 void preempt_notifier_unregister(struct preempt_notifier *notifier)
4869 {
4870 	hlist_del(&notifier->link);
4871 }
4872 EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
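
/*
 * A minimal registration sketch (this is the pattern KVM uses to track
 * vCPU preemption); my_ops and the callbacks are illustrative names:
 *
 *   static void my_sched_in(struct preempt_notifier *pn, int cpu) { }
 *   static void my_sched_out(struct preempt_notifier *pn,
 *			      struct task_struct *next) { }
 *
 *   static struct preempt_ops my_ops = {
 *	     .sched_in  = my_sched_in,
 *	     .sched_out = my_sched_out,
 *   };
 *
 *   preempt_notifier_inc();			// enable the static key
 *   preempt_notifier_init(&notifier, &my_ops);
 *   preempt_notifier_register(&notifier);	// current gets callbacks now
 */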
4873 
4874 static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
4875 {
4876 	struct preempt_notifier *notifier;
4877 
4878 	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
4879 		notifier->ops->sched_in(notifier, raw_smp_processor_id());
4880 }
4881 
4882 static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
4883 {
4884 	if (static_branch_unlikely(&preempt_notifier_key))
4885 		__fire_sched_in_preempt_notifiers(curr);
4886 }
4887 
4888 static void
4889 __fire_sched_out_preempt_notifiers(struct task_struct *curr,
4890 				   struct task_struct *next)
4891 {
4892 	struct preempt_notifier *notifier;
4893 
4894 	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
4895 		notifier->ops->sched_out(notifier, next);
4896 }
4897 
4898 static __always_inline void
4899 fire_sched_out_preempt_notifiers(struct task_struct *curr,
4900 				 struct task_struct *next)
4901 {
4902 	if (static_branch_unlikely(&preempt_notifier_key))
4903 		__fire_sched_out_preempt_notifiers(curr, next);
4904 }
4905 
4906 #else /* !CONFIG_PREEMPT_NOTIFIERS */
4907 
4908 static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
4909 {
4910 }
4911 
4912 static inline void
4913 fire_sched_out_preempt_notifiers(struct task_struct *curr,
4914 				 struct task_struct *next)
4915 {
4916 }
4917 
4918 #endif /* CONFIG_PREEMPT_NOTIFIERS */
4919 
4920 static inline void prepare_task(struct task_struct *next)
4921 {
4922 #ifdef CONFIG_SMP
4923 	/*
4924 	 * Claim the task as running, we do this before switching to it
4925 	 * such that any running task will have this set.
4926 	 *
4927 	 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and
4928 	 * its ordering comment.
4929 	 */
4930 	WRITE_ONCE(next->on_cpu, 1);
4931 #endif
4932 }
4933 
4934 static inline void finish_task(struct task_struct *prev)
4935 {
4936 #ifdef CONFIG_SMP
4937 	/*
4938 	 * This must be the very last reference to @prev from this CPU. After
4939 	 * p->on_cpu is cleared, the task can be moved to a different CPU. We
4940 	 * must ensure this doesn't happen until the switch is completely
4941 	 * finished.
4942 	 *
4943 	 * In particular, the load of prev->state in finish_task_switch() must
4944 	 * happen before this.
4945 	 *
4946 	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
4947 	 */
4948 	smp_store_release(&prev->on_cpu, 0);
4949 #endif
4950 }
4951 
4952 #ifdef CONFIG_SMP
4953 
4954 static void do_balance_callbacks(struct rq *rq, struct balance_callback *head)
4955 {
4956 	void (*func)(struct rq *rq);
4957 	struct balance_callback *next;
4958 
4959 	lockdep_assert_rq_held(rq);
4960 
4961 	while (head) {
4962 		func = (void (*)(struct rq *))head->func;
4963 		next = head->next;
4964 		head->next = NULL;
4965 		head = next;
4966 
4967 		func(rq);
4968 	}
4969 }
4970 
4971 static void balance_push(struct rq *rq);
4972 
4973 /*
4974  * balance_push_callback is a right abuse of the callback interface and plays
4975  * by significantly different rules.
4976  *
4977  * Where the normal balance_callback's purpose is to be run in the same context
4978  * that queued it (only later, when it's safe to drop rq->lock again),
4979  * balance_push_callback is specifically targeted at __schedule().
4980  *
4981  * This abuse is tolerated because it places all the unlikely/odd cases behind
4982  * a single test, namely: rq->balance_callback == NULL.
4983  */
4984 struct balance_callback balance_push_callback = {
4985 	.next = NULL,
4986 	.func = balance_push,
4987 };
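
/*
 * Normal usage queues a callback while holding rq->lock, to run once the
 * lock can safely be dropped again; a sketch using the
 * queue_balance_callback() helper from sched.h, with my_push() and
 * my_cb_head as illustrative names (cf. push_rt_tasks() in rt.c):
 *
 *   static void my_push(struct rq *rq) { ... }
 *   static DEFINE_PER_CPU(struct balance_callback, my_cb_head);
 *
 *   lockdep_assert_rq_held(rq);
 *   queue_balance_callback(rq, &per_cpu(my_cb_head, rq->cpu), my_push);
 */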
4988 
4989 static inline struct balance_callback *
4990 __splice_balance_callbacks(struct rq *rq, bool split)
4991 {
4992 	struct balance_callback *head = rq->balance_callback;
4993 
4994 	if (likely(!head))
4995 		return NULL;
4996 
4997 	lockdep_assert_rq_held(rq);
4998 	/*
4999 	 * Must not take balance_push_callback off the list when
5000 	 * splice_balance_callbacks() and balance_callbacks() are not
5001 	 * in the same rq->lock section.
5002 	 *
5003 	 * In that case it would be possible for __schedule() to interleave
5004 	 * and observe the list empty.
5005 	 */
5006 	if (split && head == &balance_push_callback)
5007 		head = NULL;
5008 	else
5009 		rq->balance_callback = NULL;
5010 
5011 	return head;
5012 }
5013 
5014 struct balance_callback *splice_balance_callbacks(struct rq *rq)
5015 {
5016 	return __splice_balance_callbacks(rq, true);
5017 }
5018 
5019 static void __balance_callbacks(struct rq *rq)
5020 {
5021 	do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
5022 }
5023 
5024 void balance_callbacks(struct rq *rq, struct balance_callback *head)
5025 {
5026 	unsigned long flags;
5027 
5028 	if (unlikely(head)) {
5029 		raw_spin_rq_lock_irqsave(rq, flags);
5030 		do_balance_callbacks(rq, head);
5031 		raw_spin_rq_unlock_irqrestore(rq, flags);
5032 	}
5033 }
5034 
5035 #else
5036 
5037 static inline void __balance_callbacks(struct rq *rq)
5038 {
5039 }
5040 
5041 #endif
5042 
5043 static inline void
5044 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
5045 {
5046 	/*
5047 	 * The runqueue lock will be released by the next
5048 	 * task (which is an invalid locking op but in the case
5049 	 * of the scheduler it's an obvious special-case), so we
5050 	 * do an early lockdep release here:
5051 	 */
5052 	rq_unpin_lock(rq, rf);
5053 	spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_);
5054 #ifdef CONFIG_DEBUG_SPINLOCK
5055 	/* this is a valid case when another task releases the spinlock */
5056 	rq_lockp(rq)->owner = next;
5057 #endif
5058 }
5059 
5060 static inline void finish_lock_switch(struct rq *rq)
5061 {
5062 	/*
5063 	 * If we are tracking spinlock dependencies then we have to
5064 	 * fix up the runqueue lock - which gets 'carried over' from
5065 	 * prev into current:
5066 	 */
5067 	spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_);
5068 	__balance_callbacks(rq);
5069 	raw_spin_rq_unlock_irq(rq);
5070 }
5071 
5072 /*
5073  * NOP if the arch has not defined these:
5074  */
5075 
5076 #ifndef prepare_arch_switch
5077 # define prepare_arch_switch(next)	do { } while (0)
5078 #endif
5079 
5080 #ifndef finish_arch_post_lock_switch
5081 # define finish_arch_post_lock_switch()	do { } while (0)
5082 #endif
5083 
5084 static inline void kmap_local_sched_out(void)
5085 {
5086 #ifdef CONFIG_KMAP_LOCAL
5087 	if (unlikely(current->kmap_ctrl.idx))
5088 		__kmap_local_sched_out();
5089 #endif
5090 }
5091 
5092 static inline void kmap_local_sched_in(void)
5093 {
5094 #ifdef CONFIG_KMAP_LOCAL
5095 	if (unlikely(current->kmap_ctrl.idx))
5096 		__kmap_local_sched_in();
5097 #endif
5098 }
5099 
5100 /**
5101  * prepare_task_switch - prepare to switch tasks
5102  * @rq: the runqueue preparing to switch
5103  * @prev: the current task that is being switched out
5104  * @next: the task we are going to switch to.
5105  *
5106  * This is called with the rq lock held and interrupts off. It must
5107  * be paired with a subsequent finish_task_switch after the context
5108  * switch.
5109  *
5110  * prepare_task_switch sets up locking and calls architecture specific
5111  * hooks.
5112  */
5113 static inline void
5114 prepare_task_switch(struct rq *rq, struct task_struct *prev,
5115 		    struct task_struct *next)
5116 {
5117 	kcov_prepare_switch(prev);
5118 	sched_info_switch(rq, prev, next);
5119 	perf_event_task_sched_out(prev, next);
5120 	rseq_preempt(prev);
5121 	fire_sched_out_preempt_notifiers(prev, next);
5122 	kmap_local_sched_out();
5123 	prepare_task(next);
5124 	prepare_arch_switch(next);
5125 }
5126 
5127 /**
5128  * finish_task_switch - clean up after a task-switch
5129  * @prev: the thread we just switched away from.
5130  *
5131  * finish_task_switch must be called after the context switch, paired
5132  * with a prepare_task_switch call before the context switch.
5133  * finish_task_switch will reconcile locking set up by prepare_task_switch,
5134  * and do any other architecture-specific cleanup actions.
5135  *
5136  * Note that we may have delayed dropping an mm in context_switch(). If
5137  * so, we finish that here outside of the runqueue lock. (Doing it
5138  * with the lock held can cause deadlocks; see schedule() for
5139  * details.)
5140  *
5141  * The context switch has flipped the stack from under us and restored the
5142  * local variables which were saved when this task called schedule() in the
5143  * past. 'prev == current' is still correct but we need to recalculate this_rq
5144  * because prev may have moved to another CPU.
5145  */
5146 static struct rq *finish_task_switch(struct task_struct *prev)
5147 	__releases(rq->lock)
5148 {
5149 	struct rq *rq = this_rq();
5150 	struct mm_struct *mm = rq->prev_mm;
5151 	unsigned int prev_state;
5152 
5153 	/*
5154 	 * The previous task will have left us with a preempt_count of 2
5155 	 * because it left us after:
5156 	 *
5157 	 *	schedule()
5158 	 *	  preempt_disable();			// 1
5159 	 *	  __schedule()
5160 	 *	    raw_spin_lock_irq(&rq->lock)	// 2
5161 	 *
5162 	 * Also, see FORK_PREEMPT_COUNT.
5163 	 */
5164 	if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
5165 		      "corrupted preempt_count: %s/%d/0x%x\n",
5166 		      current->comm, current->pid, preempt_count()))
5167 		preempt_count_set(FORK_PREEMPT_COUNT);
5168 
5169 	rq->prev_mm = NULL;
5170 
5171 	/*
5172 	 * A task struct has one reference for the use as "current".
5173 	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
5174 	 * schedule one last time. The schedule call will never return, and
5175 	 * the scheduled task must drop that reference.
5176 	 *
5177 	 * We must observe prev->state before clearing prev->on_cpu (in
5178 	 * finish_task), otherwise a concurrent wakeup can get prev
5179 	 * running on another CPU and we could race with its RUNNING -> DEAD
5180 	 * transition, resulting in a double drop.
5181 	 */
5182 	prev_state = READ_ONCE(prev->__state);
5183 	vtime_task_switch(prev);
5184 	perf_event_task_sched_in(prev, current);
5185 	finish_task(prev);
5186 	tick_nohz_task_switch();
5187 	finish_lock_switch(rq);
5188 	finish_arch_post_lock_switch();
5189 	kcov_finish_switch(current);
5190 	/*
5191 	 * kmap_local_sched_out() is invoked with rq::lock held and
5192 	 * interrupts disabled. There is no requirement for that, but the
5193 	 * sched out code does not have an interrupt enabled section.
5194 	 * Restoring the maps on sched in does not require interrupts being
5195 	 * disabled either.
5196 	 */
5197 	kmap_local_sched_in();
5198 
5199 	fire_sched_in_preempt_notifiers(current);
5200 	/*
5201 	 * When switching through a kernel thread, the loop in
5202 	 * membarrier_{private,global}_expedited() may have observed that
5203 	 * kernel thread and not issued an IPI. It is therefore possible to
5204 	 * schedule between user->kernel->user threads without passing through
5205 	 * switch_mm(). Membarrier requires a barrier after storing to
5206 	 * rq->curr, before returning to userspace, so provide them here:
5207 	 *
5208 	 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
5209 	 *   provided by mmdrop_lazy_tlb(),
5210 	 * - a sync_core for SYNC_CORE.
5211 	 */
5212 	if (mm) {
5213 		membarrier_mm_sync_core_before_usermode(mm);
5214 		mmdrop_lazy_tlb_sched(mm);
5215 	}
5216 
5217 	if (unlikely(prev_state == TASK_DEAD)) {
5218 		if (prev->sched_class->task_dead)
5219 			prev->sched_class->task_dead(prev);
5220 
5221 		/* Task is done with its stack. */
5222 		put_task_stack(prev);
5223 
5224 		put_task_struct_rcu_user(prev);
5225 	}
5226 
5227 	return rq;
5228 }
5229 
5230 /**
5231  * schedule_tail - first thing a freshly forked thread must call.
5232  * @prev: the thread we just switched away from.
5233  */
5234 asmlinkage __visible void schedule_tail(struct task_struct *prev)
5235 	__releases(rq->lock)
5236 {
5237 	/*
5238 	 * New tasks start with FORK_PREEMPT_COUNT, see there and
5239 	 * finish_task_switch() for details.
5240 	 *
5241 	 * finish_task_switch() will drop rq->lock() and lower preempt_count
5242 	 * and the preempt_enable() will end up enabling preemption (on
5243 	 * PREEMPT_COUNT kernels).
5244 	 */
5245 
5246 	finish_task_switch(prev);
5247 	preempt_enable();
5248 
5249 	if (current->set_child_tid)
5250 		put_user(task_pid_vnr(current), current->set_child_tid);
5251 
5252 	calculate_sigpending();
5253 }
5254 
5255 /*
5256  * context_switch - switch to the new MM and the new thread's register state.
5257  */
5258 static __always_inline struct rq *
5259 context_switch(struct rq *rq, struct task_struct *prev,
5260 	       struct task_struct *next, struct rq_flags *rf)
5261 {
5262 	prepare_task_switch(rq, prev, next);
5263 
5264 	/*
5265 	 * For paravirt, this is coupled with an exit in switch_to to
5266 	 * combine the page table reload and the switch backend into
5267 	 * one hypercall.
5268 	 */
5269 	arch_start_context_switch(prev);
5270 
5271 	/*
5272 	 * kernel -> kernel   lazy + transfer active
5273 	 *   user -> kernel   lazy + mmgrab_lazy_tlb() active
5274 	 *
5275 	 * kernel ->   user   switch + mmdrop_lazy_tlb() active
5276 	 *   user ->   user   switch
5277 	 *
5278 	 * switch_mm_cid() needs to be updated if the barriers provided
5279 	 * by context_switch() are modified.
5280 	 */
5281 	if (!next->mm) {                                // to kernel
5282 		enter_lazy_tlb(prev->active_mm, next);
5283 
5284 		next->active_mm = prev->active_mm;
5285 		if (prev->mm)                           // from user
5286 			mmgrab_lazy_tlb(prev->active_mm);
5287 		else
5288 			prev->active_mm = NULL;
5289 	} else {                                        // to user
5290 		membarrier_switch_mm(rq, prev->active_mm, next->mm);
5291 		/*
5292 		 * sys_membarrier() requires an smp_mb() between setting
5293 		 * rq->curr / membarrier_switch_mm() and returning to userspace.
5294 		 *
5295 		 * The below provides this either through switch_mm(), or in
5296 		 * case 'prev->active_mm == next->mm' through
5297 		 * finish_task_switch()'s mmdrop().
5298 		 */
5299 		switch_mm_irqs_off(prev->active_mm, next->mm, next);
5300 		lru_gen_use_mm(next->mm);
5301 
5302 		if (!prev->mm) {                        // from kernel
5303 			/* will mmdrop_lazy_tlb() in finish_task_switch(). */
5304 			rq->prev_mm = prev->active_mm;
5305 			prev->active_mm = NULL;
5306 		}
5307 	}
5308 
5309 	/* switch_mm_cid() requires the memory barriers above. */
5310 	switch_mm_cid(rq, prev, next);
5311 
5312 	prepare_lock_switch(rq, next, rf);
5313 
5314 	/* Here we just switch the register state and the stack. */
5315 	switch_to(prev, next, prev);
5316 	barrier();
5317 
5318 	return finish_task_switch(prev);
5319 }
5320 
5321 /*
5322  * nr_running and nr_context_switches:
5323  *
5324  * externally visible scheduler statistics: current number of runnable
5325  * threads, total number of context switches performed since bootup.
5326  */
5327 unsigned int nr_running(void)
5328 {
5329 	unsigned int i, sum = 0;
5330 
5331 	for_each_online_cpu(i)
5332 		sum += cpu_rq(i)->nr_running;
5333 
5334 	return sum;
5335 }
5336 
5337 /*
5338  * Check if only the current task is running on the CPU.
5339  *
5340  * Caution: this function does not check that the caller has disabled
5341  * preemption, thus the result might have a time-of-check-to-time-of-use
5342  * race.  The caller is responsible to use it correctly, for example:
5343  *
5344  * - from a non-preemptible section (of course)
5345  *
5346  * - from a thread that is bound to a single CPU
5347  *
5348  * - in a loop with very short iterations (e.g. a polling loop)
5349  */
5350 bool single_task_running(void)
5351 {
5352 	return raw_rq()->nr_running == 1;
5353 }
5354 EXPORT_SYMBOL(single_task_running);
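
/*
 * A sketch of the polling-loop case above, mirroring how KVM's
 * halt-polling decides whether spinning is worthwhile; condition_met()
 * is an illustrative stand-in:
 *
 *   while (!condition_met()) {
 *	     if (single_task_running())
 *		     cpu_relax();	// CPU would otherwise idle: keep polling
 *	     else
 *		     break;		// someone else wants the CPU: go block
 *   }
 */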
5355 
5356 unsigned long long nr_context_switches_cpu(int cpu)
5357 {
5358 	return cpu_rq(cpu)->nr_switches;
5359 }
5360 
5361 unsigned long long nr_context_switches(void)
5362 {
5363 	int i;
5364 	unsigned long long sum = 0;
5365 
5366 	for_each_possible_cpu(i)
5367 		sum += cpu_rq(i)->nr_switches;
5368 
5369 	return sum;
5370 }
5371 
5372 /*
5373  * Consumers of these two interfaces, like for example the cpuidle menu
5374  * governor, are using nonsensical data: they prefer shallow idle state
5375  * selection for a CPU that has IO-wait pending, even though the task might
5376  * not even end up running on that CPU when it does become runnable.
5377  */
5378 
5379 unsigned int nr_iowait_cpu(int cpu)
5380 {
5381 	return atomic_read(&cpu_rq(cpu)->nr_iowait);
5382 }
5383 
5384 /*
5385  * IO-wait accounting, and how it's mostly bollocks (on SMP).
5386  *
5387  * The idea behind IO-wait accounting is to account the idle time that we could
5388  * have spent running if it were not for IO. That is, if we were to improve the
5389  * storage performance, we'd have a proportional reduction in IO-wait time.
5390  *
5391  * This all works nicely on UP, where, when a task blocks on IO, we account
5392  * idle time as IO-wait, because if the storage were faster, it could've been
5393  * running and we'd not be idle.
5394  *
5395  * This has been extended to SMP, by doing the same for each CPU. This however
5396  * is broken.
5397  *
5398  * Imagine for instance the case where two tasks block on one CPU: only that
5399  * CPU will have IO-wait accounted, while the other has regular idle, even
5400  * though, had the storage been faster, both could've run at the same time,
5401  * utilising both CPUs.
5402  *
5403  * This means that, when looking globally, the current IO-wait accounting on
5404  * SMP is a lower bound, by reason of under-accounting.
5405  *
5406  * Worse, since the numbers are provided per CPU, they are sometimes
5407  * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
5408  * associated with any one particular CPU; it can wake on a different CPU than
5409  * the one it blocked on. This means the per-CPU IO-wait number is meaningless.
5410  *
5411  * Task CPU affinities can make all that even more 'interesting'.
5412  */
5413 
5414 unsigned int nr_iowait(void)
5415 {
5416 	unsigned int i, sum = 0;
5417 
5418 	for_each_possible_cpu(i)
5419 		sum += nr_iowait_cpu(i);
5420 
5421 	return sum;
5422 }
5423 
5424 #ifdef CONFIG_SMP
5425 
5426 /*
5427  * sched_exec - execve() is a valuable balancing opportunity, because at
5428  * this point the task has the smallest effective memory and cache footprint.
5429  */
5430 void sched_exec(void)
5431 {
5432 	struct task_struct *p = current;
5433 	struct migration_arg arg;
5434 	int dest_cpu;
5435 
5436 	scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
5437 		dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC);
5438 		if (dest_cpu == smp_processor_id())
5439 			return;
5440 
5441 		if (unlikely(!cpu_active(dest_cpu)))
5442 			return;
5443 
5444 		arg = (struct migration_arg){ p, dest_cpu };
5445 	}
5446 	stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
5447 }
5448 
5449 #endif
5450 
5451 DEFINE_PER_CPU(struct kernel_stat, kstat);
5452 DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
5453 
5454 EXPORT_PER_CPU_SYMBOL(kstat);
5455 EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
5456 
5457 /*
5458  * The function fair_sched_class.update_curr accesses the current entity
5459  * (curr) and its field curr->exec_start; when called from task_sched_runtime(),
5460  * we observe a high rate of cache misses in practice.
5461  * Prefetching this data results in improved performance.
5462  */
5463 static inline void prefetch_curr_exec_start(struct task_struct *p)
5464 {
5465 #ifdef CONFIG_FAIR_GROUP_SCHED
5466 	struct sched_entity *curr = p->se.cfs_rq->curr;
5467 #else
5468 	struct sched_entity *curr = task_rq(p)->cfs.curr;
5469 #endif
5470 	prefetch(curr);
5471 	prefetch(&curr->exec_start);
5472 }
5473 
5474 /*
5475  * Return accounted runtime for the task.
5476  * In case the task is currently running, return the runtime plus current's
5477  * pending runtime that has not been accounted yet.
5478  */
5479 unsigned long long task_sched_runtime(struct task_struct *p)
5480 {
5481 	struct rq_flags rf;
5482 	struct rq *rq;
5483 	u64 ns;
5484 
5485 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
5486 	/*
5487 	 * 64-bit doesn't need locks to atomically read a 64-bit value.
5488 	 * So we have an optimization opportunity when the task's delta_exec is 0.
5489 	 * Reading ->on_cpu is racy, but this is OK.
5490 	 *
5491 	 * If we race with it leaving the CPU, we'll take a lock, so we're correct.
5492 	 * If we race with it entering the CPU, unaccounted time is 0. This is
5493 	 * indistinguishable from the read occurring a few cycles earlier.
5494 	 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
5495 	 * been accounted, so we're correct here as well.
5496 	 */
5497 	if (!p->on_cpu || !task_on_rq_queued(p))
5498 		return p->se.sum_exec_runtime;
5499 #endif
5500 
5501 	rq = task_rq_lock(p, &rf);
5502 	/*
5503 	 * Must be ->curr _and_ ->on_rq.  If dequeued, we would
5504 	 * project cycles that may never be accounted to this
5505 	 * thread, breaking clock_gettime().
5506 	 */
5507 	if (task_current(rq, p) && task_on_rq_queued(p)) {
5508 		prefetch_curr_exec_start(p);
5509 		update_rq_clock(rq);
5510 		p->sched_class->update_curr(rq);
5511 	}
5512 	ns = p->se.sum_exec_runtime;
5513 	task_rq_unlock(rq, p, &rf);
5514 
5515 	return ns;
5516 }
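
/*
 * Userspace-visible effect, sketched for illustration (not part of this
 * file): the value returned above backs CLOCK_THREAD_CPUTIME_ID, so a
 * thread can observe its own accounted runtime, including the pending
 * delta accrued while it is on the CPU:
 *
 *	struct timespec ts;
 *
 *	clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
 *
 * The "must be ->curr _and_ ->on_rq" check above is what keeps two
 * back-to-back reads like this monotonic and free of phantom cycles.
 */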
5517 
5518 #ifdef CONFIG_SCHED_DEBUG
5519 static u64 cpu_resched_latency(struct rq *rq)
5520 {
5521 	int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms);
5522 	u64 resched_latency, now = rq_clock(rq);
5523 	static bool warned_once;
5524 
5525 	if (sysctl_resched_latency_warn_once && warned_once)
5526 		return 0;
5527 
5528 	if (!need_resched() || !latency_warn_ms)
5529 		return 0;
5530 
5531 	if (system_state == SYSTEM_BOOTING)
5532 		return 0;
5533 
5534 	if (!rq->last_seen_need_resched_ns) {
5535 		rq->last_seen_need_resched_ns = now;
5536 		rq->ticks_without_resched = 0;
5537 		return 0;
5538 	}
5539 
5540 	rq->ticks_without_resched++;
5541 	resched_latency = now - rq->last_seen_need_resched_ns;
5542 	if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC)
5543 		return 0;
5544 
5545 	warned_once = true;
5546 
5547 	return resched_latency;
5548 }
5549 
5550 static int __init setup_resched_latency_warn_ms(char *str)
5551 {
5552 	long val;
5553 
5554 	if ((kstrtol(str, 0, &val))) {
5555 		pr_warn("Unable to set resched_latency_warn_ms\n");
5556 		return 1;
5557 	}
5558 
5559 	sysctl_resched_latency_warn_ms = val;
5560 	return 1;
5561 }
5562 __setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms);
5563 #else
5564 static inline u64 cpu_resched_latency(struct rq *rq) { return 0; }
5565 #endif /* CONFIG_SCHED_DEBUG */
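
/*
 * Example, for illustration: with the LATENCY_WARN scheduler feature
 * enabled, the threshold above can be set on the kernel command line,
 * e.g.:
 *
 *	resched_latency_warn_ms=50
 *
 * to warn when a pending need_resched flag goes unserviced for more than
 * 50ms (see the sched_tick() path below).
 */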
5566 
5567 /*
5568  * This function gets called by the timer code, with HZ frequency.
5569  * We call it with interrupts disabled.
5570  */
5571 void sched_tick(void)
5572 {
5573 	int cpu = smp_processor_id();
5574 	struct rq *rq = cpu_rq(cpu);
5575 	struct task_struct *curr;
5576 	struct rq_flags rf;
5577 	unsigned long hw_pressure;
5578 	u64 resched_latency;
5579 
5580 	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
5581 		arch_scale_freq_tick();
5582 
5583 	sched_clock_tick();
5584 
5585 	rq_lock(rq, &rf);
5586 
5587 	curr = rq->curr;
5588 	psi_account_irqtime(rq, curr, NULL);
5589 
5590 	update_rq_clock(rq);
5591 	hw_pressure = arch_scale_hw_pressure(cpu_of(rq));
5592 	update_hw_load_avg(rq_clock_task(rq), rq, hw_pressure);
5593 	curr->sched_class->task_tick(rq, curr, 0);
5594 	if (sched_feat(LATENCY_WARN))
5595 		resched_latency = cpu_resched_latency(rq);
5596 	calc_global_load_tick(rq);
5597 	sched_core_tick(rq);
5598 	task_tick_mm_cid(rq, curr);
5599 	scx_tick(rq);
5600 
5601 	rq_unlock(rq, &rf);
5602 
5603 	if (sched_feat(LATENCY_WARN) && resched_latency)
5604 		resched_latency_warn(cpu, resched_latency);
5605 
5606 	perf_event_task_tick();
5607 
5608 	if (curr->flags & PF_WQ_WORKER)
5609 		wq_worker_tick(curr);
5610 
5611 #ifdef CONFIG_SMP
5612 	if (!scx_switched_all()) {
5613 		rq->idle_balance = idle_cpu(cpu);
5614 		sched_balance_trigger(rq);
5615 	}
5616 #endif
5617 }
5618 
5619 #ifdef CONFIG_NO_HZ_FULL
5620 
5621 struct tick_work {
5622 	int			cpu;
5623 	atomic_t		state;
5624 	struct delayed_work	work;
5625 };
5626 /* Values for ->state, see diagram below. */
5627 #define TICK_SCHED_REMOTE_OFFLINE	0
5628 #define TICK_SCHED_REMOTE_OFFLINING	1
5629 #define TICK_SCHED_REMOTE_RUNNING	2
5630 
5631 /*
5632  * State diagram for ->state:
5633  *
5634  *
5635  *          TICK_SCHED_REMOTE_OFFLINE
5636  *                    |   ^
5637  *                    |   |
5638  *                    |   | sched_tick_remote()
5639  *                    |   |
5640  *                    |   |
5641  *                    +--TICK_SCHED_REMOTE_OFFLINING
5642  *                    |   ^
5643  *                    |   |
5644  * sched_tick_start() |   | sched_tick_stop()
5645  *                    |   |
5646  *                    V   |
5647  *          TICK_SCHED_REMOTE_RUNNING
5648  *
5649  *
5650  * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
5651  * and sched_tick_start() are happy to leave the state in RUNNING.
5652  */
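
/*
 * Illustrative sketch, not part of this file, of the atomic trick that
 * implements the OFFLINING -> OFFLINE edge in sched_tick_remote() below;
 * with the values above, one decrement-unless-running does both the
 * "still running?" test and the state transition in a single atomic step:
 *
 *	os = atomic_fetch_add_unless(&state, -1, TICK_SCHED_REMOTE_RUNNING);
 *	// os == RUNNING   (2): state untouched, keep ticking
 *	// os == OFFLINING (1): decremented to OFFLINE (0), stop
 *	// os == OFFLINE   (0): unexpected underflow, WARNed on below
 */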
5653 
5654 static struct tick_work __percpu *tick_work_cpu;
5655 
5656 static void sched_tick_remote(struct work_struct *work)
5657 {
5658 	struct delayed_work *dwork = to_delayed_work(work);
5659 	struct tick_work *twork = container_of(dwork, struct tick_work, work);
5660 	int cpu = twork->cpu;
5661 	struct rq *rq = cpu_rq(cpu);
5662 	int os;
5663 
5664 	/*
5665 	 * Handle the tick only if it appears the remote CPU is running in full
5666 	 * dynticks mode. The check is racy by nature, but missing a tick or
5667 	 * having one too many is no big deal because the scheduler tick updates
5668 	 * statistics and checks timeslices in a time-independent way, regardless
5669 	 * of when exactly it is running.
5670 	 */
5671 	if (tick_nohz_tick_stopped_cpu(cpu)) {
5672 		guard(rq_lock_irq)(rq);
5673 		struct task_struct *curr = rq->curr;
5674 
5675 		if (cpu_online(cpu)) {
5676 			update_rq_clock(rq);
5677 
5678 			if (!is_idle_task(curr)) {
5679 				/*
5680 				 * Make sure the next tick runs within a
5681 				 * reasonable amount of time.
5682 				 */
5683 				u64 delta = rq_clock_task(rq) - curr->se.exec_start;
5684 				WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
5685 			}
5686 			curr->sched_class->task_tick(rq, curr, 0);
5687 
5688 			calc_load_nohz_remote(rq);
5689 		}
5690 	}
5691 
5692 	/*
5693 	 * Run the remote tick once per second (1Hz). This arbitrary
5694 	 * interval is long enough to avoid overload but short enough
5695 	 * to keep scheduler internal stats reasonably up to date.  But
5696 	 * first update state to reflect hotplug activity if required.
5697 	 */
5698 	os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
5699 	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
5700 	if (os == TICK_SCHED_REMOTE_RUNNING)
5701 		queue_delayed_work(system_unbound_wq, dwork, HZ);
5702 }
5703 
5704 static void sched_tick_start(int cpu)
5705 {
5706 	int os;
5707 	struct tick_work *twork;
5708 
5709 	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
5710 		return;
5711 
5712 	WARN_ON_ONCE(!tick_work_cpu);
5713 
5714 	twork = per_cpu_ptr(tick_work_cpu, cpu);
5715 	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
5716 	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
5717 	if (os == TICK_SCHED_REMOTE_OFFLINE) {
5718 		twork->cpu = cpu;
5719 		INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
5720 		queue_delayed_work(system_unbound_wq, &twork->work, HZ);
5721 	}
5722 }
5723 
5724 #ifdef CONFIG_HOTPLUG_CPU
5725 static void sched_tick_stop(int cpu)
5726 {
5727 	struct tick_work *twork;
5728 	int os;
5729 
5730 	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
5731 		return;
5732 
5733 	WARN_ON_ONCE(!tick_work_cpu);
5734 
5735 	twork = per_cpu_ptr(tick_work_cpu, cpu);
5736 	/* There cannot be competing actions, but don't rely on stop-machine. */
5737 	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
5738 	WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
5739 	/* Don't cancel, as this would mess up the state machine. */
5740 }
5741 #endif /* CONFIG_HOTPLUG_CPU */
5742 
5743 int __init sched_tick_offload_init(void)
5744 {
5745 	tick_work_cpu = alloc_percpu(struct tick_work);
5746 	BUG_ON(!tick_work_cpu);
5747 	return 0;
5748 }
5749 
5750 #else /* !CONFIG_NO_HZ_FULL */
5751 static inline void sched_tick_start(int cpu) { }
5752 static inline void sched_tick_stop(int cpu) { }
5753 #endif
5754 
5755 #if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
5756 				defined(CONFIG_TRACE_PREEMPT_TOGGLE))
5757 /*
5758  * If the value passed in is equal to the current preempt count
5759  * then we just disabled preemption. Start timing the latency.
5760  */
5761 static inline void preempt_latency_start(int val)
5762 {
5763 	if (preempt_count() == val) {
5764 		unsigned long ip = get_lock_parent_ip();
5765 #ifdef CONFIG_DEBUG_PREEMPT
5766 		current->preempt_disable_ip = ip;
5767 #endif
5768 		trace_preempt_off(CALLER_ADDR0, ip);
5769 	}
5770 }
5771 
5772 void preempt_count_add(int val)
5773 {
5774 #ifdef CONFIG_DEBUG_PREEMPT
5775 	/*
5776 	 * Underflow?
5777 	 */
5778 	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
5779 		return;
5780 #endif
5781 	__preempt_count_add(val);
5782 #ifdef CONFIG_DEBUG_PREEMPT
5783 	/*
5784 	 * Spinlock count overflowing soon?
5785 	 */
5786 	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
5787 				PREEMPT_MASK - 10);
5788 #endif
5789 	preempt_latency_start(val);
5790 }
5791 EXPORT_SYMBOL(preempt_count_add);
5792 NOKPROBE_SYMBOL(preempt_count_add);
5793 
5794 /*
5795  * If the value passed in equals the current preempt count
5796  * then we just enabled preemption. Stop timing the latency.
5797  */
5798 static inline void preempt_latency_stop(int val)
5799 {
5800 	if (preempt_count() == val)
5801 		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
5802 }
5803 
5804 void preempt_count_sub(int val)
5805 {
5806 #ifdef CONFIG_DEBUG_PREEMPT
5807 	/*
5808 	 * Underflow?
5809 	 */
5810 	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
5811 		return;
5812 	/*
5813 	 * Is the spinlock portion underflowing?
5814 	 */
5815 	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
5816 			!(preempt_count() & PREEMPT_MASK)))
5817 		return;
5818 #endif
5819 
5820 	preempt_latency_stop(val);
5821 	__preempt_count_sub(val);
5822 }
5823 EXPORT_SYMBOL(preempt_count_sub);
5824 NOKPROBE_SYMBOL(preempt_count_sub);
5825 
5826 #else
5827 static inline void preempt_latency_start(int val) { }
5828 static inline void preempt_latency_stop(int val) { }
5829 #endif
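
/*
 * Illustrative walk-through, not part of this file, of how the latency
 * hooks above pair up under nesting:
 *
 *	preempt_disable();	// 0 -> 1, count == val: timing starts
 *	preempt_disable();	// 1 -> 2, count != val: no trace
 *	preempt_enable();	// 2 -> 1, count != val: no trace
 *	preempt_enable();	// 1 -> 0, count == val: timing stops
 *
 * Only the outermost disable/enable pair brackets the traced preempt-off
 * region.
 */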
5830 
5831 static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
5832 {
5833 #ifdef CONFIG_DEBUG_PREEMPT
5834 	return p->preempt_disable_ip;
5835 #else
5836 	return 0;
5837 #endif
5838 }
5839 
5840 /*
5841  * Print scheduling while atomic bug:
5842  */
5843 static noinline void __schedule_bug(struct task_struct *prev)
5844 {
5845 	/* Save this before calling printk(), since that will clobber it */
5846 	unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
5847 
5848 	if (oops_in_progress)
5849 		return;
5850 
5851 	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
5852 		prev->comm, prev->pid, preempt_count());
5853 
5854 	debug_show_held_locks(prev);
5855 	print_modules();
5856 	if (irqs_disabled())
5857 		print_irqtrace_events(prev);
5858 	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
5859 		pr_err("Preemption disabled at:");
5860 		print_ip_sym(KERN_ERR, preempt_disable_ip);
5861 	}
5862 	check_panic_on_warn("scheduling while atomic");
5863 
5864 	dump_stack();
5865 	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
5866 }
5867 
5868 /*
5869  * Various schedule()-time debugging checks and statistics:
5870  */
5871 static inline void schedule_debug(struct task_struct *prev, bool preempt)
5872 {
5873 #ifdef CONFIG_SCHED_STACK_END_CHECK
5874 	if (task_stack_end_corrupted(prev))
5875 		panic("corrupted stack end detected inside scheduler\n");
5876 
5877 	if (task_scs_end_corrupted(prev))
5878 		panic("corrupted shadow stack detected inside scheduler\n");
5879 #endif
5880 
5881 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
5882 	if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) {
5883 		printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
5884 			prev->comm, prev->pid, prev->non_block_count);
5885 		dump_stack();
5886 		add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
5887 	}
5888 #endif
5889 
5890 	if (unlikely(in_atomic_preempt_off())) {
5891 		__schedule_bug(prev);
5892 		preempt_count_set(PREEMPT_DISABLED);
5893 	}
5894 	rcu_sleep_check();
5895 	SCHED_WARN_ON(ct_state() == CT_STATE_USER);
5896 
5897 	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
5898 
5899 	schedstat_inc(this_rq()->sched_count);
5900 }
5901 
5902 static void prev_balance(struct rq *rq, struct task_struct *prev,
5903 			 struct rq_flags *rf)
5904 {
5905 	const struct sched_class *start_class = prev->sched_class;
5906 	const struct sched_class *class;
5907 
5908 #ifdef CONFIG_SCHED_CLASS_EXT
5909 	/*
5910 	 * SCX requires a balance() call before every pick_next_task() including
5911 	 * when waking up from SCHED_IDLE. If @start_class is below SCX, start
5912 	 * from SCX instead.
5913 	 */
5914 	if (scx_enabled() && sched_class_above(&ext_sched_class, start_class))
5915 		start_class = &ext_sched_class;
5916 #endif
5917 
5918 	/*
5919 	 * We must do the balancing pass before put_prev_task(), such
5920 	 * that when we release the rq->lock the task is in the same
5921 	 * state as before we took rq->lock.
5922 	 *
5923 	 * We can terminate the balance pass as soon as we know there is
5924 	 * a runnable task of @class priority or higher.
5925 	 */
5926 	for_active_class_range(class, start_class, &idle_sched_class) {
5927 		if (class->balance && class->balance(rq, prev, rf))
5928 			break;
5929 	}
5930 }
5931 
5932 /*
5933  * Pick up the highest-prio task:
5934  */
5935 static inline struct task_struct *
5936 __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
5937 {
5938 	const struct sched_class *class;
5939 	struct task_struct *p;
5940 
5941 	rq->dl_server = NULL;
5942 
5943 	if (scx_enabled())
5944 		goto restart;
5945 
5946 	/*
5947 	 * Optimization: we know that if all tasks are in the fair class we can
5948 	 * call that function directly, but only if the @prev task wasn't of a
5949 	 * higher scheduling class, because otherwise those classes lose the
5950 	 * opportunity to pull in more work from other CPUs.
5951 	 */
5952 	if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) &&
5953 		   rq->nr_running == rq->cfs.h_nr_running)) {
5954 
5955 		p = pick_next_task_fair(rq, prev, rf);
5956 		if (unlikely(p == RETRY_TASK))
5957 			goto restart;
5958 
5959 		/* Assume the next prioritized class is idle_sched_class */
5960 		if (!p) {
5961 			p = pick_task_idle(rq);
5962 			put_prev_set_next_task(rq, prev, p);
5963 		}
5964 
5965 		return p;
5966 	}
5967 
5968 restart:
5969 	prev_balance(rq, prev, rf);
5970 
5971 	for_each_active_class(class) {
5972 		if (class->pick_next_task) {
5973 			p = class->pick_next_task(rq, prev);
5974 			if (p)
5975 				return p;
5976 		} else {
5977 			p = class->pick_task(rq);
5978 			if (p) {
5979 				put_prev_set_next_task(rq, prev, p);
5980 				return p;
5981 			}
5982 		}
5983 	}
5984 
5985 	BUG(); /* The idle class should always have a runnable task. */
5986 }
5987 
5988 #ifdef CONFIG_SCHED_CORE
5989 static inline bool is_task_rq_idle(struct task_struct *t)
5990 {
5991 	return (task_rq(t)->idle == t);
5992 }
5993 
5994 static inline bool cookie_equals(struct task_struct *a, unsigned long cookie)
5995 {
5996 	return is_task_rq_idle(a) || (a->core_cookie == cookie);
5997 }
5998 
5999 static inline bool cookie_match(struct task_struct *a, struct task_struct *b)
6000 {
6001 	if (is_task_rq_idle(a) || is_task_rq_idle(b))
6002 		return true;
6003 
6004 	return a->core_cookie == b->core_cookie;
6005 }
6006 
6007 static inline struct task_struct *pick_task(struct rq *rq)
6008 {
6009 	const struct sched_class *class;
6010 	struct task_struct *p;
6011 
6012 	rq->dl_server = NULL;
6013 
6014 	for_each_active_class(class) {
6015 		p = class->pick_task(rq);
6016 		if (p)
6017 			return p;
6018 	}
6019 
6020 	BUG(); /* The idle class should always have a runnable task. */
6021 }
6022 
6023 extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
6024 
6025 static void queue_core_balance(struct rq *rq);
6026 
6027 static struct task_struct *
6028 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6029 {
6030 	struct task_struct *next, *p, *max = NULL;
6031 	const struct cpumask *smt_mask;
6032 	bool fi_before = false;
6033 	bool core_clock_updated = (rq == rq->core);
6034 	unsigned long cookie;
6035 	int i, cpu, occ = 0;
6036 	struct rq *rq_i;
6037 	bool need_sync;
6038 
6039 	if (!sched_core_enabled(rq))
6040 		return __pick_next_task(rq, prev, rf);
6041 
6042 	cpu = cpu_of(rq);
6043 
6044 	/* Stopper task is switching into idle, no need for core-wide selection. */
6045 	if (cpu_is_offline(cpu)) {
6046 		/*
6047 		 * Reset core_pick so that we don't enter the fastpath when
6048 		 * coming online. core_pick would already be migrated to
6049 		 * coming online. core_pick will already have been migrated to
6050 		 * another CPU while this one was offline.
6051 		rq->core_pick = NULL;
6052 		rq->core_dl_server = NULL;
6053 		return __pick_next_task(rq, prev, rf);
6054 	}
6055 
6056 	/*
6057 	 * If there were no {en,de}queues since we picked (IOW, the task
6058 	 * pointers are all still valid), and we haven't scheduled the last
6059 	 * pick yet, do so now.
6060 	 *
6061 	 * rq->core_pick can be NULL if no selection was made for a CPU because
6062 	 * it was either offline or went offline during a sibling's core-wide
6063 	 * selection. In this case, do a core-wide selection.
6064 	 */
6065 	if (rq->core->core_pick_seq == rq->core->core_task_seq &&
6066 	    rq->core->core_pick_seq != rq->core_sched_seq &&
6067 	    rq->core_pick) {
6068 		WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq);
6069 
6070 		next = rq->core_pick;
6071 		rq->dl_server = rq->core_dl_server;
6072 		rq->core_pick = NULL;
6073 		rq->core_dl_server = NULL;
6074 		goto out_set_next;
6075 	}
6076 
6077 	prev_balance(rq, prev, rf);
6078 
6079 	smt_mask = cpu_smt_mask(cpu);
6080 	need_sync = !!rq->core->core_cookie;
6081 
6082 	/* reset state */
6083 	rq->core->core_cookie = 0UL;
6084 	if (rq->core->core_forceidle_count) {
6085 		if (!core_clock_updated) {
6086 			update_rq_clock(rq->core);
6087 			core_clock_updated = true;
6088 		}
6089 		sched_core_account_forceidle(rq);
6090 		/* reset after accounting force idle */
6091 		rq->core->core_forceidle_start = 0;
6092 		rq->core->core_forceidle_count = 0;
6093 		rq->core->core_forceidle_occupation = 0;
6094 		need_sync = true;
6095 		fi_before = true;
6096 	}
6097 
6098 	/*
6099 	 * core->core_task_seq, core->core_pick_seq, rq->core_sched_seq
6100 	 *
6101 	 * @task_seq guards the task state ({en,de}queues)
6102 	 * @pick_seq is the @task_seq we did a selection on
6103 	 * @sched_seq is the @pick_seq we scheduled
6104 	 *
6105 	 * However, preemptions can cause multiple picks on the same task set.
6106 	 * 'Fix' this by also increasing @task_seq for every pick.
6107 	 */
6108 	rq->core->core_task_seq++;
6109 
6110 	/*
6111 	 * Optimize for the common case where this CPU has no cookies
6112 	 * and there are no cookied tasks running on siblings.
6113 	 */
6114 	if (!need_sync) {
6115 		next = pick_task(rq);
6116 		if (!next->core_cookie) {
6117 			rq->core_pick = NULL;
6118 			rq->core_dl_server = NULL;
6119 			/*
6120 			 * For robustness, update the min_vruntime_fi for
6121 			 * unconstrained picks as well.
6122 			 */
6123 			WARN_ON_ONCE(fi_before);
6124 			task_vruntime_update(rq, next, false);
6125 			goto out_set_next;
6126 		}
6127 	}
6128 
6129 	/*
6130 	 * For each thread: do the regular task pick and find the max prio task
6131 	 * amongst them.
6132 	 *
6133 	 * Tie-break prio towards the current CPU
6134 	 */
6135 	for_each_cpu_wrap(i, smt_mask, cpu) {
6136 		rq_i = cpu_rq(i);
6137 
6138 		/*
6139 		 * Current cpu always has its clock updated on entrance to
6140 		 * pick_next_task(). If the current cpu is not the core,
6141 		 * the core may also have been updated above.
6142 		 */
6143 		if (i != cpu && (rq_i != rq->core || !core_clock_updated))
6144 			update_rq_clock(rq_i);
6145 
6146 		rq_i->core_pick = p = pick_task(rq_i);
6147 		rq_i->core_dl_server = rq_i->dl_server;
6148 
6149 		if (!max || prio_less(max, p, fi_before))
6150 			max = p;
6151 	}
6152 
6153 	cookie = rq->core->core_cookie = max->core_cookie;
6154 
6155 	/*
6156 	 * For each thread: try and find a runnable task that matches @max or
6157 	 * force idle.
6158 	 */
6159 	for_each_cpu(i, smt_mask) {
6160 		rq_i = cpu_rq(i);
6161 		p = rq_i->core_pick;
6162 
6163 		if (!cookie_equals(p, cookie)) {
6164 			p = NULL;
6165 			if (cookie)
6166 				p = sched_core_find(rq_i, cookie);
6167 			if (!p)
6168 				p = idle_sched_class.pick_task(rq_i);
6169 		}
6170 
6171 		rq_i->core_pick = p;
6172 		rq_i->core_dl_server = NULL;
6173 
6174 		if (p == rq_i->idle) {
6175 			if (rq_i->nr_running) {
6176 				rq->core->core_forceidle_count++;
6177 				if (!fi_before)
6178 					rq->core->core_forceidle_seq++;
6179 			}
6180 		} else {
6181 			occ++;
6182 		}
6183 	}
6184 
6185 	if (schedstat_enabled() && rq->core->core_forceidle_count) {
6186 		rq->core->core_forceidle_start = rq_clock(rq->core);
6187 		rq->core->core_forceidle_occupation = occ;
6188 	}
6189 
6190 	rq->core->core_pick_seq = rq->core->core_task_seq;
6191 	next = rq->core_pick;
6192 	rq->core_sched_seq = rq->core->core_pick_seq;
6193 
6194 	/* Something should have been selected for current CPU */
6195 	WARN_ON_ONCE(!next);
6196 
6197 	/*
6198 	 * Reschedule siblings
6199 	 *
6200 	 * NOTE: L1TF -- at this point we're no longer running the old task and
6201 	 * sending an IPI (below) ensures the sibling will no longer be running
6202 	 * their task. This ensures there is no inter-sibling overlap between
6203 	 * non-matching user state.
6204 	 */
6205 	for_each_cpu(i, smt_mask) {
6206 		rq_i = cpu_rq(i);
6207 
6208 		/*
6209 		 * An online sibling might have gone offline before a task
6210 		 * could be picked for it, or it might be offline but later
6211 		 * happen to come online, but it's too late and nothing was
6212 		 * picked for it.  That's OK - it will pick tasks for itself,
6213 		 * so ignore it.
6214 		 */
6215 		if (!rq_i->core_pick)
6216 			continue;
6217 
6218 		/*
6219 		 * Update for new !FI->FI transitions, or if continuing to be in !FI:
6220 		 * fi_before     fi      update?
6221 		 *  0            0       1
6222 		 *  0            1       1
6223 		 *  1            0       1
6224 		 *  1            1       0
6225 		 */
6226 		if (!(fi_before && rq->core->core_forceidle_count))
6227 			task_vruntime_update(rq_i, rq_i->core_pick, !!rq->core->core_forceidle_count);
6228 
6229 		rq_i->core_pick->core_occupation = occ;
6230 
6231 		if (i == cpu) {
6232 			rq_i->core_pick = NULL;
6233 			rq_i->core_dl_server = NULL;
6234 			continue;
6235 		}
6236 
6237 		/* Did we break L1TF mitigation requirements? */
6238 		WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick));
6239 
6240 		if (rq_i->curr == rq_i->core_pick) {
6241 			rq_i->core_pick = NULL;
6242 			rq_i->core_dl_server = NULL;
6243 			continue;
6244 		}
6245 
6246 		resched_curr(rq_i);
6247 	}
6248 
6249 out_set_next:
6250 	put_prev_set_next_task(rq, prev, next);
6251 	if (rq->core->core_forceidle_count && next == rq->idle)
6252 		queue_core_balance(rq);
6253 
6254 	return next;
6255 }
6256 
6257 static bool try_steal_cookie(int this, int that)
6258 {
6259 	struct rq *dst = cpu_rq(this), *src = cpu_rq(that);
6260 	struct task_struct *p;
6261 	unsigned long cookie;
6262 	bool success = false;
6263 
6264 	guard(irq)();
6265 	guard(double_rq_lock)(dst, src);
6266 
6267 	cookie = dst->core->core_cookie;
6268 	if (!cookie)
6269 		return false;
6270 
6271 	if (dst->curr != dst->idle)
6272 		return false;
6273 
6274 	p = sched_core_find(src, cookie);
6275 	if (!p)
6276 		return false;
6277 
6278 	do {
6279 		if (p == src->core_pick || p == src->curr)
6280 			goto next;
6281 
6282 		if (!is_cpu_allowed(p, this))
6283 			goto next;
6284 
6285 		if (p->core_occupation > dst->idle->core_occupation)
6286 			goto next;
6287 		/*
6288 		 * sched_core_find() and sched_core_next() will ensure
6289 		 * that task @p is not throttled now; we also need to
6290 		 * check whether the runqueue of the destination CPU is
6291 		 * being throttled.
6292 		 */
6293 		if (sched_task_is_throttled(p, this))
6294 			goto next;
6295 
6296 		deactivate_task(src, p, 0);
6297 		set_task_cpu(p, this);
6298 		activate_task(dst, p, 0);
6299 
6300 		resched_curr(dst);
6301 
6302 		success = true;
6303 		break;
6304 
6305 next:
6306 		p = sched_core_next(p, cookie);
6307 	} while (p);
6308 
6309 	return success;
6310 }
6311 
6312 static bool steal_cookie_task(int cpu, struct sched_domain *sd)
6313 {
6314 	int i;
6315 
6316 	for_each_cpu_wrap(i, sched_domain_span(sd), cpu + 1) {
6317 		if (i == cpu)
6318 			continue;
6319 
6320 		if (need_resched())
6321 			break;
6322 
6323 		if (try_steal_cookie(cpu, i))
6324 			return true;
6325 	}
6326 
6327 	return false;
6328 }
6329 
6330 static void sched_core_balance(struct rq *rq)
6331 {
6332 	struct sched_domain *sd;
6333 	int cpu = cpu_of(rq);
6334 
6335 	guard(preempt)();
6336 	guard(rcu)();
6337 
6338 	raw_spin_rq_unlock_irq(rq);
6339 	for_each_domain(cpu, sd) {
6340 		if (need_resched())
6341 			break;
6342 
6343 		if (steal_cookie_task(cpu, sd))
6344 			break;
6345 	}
6346 	raw_spin_rq_lock_irq(rq);
6347 }
6348 
6349 static DEFINE_PER_CPU(struct balance_callback, core_balance_head);
6350 
6351 static void queue_core_balance(struct rq *rq)
6352 {
6353 	if (!sched_core_enabled(rq))
6354 		return;
6355 
6356 	if (!rq->core->core_cookie)
6357 		return;
6358 
6359 	if (!rq->nr_running) /* not forced idle */
6360 		return;
6361 
6362 	queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance);
6363 }
6364 
6365 DEFINE_LOCK_GUARD_1(core_lock, int,
6366 		    sched_core_lock(*_T->lock, &_T->flags),
6367 		    sched_core_unlock(*_T->lock, &_T->flags),
6368 		    unsigned long flags)
6369 
6370 static void sched_core_cpu_starting(unsigned int cpu)
6371 {
6372 	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
6373 	struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
6374 	int t;
6375 
6376 	guard(core_lock)(&cpu);
6377 
6378 	WARN_ON_ONCE(rq->core != rq);
6379 
6380 	/* if we're the first, we'll be our own leader */
6381 	if (cpumask_weight(smt_mask) == 1)
6382 		return;
6383 
6384 	/* find the leader */
6385 	for_each_cpu(t, smt_mask) {
6386 		if (t == cpu)
6387 			continue;
6388 		rq = cpu_rq(t);
6389 		if (rq->core == rq) {
6390 			core_rq = rq;
6391 			break;
6392 		}
6393 	}
6394 
6395 	if (WARN_ON_ONCE(!core_rq)) /* whoopsie */
6396 		return;
6397 
6398 	/* install and validate core_rq */
6399 	for_each_cpu(t, smt_mask) {
6400 		rq = cpu_rq(t);
6401 
6402 		if (t == cpu)
6403 			rq->core = core_rq;
6404 
6405 		WARN_ON_ONCE(rq->core != core_rq);
6406 	}
6407 }
6408 
6409 static void sched_core_cpu_deactivate(unsigned int cpu)
6410 {
6411 	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
6412 	struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
6413 	int t;
6414 
6415 	guard(core_lock)(&cpu);
6416 
6417 	/* if we're the last man standing, nothing to do */
6418 	if (cpumask_weight(smt_mask) == 1) {
6419 		WARN_ON_ONCE(rq->core != rq);
6420 		return;
6421 	}
6422 
6423 	/* if we're not the leader, nothing to do */
6424 	if (rq->core != rq)
6425 		return;
6426 
6427 	/* find a new leader */
6428 	for_each_cpu(t, smt_mask) {
6429 		if (t == cpu)
6430 			continue;
6431 		core_rq = cpu_rq(t);
6432 		break;
6433 	}
6434 
6435 	if (WARN_ON_ONCE(!core_rq)) /* impossible */
6436 		return;
6437 
6438 	/* copy the shared state to the new leader */
6439 	core_rq->core_task_seq             = rq->core_task_seq;
6440 	core_rq->core_pick_seq             = rq->core_pick_seq;
6441 	core_rq->core_cookie               = rq->core_cookie;
6442 	core_rq->core_forceidle_count      = rq->core_forceidle_count;
6443 	core_rq->core_forceidle_seq        = rq->core_forceidle_seq;
6444 	core_rq->core_forceidle_occupation = rq->core_forceidle_occupation;
6445 
6446 	/*
6447 	 * Accounting edge for forced idle is handled in pick_next_task().
6448 	 * Don't need another one here, since the hotplug thread shouldn't
6449 	 * have a cookie.
6450 	 */
6451 	core_rq->core_forceidle_start = 0;
6452 
6453 	/* install new leader */
6454 	for_each_cpu(t, smt_mask) {
6455 		rq = cpu_rq(t);
6456 		rq->core = core_rq;
6457 	}
6458 }
6459 
6460 static inline void sched_core_cpu_dying(unsigned int cpu)
6461 {
6462 	struct rq *rq = cpu_rq(cpu);
6463 
6464 	if (rq->core != rq)
6465 		rq->core = rq;
6466 }
6467 
6468 #else /* !CONFIG_SCHED_CORE */
6469 
6470 static inline void sched_core_cpu_starting(unsigned int cpu) {}
6471 static inline void sched_core_cpu_deactivate(unsigned int cpu) {}
6472 static inline void sched_core_cpu_dying(unsigned int cpu) {}
6473 
6474 static struct task_struct *
6475 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6476 {
6477 	return __pick_next_task(rq, prev, rf);
6478 }
6479 
6480 #endif /* CONFIG_SCHED_CORE */
6481 
6482 /*
6483  * Constants for the sched_mode argument of __schedule().
6484  *
6485  * The mode argument allows RT enabled kernels to differentiate a
6486  * preemption from blocking on an 'sleeping' spin/rwlock.
6487  */
6488 #define SM_IDLE			(-1)
6489 #define SM_NONE			0
6490 #define SM_PREEMPT		1
6491 #define SM_RTLOCK_WAIT		2
6492 
6493 /*
6494  * __schedule() is the main scheduler function.
6495  *
6496  * The main means of driving the scheduler and thus entering this function are:
6497  *
6498  *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
6499  *
6500  *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
6501  *      paths. For example, see arch/x86/entry_64.S.
6502  *
6503  *      To drive preemption between tasks, the scheduler sets the flag in timer
6504  *      interrupt handler sched_tick().
6505  *
6506  *   3. Wakeups don't really cause entry into schedule(). They add a
6507  *      task to the run-queue and that's it.
6508  *
6509  *      Now, if the new task added to the run-queue preempts the current
6510  *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
6511  *      called on the nearest possible occasion:
6512  *
6513  *       - If the kernel is preemptible (CONFIG_PREEMPTION=y):
6514  *
6515  *         - in syscall or exception context, at the next outermost
6516  *           preempt_enable(). (this might be as soon as the wake_up()'s
6517  *           spin_unlock()!)
6518  *
6519  *         - in IRQ context, return from interrupt-handler to
6520  *           preemptible context
6521  *
6522  *       - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
6523  *         then at the next:
6524  *
6525  *          - cond_resched() call
6526  *          - explicit schedule() call
6527  *          - return from syscall or exception to user-space
6528  *          - return from interrupt-handler to user-space
6529  *
6530  * WARNING: must be called with preemption disabled!
6531  */
6532 static void __sched notrace __schedule(int sched_mode)
6533 {
6534 	struct task_struct *prev, *next;
6535 	/*
6536 	 * On PREEMPT_RT kernels, SM_RTLOCK_WAIT is noted
6537 	 * as a preemption by schedule_debug() and RCU.
6538 	 */
6539 	bool preempt = sched_mode > SM_NONE;
6540 	unsigned long *switch_count;
6541 	unsigned long prev_state;
6542 	struct rq_flags rf;
6543 	struct rq *rq;
6544 	int cpu;
6545 
6546 	cpu = smp_processor_id();
6547 	rq = cpu_rq(cpu);
6548 	prev = rq->curr;
6549 
6550 	schedule_debug(prev, preempt);
6551 
6552 	if (sched_feat(HRTICK) || sched_feat(HRTICK_DL))
6553 		hrtick_clear(rq);
6554 
6555 	local_irq_disable();
6556 	rcu_note_context_switch(preempt);
6557 
6558 	/*
6559 	 * Make sure that signal_pending_state()->signal_pending() below
6560 	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
6561 	 * done by the caller to avoid the race with signal_wake_up():
6562 	 *
6563 	 * __set_current_state(@state)		signal_wake_up()
6564 	 * schedule()				  set_tsk_thread_flag(p, TIF_SIGPENDING)
6565 	 *					  wake_up_state(p, state)
6566 	 *   LOCK rq->lock			    LOCK p->pi_state
6567 	 *   smp_mb__after_spinlock()		    smp_mb__after_spinlock()
6568 	 *     if (signal_pending_state())	    if (p->state & @state)
6569 	 *
6570 	 * Also, the membarrier system call requires a full memory barrier
6571 	 * after coming from user-space, before storing to rq->curr; this
6572 	 * barrier matches a full barrier in the proximity of the membarrier
6573 	 * system call exit.
6574 	 */
6575 	rq_lock(rq, &rf);
6576 	smp_mb__after_spinlock();
6577 
6578 	/* Promote REQ to ACT */
6579 	rq->clock_update_flags <<= 1;
6580 	update_rq_clock(rq);
6581 	rq->clock_update_flags = RQCF_UPDATED;
6582 
6583 	switch_count = &prev->nivcsw;
6584 
6585 	/* Task state changes only consider SM_PREEMPT as preemption */
6586 	preempt = sched_mode == SM_PREEMPT;
6587 
6588 	/*
6589 	 * We must load prev->state once (task_struct::state is volatile), such
6590 	 * that we form a control dependency vs deactivate_task() below.
6591 	 */
6592 	prev_state = READ_ONCE(prev->__state);
6593 	if (sched_mode == SM_IDLE) {
6594 		if (!rq->nr_running) {
6595 			next = prev;
6596 			goto picked;
6597 		}
6598 	} else if (!preempt && prev_state) {
6599 		if (signal_pending_state(prev_state, prev)) {
6600 			WRITE_ONCE(prev->__state, TASK_RUNNING);
6601 		} else {
6602 			int flags = DEQUEUE_NOCLOCK;
6603 
6604 			prev->sched_contributes_to_load =
6605 				(prev_state & TASK_UNINTERRUPTIBLE) &&
6606 				!(prev_state & TASK_NOLOAD) &&
6607 				!(prev_state & TASK_FROZEN);
6608 
6609 			if (unlikely(is_special_task_state(prev_state)))
6610 				flags |= DEQUEUE_SPECIAL;
6611 
6612 			/*
6613 			 * __schedule()			ttwu()
6614 			 *   prev_state = prev->state;    if (p->on_rq && ...)
6615 			 *   if (prev_state)		    goto out;
6616 			 *     p->on_rq = 0;		  smp_acquire__after_ctrl_dep();
6617 			 *				  p->state = TASK_WAKING
6618 			 *
6619 			 * Where __schedule() and ttwu() have matching control dependencies.
6620 			 *
6621 			 * After this, schedule() must not care about p->state any more.
6622 			 */
6623 			block_task(rq, prev, flags);
6624 		}
6625 		switch_count = &prev->nvcsw;
6626 	}
6627 
6628 	next = pick_next_task(rq, prev, &rf);
6629 picked:
6630 	clear_tsk_need_resched(prev);
6631 	clear_preempt_need_resched();
6632 #ifdef CONFIG_SCHED_DEBUG
6633 	rq->last_seen_need_resched_ns = 0;
6634 #endif
6635 
6636 	if (likely(prev != next)) {
6637 		rq->nr_switches++;
6638 		/*
6639 		 * RCU users of rcu_dereference(rq->curr) may not see
6640 		 * changes to task_struct made by pick_next_task().
6641 		 */
6642 		RCU_INIT_POINTER(rq->curr, next);
6643 		/*
6644 		 * The membarrier system call requires each architecture
6645 		 * to have a full memory barrier after updating
6646 		 * rq->curr, before returning to user-space.
6647 		 *
6648 		 * Here are the schemes providing that barrier on the
6649 		 * various architectures:
6650 		 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC,
6651 		 *   RISC-V.  switch_mm() relies on membarrier_arch_switch_mm()
6652 		 *   on PowerPC and on RISC-V.
6653 		 * - finish_lock_switch() for weakly-ordered
6654 		 *   architectures where spin_unlock is a full barrier,
6655 		 * - switch_to() for arm64 (weakly-ordered, spin_unlock
6656 		 *   is a RELEASE barrier),
6657 		 *
6658 		 * The barrier matches a full barrier in the proximity of
6659 		 * the membarrier system call entry.
6660 		 *
6661 		 * On RISC-V, this barrier pairing is also needed for the
6662 		 * SYNC_CORE command when switching between processes, cf.
6663 		 * the inline comments in membarrier_arch_switch_mm().
6664 		 */
6665 		++*switch_count;
6666 
6667 		migrate_disable_switch(rq, prev);
6668 		psi_account_irqtime(rq, prev, next);
6669 		psi_sched_switch(prev, next, !task_on_rq_queued(prev));
6670 
6671 		trace_sched_switch(preempt, prev, next, prev_state);
6672 
6673 		/* Also unlocks the rq: */
6674 		rq = context_switch(rq, prev, next, &rf);
6675 	} else {
6676 		rq_unpin_lock(rq, &rf);
6677 		__balance_callbacks(rq);
6678 		raw_spin_rq_unlock_irq(rq);
6679 	}
6680 }
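
/*
 * Illustrative sketch, not part of this file: the canonical blocking
 * pattern behind entry point 1 above, and the reader side of the
 * smp_mb__after_spinlock() pairing in __schedule(). The condition name
 * is hypothetical:
 *
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (my_condition)
 *			break;
 *		if (signal_pending(current))
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * The task state is set *before* the condition is tested, so a wakeup
 * between the test and schedule() is not lost: either the sleeper sees
 * the updated condition, or the waker sees the sleeping state and wakes
 * the task back up.
 */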
6681 
6682 void __noreturn do_task_dead(void)
6683 {
6684 	/* Causes final put_task_struct in finish_task_switch(): */
6685 	set_special_state(TASK_DEAD);
6686 
6687 	/* Tell freezer to ignore us: */
6688 	current->flags |= PF_NOFREEZE;
6689 
6690 	__schedule(SM_NONE);
6691 	BUG();
6692 
6693 	/* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
6694 	for (;;)
6695 		cpu_relax();
6696 }
6697 
6698 static inline void sched_submit_work(struct task_struct *tsk)
6699 {
6700 	static DEFINE_WAIT_OVERRIDE_MAP(sched_map, LD_WAIT_CONFIG);
6701 	unsigned int task_flags;
6702 
6703 	/*
6704 	 * Establish LD_WAIT_CONFIG context to ensure none of the code called
6705 	 * will use a blocking primitive -- which would lead to recursion.
6706 	 */
6707 	lock_map_acquire_try(&sched_map);
6708 
6709 	task_flags = tsk->flags;
6710 	/*
6711 	 * If a worker goes to sleep, notify and ask workqueue whether it
6712 	 * wants to wake up a task to maintain concurrency.
6713 	 */
6714 	if (task_flags & PF_WQ_WORKER)
6715 		wq_worker_sleeping(tsk);
6716 	else if (task_flags & PF_IO_WORKER)
6717 		io_wq_worker_sleeping(tsk);
6718 
6719 	/*
6720 	 * spinlock and rwlock must not flush block requests.  This will
6721 	 * deadlock if the callback attempts to acquire a lock which is
6722 	 * already acquired.
6723 	 */
6724 	SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT);
6725 
6726 	/*
6727 	 * If we are going to sleep and we have plugged IO queued,
6728 	 * make sure to submit it to avoid deadlocks.
6729 	 */
6730 	blk_flush_plug(tsk->plug, true);
6731 
6732 	lock_map_release(&sched_map);
6733 }
6734 
6735 static void sched_update_worker(struct task_struct *tsk)
6736 {
6737 	if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER | PF_BLOCK_TS)) {
6738 		if (tsk->flags & PF_BLOCK_TS)
6739 			blk_plug_invalidate_ts(tsk);
6740 		if (tsk->flags & PF_WQ_WORKER)
6741 			wq_worker_running(tsk);
6742 		else if (tsk->flags & PF_IO_WORKER)
6743 			io_wq_worker_running(tsk);
6744 	}
6745 }
6746 
6747 static __always_inline void __schedule_loop(int sched_mode)
6748 {
6749 	do {
6750 		preempt_disable();
6751 		__schedule(sched_mode);
6752 		sched_preempt_enable_no_resched();
6753 	} while (need_resched());
6754 }
6755 
6756 asmlinkage __visible void __sched schedule(void)
6757 {
6758 	struct task_struct *tsk = current;
6759 
6760 #ifdef CONFIG_RT_MUTEXES
6761 	lockdep_assert(!tsk->sched_rt_mutex);
6762 #endif
6763 
6764 	if (!task_is_running(tsk))
6765 		sched_submit_work(tsk);
6766 	__schedule_loop(SM_NONE);
6767 	sched_update_worker(tsk);
6768 }
6769 EXPORT_SYMBOL(schedule);
6770 
6771 /*
6772  * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
6773  * state (i.e., has scheduled out non-voluntarily) by making sure that all
6774  * tasks have either left the run queue or have gone into user space.
6775  * As idle tasks do not do either, they must not ever be preempted
6776  * (schedule out non-voluntarily).
6777  *
6778  * schedule_idle() is similar to schedule_preempt_disabled() except that it
6779  * never enables preemption because it does not call sched_submit_work().
6780  */
6781 void __sched schedule_idle(void)
6782 {
6783 	/*
6784 	 * This skips calling sched_submit_work(), which is safe for the idle
6785 	 * task because that function is a NOP when the task is in the
6786 	 * TASK_RUNNING state, and idle is always in the TASK_RUNNING state.
6787 	 * Make sure this isn't used someplace where the current task could be
6788 	 * in any other state.
6789 	 */
6790 	WARN_ON_ONCE(current->__state);
6791 	do {
6792 		__schedule(SM_IDLE);
6793 	} while (need_resched());
6794 }
6795 
6796 #if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK)
6797 asmlinkage __visible void __sched schedule_user(void)
6798 {
6799 	/*
6800 	 * If we come here after a random call to set_need_resched(),
6801 	 * or we have been woken up remotely but the IPI has not yet arrived,
6802 	 * we haven't yet exited the RCU idle mode. Do it here manually until
6803 	 * we find a better solution.
6804 	 *
6805 	 * NB: There are buggy callers of this function.  Ideally we
6806 	 * should warn if prev_state != CT_STATE_USER, but that will trigger
6807 	 * too frequently to make sense yet.
6808 	 */
6809 	enum ctx_state prev_state = exception_enter();
6810 	schedule();
6811 	exception_exit(prev_state);
6812 }
6813 #endif
6814 
6815 /**
6816  * schedule_preempt_disabled - called with preemption disabled
6817  *
6818  * Returns with preemption disabled. Note: preempt_count must be 1
6819  */
6820 void __sched schedule_preempt_disabled(void)
6821 {
6822 	sched_preempt_enable_no_resched();
6823 	schedule();
6824 	preempt_disable();
6825 }
6826 
6827 #ifdef CONFIG_PREEMPT_RT
6828 void __sched notrace schedule_rtlock(void)
6829 {
6830 	__schedule_loop(SM_RTLOCK_WAIT);
6831 }
6832 NOKPROBE_SYMBOL(schedule_rtlock);
6833 #endif
6834 
6835 static void __sched notrace preempt_schedule_common(void)
6836 {
6837 	do {
6838 		/*
6839 		 * Because the function tracer can trace preempt_count_sub()
6840 		 * and it also uses preempt_enable/disable_notrace(), if
6841 		 * NEED_RESCHED is set, the preempt_enable_notrace() called
6842 		 * by the function tracer will call this function again and
6843 		 * cause infinite recursion.
6844 		 *
6845 		 * Preemption must be disabled here before the function
6846 		 * tracer can trace. Break up preempt_disable() into two
6847 		 * calls. One to disable preemption without fear of being
6848 		 * traced. The other to still record the preemption latency,
6849 		 * which can also be traced by the function tracer.
6850 		 */
6851 		preempt_disable_notrace();
6852 		preempt_latency_start(1);
6853 		__schedule(SM_PREEMPT);
6854 		preempt_latency_stop(1);
6855 		preempt_enable_no_resched_notrace();
6856 
6857 		/*
6858 		 * Check again in case we missed a preemption opportunity
6859 		 * between schedule and now.
6860 		 */
6861 	} while (need_resched());
6862 }
6863 
6864 #ifdef CONFIG_PREEMPTION
6865 /*
6866  * This is the entry point to schedule() from in-kernel preemption
6867  * off of preempt_enable.
6868  */
6869 asmlinkage __visible void __sched notrace preempt_schedule(void)
6870 {
6871 	/*
6872 	 * If there is a non-zero preempt_count or interrupts are disabled,
6873 	 * we do not want to preempt the current task. Just return..
6874 	 */
6875 	if (likely(!preemptible()))
6876 		return;
6877 	preempt_schedule_common();
6878 }
6879 NOKPROBE_SYMBOL(preempt_schedule);
6880 EXPORT_SYMBOL(preempt_schedule);
6881 
6882 #ifdef CONFIG_PREEMPT_DYNAMIC
6883 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
6884 #ifndef preempt_schedule_dynamic_enabled
6885 #define preempt_schedule_dynamic_enabled	preempt_schedule
6886 #define preempt_schedule_dynamic_disabled	NULL
6887 #endif
6888 DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
6889 EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
6890 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
6891 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);
6892 void __sched notrace dynamic_preempt_schedule(void)
6893 {
6894 	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
6895 		return;
6896 	preempt_schedule();
6897 }
6898 NOKPROBE_SYMBOL(dynamic_preempt_schedule);
6899 EXPORT_SYMBOL(dynamic_preempt_schedule);
6900 #endif
6901 #endif
6902 
6903 /**
6904  * preempt_schedule_notrace - preempt_schedule called by tracing
6905  *
6906  * The tracing infrastructure uses preempt_enable_notrace to prevent
6907  * recursion and tracing preempt enabling caused by the tracing
6908  * infrastructure itself. But as tracing can happen in areas coming
6909  * from userspace or just about to enter userspace, a preempt enable
6910  * can occur before user_exit() is called. This will cause the scheduler
6911  * to be called when the system is still in usermode.
6912  *
6913  * To prevent this, the preempt_enable_notrace will use this function
6914  * instead of preempt_schedule() to exit user context if needed before
6915  * calling the scheduler.
6916  */
6917 asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
6918 {
6919 	enum ctx_state prev_ctx;
6920 
6921 	if (likely(!preemptible()))
6922 		return;
6923 
6924 	do {
6925 		/*
6926 		 * Because the function tracer can trace preempt_count_sub()
6927 		 * and it also uses preempt_enable/disable_notrace(), if
6928 		 * NEED_RESCHED is set, the preempt_enable_notrace() called
6929 		 * by the function tracer will call this function again and
6930 		 * cause infinite recursion.
6931 		 *
6932 		 * Preemption must be disabled here before the function
6933 		 * tracer can trace. Break up preempt_disable() into two
6934 		 * calls. One to disable preemption without fear of being
6935 		 * traced. The other to still record the preemption latency,
6936 		 * which can also be traced by the function tracer.
6937 		 */
6938 		preempt_disable_notrace();
6939 		preempt_latency_start(1);
6940 		/*
6941 		 * Needs preempt disabled in case user_exit() is traced
6942 		 * and the tracer calls preempt_enable_notrace() causing
6943 		 * an infinite recursion.
6944 		 */
6945 		prev_ctx = exception_enter();
6946 		__schedule(SM_PREEMPT);
6947 		exception_exit(prev_ctx);
6948 
6949 		preempt_latency_stop(1);
6950 		preempt_enable_no_resched_notrace();
6951 	} while (need_resched());
6952 }
6953 EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
6954 
6955 #ifdef CONFIG_PREEMPT_DYNAMIC
6956 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
6957 #ifndef preempt_schedule_notrace_dynamic_enabled
6958 #define preempt_schedule_notrace_dynamic_enabled	preempt_schedule_notrace
6959 #define preempt_schedule_notrace_dynamic_disabled	NULL
6960 #endif
6961 DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
6962 EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
6963 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
6964 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace);
6965 void __sched notrace dynamic_preempt_schedule_notrace(void)
6966 {
6967 	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace))
6968 		return;
6969 	preempt_schedule_notrace();
6970 }
6971 NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace);
6972 EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
6973 #endif
6974 #endif
6975 
6976 #endif /* CONFIG_PREEMPTION */
6977 
6978 /*
6979  * This is the entry point to schedule() from kernel preemption
6980  * off of IRQ context.
6981  * Note that this is called and returns with IRQs disabled. This
6982  * protects us against recursive calls from IRQ contexts.
6983  */
6984 asmlinkage __visible void __sched preempt_schedule_irq(void)
6985 {
6986 	enum ctx_state prev_state;
6987 
6988 	/* Catch callers which need to be fixed */
6989 	BUG_ON(preempt_count() || !irqs_disabled());
6990 
6991 	prev_state = exception_enter();
6992 
6993 	do {
6994 		preempt_disable();
6995 		local_irq_enable();
6996 		__schedule(SM_PREEMPT);
6997 		local_irq_disable();
6998 		sched_preempt_enable_no_resched();
6999 	} while (need_resched());
7000 
7001 	exception_exit(prev_state);
7002 }
7003 
7004 int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
7005 			  void *key)
7006 {
7007 	WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~(WF_SYNC|WF_CURRENT_CPU));
7008 	return try_to_wake_up(curr->private, mode, wake_flags);
7009 }
7010 EXPORT_SYMBOL(default_wake_function);
7011 
7012 void __setscheduler_prio(struct task_struct *p, int prio)
7013 {
7014 	if (dl_prio(prio))
7015 		p->sched_class = &dl_sched_class;
7016 	else if (rt_prio(prio))
7017 		p->sched_class = &rt_sched_class;
7018 #ifdef CONFIG_SCHED_CLASS_EXT
7019 	else if (task_should_scx(p))
7020 		p->sched_class = &ext_sched_class;
7021 #endif
7022 	else
7023 		p->sched_class = &fair_sched_class;
7024 
7025 	p->prio = prio;
7026 }
7027 
7028 #ifdef CONFIG_RT_MUTEXES
7029 
7030 /*
7031  * Would be more useful with typeof()/auto_type but they don't mix with
7032  * bit-fields. Since it's a local thing, use int. Keep the generic sounding
7033  * bit-fields. Since it's a local thing, use int. Keep the generic-sounding
7034  * notes.
7035  */
7036 #define fetch_and_set(x, v) ({ int _x = (x); (x) = (v); _x; })
7037 
7038 void rt_mutex_pre_schedule(void)
7039 {
7040 	lockdep_assert(!fetch_and_set(current->sched_rt_mutex, 1));
7041 	sched_submit_work(current);
7042 }
7043 
7044 void rt_mutex_schedule(void)
7045 {
7046 	lockdep_assert(current->sched_rt_mutex);
7047 	__schedule_loop(SM_NONE);
7048 }
7049 
7050 void rt_mutex_post_schedule(void)
7051 {
7052 	sched_update_worker(current);
7053 	lockdep_assert(fetch_and_set(current->sched_rt_mutex, 0));
7054 }
7055 
7056 /*
7057  * rt_mutex_setprio - set the current priority of a task
7058  * @p: task to boost
7059  * @pi_task: donor task
7060  *
7061  * This function changes the 'effective' priority of a task. It does
7062  * not touch ->normal_prio like __setscheduler().
7063  *
7064  * Used by the rt_mutex code to implement priority inheritance
7065  * logic. Call site only calls if the priority of the task changed.
7066  */
7067 void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
7068 {
7069 	int prio, oldprio, queued, running, queue_flag =
7070 		DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
7071 	const struct sched_class *prev_class;
7072 	struct rq_flags rf;
7073 	struct rq *rq;
7074 
7075 	/* XXX used to be waiter->prio, not waiter->task->prio */
7076 	prio = __rt_effective_prio(pi_task, p->normal_prio);
7077 
7078 	/*
7079 	 * If nothing changed; bail early.
7080 	 */
7081 	if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio))
7082 		return;
7083 
7084 	rq = __task_rq_lock(p, &rf);
7085 	update_rq_clock(rq);
7086 	/*
7087 	 * Set under pi_lock && rq->lock, such that the value can be used under
7088 	 * either lock.
7089 	 *
7090 	 * Note that there is loads of trickery to make this pointer cache work
7091 	 * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
7092 	 * ensure a task is de-boosted (pi_task is set to NULL) before the
7093 	 * task is allowed to run again (and can exit). This ensures the pointer
7094 	 * points to a blocked task -- which guarantees the task is present.
7095 	 */
7096 	p->pi_top_task = pi_task;
7097 
7098 	/*
7099 	 * For FIFO/RR we only need to set prio; if that matches, we're done.
7100 	 */
7101 	if (prio == p->prio && !dl_prio(prio))
7102 		goto out_unlock;
7103 
7104 	/*
7105 	 * Idle task boosting is a no-no in general. There is one
7106 	 * exception, when PREEMPT_RT and NOHZ are active:
7107 	 *
7108 	 * The idle task calls get_next_timer_interrupt() and holds
7109 	 * the timer wheel base->lock on the CPU and another CPU wants
7110 	 * to access the timer (probably to cancel it). We can safely
7111 	 * ignore the boosting request, as the idle CPU runs this code
7112 	 * with interrupts disabled and will complete the lock
7113 	 * protected section without being interrupted. So there is no
7114 	 * real need to boost.
7115 	 */
7116 	if (unlikely(p == rq->idle)) {
7117 		WARN_ON(p != rq->curr);
7118 		WARN_ON(p->pi_blocked_on);
7119 		goto out_unlock;
7120 	}
7121 
7122 	trace_sched_pi_setprio(p, pi_task);
7123 	oldprio = p->prio;
7124 
7125 	if (oldprio == prio)
7126 		queue_flag &= ~DEQUEUE_MOVE;
7127 
7128 	prev_class = p->sched_class;
7129 	queued = task_on_rq_queued(p);
7130 	running = task_current(rq, p);
7131 	if (queued)
7132 		dequeue_task(rq, p, queue_flag);
7133 	if (running)
7134 		put_prev_task(rq, p);
7135 
7136 	/*
7137 	 * Boosting conditions are:
7138 	 * 1. -rt task is running and holds mutex A
7139 	 *      --> -dl task blocks on mutex A
7140 	 *
7141 	 * 2. -dl task is running and holds mutex A
7142 	 *      --> -dl task blocks on mutex A and could preempt the
7143 	 *          running task
7144 	 */
7145 	if (dl_prio(prio)) {
7146 		if (!dl_prio(p->normal_prio) ||
7147 		    (pi_task && dl_prio(pi_task->prio) &&
7148 		     dl_entity_preempt(&pi_task->dl, &p->dl))) {
7149 			p->dl.pi_se = pi_task->dl.pi_se;
7150 			queue_flag |= ENQUEUE_REPLENISH;
7151 		} else {
7152 			p->dl.pi_se = &p->dl;
7153 		}
7154 	} else if (rt_prio(prio)) {
7155 		if (dl_prio(oldprio))
7156 			p->dl.pi_se = &p->dl;
7157 		if (oldprio < prio)
7158 			queue_flag |= ENQUEUE_HEAD;
7159 	} else {
7160 		if (dl_prio(oldprio))
7161 			p->dl.pi_se = &p->dl;
7162 		if (rt_prio(oldprio))
7163 			p->rt.timeout = 0;
7164 	}
7165 
7166 	__setscheduler_prio(p, prio);
7167 	check_class_changing(rq, p, prev_class);
7168 
7169 	if (queued)
7170 		enqueue_task(rq, p, queue_flag);
7171 	if (running)
7172 		set_next_task(rq, p);
7173 
7174 	check_class_changed(rq, p, prev_class, oldprio);
7175 out_unlock:
7176 	/* Avoid rq from going away on us: */
7177 	preempt_disable();
7178 
7179 	rq_unpin_lock(rq, &rf);
7180 	__balance_callbacks(rq);
7181 	raw_spin_rq_unlock(rq);
7182 
7183 	preempt_enable();
7184 }
7185 #endif
7186 
7187 #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
7188 int __sched __cond_resched(void)
7189 {
7190 	if (should_resched(0)) {
7191 		preempt_schedule_common();
7192 		return 1;
7193 	}
7194 	/*
7195 	 * In preemptible kernels, ->rcu_read_lock_nesting tells the tick
7196 	 * whether the current CPU is in an RCU read-side critical section,
7197 	 * so the tick can report quiescent states even for CPUs looping
7198 	 * in kernel context.  In contrast, in non-preemptible kernels,
7199 	 * RCU readers leave no in-memory hints, which means that CPU-bound
7200 	 * processes executing in kernel context might never report an
7201 	 * RCU quiescent state.  Therefore, the following code causes
7202 	 * cond_resched() to report a quiescent state, but only when RCU
7203 	 * is in urgent need of one.
7204 	 */
7205 #ifndef CONFIG_PREEMPT_RCU
7206 	rcu_all_qs();
7207 #endif
7208 	return 0;
7209 }
7210 EXPORT_SYMBOL(__cond_resched);
7211 #endif
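
/*
 * Illustrative sketch, not part of this file: the typical caller
 * __cond_resched() is written for -- a long kernel-context loop that
 * offers a preemption point (and, on !PREEMPT_RCU, a quiescent state)
 * on every iteration. The item type and worker function are
 * hypothetical:
 *
 *	list_for_each_entry(item, head, node) {
 *		process_one(item);
 *		cond_resched();
 *	}
 */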
7212 
7213 #ifdef CONFIG_PREEMPT_DYNAMIC
7214 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
7215 #define cond_resched_dynamic_enabled	__cond_resched
7216 #define cond_resched_dynamic_disabled	((void *)&__static_call_return0)
7217 DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
7218 EXPORT_STATIC_CALL_TRAMP(cond_resched);
7219 
7220 #define might_resched_dynamic_enabled	__cond_resched
7221 #define might_resched_dynamic_disabled	((void *)&__static_call_return0)
7222 DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
7223 EXPORT_STATIC_CALL_TRAMP(might_resched);
7224 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7225 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
7226 int __sched dynamic_cond_resched(void)
7227 {
7228 	klp_sched_try_switch();
7229 	if (!static_branch_unlikely(&sk_dynamic_cond_resched))
7230 		return 0;
7231 	return __cond_resched();
7232 }
7233 EXPORT_SYMBOL(dynamic_cond_resched);
7234 
7235 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched);
7236 int __sched dynamic_might_resched(void)
7237 {
7238 	if (!static_branch_unlikely(&sk_dynamic_might_resched))
7239 		return 0;
7240 	return __cond_resched();
7241 }
7242 EXPORT_SYMBOL(dynamic_might_resched);
7243 #endif
7244 #endif
7245 
7246 /*
7247  * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
7248  * call schedule(), and on return reacquire the lock.
7249  *
7250  * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
7251  * operations here to prevent schedule() from being called twice (once via
7252  * spin_unlock(), once by hand).
7253  */
7254 int __cond_resched_lock(spinlock_t *lock)
7255 {
7256 	int resched = should_resched(PREEMPT_LOCK_OFFSET);
7257 	int ret = 0;
7258 
7259 	lockdep_assert_held(lock);
7260 
7261 	if (spin_needbreak(lock) || resched) {
7262 		spin_unlock(lock);
7263 		if (!_cond_resched())
7264 			cpu_relax();
7265 		ret = 1;
7266 		spin_lock(lock);
7267 	}
7268 	return ret;
7269 }
7270 EXPORT_SYMBOL(__cond_resched_lock);
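
/*
 * Illustrative sketch (editor's example): a long scan under a spinlock
 * can use cond_resched_lock(), which may drop and reacquire the lock to
 * let other work in; the array and its stability across lock drops are
 * hypothetical assumptions.
 */
static __maybe_unused void example_scan(spinlock_t *lock, int *data, int n)
{
	int i;

	spin_lock(lock);
	for (i = 0; i < n; i++) {
		data[i]++;
		/* May unlock, schedule and relock; returns 1 if it did: */
		cond_resched_lock(lock);
	}
	spin_unlock(lock);
}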
7271 
7272 int __cond_resched_rwlock_read(rwlock_t *lock)
7273 {
7274 	int resched = should_resched(PREEMPT_LOCK_OFFSET);
7275 	int ret = 0;
7276 
7277 	lockdep_assert_held_read(lock);
7278 
7279 	if (rwlock_needbreak(lock) || resched) {
7280 		read_unlock(lock);
7281 		if (!_cond_resched())
7282 			cpu_relax();
7283 		ret = 1;
7284 		read_lock(lock);
7285 	}
7286 	return ret;
7287 }
7288 EXPORT_SYMBOL(__cond_resched_rwlock_read);
7289 
7290 int __cond_resched_rwlock_write(rwlock_t *lock)
7291 {
7292 	int resched = should_resched(PREEMPT_LOCK_OFFSET);
7293 	int ret = 0;
7294 
7295 	lockdep_assert_held_write(lock);
7296 
7297 	if (rwlock_needbreak(lock) || resched) {
7298 		write_unlock(lock);
7299 		if (!_cond_resched())
7300 			cpu_relax();
7301 		ret = 1;
7302 		write_lock(lock);
7303 	}
7304 	return ret;
7305 }
7306 EXPORT_SYMBOL(__cond_resched_rwlock_write);
7307 
7308 #ifdef CONFIG_PREEMPT_DYNAMIC
7309 
7310 #ifdef CONFIG_GENERIC_ENTRY
7311 #include <linux/entry-common.h>
7312 #endif
7313 
7314 /*
7315  * SC:cond_resched
7316  * SC:might_resched
7317  * SC:preempt_schedule
7318  * SC:preempt_schedule_notrace
7319  * SC:irqentry_exit_cond_resched
7320  *
7321  *
7322  * NONE:
7323  *   cond_resched               <- __cond_resched
7324  *   might_resched              <- RET0
7325  *   preempt_schedule           <- NOP
7326  *   preempt_schedule_notrace   <- NOP
7327  *   irqentry_exit_cond_resched <- NOP
7328  *
7329  * VOLUNTARY:
7330  *   cond_resched               <- __cond_resched
7331  *   might_resched              <- __cond_resched
7332  *   preempt_schedule           <- NOP
7333  *   preempt_schedule_notrace   <- NOP
7334  *   irqentry_exit_cond_resched <- NOP
7335  *
7336  * FULL:
7337  *   cond_resched               <- RET0
7338  *   might_resched              <- RET0
7339  *   preempt_schedule           <- preempt_schedule
7340  *   preempt_schedule_notrace   <- preempt_schedule_notrace
7341  *   irqentry_exit_cond_resched <- irqentry_exit_cond_resched
7342  */
7343 
7344 enum {
7345 	preempt_dynamic_undefined = -1,
7346 	preempt_dynamic_none,
7347 	preempt_dynamic_voluntary,
7348 	preempt_dynamic_full,
7349 };
7350 
7351 int preempt_dynamic_mode = preempt_dynamic_undefined;
7352 
7353 int sched_dynamic_mode(const char *str)
7354 {
7355 	if (!strcmp(str, "none"))
7356 		return preempt_dynamic_none;
7357 
7358 	if (!strcmp(str, "voluntary"))
7359 		return preempt_dynamic_voluntary;
7360 
7361 	if (!strcmp(str, "full"))
7362 		return preempt_dynamic_full;
7363 
7364 	return -EINVAL;
7365 }
7366 
7367 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
7368 #define preempt_dynamic_enable(f)	static_call_update(f, f##_dynamic_enabled)
7369 #define preempt_dynamic_disable(f)	static_call_update(f, f##_dynamic_disabled)
7370 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7371 #define preempt_dynamic_enable(f)	static_key_enable(&sk_dynamic_##f.key)
7372 #define preempt_dynamic_disable(f)	static_key_disable(&sk_dynamic_##f.key)
7373 #else
7374 #error "Unsupported PREEMPT_DYNAMIC mechanism"
7375 #endif
7376 
7377 static DEFINE_MUTEX(sched_dynamic_mutex);
7378 static bool klp_override;
7379 
7380 static void __sched_dynamic_update(int mode)
7381 {
7382 	/*
7383 	 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
7384 	 * the ZERO state, which is invalid.
7385 	 */
7386 	if (!klp_override)
7387 		preempt_dynamic_enable(cond_resched);
7388 	preempt_dynamic_enable(might_resched);
7389 	preempt_dynamic_enable(preempt_schedule);
7390 	preempt_dynamic_enable(preempt_schedule_notrace);
7391 	preempt_dynamic_enable(irqentry_exit_cond_resched);
7392 
7393 	switch (mode) {
7394 	case preempt_dynamic_none:
7395 		if (!klp_override)
7396 			preempt_dynamic_enable(cond_resched);
7397 		preempt_dynamic_disable(might_resched);
7398 		preempt_dynamic_disable(preempt_schedule);
7399 		preempt_dynamic_disable(preempt_schedule_notrace);
7400 		preempt_dynamic_disable(irqentry_exit_cond_resched);
7401 		if (mode != preempt_dynamic_mode)
7402 			pr_info("Dynamic Preempt: none\n");
7403 		break;
7404 
7405 	case preempt_dynamic_voluntary:
7406 		if (!klp_override)
7407 			preempt_dynamic_enable(cond_resched);
7408 		preempt_dynamic_enable(might_resched);
7409 		preempt_dynamic_disable(preempt_schedule);
7410 		preempt_dynamic_disable(preempt_schedule_notrace);
7411 		preempt_dynamic_disable(irqentry_exit_cond_resched);
7412 		if (mode != preempt_dynamic_mode)
7413 			pr_info("Dynamic Preempt: voluntary\n");
7414 		break;
7415 
7416 	case preempt_dynamic_full:
7417 		if (!klp_override)
7418 			preempt_dynamic_disable(cond_resched);
7419 		preempt_dynamic_disable(might_resched);
7420 		preempt_dynamic_enable(preempt_schedule);
7421 		preempt_dynamic_enable(preempt_schedule_notrace);
7422 		preempt_dynamic_enable(irqentry_exit_cond_resched);
7423 		if (mode != preempt_dynamic_mode)
7424 			pr_info("Dynamic Preempt: full\n");
7425 		break;
7426 	}
7427 
7428 	preempt_dynamic_mode = mode;
7429 }
7430 
7431 void sched_dynamic_update(int mode)
7432 {
7433 	mutex_lock(&sched_dynamic_mutex);
7434 	__sched_dynamic_update(mode);
7435 	mutex_unlock(&sched_dynamic_mutex);
7436 }
7437 
7438 #ifdef CONFIG_HAVE_PREEMPT_DYNAMIC_CALL
7439 
7440 static int klp_cond_resched(void)
7441 {
7442 	__klp_sched_try_switch();
7443 	return __cond_resched();
7444 }
7445 
7446 void sched_dynamic_klp_enable(void)
7447 {
7448 	mutex_lock(&sched_dynamic_mutex);
7449 
7450 	klp_override = true;
7451 	static_call_update(cond_resched, klp_cond_resched);
7452 
7453 	mutex_unlock(&sched_dynamic_mutex);
7454 }
7455 
7456 void sched_dynamic_klp_disable(void)
7457 {
7458 	mutex_lock(&sched_dynamic_mutex);
7459 
7460 	klp_override = false;
7461 	__sched_dynamic_update(preempt_dynamic_mode);
7462 
7463 	mutex_unlock(&sched_dynamic_mutex);
7464 }
7465 
7466 #endif /* CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
7467 
7468 static int __init setup_preempt_mode(char *str)
7469 {
7470 	int mode = sched_dynamic_mode(str);
7471 	if (mode < 0) {
7472 		pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
7473 		return 0;
7474 	}
7475 
7476 	sched_dynamic_update(mode);
7477 	return 1;
7478 }
7479 __setup("preempt=", setup_preempt_mode);
7480 
7481 static void __init preempt_dynamic_init(void)
7482 {
7483 	if (preempt_dynamic_mode == preempt_dynamic_undefined) {
7484 		if (IS_ENABLED(CONFIG_PREEMPT_NONE)) {
7485 			sched_dynamic_update(preempt_dynamic_none);
7486 		} else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) {
7487 			sched_dynamic_update(preempt_dynamic_voluntary);
7488 		} else {
7489 			/* Default static call setting, nothing to do */
7490 			WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT));
7491 			preempt_dynamic_mode = preempt_dynamic_full;
7492 			pr_info("Dynamic Preempt: full\n");
7493 		}
7494 	}
7495 }
7496 
7497 #define PREEMPT_MODEL_ACCESSOR(mode) \
7498 	bool preempt_model_##mode(void)						 \
7499 	{									 \
7500 		WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \
7501 		return preempt_dynamic_mode == preempt_dynamic_##mode;		 \
7502 	}									 \
7503 	EXPORT_SYMBOL_GPL(preempt_model_##mode)
7504 
7505 PREEMPT_MODEL_ACCESSOR(none);
7506 PREEMPT_MODEL_ACCESSOR(voluntary);
7507 PREEMPT_MODEL_ACCESSOR(full);
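
/*
 * Illustrative sketch (editor's example): callers can adapt to the
 * preemption model selected at boot, e.g. only inserting explicit
 * rescheduling points when the kernel is not fully preemptible:
 */
static __maybe_unused void example_busy_wait(bool *done)
{
	while (!READ_ONCE(*done)) {
		if (!preempt_model_full())
			cond_resched();
		cpu_relax();
	}
}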
7508 
7509 #else /* !CONFIG_PREEMPT_DYNAMIC: */
7510 
7511 static inline void preempt_dynamic_init(void) { }
7512 
7513 #endif /* CONFIG_PREEMPT_DYNAMIC */
7514 
7515 int io_schedule_prepare(void)
7516 {
7517 	int old_iowait = current->in_iowait;
7518 
7519 	current->in_iowait = 1;
7520 	blk_flush_plug(current->plug, true);
7521 	return old_iowait;
7522 }
7523 
7524 void io_schedule_finish(int token)
7525 {
7526 	current->in_iowait = token;
7527 }
7528 
7529 /*
7530  * This task is about to go to sleep on IO. Increment rq->nr_iowait so
7531  * that process accounting knows that this is a task in IO wait state.
7532  */
7533 long __sched io_schedule_timeout(long timeout)
7534 {
7535 	int token;
7536 	long ret;
7537 
7538 	token = io_schedule_prepare();
7539 	ret = schedule_timeout(timeout);
7540 	io_schedule_finish(token);
7541 
7542 	return ret;
7543 }
7544 EXPORT_SYMBOL(io_schedule_timeout);
7545 
7546 void __sched io_schedule(void)
7547 {
7548 	int token;
7549 
7550 	token = io_schedule_prepare();
7551 	schedule();
7552 	io_schedule_finish(token);
7553 }
7554 EXPORT_SYMBOL(io_schedule);
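
/*
 * Illustrative sketch (editor's example): code sleeping through its own
 * wait primitive (here a completion, assuming <linux/completion.h> is
 * available) can still be accounted as iowait by bracketing the sleep
 * with io_schedule_prepare()/io_schedule_finish():
 */
static __maybe_unused void example_wait_for_io(struct completion *done)
{
	int token = io_schedule_prepare();

	wait_for_completion(done);	/* sleeps via schedule() */
	io_schedule_finish(token);
}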
7555 
7556 void sched_show_task(struct task_struct *p)
7557 {
7558 	unsigned long free;
7559 	int ppid;
7560 
7561 	if (!try_get_task_stack(p))
7562 		return;
7563 
7564 	pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
7565 
7566 	if (task_is_running(p))
7567 		pr_cont("  running task    ");
7568 	free = stack_not_used(p);
7569 	ppid = 0;
7570 	rcu_read_lock();
7571 	if (pid_alive(p))
7572 		ppid = task_pid_nr(rcu_dereference(p->real_parent));
7573 	rcu_read_unlock();
7574 	pr_cont(" stack:%-5lu pid:%-5d tgid:%-5d ppid:%-6d flags:0x%08lx\n",
7575 		free, task_pid_nr(p), task_tgid_nr(p),
7576 		ppid, read_task_thread_flags(p));
7577 
7578 	print_worker_info(KERN_INFO, p);
7579 	print_stop_info(KERN_INFO, p);
7580 	print_scx_info(KERN_INFO, p);
7581 	show_stack(p, NULL, KERN_INFO);
7582 	put_task_stack(p);
7583 }
7584 EXPORT_SYMBOL_GPL(sched_show_task);
7585 
7586 static inline bool
7587 state_filter_match(unsigned long state_filter, struct task_struct *p)
7588 {
7589 	unsigned int state = READ_ONCE(p->__state);
7590 
7591 	/* no filter, everything matches */
7592 	if (!state_filter)
7593 		return true;
7594 
7595 	/* filter, but doesn't match */
7596 	if (!(state & state_filter))
7597 		return false;
7598 
7599 	/*
7600 	 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
7601 	 * TASK_KILLABLE).
7602 	 */
7603 	if (state_filter == TASK_UNINTERRUPTIBLE && (state & TASK_NOLOAD))
7604 		return false;
7605 
7606 	return true;
7607 }
7608 
7609 
7610 void show_state_filter(unsigned int state_filter)
7611 {
7612 	struct task_struct *g, *p;
7613 
7614 	rcu_read_lock();
7615 	for_each_process_thread(g, p) {
7616 		/*
7617 		 * Reset the NMI watchdog timeout; listing all tasks on a
7618 		 * slow console might take a lot of time.
7619 		 * Also, reset softlockup watchdogs on all CPUs, because
7620 		 * another CPU might be blocked waiting for us to process
7621 		 * an IPI.
7622 		 */
7623 		touch_nmi_watchdog();
7624 		touch_all_softlockup_watchdogs();
7625 		if (state_filter_match(state_filter, p))
7626 			sched_show_task(p);
7627 	}
7628 
7629 #ifdef CONFIG_SCHED_DEBUG
7630 	if (!state_filter)
7631 		sysrq_sched_debug_show();
7632 #endif
7633 	rcu_read_unlock();
7634 	/*
7635 	 * Only show locks if all tasks are dumped:
7636 	 */
7637 	if (!state_filter)
7638 		debug_show_all_locks();
7639 }
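
/*
 * Editor's note (illustrative): SysRq-t effectively calls
 * show_state_filter(0) to dump every task, while SysRq-w passes
 * TASK_UNINTERRUPTIBLE to dump only blocked tasks.
 */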
7640 
7641 /**
7642  * init_idle - set up an idle thread for a given CPU
7643  * @idle: task in question
7644  * @cpu: CPU the idle task belongs to
7645  *
7646  * NOTE: this function does not set the idle thread's NEED_RESCHED
7647  * flag, to make booting more robust.
7648  */
7649 void __init init_idle(struct task_struct *idle, int cpu)
7650 {
7651 #ifdef CONFIG_SMP
7652 	struct affinity_context ac = (struct affinity_context) {
7653 		.new_mask  = cpumask_of(cpu),
7654 		.flags     = 0,
7655 	};
7656 #endif
7657 	struct rq *rq = cpu_rq(cpu);
7658 	unsigned long flags;
7659 
7660 	__sched_fork(0, idle);
7661 
7662 	raw_spin_lock_irqsave(&idle->pi_lock, flags);
7663 	raw_spin_rq_lock(rq);
7664 
7665 	idle->__state = TASK_RUNNING;
7666 	idle->se.exec_start = sched_clock();
7667 	/*
7668 	 * PF_KTHREAD should already be set at this point; regardless, make it
7669 	 * look like a proper per-CPU kthread.
7670 	 */
7671 	idle->flags |= PF_KTHREAD | PF_NO_SETAFFINITY;
7672 	kthread_set_per_cpu(idle, cpu);
7673 
7674 #ifdef CONFIG_SMP
7675 	/*
7676 	 * It's possible that init_idle() gets called multiple times on a task,
7677 	 * in that case do_set_cpus_allowed() will not do the right thing.
7678 	 *
7679 	 * And since this is boot we can forgo the serialization.
7680 	 */
7681 	set_cpus_allowed_common(idle, &ac);
7682 #endif
7683 	/*
7684 	 * We're having a chicken and egg problem, even though we are
7685 	 * holding rq->lock, the CPU isn't yet set to this CPU so the
7686 	 * lockdep check in task_group() will fail.
7687 	 *
7688 	 * Similar case to sched_fork(). Alternatively, we could
7689 	 * use task_rq_lock() here and obtain the other rq->lock.
7690 	 *
7691 	 * Silence PROVE_RCU
7692 	 */
7693 	rcu_read_lock();
7694 	__set_task_cpu(idle, cpu);
7695 	rcu_read_unlock();
7696 
7697 	rq->idle = idle;
7698 	rcu_assign_pointer(rq->curr, idle);
7699 	idle->on_rq = TASK_ON_RQ_QUEUED;
7700 #ifdef CONFIG_SMP
7701 	idle->on_cpu = 1;
7702 #endif
7703 	raw_spin_rq_unlock(rq);
7704 	raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
7705 
7706 	/* Set the preempt count _outside_ the spinlocks! */
7707 	init_idle_preempt_count(idle, cpu);
7708 
7709 	/*
7710 	 * The idle tasks have their own, simple scheduling class:
7711 	 */
7712 	idle->sched_class = &idle_sched_class;
7713 	ftrace_graph_init_idle_task(idle, cpu);
7714 	vtime_init_idle(idle, cpu);
7715 #ifdef CONFIG_SMP
7716 	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
7717 #endif
7718 }
7719 
7720 #ifdef CONFIG_SMP
7721 
7722 int cpuset_cpumask_can_shrink(const struct cpumask *cur,
7723 			      const struct cpumask *trial)
7724 {
7725 	int ret = 1;
7726 
7727 	if (cpumask_empty(cur))
7728 		return ret;
7729 
7730 	ret = dl_cpuset_cpumask_can_shrink(cur, trial);
7731 
7732 	return ret;
7733 }
7734 
7735 int task_can_attach(struct task_struct *p)
7736 {
7737 	int ret = 0;
7738 
7739 	/*
7740 	 * Kthreads which disallow setaffinity shouldn't be moved
7741 	 * to a new cpuset; we don't want to change their CPU
7742 	 * affinity and isolating such threads by their set of
7743 	 * allowed nodes is unnecessary.  Thus, cpusets are not
7744 	 * applicable for such threads.  This prevents checking for
7745 	 * success of set_cpus_allowed_ptr() on all attached tasks
7746 	 * before cpus_mask may be changed.
7747 	 */
7748 	if (p->flags & PF_NO_SETAFFINITY)
7749 		ret = -EINVAL;
7750 
7751 	return ret;
7752 }
7753 
7754 bool sched_smp_initialized __read_mostly;
7755 
7756 #ifdef CONFIG_NUMA_BALANCING
7757 /* Migrate current task p to target_cpu */
7758 int migrate_task_to(struct task_struct *p, int target_cpu)
7759 {
7760 	struct migration_arg arg = { p, target_cpu };
7761 	int curr_cpu = task_cpu(p);
7762 
7763 	if (curr_cpu == target_cpu)
7764 		return 0;
7765 
7766 	if (!cpumask_test_cpu(target_cpu, p->cpus_ptr))
7767 		return -EINVAL;
7768 
7769 	/* TODO: This is not properly updating schedstats */
7770 
7771 	trace_sched_move_numa(p, curr_cpu, target_cpu);
7772 	return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
7773 }
7774 
7775 /*
7776  * Requeue a task on a given node and accurately track the number of NUMA
7777  * tasks on the runqueues
7778  */
7779 void sched_setnuma(struct task_struct *p, int nid)
7780 {
7781 	bool queued, running;
7782 	struct rq_flags rf;
7783 	struct rq *rq;
7784 
7785 	rq = task_rq_lock(p, &rf);
7786 	queued = task_on_rq_queued(p);
7787 	running = task_current(rq, p);
7788 
7789 	if (queued)
7790 		dequeue_task(rq, p, DEQUEUE_SAVE);
7791 	if (running)
7792 		put_prev_task(rq, p);
7793 
7794 	p->numa_preferred_nid = nid;
7795 
7796 	if (queued)
7797 		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
7798 	if (running)
7799 		set_next_task(rq, p);
7800 	task_rq_unlock(rq, p, &rf);
7801 }
7802 #endif /* CONFIG_NUMA_BALANCING */
7803 
7804 #ifdef CONFIG_HOTPLUG_CPU
7805 /*
7806  * Ensure that the idle task is using init_mm right before its CPU goes
7807  * offline.
7808  */
7809 void idle_task_exit(void)
7810 {
7811 	struct mm_struct *mm = current->active_mm;
7812 
7813 	BUG_ON(cpu_online(smp_processor_id()));
7814 	BUG_ON(current != this_rq()->idle);
7815 
7816 	if (mm != &init_mm) {
7817 		switch_mm(mm, &init_mm, current);
7818 		finish_arch_post_lock_switch();
7819 	}
7820 
7821 	/* finish_cpu(), as run on the BP, will clean up the active_mm state */
7822 }
7823 
7824 static int __balance_push_cpu_stop(void *arg)
7825 {
7826 	struct task_struct *p = arg;
7827 	struct rq *rq = this_rq();
7828 	struct rq_flags rf;
7829 	int cpu;
7830 
7831 	raw_spin_lock_irq(&p->pi_lock);
7832 	rq_lock(rq, &rf);
7833 
7834 	update_rq_clock(rq);
7835 
7836 	if (task_rq(p) == rq && task_on_rq_queued(p)) {
7837 		cpu = select_fallback_rq(rq->cpu, p);
7838 		rq = __migrate_task(rq, &rf, p, cpu);
7839 	}
7840 
7841 	rq_unlock(rq, &rf);
7842 	raw_spin_unlock_irq(&p->pi_lock);
7843 
7844 	put_task_struct(p);
7845 
7846 	return 0;
7847 }
7848 
7849 static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
7850 
7851 /*
7852  * Ensure we only run per-cpu kthreads once the CPU goes !active.
7853  *
7854  * This is enabled below SCHED_AP_ACTIVE, i.e. when !cpu_active(), but it
7855  * is only effective while the CPU is going down.
7856  */
7857 static void balance_push(struct rq *rq)
7858 {
7859 	struct task_struct *push_task = rq->curr;
7860 
7861 	lockdep_assert_rq_held(rq);
7862 
7863 	/*
7864 	 * Ensure the callback stays installed until balance_push_set(.on = false);
7865 	 */
7866 	rq->balance_callback = &balance_push_callback;
7867 
7868 	/*
7869 	 * Only active while going offline and when invoked on the outgoing
7870 	 * CPU.
7871 	 */
7872 	if (!cpu_dying(rq->cpu) || rq != this_rq())
7873 		return;
7874 
7875 	/*
7876 	 * Both the CPU-hotplug thread and the stop task fall into this case
7877 	 * and are required to complete the hotplug process.
7878 	 */
7879 	if (kthread_is_per_cpu(push_task) ||
7880 	    is_migration_disabled(push_task)) {
7881 
7882 		/*
7883 		 * If this is the idle task on the outgoing CPU try to wake
7884 		 * up the hotplug control thread which might wait for the
7885 		 * last task to vanish. The rcuwait_active() check is
7886 		 * accurate here because the waiter is pinned on this CPU
7887 		 * and obviously can't be running in parallel.
7888 		 *
7889 		 * On RT kernels this also has to check whether there are
7890 		 * pinned and scheduled out tasks on the runqueue. They
7891 		 * need to leave the migrate disabled section first.
7892 		 */
7893 		if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
7894 		    rcuwait_active(&rq->hotplug_wait)) {
7895 			raw_spin_rq_unlock(rq);
7896 			rcuwait_wake_up(&rq->hotplug_wait);
7897 			raw_spin_rq_lock(rq);
7898 		}
7899 		return;
7900 	}
7901 
7902 	get_task_struct(push_task);
7903 	/*
7904 	 * Temporarily drop rq->lock such that we can wake-up the stop task.
7905 	 * Both preemption and IRQs are still disabled.
7906 	 */
7907 	preempt_disable();
7908 	raw_spin_rq_unlock(rq);
7909 	stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
7910 			    this_cpu_ptr(&push_work));
7911 	preempt_enable();
7912 	/*
7913 	 * At this point need_resched() is true and we'll take the loop in
7914 	 * schedule(). The next pick is obviously going to be the stop task
7915 	 * which kthread_is_per_cpu() and will push this task away.
7916 	 */
7917 	raw_spin_rq_lock(rq);
7918 }
7919 
7920 static void balance_push_set(int cpu, bool on)
7921 {
7922 	struct rq *rq = cpu_rq(cpu);
7923 	struct rq_flags rf;
7924 
7925 	rq_lock_irqsave(rq, &rf);
7926 	if (on) {
7927 		WARN_ON_ONCE(rq->balance_callback);
7928 		rq->balance_callback = &balance_push_callback;
7929 	} else if (rq->balance_callback == &balance_push_callback) {
7930 		rq->balance_callback = NULL;
7931 	}
7932 	rq_unlock_irqrestore(rq, &rf);
7933 }
7934 
7935 /*
7936  * Invoked from a CPU's hotplug control thread after the CPU has been marked
7937  * inactive. All tasks which are not per CPU kernel threads are either
7938  * pushed off this CPU now via balance_push() or placed on a different CPU
7939  * during wakeup. Wait until the CPU is quiescent.
7940  */
7941 static void balance_hotplug_wait(void)
7942 {
7943 	struct rq *rq = this_rq();
7944 
7945 	rcuwait_wait_event(&rq->hotplug_wait,
7946 			   rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
7947 			   TASK_UNINTERRUPTIBLE);
7948 }
7949 
7950 #else
7951 
7952 static inline void balance_push(struct rq *rq)
7953 {
7954 }
7955 
7956 static inline void balance_push_set(int cpu, bool on)
7957 {
7958 }
7959 
7960 static inline void balance_hotplug_wait(void)
7961 {
7962 }
7963 
7964 #endif /* CONFIG_HOTPLUG_CPU */
7965 
7966 void set_rq_online(struct rq *rq)
7967 {
7968 	if (!rq->online) {
7969 		const struct sched_class *class;
7970 
7971 		cpumask_set_cpu(rq->cpu, rq->rd->online);
7972 		rq->online = 1;
7973 
7974 		for_each_class(class) {
7975 			if (class->rq_online)
7976 				class->rq_online(rq);
7977 		}
7978 	}
7979 }
7980 
7981 void set_rq_offline(struct rq *rq)
7982 {
7983 	if (rq->online) {
7984 		const struct sched_class *class;
7985 
7986 		update_rq_clock(rq);
7987 		for_each_class(class) {
7988 			if (class->rq_offline)
7989 				class->rq_offline(rq);
7990 		}
7991 
7992 		cpumask_clear_cpu(rq->cpu, rq->rd->online);
7993 		rq->online = 0;
7994 	}
7995 }
7996 
7997 static inline void sched_set_rq_online(struct rq *rq, int cpu)
7998 {
7999 	struct rq_flags rf;
8000 
8001 	rq_lock_irqsave(rq, &rf);
8002 	if (rq->rd) {
8003 		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
8004 		set_rq_online(rq);
8005 	}
8006 	rq_unlock_irqrestore(rq, &rf);
8007 }
8008 
8009 static inline void sched_set_rq_offline(struct rq *rq, int cpu)
8010 {
8011 	struct rq_flags rf;
8012 
8013 	rq_lock_irqsave(rq, &rf);
8014 	if (rq->rd) {
8015 		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
8016 		set_rq_offline(rq);
8017 	}
8018 	rq_unlock_irqrestore(rq, &rf);
8019 }
8020 
8021 /*
8022  * used to mark begin/end of suspend/resume:
8023  */
8024 static int num_cpus_frozen;
8025 
8026 /*
8027  * Update cpusets according to cpu_active mask.  If cpusets are
8028  * disabled, cpuset_update_active_cpus() becomes a simple wrapper
8029  * around partition_sched_domains().
8030  *
8031  * If we come here as part of a suspend/resume, don't touch cpusets because we
8032  * want to restore them to their original state upon resume anyway.
8033  */
8034 static void cpuset_cpu_active(void)
8035 {
8036 	if (cpuhp_tasks_frozen) {
8037 		/*
8038 		 * num_cpus_frozen tracks how many CPUs are involved in the
8039 		 * suspend/resume sequence. As long as this is not the last online
8040 		 * operation in the resume sequence, just build a single sched
8041 		 * domain, ignoring cpusets.
8042 		 */
8043 		partition_sched_domains(1, NULL, NULL);
8044 		if (--num_cpus_frozen)
8045 			return;
8046 		/*
8047 		 * This is the last CPU online operation. So fall through and
8048 		 * restore the original sched domains by considering the
8049 		 * cpuset configurations.
8050 		 */
8051 		cpuset_force_rebuild();
8052 	}
8053 	cpuset_update_active_cpus();
8054 }
8055 
8056 static int cpuset_cpu_inactive(unsigned int cpu)
8057 {
8058 	if (!cpuhp_tasks_frozen) {
8059 		int ret = dl_bw_check_overflow(cpu);
8060 
8061 		if (ret)
8062 			return ret;
8063 		cpuset_update_active_cpus();
8064 	} else {
8065 		num_cpus_frozen++;
8066 		partition_sched_domains(1, NULL, NULL);
8067 	}
8068 	return 0;
8069 }
8070 
8071 static inline void sched_smt_present_inc(int cpu)
8072 {
8073 #ifdef CONFIG_SCHED_SMT
8074 	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
8075 		static_branch_inc_cpuslocked(&sched_smt_present);
8076 #endif
8077 }
8078 
8079 static inline void sched_smt_present_dec(int cpu)
8080 {
8081 #ifdef CONFIG_SCHED_SMT
8082 	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
8083 		static_branch_dec_cpuslocked(&sched_smt_present);
8084 #endif
8085 }
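
/*
 * Editor's note (illustrative): with two hardware threads per core, the
 * weight of cpu_smt_mask() reaches exactly 2 when the second sibling
 * comes online and passes back through 2 when one goes away, so the
 * static key above effectively counts cores with SMT active, not CPUs.
 */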
8086 
8087 int sched_cpu_activate(unsigned int cpu)
8088 {
8089 	struct rq *rq = cpu_rq(cpu);
8090 
8091 	/*
8092 	 * Clear the balance_push callback and prepare to schedule
8093 	 * regular tasks.
8094 	 */
8095 	balance_push_set(cpu, false);
8096 
8097 	/*
8098 	 * When going up, increment the number of cores with SMT present.
8099 	 */
8100 	sched_smt_present_inc(cpu);
8101 	set_cpu_active(cpu, true);
8102 
8103 	if (sched_smp_initialized) {
8104 		sched_update_numa(cpu, true);
8105 		sched_domains_numa_masks_set(cpu);
8106 		cpuset_cpu_active();
8107 	}
8108 
8109 	scx_rq_activate(rq);
8110 
8111 	/*
8112 	 * Put the rq online, if not already. This happens:
8113 	 *
8114 	 * 1) In the early boot process, because we build the real domains
8115 	 *    after all CPUs have been brought up.
8116 	 *
8117 	 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
8118 	 *    domains.
8119 	 */
8120 	sched_set_rq_online(rq, cpu);
8121 
8122 	return 0;
8123 }
8124 
8125 int sched_cpu_deactivate(unsigned int cpu)
8126 {
8127 	struct rq *rq = cpu_rq(cpu);
8128 	int ret;
8129 
8130 	/*
8131 	 * Remove the CPU from nohz.idle_cpus_mask to prevent it from
8132 	 * participating in load balancing when not active.
8133 	 */
8134 	nohz_balance_exit_idle(rq);
8135 
8136 	set_cpu_active(cpu, false);
8137 
8138 	/*
8139 	 * From this point forward, this CPU will refuse to run any task that
8140 	 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
8141 	 * push those tasks away until this gets cleared, see
8142 	 * sched_cpu_dying().
8143 	 */
8144 	balance_push_set(cpu, true);
8145 
8146 	/*
8147 	 * We've cleared cpu_active_mask / set balance_push, wait for all
8148 	 * preempt-disabled and RCU users of this state to go away such that
8149 	 * all new such users will observe it.
8150 	 *
8151 	 * Specifically, we rely on ttwu to no longer target this CPU, see
8152 	 * ttwu_queue_cond() and is_cpu_allowed().
8153 	 *
8154 	 * Synchronize before parking smpboot threads to handle the RCU boost case.
8155 	 */
8156 	synchronize_rcu();
8157 
8158 	sched_set_rq_offline(rq, cpu);
8159 
8160 	scx_rq_deactivate(rq);
8161 
8162 	/*
8163 	 * When going down, decrement the number of cores with SMT present.
8164 	 */
8165 	sched_smt_present_dec(cpu);
8166 
8167 #ifdef CONFIG_SCHED_SMT
8168 	sched_core_cpu_deactivate(cpu);
8169 #endif
8170 
8171 	if (!sched_smp_initialized)
8172 		return 0;
8173 
8174 	sched_update_numa(cpu, false);
8175 	ret = cpuset_cpu_inactive(cpu);
8176 	if (ret) {
8177 		sched_smt_present_inc(cpu);
8178 		sched_set_rq_online(rq, cpu);
8179 		balance_push_set(cpu, false);
8180 		set_cpu_active(cpu, true);
8181 		sched_update_numa(cpu, true);
8182 		return ret;
8183 	}
8184 	sched_domains_numa_masks_clear(cpu);
8185 	return 0;
8186 }
8187 
8188 static void sched_rq_cpu_starting(unsigned int cpu)
8189 {
8190 	struct rq *rq = cpu_rq(cpu);
8191 
8192 	rq->calc_load_update = calc_load_update;
8193 	update_max_interval();
8194 }
8195 
8196 int sched_cpu_starting(unsigned int cpu)
8197 {
8198 	sched_core_cpu_starting(cpu);
8199 	sched_rq_cpu_starting(cpu);
8200 	sched_tick_start(cpu);
8201 	return 0;
8202 }
8203 
8204 #ifdef CONFIG_HOTPLUG_CPU
8205 
8206 /*
8207  * Invoked immediately before the stopper thread is invoked to bring the
8208  * CPU down completely. At this point all per CPU kthreads except the
8209  * hotplug thread (current) and the stopper thread (inactive) have been
8210  * either parked or have been unbound from the outgoing CPU. Ensure that
8211  * any of those which might be on the way out are gone.
8212  *
8213  * If after this point a bound task is being woken on this CPU then the
8214  * responsible hotplug callback has failed to do its job.
8215  * sched_cpu_dying() will catch it with the appropriate fireworks.
8216  */
8217 int sched_cpu_wait_empty(unsigned int cpu)
8218 {
8219 	balance_hotplug_wait();
8220 	return 0;
8221 }
8222 
8223 /*
8224  * Since this CPU is going 'away' for a while, fold any nr_active delta we
8225  * might have. Called from the CPU stopper task after ensuring that the
8226  * stopper is the last running task on the CPU, so nr_active count is
8227  * stable. We need to take the tear-down thread which is calling this into
8228  * account, so we hand in adjust = 1 to the load calculation.
8229  *
8230  * Also see the comment "Global load-average calculations".
8231  */
8232 static void calc_load_migrate(struct rq *rq)
8233 {
8234 	long delta = calc_load_fold_active(rq, 1);
8235 
8236 	if (delta)
8237 		atomic_long_add(delta, &calc_load_tasks);
8238 }
8239 
8240 static void dump_rq_tasks(struct rq *rq, const char *loglvl)
8241 {
8242 	struct task_struct *g, *p;
8243 	int cpu = cpu_of(rq);
8244 
8245 	lockdep_assert_rq_held(rq);
8246 
8247 	printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
8248 	for_each_process_thread(g, p) {
8249 		if (task_cpu(p) != cpu)
8250 			continue;
8251 
8252 		if (!task_on_rq_queued(p))
8253 			continue;
8254 
8255 		printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
8256 	}
8257 }
8258 
8259 int sched_cpu_dying(unsigned int cpu)
8260 {
8261 	struct rq *rq = cpu_rq(cpu);
8262 	struct rq_flags rf;
8263 
8264 	/* Handle pending wakeups and then migrate everything off */
8265 	sched_tick_stop(cpu);
8266 
8267 	rq_lock_irqsave(rq, &rf);
8268 	if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
8269 		WARN(true, "Dying CPU not properly vacated!");
8270 		dump_rq_tasks(rq, KERN_WARNING);
8271 	}
8272 	rq_unlock_irqrestore(rq, &rf);
8273 
8274 	calc_load_migrate(rq);
8275 	update_max_interval();
8276 	hrtick_clear(rq);
8277 	sched_core_cpu_dying(cpu);
8278 	return 0;
8279 }
8280 #endif
8281 
8282 void __init sched_init_smp(void)
8283 {
8284 	sched_init_numa(NUMA_NO_NODE);
8285 
8286 	/*
8287 	 * There's no userspace yet to cause hotplug operations; hence all the
8288 	 * CPU masks are stable and all blatant races in the below code cannot
8289 	 * happen.
8290 	 */
8291 	mutex_lock(&sched_domains_mutex);
8292 	sched_init_domains(cpu_active_mask);
8293 	mutex_unlock(&sched_domains_mutex);
8294 
8295 	/* Move init over to a non-isolated CPU */
8296 	if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0)
8297 		BUG();
8298 	current->flags &= ~PF_NO_SETAFFINITY;
8299 	sched_init_granularity();
8300 
8301 	init_sched_rt_class();
8302 	init_sched_dl_class();
8303 
8304 	sched_smp_initialized = true;
8305 }
8306 
8307 static int __init migration_init(void)
8308 {
8309 	sched_cpu_starting(smp_processor_id());
8310 	return 0;
8311 }
8312 early_initcall(migration_init);
8313 
8314 #else
8315 void __init sched_init_smp(void)
8316 {
8317 	sched_init_granularity();
8318 }
8319 #endif /* CONFIG_SMP */
8320 
8321 int in_sched_functions(unsigned long addr)
8322 {
8323 	return in_lock_functions(addr) ||
8324 		(addr >= (unsigned long)__sched_text_start
8325 		&& addr < (unsigned long)__sched_text_end);
8326 }
8327 
8328 #ifdef CONFIG_CGROUP_SCHED
8329 /*
8330  * Default task group.
8331  * Every task in the system belongs to this group at bootup.
8332  */
8333 struct task_group root_task_group;
8334 LIST_HEAD(task_groups);
8335 
8336 /* Cacheline aligned slab cache for task_group */
8337 static struct kmem_cache *task_group_cache __ro_after_init;
8338 #endif
8339 
8340 void __init sched_init(void)
8341 {
8342 	unsigned long ptr = 0;
8343 	int i;
8344 
8345 	/* Make sure the linker didn't screw up */
8346 #ifdef CONFIG_SMP
8347 	BUG_ON(!sched_class_above(&stop_sched_class, &dl_sched_class));
8348 #endif
8349 	BUG_ON(!sched_class_above(&dl_sched_class, &rt_sched_class));
8350 	BUG_ON(!sched_class_above(&rt_sched_class, &fair_sched_class));
8351 	BUG_ON(!sched_class_above(&fair_sched_class, &idle_sched_class));
8352 #ifdef CONFIG_SCHED_CLASS_EXT
8353 	BUG_ON(!sched_class_above(&fair_sched_class, &ext_sched_class));
8354 	BUG_ON(!sched_class_above(&ext_sched_class, &idle_sched_class));
8355 #endif
8356 
8357 	wait_bit_init();
8358 
8359 #ifdef CONFIG_FAIR_GROUP_SCHED
8360 	ptr += 2 * nr_cpu_ids * sizeof(void **);
8361 #endif
8362 #ifdef CONFIG_RT_GROUP_SCHED
8363 	ptr += 2 * nr_cpu_ids * sizeof(void **);
8364 #endif
8365 	if (ptr) {
8366 		ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT);
8367 
8368 #ifdef CONFIG_FAIR_GROUP_SCHED
8369 		root_task_group.se = (struct sched_entity **)ptr;
8370 		ptr += nr_cpu_ids * sizeof(void **);
8371 
8372 		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
8373 		ptr += nr_cpu_ids * sizeof(void **);
8374 
8375 		root_task_group.shares = ROOT_TASK_GROUP_LOAD;
8376 		init_cfs_bandwidth(&root_task_group.cfs_bandwidth, NULL);
8377 #endif /* CONFIG_FAIR_GROUP_SCHED */
8378 #ifdef CONFIG_EXT_GROUP_SCHED
8379 		root_task_group.scx_weight = CGROUP_WEIGHT_DFL;
8380 #endif /* CONFIG_EXT_GROUP_SCHED */
8381 #ifdef CONFIG_RT_GROUP_SCHED
8382 		root_task_group.rt_se = (struct sched_rt_entity **)ptr;
8383 		ptr += nr_cpu_ids * sizeof(void **);
8384 
8385 		root_task_group.rt_rq = (struct rt_rq **)ptr;
8386 		ptr += nr_cpu_ids * sizeof(void **);
8387 
8388 #endif /* CONFIG_RT_GROUP_SCHED */
8389 	}
8390 
8391 #ifdef CONFIG_SMP
8392 	init_defrootdomain();
8393 #endif
8394 
8395 #ifdef CONFIG_RT_GROUP_SCHED
8396 	init_rt_bandwidth(&root_task_group.rt_bandwidth,
8397 			global_rt_period(), global_rt_runtime());
8398 #endif /* CONFIG_RT_GROUP_SCHED */
8399 
8400 #ifdef CONFIG_CGROUP_SCHED
8401 	task_group_cache = KMEM_CACHE(task_group, 0);
8402 
8403 	list_add(&root_task_group.list, &task_groups);
8404 	INIT_LIST_HEAD(&root_task_group.children);
8405 	INIT_LIST_HEAD(&root_task_group.siblings);
8406 	autogroup_init(&init_task);
8407 #endif /* CONFIG_CGROUP_SCHED */
8408 
8409 	for_each_possible_cpu(i) {
8410 		struct rq *rq;
8411 
8412 		rq = cpu_rq(i);
8413 		raw_spin_lock_init(&rq->__lock);
8414 		rq->nr_running = 0;
8415 		rq->calc_load_active = 0;
8416 		rq->calc_load_update = jiffies + LOAD_FREQ;
8417 		init_cfs_rq(&rq->cfs);
8418 		init_rt_rq(&rq->rt);
8419 		init_dl_rq(&rq->dl);
8420 #ifdef CONFIG_FAIR_GROUP_SCHED
8421 		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
8422 		rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
8423 		/*
8424 		 * How much CPU bandwidth does root_task_group get?
8425 		 *
8426 		 * In case of task-groups formed through the cgroup filesystem, it
8427 		 * gets 100% of the CPU resources in the system. This overall
8428 		 * system CPU resource is divided among the tasks of
8429 		 * root_task_group and its child task-groups in a fair manner,
8430 		 * based on each entity's (task or task-group's) weight
8431 		 * (se->load.weight).
8432 		 *
8433 		 * In other words, if root_task_group has 10 tasks of weight
8434 		 * 1024 and two child groups A0 and A1 (of weight 1024 each),
8435 		 * then A0's share of the CPU resource is:
8436 		 *
8437 		 *	A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
8438 		 *
8439 		 * We achieve this by letting root_task_group's tasks sit
8440 		 * directly in rq->cfs (i.e. root_task_group->se[] = NULL).
8441 		 */
8442 		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
8443 #endif /* CONFIG_FAIR_GROUP_SCHED */
8444 
8445 #ifdef CONFIG_RT_GROUP_SCHED
8446 		/*
8447 		 * This is required for init cpu because rt.c:__enable_runtime()
8448 		 * starts working after scheduler_running, which is not the case
8449 		 * yet.
8450 		 */
8451 		rq->rt.rt_runtime = global_rt_runtime();
8452 		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
8453 #endif
8454 #ifdef CONFIG_SMP
8455 		rq->sd = NULL;
8456 		rq->rd = NULL;
8457 		rq->cpu_capacity = SCHED_CAPACITY_SCALE;
8458 		rq->balance_callback = &balance_push_callback;
8459 		rq->active_balance = 0;
8460 		rq->next_balance = jiffies;
8461 		rq->push_cpu = 0;
8462 		rq->cpu = i;
8463 		rq->online = 0;
8464 		rq->idle_stamp = 0;
8465 		rq->avg_idle = 2*sysctl_sched_migration_cost;
8466 		rq->max_idle_balance_cost = sysctl_sched_migration_cost;
8467 
8468 		INIT_LIST_HEAD(&rq->cfs_tasks);
8469 
8470 		rq_attach_root(rq, &def_root_domain);
8471 #ifdef CONFIG_NO_HZ_COMMON
8472 		rq->last_blocked_load_update_tick = jiffies;
8473 		atomic_set(&rq->nohz_flags, 0);
8474 
8475 		INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
8476 #endif
8477 #ifdef CONFIG_HOTPLUG_CPU
8478 		rcuwait_init(&rq->hotplug_wait);
8479 #endif
8480 #endif /* CONFIG_SMP */
8481 		hrtick_rq_init(rq);
8482 		atomic_set(&rq->nr_iowait, 0);
8483 		fair_server_init(rq);
8484 
8485 #ifdef CONFIG_SCHED_CORE
8486 		rq->core = rq;
8487 		rq->core_pick = NULL;
8488 		rq->core_dl_server = NULL;
8489 		rq->core_enabled = 0;
8490 		rq->core_tree = RB_ROOT;
8491 		rq->core_forceidle_count = 0;
8492 		rq->core_forceidle_occupation = 0;
8493 		rq->core_forceidle_start = 0;
8494 
8495 		rq->core_cookie = 0UL;
8496 #endif
8497 		zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i));
8498 	}
8499 
8500 	set_load_weight(&init_task, false);
8501 	init_task.se.slice = sysctl_sched_base_slice;
8502 
8503 	/*
8504 	 * The boot idle thread does lazy MMU switching as well:
8505 	 */
8506 	mmgrab_lazy_tlb(&init_mm);
8507 	enter_lazy_tlb(&init_mm, current);
8508 
8509 	/*
8510 	 * The idle task doesn't need the kthread struct to function, but it
8511 	 * is dressed up as a per-CPU kthread and thus needs to play the part
8512 	 * if we want to avoid special-casing it in code that deals with per-CPU
8513 	 * kthreads.
8514 	 */
8515 	WARN_ON(!set_kthread_struct(current));
8516 
8517 	/*
8518 	 * Make us the idle thread. Technically, schedule() should not be
8519 	 * called from this thread; however, somewhere below it might be,
8520 	 * but because we are the idle thread, we just pick up running again
8521 	 * when this runqueue becomes "idle".
8522 	 */
8523 	init_idle(current, smp_processor_id());
8524 
8525 	calc_load_update = jiffies + LOAD_FREQ;
8526 
8527 #ifdef CONFIG_SMP
8528 	idle_thread_set_boot_cpu();
8529 	balance_push_set(smp_processor_id(), false);
8530 #endif
8531 	init_sched_fair_class();
8532 	init_sched_ext_class();
8533 
8534 	psi_init();
8535 
8536 	init_uclamp();
8537 
8538 	preempt_dynamic_init();
8539 
8540 	scheduler_running = 1;
8541 }
8542 
8543 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
8544 
8545 void __might_sleep(const char *file, int line)
8546 {
8547 	unsigned int state = get_current_state();
8548 	/*
8549 	 * Blocking primitives will set (and therefore destroy) current->state,
8550 	 * since we will exit with TASK_RUNNING make sure we enter with it,
8551 	 * otherwise we will destroy state.
8552 	 */
8553 	WARN_ONCE(state != TASK_RUNNING && current->task_state_change,
8554 			"do not call blocking ops when !TASK_RUNNING; "
8555 			"state=%x set at [<%p>] %pS\n", state,
8556 			(void *)current->task_state_change,
8557 			(void *)current->task_state_change);
8558 
8559 	__might_resched(file, line, 0);
8560 }
8561 EXPORT_SYMBOL(__might_sleep);
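
/*
 * Illustrative sketch (editor's example): annotating a sleepable entry
 * point with might_sleep() catches atomic-context callers early, even
 * when the allocation below happens not to block:
 */
static __maybe_unused void *example_alloc(size_t size)
{
	might_sleep();
	return kmalloc(size, GFP_KERNEL);
}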
8562 
8563 static void print_preempt_disable_ip(int preempt_offset, unsigned long ip)
8564 {
8565 	if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT))
8566 		return;
8567 
8568 	if (preempt_count() == preempt_offset)
8569 		return;
8570 
8571 	pr_err("Preemption disabled at:");
8572 	print_ip_sym(KERN_ERR, ip);
8573 }
8574 
8575 static inline bool resched_offsets_ok(unsigned int offsets)
8576 {
8577 	unsigned int nested = preempt_count();
8578 
8579 	nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT;
8580 
8581 	return nested == offsets;
8582 }
8583 
8584 void __might_resched(const char *file, int line, unsigned int offsets)
8585 {
8586 	/* Ratelimiting timestamp: */
8587 	static unsigned long prev_jiffy;
8588 
8589 	unsigned long preempt_disable_ip;
8590 
8591 	/* WARN_ON_ONCE() by default, no rate limit required: */
8592 	rcu_sleep_check();
8593 
8594 	if ((resched_offsets_ok(offsets) && !irqs_disabled() &&
8595 	     !is_idle_task(current) && !current->non_block_count) ||
8596 	    system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
8597 	    oops_in_progress)
8598 		return;
8599 
8600 	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8601 		return;
8602 	prev_jiffy = jiffies;
8603 
8604 	/* Save this before calling printk(), since that will clobber it: */
8605 	preempt_disable_ip = get_preempt_disable_ip(current);
8606 
8607 	pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
8608 	       file, line);
8609 	pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
8610 	       in_atomic(), irqs_disabled(), current->non_block_count,
8611 	       current->pid, current->comm);
8612 	pr_err("preempt_count: %x, expected: %x\n", preempt_count(),
8613 	       offsets & MIGHT_RESCHED_PREEMPT_MASK);
8614 
8615 	if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
8616 		pr_err("RCU nest depth: %d, expected: %u\n",
8617 		       rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT);
8618 	}
8619 
8620 	if (task_stack_end_corrupted(current))
8621 		pr_emerg("Thread overran stack, or stack corrupted\n");
8622 
8623 	debug_show_held_locks(current);
8624 	if (irqs_disabled())
8625 		print_irqtrace_events(current);
8626 
8627 	print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK,
8628 				 preempt_disable_ip);
8629 
8630 	dump_stack();
8631 	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
8632 }
8633 EXPORT_SYMBOL(__might_resched);
8634 
8635 void __cant_sleep(const char *file, int line, int preempt_offset)
8636 {
8637 	static unsigned long prev_jiffy;
8638 
8639 	if (irqs_disabled())
8640 		return;
8641 
8642 	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
8643 		return;
8644 
8645 	if (preempt_count() > preempt_offset)
8646 		return;
8647 
8648 	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8649 		return;
8650 	prev_jiffy = jiffies;
8651 
8652 	printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
8653 	printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
8654 			in_atomic(), irqs_disabled(),
8655 			current->pid, current->comm);
8656 
8657 	debug_show_held_locks(current);
8658 	dump_stack();
8659 	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
8660 }
8661 EXPORT_SYMBOL_GPL(__cant_sleep);
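
/*
 * Illustrative sketch (editor's example): the cant_sleep() wrapper
 * around __cant_sleep() documents and verifies that a function is only
 * entered from atomic context:
 */
static __maybe_unused void example_atomic_only(void)
{
	cant_sleep();
	/* ... code relying on atomic (non-sleeping) context ... */
}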
8662 
8663 #ifdef CONFIG_SMP
8664 void __cant_migrate(const char *file, int line)
8665 {
8666 	static unsigned long prev_jiffy;
8667 
8668 	if (irqs_disabled())
8669 		return;
8670 
8671 	if (is_migration_disabled(current))
8672 		return;
8673 
8674 	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
8675 		return;
8676 
8677 	if (preempt_count() > 0)
8678 		return;
8679 
8680 	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8681 		return;
8682 	prev_jiffy = jiffies;
8683 
8684 	pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
8685 	pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
8686 	       in_atomic(), irqs_disabled(), is_migration_disabled(current),
8687 	       current->pid, current->comm);
8688 
8689 	debug_show_held_locks(current);
8690 	dump_stack();
8691 	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
8692 }
8693 EXPORT_SYMBOL_GPL(__cant_migrate);
8694 #endif
8695 #endif
8696 
8697 #ifdef CONFIG_MAGIC_SYSRQ
8698 void normalize_rt_tasks(void)
8699 {
8700 	struct task_struct *g, *p;
8701 	struct sched_attr attr = {
8702 		.sched_policy = SCHED_NORMAL,
8703 	};
8704 
8705 	read_lock(&tasklist_lock);
8706 	for_each_process_thread(g, p) {
8707 		/*
8708 		 * Only normalize user tasks:
8709 		 */
8710 		if (p->flags & PF_KTHREAD)
8711 			continue;
8712 
8713 		p->se.exec_start = 0;
8714 		schedstat_set(p->stats.wait_start,  0);
8715 		schedstat_set(p->stats.sleep_start, 0);
8716 		schedstat_set(p->stats.block_start, 0);
8717 
8718 		if (!rt_or_dl_task(p)) {
8719 			/*
8720 			 * Renice negative nice level userspace
8721 			 * tasks back to 0:
8722 			 */
8723 			if (task_nice(p) < 0)
8724 				set_user_nice(p, 0);
8725 			continue;
8726 		}
8727 
8728 		__sched_setscheduler(p, &attr, false, false);
8729 	}
8730 	read_unlock(&tasklist_lock);
8731 }
8732 
8733 #endif /* CONFIG_MAGIC_SYSRQ */
8734 
8735 #if defined(CONFIG_KGDB_KDB)
8736 /*
8737  * These functions are only useful for KDB.
8738  *
8739  * They can only be called when the whole system has been
8740  * stopped - every CPU needs to be quiescent, and no scheduling
8741  * activity can take place. Using them for anything else would
8742  * be a serious bug, and as a result, they aren't even visible
8743  * under any other configuration.
8744  */
8745 
8746 /**
8747  * curr_task - return the current task for a given CPU.
8748  * @cpu: the processor in question.
8749  *
8750  * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
8751  *
8752  * Return: The current task for @cpu.
8753  */
8754 struct task_struct *curr_task(int cpu)
8755 {
8756 	return cpu_curr(cpu);
8757 }
8758 
8759 #endif /* defined(CONFIG_KGDB_KDB) */
8760 
8761 #ifdef CONFIG_CGROUP_SCHED
8762 /* task_group_lock serializes the addition/removal of task groups */
8763 static DEFINE_SPINLOCK(task_group_lock);
8764 
8765 static inline void alloc_uclamp_sched_group(struct task_group *tg,
8766 					    struct task_group *parent)
8767 {
8768 #ifdef CONFIG_UCLAMP_TASK_GROUP
8769 	enum uclamp_id clamp_id;
8770 
8771 	for_each_clamp_id(clamp_id) {
8772 		uclamp_se_set(&tg->uclamp_req[clamp_id],
8773 			      uclamp_none(clamp_id), false);
8774 		tg->uclamp[clamp_id] = parent->uclamp[clamp_id];
8775 	}
8776 #endif
8777 }
8778 
8779 static void sched_free_group(struct task_group *tg)
8780 {
8781 	free_fair_sched_group(tg);
8782 	free_rt_sched_group(tg);
8783 	autogroup_free(tg);
8784 	kmem_cache_free(task_group_cache, tg);
8785 }
8786 
8787 static void sched_free_group_rcu(struct rcu_head *rcu)
8788 {
8789 	sched_free_group(container_of(rcu, struct task_group, rcu));
8790 }
8791 
8792 static void sched_unregister_group(struct task_group *tg)
8793 {
8794 	unregister_fair_sched_group(tg);
8795 	unregister_rt_sched_group(tg);
8796 	/*
8797 	 * We have to wait for yet another RCU grace period to expire, as
8798 	 * print_cfs_stats() might run concurrently.
8799 	 */
8800 	call_rcu(&tg->rcu, sched_free_group_rcu);
8801 }
8802 
8803 /* allocate runqueue etc for a new task group */
8804 struct task_group *sched_create_group(struct task_group *parent)
8805 {
8806 	struct task_group *tg;
8807 
8808 	tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
8809 	if (!tg)
8810 		return ERR_PTR(-ENOMEM);
8811 
8812 	if (!alloc_fair_sched_group(tg, parent))
8813 		goto err;
8814 
8815 	if (!alloc_rt_sched_group(tg, parent))
8816 		goto err;
8817 
8818 	scx_group_set_weight(tg, CGROUP_WEIGHT_DFL);
8819 	alloc_uclamp_sched_group(tg, parent);
8820 
8821 	return tg;
8822 
8823 err:
8824 	sched_free_group(tg);
8825 	return ERR_PTR(-ENOMEM);
8826 }
8827 
8828 void sched_online_group(struct task_group *tg, struct task_group *parent)
8829 {
8830 	unsigned long flags;
8831 
8832 	spin_lock_irqsave(&task_group_lock, flags);
8833 	list_add_rcu(&tg->list, &task_groups);
8834 
8835 	/* Root should already exist: */
8836 	WARN_ON(!parent);
8837 
8838 	tg->parent = parent;
8839 	INIT_LIST_HEAD(&tg->children);
8840 	list_add_rcu(&tg->siblings, &parent->children);
8841 	spin_unlock_irqrestore(&task_group_lock, flags);
8842 
8843 	online_fair_sched_group(tg);
8844 }
8845 
8846 /* RCU callback to free various structures associated with a task group */
8847 static void sched_unregister_group_rcu(struct rcu_head *rhp)
8848 {
8849 	/* Now it should be safe to free those cfs_rqs: */
8850 	sched_unregister_group(container_of(rhp, struct task_group, rcu));
8851 }
8852 
8853 void sched_destroy_group(struct task_group *tg)
8854 {
8855 	/* Wait for possible concurrent references to cfs_rqs to complete: */
8856 	call_rcu(&tg->rcu, sched_unregister_group_rcu);
8857 }
8858 
8859 void sched_release_group(struct task_group *tg)
8860 {
8861 	unsigned long flags;
8862 
8863 	/*
8864 	 * Unlink first, to prevent walk_tg_tree_from() from finding us (via
8865 	 * sched_cfs_period_timer()).
8866 	 *
8867 	 * For this to be effective, we have to wait for all pending users of
8868 	 * this task group to leave their RCU critical section to ensure no new
8869 	 * user will see our dying task group any more. Specifically ensure
8870 	 * that tg_unthrottle_up() won't add decayed cfs_rq's to it.
8871 	 *
8872 	 * We therefore defer calling unregister_fair_sched_group() to
8873 	 * sched_unregister_group(), which is guaranteed to get called only after the
8874 	 * current RCU grace period has expired.
8875 	 */
8876 	spin_lock_irqsave(&task_group_lock, flags);
8877 	list_del_rcu(&tg->list);
8878 	list_del_rcu(&tg->siblings);
8879 	spin_unlock_irqrestore(&task_group_lock, flags);
8880 }
8881 
8882 static struct task_group *sched_get_task_group(struct task_struct *tsk)
8883 {
8884 	struct task_group *tg;
8885 
8886 	/*
8887 	 * All callers are synchronized by task_rq_lock(); we do not use RCU,
8888 	 * which would be pointless here. Thus, we pass "true" to task_css_check()
8889 	 * to prevent lockdep warnings.
8890 	 */
8891 	tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
8892 			  struct task_group, css);
8893 	tg = autogroup_task_group(tsk, tg);
8894 
8895 	return tg;
8896 }
8897 
8898 static void sched_change_group(struct task_struct *tsk, struct task_group *group)
8899 {
8900 	tsk->sched_task_group = group;
8901 
8902 #ifdef CONFIG_FAIR_GROUP_SCHED
8903 	if (tsk->sched_class->task_change_group)
8904 		tsk->sched_class->task_change_group(tsk);
8905 	else
8906 #endif
8907 		set_task_rq(tsk, task_cpu(tsk));
8908 }
8909 
8910 /*
8911  * Change task's runqueue when it moves between groups.
8912  *
8913  * The caller of this function should have put the task in its new group by
8914  * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
8915  * its new group.
8916  */
8917 void sched_move_task(struct task_struct *tsk)
8918 {
8919 	int queued, running, queue_flags =
8920 		DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
8921 	struct task_group *group;
8922 	struct rq *rq;
8923 
8924 	CLASS(task_rq_lock, rq_guard)(tsk);
8925 	rq = rq_guard.rq;
8926 
8927 	/*
8928 	 * Especially with SCHED_AUTOGROUP enabled it is possible to get superfluous
8929 	 * group changes.
8930 	 */
8931 	group = sched_get_task_group(tsk);
8932 	if (group == tsk->sched_task_group)
8933 		return;
8934 
8935 	update_rq_clock(rq);
8936 
8937 	running = task_current(rq, tsk);
8938 	queued = task_on_rq_queued(tsk);
8939 
8940 	if (queued)
8941 		dequeue_task(rq, tsk, queue_flags);
8942 	if (running)
8943 		put_prev_task(rq, tsk);
8944 
8945 	sched_change_group(tsk, group);
8946 	scx_move_task(tsk);
8947 
8948 	if (queued)
8949 		enqueue_task(rq, tsk, queue_flags);
8950 	if (running) {
8951 		set_next_task(rq, tsk);
8952 		/*
8953 		 * After changing group, the running task may have joined a
8954 		 * throttled one but it's still the running task. Trigger a
8955 		 * resched to make sure that task can still run.
8956 		 */
8957 		resched_curr(rq);
8958 	}
8959 }
8960 
8961 static struct cgroup_subsys_state *
8962 cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
8963 {
8964 	struct task_group *parent = css_tg(parent_css);
8965 	struct task_group *tg;
8966 
8967 	if (!parent) {
8968 		/* This is early initialization for the top cgroup */
8969 		return &root_task_group.css;
8970 	}
8971 
8972 	tg = sched_create_group(parent);
8973 	if (IS_ERR(tg))
8974 		return ERR_PTR(-ENOMEM);
8975 
8976 	return &tg->css;
8977 }
8978 
8979 /* Expose task group only after completing cgroup initialization */
8980 static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
8981 {
8982 	struct task_group *tg = css_tg(css);
8983 	struct task_group *parent = css_tg(css->parent);
8984 	int ret;
8985 
8986 	ret = scx_tg_online(tg);
8987 	if (ret)
8988 		return ret;
8989 
8990 	if (parent)
8991 		sched_online_group(tg, parent);
8992 
8993 #ifdef CONFIG_UCLAMP_TASK_GROUP
8994 	/* Propagate the effective uclamp value for the new group */
8995 	guard(mutex)(&uclamp_mutex);
8996 	guard(rcu)();
8997 	cpu_util_update_eff(css);
8998 #endif
8999 
9000 	return 0;
9001 }
9002 
9003 static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
9004 {
9005 	struct task_group *tg = css_tg(css);
9006 
9007 	scx_tg_offline(tg);
9008 }
9009 
9010 static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
9011 {
9012 	struct task_group *tg = css_tg(css);
9013 
9014 	sched_release_group(tg);
9015 }
9016 
9017 static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
9018 {
9019 	struct task_group *tg = css_tg(css);
9020 
9021 	/*
9022 	 * Relies on the RCU grace period between css_released() and this.
9023 	 */
9024 	sched_unregister_group(tg);
9025 }
9026 
9027 static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
9028 {
9029 #ifdef CONFIG_RT_GROUP_SCHED
9030 	struct task_struct *task;
9031 	struct cgroup_subsys_state *css;
9032 
9033 	cgroup_taskset_for_each(task, css, tset) {
9034 		if (!sched_rt_can_attach(css_tg(css), task))
9035 			return -EINVAL;
9036 	}
9037 #endif
9038 	return scx_cgroup_can_attach(tset);
9039 }
9040 
9041 static void cpu_cgroup_attach(struct cgroup_taskset *tset)
9042 {
9043 	struct task_struct *task;
9044 	struct cgroup_subsys_state *css;
9045 
9046 	cgroup_taskset_for_each(task, css, tset)
9047 		sched_move_task(task);
9048 
9049 	scx_cgroup_finish_attach();
9050 }
9051 
9052 static void cpu_cgroup_cancel_attach(struct cgroup_taskset *tset)
9053 {
9054 	scx_cgroup_cancel_attach(tset);
9055 }
9056 
9057 #ifdef CONFIG_UCLAMP_TASK_GROUP
9058 static void cpu_util_update_eff(struct cgroup_subsys_state *css)
9059 {
9060 	struct cgroup_subsys_state *top_css = css;
9061 	struct uclamp_se *uc_parent = NULL;
9062 	struct uclamp_se *uc_se = NULL;
9063 	unsigned int eff[UCLAMP_CNT];
9064 	enum uclamp_id clamp_id;
9065 	unsigned int clamps;
9066 
9067 	lockdep_assert_held(&uclamp_mutex);
9068 	SCHED_WARN_ON(!rcu_read_lock_held());
9069 
9070 	css_for_each_descendant_pre(css, top_css) {
9071 		uc_parent = css_tg(css)->parent
9072 			? css_tg(css)->parent->uclamp : NULL;
9073 
9074 		for_each_clamp_id(clamp_id) {
9075 			/* Assume effective clamps match requested clamps */
9076 			eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value;
9077 			/* Cap effective clamps with parent's effective clamps */
9078 			if (uc_parent &&
9079 			    eff[clamp_id] > uc_parent[clamp_id].value) {
9080 				eff[clamp_id] = uc_parent[clamp_id].value;
9081 			}
9082 		}
9083 		/* Ensure protection is always capped by limit */
9084 		eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]);
9085 
9086 		/* Propagate most restrictive effective clamps */
9087 		clamps = 0x0;
9088 		uc_se = css_tg(css)->uclamp;
9089 		for_each_clamp_id(clamp_id) {
9090 			if (eff[clamp_id] == uc_se[clamp_id].value)
9091 				continue;
9092 			uc_se[clamp_id].value = eff[clamp_id];
9093 			uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]);
9094 			clamps |= (0x1 << clamp_id);
9095 		}
9096 		if (!clamps) {
9097 			css = css_rightmost_descendant(css);
9098 			continue;
9099 		}
9100 
9101 		/* Immediately update descendants RUNNABLE tasks */
9102 		uclamp_update_active_tasks(css);
9103 	}
9104 }
9105 
9106 /*
9107  * Compute the integer 10^N for a given exponent N by casting the literal
9108  * "1eN" C expression to an integer. Since there is no way to convert a
9109  * macro argument (N) into a character constant, use two levels of macros.
9110  */
9111 #define _POW10(exp) ((unsigned int)1e##exp)
9112 #define POW10(exp) _POW10(exp)
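
/* For example, POW10(2) expands to _POW10(2) -> (unsigned int)1e2 == 100U. */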
9113 
9114 struct uclamp_request {
9115 #define UCLAMP_PERCENT_SHIFT	2
9116 #define UCLAMP_PERCENT_SCALE	(100 * POW10(UCLAMP_PERCENT_SHIFT))
9117 	s64 percent;
9118 	u64 util;
9119 	int ret;
9120 };
9121 
9122 static inline struct uclamp_request
9123 capacity_from_percent(char *buf)
9124 {
9125 	struct uclamp_request req = {
9126 		.percent = UCLAMP_PERCENT_SCALE,
9127 		.util = SCHED_CAPACITY_SCALE,
9128 		.ret = 0,
9129 	};
9130 
9131 	buf = strim(buf);
9132 	if (strcmp(buf, "max")) {
9133 		req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT,
9134 					     &req.percent);
9135 		if (req.ret)
9136 			return req;
9137 		if ((u64)req.percent > UCLAMP_PERCENT_SCALE) {
9138 			req.ret = -ERANGE;
9139 			return req;
9140 		}
9141 
9142 		req.util = req.percent << SCHED_CAPACITY_SHIFT;
9143 		req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE);
9144 	}
9145 
9146 	return req;
9147 }
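
/*
 * Editor's note (worked example): writing "50" parses to a percent value
 * of 50 * 10^UCLAMP_PERCENT_SHIFT = 5000; the clamp value then becomes
 * (5000 << SCHED_CAPACITY_SHIFT) / UCLAMP_PERCENT_SCALE = 5120000 / 10000,
 * which rounds to 512, i.e. half of SCHED_CAPACITY_SCALE.
 */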
9148 
9149 static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf,
9150 				size_t nbytes, loff_t off,
9151 				enum uclamp_id clamp_id)
9152 {
9153 	struct uclamp_request req;
9154 	struct task_group *tg;
9155 
9156 	req = capacity_from_percent(buf);
9157 	if (req.ret)
9158 		return req.ret;
9159 
9160 	static_branch_enable(&sched_uclamp_used);
9161 
9162 	guard(mutex)(&uclamp_mutex);
9163 	guard(rcu)();
9164 
9165 	tg = css_tg(of_css(of));
9166 	if (tg->uclamp_req[clamp_id].value != req.util)
9167 		uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false);
9168 
9169 	/*
9170 	 * Because the conversion rounding is not recoverable, keep track of the
9171 	 * exact requested percent value
9172 	 */
9173 	tg->uclamp_pct[clamp_id] = req.percent;
9174 
9175 	/* Update effective clamps to track the most restrictive value */
9176 	cpu_util_update_eff(of_css(of));
9177 
9178 	return nbytes;
9179 }
9180 
9181 static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of,
9182 				    char *buf, size_t nbytes,
9183 				    loff_t off)
9184 {
9185 	return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MIN);
9186 }
9187 
9188 static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of,
9189 				    char *buf, size_t nbytes,
9190 				    loff_t off)
9191 {
9192 	return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MAX);
9193 }
9194 
9195 static inline void cpu_uclamp_print(struct seq_file *sf,
9196 				    enum uclamp_id clamp_id)
9197 {
9198 	struct task_group *tg;
9199 	u64 util_clamp;
9200 	u64 percent;
9201 	u32 rem;
9202 
9203 	scoped_guard (rcu) {
9204 		tg = css_tg(seq_css(sf));
9205 		util_clamp = tg->uclamp_req[clamp_id].value;
9206 	}
9207 
9208 	if (util_clamp == SCHED_CAPACITY_SCALE) {
9209 		seq_puts(sf, "max\n");
9210 		return;
9211 	}
9212 
9213 	percent = tg->uclamp_pct[clamp_id];
9214 	percent = div_u64_rem(percent, POW10(UCLAMP_PERCENT_SHIFT), &rem);
9215 	seq_printf(sf, "%llu.%0*u\n", percent, UCLAMP_PERCENT_SHIFT, rem);
9216 }
9217 
9218 static int cpu_uclamp_min_show(struct seq_file *sf, void *v)
9219 {
9220 	cpu_uclamp_print(sf, UCLAMP_MIN);
9221 	return 0;
9222 }
9223 
9224 static int cpu_uclamp_max_show(struct seq_file *sf, void *v)
9225 {
9226 	cpu_uclamp_print(sf, UCLAMP_MAX);
9227 	return 0;
9228 }
9229 #endif /* CONFIG_UCLAMP_TASK_GROUP */
9230 
9231 #ifdef CONFIG_GROUP_SCHED_WEIGHT
9232 static unsigned long tg_weight(struct task_group *tg)
9233 {
9234 #ifdef CONFIG_FAIR_GROUP_SCHED
9235 	return scale_load_down(tg->shares);
9236 #else
9237 	return sched_weight_from_cgroup(tg->scx_weight);
9238 #endif
9239 }
9240 
9241 static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
9242 				struct cftype *cftype, u64 shareval)
9243 {
9244 	int ret;
9245 
9246 	if (shareval > scale_load_down(ULONG_MAX))
9247 		shareval = MAX_SHARES;
9248 	ret = sched_group_set_shares(css_tg(css), scale_load(shareval));
9249 	if (!ret)
9250 		scx_group_set_weight(css_tg(css),
9251 				     sched_weight_to_cgroup(shareval));
9252 	return ret;
9253 }
9254 
9255 static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
9256 			       struct cftype *cft)
9257 {
9258 	return tg_weight(css_tg(css));
9259 }
9260 #endif /* CONFIG_GROUP_SCHED_WEIGHT */
9261 
9262 #ifdef CONFIG_CFS_BANDWIDTH
9263 static DEFINE_MUTEX(cfs_constraints_mutex);
9264 
9265 const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
9266 static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
9267 /* More than 203 days if BW_SHIFT equals 20. */
9268 static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC;
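
/*
 * Spot-check of the bound above, assuming MAX_BW is
 * ((1ULL << (64 - BW_SHIFT)) - 1) microseconds: with BW_SHIFT == 20 that
 * is 2^44 - 1 usec ~= 1.76e13 usec ~= 1.76e7 s, i.e. roughly 203.6 days.
 */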
9269 
9270 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
9271 
9272 static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota,
9273 				u64 burst)
9274 {
9275 	int i, ret = 0, runtime_enabled, runtime_was_enabled;
9276 	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9277 
9278 	if (tg == &root_task_group)
9279 		return -EINVAL;
9280 
9281 	/*
9282 	 * Ensure we have at least some amount of bandwidth every period.  This is
9283 	 * to prevent reaching a state of large arrears when throttled via
9284 	 * entity_tick() resulting in prolonged exit starvation.
9285 	 */
9286 	if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
9287 		return -EINVAL;
9288 
9289 	/*
9290 	 * Likewise, bound things on the other side by preventing insane quota
9291 	 * periods.  This also allows us to normalize in computing quota
9292 	 * feasibility.
9293 	 */
9294 	if (period > max_cfs_quota_period)
9295 		return -EINVAL;
9296 
9297 	/*
9298 	 * Bound quota to defend against overflow during the bandwidth shift.
9299 	 */
9300 	if (quota != RUNTIME_INF && quota > max_cfs_runtime)
9301 		return -EINVAL;
9302 
9303 	if (quota != RUNTIME_INF && (burst > quota ||
9304 				     burst + quota > max_cfs_runtime))
9305 		return -EINVAL;
9306 
9307 	/*
9308 	 * Prevent race between setting of cfs_rq->runtime_enabled and
9309 	 * unthrottle_offline_cfs_rqs().
9310 	 */
9311 	guard(cpus_read_lock)();
9312 	guard(mutex)(&cfs_constraints_mutex);
9313 
9314 	ret = __cfs_schedulable(tg, period, quota);
9315 	if (ret)
9316 		return ret;
9317 
9318 	runtime_enabled = quota != RUNTIME_INF;
9319 	runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
9320 	/*
9321 	 * If we need to toggle cfs_bandwidth_used, off->on must occur
9322 	 * before making related changes, and on->off must occur afterwards
9323 	 */
9324 	if (runtime_enabled && !runtime_was_enabled)
9325 		cfs_bandwidth_usage_inc();
9326 
9327 	scoped_guard (raw_spinlock_irq, &cfs_b->lock) {
9328 		cfs_b->period = ns_to_ktime(period);
9329 		cfs_b->quota = quota;
9330 		cfs_b->burst = burst;
9331 
9332 		__refill_cfs_bandwidth_runtime(cfs_b);
9333 
9334 		/*
9335 		 * Restart the period timer (if active) to handle new
9336 		 * period expiry:
9337 		 */
9338 		if (runtime_enabled)
9339 			start_cfs_bandwidth(cfs_b);
9340 	}
9341 
9342 	for_each_online_cpu(i) {
9343 		struct cfs_rq *cfs_rq = tg->cfs_rq[i];
9344 		struct rq *rq = cfs_rq->rq;
9345 
9346 		guard(rq_lock_irq)(rq);
9347 		cfs_rq->runtime_enabled = runtime_enabled;
9348 		cfs_rq->runtime_remaining = 0;
9349 
9350 		if (cfs_rq->throttled)
9351 			unthrottle_cfs_rq(cfs_rq);
9352 	}
9353 
9354 	if (runtime_was_enabled && !runtime_enabled)
9355 		cfs_bandwidth_usage_dec();
9356 
9357 	return 0;
9358 }
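
/*
 * Illustrative values for the constraints above: period = 100ms,
 * quota = 50ms, burst = 0 is accepted; quota = 500us fails with -EINVAL
 * (below min_cfs_quota_period); period = 2s fails (above
 * max_cfs_quota_period); and burst = 60ms with quota = 50ms fails the
 * burst > quota check.
 */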
9359 
9360 static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
9361 {
9362 	u64 quota, period, burst;
9363 
9364 	period = ktime_to_ns(tg->cfs_bandwidth.period);
9365 	burst = tg->cfs_bandwidth.burst;
9366 	if (cfs_quota_us < 0)
9367 		quota = RUNTIME_INF;
9368 	else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC)
9369 		quota = (u64)cfs_quota_us * NSEC_PER_USEC;
9370 	else
9371 		return -EINVAL;
9372 
9373 	return tg_set_cfs_bandwidth(tg, period, quota, burst);
9374 }
9375 
9376 static long tg_get_cfs_quota(struct task_group *tg)
9377 {
9378 	u64 quota_us;
9379 
9380 	if (tg->cfs_bandwidth.quota == RUNTIME_INF)
9381 		return -1;
9382 
9383 	quota_us = tg->cfs_bandwidth.quota;
9384 	do_div(quota_us, NSEC_PER_USEC);
9385 
9386 	return quota_us;
9387 }
9388 
9389 static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
9390 {
9391 	u64 quota, period, burst;
9392 
9393 	if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC)
9394 		return -EINVAL;
9395 
9396 	period = (u64)cfs_period_us * NSEC_PER_USEC;
9397 	quota = tg->cfs_bandwidth.quota;
9398 	burst = tg->cfs_bandwidth.burst;
9399 
9400 	return tg_set_cfs_bandwidth(tg, period, quota, burst);
9401 }
9402 
9403 static long tg_get_cfs_period(struct task_group *tg)
9404 {
9405 	u64 cfs_period_us;
9406 
9407 	cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
9408 	do_div(cfs_period_us, NSEC_PER_USEC);
9409 
9410 	return cfs_period_us;
9411 }
9412 
9413 static int tg_set_cfs_burst(struct task_group *tg, long cfs_burst_us)
9414 {
9415 	u64 quota, period, burst;
9416 
9417 	if ((u64)cfs_burst_us > U64_MAX / NSEC_PER_USEC)
9418 		return -EINVAL;
9419 
9420 	burst = (u64)cfs_burst_us * NSEC_PER_USEC;
9421 	period = ktime_to_ns(tg->cfs_bandwidth.period);
9422 	quota = tg->cfs_bandwidth.quota;
9423 
9424 	return tg_set_cfs_bandwidth(tg, period, quota, burst);
9425 }
9426 
9427 static long tg_get_cfs_burst(struct task_group *tg)
9428 {
9429 	u64 burst_us;
9430 
9431 	burst_us = tg->cfs_bandwidth.burst;
9432 	do_div(burst_us, NSEC_PER_USEC);
9433 
9434 	return burst_us;
9435 }
9436 
9437 static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
9438 				  struct cftype *cft)
9439 {
9440 	return tg_get_cfs_quota(css_tg(css));
9441 }
9442 
9443 static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
9444 				   struct cftype *cftype, s64 cfs_quota_us)
9445 {
9446 	return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
9447 }
9448 
9449 static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
9450 				   struct cftype *cft)
9451 {
9452 	return tg_get_cfs_period(css_tg(css));
9453 }
9454 
9455 static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
9456 				    struct cftype *cftype, u64 cfs_period_us)
9457 {
9458 	return tg_set_cfs_period(css_tg(css), cfs_period_us);
9459 }
9460 
9461 static u64 cpu_cfs_burst_read_u64(struct cgroup_subsys_state *css,
9462 				  struct cftype *cft)
9463 {
9464 	return tg_get_cfs_burst(css_tg(css));
9465 }
9466 
9467 static int cpu_cfs_burst_write_u64(struct cgroup_subsys_state *css,
9468 				   struct cftype *cftype, u64 cfs_burst_us)
9469 {
9470 	return tg_set_cfs_burst(css_tg(css), cfs_burst_us);
9471 }
9472 
9473 struct cfs_schedulable_data {
9474 	struct task_group *tg;
9475 	u64 period, quota;
9476 };
9477 
9478 /*
9479  * normalize group quota/period to be quota/max_period
9480  * note: units are usecs
9481  */
9482 static u64 normalize_cfs_quota(struct task_group *tg,
9483 			       struct cfs_schedulable_data *d)
9484 {
9485 	u64 quota, period;
9486 
9487 	if (tg == d->tg) {
9488 		period = d->period;
9489 		quota = d->quota;
9490 	} else {
9491 		period = tg_get_cfs_period(tg);
9492 		quota = tg_get_cfs_quota(tg);
9493 	}
9494 
9495 	/* note: these should typically be equivalent */
9496 	if (quota == RUNTIME_INF || quota == -1)
9497 		return RUNTIME_INF;
9498 
9499 	return to_ratio(period, quota);
9500 }
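
/*
 * Worked example, assuming to_ratio() computes
 * (quota << BW_SHIFT) / period: quota = 50000us over period = 100000us
 * normalizes to 0.5 * 2^20 = 524288, making groups with different
 * periods directly comparable when walking the hierarchy below.
 */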
9501 
9502 static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
9503 {
9504 	struct cfs_schedulable_data *d = data;
9505 	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9506 	s64 quota = 0, parent_quota = -1;
9507 
9508 	if (!tg->parent) {
9509 		quota = RUNTIME_INF;
9510 	} else {
9511 		struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
9512 
9513 		quota = normalize_cfs_quota(tg, d);
9514 		parent_quota = parent_b->hierarchical_quota;
9515 
9516 		/*
9517 		 * Ensure max(child_quota) <= parent_quota.  On cgroup2,
9518 		 * always take the non-RUNTIME_INF min.  On cgroup1, only
9519 		 * inherit when no limit is set. In both cases this is used
9520 		 * by the scheduler to determine if a given CFS task has a
9521 		 * bandwidth constraint at some higher level.
9522 		 */
9523 		if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) {
9524 			if (quota == RUNTIME_INF)
9525 				quota = parent_quota;
9526 			else if (parent_quota != RUNTIME_INF)
9527 				quota = min(quota, parent_quota);
9528 		} else {
9529 			if (quota == RUNTIME_INF)
9530 				quota = parent_quota;
9531 			else if (parent_quota != RUNTIME_INF && quota > parent_quota)
9532 				return -EINVAL;
9533 		}
9534 	}
9535 	cfs_b->hierarchical_quota = quota;
9536 
9537 	return 0;
9538 }
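
/*
 * Illustrative hierarchy for the two cases above: with a parent at
 * 50ms/100ms, a cgroup2 child requesting 80ms/100ms gets its effective
 * hierarchical_quota clamped to the parent's ratio, while on cgroup1 the
 * same request fails with -EINVAL; an unlimited (RUNTIME_INF) child
 * inherits the parent's quota in both modes.
 */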
9539 
9540 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
9541 {
9542 	struct cfs_schedulable_data data = {
9543 		.tg = tg,
9544 		.period = period,
9545 		.quota = quota,
9546 	};
9547 
9548 	if (quota != RUNTIME_INF) {
9549 		do_div(data.period, NSEC_PER_USEC);
9550 		do_div(data.quota, NSEC_PER_USEC);
9551 	}
9552 
9553 	guard(rcu)();
9554 	return walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
9555 }
9556 
9557 static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
9558 {
9559 	struct task_group *tg = css_tg(seq_css(sf));
9560 	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9561 
9562 	seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
9563 	seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
9564 	seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
9565 
9566 	if (schedstat_enabled() && tg != &root_task_group) {
9567 		struct sched_statistics *stats;
9568 		u64 ws = 0;
9569 		int i;
9570 
9571 		for_each_possible_cpu(i) {
9572 			stats = __schedstats_from_se(tg->se[i]);
9573 			ws += schedstat_val(stats->wait_sum);
9574 		}
9575 
9576 		seq_printf(sf, "wait_sum %llu\n", ws);
9577 	}
9578 
9579 	seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst);
9580 	seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time);
9581 
9582 	return 0;
9583 }
9584 
9585 static u64 throttled_time_self(struct task_group *tg)
9586 {
9587 	int i;
9588 	u64 total = 0;
9589 
9590 	for_each_possible_cpu(i) {
9591 		total += READ_ONCE(tg->cfs_rq[i]->throttled_clock_self_time);
9592 	}
9593 
9594 	return total;
9595 }
9596 
9597 static int cpu_cfs_local_stat_show(struct seq_file *sf, void *v)
9598 {
9599 	struct task_group *tg = css_tg(seq_css(sf));
9600 
9601 	seq_printf(sf, "throttled_time %llu\n", throttled_time_self(tg));
9602 
9603 	return 0;
9604 }
9605 #endif /* CONFIG_CFS_BANDWIDTH */
9606 
9607 #ifdef CONFIG_RT_GROUP_SCHED
9608 static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
9609 				struct cftype *cft, s64 val)
9610 {
9611 	return sched_group_set_rt_runtime(css_tg(css), val);
9612 }
9613 
9614 static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
9615 			       struct cftype *cft)
9616 {
9617 	return sched_group_rt_runtime(css_tg(css));
9618 }
9619 
9620 static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
9621 				    struct cftype *cftype, u64 rt_period_us)
9622 {
9623 	return sched_group_set_rt_period(css_tg(css), rt_period_us);
9624 }
9625 
9626 static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
9627 				   struct cftype *cft)
9628 {
9629 	return sched_group_rt_period(css_tg(css));
9630 }
9631 #endif /* CONFIG_RT_GROUP_SCHED */
9632 
9633 #ifdef CONFIG_GROUP_SCHED_WEIGHT
9634 static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css,
9635 			       struct cftype *cft)
9636 {
9637 	return css_tg(css)->idle;
9638 }
9639 
9640 static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
9641 				struct cftype *cft, s64 idle)
9642 {
9643 	int ret;
9644 
9645 	ret = sched_group_set_idle(css_tg(css), idle);
9646 	if (!ret)
9647 		scx_group_set_idle(css_tg(css), idle);
9648 	return ret;
9649 }
9650 #endif
9651 
9652 static struct cftype cpu_legacy_files[] = {
9653 #ifdef CONFIG_GROUP_SCHED_WEIGHT
9654 	{
9655 		.name = "shares",
9656 		.read_u64 = cpu_shares_read_u64,
9657 		.write_u64 = cpu_shares_write_u64,
9658 	},
9659 	{
9660 		.name = "idle",
9661 		.read_s64 = cpu_idle_read_s64,
9662 		.write_s64 = cpu_idle_write_s64,
9663 	},
9664 #endif
9665 #ifdef CONFIG_CFS_BANDWIDTH
9666 	{
9667 		.name = "cfs_quota_us",
9668 		.read_s64 = cpu_cfs_quota_read_s64,
9669 		.write_s64 = cpu_cfs_quota_write_s64,
9670 	},
9671 	{
9672 		.name = "cfs_period_us",
9673 		.read_u64 = cpu_cfs_period_read_u64,
9674 		.write_u64 = cpu_cfs_period_write_u64,
9675 	},
9676 	{
9677 		.name = "cfs_burst_us",
9678 		.read_u64 = cpu_cfs_burst_read_u64,
9679 		.write_u64 = cpu_cfs_burst_write_u64,
9680 	},
9681 	{
9682 		.name = "stat",
9683 		.seq_show = cpu_cfs_stat_show,
9684 	},
9685 	{
9686 		.name = "stat.local",
9687 		.seq_show = cpu_cfs_local_stat_show,
9688 	},
9689 #endif
9690 #ifdef CONFIG_RT_GROUP_SCHED
9691 	{
9692 		.name = "rt_runtime_us",
9693 		.read_s64 = cpu_rt_runtime_read,
9694 		.write_s64 = cpu_rt_runtime_write,
9695 	},
9696 	{
9697 		.name = "rt_period_us",
9698 		.read_u64 = cpu_rt_period_read_uint,
9699 		.write_u64 = cpu_rt_period_write_uint,
9700 	},
9701 #endif
9702 #ifdef CONFIG_UCLAMP_TASK_GROUP
9703 	{
9704 		.name = "uclamp.min",
9705 		.flags = CFTYPE_NOT_ON_ROOT,
9706 		.seq_show = cpu_uclamp_min_show,
9707 		.write = cpu_uclamp_min_write,
9708 	},
9709 	{
9710 		.name = "uclamp.max",
9711 		.flags = CFTYPE_NOT_ON_ROOT,
9712 		.seq_show = cpu_uclamp_max_show,
9713 		.write = cpu_uclamp_max_write,
9714 	},
9715 #endif
9716 	{ }	/* Terminate */
9717 };
9718 
9719 static int cpu_extra_stat_show(struct seq_file *sf,
9720 			       struct cgroup_subsys_state *css)
9721 {
9722 #ifdef CONFIG_CFS_BANDWIDTH
9723 	{
9724 		struct task_group *tg = css_tg(css);
9725 		struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9726 		u64 throttled_usec, burst_usec;
9727 
9728 		throttled_usec = cfs_b->throttled_time;
9729 		do_div(throttled_usec, NSEC_PER_USEC);
9730 		burst_usec = cfs_b->burst_time;
9731 		do_div(burst_usec, NSEC_PER_USEC);
9732 
9733 		seq_printf(sf, "nr_periods %d\n"
9734 			   "nr_throttled %d\n"
9735 			   "throttled_usec %llu\n"
9736 			   "nr_bursts %d\n"
9737 			   "burst_usec %llu\n",
9738 			   cfs_b->nr_periods, cfs_b->nr_throttled,
9739 			   throttled_usec, cfs_b->nr_burst, burst_usec);
9740 	}
9741 #endif
9742 	return 0;
9743 }
9744 
9745 static int cpu_local_stat_show(struct seq_file *sf,
9746 			       struct cgroup_subsys_state *css)
9747 {
9748 #ifdef CONFIG_CFS_BANDWIDTH
9749 	{
9750 		struct task_group *tg = css_tg(css);
9751 		u64 throttled_self_usec;
9752 
9753 		throttled_self_usec = throttled_time_self(tg);
9754 		do_div(throttled_self_usec, NSEC_PER_USEC);
9755 
9756 		seq_printf(sf, "throttled_usec %llu\n",
9757 			   throttled_self_usec);
9758 	}
9759 #endif
9760 	return 0;
9761 }
9762 
9763 #ifdef CONFIG_GROUP_SCHED_WEIGHT
9764 
9765 static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
9766 			       struct cftype *cft)
9767 {
9768 	return sched_weight_to_cgroup(tg_weight(css_tg(css)));
9769 }
9770 
9771 static int cpu_weight_write_u64(struct cgroup_subsys_state *css,
9772 				struct cftype *cft, u64 cgrp_weight)
9773 {
9774 	unsigned long weight;
9775 	int ret;
9776 
9777 	if (cgrp_weight < CGROUP_WEIGHT_MIN || cgrp_weight > CGROUP_WEIGHT_MAX)
9778 		return -ERANGE;
9779 
9780 	weight = sched_weight_from_cgroup(cgrp_weight);
9781 
9782 	ret = sched_group_set_shares(css_tg(css), scale_load(weight));
9783 	if (!ret)
9784 		scx_group_set_weight(css_tg(css), cgrp_weight);
9785 	return ret;
9786 }
9787 
9788 static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css,
9789 				    struct cftype *cft)
9790 {
9791 	unsigned long weight = tg_weight(css_tg(css));
9792 	int last_delta = INT_MAX;
9793 	int prio, delta;
9794 
9795 	/* find the closest nice value to the current weight */
9796 	for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) {
9797 		delta = abs(sched_prio_to_weight[prio] - weight);
9798 		if (delta >= last_delta)
9799 			break;
9800 		last_delta = delta;
9801 	}
9802 
9803 	return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO);
9804 }
9805 
9806 static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
9807 				     struct cftype *cft, s64 nice)
9808 {
9809 	unsigned long weight;
9810 	int idx, ret;
9811 
9812 	if (nice < MIN_NICE || nice > MAX_NICE)
9813 		return -ERANGE;
9814 
9815 	idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO;
9816 	idx = array_index_nospec(idx, 40);
9817 	weight = sched_prio_to_weight[idx];
9818 
9819 	ret = sched_group_set_shares(css_tg(css), scale_load(weight));
9820 	if (!ret)
9821 		scx_group_set_weight(css_tg(css),
9822 				     sched_weight_to_cgroup(weight));
9823 	return ret;
9824 }
9825 #endif /* CONFIG_GROUP_SCHED_WEIGHT */
9826 
9827 static void __maybe_unused cpu_period_quota_print(struct seq_file *sf,
9828 						  long period, long quota)
9829 {
9830 	if (quota < 0)
9831 		seq_puts(sf, "max");
9832 	else
9833 		seq_printf(sf, "%ld", quota);
9834 
9835 	seq_printf(sf, " %ld\n", period);
9836 }
9837 
9838 /* caller should put the current value in *@periodp before calling */
9839 static int __maybe_unused cpu_period_quota_parse(char *buf,
9840 						 u64 *periodp, u64 *quotap)
9841 {
9842 	char tok[21];	/* 20 digits of U64_MAX + NUL */
9843 
9844 	if (sscanf(buf, "%20s %llu", tok, periodp) < 1)
9845 		return -EINVAL;
9846 
9847 	*periodp *= NSEC_PER_USEC;
9848 
9849 	if (sscanf(tok, "%llu", quotap))
9850 		*quotap *= NSEC_PER_USEC;
9851 	else if (!strcmp(tok, "max"))
9852 		*quotap = RUNTIME_INF;
9853 	else
9854 		return -EINVAL;
9855 
9856 	return 0;
9857 }
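
/*
 * Usage sketch for the cgroup2 "cpu.max" file backed by this parser
 * (values are illustrative):
 *
 *	# echo "50000 100000" > cpu.max		(50ms quota per 100ms period)
 *	# echo "50000" > cpu.max		(new quota, period unchanged)
 *	# echo "max" > cpu.max			(no limit)
 *
 * The single-token form works because the caller pre-loads *periodp with
 * the current period and the sscanf() above only overwrites it when a
 * second field is present.
 */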
9858 
9859 #ifdef CONFIG_CFS_BANDWIDTH
9860 static int cpu_max_show(struct seq_file *sf, void *v)
9861 {
9862 	struct task_group *tg = css_tg(seq_css(sf));
9863 
9864 	cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg));
9865 	return 0;
9866 }
9867 
9868 static ssize_t cpu_max_write(struct kernfs_open_file *of,
9869 			     char *buf, size_t nbytes, loff_t off)
9870 {
9871 	struct task_group *tg = css_tg(of_css(of));
9872 	u64 period = tg_get_cfs_period(tg);
9873 	u64 burst = tg->cfs_bandwidth.burst;
9874 	u64 quota;
9875 	int ret;
9876 
9877 	ret = cpu_period_quota_parse(buf, &period, &quota);
9878 	if (!ret)
9879 		ret = tg_set_cfs_bandwidth(tg, period, quota, burst);
9880 	return ret ?: nbytes;
9881 }
9882 #endif
9883 
9884 static struct cftype cpu_files[] = {
9885 #ifdef CONFIG_GROUP_SCHED_WEIGHT
9886 	{
9887 		.name = "weight",
9888 		.flags = CFTYPE_NOT_ON_ROOT,
9889 		.read_u64 = cpu_weight_read_u64,
9890 		.write_u64 = cpu_weight_write_u64,
9891 	},
9892 	{
9893 		.name = "weight.nice",
9894 		.flags = CFTYPE_NOT_ON_ROOT,
9895 		.read_s64 = cpu_weight_nice_read_s64,
9896 		.write_s64 = cpu_weight_nice_write_s64,
9897 	},
9898 	{
9899 		.name = "idle",
9900 		.flags = CFTYPE_NOT_ON_ROOT,
9901 		.read_s64 = cpu_idle_read_s64,
9902 		.write_s64 = cpu_idle_write_s64,
9903 	},
9904 #endif
9905 #ifdef CONFIG_CFS_BANDWIDTH
9906 	{
9907 		.name = "max",
9908 		.flags = CFTYPE_NOT_ON_ROOT,
9909 		.seq_show = cpu_max_show,
9910 		.write = cpu_max_write,
9911 	},
9912 	{
9913 		.name = "max.burst",
9914 		.flags = CFTYPE_NOT_ON_ROOT,
9915 		.read_u64 = cpu_cfs_burst_read_u64,
9916 		.write_u64 = cpu_cfs_burst_write_u64,
9917 	},
9918 #endif
9919 #ifdef CONFIG_UCLAMP_TASK_GROUP
9920 	{
9921 		.name = "uclamp.min",
9922 		.flags = CFTYPE_NOT_ON_ROOT,
9923 		.seq_show = cpu_uclamp_min_show,
9924 		.write = cpu_uclamp_min_write,
9925 	},
9926 	{
9927 		.name = "uclamp.max",
9928 		.flags = CFTYPE_NOT_ON_ROOT,
9929 		.seq_show = cpu_uclamp_max_show,
9930 		.write = cpu_uclamp_max_write,
9931 	},
9932 #endif
9933 	{ }	/* terminate */
9934 };
9935 
9936 struct cgroup_subsys cpu_cgrp_subsys = {
9937 	.css_alloc	= cpu_cgroup_css_alloc,
9938 	.css_online	= cpu_cgroup_css_online,
9939 	.css_offline	= cpu_cgroup_css_offline,
9940 	.css_released	= cpu_cgroup_css_released,
9941 	.css_free	= cpu_cgroup_css_free,
9942 	.css_extra_stat_show = cpu_extra_stat_show,
9943 	.css_local_stat_show = cpu_local_stat_show,
9944 	.can_attach	= cpu_cgroup_can_attach,
9945 	.attach		= cpu_cgroup_attach,
9946 	.cancel_attach	= cpu_cgroup_cancel_attach,
9947 	.legacy_cftypes	= cpu_legacy_files,
9948 	.dfl_cftypes	= cpu_files,
9949 	.early_init	= true,
9950 	.threaded	= true,
9951 };
9952 
9953 #endif	/* CONFIG_CGROUP_SCHED */
9954 
9955 void dump_cpu_task(int cpu)
9956 {
9957 	if (in_hardirq() && cpu == smp_processor_id()) {
9958 		struct pt_regs *regs;
9959 
9960 		regs = get_irq_regs();
9961 		if (regs) {
9962 			show_regs(regs);
9963 			return;
9964 		}
9965 	}
9966 
9967 	if (trigger_single_cpu_backtrace(cpu))
9968 		return;
9969 
9970 	pr_info("Task dump for CPU %d:\n", cpu);
9971 	sched_show_task(cpu_curr(cpu));
9972 }
9973 
9974 /*
9975  * Nice levels are multiplicative, with a gentle 10% change for every
9976  * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
9977  * nice 1, it will get ~10% less CPU time than another CPU-bound task
9978  * that remained on nice 0.
9979  *
9980  * The "10% effect" is relative and cumulative: from _any_ nice level,
9981  * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
9982  * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
9983  * If a task goes up by ~10% and another task goes down by ~10% then
9984  * the relative distance between them is ~25%.)
9985  */
9986 const int sched_prio_to_weight[40] = {
9987  /* -20 */     88761,     71755,     56483,     46273,     36291,
9988  /* -15 */     29154,     23254,     18705,     14949,     11916,
9989  /* -10 */      9548,      7620,      6100,      4904,      3906,
9990  /*  -5 */      3121,      2501,      1991,      1586,      1277,
9991  /*   0 */      1024,       820,       655,       526,       423,
9992  /*   5 */       335,       272,       215,       172,       137,
9993  /*  10 */       110,        87,        70,        56,        45,
9994  /*  15 */        36,        29,        23,        18,        15,
9995 };
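
/*
 * Worked example of the ~10% rule: two busy tasks at nice 0 and nice 1
 * have weights 1024 and 820, so they receive 1024/1844 ~= 55.5% and
 * 820/1844 ~= 44.5% of the CPU. Neighbouring weights keep a ratio close
 * to the 1.25 multiplier (1024/820 ~= 1.249).
 */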
9996 
9997 /*
9998  * Inverse (2^32/x) values of the sched_prio_to_weight[] array, pre-calculated.
9999  *
10000  * In cases where the weight does not change often, we can use the
10001  * pre-calculated inverse to speed up arithmetics by turning divisions
10002  * into multiplications:
10003  */
10004 const u32 sched_prio_to_wmult[40] = {
10005  /* -20 */     48388,     59856,     76040,     92818,    118348,
10006  /* -15 */    147320,    184698,    229616,    287308,    360437,
10007  /* -10 */    449829,    563644,    704093,    875809,   1099582,
10008  /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
10009  /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
10010  /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
10011  /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
10012  /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
10013 };
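
/*
 * Spot-check of the inverses above: 2^32 / 1024 = 4194304 (the nice-0
 * entry) and 2^32 / 88761 ~= 48388 (the nice -20 entry). This lets hot
 * paths evaluate delta / weight as (delta * inv_weight) >> 32, trading a
 * division for a multiplication.
 */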
10014 
10015 void call_trace_sched_update_nr_running(struct rq *rq, int count)
10016 {
10017 	trace_sched_update_nr_running_tp(rq, count);
10018 }
10019 
10020 #ifdef CONFIG_SCHED_MM_CID
10021 
10022 /*
10023  * @cid_lock: Guarantee forward-progress of cid allocation.
10024  *
10025  * Concurrency ID allocation within a bitmap is mostly lock-free. The cid_lock
10026  * is only used when contention is detected by the lock-free allocation so
10027  * forward progress can be guaranteed.
10028  */
10029 DEFINE_RAW_SPINLOCK(cid_lock);
10030 
10031 /*
10032  * @use_cid_lock: Select cid allocation behavior: lock-free vs spinlock.
10033  *
10034  * When @use_cid_lock is 0, the cid allocation is lock-free. When contention is
10035  * detected, it is set to 1 to ensure that all newly coming allocations are
10036  * serialized by @cid_lock until the allocation which detected contention
10037  * completes and sets @use_cid_lock back to 0. This guarantees forward progress
10038  * of a cid allocation.
10039  */
10040 int use_cid_lock;
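
/*
 * Sketch of the fallback protocol, with hypothetical helpers
 * (try_lockfree_cid_alloc()/locked_cid_alloc()) standing in for the real
 * allocator:
 *
 *	if (!READ_ONCE(use_cid_lock)) {
 *		cid = try_lockfree_cid_alloc(mm);	// fast path
 *		if (cid >= 0)
 *			return cid;
 *	}
 *	WRITE_ONCE(use_cid_lock, 1);		// contention detected
 *	raw_spin_lock(&cid_lock);
 *	cid = locked_cid_alloc(mm);		// guaranteed to progress
 *	WRITE_ONCE(use_cid_lock, 0);
 *	raw_spin_unlock(&cid_lock);
 *	return cid;
 */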
10041 
10042 /*
10043  * mm_cid remote-clear implements a lock-free algorithm to clear per-mm/cpu cid
10044  * concurrently with respect to the execution of the source runqueue context
10045  * switch.
10046  *
10047  * There is one basic property we want to guarantee here:
10048  *
10049  * (1) Remote-clear should _never_ mark a per-cpu cid UNSET when it is actively
10050  * used by a task. That would lead to concurrent allocation of the cid and
10051  * userspace corruption.
10052  *
10053  * Provide this guarantee by introducing a Dekker memory ordering to guarantee
10054  * that a pair of loads observe at least one of a pair of stores, which can be
10055  * shown as:
10056  *
10057  *      X = Y = 0
10058  *
10059  *      w[X]=1          w[Y]=1
10060  *      MB              MB
10061  *      r[Y]=y          r[X]=x
10062  *
10063  * Which guarantees that x==0 && y==0 is impossible. But rather than using
10064  * values 0 and 1, this algorithm cares about specific state transitions of the
10065  * runqueue current task (as updated by the scheduler context switch), and the
10066  * per-mm/cpu cid value.
10067  *
10068  * Let's introduce task (Y) which has task->mm == mm and task (N) which has
10069  * task->mm != mm for the rest of the discussion. There are two scheduler state
10070  * transitions on context switch we care about:
10071  *
10072  * (TSA) Store to rq->curr with transition from (N) to (Y)
10073  *
10074  * (TSB) Store to rq->curr with transition from (Y) to (N)
10075  *
10076  * On the remote-clear side, there is one transition we care about:
10077  *
10078  * (TMA) cmpxchg to *pcpu_cid to set the LAZY flag
10079  *
10080  * There is also a transition to UNSET state which can be performed from all
10081  * sides (scheduler, remote-clear). It is always performed with a cmpxchg which
10082  * guarantees that only a single thread will succeed:
10083  *
10084  * (TMB) cmpxchg to *pcpu_cid to mark UNSET
10085  *
10086  * Just to be clear, what we do _not_ want to happen is a transition to UNSET
10087  * when a thread is actively using the cid (property (1)).
10088  *
10089  * Let's look at the relevant combinations of TSA/TSB, and TMA transitions.
10090  *
10091  * Scenario A) (TSA)+(TMA) (from next task perspective)
10092  *
10093  * CPU0                                      CPU1
10094  *
10095  * Context switch CS-1                       Remote-clear
10096  *   - store to rq->curr: (N)->(Y) (TSA)     - cmpxchg to *pcpu_id to LAZY (TMA)
10097  *                                             (implied barrier after cmpxchg)
10098  *   - switch_mm_cid()
10099  *     - memory barrier (see switch_mm_cid()
10100  *       comment explaining how this barrier
10101  *       is combined with other scheduler
10102  *       barriers)
10103  *     - mm_cid_get (next)
10104  *       - READ_ONCE(*pcpu_cid)              - rcu_dereference(src_rq->curr)
10105  *
10106  * This Dekker ensures that either task (Y) is observed by the
10107  * rcu_dereference() or the LAZY flag is observed by READ_ONCE(), or both are
10108  * observed.
10109  *
10110  * If task (Y) store is observed by rcu_dereference(), it means that there is
10111  * still an active task on the cpu. Remote-clear will therefore not transition
10112  * to UNSET, which fulfills property (1).
10113  *
10114  * If task (Y) is not observed, but the lazy flag is observed by READ_ONCE(),
10115  * the scheduler side will move the state to UNSET, which clears the percpu cid perhaps
10116  * uselessly (which is not an issue for correctness). Because task (Y) is not
10117  * observed, CPU1 can move ahead to set the state to UNSET. Because moving
10118  * state to UNSET is done with a cmpxchg expecting that the old state has the
10119  * LAZY flag set, only one thread will successfully UNSET.
10120  *
10121  * If both states (LAZY flag and task (Y)) are observed, the thread on CPU0
10122  * will observe the LAZY flag and transition to UNSET (perhaps uselessly), and
10123  * CPU1 will observe task (Y) and do nothing more, which is fine.
10124  *
10125  * What we are effectively preventing with this Dekker is a scenario where
10126  * neither the LAZY flag nor store (Y) is observed, which would fail property (1)
10127  * because this would UNSET a cid which is actively used.
10128  */
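
/*
 * Litmus-style reduction of the pairing above (X stands for the rq->curr
 * store (TSA), Y for the per-mm/cpu cid LAZY transition (TMA)):
 *
 *	CPU0 (context switch)		CPU1 (remote-clear)
 *	WRITE_ONCE(X, 1)		cmpxchg(&Y, old, LAZY)
 *	smp_mb()			(cmpxchg implies full barrier)
 *	r0 = READ_ONCE(Y)		r1 = READ_ONCE(X)
 *
 * The outcome r0 == 0 && r1 == 0 is forbidden: at least one side
 * observes the other's store, so an actively used cid is never moved to
 * UNSET behind the scheduler's back.
 */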
10129 
10130 void sched_mm_cid_migrate_from(struct task_struct *t)
10131 {
10132 	t->migrate_from_cpu = task_cpu(t);
10133 }
10134 
10135 static
10136 int __sched_mm_cid_migrate_from_fetch_cid(struct rq *src_rq,
10137 					  struct task_struct *t,
10138 					  struct mm_cid *src_pcpu_cid)
10139 {
10140 	struct mm_struct *mm = t->mm;
10141 	struct task_struct *src_task;
10142 	int src_cid, last_mm_cid;
10143 
10144 	if (!mm)
10145 		return -1;
10146 
10147 	last_mm_cid = t->last_mm_cid;
10148 	/*
10149 	 * If the migrated task has no last cid, or if the current
10150 	 * task on src rq uses the cid, it means the source cid does not need
10151 	 * to be moved to the destination cpu.
10152 	 */
10153 	if (last_mm_cid == -1)
10154 		return -1;
10155 	src_cid = READ_ONCE(src_pcpu_cid->cid);
10156 	if (!mm_cid_is_valid(src_cid) || last_mm_cid != src_cid)
10157 		return -1;
10158 
10159 	/*
10160 	 * If we observe an active task using the mm on this rq, it means we
10161 	 * are not the last task to be migrated from this cpu for this mm, so
10162 	 * there is no need to move src_cid to the destination cpu.
10163 	 */
10164 	guard(rcu)();
10165 	src_task = rcu_dereference(src_rq->curr);
10166 	if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
10167 		t->last_mm_cid = -1;
10168 		return -1;
10169 	}
10170 
10171 	return src_cid;
10172 }
10173 
10174 static
10175 int __sched_mm_cid_migrate_from_try_steal_cid(struct rq *src_rq,
10176 					      struct task_struct *t,
10177 					      struct mm_cid *src_pcpu_cid,
10178 					      int src_cid)
10179 {
10180 	struct task_struct *src_task;
10181 	struct mm_struct *mm = t->mm;
10182 	int lazy_cid;
10183 
10184 	if (src_cid == -1)
10185 		return -1;
10186 
10187 	/*
10188 	 * Attempt to clear the source cpu cid to move it to the destination
10189 	 * cpu.
10190 	 */
10191 	lazy_cid = mm_cid_set_lazy_put(src_cid);
10192 	if (!try_cmpxchg(&src_pcpu_cid->cid, &src_cid, lazy_cid))
10193 		return -1;
10194 
10195 	/*
10196 	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
10197 	 * rq->curr->mm matches the scheduler barrier in context_switch()
10198 	 * between store to rq->curr and load of prev and next task's
10199 	 * per-mm/cpu cid.
10200 	 *
10201 	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
10202 	 * rq->curr->mm_cid_active matches the barrier in
10203 	 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
10204 	 * sched_mm_cid_after_execve() between store to t->mm_cid_active and
10205 	 * load of per-mm/cpu cid.
10206 	 */
10207 
10208 	/*
10209 	 * If we observe an active task using the mm on this rq after setting
10210 	 * the lazy-put flag, this task will be responsible for transitioning
10211 	 * from lazy-put flag set to MM_CID_UNSET.
10212 	 */
10213 	scoped_guard (rcu) {
10214 		src_task = rcu_dereference(src_rq->curr);
10215 		if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
10216 			/*
10217 			 * We observed an active task for this mm, there is therefore
10218 			 * no point in moving this cid to the destination cpu.
10219 			 */
10220 			t->last_mm_cid = -1;
10221 			return -1;
10222 		}
10223 	}
10224 
10225 	/*
10226 	 * The src_cid is unused, so it can be unset.
10227 	 */
10228 	if (!try_cmpxchg(&src_pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
10229 		return -1;
10230 	return src_cid;
10231 }
10232 
10233 /*
10234  * Migration to dst cpu. Called with dst_rq lock held.
10235  * Interrupts are disabled, which keeps small the window during which the cid
10236  * is owned without the source rq lock being held.
10237  */
10238 void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t)
10239 {
10240 	struct mm_cid *src_pcpu_cid, *dst_pcpu_cid;
10241 	struct mm_struct *mm = t->mm;
10242 	int src_cid, dst_cid, src_cpu;
10243 	struct rq *src_rq;
10244 
10245 	lockdep_assert_rq_held(dst_rq);
10246 
10247 	if (!mm)
10248 		return;
10249 	src_cpu = t->migrate_from_cpu;
10250 	if (src_cpu == -1) {
10251 		t->last_mm_cid = -1;
10252 		return;
10253 	}
10254 	/*
10255 	 * Move the src cid if the dst cid is unset. This keeps id
10256 	 * allocation closest to 0 in cases where few threads migrate around
10257 	 * many CPUs.
10258 	 *
10259 	 * If destination cid is already set, we may have to just clear
10260 	 * the src cid to ensure compactness in frequent migrations
10261 	 * scenarios.
10262 	 *
10263 	 * It is not useful to clear the src cid when the number of threads is
10264 	 * greater than or equal to the number of allowed CPUs, because user-space
10265 	 * can expect that the number of allowed cids can reach the number of
10266 	 * allowed CPUs.
10267 	 */
10268 	dst_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(dst_rq));
10269 	dst_cid = READ_ONCE(dst_pcpu_cid->cid);
10270 	if (!mm_cid_is_unset(dst_cid) &&
10271 	    atomic_read(&mm->mm_users) >= t->nr_cpus_allowed)
10272 		return;
10273 	src_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, src_cpu);
10274 	src_rq = cpu_rq(src_cpu);
10275 	src_cid = __sched_mm_cid_migrate_from_fetch_cid(src_rq, t, src_pcpu_cid);
10276 	if (src_cid == -1)
10277 		return;
10278 	src_cid = __sched_mm_cid_migrate_from_try_steal_cid(src_rq, t, src_pcpu_cid,
10279 							    src_cid);
10280 	if (src_cid == -1)
10281 		return;
10282 	if (!mm_cid_is_unset(dst_cid)) {
10283 		__mm_cid_put(mm, src_cid);
10284 		return;
10285 	}
10286 	/* Move src_cid to dst cpu. */
10287 	mm_cid_snapshot_time(dst_rq, mm);
10288 	WRITE_ONCE(dst_pcpu_cid->cid, src_cid);
10289 }
10290 
10291 static void sched_mm_cid_remote_clear(struct mm_struct *mm, struct mm_cid *pcpu_cid,
10292 				      int cpu)
10293 {
10294 	struct rq *rq = cpu_rq(cpu);
10295 	struct task_struct *t;
10296 	int cid, lazy_cid;
10297 
10298 	cid = READ_ONCE(pcpu_cid->cid);
10299 	if (!mm_cid_is_valid(cid))
10300 		return;
10301 
10302 	/*
10303 	 * Clear the cpu cid if it is set to keep cid allocation compact.  If
10304 	 * there happens to be other tasks left on the source cpu using this
10305 	 * mm, the next task using this mm will reallocate its cid on context
10306 	 * switch.
10307 	 */
10308 	lazy_cid = mm_cid_set_lazy_put(cid);
10309 	if (!try_cmpxchg(&pcpu_cid->cid, &cid, lazy_cid))
10310 		return;
10311 
10312 	/*
10313 	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
10314 	 * rq->curr->mm matches the scheduler barrier in context_switch()
10315 	 * between store to rq->curr and load of prev and next task's
10316 	 * per-mm/cpu cid.
10317 	 *
10318 	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
10319 	 * rq->curr->mm_cid_active matches the barrier in
10320 	 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
10321 	 * sched_mm_cid_after_execve() between store to t->mm_cid_active and
10322 	 * load of per-mm/cpu cid.
10323 	 */
10324 
10325 	/*
10326 	 * If we observe an active task using the mm on this rq after setting
10327 	 * the lazy-put flag, that task will be responsible for transitioning
10328 	 * from lazy-put flag set to MM_CID_UNSET.
10329 	 */
10330 	scoped_guard (rcu) {
10331 		t = rcu_dereference(rq->curr);
10332 		if (READ_ONCE(t->mm_cid_active) && t->mm == mm)
10333 			return;
10334 	}
10335 
10336 	/*
10337 	 * The cid is unused, so it can be unset.
10338 	 * Disable interrupts to keep the window of cid ownership without rq
10339 	 * lock small.
10340 	 */
10341 	scoped_guard (irqsave) {
10342 		if (try_cmpxchg(&pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
10343 			__mm_cid_put(mm, cid);
10344 	}
10345 }
10346 
10347 static void sched_mm_cid_remote_clear_old(struct mm_struct *mm, int cpu)
10348 {
10349 	struct rq *rq = cpu_rq(cpu);
10350 	struct mm_cid *pcpu_cid;
10351 	struct task_struct *curr;
10352 	u64 rq_clock;
10353 
10354 	/*
10355 	 * rq->clock load is racy on 32-bit but one spurious clear once in a
10356 	 * while is irrelevant.
10357 	 */
10358 	rq_clock = READ_ONCE(rq->clock);
10359 	pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
10360 
10361 	/*
10362 	 * In order to take care of infrequently scheduled tasks, bump the time
10363 	 * snapshot associated with this cid if an active task using the mm is
10364 	 * observed on this rq.
10365 	 */
10366 	scoped_guard (rcu) {
10367 		curr = rcu_dereference(rq->curr);
10368 		if (READ_ONCE(curr->mm_cid_active) && curr->mm == mm) {
10369 			WRITE_ONCE(pcpu_cid->time, rq_clock);
10370 			return;
10371 		}
10372 	}
10373 
10374 	if (rq_clock < pcpu_cid->time + SCHED_MM_CID_PERIOD_NS)
10375 		return;
10376 	sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
10377 }
10378 
10379 static void sched_mm_cid_remote_clear_weight(struct mm_struct *mm, int cpu,
10380 					     int weight)
10381 {
10382 	struct mm_cid *pcpu_cid;
10383 	int cid;
10384 
10385 	pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
10386 	cid = READ_ONCE(pcpu_cid->cid);
10387 	if (!mm_cid_is_valid(cid) || cid < weight)
10388 		return;
10389 	sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
10390 }
10391 
10392 static void task_mm_cid_work(struct callback_head *work)
10393 {
10394 	unsigned long now = jiffies, old_scan, next_scan;
10395 	struct task_struct *t = current;
10396 	struct cpumask *cidmask;
10397 	struct mm_struct *mm;
10398 	int weight, cpu;
10399 
10400 	SCHED_WARN_ON(t != container_of(work, struct task_struct, cid_work));
10401 
10402 	work->next = work;	/* Prevent double-add */
10403 	if (t->flags & PF_EXITING)
10404 		return;
10405 	mm = t->mm;
10406 	if (!mm)
10407 		return;
10408 	old_scan = READ_ONCE(mm->mm_cid_next_scan);
10409 	next_scan = now + msecs_to_jiffies(MM_CID_SCAN_DELAY);
10410 	if (!old_scan) {
10411 		unsigned long res;
10412 
10413 		res = cmpxchg(&mm->mm_cid_next_scan, old_scan, next_scan);
10414 		if (res != old_scan)
10415 			old_scan = res;
10416 		else
10417 			old_scan = next_scan;
10418 	}
10419 	if (time_before(now, old_scan))
10420 		return;
10421 	if (!try_cmpxchg(&mm->mm_cid_next_scan, &old_scan, next_scan))
10422 		return;
10423 	cidmask = mm_cidmask(mm);
10424 	/* Clear cids that were not recently used. */
10425 	for_each_possible_cpu(cpu)
10426 		sched_mm_cid_remote_clear_old(mm, cpu);
10427 	weight = cpumask_weight(cidmask);
10428 	/*
10429 	 * Clear cids that are greater or equal to the cidmask weight to
10430 	 * Clear cids that are greater than or equal to the cidmask weight to
10431 	 */
10432 	for_each_possible_cpu(cpu)
10433 		sched_mm_cid_remote_clear_weight(mm, cpu, weight);
10434 }
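
/*
 * Illustrative numbers for the compaction pass above: if the cidmask has
 * bits {0, 1, 5} set, its weight is 3, so the second loop lazily clears
 * the per-cpu slot holding cid 5; the next allocation then takes the
 * lowest free bit (2), keeping cids packed into [0, number of cids in
 * use).
 */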
10435 
10436 void init_sched_mm_cid(struct task_struct *t)
10437 {
10438 	struct mm_struct *mm = t->mm;
10439 	int mm_users = 0;
10440 
10441 	if (mm) {
10442 		mm_users = atomic_read(&mm->mm_users);
10443 		if (mm_users == 1)
10444 			mm->mm_cid_next_scan = jiffies + msecs_to_jiffies(MM_CID_SCAN_DELAY);
10445 	}
10446 	t->cid_work.next = &t->cid_work;	/* Protect against double add */
10447 	init_task_work(&t->cid_work, task_mm_cid_work);
10448 }
10449 
10450 void task_tick_mm_cid(struct rq *rq, struct task_struct *curr)
10451 {
10452 	struct callback_head *work = &curr->cid_work;
10453 	unsigned long now = jiffies;
10454 
10455 	if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) ||
10456 	    work->next != work)
10457 		return;
10458 	if (time_before(now, READ_ONCE(curr->mm->mm_cid_next_scan)))
10459 		return;
10460 	task_work_add(curr, work, TWA_RESUME);
10461 }
10462 
10463 void sched_mm_cid_exit_signals(struct task_struct *t)
10464 {
10465 	struct mm_struct *mm = t->mm;
10466 	struct rq *rq;
10467 
10468 	if (!mm)
10469 		return;
10470 
10471 	preempt_disable();
10472 	rq = this_rq();
10473 	guard(rq_lock_irqsave)(rq);
10474 	preempt_enable_no_resched();	/* holding spinlock */
10475 	WRITE_ONCE(t->mm_cid_active, 0);
10476 	/*
10477 	 * Store t->mm_cid_active before loading per-mm/cpu cid.
10478 	 * Matches barrier in sched_mm_cid_remote_clear_old().
10479 	 */
10480 	smp_mb();
10481 	mm_cid_put(mm);
10482 	t->last_mm_cid = t->mm_cid = -1;
10483 }
10484 
10485 void sched_mm_cid_before_execve(struct task_struct *t)
10486 {
10487 	struct mm_struct *mm = t->mm;
10488 	struct rq *rq;
10489 
10490 	if (!mm)
10491 		return;
10492 
10493 	preempt_disable();
10494 	rq = this_rq();
10495 	guard(rq_lock_irqsave)(rq);
10496 	preempt_enable_no_resched();	/* holding spinlock */
10497 	WRITE_ONCE(t->mm_cid_active, 0);
10498 	/*
10499 	 * Store t->mm_cid_active before loading per-mm/cpu cid.
10500 	 * Matches barrier in sched_mm_cid_remote_clear_old().
10501 	 */
10502 	smp_mb();
10503 	mm_cid_put(mm);
10504 	t->last_mm_cid = t->mm_cid = -1;
10505 }
10506 
10507 void sched_mm_cid_after_execve(struct task_struct *t)
10508 {
10509 	struct mm_struct *mm = t->mm;
10510 	struct rq *rq;
10511 
10512 	if (!mm)
10513 		return;
10514 
10515 	preempt_disable();
10516 	rq = this_rq();
10517 	scoped_guard (rq_lock_irqsave, rq) {
10518 		preempt_enable_no_resched();	/* holding spinlock */
10519 		WRITE_ONCE(t->mm_cid_active, 1);
10520 		/*
10521 		 * Store t->mm_cid_active before loading per-mm/cpu cid.
10522 		 * Matches barrier in sched_mm_cid_remote_clear_old().
10523 		 */
10524 		smp_mb();
10525 		t->last_mm_cid = t->mm_cid = mm_cid_get(rq, mm);
10526 	}
10527 	rseq_set_notify_resume(t);
10528 }
10529 
10530 void sched_mm_cid_fork(struct task_struct *t)
10531 {
10532 	WARN_ON_ONCE(!t->mm || t->mm_cid != -1);
10533 	t->mm_cid_active = 1;
10534 }
10535 #endif
10536 
10537 #ifdef CONFIG_SCHED_CLASS_EXT
10538 void sched_deq_and_put_task(struct task_struct *p, int queue_flags,
10539 			    struct sched_enq_and_set_ctx *ctx)
10540 {
10541 	struct rq *rq = task_rq(p);
10542 
10543 	lockdep_assert_rq_held(rq);
10544 
10545 	*ctx = (struct sched_enq_and_set_ctx){
10546 		.p = p,
10547 		.queue_flags = queue_flags,
10548 		.queued = task_on_rq_queued(p),
10549 		.running = task_current(rq, p),
10550 	};
10551 
10552 	update_rq_clock(rq);
10553 	if (ctx->queued)
10554 		dequeue_task(rq, p, queue_flags | DEQUEUE_NOCLOCK);
10555 	if (ctx->running)
10556 		put_prev_task(rq, p);
10557 }
10558 
10559 void sched_enq_and_set_task(struct sched_enq_and_set_ctx *ctx)
10560 {
10561 	struct rq *rq = task_rq(ctx->p);
10562 
10563 	lockdep_assert_rq_held(rq);
10564 
10565 	if (ctx->queued)
10566 		enqueue_task(rq, ctx->p, ctx->queue_flags | ENQUEUE_NOCLOCK);
10567 	if (ctx->running)
10568 		set_next_task(rq, ctx->p);
10569 }
10570 #endif	/* CONFIG_SCHED_CLASS_EXT */
10571