xref: /linux/kernel/sched/core.c (revision 1e83ccd5921a610ef409a7d4e56db27822b4ea39)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  kernel/sched/core.c
4  *
5  *  Core kernel CPU scheduler code
6  *
7  *  Copyright (C) 1991-2002  Linus Torvalds
8  *  Copyright (C) 1998-2024  Ingo Molnar, Red Hat
9  */
10 #define INSTANTIATE_EXPORTED_MIGRATE_DISABLE
11 #include <linux/sched.h>
12 #include <linux/highmem.h>
13 #include <linux/hrtimer_api.h>
14 #include <linux/ktime_api.h>
15 #include <linux/sched/signal.h>
16 #include <linux/syscalls_api.h>
17 #include <linux/debug_locks.h>
18 #include <linux/prefetch.h>
19 #include <linux/capability.h>
20 #include <linux/pgtable_api.h>
21 #include <linux/wait_bit.h>
22 #include <linux/jiffies.h>
23 #include <linux/spinlock_api.h>
24 #include <linux/cpumask_api.h>
25 #include <linux/lockdep_api.h>
26 #include <linux/hardirq.h>
27 #include <linux/softirq.h>
28 #include <linux/refcount_api.h>
29 #include <linux/topology.h>
30 #include <linux/sched/clock.h>
31 #include <linux/sched/cond_resched.h>
32 #include <linux/sched/cputime.h>
33 #include <linux/sched/debug.h>
34 #include <linux/sched/hotplug.h>
35 #include <linux/sched/init.h>
36 #include <linux/sched/isolation.h>
37 #include <linux/sched/loadavg.h>
38 #include <linux/sched/mm.h>
39 #include <linux/sched/nohz.h>
40 #include <linux/sched/rseq_api.h>
41 #include <linux/sched/rt.h>
42 
43 #include <linux/blkdev.h>
44 #include <linux/context_tracking.h>
45 #include <linux/cpuset.h>
46 #include <linux/delayacct.h>
47 #include <linux/init_task.h>
48 #include <linux/interrupt.h>
49 #include <linux/ioprio.h>
50 #include <linux/kallsyms.h>
51 #include <linux/kcov.h>
52 #include <linux/kprobes.h>
53 #include <linux/llist_api.h>
54 #include <linux/mmu_context.h>
55 #include <linux/mmzone.h>
56 #include <linux/mutex_api.h>
57 #include <linux/nmi.h>
58 #include <linux/nospec.h>
59 #include <linux/perf_event_api.h>
60 #include <linux/profile.h>
61 #include <linux/psi.h>
62 #include <linux/rcuwait_api.h>
63 #include <linux/rseq.h>
64 #include <linux/sched/wake_q.h>
65 #include <linux/scs.h>
66 #include <linux/slab.h>
67 #include <linux/syscalls.h>
68 #include <linux/vtime.h>
69 #include <linux/wait_api.h>
70 #include <linux/workqueue_api.h>
71 #include <linux/livepatch_sched.h>
72 
73 #ifdef CONFIG_PREEMPT_DYNAMIC
74 # ifdef CONFIG_GENERIC_IRQ_ENTRY
75 #  include <linux/irq-entry-common.h>
76 # endif
77 #endif
78 
79 #include <uapi/linux/sched/types.h>
80 
81 #include <asm/irq_regs.h>
82 #include <asm/switch_to.h>
83 #include <asm/tlb.h>
84 
85 #define CREATE_TRACE_POINTS
86 #include <linux/sched/rseq_api.h>
87 #include <trace/events/sched.h>
88 #include <trace/events/ipi.h>
89 #undef CREATE_TRACE_POINTS
90 
91 #include "sched.h"
92 #include "stats.h"
93 
94 #include "autogroup.h"
95 #include "pelt.h"
96 #include "smp.h"
97 
98 #include "../workqueue_internal.h"
99 #include "../../io_uring/io-wq.h"
100 #include "../smpboot.h"
101 #include "../locking/mutex.h"
102 
103 EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
104 EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);
105 
106 /*
107  * Export tracepoints that act as a bare tracehook (ie: have no trace event
108  * associated with them) to allow external modules to probe them.
109  */
110 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp);
111 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
112 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
113 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
114 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
115 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_hw_tp);
116 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp);
117 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
118 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
119 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
120 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
121 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_compute_energy_tp);
122 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_entry_tp);
123 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_exit_tp);
124 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_set_need_resched_tp);
125 
126 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
127 DEFINE_PER_CPU(struct rnd_state, sched_rnd_state);
128 
129 #ifdef CONFIG_SCHED_PROXY_EXEC
130 DEFINE_STATIC_KEY_TRUE(__sched_proxy_exec);
131 static int __init setup_proxy_exec(char *str)
132 {
133 	bool proxy_enable = true;
134 
135 	if (*str && kstrtobool(str + 1, &proxy_enable)) {
136 		pr_warn("Unable to parse sched_proxy_exec=\n");
137 		return 0;
138 	}
139 
140 	if (proxy_enable) {
141 		pr_info("sched_proxy_exec enabled via boot arg\n");
142 		static_branch_enable(&__sched_proxy_exec);
143 	} else {
144 		pr_info("sched_proxy_exec disabled via boot arg\n");
145 		static_branch_disable(&__sched_proxy_exec);
146 	}
147 	return 1;
148 }
149 #else
150 static int __init setup_proxy_exec(char *str)
151 {
152 	pr_warn("CONFIG_SCHED_PROXY_EXEC=n, so it cannot be enabled or disabled at boot time\n");
153 	return 0;
154 }
155 #endif
156 __setup("sched_proxy_exec", setup_proxy_exec);
157 
158 /*
159  * Debugging: various feature bits
160  *
161  * If SCHED_DEBUG is disabled, each compilation unit has its own copy of
162  * sysctl_sched_features, defined in sched.h, to allow constants propagation
163  * at compile time and compiler optimization based on features default.
164  */
165 #define SCHED_FEAT(name, enabled)	\
166 	(1UL << __SCHED_FEAT_##name) * enabled |
167 __read_mostly unsigned int sysctl_sched_features =
168 #include "features.h"
169 	0;
170 #undef SCHED_FEAT
171 
172 /*
173  * Print a warning if need_resched is set for the given duration (if
174  * LATENCY_WARN is enabled).
175  *
176  * If sysctl_resched_latency_warn_once is set, only one warning will be shown
177  * per boot.
178  */
179 __read_mostly int sysctl_resched_latency_warn_ms = 100;
180 __read_mostly int sysctl_resched_latency_warn_once = 1;
181 
182 /*
183  * Number of tasks to iterate in a single balance run.
184  * Limited because this is done with IRQs disabled.
185  */
186 __read_mostly unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;
187 
188 __read_mostly int scheduler_running;
189 
190 #ifdef CONFIG_SCHED_CORE
191 
192 DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);
193 
194 /* kernel prio, less is more */
195 static inline int __task_prio(const struct task_struct *p)
196 {
197 	if (p->sched_class == &stop_sched_class) /* trumps deadline */
198 		return -2;
199 
200 	if (p->dl_server)
201 		return -1; /* deadline */
202 
203 	if (rt_or_dl_prio(p->prio))
204 		return p->prio; /* [-1, 99] */
205 
206 	if (p->sched_class == &idle_sched_class)
207 		return MAX_RT_PRIO + NICE_WIDTH; /* 140 */
208 
209 	if (task_on_scx(p))
210 		return MAX_RT_PRIO + MAX_NICE + 1; /* 120, squash ext */
211 
212 	return MAX_RT_PRIO + MAX_NICE; /* 119, squash fair */
213 }
214 
215 /*
216  * l(a,b)
217  * le(a,b) := !l(b,a)
218  * g(a,b)  := l(b,a)
219  * ge(a,b) := !l(a,b)
220  */
221 
222 /* real prio, less is less */
223 static inline bool prio_less(const struct task_struct *a,
224 			     const struct task_struct *b, bool in_fi)
225 {
226 
227 	int pa = __task_prio(a), pb = __task_prio(b);
228 
229 	if (-pa < -pb)
230 		return true;
231 
232 	if (-pb < -pa)
233 		return false;
234 
235 	if (pa == -1) { /* dl_prio() doesn't work because of stop_class above */
236 		const struct sched_dl_entity *a_dl, *b_dl;
237 
238 		a_dl = &a->dl;
239 		/*
240 		 * Since,'a' and 'b' can be CFS tasks served by DL server,
241 		 * __task_prio() can return -1 (for DL) even for those. In that
242 		 * case, get to the dl_server's DL entity.
243 		 */
244 		if (a->dl_server)
245 			a_dl = a->dl_server;
246 
247 		b_dl = &b->dl;
248 		if (b->dl_server)
249 			b_dl = b->dl_server;
250 
251 		return !dl_time_before(a_dl->deadline, b_dl->deadline);
252 	}
253 
254 	if (pa == MAX_RT_PRIO + MAX_NICE)	/* fair */
255 		return cfs_prio_less(a, b, in_fi);
256 
257 #ifdef CONFIG_SCHED_CLASS_EXT
258 	if (pa == MAX_RT_PRIO + MAX_NICE + 1)	/* ext */
259 		return scx_prio_less(a, b, in_fi);
260 #endif
261 
262 	return false;
263 }
264 
265 static inline bool __sched_core_less(const struct task_struct *a,
266 				     const struct task_struct *b)
267 {
268 	if (a->core_cookie < b->core_cookie)
269 		return true;
270 
271 	if (a->core_cookie > b->core_cookie)
272 		return false;
273 
274 	/* flip prio, so high prio is leftmost */
275 	if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count))
276 		return true;
277 
278 	return false;
279 }
280 
281 #define __node_2_sc(node) rb_entry((node), struct task_struct, core_node)
282 
283 static inline bool rb_sched_core_less(struct rb_node *a, const struct rb_node *b)
284 {
285 	return __sched_core_less(__node_2_sc(a), __node_2_sc(b));
286 }
287 
288 static inline int rb_sched_core_cmp(const void *key, const struct rb_node *node)
289 {
290 	const struct task_struct *p = __node_2_sc(node);
291 	unsigned long cookie = (unsigned long)key;
292 
293 	if (cookie < p->core_cookie)
294 		return -1;
295 
296 	if (cookie > p->core_cookie)
297 		return 1;
298 
299 	return 0;
300 }
301 
302 void sched_core_enqueue(struct rq *rq, struct task_struct *p)
303 {
304 	if (p->se.sched_delayed)
305 		return;
306 
307 	rq->core->core_task_seq++;
308 
309 	if (!p->core_cookie)
310 		return;
311 
312 	rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);
313 }
314 
315 void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags)
316 {
317 	if (p->se.sched_delayed)
318 		return;
319 
320 	rq->core->core_task_seq++;
321 
322 	if (sched_core_enqueued(p)) {
323 		rb_erase(&p->core_node, &rq->core_tree);
324 		RB_CLEAR_NODE(&p->core_node);
325 	}
326 
327 	/*
328 	 * Migrating the last task off the cpu, with the cpu in forced idle
329 	 * state. Reschedule to create an accounting edge for forced idle,
330 	 * and re-examine whether the core is still in forced idle state.
331 	 */
332 	if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 &&
333 	    rq->core->core_forceidle_count && rq->curr == rq->idle)
334 		resched_curr(rq);
335 }
336 
337 static int sched_task_is_throttled(struct task_struct *p, int cpu)
338 {
339 	if (p->sched_class->task_is_throttled)
340 		return p->sched_class->task_is_throttled(p, cpu);
341 
342 	return 0;
343 }
344 
345 static struct task_struct *sched_core_next(struct task_struct *p, unsigned long cookie)
346 {
347 	struct rb_node *node = &p->core_node;
348 	int cpu = task_cpu(p);
349 
350 	do {
351 		node = rb_next(node);
352 		if (!node)
353 			return NULL;
354 
355 		p = __node_2_sc(node);
356 		if (p->core_cookie != cookie)
357 			return NULL;
358 
359 	} while (sched_task_is_throttled(p, cpu));
360 
361 	return p;
362 }
363 
364 /*
365  * Find left-most (aka, highest priority) and unthrottled task matching @cookie.
366  * If no suitable task is found, NULL will be returned.
367  */
368 static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie)
369 {
370 	struct task_struct *p;
371 	struct rb_node *node;
372 
373 	node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp);
374 	if (!node)
375 		return NULL;
376 
377 	p = __node_2_sc(node);
378 	if (!sched_task_is_throttled(p, rq->cpu))
379 		return p;
380 
381 	return sched_core_next(p, cookie);
382 }
383 
384 /*
385  * Magic required such that:
386  *
387  *	raw_spin_rq_lock(rq);
388  *	...
389  *	raw_spin_rq_unlock(rq);
390  *
391  * ends up locking and unlocking the _same_ lock, and all CPUs
392  * always agree on what rq has what lock.
393  *
394  * XXX entirely possible to selectively enable cores, don't bother for now.
395  */
396 
397 static DEFINE_MUTEX(sched_core_mutex);
398 static atomic_t sched_core_count;
399 static struct cpumask sched_core_mask;
400 
401 static void sched_core_lock(int cpu, unsigned long *flags)
402 	__context_unsafe(/* acquires multiple */)
403 	__acquires(&runqueues.__lock) /* overapproximation */
404 {
405 	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
406 	int t, i = 0;
407 
408 	local_irq_save(*flags);
409 	for_each_cpu(t, smt_mask)
410 		raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);
411 }
412 
413 static void sched_core_unlock(int cpu, unsigned long *flags)
414 	__context_unsafe(/* releases multiple */)
415 	__releases(&runqueues.__lock) /* overapproximation */
416 {
417 	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
418 	int t;
419 
420 	for_each_cpu(t, smt_mask)
421 		raw_spin_unlock(&cpu_rq(t)->__lock);
422 	local_irq_restore(*flags);
423 }
424 
425 static void __sched_core_flip(bool enabled)
426 {
427 	unsigned long flags;
428 	int cpu, t;
429 
430 	cpus_read_lock();
431 
432 	/*
433 	 * Toggle the online cores, one by one.
434 	 */
435 	cpumask_copy(&sched_core_mask, cpu_online_mask);
436 	for_each_cpu(cpu, &sched_core_mask) {
437 		const struct cpumask *smt_mask = cpu_smt_mask(cpu);
438 
439 		sched_core_lock(cpu, &flags);
440 
441 		for_each_cpu(t, smt_mask)
442 			cpu_rq(t)->core_enabled = enabled;
443 
444 		cpu_rq(cpu)->core->core_forceidle_start = 0;
445 
446 		sched_core_unlock(cpu, &flags);
447 
448 		cpumask_andnot(&sched_core_mask, &sched_core_mask, smt_mask);
449 	}
450 
451 	/*
452 	 * Toggle the offline CPUs.
453 	 */
454 	for_each_cpu_andnot(cpu, cpu_possible_mask, cpu_online_mask)
455 		cpu_rq(cpu)->core_enabled = enabled;
456 
457 	cpus_read_unlock();
458 }
459 
460 static void sched_core_assert_empty(void)
461 {
462 	int cpu;
463 
464 	for_each_possible_cpu(cpu)
465 		WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree));
466 }
467 
468 static void __sched_core_enable(void)
469 {
470 	static_branch_enable(&__sched_core_enabled);
471 	/*
472 	 * Ensure all previous instances of raw_spin_rq_*lock() have finished
473 	 * and future ones will observe !sched_core_disabled().
474 	 */
475 	synchronize_rcu();
476 	__sched_core_flip(true);
477 	sched_core_assert_empty();
478 }
479 
480 static void __sched_core_disable(void)
481 {
482 	sched_core_assert_empty();
483 	__sched_core_flip(false);
484 	static_branch_disable(&__sched_core_enabled);
485 }
486 
487 void sched_core_get(void)
488 {
489 	if (atomic_inc_not_zero(&sched_core_count))
490 		return;
491 
492 	mutex_lock(&sched_core_mutex);
493 	if (!atomic_read(&sched_core_count))
494 		__sched_core_enable();
495 
496 	smp_mb__before_atomic();
497 	atomic_inc(&sched_core_count);
498 	mutex_unlock(&sched_core_mutex);
499 }
500 
501 static void __sched_core_put(struct work_struct *work)
502 {
503 	if (atomic_dec_and_mutex_lock(&sched_core_count, &sched_core_mutex)) {
504 		__sched_core_disable();
505 		mutex_unlock(&sched_core_mutex);
506 	}
507 }
508 
509 void sched_core_put(void)
510 {
511 	static DECLARE_WORK(_work, __sched_core_put);
512 
513 	/*
514 	 * "There can be only one"
515 	 *
516 	 * Either this is the last one, or we don't actually need to do any
517 	 * 'work'. If it is the last *again*, we rely on
518 	 * WORK_STRUCT_PENDING_BIT.
519 	 */
520 	if (!atomic_add_unless(&sched_core_count, -1, 1))
521 		schedule_work(&_work);
522 }
523 
524 #else /* !CONFIG_SCHED_CORE: */
525 
526 static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { }
527 static inline void
528 sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { }
529 
530 #endif /* !CONFIG_SCHED_CORE */
531 
532 /* need a wrapper since we may need to trace from modules */
533 EXPORT_TRACEPOINT_SYMBOL(sched_set_state_tp);
534 
535 /* Call via the helper macro trace_set_current_state. */
536 void __trace_set_current_state(int state_value)
537 {
538 	trace_sched_set_state_tp(current, state_value);
539 }
540 EXPORT_SYMBOL(__trace_set_current_state);
541 
542 /*
543  * Serialization rules:
544  *
545  * Lock order:
546  *
547  *   p->pi_lock
548  *     rq->lock
549  *       hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
550  *
551  *  rq1->lock
552  *    rq2->lock  where: rq1 < rq2
553  *
554  * Regular state:
555  *
556  * Normal scheduling state is serialized by rq->lock. __schedule() takes the
557  * local CPU's rq->lock, it optionally removes the task from the runqueue and
558  * always looks at the local rq data structures to find the most eligible task
559  * to run next.
560  *
561  * Task enqueue is also under rq->lock, possibly taken from another CPU.
562  * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
563  * the local CPU to avoid bouncing the runqueue state around [ see
564  * ttwu_queue_wakelist() ]
565  *
566  * Task wakeup, specifically wakeups that involve migration, are horribly
567  * complicated to avoid having to take two rq->locks.
568  *
569  * Special state:
570  *
571  * System-calls and anything external will use task_rq_lock() which acquires
572  * both p->pi_lock and rq->lock. As a consequence the state they change is
573  * stable while holding either lock:
574  *
575  *  - sched_setaffinity()/
576  *    set_cpus_allowed_ptr():	p->cpus_ptr, p->nr_cpus_allowed
577  *  - set_user_nice():		p->se.load, p->*prio
578  *  - __sched_setscheduler():	p->sched_class, p->policy, p->*prio,
579  *				p->se.load, p->rt_priority,
580  *				p->dl.dl_{runtime, deadline, period, flags, bw, density}
581  *  - sched_setnuma():		p->numa_preferred_nid
582  *  - sched_move_task():	p->sched_task_group
583  *  - uclamp_update_active()	p->uclamp*
584  *
585  * p->state <- TASK_*:
586  *
587  *   is changed locklessly using set_current_state(), __set_current_state() or
588  *   set_special_state(), see their respective comments, or by
589  *   try_to_wake_up(). This latter uses p->pi_lock to serialize against
590  *   concurrent self.
591  *
592  * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
593  *
594  *   is set by activate_task() and cleared by deactivate_task()/block_task(),
595  *   under rq->lock. Non-zero indicates the task is runnable, the special
596  *   ON_RQ_MIGRATING state is used for migration without holding both
597  *   rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
598  *
599  *   Additionally it is possible to be ->on_rq but still be considered not
600  *   runnable when p->se.sched_delayed is true. These tasks are on the runqueue
601  *   but will be dequeued as soon as they get picked again. See the
602  *   task_is_runnable() helper.
603  *
604  * p->on_cpu <- { 0, 1 }:
605  *
606  *   is set by prepare_task() and cleared by finish_task() such that it will be
607  *   set before p is scheduled-in and cleared after p is scheduled-out, both
608  *   under rq->lock. Non-zero indicates the task is running on its CPU.
609  *
610  *   [ The astute reader will observe that it is possible for two tasks on one
611  *     CPU to have ->on_cpu = 1 at the same time. ]
612  *
613  * task_cpu(p): is changed by set_task_cpu(), the rules are:
614  *
615  *  - Don't call set_task_cpu() on a blocked task:
616  *
617  *    We don't care what CPU we're not running on, this simplifies hotplug,
618  *    the CPU assignment of blocked tasks isn't required to be valid.
619  *
620  *  - for try_to_wake_up(), called under p->pi_lock:
621  *
622  *    This allows try_to_wake_up() to only take one rq->lock, see its comment.
623  *
624  *  - for migration called under rq->lock:
625  *    [ see task_on_rq_migrating() in task_rq_lock() ]
626  *
627  *    o move_queued_task()
628  *    o detach_task()
629  *
630  *  - for migration called under double_rq_lock():
631  *
632  *    o __migrate_swap_task()
633  *    o push_rt_task() / pull_rt_task()
634  *    o push_dl_task() / pull_dl_task()
635  *    o dl_task_offline_migration()
636  *
637  */
638 
639 void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
640 	__context_unsafe()
641 {
642 	raw_spinlock_t *lock;
643 
644 	/* Matches synchronize_rcu() in __sched_core_enable() */
645 	preempt_disable();
646 	if (sched_core_disabled()) {
647 		raw_spin_lock_nested(&rq->__lock, subclass);
648 		/* preempt_count *MUST* be > 1 */
649 		preempt_enable_no_resched();
650 		return;
651 	}
652 
653 	for (;;) {
654 		lock = __rq_lockp(rq);
655 		raw_spin_lock_nested(lock, subclass);
656 		if (likely(lock == __rq_lockp(rq))) {
657 			/* preempt_count *MUST* be > 1 */
658 			preempt_enable_no_resched();
659 			return;
660 		}
661 		raw_spin_unlock(lock);
662 	}
663 }
664 
665 bool raw_spin_rq_trylock(struct rq *rq)
666 	__context_unsafe()
667 {
668 	raw_spinlock_t *lock;
669 	bool ret;
670 
671 	/* Matches synchronize_rcu() in __sched_core_enable() */
672 	preempt_disable();
673 	if (sched_core_disabled()) {
674 		ret = raw_spin_trylock(&rq->__lock);
675 		preempt_enable();
676 		return ret;
677 	}
678 
679 	for (;;) {
680 		lock = __rq_lockp(rq);
681 		ret = raw_spin_trylock(lock);
682 		if (!ret || (likely(lock == __rq_lockp(rq)))) {
683 			preempt_enable();
684 			return ret;
685 		}
686 		raw_spin_unlock(lock);
687 	}
688 }
689 
690 void raw_spin_rq_unlock(struct rq *rq)
691 {
692 	raw_spin_unlock(rq_lockp(rq));
693 }
694 
695 /*
696  * double_rq_lock - safely lock two runqueues
697  */
698 void double_rq_lock(struct rq *rq1, struct rq *rq2)
699 {
700 	lockdep_assert_irqs_disabled();
701 
702 	if (rq_order_less(rq2, rq1))
703 		swap(rq1, rq2);
704 
705 	raw_spin_rq_lock(rq1);
706 	if (__rq_lockp(rq1) != __rq_lockp(rq2))
707 		raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);
708 	else
709 		__acquire_ctx_lock(__rq_lockp(rq2)); /* fake acquire */
710 
711 	double_rq_clock_clear_update(rq1, rq2);
712 }
713 
714 /*
715  * ___task_rq_lock - lock the rq @p resides on.
716  */
717 struct rq *___task_rq_lock(struct task_struct *p, struct rq_flags *rf)
718 {
719 	struct rq *rq;
720 
721 	lockdep_assert_held(&p->pi_lock);
722 
723 	for (;;) {
724 		rq = task_rq(p);
725 		raw_spin_rq_lock(rq);
726 		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
727 			rq_pin_lock(rq, rf);
728 			return rq;
729 		}
730 		raw_spin_rq_unlock(rq);
731 
732 		while (unlikely(task_on_rq_migrating(p)))
733 			cpu_relax();
734 	}
735 }
736 
737 /*
738  * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
739  */
740 struct rq *_task_rq_lock(struct task_struct *p, struct rq_flags *rf)
741 {
742 	struct rq *rq;
743 
744 	for (;;) {
745 		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
746 		rq = task_rq(p);
747 		raw_spin_rq_lock(rq);
748 		/*
749 		 *	move_queued_task()		task_rq_lock()
750 		 *
751 		 *	ACQUIRE (rq->lock)
752 		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
753 		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
754 		 *	[S] ->cpu = new_cpu		[L] task_rq()
755 		 *					[L] ->on_rq
756 		 *	RELEASE (rq->lock)
757 		 *
758 		 * If we observe the old CPU in task_rq_lock(), the acquire of
759 		 * the old rq->lock will fully serialize against the stores.
760 		 *
761 		 * If we observe the new CPU in task_rq_lock(), the address
762 		 * dependency headed by '[L] rq = task_rq()' and the acquire
763 		 * will pair with the WMB to ensure we then also see migrating.
764 		 */
765 		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
766 			rq_pin_lock(rq, rf);
767 			return rq;
768 		}
769 		raw_spin_rq_unlock(rq);
770 		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
771 
772 		while (unlikely(task_on_rq_migrating(p)))
773 			cpu_relax();
774 	}
775 }
776 
777 /*
778  * RQ-clock updating methods:
779  */
780 
781 /* Use CONFIG_PARAVIRT as this will avoid more #ifdef in arch code. */
782 #ifdef CONFIG_PARAVIRT
783 struct static_key paravirt_steal_rq_enabled;
784 #endif
785 
786 static void update_rq_clock_task(struct rq *rq, s64 delta)
787 {
788 /*
789  * In theory, the compile should just see 0 here, and optimize out the call
790  * to sched_rt_avg_update. But I don't trust it...
791  */
792 	s64 __maybe_unused steal = 0, irq_delta = 0;
793 
794 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
795 	if (irqtime_enabled()) {
796 		irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
797 
798 		/*
799 		 * Since irq_time is only updated on {soft,}irq_exit, we might run into
800 		 * this case when a previous update_rq_clock() happened inside a
801 		 * {soft,}IRQ region.
802 		 *
803 		 * When this happens, we stop ->clock_task and only update the
804 		 * prev_irq_time stamp to account for the part that fit, so that a next
805 		 * update will consume the rest. This ensures ->clock_task is
806 		 * monotonic.
807 		 *
808 		 * It does however cause some slight miss-attribution of {soft,}IRQ
809 		 * time, a more accurate solution would be to update the irq_time using
810 		 * the current rq->clock timestamp, except that would require using
811 		 * atomic ops.
812 		 */
813 		if (irq_delta > delta)
814 			irq_delta = delta;
815 
816 		rq->prev_irq_time += irq_delta;
817 		delta -= irq_delta;
818 		delayacct_irq(rq->curr, irq_delta);
819 	}
820 #endif
821 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
822 	if (static_key_false((&paravirt_steal_rq_enabled))) {
823 		u64 prev_steal;
824 
825 		steal = prev_steal = paravirt_steal_clock(cpu_of(rq));
826 		steal -= rq->prev_steal_time_rq;
827 
828 		if (unlikely(steal > delta))
829 			steal = delta;
830 
831 		rq->prev_steal_time_rq = prev_steal;
832 		delta -= steal;
833 	}
834 #endif
835 
836 	rq->clock_task += delta;
837 
838 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
839 	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
840 		update_irq_load_avg(rq, irq_delta + steal);
841 #endif
842 	update_rq_clock_pelt(rq, delta);
843 }
844 
845 void update_rq_clock(struct rq *rq)
846 {
847 	s64 delta;
848 	u64 clock;
849 
850 	lockdep_assert_rq_held(rq);
851 
852 	if (rq->clock_update_flags & RQCF_ACT_SKIP)
853 		return;
854 
855 	if (sched_feat(WARN_DOUBLE_CLOCK))
856 		WARN_ON_ONCE(rq->clock_update_flags & RQCF_UPDATED);
857 	rq->clock_update_flags |= RQCF_UPDATED;
858 
859 	clock = sched_clock_cpu(cpu_of(rq));
860 	scx_rq_clock_update(rq, clock);
861 
862 	delta = clock - rq->clock;
863 	if (delta < 0)
864 		return;
865 	rq->clock += delta;
866 
867 	update_rq_clock_task(rq, delta);
868 }
869 
870 #ifdef CONFIG_SCHED_HRTICK
871 /*
872  * Use HR-timers to deliver accurate preemption points.
873  */
874 
875 static void hrtick_clear(struct rq *rq)
876 {
877 	if (hrtimer_active(&rq->hrtick_timer))
878 		hrtimer_cancel(&rq->hrtick_timer);
879 }
880 
881 /*
882  * High-resolution timer tick.
883  * Runs from hardirq context with interrupts disabled.
884  */
885 static enum hrtimer_restart hrtick(struct hrtimer *timer)
886 {
887 	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
888 	struct rq_flags rf;
889 
890 	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
891 
892 	rq_lock(rq, &rf);
893 	update_rq_clock(rq);
894 	rq->donor->sched_class->task_tick(rq, rq->donor, 1);
895 	rq_unlock(rq, &rf);
896 
897 	return HRTIMER_NORESTART;
898 }
899 
900 static void __hrtick_restart(struct rq *rq)
901 {
902 	struct hrtimer *timer = &rq->hrtick_timer;
903 	ktime_t time = rq->hrtick_time;
904 
905 	hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
906 }
907 
908 /*
909  * called from hardirq (IPI) context
910  */
911 static void __hrtick_start(void *arg)
912 {
913 	struct rq *rq = arg;
914 	struct rq_flags rf;
915 
916 	rq_lock(rq, &rf);
917 	__hrtick_restart(rq);
918 	rq_unlock(rq, &rf);
919 }
920 
921 /*
922  * Called to set the hrtick timer state.
923  *
924  * called with rq->lock held and IRQs disabled
925  */
926 void hrtick_start(struct rq *rq, u64 delay)
927 {
928 	struct hrtimer *timer = &rq->hrtick_timer;
929 	s64 delta;
930 
931 	/*
932 	 * Don't schedule slices shorter than 10000ns, that just
933 	 * doesn't make sense and can cause timer DoS.
934 	 */
935 	delta = max_t(s64, delay, 10000LL);
936 	rq->hrtick_time = ktime_add_ns(hrtimer_cb_get_time(timer), delta);
937 
938 	if (rq == this_rq())
939 		__hrtick_restart(rq);
940 	else
941 		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
942 }
943 
944 static void hrtick_rq_init(struct rq *rq)
945 {
946 	INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
947 	hrtimer_setup(&rq->hrtick_timer, hrtick, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
948 }
949 #else /* !CONFIG_SCHED_HRTICK: */
950 static inline void hrtick_clear(struct rq *rq)
951 {
952 }
953 
954 static inline void hrtick_rq_init(struct rq *rq)
955 {
956 }
957 #endif /* !CONFIG_SCHED_HRTICK */
958 
959 /*
960  * try_cmpxchg based fetch_or() macro so it works for different integer types:
961  */
962 #define fetch_or(ptr, mask)						\
963 	({								\
964 		typeof(ptr) _ptr = (ptr);				\
965 		typeof(mask) _mask = (mask);				\
966 		typeof(*_ptr) _val = *_ptr;				\
967 									\
968 		do {							\
969 		} while (!try_cmpxchg(_ptr, &_val, _val | _mask));	\
970 	_val;								\
971 })
972 
973 #ifdef TIF_POLLING_NRFLAG
974 /*
975  * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
976  * this avoids any races wrt polling state changes and thereby avoids
977  * spurious IPIs.
978  */
979 static inline bool set_nr_and_not_polling(struct thread_info *ti, int tif)
980 {
981 	return !(fetch_or(&ti->flags, 1 << tif) & _TIF_POLLING_NRFLAG);
982 }
983 
984 /*
985  * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
986  *
987  * If this returns true, then the idle task promises to call
988  * sched_ttwu_pending() and reschedule soon.
989  */
990 static bool set_nr_if_polling(struct task_struct *p)
991 {
992 	struct thread_info *ti = task_thread_info(p);
993 	typeof(ti->flags) val = READ_ONCE(ti->flags);
994 
995 	do {
996 		if (!(val & _TIF_POLLING_NRFLAG))
997 			return false;
998 		if (val & _TIF_NEED_RESCHED)
999 			return true;
1000 	} while (!try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED));
1001 
1002 	return true;
1003 }
1004 
1005 #else
1006 static inline bool set_nr_and_not_polling(struct thread_info *ti, int tif)
1007 {
1008 	set_ti_thread_flag(ti, tif);
1009 	return true;
1010 }
1011 
1012 static inline bool set_nr_if_polling(struct task_struct *p)
1013 {
1014 	return false;
1015 }
1016 #endif
1017 
1018 static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
1019 {
1020 	struct wake_q_node *node = &task->wake_q;
1021 
1022 	/*
1023 	 * Atomically grab the task, if ->wake_q is !nil already it means
1024 	 * it's already queued (either by us or someone else) and will get the
1025 	 * wakeup due to that.
1026 	 *
1027 	 * In order to ensure that a pending wakeup will observe our pending
1028 	 * state, even in the failed case, an explicit smp_mb() must be used.
1029 	 */
1030 	smp_mb__before_atomic();
1031 	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
1032 		return false;
1033 
1034 	/*
1035 	 * The head is context local, there can be no concurrency.
1036 	 */
1037 	*head->lastp = node;
1038 	head->lastp = &node->next;
1039 	return true;
1040 }
1041 
1042 /**
1043  * wake_q_add() - queue a wakeup for 'later' waking.
1044  * @head: the wake_q_head to add @task to
1045  * @task: the task to queue for 'later' wakeup
1046  *
1047  * Queue a task for later wakeup, most likely by the wake_up_q() call in the
1048  * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
1049  * instantly.
1050  *
1051  * This function must be used as-if it were wake_up_process(); IOW the task
1052  * must be ready to be woken at this location.
1053  */
1054 void wake_q_add(struct wake_q_head *head, struct task_struct *task)
1055 {
1056 	if (__wake_q_add(head, task))
1057 		get_task_struct(task);
1058 }
1059 
1060 /**
1061  * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
1062  * @head: the wake_q_head to add @task to
1063  * @task: the task to queue for 'later' wakeup
1064  *
1065  * Queue a task for later wakeup, most likely by the wake_up_q() call in the
1066  * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
1067  * instantly.
1068  *
1069  * This function must be used as-if it were wake_up_process(); IOW the task
1070  * must be ready to be woken at this location.
1071  *
1072  * This function is essentially a task-safe equivalent to wake_q_add(). Callers
1073  * that already hold reference to @task can call the 'safe' version and trust
1074  * wake_q to do the right thing depending whether or not the @task is already
1075  * queued for wakeup.
1076  */
1077 void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
1078 {
1079 	if (!__wake_q_add(head, task))
1080 		put_task_struct(task);
1081 }
1082 
1083 void wake_up_q(struct wake_q_head *head)
1084 {
1085 	struct wake_q_node *node = head->first;
1086 
1087 	while (node != WAKE_Q_TAIL) {
1088 		struct task_struct *task;
1089 
1090 		task = container_of(node, struct task_struct, wake_q);
1091 		node = node->next;
1092 		/* pairs with cmpxchg_relaxed() in __wake_q_add() */
1093 		WRITE_ONCE(task->wake_q.next, NULL);
1094 		/* Task can safely be re-inserted now. */
1095 
1096 		/*
1097 		 * wake_up_process() executes a full barrier, which pairs with
1098 		 * the queueing in wake_q_add() so as not to miss wakeups.
1099 		 */
1100 		wake_up_process(task);
1101 		put_task_struct(task);
1102 	}
1103 }
1104 
1105 /*
1106  * resched_curr - mark rq's current task 'to be rescheduled now'.
1107  *
1108  * On UP this means the setting of the need_resched flag, on SMP it
1109  * might also involve a cross-CPU call to trigger the scheduler on
1110  * the target CPU.
1111  */
1112 static void __resched_curr(struct rq *rq, int tif)
1113 {
1114 	struct task_struct *curr = rq->curr;
1115 	struct thread_info *cti = task_thread_info(curr);
1116 	int cpu;
1117 
1118 	lockdep_assert_rq_held(rq);
1119 
1120 	/*
1121 	 * Always immediately preempt the idle task; no point in delaying doing
1122 	 * actual work.
1123 	 */
1124 	if (is_idle_task(curr) && tif == TIF_NEED_RESCHED_LAZY)
1125 		tif = TIF_NEED_RESCHED;
1126 
1127 	if (cti->flags & ((1 << tif) | _TIF_NEED_RESCHED))
1128 		return;
1129 
1130 	cpu = cpu_of(rq);
1131 
1132 	trace_sched_set_need_resched_tp(curr, cpu, tif);
1133 	if (cpu == smp_processor_id()) {
1134 		set_ti_thread_flag(cti, tif);
1135 		if (tif == TIF_NEED_RESCHED)
1136 			set_preempt_need_resched();
1137 		return;
1138 	}
1139 
1140 	if (set_nr_and_not_polling(cti, tif)) {
1141 		if (tif == TIF_NEED_RESCHED)
1142 			smp_send_reschedule(cpu);
1143 	} else {
1144 		trace_sched_wake_idle_without_ipi(cpu);
1145 	}
1146 }
1147 
1148 void __trace_set_need_resched(struct task_struct *curr, int tif)
1149 {
1150 	trace_sched_set_need_resched_tp(curr, smp_processor_id(), tif);
1151 }
1152 EXPORT_SYMBOL_GPL(__trace_set_need_resched);
1153 
1154 void resched_curr(struct rq *rq)
1155 {
1156 	__resched_curr(rq, TIF_NEED_RESCHED);
1157 }
1158 
1159 #ifdef CONFIG_PREEMPT_DYNAMIC
1160 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_preempt_lazy);
1161 static __always_inline bool dynamic_preempt_lazy(void)
1162 {
1163 	return static_branch_unlikely(&sk_dynamic_preempt_lazy);
1164 }
1165 #else
1166 static __always_inline bool dynamic_preempt_lazy(void)
1167 {
1168 	return IS_ENABLED(CONFIG_PREEMPT_LAZY);
1169 }
1170 #endif
1171 
1172 static __always_inline int get_lazy_tif_bit(void)
1173 {
1174 	if (dynamic_preempt_lazy())
1175 		return TIF_NEED_RESCHED_LAZY;
1176 
1177 	return TIF_NEED_RESCHED;
1178 }
1179 
1180 void resched_curr_lazy(struct rq *rq)
1181 {
1182 	__resched_curr(rq, get_lazy_tif_bit());
1183 }
1184 
1185 void resched_cpu(int cpu)
1186 {
1187 	struct rq *rq = cpu_rq(cpu);
1188 	unsigned long flags;
1189 
1190 	raw_spin_rq_lock_irqsave(rq, flags);
1191 	if (cpu_online(cpu) || cpu == smp_processor_id())
1192 		resched_curr(rq);
1193 	raw_spin_rq_unlock_irqrestore(rq, flags);
1194 }
1195 
1196 #ifdef CONFIG_NO_HZ_COMMON
1197 /*
1198  * In the semi idle case, use the nearest busy CPU for migrating timers
1199  * from an idle CPU.  This is good for power-savings.
1200  *
1201  * We don't do similar optimization for completely idle system, as
1202  * selecting an idle CPU will add more delays to the timers than intended
1203  * (as that CPU's timer base may not be up to date wrt jiffies etc).
1204  */
1205 int get_nohz_timer_target(void)
1206 {
1207 	int i, cpu = smp_processor_id(), default_cpu = -1;
1208 	struct sched_domain *sd;
1209 	const struct cpumask *hk_mask;
1210 
1211 	if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE)) {
1212 		if (!idle_cpu(cpu))
1213 			return cpu;
1214 		default_cpu = cpu;
1215 	}
1216 
1217 	hk_mask = housekeeping_cpumask(HK_TYPE_KERNEL_NOISE);
1218 
1219 	guard(rcu)();
1220 
1221 	for_each_domain(cpu, sd) {
1222 		for_each_cpu_and(i, sched_domain_span(sd), hk_mask) {
1223 			if (cpu == i)
1224 				continue;
1225 
1226 			if (!idle_cpu(i))
1227 				return i;
1228 		}
1229 	}
1230 
1231 	if (default_cpu == -1)
1232 		default_cpu = housekeeping_any_cpu(HK_TYPE_KERNEL_NOISE);
1233 
1234 	return default_cpu;
1235 }
1236 
1237 /*
1238  * When add_timer_on() enqueues a timer into the timer wheel of an
1239  * idle CPU then this timer might expire before the next timer event
1240  * which is scheduled to wake up that CPU. In case of a completely
1241  * idle system the next event might even be infinite time into the
1242  * future. wake_up_idle_cpu() ensures that the CPU is woken up and
1243  * leaves the inner idle loop so the newly added timer is taken into
1244  * account when the CPU goes back to idle and evaluates the timer
1245  * wheel for the next timer event.
1246  */
1247 static void wake_up_idle_cpu(int cpu)
1248 {
1249 	struct rq *rq = cpu_rq(cpu);
1250 
1251 	if (cpu == smp_processor_id())
1252 		return;
1253 
1254 	/*
1255 	 * Set TIF_NEED_RESCHED and send an IPI if in the non-polling
1256 	 * part of the idle loop. This forces an exit from the idle loop
1257 	 * and a round trip to schedule(). Now this could be optimized
1258 	 * because a simple new idle loop iteration is enough to
1259 	 * re-evaluate the next tick. Provided some re-ordering of tick
1260 	 * nohz functions that would need to follow TIF_NR_POLLING
1261 	 * clearing:
1262 	 *
1263 	 * - On most architectures, a simple fetch_or on ti::flags with a
1264 	 *   "0" value would be enough to know if an IPI needs to be sent.
1265 	 *
1266 	 * - x86 needs to perform a last need_resched() check between
1267 	 *   monitor and mwait which doesn't take timers into account.
1268 	 *   There a dedicated TIF_TIMER flag would be required to
1269 	 *   fetch_or here and be checked along with TIF_NEED_RESCHED
1270 	 *   before mwait().
1271 	 *
1272 	 * However, remote timer enqueue is not such a frequent event
1273 	 * and testing of the above solutions didn't appear to report
1274 	 * much benefits.
1275 	 */
1276 	if (set_nr_and_not_polling(task_thread_info(rq->idle), TIF_NEED_RESCHED))
1277 		smp_send_reschedule(cpu);
1278 	else
1279 		trace_sched_wake_idle_without_ipi(cpu);
1280 }
1281 
1282 static bool wake_up_full_nohz_cpu(int cpu)
1283 {
1284 	/*
1285 	 * We just need the target to call irq_exit() and re-evaluate
1286 	 * the next tick. The nohz full kick at least implies that.
1287 	 * If needed we can still optimize that later with an
1288 	 * empty IRQ.
1289 	 */
1290 	if (cpu_is_offline(cpu))
1291 		return true;  /* Don't try to wake offline CPUs. */
1292 	if (tick_nohz_full_cpu(cpu)) {
1293 		if (cpu != smp_processor_id() ||
1294 		    tick_nohz_tick_stopped())
1295 			tick_nohz_full_kick_cpu(cpu);
1296 		return true;
1297 	}
1298 
1299 	return false;
1300 }
1301 
1302 /*
1303  * Wake up the specified CPU.  If the CPU is going offline, it is the
1304  * caller's responsibility to deal with the lost wakeup, for example,
1305  * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
1306  */
1307 void wake_up_nohz_cpu(int cpu)
1308 {
1309 	if (!wake_up_full_nohz_cpu(cpu))
1310 		wake_up_idle_cpu(cpu);
1311 }
1312 
1313 static void nohz_csd_func(void *info)
1314 {
1315 	struct rq *rq = info;
1316 	int cpu = cpu_of(rq);
1317 	unsigned int flags;
1318 
1319 	/*
1320 	 * Release the rq::nohz_csd.
1321 	 */
1322 	flags = atomic_fetch_andnot(NOHZ_KICK_MASK | NOHZ_NEWILB_KICK, nohz_flags(cpu));
1323 	WARN_ON(!(flags & NOHZ_KICK_MASK));
1324 
1325 	rq->idle_balance = idle_cpu(cpu);
1326 	if (rq->idle_balance) {
1327 		rq->nohz_idle_balance = flags;
1328 		__raise_softirq_irqoff(SCHED_SOFTIRQ);
1329 	}
1330 }
1331 
1332 #endif /* CONFIG_NO_HZ_COMMON */
1333 
1334 #ifdef CONFIG_NO_HZ_FULL
1335 static inline bool __need_bw_check(struct rq *rq, struct task_struct *p)
1336 {
1337 	if (rq->nr_running != 1)
1338 		return false;
1339 
1340 	if (p->sched_class != &fair_sched_class)
1341 		return false;
1342 
1343 	if (!task_on_rq_queued(p))
1344 		return false;
1345 
1346 	return true;
1347 }
1348 
1349 bool sched_can_stop_tick(struct rq *rq)
1350 {
1351 	int fifo_nr_running;
1352 
1353 	/* Deadline tasks, even if single, need the tick */
1354 	if (rq->dl.dl_nr_running)
1355 		return false;
1356 
1357 	/*
1358 	 * If there are more than one RR tasks, we need the tick to affect the
1359 	 * actual RR behaviour.
1360 	 */
1361 	if (rq->rt.rr_nr_running) {
1362 		if (rq->rt.rr_nr_running == 1)
1363 			return true;
1364 		else
1365 			return false;
1366 	}
1367 
1368 	/*
1369 	 * If there's no RR tasks, but FIFO tasks, we can skip the tick, no
1370 	 * forced preemption between FIFO tasks.
1371 	 */
1372 	fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
1373 	if (fifo_nr_running)
1374 		return true;
1375 
1376 	/*
1377 	 * If there are no DL,RR/FIFO tasks, there must only be CFS or SCX tasks
1378 	 * left. For CFS, if there's more than one we need the tick for
1379 	 * involuntary preemption. For SCX, ask.
1380 	 */
1381 	if (scx_enabled() && !scx_can_stop_tick(rq))
1382 		return false;
1383 
1384 	if (rq->cfs.h_nr_queued > 1)
1385 		return false;
1386 
1387 	/*
1388 	 * If there is one task and it has CFS runtime bandwidth constraints
1389 	 * and it's on the cpu now we don't want to stop the tick.
1390 	 * This check prevents clearing the bit if a newly enqueued task here is
1391 	 * dequeued by migrating while the constrained task continues to run.
1392 	 * E.g. going from 2->1 without going through pick_next_task().
1393 	 */
1394 	if (__need_bw_check(rq, rq->curr)) {
1395 		if (cfs_task_bw_constrained(rq->curr))
1396 			return false;
1397 	}
1398 
1399 	return true;
1400 }
1401 #endif /* CONFIG_NO_HZ_FULL */
1402 
1403 #if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_FAIR_GROUP_SCHED)
1404 /*
1405  * Iterate task_group tree rooted at *from, calling @down when first entering a
1406  * node and @up when leaving it for the final time.
1407  *
1408  * Caller must hold rcu_lock or sufficient equivalent.
1409  */
1410 int walk_tg_tree_from(struct task_group *from,
1411 			     tg_visitor down, tg_visitor up, void *data)
1412 {
1413 	struct task_group *parent, *child;
1414 	int ret;
1415 
1416 	parent = from;
1417 
1418 down:
1419 	ret = (*down)(parent, data);
1420 	if (ret)
1421 		goto out;
1422 	list_for_each_entry_rcu(child, &parent->children, siblings) {
1423 		parent = child;
1424 		goto down;
1425 
1426 up:
1427 		continue;
1428 	}
1429 	ret = (*up)(parent, data);
1430 	if (ret || parent == from)
1431 		goto out;
1432 
1433 	child = parent;
1434 	parent = parent->parent;
1435 	if (parent)
1436 		goto up;
1437 out:
1438 	return ret;
1439 }
1440 
1441 int tg_nop(struct task_group *tg, void *data)
1442 {
1443 	return 0;
1444 }
1445 #endif
1446 
1447 void set_load_weight(struct task_struct *p, bool update_load)
1448 {
1449 	int prio = p->static_prio - MAX_RT_PRIO;
1450 	struct load_weight lw;
1451 
1452 	if (task_has_idle_policy(p)) {
1453 		lw.weight = scale_load(WEIGHT_IDLEPRIO);
1454 		lw.inv_weight = WMULT_IDLEPRIO;
1455 	} else {
1456 		lw.weight = scale_load(sched_prio_to_weight[prio]);
1457 		lw.inv_weight = sched_prio_to_wmult[prio];
1458 	}
1459 
1460 	/*
1461 	 * SCHED_OTHER tasks have to update their load when changing their
1462 	 * weight
1463 	 */
1464 	if (update_load && p->sched_class->reweight_task)
1465 		p->sched_class->reweight_task(task_rq(p), p, &lw);
1466 	else
1467 		p->se.load = lw;
1468 }
1469 
1470 #ifdef CONFIG_UCLAMP_TASK
1471 /*
1472  * Serializes updates of utilization clamp values
1473  *
1474  * The (slow-path) user-space triggers utilization clamp value updates which
1475  * can require updates on (fast-path) scheduler's data structures used to
1476  * support enqueue/dequeue operations.
1477  * While the per-CPU rq lock protects fast-path update operations, user-space
1478  * requests are serialized using a mutex to reduce the risk of conflicting
1479  * updates or API abuses.
1480  */
1481 static __maybe_unused DEFINE_MUTEX(uclamp_mutex);
1482 
1483 /* Max allowed minimum utilization */
1484 static unsigned int __maybe_unused sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;
1485 
1486 /* Max allowed maximum utilization */
1487 static unsigned int __maybe_unused sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;
1488 
1489 /*
1490  * By default RT tasks run at the maximum performance point/capacity of the
1491  * system. Uclamp enforces this by always setting UCLAMP_MIN of RT tasks to
1492  * SCHED_CAPACITY_SCALE.
1493  *
1494  * This knob allows admins to change the default behavior when uclamp is being
1495  * used. In battery powered devices, particularly, running at the maximum
1496  * capacity and frequency will increase energy consumption and shorten the
1497  * battery life.
1498  *
1499  * This knob only affects RT tasks that their uclamp_se->user_defined == false.
1500  *
1501  * This knob will not override the system default sched_util_clamp_min defined
1502  * above.
1503  */
1504 unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE;
1505 
1506 /* All clamps are required to be less or equal than these values */
1507 static struct uclamp_se uclamp_default[UCLAMP_CNT];
1508 
1509 /*
1510  * This static key is used to reduce the uclamp overhead in the fast path. It
1511  * primarily disables the call to uclamp_rq_{inc, dec}() in
1512  * enqueue/dequeue_task().
1513  *
1514  * This allows users to continue to enable uclamp in their kernel config with
1515  * minimum uclamp overhead in the fast path.
1516  *
1517  * As soon as userspace modifies any of the uclamp knobs, the static key is
1518  * enabled, since we have an actual users that make use of uclamp
1519  * functionality.
1520  *
1521  * The knobs that would enable this static key are:
1522  *
1523  *   * A task modifying its uclamp value with sched_setattr().
1524  *   * An admin modifying the sysctl_sched_uclamp_{min, max} via procfs.
1525  *   * An admin modifying the cgroup cpu.uclamp.{min, max}
1526  */
1527 DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);
1528 
1529 static inline unsigned int
1530 uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
1531 		  unsigned int clamp_value)
1532 {
1533 	/*
1534 	 * Avoid blocked utilization pushing up the frequency when we go
1535 	 * idle (which drops the max-clamp) by retaining the last known
1536 	 * max-clamp.
1537 	 */
1538 	if (clamp_id == UCLAMP_MAX) {
1539 		rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
1540 		return clamp_value;
1541 	}
1542 
1543 	return uclamp_none(UCLAMP_MIN);
1544 }
1545 
1546 static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
1547 				     unsigned int clamp_value)
1548 {
1549 	/* Reset max-clamp retention only on idle exit */
1550 	if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
1551 		return;
1552 
1553 	uclamp_rq_set(rq, clamp_id, clamp_value);
1554 }
1555 
1556 static inline
1557 unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
1558 				   unsigned int clamp_value)
1559 {
1560 	struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
1561 	int bucket_id = UCLAMP_BUCKETS - 1;
1562 
1563 	/*
1564 	 * Since both min and max clamps are max aggregated, find the
1565 	 * top most bucket with tasks in.
1566 	 */
1567 	for ( ; bucket_id >= 0; bucket_id--) {
1568 		if (!bucket[bucket_id].tasks)
1569 			continue;
1570 		return bucket[bucket_id].value;
1571 	}
1572 
1573 	/* No tasks -- default clamp values */
1574 	return uclamp_idle_value(rq, clamp_id, clamp_value);
1575 }
1576 
1577 static void __uclamp_update_util_min_rt_default(struct task_struct *p)
1578 {
1579 	unsigned int default_util_min;
1580 	struct uclamp_se *uc_se;
1581 
1582 	lockdep_assert_held(&p->pi_lock);
1583 
1584 	uc_se = &p->uclamp_req[UCLAMP_MIN];
1585 
1586 	/* Only sync if user didn't override the default */
1587 	if (uc_se->user_defined)
1588 		return;
1589 
1590 	default_util_min = sysctl_sched_uclamp_util_min_rt_default;
1591 	uclamp_se_set(uc_se, default_util_min, false);
1592 }
1593 
1594 static void uclamp_update_util_min_rt_default(struct task_struct *p)
1595 {
1596 	if (!rt_task(p))
1597 		return;
1598 
1599 	/* Protect updates to p->uclamp_* */
1600 	guard(task_rq_lock)(p);
1601 	__uclamp_update_util_min_rt_default(p);
1602 }
1603 
1604 static inline struct uclamp_se
1605 uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
1606 {
1607 	/* Copy by value as we could modify it */
1608 	struct uclamp_se uc_req = p->uclamp_req[clamp_id];
1609 #ifdef CONFIG_UCLAMP_TASK_GROUP
1610 	unsigned int tg_min, tg_max, value;
1611 
1612 	/*
1613 	 * Tasks in autogroups or root task group will be
1614 	 * restricted by system defaults.
1615 	 */
1616 	if (task_group_is_autogroup(task_group(p)))
1617 		return uc_req;
1618 	if (task_group(p) == &root_task_group)
1619 		return uc_req;
1620 
1621 	tg_min = task_group(p)->uclamp[UCLAMP_MIN].value;
1622 	tg_max = task_group(p)->uclamp[UCLAMP_MAX].value;
1623 	value = uc_req.value;
1624 	value = clamp(value, tg_min, tg_max);
1625 	uclamp_se_set(&uc_req, value, false);
1626 #endif
1627 
1628 	return uc_req;
1629 }
1630 
1631 /*
1632  * The effective clamp bucket index of a task depends on, by increasing
1633  * priority:
1634  * - the task specific clamp value, when explicitly requested from userspace
1635  * - the task group effective clamp value, for tasks not either in the root
1636  *   group or in an autogroup
1637  * - the system default clamp value, defined by the sysadmin
1638  */
1639 static inline struct uclamp_se
1640 uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
1641 {
1642 	struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id);
1643 	struct uclamp_se uc_max = uclamp_default[clamp_id];
1644 
1645 	/* System default restrictions always apply */
1646 	if (unlikely(uc_req.value > uc_max.value))
1647 		return uc_max;
1648 
1649 	return uc_req;
1650 }
1651 
1652 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
1653 {
1654 	struct uclamp_se uc_eff;
1655 
1656 	/* Task currently refcounted: use back-annotated (effective) value */
1657 	if (p->uclamp[clamp_id].active)
1658 		return (unsigned long)p->uclamp[clamp_id].value;
1659 
1660 	uc_eff = uclamp_eff_get(p, clamp_id);
1661 
1662 	return (unsigned long)uc_eff.value;
1663 }
1664 
1665 /*
1666  * When a task is enqueued on a rq, the clamp bucket currently defined by the
1667  * task's uclamp::bucket_id is refcounted on that rq. This also immediately
1668  * updates the rq's clamp value if required.
1669  *
1670  * Tasks can have a task-specific value requested from user-space, track
1671  * within each bucket the maximum value for tasks refcounted in it.
1672  * This "local max aggregation" allows to track the exact "requested" value
1673  * for each bucket when all its RUNNABLE tasks require the same clamp.
1674  */
1675 static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
1676 				    enum uclamp_id clamp_id)
1677 {
1678 	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1679 	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1680 	struct uclamp_bucket *bucket;
1681 
1682 	lockdep_assert_rq_held(rq);
1683 
1684 	/* Update task effective clamp */
1685 	p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);
1686 
1687 	bucket = &uc_rq->bucket[uc_se->bucket_id];
1688 	bucket->tasks++;
1689 	uc_se->active = true;
1690 
1691 	uclamp_idle_reset(rq, clamp_id, uc_se->value);
1692 
1693 	/*
1694 	 * Local max aggregation: rq buckets always track the max
1695 	 * "requested" clamp value of its RUNNABLE tasks.
1696 	 */
1697 	if (bucket->tasks == 1 || uc_se->value > bucket->value)
1698 		bucket->value = uc_se->value;
1699 
1700 	if (uc_se->value > uclamp_rq_get(rq, clamp_id))
1701 		uclamp_rq_set(rq, clamp_id, uc_se->value);
1702 }
1703 
1704 /*
1705  * When a task is dequeued from a rq, the clamp bucket refcounted by the task
1706  * is released. If this is the last task reference counting the rq's max
1707  * active clamp value, then the rq's clamp value is updated.
1708  *
1709  * Both refcounted tasks and rq's cached clamp values are expected to be
1710  * always valid. If it's detected they are not, as defensive programming,
1711  * enforce the expected state and warn.
1712  */
1713 static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
1714 				    enum uclamp_id clamp_id)
1715 {
1716 	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1717 	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1718 	struct uclamp_bucket *bucket;
1719 	unsigned int bkt_clamp;
1720 	unsigned int rq_clamp;
1721 
1722 	lockdep_assert_rq_held(rq);
1723 
1724 	/*
1725 	 * If sched_uclamp_used was enabled after task @p was enqueued,
1726 	 * we could end up with unbalanced call to uclamp_rq_dec_id().
1727 	 *
1728 	 * In this case the uc_se->active flag should be false since no uclamp
1729 	 * accounting was performed at enqueue time and we can just return
1730 	 * here.
1731 	 *
1732 	 * Need to be careful of the following enqueue/dequeue ordering
1733 	 * problem too
1734 	 *
1735 	 *	enqueue(taskA)
1736 	 *	// sched_uclamp_used gets enabled
1737 	 *	enqueue(taskB)
1738 	 *	dequeue(taskA)
1739 	 *	// Must not decrement bucket->tasks here
1740 	 *	dequeue(taskB)
1741 	 *
1742 	 * where we could end up with stale data in uc_se and
1743 	 * bucket[uc_se->bucket_id].
1744 	 *
1745 	 * The following check here eliminates the possibility of such race.
1746 	 */
1747 	if (unlikely(!uc_se->active))
1748 		return;
1749 
1750 	bucket = &uc_rq->bucket[uc_se->bucket_id];
1751 
1752 	WARN_ON_ONCE(!bucket->tasks);
1753 	if (likely(bucket->tasks))
1754 		bucket->tasks--;
1755 
1756 	uc_se->active = false;
1757 
1758 	/*
1759 	 * Keep "local max aggregation" simple and accept to (possibly)
1760 	 * overboost some RUNNABLE tasks in the same bucket.
1761 	 * The rq clamp bucket value is reset to its base value whenever
1762 	 * there are no more RUNNABLE tasks refcounting it.
1763 	 */
1764 	if (likely(bucket->tasks))
1765 		return;
1766 
1767 	rq_clamp = uclamp_rq_get(rq, clamp_id);
1768 	/*
1769 	 * Defensive programming: this should never happen. If it happens,
1770 	 * e.g. due to future modification, warn and fix up the expected value.
1771 	 */
1772 	WARN_ON_ONCE(bucket->value > rq_clamp);
1773 	if (bucket->value >= rq_clamp) {
1774 		bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
1775 		uclamp_rq_set(rq, clamp_id, bkt_clamp);
1776 	}
1777 }
1778 
1779 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p, int flags)
1780 {
1781 	enum uclamp_id clamp_id;
1782 
1783 	/*
1784 	 * Avoid any overhead until uclamp is actually used by the userspace.
1785 	 *
1786 	 * The condition is constructed such that a NOP is generated when
1787 	 * sched_uclamp_used is disabled.
1788 	 */
1789 	if (!uclamp_is_used())
1790 		return;
1791 
1792 	if (unlikely(!p->sched_class->uclamp_enabled))
1793 		return;
1794 
1795 	/* Only inc the delayed task which being woken up. */
1796 	if (p->se.sched_delayed && !(flags & ENQUEUE_DELAYED))
1797 		return;
1798 
1799 	for_each_clamp_id(clamp_id)
1800 		uclamp_rq_inc_id(rq, p, clamp_id);
1801 
1802 	/* Reset clamp idle holding when there is one RUNNABLE task */
1803 	if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
1804 		rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
1805 }
1806 
1807 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
1808 {
1809 	enum uclamp_id clamp_id;
1810 
1811 	/*
1812 	 * Avoid any overhead until uclamp is actually used by the userspace.
1813 	 *
1814 	 * The condition is constructed such that a NOP is generated when
1815 	 * sched_uclamp_used is disabled.
1816 	 */
1817 	if (!uclamp_is_used())
1818 		return;
1819 
1820 	if (unlikely(!p->sched_class->uclamp_enabled))
1821 		return;
1822 
1823 	if (p->se.sched_delayed)
1824 		return;
1825 
1826 	for_each_clamp_id(clamp_id)
1827 		uclamp_rq_dec_id(rq, p, clamp_id);
1828 }
1829 
1830 static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p,
1831 				      enum uclamp_id clamp_id)
1832 {
1833 	if (!p->uclamp[clamp_id].active)
1834 		return;
1835 
1836 	uclamp_rq_dec_id(rq, p, clamp_id);
1837 	uclamp_rq_inc_id(rq, p, clamp_id);
1838 
1839 	/*
1840 	 * Make sure to clear the idle flag if we've transiently reached 0
1841 	 * active tasks on rq.
1842 	 */
1843 	if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE))
1844 		rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
1845 }
1846 
1847 static inline void
1848 uclamp_update_active(struct task_struct *p)
1849 {
1850 	enum uclamp_id clamp_id;
1851 	struct rq_flags rf;
1852 	struct rq *rq;
1853 
1854 	/*
1855 	 * Lock the task and the rq where the task is (or was) queued.
1856 	 *
1857 	 * We might lock the (previous) rq of a !RUNNABLE task, but that's the
1858 	 * price to pay to safely serialize util_{min,max} updates with
1859 	 * enqueues, dequeues and migration operations.
1860 	 * This is the same locking schema used by __set_cpus_allowed_ptr().
1861 	 */
1862 	rq = task_rq_lock(p, &rf);
1863 
1864 	/*
1865 	 * Setting the clamp bucket is serialized by task_rq_lock().
1866 	 * If the task is not yet RUNNABLE and its task_struct is not
1867 	 * affecting a valid clamp bucket, the next time it's enqueued,
1868 	 * it will already see the updated clamp bucket value.
1869 	 */
1870 	for_each_clamp_id(clamp_id)
1871 		uclamp_rq_reinc_id(rq, p, clamp_id);
1872 
1873 	task_rq_unlock(rq, p, &rf);
1874 }
1875 
1876 #ifdef CONFIG_UCLAMP_TASK_GROUP
1877 static inline void
1878 uclamp_update_active_tasks(struct cgroup_subsys_state *css)
1879 {
1880 	struct css_task_iter it;
1881 	struct task_struct *p;
1882 
1883 	css_task_iter_start(css, 0, &it);
1884 	while ((p = css_task_iter_next(&it)))
1885 		uclamp_update_active(p);
1886 	css_task_iter_end(&it);
1887 }
1888 
1889 static void cpu_util_update_eff(struct cgroup_subsys_state *css);
1890 #endif
1891 
1892 #ifdef CONFIG_SYSCTL
1893 #ifdef CONFIG_UCLAMP_TASK_GROUP
1894 static void uclamp_update_root_tg(void)
1895 {
1896 	struct task_group *tg = &root_task_group;
1897 
1898 	uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN],
1899 		      sysctl_sched_uclamp_util_min, false);
1900 	uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
1901 		      sysctl_sched_uclamp_util_max, false);
1902 
1903 	guard(rcu)();
1904 	cpu_util_update_eff(&root_task_group.css);
1905 }
1906 #else
1907 static void uclamp_update_root_tg(void) { }
1908 #endif
1909 
1910 static void uclamp_sync_util_min_rt_default(void)
1911 {
1912 	struct task_struct *g, *p;
1913 
1914 	/*
1915 	 * copy_process()			sysctl_uclamp
1916 	 *					  uclamp_min_rt = X;
1917 	 *   write_lock(&tasklist_lock)		  read_lock(&tasklist_lock)
1918 	 *   // link thread			  smp_mb__after_spinlock()
1919 	 *   write_unlock(&tasklist_lock)	  read_unlock(&tasklist_lock);
1920 	 *   sched_post_fork()			  for_each_process_thread()
1921 	 *     __uclamp_sync_rt()		    __uclamp_sync_rt()
1922 	 *
1923 	 * Ensures that either sched_post_fork() will observe the new
1924 	 * uclamp_min_rt or for_each_process_thread() will observe the new
1925 	 * task.
1926 	 */
1927 	read_lock(&tasklist_lock);
1928 	smp_mb__after_spinlock();
1929 	read_unlock(&tasklist_lock);
1930 
1931 	guard(rcu)();
1932 	for_each_process_thread(g, p)
1933 		uclamp_update_util_min_rt_default(p);
1934 }
1935 
1936 static int sysctl_sched_uclamp_handler(const struct ctl_table *table, int write,
1937 				void *buffer, size_t *lenp, loff_t *ppos)
1938 {
1939 	bool update_root_tg = false;
1940 	int old_min, old_max, old_min_rt;
1941 	int result;
1942 
1943 	guard(mutex)(&uclamp_mutex);
1944 
1945 	old_min = sysctl_sched_uclamp_util_min;
1946 	old_max = sysctl_sched_uclamp_util_max;
1947 	old_min_rt = sysctl_sched_uclamp_util_min_rt_default;
1948 
1949 	result = proc_dointvec(table, write, buffer, lenp, ppos);
1950 	if (result)
1951 		goto undo;
1952 	if (!write)
1953 		return 0;
1954 
1955 	if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
1956 	    sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE	||
1957 	    sysctl_sched_uclamp_util_min_rt_default > SCHED_CAPACITY_SCALE) {
1958 
1959 		result = -EINVAL;
1960 		goto undo;
1961 	}
1962 
1963 	if (old_min != sysctl_sched_uclamp_util_min) {
1964 		uclamp_se_set(&uclamp_default[UCLAMP_MIN],
1965 			      sysctl_sched_uclamp_util_min, false);
1966 		update_root_tg = true;
1967 	}
1968 	if (old_max != sysctl_sched_uclamp_util_max) {
1969 		uclamp_se_set(&uclamp_default[UCLAMP_MAX],
1970 			      sysctl_sched_uclamp_util_max, false);
1971 		update_root_tg = true;
1972 	}
1973 
1974 	if (update_root_tg) {
1975 		sched_uclamp_enable();
1976 		uclamp_update_root_tg();
1977 	}
1978 
1979 	if (old_min_rt != sysctl_sched_uclamp_util_min_rt_default) {
1980 		sched_uclamp_enable();
1981 		uclamp_sync_util_min_rt_default();
1982 	}
1983 
1984 	/*
1985 	 * We update all RUNNABLE tasks only when task groups are in use.
1986 	 * Otherwise, keep it simple and do just a lazy update at each next
1987 	 * task enqueue time.
1988 	 */
1989 	return 0;
1990 
1991 undo:
1992 	sysctl_sched_uclamp_util_min = old_min;
1993 	sysctl_sched_uclamp_util_max = old_max;
1994 	sysctl_sched_uclamp_util_min_rt_default = old_min_rt;
1995 	return result;
1996 }
1997 #endif /* CONFIG_SYSCTL */
1998 
1999 static void uclamp_fork(struct task_struct *p)
2000 {
2001 	enum uclamp_id clamp_id;
2002 
2003 	/*
2004 	 * We don't need to hold task_rq_lock() when updating p->uclamp_* here
2005 	 * as the task is still at its early fork stages.
2006 	 */
2007 	for_each_clamp_id(clamp_id)
2008 		p->uclamp[clamp_id].active = false;
2009 
2010 	if (likely(!p->sched_reset_on_fork))
2011 		return;
2012 
2013 	for_each_clamp_id(clamp_id) {
2014 		uclamp_se_set(&p->uclamp_req[clamp_id],
2015 			      uclamp_none(clamp_id), false);
2016 	}
2017 }
2018 
2019 static void uclamp_post_fork(struct task_struct *p)
2020 {
2021 	uclamp_update_util_min_rt_default(p);
2022 }
2023 
2024 static void __init init_uclamp_rq(struct rq *rq)
2025 {
2026 	enum uclamp_id clamp_id;
2027 	struct uclamp_rq *uc_rq = rq->uclamp;
2028 
2029 	for_each_clamp_id(clamp_id) {
2030 		uc_rq[clamp_id] = (struct uclamp_rq) {
2031 			.value = uclamp_none(clamp_id)
2032 		};
2033 	}
2034 
2035 	rq->uclamp_flags = UCLAMP_FLAG_IDLE;
2036 }
2037 
2038 static void __init init_uclamp(void)
2039 {
2040 	struct uclamp_se uc_max = {};
2041 	enum uclamp_id clamp_id;
2042 	int cpu;
2043 
2044 	for_each_possible_cpu(cpu)
2045 		init_uclamp_rq(cpu_rq(cpu));
2046 
2047 	for_each_clamp_id(clamp_id) {
2048 		uclamp_se_set(&init_task.uclamp_req[clamp_id],
2049 			      uclamp_none(clamp_id), false);
2050 	}
2051 
2052 	/* System defaults allow max clamp values for both indexes */
2053 	uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false);
2054 	for_each_clamp_id(clamp_id) {
2055 		uclamp_default[clamp_id] = uc_max;
2056 #ifdef CONFIG_UCLAMP_TASK_GROUP
2057 		root_task_group.uclamp_req[clamp_id] = uc_max;
2058 		root_task_group.uclamp[clamp_id] = uc_max;
2059 #endif
2060 	}
2061 }
2062 
2063 #else /* !CONFIG_UCLAMP_TASK: */
2064 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p, int flags) { }
2065 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
2066 static inline void uclamp_fork(struct task_struct *p) { }
2067 static inline void uclamp_post_fork(struct task_struct *p) { }
2068 static inline void init_uclamp(void) { }
2069 #endif /* !CONFIG_UCLAMP_TASK */
2070 
2071 bool sched_task_on_rq(struct task_struct *p)
2072 {
2073 	return task_on_rq_queued(p);
2074 }
2075 
2076 unsigned long get_wchan(struct task_struct *p)
2077 {
2078 	unsigned long ip = 0;
2079 	unsigned int state;
2080 
2081 	if (!p || p == current)
2082 		return 0;
2083 
2084 	/* Only get wchan if task is blocked and we can keep it that way. */
2085 	raw_spin_lock_irq(&p->pi_lock);
2086 	state = READ_ONCE(p->__state);
2087 	smp_rmb(); /* see try_to_wake_up() */
2088 	if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
2089 		ip = __get_wchan(p);
2090 	raw_spin_unlock_irq(&p->pi_lock);
2091 
2092 	return ip;
2093 }
2094 
2095 void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
2096 {
2097 	if (!(flags & ENQUEUE_NOCLOCK))
2098 		update_rq_clock(rq);
2099 
2100 	/*
2101 	 * Can be before ->enqueue_task() because uclamp considers the
2102 	 * ENQUEUE_DELAYED task before its ->sched_delayed gets cleared
2103 	 * in ->enqueue_task().
2104 	 */
2105 	uclamp_rq_inc(rq, p, flags);
2106 
2107 	p->sched_class->enqueue_task(rq, p, flags);
2108 
2109 	psi_enqueue(p, flags);
2110 
2111 	if (!(flags & ENQUEUE_RESTORE))
2112 		sched_info_enqueue(rq, p);
2113 
2114 	if (sched_core_enabled(rq))
2115 		sched_core_enqueue(rq, p);
2116 }
2117 
2118 /*
2119  * Must only return false when DEQUEUE_SLEEP.
2120  */
2121 inline bool dequeue_task(struct rq *rq, struct task_struct *p, int flags)
2122 {
2123 	if (sched_core_enabled(rq))
2124 		sched_core_dequeue(rq, p, flags);
2125 
2126 	if (!(flags & DEQUEUE_NOCLOCK))
2127 		update_rq_clock(rq);
2128 
2129 	if (!(flags & DEQUEUE_SAVE))
2130 		sched_info_dequeue(rq, p);
2131 
2132 	psi_dequeue(p, flags);
2133 
2134 	/*
2135 	 * Must be before ->dequeue_task() because ->dequeue_task() can 'fail'
2136 	 * and mark the task ->sched_delayed.
2137 	 */
2138 	uclamp_rq_dec(rq, p);
2139 	return p->sched_class->dequeue_task(rq, p, flags);
2140 }
2141 
2142 void activate_task(struct rq *rq, struct task_struct *p, int flags)
2143 {
2144 	if (task_on_rq_migrating(p))
2145 		flags |= ENQUEUE_MIGRATED;
2146 
2147 	enqueue_task(rq, p, flags);
2148 
2149 	WRITE_ONCE(p->on_rq, TASK_ON_RQ_QUEUED);
2150 	ASSERT_EXCLUSIVE_WRITER(p->on_rq);
2151 }
2152 
2153 void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
2154 {
2155 	WARN_ON_ONCE(flags & DEQUEUE_SLEEP);
2156 
2157 	WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
2158 	ASSERT_EXCLUSIVE_WRITER(p->on_rq);
2159 
2160 	/*
2161 	 * Code explicitly relies on TASK_ON_RQ_MIGRATING begin set *before*
2162 	 * dequeue_task() and cleared *after* enqueue_task().
2163 	 */
2164 
2165 	dequeue_task(rq, p, flags);
2166 }
2167 
2168 static void block_task(struct rq *rq, struct task_struct *p, int flags)
2169 {
2170 	if (dequeue_task(rq, p, DEQUEUE_SLEEP | flags))
2171 		__block_task(rq, p);
2172 }
2173 
2174 /**
2175  * task_curr - is this task currently executing on a CPU?
2176  * @p: the task in question.
2177  *
2178  * Return: 1 if the task is currently executing. 0 otherwise.
2179  */
2180 inline int task_curr(const struct task_struct *p)
2181 {
2182 	return cpu_curr(task_cpu(p)) == p;
2183 }
2184 
2185 void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags)
2186 {
2187 	struct task_struct *donor = rq->donor;
2188 
2189 	if (p->sched_class == rq->next_class) {
2190 		rq->next_class->wakeup_preempt(rq, p, flags);
2191 
2192 	} else if (sched_class_above(p->sched_class, rq->next_class)) {
2193 		rq->next_class->wakeup_preempt(rq, p, flags);
2194 		resched_curr(rq);
2195 		rq->next_class = p->sched_class;
2196 	}
2197 
2198 	/*
2199 	 * A queue event has occurred, and we're going to schedule.  In
2200 	 * this case, we can save a useless back to back clock update.
2201 	 */
2202 	if (task_on_rq_queued(donor) && test_tsk_need_resched(rq->curr))
2203 		rq_clock_skip_update(rq);
2204 }
2205 
2206 static __always_inline
2207 int __task_state_match(struct task_struct *p, unsigned int state)
2208 {
2209 	if (READ_ONCE(p->__state) & state)
2210 		return 1;
2211 
2212 	if (READ_ONCE(p->saved_state) & state)
2213 		return -1;
2214 
2215 	return 0;
2216 }
2217 
2218 static __always_inline
2219 int task_state_match(struct task_struct *p, unsigned int state)
2220 {
2221 	/*
2222 	 * Serialize against current_save_and_set_rtlock_wait_state(),
2223 	 * current_restore_rtlock_saved_state(), and __refrigerator().
2224 	 */
2225 	guard(raw_spinlock_irq)(&p->pi_lock);
2226 	return __task_state_match(p, state);
2227 }
2228 
2229 /*
2230  * wait_task_inactive - wait for a thread to unschedule.
2231  *
2232  * Wait for the thread to block in any of the states set in @match_state.
2233  * If it changes, i.e. @p might have woken up, then return zero.  When we
2234  * succeed in waiting for @p to be off its CPU, we return a positive number
2235  * (its total switch count).  If a second call a short while later returns the
2236  * same number, the caller can be sure that @p has remained unscheduled the
2237  * whole time.
2238  *
2239  * The caller must ensure that the task *will* unschedule sometime soon,
2240  * else this function might spin for a *long* time. This function can't
2241  * be called with interrupts off, or it may introduce deadlock with
2242  * smp_call_function() if an IPI is sent by the same process we are
2243  * waiting to become inactive.
2244  */
2245 unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
2246 {
2247 	int running, queued, match;
2248 	struct rq_flags rf;
2249 	unsigned long ncsw;
2250 	struct rq *rq;
2251 
2252 	for (;;) {
2253 		/*
2254 		 * We do the initial early heuristics without holding
2255 		 * any task-queue locks at all. We'll only try to get
2256 		 * the runqueue lock when things look like they will
2257 		 * work out!
2258 		 */
2259 		rq = task_rq(p);
2260 
2261 		/*
2262 		 * If the task is actively running on another CPU
2263 		 * still, just relax and busy-wait without holding
2264 		 * any locks.
2265 		 *
2266 		 * NOTE! Since we don't hold any locks, it's not
2267 		 * even sure that "rq" stays as the right runqueue!
2268 		 * But we don't care, since "task_on_cpu()" will
2269 		 * return false if the runqueue has changed and p
2270 		 * is actually now running somewhere else!
2271 		 */
2272 		while (task_on_cpu(rq, p)) {
2273 			if (!task_state_match(p, match_state))
2274 				return 0;
2275 			cpu_relax();
2276 		}
2277 
2278 		/*
2279 		 * Ok, time to look more closely! We need the rq
2280 		 * lock now, to be *sure*. If we're wrong, we'll
2281 		 * just go back and repeat.
2282 		 */
2283 		rq = task_rq_lock(p, &rf);
2284 		/*
2285 		 * If task is sched_delayed, force dequeue it, to avoid always
2286 		 * hitting the tick timeout in the queued case
2287 		 */
2288 		if (p->se.sched_delayed)
2289 			dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
2290 		trace_sched_wait_task(p);
2291 		running = task_on_cpu(rq, p);
2292 		queued = task_on_rq_queued(p);
2293 		ncsw = 0;
2294 		if ((match = __task_state_match(p, match_state))) {
2295 			/*
2296 			 * When matching on p->saved_state, consider this task
2297 			 * still queued so it will wait.
2298 			 */
2299 			if (match < 0)
2300 				queued = 1;
2301 			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
2302 		}
2303 		task_rq_unlock(rq, p, &rf);
2304 
2305 		/*
2306 		 * If it changed from the expected state, bail out now.
2307 		 */
2308 		if (unlikely(!ncsw))
2309 			break;
2310 
2311 		/*
2312 		 * Was it really running after all now that we
2313 		 * checked with the proper locks actually held?
2314 		 *
2315 		 * Oops. Go back and try again..
2316 		 */
2317 		if (unlikely(running)) {
2318 			cpu_relax();
2319 			continue;
2320 		}
2321 
2322 		/*
2323 		 * It's not enough that it's not actively running,
2324 		 * it must be off the runqueue _entirely_, and not
2325 		 * preempted!
2326 		 *
2327 		 * So if it was still runnable (but just not actively
2328 		 * running right now), it's preempted, and we should
2329 		 * yield - it could be a while.
2330 		 */
2331 		if (unlikely(queued)) {
2332 			ktime_t to = NSEC_PER_SEC / HZ;
2333 
2334 			set_current_state(TASK_UNINTERRUPTIBLE);
2335 			schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD);
2336 			continue;
2337 		}
2338 
2339 		/*
2340 		 * Ahh, all good. It wasn't running, and it wasn't
2341 		 * runnable, which means that it will never become
2342 		 * running in the future either. We're all done!
2343 		 */
2344 		break;
2345 	}
2346 
2347 	return ncsw;
2348 }
2349 
2350 static void
2351 do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx);
2352 
2353 static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
2354 {
2355 	struct affinity_context ac = {
2356 		.new_mask  = cpumask_of(rq->cpu),
2357 		.flags     = SCA_MIGRATE_DISABLE,
2358 	};
2359 
2360 	if (likely(!p->migration_disabled))
2361 		return;
2362 
2363 	if (p->cpus_ptr != &p->cpus_mask)
2364 		return;
2365 
2366 	scoped_guard (task_rq_lock, p)
2367 		do_set_cpus_allowed(p, &ac);
2368 }
2369 
2370 void ___migrate_enable(void)
2371 {
2372 	struct task_struct *p = current;
2373 	struct affinity_context ac = {
2374 		.new_mask  = &p->cpus_mask,
2375 		.flags     = SCA_MIGRATE_ENABLE,
2376 	};
2377 
2378 	__set_cpus_allowed_ptr(p, &ac);
2379 }
2380 EXPORT_SYMBOL_GPL(___migrate_enable);
2381 
2382 void migrate_disable(void)
2383 {
2384 	__migrate_disable();
2385 }
2386 EXPORT_SYMBOL_GPL(migrate_disable);
2387 
2388 void migrate_enable(void)
2389 {
2390 	__migrate_enable();
2391 }
2392 EXPORT_SYMBOL_GPL(migrate_enable);
2393 
2394 static inline bool rq_has_pinned_tasks(struct rq *rq)
2395 {
2396 	return rq->nr_pinned;
2397 }
2398 
2399 /*
2400  * Per-CPU kthreads are allowed to run on !active && online CPUs, see
2401  * __set_cpus_allowed_ptr() and select_fallback_rq().
2402  */
2403 static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
2404 {
2405 	/* When not in the task's cpumask, no point in looking further. */
2406 	if (!task_allowed_on_cpu(p, cpu))
2407 		return false;
2408 
2409 	/* migrate_disabled() must be allowed to finish. */
2410 	if (is_migration_disabled(p))
2411 		return cpu_online(cpu);
2412 
2413 	/* Non kernel threads are not allowed during either online or offline. */
2414 	if (!(p->flags & PF_KTHREAD))
2415 		return cpu_active(cpu);
2416 
2417 	/* KTHREAD_IS_PER_CPU is always allowed. */
2418 	if (kthread_is_per_cpu(p))
2419 		return cpu_online(cpu);
2420 
2421 	/* Regular kernel threads don't get to stay during offline. */
2422 	if (cpu_dying(cpu))
2423 		return false;
2424 
2425 	/* But are allowed during online. */
2426 	return cpu_online(cpu);
2427 }
2428 
2429 /*
2430  * This is how migration works:
2431  *
2432  * 1) we invoke migration_cpu_stop() on the target CPU using
2433  *    stop_one_cpu().
2434  * 2) stopper starts to run (implicitly forcing the migrated thread
2435  *    off the CPU)
2436  * 3) it checks whether the migrated task is still in the wrong runqueue.
2437  * 4) if it's in the wrong runqueue then the migration thread removes
2438  *    it and puts it into the right queue.
2439  * 5) stopper completes and stop_one_cpu() returns and the migration
2440  *    is done.
2441  */
2442 
2443 /*
2444  * move_queued_task - move a queued task to new rq.
2445  *
2446  * Returns (locked) new rq. Old rq's lock is released.
2447  */
2448 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
2449 				   struct task_struct *p, int new_cpu)
2450 	__must_hold(__rq_lockp(rq))
2451 {
2452 	lockdep_assert_rq_held(rq);
2453 
2454 	deactivate_task(rq, p, DEQUEUE_NOCLOCK);
2455 	set_task_cpu(p, new_cpu);
2456 	rq_unlock(rq, rf);
2457 
2458 	rq = cpu_rq(new_cpu);
2459 
2460 	rq_lock(rq, rf);
2461 	WARN_ON_ONCE(task_cpu(p) != new_cpu);
2462 	activate_task(rq, p, 0);
2463 	wakeup_preempt(rq, p, 0);
2464 
2465 	return rq;
2466 }
2467 
2468 struct migration_arg {
2469 	struct task_struct		*task;
2470 	int				dest_cpu;
2471 	struct set_affinity_pending	*pending;
2472 };
2473 
2474 /*
2475  * @refs: number of wait_for_completion()
2476  * @stop_pending: is @stop_work in use
2477  */
2478 struct set_affinity_pending {
2479 	refcount_t		refs;
2480 	unsigned int		stop_pending;
2481 	struct completion	done;
2482 	struct cpu_stop_work	stop_work;
2483 	struct migration_arg	arg;
2484 };
2485 
2486 /*
2487  * Move (not current) task off this CPU, onto the destination CPU. We're doing
2488  * this because either it can't run here any more (set_cpus_allowed()
2489  * away from this CPU, or CPU going down), or because we're
2490  * attempting to rebalance this task on exec (sched_exec).
2491  *
2492  * So we race with normal scheduler movements, but that's OK, as long
2493  * as the task is no longer on this CPU.
2494  */
2495 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
2496 				 struct task_struct *p, int dest_cpu)
2497 	__must_hold(__rq_lockp(rq))
2498 {
2499 	/* Affinity changed (again). */
2500 	if (!is_cpu_allowed(p, dest_cpu))
2501 		return rq;
2502 
2503 	rq = move_queued_task(rq, rf, p, dest_cpu);
2504 
2505 	return rq;
2506 }
2507 
2508 /*
2509  * migration_cpu_stop - this will be executed by a high-prio stopper thread
2510  * and performs thread migration by bumping thread off CPU then
2511  * 'pushing' onto another runqueue.
2512  */
2513 static int migration_cpu_stop(void *data)
2514 {
2515 	struct migration_arg *arg = data;
2516 	struct set_affinity_pending *pending = arg->pending;
2517 	struct task_struct *p = arg->task;
2518 	struct rq *rq = this_rq();
2519 	bool complete = false;
2520 	struct rq_flags rf;
2521 
2522 	/*
2523 	 * The original target CPU might have gone down and we might
2524 	 * be on another CPU but it doesn't matter.
2525 	 */
2526 	local_irq_save(rf.flags);
2527 	/*
2528 	 * We need to explicitly wake pending tasks before running
2529 	 * __migrate_task() such that we will not miss enforcing cpus_ptr
2530 	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
2531 	 */
2532 	flush_smp_call_function_queue();
2533 
2534 	/*
2535 	 * We may change the underlying rq, but the locks held will
2536 	 * appropriately be "transferred" when switching.
2537 	 */
2538 	context_unsafe_alias(rq);
2539 
2540 	raw_spin_lock(&p->pi_lock);
2541 	rq_lock(rq, &rf);
2542 
2543 	/*
2544 	 * If we were passed a pending, then ->stop_pending was set, thus
2545 	 * p->migration_pending must have remained stable.
2546 	 */
2547 	WARN_ON_ONCE(pending && pending != p->migration_pending);
2548 
2549 	/*
2550 	 * If task_rq(p) != rq, it cannot be migrated here, because we're
2551 	 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
2552 	 * we're holding p->pi_lock.
2553 	 */
2554 	if (task_rq(p) == rq) {
2555 		if (is_migration_disabled(p))
2556 			goto out;
2557 
2558 		if (pending) {
2559 			p->migration_pending = NULL;
2560 			complete = true;
2561 
2562 			if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask))
2563 				goto out;
2564 		}
2565 
2566 		if (task_on_rq_queued(p)) {
2567 			update_rq_clock(rq);
2568 			rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
2569 		} else {
2570 			p->wake_cpu = arg->dest_cpu;
2571 		}
2572 
2573 		/*
2574 		 * XXX __migrate_task() can fail, at which point we might end
2575 		 * up running on a dodgy CPU, AFAICT this can only happen
2576 		 * during CPU hotplug, at which point we'll get pushed out
2577 		 * anyway, so it's probably not a big deal.
2578 		 */
2579 
2580 	} else if (pending) {
2581 		/*
2582 		 * This happens when we get migrated between migrate_enable()'s
2583 		 * preempt_enable() and scheduling the stopper task. At that
2584 		 * point we're a regular task again and not current anymore.
2585 		 *
2586 		 * A !PREEMPT kernel has a giant hole here, which makes it far
2587 		 * more likely.
2588 		 */
2589 
2590 		/*
2591 		 * The task moved before the stopper got to run. We're holding
2592 		 * ->pi_lock, so the allowed mask is stable - if it got
2593 		 * somewhere allowed, we're done.
2594 		 */
2595 		if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) {
2596 			p->migration_pending = NULL;
2597 			complete = true;
2598 			goto out;
2599 		}
2600 
2601 		/*
2602 		 * When migrate_enable() hits a rq mis-match we can't reliably
2603 		 * determine is_migration_disabled() and so have to chase after
2604 		 * it.
2605 		 */
2606 		WARN_ON_ONCE(!pending->stop_pending);
2607 		preempt_disable();
2608 		rq_unlock(rq, &rf);
2609 		raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
2610 		stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
2611 				    &pending->arg, &pending->stop_work);
2612 		preempt_enable();
2613 		return 0;
2614 	}
2615 out:
2616 	if (pending)
2617 		pending->stop_pending = false;
2618 	rq_unlock(rq, &rf);
2619 	raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
2620 
2621 	if (complete)
2622 		complete_all(&pending->done);
2623 
2624 	return 0;
2625 }
2626 
2627 int push_cpu_stop(void *arg)
2628 {
2629 	struct rq *lowest_rq = NULL, *rq = this_rq();
2630 	struct task_struct *p = arg;
2631 
2632 	raw_spin_lock_irq(&p->pi_lock);
2633 	raw_spin_rq_lock(rq);
2634 
2635 	if (task_rq(p) != rq)
2636 		goto out_unlock;
2637 
2638 	if (is_migration_disabled(p)) {
2639 		p->migration_flags |= MDF_PUSH;
2640 		goto out_unlock;
2641 	}
2642 
2643 	p->migration_flags &= ~MDF_PUSH;
2644 
2645 	if (p->sched_class->find_lock_rq)
2646 		lowest_rq = p->sched_class->find_lock_rq(p, rq);
2647 
2648 	if (!lowest_rq)
2649 		goto out_unlock;
2650 
2651 	lockdep_assert_rq_held(lowest_rq);
2652 
2653 	// XXX validate p is still the highest prio task
2654 	if (task_rq(p) == rq) {
2655 		move_queued_task_locked(rq, lowest_rq, p);
2656 		resched_curr(lowest_rq);
2657 	}
2658 
2659 	double_unlock_balance(rq, lowest_rq);
2660 
2661 out_unlock:
2662 	rq->push_busy = false;
2663 	raw_spin_rq_unlock(rq);
2664 	raw_spin_unlock_irq(&p->pi_lock);
2665 
2666 	put_task_struct(p);
2667 	return 0;
2668 }
2669 
2670 static inline void mm_update_cpus_allowed(struct mm_struct *mm, const cpumask_t *affmask);
2671 
2672 /*
2673  * sched_class::set_cpus_allowed must do the below, but is not required to
2674  * actually call this function.
2675  */
2676 void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx)
2677 {
2678 	if (ctx->flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) {
2679 		p->cpus_ptr = ctx->new_mask;
2680 		return;
2681 	}
2682 
2683 	cpumask_copy(&p->cpus_mask, ctx->new_mask);
2684 	p->nr_cpus_allowed = cpumask_weight(ctx->new_mask);
2685 	mm_update_cpus_allowed(p->mm, ctx->new_mask);
2686 
2687 	/*
2688 	 * Swap in a new user_cpus_ptr if SCA_USER flag set
2689 	 */
2690 	if (ctx->flags & SCA_USER)
2691 		swap(p->user_cpus_ptr, ctx->user_mask);
2692 }
2693 
2694 static void
2695 do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
2696 {
2697 	scoped_guard (sched_change, p, DEQUEUE_SAVE)
2698 		p->sched_class->set_cpus_allowed(p, ctx);
2699 }
2700 
2701 /*
2702  * Used for kthread_bind() and select_fallback_rq(), in both cases the user
2703  * affinity (if any) should be destroyed too.
2704  */
2705 void set_cpus_allowed_force(struct task_struct *p, const struct cpumask *new_mask)
2706 {
2707 	struct affinity_context ac = {
2708 		.new_mask  = new_mask,
2709 		.user_mask = NULL,
2710 		.flags     = SCA_USER,	/* clear the user requested mask */
2711 	};
2712 	union cpumask_rcuhead {
2713 		cpumask_t cpumask;
2714 		struct rcu_head rcu;
2715 	};
2716 
2717 	scoped_guard (__task_rq_lock, p)
2718 		do_set_cpus_allowed(p, &ac);
2719 
2720 	/*
2721 	 * Because this is called with p->pi_lock held, it is not possible
2722 	 * to use kfree() here (when PREEMPT_RT=y), therefore punt to using
2723 	 * kfree_rcu().
2724 	 */
2725 	kfree_rcu((union cpumask_rcuhead *)ac.user_mask, rcu);
2726 }
2727 
2728 int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
2729 		      int node)
2730 {
2731 	cpumask_t *user_mask;
2732 	unsigned long flags;
2733 
2734 	/*
2735 	 * Always clear dst->user_cpus_ptr first as their user_cpus_ptr's
2736 	 * may differ by now due to racing.
2737 	 */
2738 	dst->user_cpus_ptr = NULL;
2739 
2740 	/*
2741 	 * This check is racy and losing the race is a valid situation.
2742 	 * It is not worth the extra overhead of taking the pi_lock on
2743 	 * every fork/clone.
2744 	 */
2745 	if (data_race(!src->user_cpus_ptr))
2746 		return 0;
2747 
2748 	user_mask = alloc_user_cpus_ptr(node);
2749 	if (!user_mask)
2750 		return -ENOMEM;
2751 
2752 	/*
2753 	 * Use pi_lock to protect content of user_cpus_ptr
2754 	 *
2755 	 * Though unlikely, user_cpus_ptr can be reset to NULL by a concurrent
2756 	 * set_cpus_allowed_force().
2757 	 */
2758 	raw_spin_lock_irqsave(&src->pi_lock, flags);
2759 	if (src->user_cpus_ptr) {
2760 		swap(dst->user_cpus_ptr, user_mask);
2761 		cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
2762 	}
2763 	raw_spin_unlock_irqrestore(&src->pi_lock, flags);
2764 
2765 	if (unlikely(user_mask))
2766 		kfree(user_mask);
2767 
2768 	return 0;
2769 }
2770 
2771 static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p)
2772 {
2773 	struct cpumask *user_mask = NULL;
2774 
2775 	swap(p->user_cpus_ptr, user_mask);
2776 
2777 	return user_mask;
2778 }
2779 
2780 void release_user_cpus_ptr(struct task_struct *p)
2781 {
2782 	kfree(clear_user_cpus_ptr(p));
2783 }
2784 
2785 /*
2786  * This function is wildly self concurrent; here be dragons.
2787  *
2788  *
2789  * When given a valid mask, __set_cpus_allowed_ptr() must block until the
2790  * designated task is enqueued on an allowed CPU. If that task is currently
2791  * running, we have to kick it out using the CPU stopper.
2792  *
2793  * Migrate-Disable comes along and tramples all over our nice sandcastle.
2794  * Consider:
2795  *
2796  *     Initial conditions: P0->cpus_mask = [0, 1]
2797  *
2798  *     P0@CPU0                  P1
2799  *
2800  *     migrate_disable();
2801  *     <preempted>
2802  *                              set_cpus_allowed_ptr(P0, [1]);
2803  *
2804  * P1 *cannot* return from this set_cpus_allowed_ptr() call until P0 executes
2805  * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region).
2806  * This means we need the following scheme:
2807  *
2808  *     P0@CPU0                  P1
2809  *
2810  *     migrate_disable();
2811  *     <preempted>
2812  *                              set_cpus_allowed_ptr(P0, [1]);
2813  *                                <blocks>
2814  *     <resumes>
2815  *     migrate_enable();
2816  *       __set_cpus_allowed_ptr();
2817  *       <wakes local stopper>
2818  *                         `--> <woken on migration completion>
2819  *
2820  * Now the fun stuff: there may be several P1-like tasks, i.e. multiple
2821  * concurrent set_cpus_allowed_ptr(P0, [*]) calls. CPU affinity changes of any
2822  * task p are serialized by p->pi_lock, which we can leverage: the one that
2823  * should come into effect at the end of the Migrate-Disable region is the last
2824  * one. This means we only need to track a single cpumask (i.e. p->cpus_mask),
2825  * but we still need to properly signal those waiting tasks at the appropriate
2826  * moment.
2827  *
2828  * This is implemented using struct set_affinity_pending. The first
2829  * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will
2830  * setup an instance of that struct and install it on the targeted task_struct.
2831  * Any and all further callers will reuse that instance. Those then wait for
2832  * a completion signaled at the tail of the CPU stopper callback (1), triggered
2833  * on the end of the Migrate-Disable region (i.e. outermost migrate_enable()).
2834  *
2835  *
2836  * (1) In the cases covered above. There is one more where the completion is
2837  * signaled within affine_move_task() itself: when a subsequent affinity request
2838  * occurs after the stopper bailed out due to the targeted task still being
2839  * Migrate-Disable. Consider:
2840  *
2841  *     Initial conditions: P0->cpus_mask = [0, 1]
2842  *
2843  *     CPU0		  P1				P2
2844  *     <P0>
2845  *       migrate_disable();
2846  *       <preempted>
2847  *                        set_cpus_allowed_ptr(P0, [1]);
2848  *                          <blocks>
2849  *     <migration/0>
2850  *       migration_cpu_stop()
2851  *         is_migration_disabled()
2852  *           <bails>
2853  *                                                       set_cpus_allowed_ptr(P0, [0, 1]);
2854  *                                                         <signal completion>
2855  *                          <awakes>
2856  *
2857  * Note that the above is safe vs a concurrent migrate_enable(), as any
2858  * pending affinity completion is preceded by an uninstallation of
2859  * p->migration_pending done with p->pi_lock held.
2860  */
2861 static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf,
2862 			    int dest_cpu, unsigned int flags)
2863 	__releases(__rq_lockp(rq), &p->pi_lock)
2864 {
2865 	struct set_affinity_pending my_pending = { }, *pending = NULL;
2866 	bool stop_pending, complete = false;
2867 
2868 	/*
2869 	 * Can the task run on the task's current CPU? If so, we're done
2870 	 *
2871 	 * We are also done if the task is the current donor, boosting a lock-
2872 	 * holding proxy, (and potentially has been migrated outside its
2873 	 * current or previous affinity mask)
2874 	 */
2875 	if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask) ||
2876 	    (task_current_donor(rq, p) && !task_current(rq, p))) {
2877 		struct task_struct *push_task = NULL;
2878 
2879 		if ((flags & SCA_MIGRATE_ENABLE) &&
2880 		    (p->migration_flags & MDF_PUSH) && !rq->push_busy) {
2881 			rq->push_busy = true;
2882 			push_task = get_task_struct(p);
2883 		}
2884 
2885 		/*
2886 		 * If there are pending waiters, but no pending stop_work,
2887 		 * then complete now.
2888 		 */
2889 		pending = p->migration_pending;
2890 		if (pending && !pending->stop_pending) {
2891 			p->migration_pending = NULL;
2892 			complete = true;
2893 		}
2894 
2895 		preempt_disable();
2896 		task_rq_unlock(rq, p, rf);
2897 		if (push_task) {
2898 			stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
2899 					    p, &rq->push_work);
2900 		}
2901 		preempt_enable();
2902 
2903 		if (complete)
2904 			complete_all(&pending->done);
2905 
2906 		return 0;
2907 	}
2908 
2909 	if (!(flags & SCA_MIGRATE_ENABLE)) {
2910 		/* serialized by p->pi_lock */
2911 		if (!p->migration_pending) {
2912 			/* Install the request */
2913 			refcount_set(&my_pending.refs, 1);
2914 			init_completion(&my_pending.done);
2915 			my_pending.arg = (struct migration_arg) {
2916 				.task = p,
2917 				.dest_cpu = dest_cpu,
2918 				.pending = &my_pending,
2919 			};
2920 
2921 			p->migration_pending = &my_pending;
2922 		} else {
2923 			pending = p->migration_pending;
2924 			refcount_inc(&pending->refs);
2925 			/*
2926 			 * Affinity has changed, but we've already installed a
2927 			 * pending. migration_cpu_stop() *must* see this, else
2928 			 * we risk a completion of the pending despite having a
2929 			 * task on a disallowed CPU.
2930 			 *
2931 			 * Serialized by p->pi_lock, so this is safe.
2932 			 */
2933 			pending->arg.dest_cpu = dest_cpu;
2934 		}
2935 	}
2936 	pending = p->migration_pending;
2937 	/*
2938 	 * - !MIGRATE_ENABLE:
2939 	 *   we'll have installed a pending if there wasn't one already.
2940 	 *
2941 	 * - MIGRATE_ENABLE:
2942 	 *   we're here because the current CPU isn't matching anymore,
2943 	 *   the only way that can happen is because of a concurrent
2944 	 *   set_cpus_allowed_ptr() call, which should then still be
2945 	 *   pending completion.
2946 	 *
2947 	 * Either way, we really should have a @pending here.
2948 	 */
2949 	if (WARN_ON_ONCE(!pending)) {
2950 		task_rq_unlock(rq, p, rf);
2951 		return -EINVAL;
2952 	}
2953 
2954 	if (task_on_cpu(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) {
2955 		/*
2956 		 * MIGRATE_ENABLE gets here because 'p == current', but for
2957 		 * anything else we cannot do is_migration_disabled(), punt
2958 		 * and have the stopper function handle it all race-free.
2959 		 */
2960 		stop_pending = pending->stop_pending;
2961 		if (!stop_pending)
2962 			pending->stop_pending = true;
2963 
2964 		if (flags & SCA_MIGRATE_ENABLE)
2965 			p->migration_flags &= ~MDF_PUSH;
2966 
2967 		preempt_disable();
2968 		task_rq_unlock(rq, p, rf);
2969 		if (!stop_pending) {
2970 			stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
2971 					    &pending->arg, &pending->stop_work);
2972 		}
2973 		preempt_enable();
2974 
2975 		if (flags & SCA_MIGRATE_ENABLE)
2976 			return 0;
2977 	} else {
2978 
2979 		if (!is_migration_disabled(p)) {
2980 			if (task_on_rq_queued(p))
2981 				rq = move_queued_task(rq, rf, p, dest_cpu);
2982 
2983 			if (!pending->stop_pending) {
2984 				p->migration_pending = NULL;
2985 				complete = true;
2986 			}
2987 		}
2988 		task_rq_unlock(rq, p, rf);
2989 
2990 		if (complete)
2991 			complete_all(&pending->done);
2992 	}
2993 
2994 	wait_for_completion(&pending->done);
2995 
2996 	if (refcount_dec_and_test(&pending->refs))
2997 		wake_up_var(&pending->refs); /* No UaF, just an address */
2998 
2999 	/*
3000 	 * Block the original owner of &pending until all subsequent callers
3001 	 * have seen the completion and decremented the refcount
3002 	 */
3003 	wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs));
3004 
3005 	/* ARGH */
3006 	WARN_ON_ONCE(my_pending.stop_pending);
3007 
3008 	return 0;
3009 }
3010 
3011 /*
3012  * Called with both p->pi_lock and rq->lock held; drops both before returning.
3013  */
3014 static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
3015 					 struct affinity_context *ctx,
3016 					 struct rq *rq,
3017 					 struct rq_flags *rf)
3018 	__releases(__rq_lockp(rq), &p->pi_lock)
3019 {
3020 	const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
3021 	const struct cpumask *cpu_valid_mask = cpu_active_mask;
3022 	bool kthread = p->flags & PF_KTHREAD;
3023 	unsigned int dest_cpu;
3024 	int ret = 0;
3025 
3026 	if (kthread || is_migration_disabled(p)) {
3027 		/*
3028 		 * Kernel threads are allowed on online && !active CPUs,
3029 		 * however, during cpu-hot-unplug, even these might get pushed
3030 		 * away if not KTHREAD_IS_PER_CPU.
3031 		 *
3032 		 * Specifically, migration_disabled() tasks must not fail the
3033 		 * cpumask_any_and_distribute() pick below, esp. so on
3034 		 * SCA_MIGRATE_ENABLE, otherwise we'll not call
3035 		 * set_cpus_allowed_common() and actually reset p->cpus_ptr.
3036 		 */
3037 		cpu_valid_mask = cpu_online_mask;
3038 	}
3039 
3040 	if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) {
3041 		ret = -EINVAL;
3042 		goto out;
3043 	}
3044 
3045 	/*
3046 	 * Must re-check here, to close a race against __kthread_bind(),
3047 	 * sched_setaffinity() is not guaranteed to observe the flag.
3048 	 */
3049 	if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
3050 		ret = -EINVAL;
3051 		goto out;
3052 	}
3053 
3054 	if (!(ctx->flags & SCA_MIGRATE_ENABLE)) {
3055 		if (cpumask_equal(&p->cpus_mask, ctx->new_mask)) {
3056 			if (ctx->flags & SCA_USER)
3057 				swap(p->user_cpus_ptr, ctx->user_mask);
3058 			goto out;
3059 		}
3060 
3061 		if (WARN_ON_ONCE(p == current &&
3062 				 is_migration_disabled(p) &&
3063 				 !cpumask_test_cpu(task_cpu(p), ctx->new_mask))) {
3064 			ret = -EBUSY;
3065 			goto out;
3066 		}
3067 	}
3068 
3069 	/*
3070 	 * Picking a ~random cpu helps in cases where we are changing affinity
3071 	 * for groups of tasks (ie. cpuset), so that load balancing is not
3072 	 * immediately required to distribute the tasks within their new mask.
3073 	 */
3074 	dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, ctx->new_mask);
3075 	if (dest_cpu >= nr_cpu_ids) {
3076 		ret = -EINVAL;
3077 		goto out;
3078 	}
3079 
3080 	do_set_cpus_allowed(p, ctx);
3081 
3082 	return affine_move_task(rq, p, rf, dest_cpu, ctx->flags);
3083 
3084 out:
3085 	task_rq_unlock(rq, p, rf);
3086 
3087 	return ret;
3088 }
3089 
3090 /*
3091  * Change a given task's CPU affinity. Migrate the thread to a
3092  * proper CPU and schedule it away if the CPU it's executing on
3093  * is removed from the allowed bitmask.
3094  *
3095  * NOTE: the caller must have a valid reference to the task, the
3096  * task must not exit() & deallocate itself prematurely. The
3097  * call is not atomic; no spinlocks may be held.
3098  */
3099 int __set_cpus_allowed_ptr(struct task_struct *p, struct affinity_context *ctx)
3100 {
3101 	struct rq_flags rf;
3102 	struct rq *rq;
3103 
3104 	rq = task_rq_lock(p, &rf);
3105 	/*
3106 	 * Masking should be skipped if SCA_USER or any of the SCA_MIGRATE_*
3107 	 * flags are set.
3108 	 */
3109 	if (p->user_cpus_ptr &&
3110 	    !(ctx->flags & (SCA_USER | SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) &&
3111 	    cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr))
3112 		ctx->new_mask = rq->scratch_mask;
3113 
3114 	return __set_cpus_allowed_ptr_locked(p, ctx, rq, &rf);
3115 }
3116 
3117 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
3118 {
3119 	struct affinity_context ac = {
3120 		.new_mask  = new_mask,
3121 		.flags     = 0,
3122 	};
3123 
3124 	return __set_cpus_allowed_ptr(p, &ac);
3125 }
3126 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
3127 
3128 /*
3129  * Change a given task's CPU affinity to the intersection of its current
3130  * affinity mask and @subset_mask, writing the resulting mask to @new_mask.
3131  * If user_cpus_ptr is defined, use it as the basis for restricting CPU
3132  * affinity or use cpu_online_mask instead.
3133  *
3134  * If the resulting mask is empty, leave the affinity unchanged and return
3135  * -EINVAL.
3136  */
3137 static int restrict_cpus_allowed_ptr(struct task_struct *p,
3138 				     struct cpumask *new_mask,
3139 				     const struct cpumask *subset_mask)
3140 {
3141 	struct affinity_context ac = {
3142 		.new_mask  = new_mask,
3143 		.flags     = 0,
3144 	};
3145 	struct rq_flags rf;
3146 	struct rq *rq;
3147 	int err;
3148 
3149 	rq = task_rq_lock(p, &rf);
3150 
3151 	/*
3152 	 * Forcefully restricting the affinity of a deadline task is
3153 	 * likely to cause problems, so fail and noisily override the
3154 	 * mask entirely.
3155 	 */
3156 	if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
3157 		err = -EPERM;
3158 		goto err_unlock;
3159 	}
3160 
3161 	if (!cpumask_and(new_mask, task_user_cpus(p), subset_mask)) {
3162 		err = -EINVAL;
3163 		goto err_unlock;
3164 	}
3165 
3166 	return __set_cpus_allowed_ptr_locked(p, &ac, rq, &rf);
3167 
3168 err_unlock:
3169 	task_rq_unlock(rq, p, &rf);
3170 	return err;
3171 }
3172 
3173 /*
3174  * Restrict the CPU affinity of task @p so that it is a subset of
3175  * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
3176  * old affinity mask. If the resulting mask is empty, we warn and walk
3177  * up the cpuset hierarchy until we find a suitable mask.
3178  */
3179 void force_compatible_cpus_allowed_ptr(struct task_struct *p)
3180 {
3181 	cpumask_var_t new_mask;
3182 	const struct cpumask *override_mask = task_cpu_possible_mask(p);
3183 
3184 	alloc_cpumask_var(&new_mask, GFP_KERNEL);
3185 
3186 	/*
3187 	 * __migrate_task() can fail silently in the face of concurrent
3188 	 * offlining of the chosen destination CPU, so take the hotplug
3189 	 * lock to ensure that the migration succeeds.
3190 	 */
3191 	cpus_read_lock();
3192 	if (!cpumask_available(new_mask))
3193 		goto out_set_mask;
3194 
3195 	if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask))
3196 		goto out_free_mask;
3197 
3198 	/*
3199 	 * We failed to find a valid subset of the affinity mask for the
3200 	 * task, so override it based on its cpuset hierarchy.
3201 	 */
3202 	cpuset_cpus_allowed(p, new_mask);
3203 	override_mask = new_mask;
3204 
3205 out_set_mask:
3206 	if (printk_ratelimit()) {
3207 		printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
3208 				task_pid_nr(p), p->comm,
3209 				cpumask_pr_args(override_mask));
3210 	}
3211 
3212 	WARN_ON(set_cpus_allowed_ptr(p, override_mask));
3213 out_free_mask:
3214 	cpus_read_unlock();
3215 	free_cpumask_var(new_mask);
3216 }
3217 
3218 /*
3219  * Restore the affinity of a task @p which was previously restricted by a
3220  * call to force_compatible_cpus_allowed_ptr().
3221  *
3222  * It is the caller's responsibility to serialise this with any calls to
3223  * force_compatible_cpus_allowed_ptr(@p).
3224  */
3225 void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
3226 {
3227 	struct affinity_context ac = {
3228 		.new_mask  = task_user_cpus(p),
3229 		.flags     = 0,
3230 	};
3231 	int ret;
3232 
3233 	/*
3234 	 * Try to restore the old affinity mask with __sched_setaffinity().
3235 	 * Cpuset masking will be done there too.
3236 	 */
3237 	ret = __sched_setaffinity(p, &ac);
3238 	WARN_ON_ONCE(ret);
3239 }
3240 
3241 #ifdef CONFIG_SMP
3242 
3243 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
3244 {
3245 	unsigned int state = READ_ONCE(p->__state);
3246 
3247 	/*
3248 	 * We should never call set_task_cpu() on a blocked task,
3249 	 * ttwu() will sort out the placement.
3250 	 */
3251 	WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq);
3252 
3253 	/*
3254 	 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
3255 	 * because schedstat_wait_{start,end} rebase migrating task's wait_start
3256 	 * time relying on p->on_rq.
3257 	 */
3258 	WARN_ON_ONCE(state == TASK_RUNNING &&
3259 		     p->sched_class == &fair_sched_class &&
3260 		     (p->on_rq && !task_on_rq_migrating(p)));
3261 
3262 #ifdef CONFIG_LOCKDEP
3263 	/*
3264 	 * The caller should hold either p->pi_lock or rq->lock, when changing
3265 	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
3266 	 *
3267 	 * sched_move_task() holds both and thus holding either pins the cgroup,
3268 	 * see task_group().
3269 	 *
3270 	 * Furthermore, all task_rq users should acquire both locks, see
3271 	 * task_rq_lock().
3272 	 */
3273 	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
3274 				      lockdep_is_held(__rq_lockp(task_rq(p)))));
3275 #endif
3276 	/*
3277 	 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
3278 	 */
3279 	WARN_ON_ONCE(!cpu_online(new_cpu));
3280 
3281 	WARN_ON_ONCE(is_migration_disabled(p));
3282 
3283 	trace_sched_migrate_task(p, new_cpu);
3284 
3285 	if (task_cpu(p) != new_cpu) {
3286 		if (p->sched_class->migrate_task_rq)
3287 			p->sched_class->migrate_task_rq(p, new_cpu);
3288 		p->se.nr_migrations++;
3289 		perf_event_task_migrate(p);
3290 	}
3291 
3292 	__set_task_cpu(p, new_cpu);
3293 }
3294 #endif /* CONFIG_SMP */
3295 
3296 #ifdef CONFIG_NUMA_BALANCING
3297 static void __migrate_swap_task(struct task_struct *p, int cpu)
3298 {
3299 	if (task_on_rq_queued(p)) {
3300 		struct rq *src_rq, *dst_rq;
3301 		struct rq_flags srf, drf;
3302 
3303 		src_rq = task_rq(p);
3304 		dst_rq = cpu_rq(cpu);
3305 
3306 		rq_pin_lock(src_rq, &srf);
3307 		rq_pin_lock(dst_rq, &drf);
3308 
3309 		move_queued_task_locked(src_rq, dst_rq, p);
3310 		wakeup_preempt(dst_rq, p, 0);
3311 
3312 		rq_unpin_lock(dst_rq, &drf);
3313 		rq_unpin_lock(src_rq, &srf);
3314 
3315 	} else {
3316 		/*
3317 		 * Task isn't running anymore; make it appear like we migrated
3318 		 * it before it went to sleep. This means on wakeup we make the
3319 		 * previous CPU our target instead of where it really is.
3320 		 */
3321 		p->wake_cpu = cpu;
3322 	}
3323 }
3324 
3325 struct migration_swap_arg {
3326 	struct task_struct *src_task, *dst_task;
3327 	int src_cpu, dst_cpu;
3328 };
3329 
3330 static int migrate_swap_stop(void *data)
3331 {
3332 	struct migration_swap_arg *arg = data;
3333 	struct rq *src_rq, *dst_rq;
3334 
3335 	if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
3336 		return -EAGAIN;
3337 
3338 	src_rq = cpu_rq(arg->src_cpu);
3339 	dst_rq = cpu_rq(arg->dst_cpu);
3340 
3341 	guard(double_raw_spinlock)(&arg->src_task->pi_lock, &arg->dst_task->pi_lock);
3342 	guard(double_rq_lock)(src_rq, dst_rq);
3343 
3344 	if (task_cpu(arg->dst_task) != arg->dst_cpu)
3345 		return -EAGAIN;
3346 
3347 	if (task_cpu(arg->src_task) != arg->src_cpu)
3348 		return -EAGAIN;
3349 
3350 	if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr))
3351 		return -EAGAIN;
3352 
3353 	if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr))
3354 		return -EAGAIN;
3355 
3356 	__migrate_swap_task(arg->src_task, arg->dst_cpu);
3357 	__migrate_swap_task(arg->dst_task, arg->src_cpu);
3358 
3359 	return 0;
3360 }
3361 
3362 /*
3363  * Cross migrate two tasks
3364  */
3365 int migrate_swap(struct task_struct *cur, struct task_struct *p,
3366 		int target_cpu, int curr_cpu)
3367 {
3368 	struct migration_swap_arg arg;
3369 	int ret = -EINVAL;
3370 
3371 	arg = (struct migration_swap_arg){
3372 		.src_task = cur,
3373 		.src_cpu = curr_cpu,
3374 		.dst_task = p,
3375 		.dst_cpu = target_cpu,
3376 	};
3377 
3378 	if (arg.src_cpu == arg.dst_cpu)
3379 		goto out;
3380 
3381 	/*
3382 	 * These three tests are all lockless; this is OK since all of them
3383 	 * will be re-checked with proper locks held further down the line.
3384 	 */
3385 	if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
3386 		goto out;
3387 
3388 	if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr))
3389 		goto out;
3390 
3391 	if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr))
3392 		goto out;
3393 
3394 	trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
3395 	ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);
3396 
3397 out:
3398 	return ret;
3399 }
3400 #endif /* CONFIG_NUMA_BALANCING */
3401 
3402 /***
3403  * kick_process - kick a running thread to enter/exit the kernel
3404  * @p: the to-be-kicked thread
3405  *
3406  * Cause a process which is running on another CPU to enter
3407  * kernel-mode, without any delay. (to get signals handled.)
3408  *
3409  * NOTE: this function doesn't have to take the runqueue lock,
3410  * because all it wants to ensure is that the remote task enters
3411  * the kernel. If the IPI races and the task has been migrated
3412  * to another CPU then no harm is done and the purpose has been
3413  * achieved as well.
3414  */
3415 void kick_process(struct task_struct *p)
3416 {
3417 	guard(preempt)();
3418 	int cpu = task_cpu(p);
3419 
3420 	if ((cpu != smp_processor_id()) && task_curr(p))
3421 		smp_send_reschedule(cpu);
3422 }
3423 EXPORT_SYMBOL_GPL(kick_process);
3424 
3425 /*
3426  * ->cpus_ptr is protected by both rq->lock and p->pi_lock
3427  *
3428  * A few notes on cpu_active vs cpu_online:
3429  *
3430  *  - cpu_active must be a subset of cpu_online
3431  *
3432  *  - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
3433  *    see __set_cpus_allowed_ptr(). At this point the newly online
3434  *    CPU isn't yet part of the sched domains, and balancing will not
3435  *    see it.
3436  *
3437  *  - on CPU-down we clear cpu_active() to mask the sched domains and
3438  *    avoid the load balancer to place new tasks on the to be removed
3439  *    CPU. Existing tasks will remain running there and will be taken
3440  *    off.
3441  *
3442  * This means that fallback selection must not select !active CPUs.
3443  * And can assume that any active CPU must be online. Conversely
3444  * select_task_rq() below may allow selection of !active CPUs in order
3445  * to satisfy the above rules.
3446  */
3447 static int select_fallback_rq(int cpu, struct task_struct *p)
3448 {
3449 	int nid = cpu_to_node(cpu);
3450 	const struct cpumask *nodemask = NULL;
3451 	enum { cpuset, possible, fail } state = cpuset;
3452 	int dest_cpu;
3453 
3454 	/*
3455 	 * If the node that the CPU is on has been offlined, cpu_to_node()
3456 	 * will return -1. There is no CPU on the node, and we should
3457 	 * select the CPU on the other node.
3458 	 */
3459 	if (nid != -1) {
3460 		nodemask = cpumask_of_node(nid);
3461 
3462 		/* Look for allowed, online CPU in same node. */
3463 		for_each_cpu(dest_cpu, nodemask) {
3464 			if (is_cpu_allowed(p, dest_cpu))
3465 				return dest_cpu;
3466 		}
3467 	}
3468 
3469 	for (;;) {
3470 		/* Any allowed, online CPU? */
3471 		for_each_cpu(dest_cpu, p->cpus_ptr) {
3472 			if (!is_cpu_allowed(p, dest_cpu))
3473 				continue;
3474 
3475 			goto out;
3476 		}
3477 
3478 		/* No more Mr. Nice Guy. */
3479 		switch (state) {
3480 		case cpuset:
3481 			if (cpuset_cpus_allowed_fallback(p)) {
3482 				state = possible;
3483 				break;
3484 			}
3485 			fallthrough;
3486 		case possible:
3487 			set_cpus_allowed_force(p, task_cpu_fallback_mask(p));
3488 			state = fail;
3489 			break;
3490 		case fail:
3491 			BUG();
3492 			break;
3493 		}
3494 	}
3495 
3496 out:
3497 	if (state != cpuset) {
3498 		/*
3499 		 * Don't tell them about moving exiting tasks or
3500 		 * kernel threads (both mm NULL), since they never
3501 		 * leave kernel.
3502 		 */
3503 		if (p->mm && printk_ratelimit()) {
3504 			printk_deferred("process %d (%s) no longer affine to cpu%d\n",
3505 					task_pid_nr(p), p->comm, cpu);
3506 		}
3507 	}
3508 
3509 	return dest_cpu;
3510 }
3511 
3512 /*
3513  * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
3514  */
3515 static inline
3516 int select_task_rq(struct task_struct *p, int cpu, int *wake_flags)
3517 {
3518 	lockdep_assert_held(&p->pi_lock);
3519 
3520 	if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p)) {
3521 		cpu = p->sched_class->select_task_rq(p, cpu, *wake_flags);
3522 		*wake_flags |= WF_RQ_SELECTED;
3523 	} else {
3524 		cpu = cpumask_any(p->cpus_ptr);
3525 	}
3526 
3527 	/*
3528 	 * In order not to call set_task_cpu() on a blocking task we need
3529 	 * to rely on ttwu() to place the task on a valid ->cpus_ptr
3530 	 * CPU.
3531 	 *
3532 	 * Since this is common to all placement strategies, this lives here.
3533 	 *
3534 	 * [ this allows ->select_task() to simply return task_cpu(p) and
3535 	 *   not worry about this generic constraint ]
3536 	 */
3537 	if (unlikely(!is_cpu_allowed(p, cpu)))
3538 		cpu = select_fallback_rq(task_cpu(p), p);
3539 
3540 	return cpu;
3541 }
3542 
3543 void sched_set_stop_task(int cpu, struct task_struct *stop)
3544 {
3545 	static struct lock_class_key stop_pi_lock;
3546 	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
3547 	struct task_struct *old_stop = cpu_rq(cpu)->stop;
3548 
3549 	if (stop) {
3550 		/*
3551 		 * Make it appear like a SCHED_FIFO task, its something
3552 		 * userspace knows about and won't get confused about.
3553 		 *
3554 		 * Also, it will make PI more or less work without too
3555 		 * much confusion -- but then, stop work should not
3556 		 * rely on PI working anyway.
3557 		 */
3558 		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
3559 
3560 		stop->sched_class = &stop_sched_class;
3561 
3562 		/*
3563 		 * The PI code calls rt_mutex_setprio() with ->pi_lock held to
3564 		 * adjust the effective priority of a task. As a result,
3565 		 * rt_mutex_setprio() can trigger (RT) balancing operations,
3566 		 * which can then trigger wakeups of the stop thread to push
3567 		 * around the current task.
3568 		 *
3569 		 * The stop task itself will never be part of the PI-chain, it
3570 		 * never blocks, therefore that ->pi_lock recursion is safe.
3571 		 * Tell lockdep about this by placing the stop->pi_lock in its
3572 		 * own class.
3573 		 */
3574 		lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
3575 	}
3576 
3577 	cpu_rq(cpu)->stop = stop;
3578 
3579 	if (old_stop) {
3580 		/*
3581 		 * Reset it back to a normal scheduling class so that
3582 		 * it can die in pieces.
3583 		 */
3584 		old_stop->sched_class = &rt_sched_class;
3585 	}
3586 }
3587 
3588 static void
3589 ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
3590 {
3591 	struct rq *rq;
3592 
3593 	if (!schedstat_enabled())
3594 		return;
3595 
3596 	rq = this_rq();
3597 
3598 	if (cpu == rq->cpu) {
3599 		__schedstat_inc(rq->ttwu_local);
3600 		__schedstat_inc(p->stats.nr_wakeups_local);
3601 	} else {
3602 		struct sched_domain *sd;
3603 
3604 		__schedstat_inc(p->stats.nr_wakeups_remote);
3605 
3606 		guard(rcu)();
3607 		for_each_domain(rq->cpu, sd) {
3608 			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
3609 				__schedstat_inc(sd->ttwu_wake_remote);
3610 				break;
3611 			}
3612 		}
3613 	}
3614 
3615 	if (wake_flags & WF_MIGRATED)
3616 		__schedstat_inc(p->stats.nr_wakeups_migrate);
3617 
3618 	__schedstat_inc(rq->ttwu_count);
3619 	__schedstat_inc(p->stats.nr_wakeups);
3620 
3621 	if (wake_flags & WF_SYNC)
3622 		__schedstat_inc(p->stats.nr_wakeups_sync);
3623 }
3624 
3625 /*
3626  * Mark the task runnable.
3627  */
3628 static inline void ttwu_do_wakeup(struct task_struct *p)
3629 {
3630 	WRITE_ONCE(p->__state, TASK_RUNNING);
3631 	trace_sched_wakeup(p);
3632 }
3633 
3634 void update_rq_avg_idle(struct rq *rq)
3635 {
3636 	u64 delta = rq_clock(rq) - rq->idle_stamp;
3637 	u64 max = 2*rq->max_idle_balance_cost;
3638 
3639 	update_avg(&rq->avg_idle, delta);
3640 
3641 	if (rq->avg_idle > max)
3642 		rq->avg_idle = max;
3643 	rq->idle_stamp = 0;
3644 }
3645 
3646 static void
3647 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
3648 		 struct rq_flags *rf)
3649 {
3650 	int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
3651 
3652 	lockdep_assert_rq_held(rq);
3653 
3654 	if (p->sched_contributes_to_load)
3655 		rq->nr_uninterruptible--;
3656 
3657 	if (wake_flags & WF_RQ_SELECTED)
3658 		en_flags |= ENQUEUE_RQ_SELECTED;
3659 	if (wake_flags & WF_MIGRATED)
3660 		en_flags |= ENQUEUE_MIGRATED;
3661 	else
3662 	if (p->in_iowait) {
3663 		delayacct_blkio_end(p);
3664 		atomic_dec(&task_rq(p)->nr_iowait);
3665 	}
3666 
3667 	activate_task(rq, p, en_flags);
3668 	wakeup_preempt(rq, p, wake_flags);
3669 
3670 	ttwu_do_wakeup(p);
3671 
3672 	if (p->sched_class->task_woken) {
3673 		/*
3674 		 * Our task @p is fully woken up and running; so it's safe to
3675 		 * drop the rq->lock, hereafter rq is only used for statistics.
3676 		 */
3677 		rq_unpin_lock(rq, rf);
3678 		p->sched_class->task_woken(rq, p);
3679 		rq_repin_lock(rq, rf);
3680 	}
3681 }
3682 
3683 /*
3684  * Consider @p being inside a wait loop:
3685  *
3686  *   for (;;) {
3687  *      set_current_state(TASK_UNINTERRUPTIBLE);
3688  *
3689  *      if (CONDITION)
3690  *         break;
3691  *
3692  *      schedule();
3693  *   }
3694  *   __set_current_state(TASK_RUNNING);
3695  *
3696  * between set_current_state() and schedule(). In this case @p is still
3697  * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
3698  * an atomic manner.
3699  *
3700  * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
3701  * then schedule() must still happen and p->state can be changed to
3702  * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
3703  * need to do a full wakeup with enqueue.
3704  *
3705  * Returns: %true when the wakeup is done,
3706  *          %false otherwise.
3707  */
3708 static int ttwu_runnable(struct task_struct *p, int wake_flags)
3709 {
3710 	struct rq_flags rf;
3711 	struct rq *rq;
3712 	int ret = 0;
3713 
3714 	rq = __task_rq_lock(p, &rf);
3715 	if (task_on_rq_queued(p)) {
3716 		update_rq_clock(rq);
3717 		if (p->se.sched_delayed)
3718 			enqueue_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_DELAYED);
3719 		if (!task_on_cpu(rq, p)) {
3720 			/*
3721 			 * When on_rq && !on_cpu the task is preempted, see if
3722 			 * it should preempt the task that is current now.
3723 			 */
3724 			wakeup_preempt(rq, p, wake_flags);
3725 		}
3726 		ttwu_do_wakeup(p);
3727 		ret = 1;
3728 	}
3729 	__task_rq_unlock(rq, p, &rf);
3730 
3731 	return ret;
3732 }
3733 
3734 void sched_ttwu_pending(void *arg)
3735 {
3736 	struct llist_node *llist = arg;
3737 	struct rq *rq = this_rq();
3738 	struct task_struct *p, *t;
3739 	struct rq_flags rf;
3740 
3741 	if (!llist)
3742 		return;
3743 
3744 	rq_lock_irqsave(rq, &rf);
3745 	update_rq_clock(rq);
3746 
3747 	llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
3748 		if (WARN_ON_ONCE(p->on_cpu))
3749 			smp_cond_load_acquire(&p->on_cpu, !VAL);
3750 
3751 		if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
3752 			set_task_cpu(p, cpu_of(rq));
3753 
3754 		ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf);
3755 	}
3756 
3757 	/*
3758 	 * Must be after enqueueing at least once task such that
3759 	 * idle_cpu() does not observe a false-negative -- if it does,
3760 	 * it is possible for select_idle_siblings() to stack a number
3761 	 * of tasks on this CPU during that window.
3762 	 *
3763 	 * It is OK to clear ttwu_pending when another task pending.
3764 	 * We will receive IPI after local IRQ enabled and then enqueue it.
3765 	 * Since now nr_running > 0, idle_cpu() will always get correct result.
3766 	 */
3767 	WRITE_ONCE(rq->ttwu_pending, 0);
3768 	rq_unlock_irqrestore(rq, &rf);
3769 }
3770 
3771 /*
3772  * Prepare the scene for sending an IPI for a remote smp_call
3773  *
3774  * Returns true if the caller can proceed with sending the IPI.
3775  * Returns false otherwise.
3776  */
3777 bool call_function_single_prep_ipi(int cpu)
3778 {
3779 	if (set_nr_if_polling(cpu_rq(cpu)->idle)) {
3780 		trace_sched_wake_idle_without_ipi(cpu);
3781 		return false;
3782 	}
3783 
3784 	return true;
3785 }
3786 
3787 /*
3788  * Queue a task on the target CPUs wake_list and wake the CPU via IPI if
3789  * necessary. The wakee CPU on receipt of the IPI will queue the task
3790  * via sched_ttwu_wakeup() for activation so the wakee incurs the cost
3791  * of the wakeup instead of the waker.
3792  */
3793 static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3794 {
3795 	struct rq *rq = cpu_rq(cpu);
3796 
3797 	p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
3798 
3799 	WRITE_ONCE(rq->ttwu_pending, 1);
3800 #ifdef CONFIG_SMP
3801 	__smp_call_single_queue(cpu, &p->wake_entry.llist);
3802 #endif
3803 }
3804 
3805 void wake_up_if_idle(int cpu)
3806 {
3807 	struct rq *rq = cpu_rq(cpu);
3808 
3809 	guard(rcu)();
3810 	if (is_idle_task(rcu_dereference(rq->curr))) {
3811 		guard(rq_lock_irqsave)(rq);
3812 		if (is_idle_task(rq->curr))
3813 			resched_curr(rq);
3814 	}
3815 }
3816 
3817 bool cpus_equal_capacity(int this_cpu, int that_cpu)
3818 {
3819 	if (!sched_asym_cpucap_active())
3820 		return true;
3821 
3822 	if (this_cpu == that_cpu)
3823 		return true;
3824 
3825 	return arch_scale_cpu_capacity(this_cpu) == arch_scale_cpu_capacity(that_cpu);
3826 }
3827 
3828 bool cpus_share_cache(int this_cpu, int that_cpu)
3829 {
3830 	if (this_cpu == that_cpu)
3831 		return true;
3832 
3833 	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
3834 }
3835 
3836 /*
3837  * Whether CPUs are share cache resources, which means LLC on non-cluster
3838  * machines and LLC tag or L2 on machines with clusters.
3839  */
3840 bool cpus_share_resources(int this_cpu, int that_cpu)
3841 {
3842 	if (this_cpu == that_cpu)
3843 		return true;
3844 
3845 	return per_cpu(sd_share_id, this_cpu) == per_cpu(sd_share_id, that_cpu);
3846 }
3847 
3848 static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
3849 {
3850 	/* See SCX_OPS_ALLOW_QUEUED_WAKEUP. */
3851 	if (!scx_allow_ttwu_queue(p))
3852 		return false;
3853 
3854 #ifdef CONFIG_SMP
3855 	if (p->sched_class == &stop_sched_class)
3856 		return false;
3857 #endif
3858 
3859 	/*
3860 	 * Do not complicate things with the async wake_list while the CPU is
3861 	 * in hotplug state.
3862 	 */
3863 	if (!cpu_active(cpu))
3864 		return false;
3865 
3866 	/* Ensure the task will still be allowed to run on the CPU. */
3867 	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
3868 		return false;
3869 
3870 	/*
3871 	 * If the CPU does not share cache, then queue the task on the
3872 	 * remote rqs wakelist to avoid accessing remote data.
3873 	 */
3874 	if (!cpus_share_cache(smp_processor_id(), cpu))
3875 		return true;
3876 
3877 	if (cpu == smp_processor_id())
3878 		return false;
3879 
3880 	/*
3881 	 * If the wakee cpu is idle, or the task is descheduling and the
3882 	 * only running task on the CPU, then use the wakelist to offload
3883 	 * the task activation to the idle (or soon-to-be-idle) CPU as
3884 	 * the current CPU is likely busy. nr_running is checked to
3885 	 * avoid unnecessary task stacking.
3886 	 *
3887 	 * Note that we can only get here with (wakee) p->on_rq=0,
3888 	 * p->on_cpu can be whatever, we've done the dequeue, so
3889 	 * the wakee has been accounted out of ->nr_running.
3890 	 */
3891 	if (!cpu_rq(cpu)->nr_running)
3892 		return true;
3893 
3894 	return false;
3895 }
3896 
3897 static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3898 {
3899 	if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) {
3900 		sched_clock_cpu(cpu); /* Sync clocks across CPUs */
3901 		__ttwu_queue_wakelist(p, cpu, wake_flags);
3902 		return true;
3903 	}
3904 
3905 	return false;
3906 }
3907 
3908 static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
3909 {
3910 	struct rq *rq = cpu_rq(cpu);
3911 	struct rq_flags rf;
3912 
3913 	if (ttwu_queue_wakelist(p, cpu, wake_flags))
3914 		return;
3915 
3916 	rq_lock(rq, &rf);
3917 	update_rq_clock(rq);
3918 	ttwu_do_activate(rq, p, wake_flags, &rf);
3919 	rq_unlock(rq, &rf);
3920 }
3921 
3922 /*
3923  * Invoked from try_to_wake_up() to check whether the task can be woken up.
3924  *
3925  * The caller holds p::pi_lock if p != current or has preemption
3926  * disabled when p == current.
3927  *
3928  * The rules of saved_state:
3929  *
3930  *   The related locking code always holds p::pi_lock when updating
3931  *   p::saved_state, which means the code is fully serialized in both cases.
3932  *
3933  *   For PREEMPT_RT, the lock wait and lock wakeups happen via TASK_RTLOCK_WAIT.
3934  *   No other bits set. This allows to distinguish all wakeup scenarios.
3935  *
3936  *   For FREEZER, the wakeup happens via TASK_FROZEN. No other bits set. This
3937  *   allows us to prevent early wakeup of tasks before they can be run on
3938  *   asymmetric ISA architectures (eg ARMv9).
3939  */
3940 static __always_inline
3941 bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
3942 {
3943 	int match;
3944 
3945 	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
3946 		WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) &&
3947 			     state != TASK_RTLOCK_WAIT);
3948 	}
3949 
3950 	*success = !!(match = __task_state_match(p, state));
3951 
3952 	/*
3953 	 * Saved state preserves the task state across blocking on
3954 	 * an RT lock or TASK_FREEZABLE tasks.  If the state matches,
3955 	 * set p::saved_state to TASK_RUNNING, but do not wake the task
3956 	 * because it waits for a lock wakeup or __thaw_task(). Also
3957 	 * indicate success because from the regular waker's point of
3958 	 * view this has succeeded.
3959 	 *
3960 	 * After acquiring the lock the task will restore p::__state
3961 	 * from p::saved_state which ensures that the regular
3962 	 * wakeup is not lost. The restore will also set
3963 	 * p::saved_state to TASK_RUNNING so any further tests will
3964 	 * not result in false positives vs. @success
3965 	 */
3966 	if (match < 0)
3967 		p->saved_state = TASK_RUNNING;
3968 
3969 	return match > 0;
3970 }
3971 
3972 /*
3973  * Notes on Program-Order guarantees on SMP systems.
3974  *
3975  *  MIGRATION
3976  *
3977  * The basic program-order guarantee on SMP systems is that when a task [t]
3978  * migrates, all its activity on its old CPU [c0] happens-before any subsequent
3979  * execution on its new CPU [c1].
3980  *
3981  * For migration (of runnable tasks) this is provided by the following means:
3982  *
3983  *  A) UNLOCK of the rq(c0)->lock scheduling out task t
3984  *  B) migration for t is required to synchronize *both* rq(c0)->lock and
3985  *     rq(c1)->lock (if not at the same time, then in that order).
3986  *  C) LOCK of the rq(c1)->lock scheduling in task
3987  *
3988  * Release/acquire chaining guarantees that B happens after A and C after B.
3989  * Note: the CPU doing B need not be c0 or c1
3990  *
3991  * Example:
3992  *
3993  *   CPU0            CPU1            CPU2
3994  *
3995  *   LOCK rq(0)->lock
3996  *   sched-out X
3997  *   sched-in Y
3998  *   UNLOCK rq(0)->lock
3999  *
4000  *                                   LOCK rq(0)->lock // orders against CPU0
4001  *                                   dequeue X
4002  *                                   UNLOCK rq(0)->lock
4003  *
4004  *                                   LOCK rq(1)->lock
4005  *                                   enqueue X
4006  *                                   UNLOCK rq(1)->lock
4007  *
4008  *                   LOCK rq(1)->lock // orders against CPU2
4009  *                   sched-out Z
4010  *                   sched-in X
4011  *                   UNLOCK rq(1)->lock
4012  *
4013  *
4014  *  BLOCKING -- aka. SLEEP + WAKEUP
4015  *
4016  * For blocking we (obviously) need to provide the same guarantee as for
4017  * migration. However the means are completely different as there is no lock
4018  * chain to provide order. Instead we do:
4019  *
4020  *   1) smp_store_release(X->on_cpu, 0)   -- finish_task()
4021  *   2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
4022  *
4023  * Example:
4024  *
4025  *   CPU0 (schedule)  CPU1 (try_to_wake_up) CPU2 (schedule)
4026  *
4027  *   LOCK rq(0)->lock LOCK X->pi_lock
4028  *   dequeue X
4029  *   sched-out X
4030  *   smp_store_release(X->on_cpu, 0);
4031  *
4032  *                    smp_cond_load_acquire(&X->on_cpu, !VAL);
4033  *                    X->state = WAKING
4034  *                    set_task_cpu(X,2)
4035  *
4036  *                    LOCK rq(2)->lock
4037  *                    enqueue X
4038  *                    X->state = RUNNING
4039  *                    UNLOCK rq(2)->lock
4040  *
4041  *                                          LOCK rq(2)->lock // orders against CPU1
4042  *                                          sched-out Z
4043  *                                          sched-in X
4044  *                                          UNLOCK rq(2)->lock
4045  *
4046  *                    UNLOCK X->pi_lock
4047  *   UNLOCK rq(0)->lock
4048  *
4049  *
4050  * However, for wakeups there is a second guarantee we must provide, namely we
4051  * must ensure that CONDITION=1 done by the caller can not be reordered with
4052  * accesses to the task state; see try_to_wake_up() and set_current_state().
4053  */
4054 
4055 /**
4056  * try_to_wake_up - wake up a thread
4057  * @p: the thread to be awakened
4058  * @state: the mask of task states that can be woken
4059  * @wake_flags: wake modifier flags (WF_*)
4060  *
4061  * Conceptually does:
4062  *
4063  *   If (@state & @p->state) @p->state = TASK_RUNNING.
4064  *
4065  * If the task was not queued/runnable, also place it back on a runqueue.
4066  *
4067  * This function is atomic against schedule() which would dequeue the task.
4068  *
4069  * It issues a full memory barrier before accessing @p->state, see the comment
4070  * with set_current_state().
4071  *
4072  * Uses p->pi_lock to serialize against concurrent wake-ups.
4073  *
4074  * Relies on p->pi_lock stabilizing:
4075  *  - p->sched_class
4076  *  - p->cpus_ptr
4077  *  - p->sched_task_group
4078  * in order to do migration, see its use of select_task_rq()/set_task_cpu().
4079  *
4080  * Tries really hard to only take one task_rq(p)->lock for performance.
4081  * Takes rq->lock in:
4082  *  - ttwu_runnable()    -- old rq, unavoidable, see comment there;
4083  *  - ttwu_queue()       -- new rq, for enqueue of the task;
4084  *  - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
4085  *
4086  * As a consequence we race really badly with just about everything. See the
4087  * many memory barriers and their comments for details.
4088  *
4089  * Return: %true if @p->state changes (an actual wakeup was done),
4090  *	   %false otherwise.
4091  */
4092 int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
4093 {
4094 	guard(preempt)();
4095 	int cpu, success = 0;
4096 
4097 	wake_flags |= WF_TTWU;
4098 
4099 	if (p == current) {
4100 		/*
4101 		 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
4102 		 * == smp_processor_id()'. Together this means we can special
4103 		 * case the whole 'p->on_rq && ttwu_runnable()' case below
4104 		 * without taking any locks.
4105 		 *
4106 		 * Specifically, given current runs ttwu() we must be before
4107 		 * schedule()'s block_task(), as such this must not observe
4108 		 * sched_delayed.
4109 		 *
4110 		 * In particular:
4111 		 *  - we rely on Program-Order guarantees for all the ordering,
4112 		 *  - we're serialized against set_special_state() by virtue of
4113 		 *    it disabling IRQs (this allows not taking ->pi_lock).
4114 		 */
4115 		WARN_ON_ONCE(p->se.sched_delayed);
4116 		if (!ttwu_state_match(p, state, &success))
4117 			goto out;
4118 
4119 		trace_sched_waking(p);
4120 		ttwu_do_wakeup(p);
4121 		goto out;
4122 	}
4123 
4124 	/*
4125 	 * If we are going to wake up a thread waiting for CONDITION we
4126 	 * need to ensure that CONDITION=1 done by the caller can not be
4127 	 * reordered with p->state check below. This pairs with smp_store_mb()
4128 	 * in set_current_state() that the waiting thread does.
4129 	 */
4130 	scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
4131 		smp_mb__after_spinlock();
4132 		if (!ttwu_state_match(p, state, &success))
4133 			break;
4134 
4135 		trace_sched_waking(p);
4136 
4137 		/*
4138 		 * Ensure we load p->on_rq _after_ p->state, otherwise it would
4139 		 * be possible to, falsely, observe p->on_rq == 0 and get stuck
4140 		 * in smp_cond_load_acquire() below.
4141 		 *
4142 		 * sched_ttwu_pending()			try_to_wake_up()
4143 		 *   STORE p->on_rq = 1			  LOAD p->state
4144 		 *   UNLOCK rq->lock
4145 		 *
4146 		 * __schedule() (switch to task 'p')
4147 		 *   LOCK rq->lock			  smp_rmb();
4148 		 *   smp_mb__after_spinlock();
4149 		 *   UNLOCK rq->lock
4150 		 *
4151 		 * [task p]
4152 		 *   STORE p->state = UNINTERRUPTIBLE	  LOAD p->on_rq
4153 		 *
4154 		 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4155 		 * __schedule().  See the comment for smp_mb__after_spinlock().
4156 		 *
4157 		 * A similar smp_rmb() lives in __task_needs_rq_lock().
4158 		 */
4159 		smp_rmb();
4160 		if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
4161 			break;
4162 
4163 		/*
4164 		 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
4165 		 * possible to, falsely, observe p->on_cpu == 0.
4166 		 *
4167 		 * One must be running (->on_cpu == 1) in order to remove oneself
4168 		 * from the runqueue.
4169 		 *
4170 		 * __schedule() (switch to task 'p')	try_to_wake_up()
4171 		 *   STORE p->on_cpu = 1		  LOAD p->on_rq
4172 		 *   UNLOCK rq->lock
4173 		 *
4174 		 * __schedule() (put 'p' to sleep)
4175 		 *   LOCK rq->lock			  smp_rmb();
4176 		 *   smp_mb__after_spinlock();
4177 		 *   STORE p->on_rq = 0			  LOAD p->on_cpu
4178 		 *
4179 		 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4180 		 * __schedule().  See the comment for smp_mb__after_spinlock().
4181 		 *
4182 		 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
4183 		 * schedule()'s block_task() has 'happened' and p will no longer
4184 		 * care about it's own p->state. See the comment in __schedule().
4185 		 */
4186 		smp_acquire__after_ctrl_dep();
4187 
4188 		/*
4189 		 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
4190 		 * == 0), which means we need to do an enqueue, change p->state to
4191 		 * TASK_WAKING such that we can unlock p->pi_lock before doing the
4192 		 * enqueue, such as ttwu_queue_wakelist().
4193 		 */
4194 		WRITE_ONCE(p->__state, TASK_WAKING);
4195 
4196 		/*
4197 		 * If the owning (remote) CPU is still in the middle of schedule() with
4198 		 * this task as prev, considering queueing p on the remote CPUs wake_list
4199 		 * which potentially sends an IPI instead of spinning on p->on_cpu to
4200 		 * let the waker make forward progress. This is safe because IRQs are
4201 		 * disabled and the IPI will deliver after on_cpu is cleared.
4202 		 *
4203 		 * Ensure we load task_cpu(p) after p->on_cpu:
4204 		 *
4205 		 * set_task_cpu(p, cpu);
4206 		 *   STORE p->cpu = @cpu
4207 		 * __schedule() (switch to task 'p')
4208 		 *   LOCK rq->lock
4209 		 *   smp_mb__after_spin_lock()		smp_cond_load_acquire(&p->on_cpu)
4210 		 *   STORE p->on_cpu = 1		LOAD p->cpu
4211 		 *
4212 		 * to ensure we observe the correct CPU on which the task is currently
4213 		 * scheduling.
4214 		 */
4215 		if (smp_load_acquire(&p->on_cpu) &&
4216 		    ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
4217 			break;
4218 
4219 		/*
4220 		 * If the owning (remote) CPU is still in the middle of schedule() with
4221 		 * this task as prev, wait until it's done referencing the task.
4222 		 *
4223 		 * Pairs with the smp_store_release() in finish_task().
4224 		 *
4225 		 * This ensures that tasks getting woken will be fully ordered against
4226 		 * their previous state and preserve Program Order.
4227 		 */
4228 		smp_cond_load_acquire(&p->on_cpu, !VAL);
4229 
4230 		cpu = select_task_rq(p, p->wake_cpu, &wake_flags);
4231 		if (task_cpu(p) != cpu) {
4232 			if (p->in_iowait) {
4233 				delayacct_blkio_end(p);
4234 				atomic_dec(&task_rq(p)->nr_iowait);
4235 			}
4236 
4237 			wake_flags |= WF_MIGRATED;
4238 			psi_ttwu_dequeue(p);
4239 			set_task_cpu(p, cpu);
4240 		}
4241 
4242 		ttwu_queue(p, cpu, wake_flags);
4243 	}
4244 out:
4245 	if (success)
4246 		ttwu_stat(p, task_cpu(p), wake_flags);
4247 
4248 	return success;
4249 }
4250 
4251 static bool __task_needs_rq_lock(struct task_struct *p)
4252 {
4253 	unsigned int state = READ_ONCE(p->__state);
4254 
4255 	/*
4256 	 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when
4257 	 * the task is blocked. Make sure to check @state since ttwu() can drop
4258 	 * locks at the end, see ttwu_queue_wakelist().
4259 	 */
4260 	if (state == TASK_RUNNING || state == TASK_WAKING)
4261 		return true;
4262 
4263 	/*
4264 	 * Ensure we load p->on_rq after p->__state, otherwise it would be
4265 	 * possible to, falsely, observe p->on_rq == 0.
4266 	 *
4267 	 * See try_to_wake_up() for a longer comment.
4268 	 */
4269 	smp_rmb();
4270 	if (p->on_rq)
4271 		return true;
4272 
4273 	/*
4274 	 * Ensure the task has finished __schedule() and will not be referenced
4275 	 * anymore. Again, see try_to_wake_up() for a longer comment.
4276 	 */
4277 	smp_rmb();
4278 	smp_cond_load_acquire(&p->on_cpu, !VAL);
4279 
4280 	return false;
4281 }
4282 
4283 /**
4284  * task_call_func - Invoke a function on task in fixed state
4285  * @p: Process for which the function is to be invoked, can be @current.
4286  * @func: Function to invoke.
4287  * @arg: Argument to function.
4288  *
4289  * Fix the task in it's current state by avoiding wakeups and or rq operations
4290  * and call @func(@arg) on it.  This function can use task_is_runnable() and
4291  * task_curr() to work out what the state is, if required.  Given that @func
4292  * can be invoked with a runqueue lock held, it had better be quite
4293  * lightweight.
4294  *
4295  * Returns:
4296  *   Whatever @func returns
4297  */
4298 int task_call_func(struct task_struct *p, task_call_f func, void *arg)
4299 {
4300 	struct rq_flags rf;
4301 	int ret;
4302 
4303 	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
4304 
4305 	if (__task_needs_rq_lock(p)) {
4306 		struct rq *rq = __task_rq_lock(p, &rf);
4307 
4308 		/*
4309 		 * At this point the task is pinned; either:
4310 		 *  - blocked and we're holding off wakeups	 (pi->lock)
4311 		 *  - woken, and we're holding off enqueue	 (rq->lock)
4312 		 *  - queued, and we're holding off schedule	 (rq->lock)
4313 		 *  - running, and we're holding off de-schedule (rq->lock)
4314 		 *
4315 		 * The called function (@func) can use: task_curr(), p->on_rq and
4316 		 * p->__state to differentiate between these states.
4317 		 */
4318 		ret = func(p, arg);
4319 
4320 		__task_rq_unlock(rq, p, &rf);
4321 	} else {
4322 		ret = func(p, arg);
4323 	}
4324 
4325 	raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
4326 	return ret;
4327 }
4328 
4329 /**
4330  * cpu_curr_snapshot - Return a snapshot of the currently running task
4331  * @cpu: The CPU on which to snapshot the task.
4332  *
4333  * Returns the task_struct pointer of the task "currently" running on
4334  * the specified CPU.
4335  *
4336  * If the specified CPU was offline, the return value is whatever it
4337  * is, perhaps a pointer to the task_struct structure of that CPU's idle
4338  * task, but there is no guarantee.  Callers wishing a useful return
4339  * value must take some action to ensure that the specified CPU remains
4340  * online throughout.
4341  *
4342  * This function executes full memory barriers before and after fetching
4343  * the pointer, which permits the caller to confine this function's fetch
4344  * with respect to the caller's accesses to other shared variables.
4345  */
4346 struct task_struct *cpu_curr_snapshot(int cpu)
4347 {
4348 	struct rq *rq = cpu_rq(cpu);
4349 	struct task_struct *t;
4350 	struct rq_flags rf;
4351 
4352 	rq_lock_irqsave(rq, &rf);
4353 	smp_mb__after_spinlock(); /* Pairing determined by caller's synchronization design. */
4354 	t = rcu_dereference(cpu_curr(cpu));
4355 	rq_unlock_irqrestore(rq, &rf);
4356 	smp_mb(); /* Pairing determined by caller's synchronization design. */
4357 
4358 	return t;
4359 }
4360 
4361 /**
4362  * wake_up_process - Wake up a specific process
4363  * @p: The process to be woken up.
4364  *
4365  * Attempt to wake up the nominated process and move it to the set of runnable
4366  * processes.
4367  *
4368  * Return: 1 if the process was woken up, 0 if it was already running.
4369  *
4370  * This function executes a full memory barrier before accessing the task state.
4371  */
4372 int wake_up_process(struct task_struct *p)
4373 {
4374 	return try_to_wake_up(p, TASK_NORMAL, 0);
4375 }
4376 EXPORT_SYMBOL(wake_up_process);
4377 
4378 int wake_up_state(struct task_struct *p, unsigned int state)
4379 {
4380 	return try_to_wake_up(p, state, 0);
4381 }
4382 
4383 /*
4384  * Perform scheduler related setup for a newly forked process p.
4385  * p is forked by current.
4386  *
4387  * __sched_fork() is basic setup which is also used by sched_init() to
4388  * initialize the boot CPU's idle task.
4389  */
4390 static void __sched_fork(u64 clone_flags, struct task_struct *p)
4391 {
4392 	p->on_rq			= 0;
4393 
4394 	p->se.on_rq			= 0;
4395 	p->se.exec_start		= 0;
4396 	p->se.sum_exec_runtime		= 0;
4397 	p->se.prev_sum_exec_runtime	= 0;
4398 	p->se.nr_migrations		= 0;
4399 	p->se.vruntime			= 0;
4400 	p->se.vlag			= 0;
4401 	INIT_LIST_HEAD(&p->se.group_node);
4402 
4403 	/* A delayed task cannot be in clone(). */
4404 	WARN_ON_ONCE(p->se.sched_delayed);
4405 
4406 #ifdef CONFIG_FAIR_GROUP_SCHED
4407 	p->se.cfs_rq			= NULL;
4408 #ifdef CONFIG_CFS_BANDWIDTH
4409 	init_cfs_throttle_work(p);
4410 #endif
4411 #endif
4412 
4413 #ifdef CONFIG_SCHEDSTATS
4414 	/* Even if schedstat is disabled, there should not be garbage */
4415 	memset(&p->stats, 0, sizeof(p->stats));
4416 #endif
4417 
4418 	init_dl_entity(&p->dl);
4419 
4420 	INIT_LIST_HEAD(&p->rt.run_list);
4421 	p->rt.timeout		= 0;
4422 	p->rt.time_slice	= sched_rr_timeslice;
4423 	p->rt.on_rq		= 0;
4424 	p->rt.on_list		= 0;
4425 
4426 #ifdef CONFIG_SCHED_CLASS_EXT
4427 	init_scx_entity(&p->scx);
4428 #endif
4429 
4430 #ifdef CONFIG_PREEMPT_NOTIFIERS
4431 	INIT_HLIST_HEAD(&p->preempt_notifiers);
4432 #endif
4433 
4434 #ifdef CONFIG_COMPACTION
4435 	p->capture_control = NULL;
4436 #endif
4437 	init_numa_balancing(clone_flags, p);
4438 	p->wake_entry.u_flags = CSD_TYPE_TTWU;
4439 	p->migration_pending = NULL;
4440 }
4441 
4442 DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);
4443 
4444 #ifdef CONFIG_NUMA_BALANCING
4445 
4446 int sysctl_numa_balancing_mode;
4447 
4448 static void __set_numabalancing_state(bool enabled)
4449 {
4450 	if (enabled)
4451 		static_branch_enable(&sched_numa_balancing);
4452 	else
4453 		static_branch_disable(&sched_numa_balancing);
4454 }
4455 
4456 void set_numabalancing_state(bool enabled)
4457 {
4458 	if (enabled)
4459 		sysctl_numa_balancing_mode = NUMA_BALANCING_NORMAL;
4460 	else
4461 		sysctl_numa_balancing_mode = NUMA_BALANCING_DISABLED;
4462 	__set_numabalancing_state(enabled);
4463 }
4464 
4465 #ifdef CONFIG_PROC_SYSCTL
4466 static void reset_memory_tiering(void)
4467 {
4468 	struct pglist_data *pgdat;
4469 
4470 	for_each_online_pgdat(pgdat) {
4471 		pgdat->nbp_threshold = 0;
4472 		pgdat->nbp_th_nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE);
4473 		pgdat->nbp_th_start = jiffies_to_msecs(jiffies);
4474 	}
4475 }
4476 
4477 static int sysctl_numa_balancing(const struct ctl_table *table, int write,
4478 			  void *buffer, size_t *lenp, loff_t *ppos)
4479 {
4480 	struct ctl_table t;
4481 	int err;
4482 	int state = sysctl_numa_balancing_mode;
4483 
4484 	if (write && !capable(CAP_SYS_ADMIN))
4485 		return -EPERM;
4486 
4487 	t = *table;
4488 	t.data = &state;
4489 	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
4490 	if (err < 0)
4491 		return err;
4492 	if (write) {
4493 		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) &&
4494 		    (state & NUMA_BALANCING_MEMORY_TIERING))
4495 			reset_memory_tiering();
4496 		sysctl_numa_balancing_mode = state;
4497 		__set_numabalancing_state(state);
4498 	}
4499 	return err;
4500 }
4501 #endif /* CONFIG_PROC_SYSCTL */
4502 #endif /* CONFIG_NUMA_BALANCING */
4503 
4504 #ifdef CONFIG_SCHEDSTATS
4505 
4506 DEFINE_STATIC_KEY_FALSE(sched_schedstats);
4507 
4508 static void set_schedstats(bool enabled)
4509 {
4510 	if (enabled)
4511 		static_branch_enable(&sched_schedstats);
4512 	else
4513 		static_branch_disable(&sched_schedstats);
4514 }
4515 
4516 void force_schedstat_enabled(void)
4517 {
4518 	if (!schedstat_enabled()) {
4519 		pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
4520 		static_branch_enable(&sched_schedstats);
4521 	}
4522 }
4523 
4524 static int __init setup_schedstats(char *str)
4525 {
4526 	int ret = 0;
4527 	if (!str)
4528 		goto out;
4529 
4530 	if (!strcmp(str, "enable")) {
4531 		set_schedstats(true);
4532 		ret = 1;
4533 	} else if (!strcmp(str, "disable")) {
4534 		set_schedstats(false);
4535 		ret = 1;
4536 	}
4537 out:
4538 	if (!ret)
4539 		pr_warn("Unable to parse schedstats=\n");
4540 
4541 	return ret;
4542 }
4543 __setup("schedstats=", setup_schedstats);
4544 
4545 #ifdef CONFIG_PROC_SYSCTL
4546 static int sysctl_schedstats(const struct ctl_table *table, int write, void *buffer,
4547 		size_t *lenp, loff_t *ppos)
4548 {
4549 	struct ctl_table t;
4550 	int err;
4551 	int state = static_branch_likely(&sched_schedstats);
4552 
4553 	if (write && !capable(CAP_SYS_ADMIN))
4554 		return -EPERM;
4555 
4556 	t = *table;
4557 	t.data = &state;
4558 	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
4559 	if (err < 0)
4560 		return err;
4561 	if (write)
4562 		set_schedstats(state);
4563 	return err;
4564 }
4565 #endif /* CONFIG_PROC_SYSCTL */
4566 #endif /* CONFIG_SCHEDSTATS */
4567 
4568 #ifdef CONFIG_SYSCTL
4569 static const struct ctl_table sched_core_sysctls[] = {
4570 #ifdef CONFIG_SCHEDSTATS
4571 	{
4572 		.procname       = "sched_schedstats",
4573 		.data           = NULL,
4574 		.maxlen         = sizeof(unsigned int),
4575 		.mode           = 0644,
4576 		.proc_handler   = sysctl_schedstats,
4577 		.extra1         = SYSCTL_ZERO,
4578 		.extra2         = SYSCTL_ONE,
4579 	},
4580 #endif /* CONFIG_SCHEDSTATS */
4581 #ifdef CONFIG_UCLAMP_TASK
4582 	{
4583 		.procname       = "sched_util_clamp_min",
4584 		.data           = &sysctl_sched_uclamp_util_min,
4585 		.maxlen         = sizeof(unsigned int),
4586 		.mode           = 0644,
4587 		.proc_handler   = sysctl_sched_uclamp_handler,
4588 	},
4589 	{
4590 		.procname       = "sched_util_clamp_max",
4591 		.data           = &sysctl_sched_uclamp_util_max,
4592 		.maxlen         = sizeof(unsigned int),
4593 		.mode           = 0644,
4594 		.proc_handler   = sysctl_sched_uclamp_handler,
4595 	},
4596 	{
4597 		.procname       = "sched_util_clamp_min_rt_default",
4598 		.data           = &sysctl_sched_uclamp_util_min_rt_default,
4599 		.maxlen         = sizeof(unsigned int),
4600 		.mode           = 0644,
4601 		.proc_handler   = sysctl_sched_uclamp_handler,
4602 	},
4603 #endif /* CONFIG_UCLAMP_TASK */
4604 #ifdef CONFIG_NUMA_BALANCING
4605 	{
4606 		.procname	= "numa_balancing",
4607 		.data		= NULL, /* filled in by handler */
4608 		.maxlen		= sizeof(unsigned int),
4609 		.mode		= 0644,
4610 		.proc_handler	= sysctl_numa_balancing,
4611 		.extra1		= SYSCTL_ZERO,
4612 		.extra2		= SYSCTL_FOUR,
4613 	},
4614 #endif /* CONFIG_NUMA_BALANCING */
4615 };
4616 static int __init sched_core_sysctl_init(void)
4617 {
4618 	register_sysctl_init("kernel", sched_core_sysctls);
4619 	return 0;
4620 }
4621 late_initcall(sched_core_sysctl_init);
4622 #endif /* CONFIG_SYSCTL */
4623 
4624 /*
4625  * fork()/clone()-time setup:
4626  */
4627 int sched_fork(u64 clone_flags, struct task_struct *p)
4628 {
4629 	__sched_fork(clone_flags, p);
4630 	/*
4631 	 * We mark the process as NEW here. This guarantees that
4632 	 * nobody will actually run it, and a signal or other external
4633 	 * event cannot wake it up and insert it on the runqueue either.
4634 	 */
4635 	p->__state = TASK_NEW;
4636 
4637 	/*
4638 	 * Make sure we do not leak PI boosting priority to the child.
4639 	 */
4640 	p->prio = current->normal_prio;
4641 
4642 	uclamp_fork(p);
4643 
4644 	/*
4645 	 * Revert to default priority/policy on fork if requested.
4646 	 */
4647 	if (unlikely(p->sched_reset_on_fork)) {
4648 		if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
4649 			p->policy = SCHED_NORMAL;
4650 			p->static_prio = NICE_TO_PRIO(0);
4651 			p->rt_priority = 0;
4652 		} else if (PRIO_TO_NICE(p->static_prio) < 0)
4653 			p->static_prio = NICE_TO_PRIO(0);
4654 
4655 		p->prio = p->normal_prio = p->static_prio;
4656 		set_load_weight(p, false);
4657 		p->se.custom_slice = 0;
4658 		p->se.slice = sysctl_sched_base_slice;
4659 
4660 		/*
4661 		 * We don't need the reset flag anymore after the fork. It has
4662 		 * fulfilled its duty:
4663 		 */
4664 		p->sched_reset_on_fork = 0;
4665 	}
4666 
4667 	if (dl_prio(p->prio))
4668 		return -EAGAIN;
4669 
4670 	scx_pre_fork(p);
4671 
4672 	if (rt_prio(p->prio)) {
4673 		p->sched_class = &rt_sched_class;
4674 #ifdef CONFIG_SCHED_CLASS_EXT
4675 	} else if (task_should_scx(p->policy)) {
4676 		p->sched_class = &ext_sched_class;
4677 #endif
4678 	} else {
4679 		p->sched_class = &fair_sched_class;
4680 	}
4681 
4682 	init_entity_runnable_average(&p->se);
4683 
4684 
4685 #ifdef CONFIG_SCHED_INFO
4686 	if (likely(sched_info_on()))
4687 		memset(&p->sched_info, 0, sizeof(p->sched_info));
4688 #endif
4689 	p->on_cpu = 0;
4690 	init_task_preempt_count(p);
4691 	plist_node_init(&p->pushable_tasks, MAX_PRIO);
4692 	RB_CLEAR_NODE(&p->pushable_dl_tasks);
4693 
4694 	return 0;
4695 }
4696 
4697 int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
4698 {
4699 	unsigned long flags;
4700 
4701 	/*
4702 	 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
4703 	 * required yet, but lockdep gets upset if rules are violated.
4704 	 */
4705 	raw_spin_lock_irqsave(&p->pi_lock, flags);
4706 #ifdef CONFIG_CGROUP_SCHED
4707 	if (1) {
4708 		struct task_group *tg;
4709 		tg = container_of(kargs->cset->subsys[cpu_cgrp_id],
4710 				  struct task_group, css);
4711 		tg = autogroup_task_group(p, tg);
4712 		p->sched_task_group = tg;
4713 	}
4714 #endif
4715 	/*
4716 	 * We're setting the CPU for the first time, we don't migrate,
4717 	 * so use __set_task_cpu().
4718 	 */
4719 	__set_task_cpu(p, smp_processor_id());
4720 	if (p->sched_class->task_fork)
4721 		p->sched_class->task_fork(p);
4722 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4723 
4724 	return scx_fork(p);
4725 }
4726 
4727 void sched_cancel_fork(struct task_struct *p)
4728 {
4729 	scx_cancel_fork(p);
4730 }
4731 
4732 void sched_post_fork(struct task_struct *p)
4733 {
4734 	uclamp_post_fork(p);
4735 	scx_post_fork(p);
4736 }
4737 
4738 unsigned long to_ratio(u64 period, u64 runtime)
4739 {
4740 	if (runtime == RUNTIME_INF)
4741 		return BW_UNIT;
4742 
4743 	/*
4744 	 * Doing this here saves a lot of checks in all
4745 	 * the calling paths, and returning zero seems
4746 	 * safe for them anyway.
4747 	 */
4748 	if (period == 0)
4749 		return 0;
4750 
4751 	return div64_u64(runtime << BW_SHIFT, period);
4752 }
4753 
4754 /*
4755  * wake_up_new_task - wake up a newly created task for the first time.
4756  *
4757  * This function will do some initial scheduler statistics housekeeping
4758  * that must be done for every newly created context, then puts the task
4759  * on the runqueue and wakes it.
4760  */
4761 void wake_up_new_task(struct task_struct *p)
4762 {
4763 	struct rq_flags rf;
4764 	struct rq *rq;
4765 	int wake_flags = WF_FORK;
4766 
4767 	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
4768 	WRITE_ONCE(p->__state, TASK_RUNNING);
4769 	/*
4770 	 * Fork balancing, do it here and not earlier because:
4771 	 *  - cpus_ptr can change in the fork path
4772 	 *  - any previously selected CPU might disappear through hotplug
4773 	 *
4774 	 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
4775 	 * as we're not fully set-up yet.
4776 	 */
4777 	p->recent_used_cpu = task_cpu(p);
4778 	__set_task_cpu(p, select_task_rq(p, task_cpu(p), &wake_flags));
4779 	rq = __task_rq_lock(p, &rf);
4780 	update_rq_clock(rq);
4781 	post_init_entity_util_avg(p);
4782 
4783 	activate_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_INITIAL);
4784 	trace_sched_wakeup_new(p);
4785 	wakeup_preempt(rq, p, wake_flags);
4786 	if (p->sched_class->task_woken) {
4787 		/*
4788 		 * Nothing relies on rq->lock after this, so it's fine to
4789 		 * drop it.
4790 		 */
4791 		rq_unpin_lock(rq, &rf);
4792 		p->sched_class->task_woken(rq, p);
4793 		rq_repin_lock(rq, &rf);
4794 	}
4795 	task_rq_unlock(rq, p, &rf);
4796 }
4797 
4798 #ifdef CONFIG_PREEMPT_NOTIFIERS
4799 
4800 static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
4801 
4802 void preempt_notifier_inc(void)
4803 {
4804 	static_branch_inc(&preempt_notifier_key);
4805 }
4806 EXPORT_SYMBOL_GPL(preempt_notifier_inc);
4807 
4808 void preempt_notifier_dec(void)
4809 {
4810 	static_branch_dec(&preempt_notifier_key);
4811 }
4812 EXPORT_SYMBOL_GPL(preempt_notifier_dec);
4813 
4814 /**
4815  * preempt_notifier_register - tell me when current is being preempted & rescheduled
4816  * @notifier: notifier struct to register
4817  */
4818 void preempt_notifier_register(struct preempt_notifier *notifier)
4819 {
4820 	if (!static_branch_unlikely(&preempt_notifier_key))
4821 		WARN(1, "registering preempt_notifier while notifiers disabled\n");
4822 
4823 	hlist_add_head(&notifier->link, &current->preempt_notifiers);
4824 }
4825 EXPORT_SYMBOL_GPL(preempt_notifier_register);
4826 
4827 /**
4828  * preempt_notifier_unregister - no longer interested in preemption notifications
4829  * @notifier: notifier struct to unregister
4830  *
4831  * This is *not* safe to call from within a preemption notifier.
4832  */
4833 void preempt_notifier_unregister(struct preempt_notifier *notifier)
4834 {
4835 	hlist_del(&notifier->link);
4836 }
4837 EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
4838 
4839 static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
4840 {
4841 	struct preempt_notifier *notifier;
4842 
4843 	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
4844 		notifier->ops->sched_in(notifier, raw_smp_processor_id());
4845 }
4846 
4847 static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
4848 {
4849 	if (static_branch_unlikely(&preempt_notifier_key))
4850 		__fire_sched_in_preempt_notifiers(curr);
4851 }
4852 
4853 static void
4854 __fire_sched_out_preempt_notifiers(struct task_struct *curr,
4855 				   struct task_struct *next)
4856 {
4857 	struct preempt_notifier *notifier;
4858 
4859 	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
4860 		notifier->ops->sched_out(notifier, next);
4861 }
4862 
4863 static __always_inline void
4864 fire_sched_out_preempt_notifiers(struct task_struct *curr,
4865 				 struct task_struct *next)
4866 {
4867 	if (static_branch_unlikely(&preempt_notifier_key))
4868 		__fire_sched_out_preempt_notifiers(curr, next);
4869 }
4870 
4871 #else /* !CONFIG_PREEMPT_NOTIFIERS: */
4872 
4873 static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
4874 {
4875 }
4876 
4877 static inline void
4878 fire_sched_out_preempt_notifiers(struct task_struct *curr,
4879 				 struct task_struct *next)
4880 {
4881 }
4882 
4883 #endif /* !CONFIG_PREEMPT_NOTIFIERS */
4884 
4885 static inline void prepare_task(struct task_struct *next)
4886 {
4887 	/*
4888 	 * Claim the task as running, we do this before switching to it
4889 	 * such that any running task will have this set.
4890 	 *
4891 	 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and
4892 	 * its ordering comment.
4893 	 */
4894 	WRITE_ONCE(next->on_cpu, 1);
4895 }
4896 
4897 static inline void finish_task(struct task_struct *prev)
4898 {
4899 	/*
4900 	 * This must be the very last reference to @prev from this CPU. After
4901 	 * p->on_cpu is cleared, the task can be moved to a different CPU. We
4902 	 * must ensure this doesn't happen until the switch is completely
4903 	 * finished.
4904 	 *
4905 	 * In particular, the load of prev->state in finish_task_switch() must
4906 	 * happen before this.
4907 	 *
4908 	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
4909 	 */
4910 	smp_store_release(&prev->on_cpu, 0);
4911 }
4912 
4913 static void do_balance_callbacks(struct rq *rq, struct balance_callback *head)
4914 {
4915 	void (*func)(struct rq *rq);
4916 	struct balance_callback *next;
4917 
4918 	lockdep_assert_rq_held(rq);
4919 
4920 	while (head) {
4921 		func = (void (*)(struct rq *))head->func;
4922 		next = head->next;
4923 		head->next = NULL;
4924 		head = next;
4925 
4926 		func(rq);
4927 	}
4928 }
4929 
4930 static void balance_push(struct rq *rq);
4931 
4932 /*
4933  * balance_push_callback is a right abuse of the callback interface and plays
4934  * by significantly different rules.
4935  *
4936  * Where the normal balance_callback's purpose is to be ran in the same context
4937  * that queued it (only later, when it's safe to drop rq->lock again),
4938  * balance_push_callback is specifically targeted at __schedule().
4939  *
4940  * This abuse is tolerated because it places all the unlikely/odd cases behind
4941  * a single test, namely: rq->balance_callback == NULL.
4942  */
4943 struct balance_callback balance_push_callback = {
4944 	.next = NULL,
4945 	.func = balance_push,
4946 };
4947 
4948 static inline struct balance_callback *
4949 __splice_balance_callbacks(struct rq *rq, bool split)
4950 {
4951 	struct balance_callback *head = rq->balance_callback;
4952 
4953 	if (likely(!head))
4954 		return NULL;
4955 
4956 	lockdep_assert_rq_held(rq);
4957 	/*
4958 	 * Must not take balance_push_callback off the list when
4959 	 * splice_balance_callbacks() and balance_callbacks() are not
4960 	 * in the same rq->lock section.
4961 	 *
4962 	 * In that case it would be possible for __schedule() to interleave
4963 	 * and observe the list empty.
4964 	 */
4965 	if (split && head == &balance_push_callback)
4966 		head = NULL;
4967 	else
4968 		rq->balance_callback = NULL;
4969 
4970 	return head;
4971 }
4972 
4973 struct balance_callback *splice_balance_callbacks(struct rq *rq)
4974 {
4975 	return __splice_balance_callbacks(rq, true);
4976 }
4977 
4978 void __balance_callbacks(struct rq *rq, struct rq_flags *rf)
4979 {
4980 	if (rf)
4981 		rq_unpin_lock(rq, rf);
4982 	do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
4983 	if (rf)
4984 		rq_repin_lock(rq, rf);
4985 }
4986 
4987 void balance_callbacks(struct rq *rq, struct balance_callback *head)
4988 {
4989 	unsigned long flags;
4990 
4991 	if (unlikely(head)) {
4992 		raw_spin_rq_lock_irqsave(rq, flags);
4993 		do_balance_callbacks(rq, head);
4994 		raw_spin_rq_unlock_irqrestore(rq, flags);
4995 	}
4996 }
4997 
4998 static inline void
4999 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
5000 	__releases(__rq_lockp(rq))
5001 	__acquires(__rq_lockp(this_rq()))
5002 {
5003 	/*
5004 	 * Since the runqueue lock will be released by the next
5005 	 * task (which is an invalid locking op but in the case
5006 	 * of the scheduler it's an obvious special-case), so we
5007 	 * do an early lockdep release here:
5008 	 */
5009 	rq_unpin_lock(rq, rf);
5010 	spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_);
5011 #ifdef CONFIG_DEBUG_SPINLOCK
5012 	/* this is a valid case when another task releases the spinlock */
5013 	rq_lockp(rq)->owner = next;
5014 #endif
5015 	/*
5016 	 * Model the rq reference switcheroo.
5017 	 */
5018 	__release(__rq_lockp(rq));
5019 	__acquire(__rq_lockp(this_rq()));
5020 }
5021 
5022 static inline void finish_lock_switch(struct rq *rq)
5023 	__releases(__rq_lockp(rq))
5024 {
5025 	/*
5026 	 * If we are tracking spinlock dependencies then we have to
5027 	 * fix up the runqueue lock - which gets 'carried over' from
5028 	 * prev into current:
5029 	 */
5030 	spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_);
5031 	__balance_callbacks(rq, NULL);
5032 	raw_spin_rq_unlock_irq(rq);
5033 }
5034 
5035 /*
5036  * NOP if the arch has not defined these:
5037  */
5038 
5039 #ifndef prepare_arch_switch
5040 # define prepare_arch_switch(next)	do { } while (0)
5041 #endif
5042 
5043 #ifndef finish_arch_post_lock_switch
5044 # define finish_arch_post_lock_switch()	do { } while (0)
5045 #endif
5046 
5047 static inline void kmap_local_sched_out(void)
5048 {
5049 #ifdef CONFIG_KMAP_LOCAL
5050 	if (unlikely(current->kmap_ctrl.idx))
5051 		__kmap_local_sched_out();
5052 #endif
5053 }
5054 
5055 static inline void kmap_local_sched_in(void)
5056 {
5057 #ifdef CONFIG_KMAP_LOCAL
5058 	if (unlikely(current->kmap_ctrl.idx))
5059 		__kmap_local_sched_in();
5060 #endif
5061 }
5062 
5063 /**
5064  * prepare_task_switch - prepare to switch tasks
5065  * @rq: the runqueue preparing to switch
5066  * @prev: the current task that is being switched out
5067  * @next: the task we are going to switch to.
5068  *
5069  * This is called with the rq lock held and interrupts off. It must
5070  * be paired with a subsequent finish_task_switch after the context
5071  * switch.
5072  *
5073  * prepare_task_switch sets up locking and calls architecture specific
5074  * hooks.
5075  */
5076 static inline void
5077 prepare_task_switch(struct rq *rq, struct task_struct *prev,
5078 		    struct task_struct *next)
5079 	__must_hold(__rq_lockp(rq))
5080 {
5081 	kcov_prepare_switch(prev);
5082 	sched_info_switch(rq, prev, next);
5083 	perf_event_task_sched_out(prev, next);
5084 	fire_sched_out_preempt_notifiers(prev, next);
5085 	kmap_local_sched_out();
5086 	prepare_task(next);
5087 	prepare_arch_switch(next);
5088 }
5089 
5090 /**
5091  * finish_task_switch - clean up after a task-switch
5092  * @prev: the thread we just switched away from.
5093  *
5094  * finish_task_switch must be called after the context switch, paired
5095  * with a prepare_task_switch call before the context switch.
5096  * finish_task_switch will reconcile locking set up by prepare_task_switch,
5097  * and do any other architecture-specific cleanup actions.
5098  *
5099  * Note that we may have delayed dropping an mm in context_switch(). If
5100  * so, we finish that here outside of the runqueue lock. (Doing it
5101  * with the lock held can cause deadlocks; see schedule() for
5102  * details.)
5103  *
5104  * The context switch have flipped the stack from under us and restored the
5105  * local variables which were saved when this task called schedule() in the
5106  * past. 'prev == current' is still correct but we need to recalculate this_rq
5107  * because prev may have moved to another CPU.
5108  */
5109 static struct rq *finish_task_switch(struct task_struct *prev)
5110 	__releases(__rq_lockp(this_rq()))
5111 {
5112 	struct rq *rq = this_rq();
5113 	struct mm_struct *mm = rq->prev_mm;
5114 	unsigned int prev_state;
5115 
5116 	/*
5117 	 * The previous task will have left us with a preempt_count of 2
5118 	 * because it left us after:
5119 	 *
5120 	 *	schedule()
5121 	 *	  preempt_disable();			// 1
5122 	 *	  __schedule()
5123 	 *	    raw_spin_lock_irq(&rq->lock)	// 2
5124 	 *
5125 	 * Also, see FORK_PREEMPT_COUNT.
5126 	 */
5127 	if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
5128 		      "corrupted preempt_count: %s/%d/0x%x\n",
5129 		      current->comm, current->pid, preempt_count()))
5130 		preempt_count_set(FORK_PREEMPT_COUNT);
5131 
5132 	rq->prev_mm = NULL;
5133 
5134 	/*
5135 	 * A task struct has one reference for the use as "current".
5136 	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
5137 	 * schedule one last time. The schedule call will never return, and
5138 	 * the scheduled task must drop that reference.
5139 	 *
5140 	 * We must observe prev->state before clearing prev->on_cpu (in
5141 	 * finish_task), otherwise a concurrent wakeup can get prev
5142 	 * running on another CPU and we could rave with its RUNNING -> DEAD
5143 	 * transition, resulting in a double drop.
5144 	 */
5145 	prev_state = READ_ONCE(prev->__state);
5146 	vtime_task_switch(prev);
5147 	perf_event_task_sched_in(prev, current);
5148 	finish_task(prev);
5149 	tick_nohz_task_switch();
5150 	finish_lock_switch(rq);
5151 	finish_arch_post_lock_switch();
5152 	kcov_finish_switch(current);
5153 	/*
5154 	 * kmap_local_sched_out() is invoked with rq::lock held and
5155 	 * interrupts disabled. There is no requirement for that, but the
5156 	 * sched out code does not have an interrupt enabled section.
5157 	 * Restoring the maps on sched in does not require interrupts being
5158 	 * disabled either.
5159 	 */
5160 	kmap_local_sched_in();
5161 
5162 	fire_sched_in_preempt_notifiers(current);
5163 	/*
5164 	 * When switching through a kernel thread, the loop in
5165 	 * membarrier_{private,global}_expedited() may have observed that
5166 	 * kernel thread and not issued an IPI. It is therefore possible to
5167 	 * schedule between user->kernel->user threads without passing though
5168 	 * switch_mm(). Membarrier requires a barrier after storing to
5169 	 * rq->curr, before returning to userspace, so provide them here:
5170 	 *
5171 	 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
5172 	 *   provided by mmdrop_lazy_tlb(),
5173 	 * - a sync_core for SYNC_CORE.
5174 	 */
5175 	if (mm) {
5176 		membarrier_mm_sync_core_before_usermode(mm);
5177 		mmdrop_lazy_tlb_sched(mm);
5178 	}
5179 
5180 	if (unlikely(prev_state == TASK_DEAD)) {
5181 		if (prev->sched_class->task_dead)
5182 			prev->sched_class->task_dead(prev);
5183 
5184 		/*
5185 		 * sched_ext_dead() must come before cgroup_task_dead() to
5186 		 * prevent cgroups from being removed while its member tasks are
5187 		 * visible to SCX schedulers.
5188 		 */
5189 		sched_ext_dead(prev);
5190 		cgroup_task_dead(prev);
5191 
5192 		/* Task is done with its stack. */
5193 		put_task_stack(prev);
5194 
5195 		put_task_struct_rcu_user(prev);
5196 	}
5197 
5198 	return rq;
5199 }
5200 
5201 /**
5202  * schedule_tail - first thing a freshly forked thread must call.
5203  * @prev: the thread we just switched away from.
5204  */
5205 asmlinkage __visible void schedule_tail(struct task_struct *prev)
5206 	__releases(__rq_lockp(this_rq()))
5207 {
5208 	/*
5209 	 * New tasks start with FORK_PREEMPT_COUNT, see there and
5210 	 * finish_task_switch() for details.
5211 	 *
5212 	 * finish_task_switch() will drop rq->lock() and lower preempt_count
5213 	 * and the preempt_enable() will end up enabling preemption (on
5214 	 * PREEMPT_COUNT kernels).
5215 	 */
5216 
5217 	finish_task_switch(prev);
5218 	/*
5219 	 * This is a special case: the newly created task has just
5220 	 * switched the context for the first time. It is returning from
5221 	 * schedule for the first time in this path.
5222 	 */
5223 	trace_sched_exit_tp(true);
5224 	preempt_enable();
5225 
5226 	if (current->set_child_tid)
5227 		put_user(task_pid_vnr(current), current->set_child_tid);
5228 
5229 	calculate_sigpending();
5230 }
5231 
5232 /*
5233  * context_switch - switch to the new MM and the new thread's register state.
5234  */
5235 static __always_inline struct rq *
5236 context_switch(struct rq *rq, struct task_struct *prev,
5237 	       struct task_struct *next, struct rq_flags *rf)
5238 	__releases(__rq_lockp(rq))
5239 {
5240 	prepare_task_switch(rq, prev, next);
5241 
5242 	/*
5243 	 * For paravirt, this is coupled with an exit in switch_to to
5244 	 * combine the page table reload and the switch backend into
5245 	 * one hypercall.
5246 	 */
5247 	arch_start_context_switch(prev);
5248 
5249 	/*
5250 	 * kernel -> kernel   lazy + transfer active
5251 	 *   user -> kernel   lazy + mmgrab_lazy_tlb() active
5252 	 *
5253 	 * kernel ->   user   switch + mmdrop_lazy_tlb() active
5254 	 *   user ->   user   switch
5255 	 */
5256 	if (!next->mm) {				// to kernel
5257 		enter_lazy_tlb(prev->active_mm, next);
5258 
5259 		next->active_mm = prev->active_mm;
5260 		if (prev->mm)				// from user
5261 			mmgrab_lazy_tlb(prev->active_mm);
5262 		else
5263 			prev->active_mm = NULL;
5264 	} else {					// to user
5265 		membarrier_switch_mm(rq, prev->active_mm, next->mm);
5266 		/*
5267 		 * sys_membarrier() requires an smp_mb() between setting
5268 		 * rq->curr / membarrier_switch_mm() and returning to userspace.
5269 		 *
5270 		 * The below provides this either through switch_mm(), or in
5271 		 * case 'prev->active_mm == next->mm' through
5272 		 * finish_task_switch()'s mmdrop().
5273 		 */
5274 		switch_mm_irqs_off(prev->active_mm, next->mm, next);
5275 		lru_gen_use_mm(next->mm);
5276 
5277 		if (!prev->mm) {			// from kernel
5278 			/* will mmdrop_lazy_tlb() in finish_task_switch(). */
5279 			rq->prev_mm = prev->active_mm;
5280 			prev->active_mm = NULL;
5281 		}
5282 	}
5283 
5284 	mm_cid_switch_to(prev, next);
5285 
5286 	/*
5287 	 * Tell rseq that the task was scheduled in. Must be after
5288 	 * switch_mm_cid() to get the TIF flag set.
5289 	 */
5290 	rseq_sched_switch_event(next);
5291 
5292 	prepare_lock_switch(rq, next, rf);
5293 
5294 	/* Here we just switch the register state and the stack. */
5295 	switch_to(prev, next, prev);
5296 	barrier();
5297 
5298 	return finish_task_switch(prev);
5299 }
5300 
5301 /*
5302  * nr_running and nr_context_switches:
5303  *
5304  * externally visible scheduler statistics: current number of runnable
5305  * threads, total number of context switches performed since bootup.
5306  */
5307 unsigned int nr_running(void)
5308 {
5309 	unsigned int i, sum = 0;
5310 
5311 	for_each_online_cpu(i)
5312 		sum += cpu_rq(i)->nr_running;
5313 
5314 	return sum;
5315 }
5316 
5317 /*
5318  * Check if only the current task is running on the CPU.
5319  *
5320  * Caution: this function does not check that the caller has disabled
5321  * preemption, thus the result might have a time-of-check-to-time-of-use
5322  * race.  The caller is responsible to use it correctly, for example:
5323  *
5324  * - from a non-preemptible section (of course)
5325  *
5326  * - from a thread that is bound to a single CPU
5327  *
5328  * - in a loop with very short iterations (e.g. a polling loop)
5329  */
5330 bool single_task_running(void)
5331 {
5332 	return raw_rq()->nr_running == 1;
5333 }
5334 EXPORT_SYMBOL(single_task_running);
5335 
5336 unsigned long long nr_context_switches_cpu(int cpu)
5337 {
5338 	return cpu_rq(cpu)->nr_switches;
5339 }
5340 
5341 unsigned long long nr_context_switches(void)
5342 {
5343 	int i;
5344 	unsigned long long sum = 0;
5345 
5346 	for_each_possible_cpu(i)
5347 		sum += cpu_rq(i)->nr_switches;
5348 
5349 	return sum;
5350 }
5351 
5352 /*
5353  * Consumers of these two interfaces, like for example the cpuidle menu
5354  * governor, are using nonsensical data. Preferring shallow idle state selection
5355  * for a CPU that has IO-wait which might not even end up running the task when
5356  * it does become runnable.
5357  */
5358 
5359 unsigned int nr_iowait_cpu(int cpu)
5360 {
5361 	return atomic_read(&cpu_rq(cpu)->nr_iowait);
5362 }
5363 
5364 /*
5365  * IO-wait accounting, and how it's mostly bollocks (on SMP).
5366  *
5367  * The idea behind IO-wait account is to account the idle time that we could
5368  * have spend running if it were not for IO. That is, if we were to improve the
5369  * storage performance, we'd have a proportional reduction in IO-wait time.
5370  *
5371  * This all works nicely on UP, where, when a task blocks on IO, we account
5372  * idle time as IO-wait, because if the storage were faster, it could've been
5373  * running and we'd not be idle.
5374  *
5375  * This has been extended to SMP, by doing the same for each CPU. This however
5376  * is broken.
5377  *
5378  * Imagine for instance the case where two tasks block on one CPU, only the one
5379  * CPU will have IO-wait accounted, while the other has regular idle. Even
5380  * though, if the storage were faster, both could've ran at the same time,
5381  * utilising both CPUs.
5382  *
5383  * This means, that when looking globally, the current IO-wait accounting on
5384  * SMP is a lower bound, by reason of under accounting.
5385  *
5386  * Worse, since the numbers are provided per CPU, they are sometimes
5387  * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
5388  * associated with any one particular CPU, it can wake to another CPU than it
5389  * blocked on. This means the per CPU IO-wait number is meaningless.
5390  *
5391  * Task CPU affinities can make all that even more 'interesting'.
5392  */
5393 
5394 unsigned int nr_iowait(void)
5395 {
5396 	unsigned int i, sum = 0;
5397 
5398 	for_each_possible_cpu(i)
5399 		sum += nr_iowait_cpu(i);
5400 
5401 	return sum;
5402 }
5403 
5404 /*
5405  * sched_exec - execve() is a valuable balancing opportunity, because at
5406  * this point the task has the smallest effective memory and cache footprint.
5407  */
5408 void sched_exec(void)
5409 {
5410 	struct task_struct *p = current;
5411 	struct migration_arg arg;
5412 	int dest_cpu;
5413 
5414 	scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
5415 		dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC);
5416 		if (dest_cpu == smp_processor_id())
5417 			return;
5418 
5419 		if (unlikely(!cpu_active(dest_cpu)))
5420 			return;
5421 
5422 		arg = (struct migration_arg){ p, dest_cpu };
5423 	}
5424 	stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
5425 }
5426 
5427 DEFINE_PER_CPU(struct kernel_stat, kstat);
5428 DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
5429 
5430 EXPORT_PER_CPU_SYMBOL(kstat);
5431 EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
5432 
5433 /*
5434  * The function fair_sched_class.update_curr accesses the struct curr
5435  * and its field curr->exec_start; when called from task_sched_runtime(),
5436  * we observe a high rate of cache misses in practice.
5437  * Prefetching this data results in improved performance.
5438  */
5439 static inline void prefetch_curr_exec_start(struct task_struct *p)
5440 {
5441 #ifdef CONFIG_FAIR_GROUP_SCHED
5442 	struct sched_entity *curr = p->se.cfs_rq->curr;
5443 #else
5444 	struct sched_entity *curr = task_rq(p)->cfs.curr;
5445 #endif
5446 	prefetch(curr);
5447 	prefetch(&curr->exec_start);
5448 }
5449 
5450 /*
5451  * Return accounted runtime for the task.
5452  * In case the task is currently running, return the runtime plus current's
5453  * pending runtime that have not been accounted yet.
5454  */
5455 unsigned long long task_sched_runtime(struct task_struct *p)
5456 {
5457 	struct rq_flags rf;
5458 	struct rq *rq;
5459 	u64 ns;
5460 
5461 #ifdef CONFIG_64BIT
5462 	/*
5463 	 * 64-bit doesn't need locks to atomically read a 64-bit value.
5464 	 * So we have a optimization chance when the task's delta_exec is 0.
5465 	 * Reading ->on_cpu is racy, but this is OK.
5466 	 *
5467 	 * If we race with it leaving CPU, we'll take a lock. So we're correct.
5468 	 * If we race with it entering CPU, unaccounted time is 0. This is
5469 	 * indistinguishable from the read occurring a few cycles earlier.
5470 	 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
5471 	 * been accounted, so we're correct here as well.
5472 	 */
5473 	if (!p->on_cpu || !task_on_rq_queued(p))
5474 		return p->se.sum_exec_runtime;
5475 #endif
5476 
5477 	rq = task_rq_lock(p, &rf);
5478 	/*
5479 	 * Must be ->curr _and_ ->on_rq.  If dequeued, we would
5480 	 * project cycles that may never be accounted to this
5481 	 * thread, breaking clock_gettime().
5482 	 */
5483 	if (task_current_donor(rq, p) && task_on_rq_queued(p)) {
5484 		prefetch_curr_exec_start(p);
5485 		update_rq_clock(rq);
5486 		p->sched_class->update_curr(rq);
5487 	}
5488 	ns = p->se.sum_exec_runtime;
5489 	task_rq_unlock(rq, p, &rf);
5490 
5491 	return ns;
5492 }
5493 
5494 static u64 cpu_resched_latency(struct rq *rq)
5495 {
5496 	int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms);
5497 	u64 resched_latency, now = rq_clock(rq);
5498 	static bool warned_once;
5499 
5500 	if (sysctl_resched_latency_warn_once && warned_once)
5501 		return 0;
5502 
5503 	if (!need_resched() || !latency_warn_ms)
5504 		return 0;
5505 
5506 	if (system_state == SYSTEM_BOOTING)
5507 		return 0;
5508 
5509 	if (!rq->last_seen_need_resched_ns) {
5510 		rq->last_seen_need_resched_ns = now;
5511 		rq->ticks_without_resched = 0;
5512 		return 0;
5513 	}
5514 
5515 	rq->ticks_without_resched++;
5516 	resched_latency = now - rq->last_seen_need_resched_ns;
5517 	if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC)
5518 		return 0;
5519 
5520 	warned_once = true;
5521 
5522 	return resched_latency;
5523 }
5524 
5525 static int __init setup_resched_latency_warn_ms(char *str)
5526 {
5527 	long val;
5528 
5529 	if ((kstrtol(str, 0, &val))) {
5530 		pr_warn("Unable to set resched_latency_warn_ms\n");
5531 		return 1;
5532 	}
5533 
5534 	sysctl_resched_latency_warn_ms = val;
5535 	return 1;
5536 }
5537 __setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms);
5538 
5539 /*
5540  * This function gets called by the timer code, with HZ frequency.
5541  * We call it with interrupts disabled.
5542  */
5543 void sched_tick(void)
5544 {
5545 	int cpu = smp_processor_id();
5546 	struct rq *rq = cpu_rq(cpu);
5547 	/* accounting goes to the donor task */
5548 	struct task_struct *donor;
5549 	struct rq_flags rf;
5550 	unsigned long hw_pressure;
5551 	u64 resched_latency;
5552 
5553 	if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE))
5554 		arch_scale_freq_tick();
5555 
5556 	sched_clock_tick();
5557 
5558 	rq_lock(rq, &rf);
5559 	donor = rq->donor;
5560 
5561 	psi_account_irqtime(rq, donor, NULL);
5562 
5563 	update_rq_clock(rq);
5564 	hw_pressure = arch_scale_hw_pressure(cpu_of(rq));
5565 	update_hw_load_avg(rq_clock_task(rq), rq, hw_pressure);
5566 
5567 	if (dynamic_preempt_lazy() && tif_test_bit(TIF_NEED_RESCHED_LAZY))
5568 		resched_curr(rq);
5569 
5570 	donor->sched_class->task_tick(rq, donor, 0);
5571 	if (sched_feat(LATENCY_WARN))
5572 		resched_latency = cpu_resched_latency(rq);
5573 	calc_global_load_tick(rq);
5574 	sched_core_tick(rq);
5575 	scx_tick(rq);
5576 
5577 	rq_unlock(rq, &rf);
5578 
5579 	if (sched_feat(LATENCY_WARN) && resched_latency)
5580 		resched_latency_warn(cpu, resched_latency);
5581 
5582 	perf_event_task_tick();
5583 
5584 	if (donor->flags & PF_WQ_WORKER)
5585 		wq_worker_tick(donor);
5586 
5587 	if (!scx_switched_all()) {
5588 		rq->idle_balance = idle_cpu(cpu);
5589 		sched_balance_trigger(rq);
5590 	}
5591 }
5592 
5593 #ifdef CONFIG_NO_HZ_FULL
5594 
5595 struct tick_work {
5596 	int			cpu;
5597 	atomic_t		state;
5598 	struct delayed_work	work;
5599 };
5600 /* Values for ->state, see diagram below. */
5601 #define TICK_SCHED_REMOTE_OFFLINE	0
5602 #define TICK_SCHED_REMOTE_OFFLINING	1
5603 #define TICK_SCHED_REMOTE_RUNNING	2
5604 
5605 /*
5606  * State diagram for ->state:
5607  *
5608  *
5609  *          TICK_SCHED_REMOTE_OFFLINE
5610  *                    |   ^
5611  *                    |   |
5612  *                    |   | sched_tick_remote()
5613  *                    |   |
5614  *                    |   |
5615  *                    +--TICK_SCHED_REMOTE_OFFLINING
5616  *                    |   ^
5617  *                    |   |
5618  * sched_tick_start() |   | sched_tick_stop()
5619  *                    |   |
5620  *                    V   |
5621  *          TICK_SCHED_REMOTE_RUNNING
5622  *
5623  *
5624  * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
5625  * and sched_tick_start() are happy to leave the state in RUNNING.
5626  */
5627 
5628 static struct tick_work __percpu *tick_work_cpu;
5629 
5630 static void sched_tick_remote(struct work_struct *work)
5631 {
5632 	struct delayed_work *dwork = to_delayed_work(work);
5633 	struct tick_work *twork = container_of(dwork, struct tick_work, work);
5634 	int cpu = twork->cpu;
5635 	struct rq *rq = cpu_rq(cpu);
5636 	int os;
5637 
5638 	/*
5639 	 * Handle the tick only if it appears the remote CPU is running in full
5640 	 * dynticks mode. The check is racy by nature, but missing a tick or
5641 	 * having one too much is no big deal because the scheduler tick updates
5642 	 * statistics and checks timeslices in a time-independent way, regardless
5643 	 * of when exactly it is running.
5644 	 */
5645 	if (tick_nohz_tick_stopped_cpu(cpu)) {
5646 		guard(rq_lock_irq)(rq);
5647 		struct task_struct *curr = rq->curr;
5648 
5649 		if (cpu_online(cpu)) {
5650 			/*
5651 			 * Since this is a remote tick for full dynticks mode,
5652 			 * we are always sure that there is no proxy (only a
5653 			 * single task is running).
5654 			 */
5655 			WARN_ON_ONCE(rq->curr != rq->donor);
5656 			update_rq_clock(rq);
5657 
5658 			if (!is_idle_task(curr)) {
5659 				/*
5660 				 * Make sure the next tick runs within a
5661 				 * reasonable amount of time.
5662 				 */
5663 				u64 delta = rq_clock_task(rq) - curr->se.exec_start;
5664 				WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 30);
5665 			}
5666 			curr->sched_class->task_tick(rq, curr, 0);
5667 
5668 			calc_load_nohz_remote(rq);
5669 		}
5670 	}
5671 
5672 	/*
5673 	 * Run the remote tick once per second (1Hz). This arbitrary
5674 	 * frequency is large enough to avoid overload but short enough
5675 	 * to keep scheduler internal stats reasonably up to date.  But
5676 	 * first update state to reflect hotplug activity if required.
5677 	 */
5678 	os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
5679 	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
5680 	if (os == TICK_SCHED_REMOTE_RUNNING)
5681 		queue_delayed_work(system_unbound_wq, dwork, HZ);
5682 }
5683 
5684 static void sched_tick_start(int cpu)
5685 {
5686 	int os;
5687 	struct tick_work *twork;
5688 
5689 	if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE))
5690 		return;
5691 
5692 	WARN_ON_ONCE(!tick_work_cpu);
5693 
5694 	twork = per_cpu_ptr(tick_work_cpu, cpu);
5695 	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
5696 	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
5697 	if (os == TICK_SCHED_REMOTE_OFFLINE) {
5698 		twork->cpu = cpu;
5699 		INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
5700 		queue_delayed_work(system_unbound_wq, &twork->work, HZ);
5701 	}
5702 }
5703 
5704 #ifdef CONFIG_HOTPLUG_CPU
5705 static void sched_tick_stop(int cpu)
5706 {
5707 	struct tick_work *twork;
5708 	int os;
5709 
5710 	if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE))
5711 		return;
5712 
5713 	WARN_ON_ONCE(!tick_work_cpu);
5714 
5715 	twork = per_cpu_ptr(tick_work_cpu, cpu);
5716 	/* There cannot be competing actions, but don't rely on stop-machine. */
5717 	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
5718 	WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
5719 	/* Don't cancel, as this would mess up the state machine. */
5720 }
5721 #endif /* CONFIG_HOTPLUG_CPU */
5722 
5723 int __init sched_tick_offload_init(void)
5724 {
5725 	tick_work_cpu = alloc_percpu(struct tick_work);
5726 	BUG_ON(!tick_work_cpu);
5727 	return 0;
5728 }
5729 
5730 #else /* !CONFIG_NO_HZ_FULL: */
5731 static inline void sched_tick_start(int cpu) { }
5732 static inline void sched_tick_stop(int cpu) { }
5733 #endif /* !CONFIG_NO_HZ_FULL */
5734 
5735 #if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
5736 				defined(CONFIG_TRACE_PREEMPT_TOGGLE))
5737 /*
5738  * If the value passed in is equal to the current preempt count
5739  * then we just disabled preemption. Start timing the latency.
5740  */
5741 static inline void preempt_latency_start(int val)
5742 {
5743 	if (preempt_count() == val) {
5744 		unsigned long ip = get_lock_parent_ip();
5745 #ifdef CONFIG_DEBUG_PREEMPT
5746 		current->preempt_disable_ip = ip;
5747 #endif
5748 		trace_preempt_off(CALLER_ADDR0, ip);
5749 	}
5750 }
5751 
5752 void preempt_count_add(int val)
5753 {
5754 #ifdef CONFIG_DEBUG_PREEMPT
5755 	/*
5756 	 * Underflow?
5757 	 */
5758 	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
5759 		return;
5760 #endif
5761 	__preempt_count_add(val);
5762 #ifdef CONFIG_DEBUG_PREEMPT
5763 	/*
5764 	 * Spinlock count overflowing soon?
5765 	 */
5766 	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
5767 				PREEMPT_MASK - 10);
5768 #endif
5769 	preempt_latency_start(val);
5770 }
5771 EXPORT_SYMBOL(preempt_count_add);
5772 NOKPROBE_SYMBOL(preempt_count_add);
5773 
5774 /*
5775  * If the value passed in equals to the current preempt count
5776  * then we just enabled preemption. Stop timing the latency.
5777  */
5778 static inline void preempt_latency_stop(int val)
5779 {
5780 	if (preempt_count() == val)
5781 		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
5782 }
5783 
5784 void preempt_count_sub(int val)
5785 {
5786 #ifdef CONFIG_DEBUG_PREEMPT
5787 	/*
5788 	 * Underflow?
5789 	 */
5790 	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
5791 		return;
5792 	/*
5793 	 * Is the spinlock portion underflowing?
5794 	 */
5795 	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
5796 			!(preempt_count() & PREEMPT_MASK)))
5797 		return;
5798 #endif
5799 
5800 	preempt_latency_stop(val);
5801 	__preempt_count_sub(val);
5802 }
5803 EXPORT_SYMBOL(preempt_count_sub);
5804 NOKPROBE_SYMBOL(preempt_count_sub);
5805 
5806 #else
5807 static inline void preempt_latency_start(int val) { }
5808 static inline void preempt_latency_stop(int val) { }
5809 #endif
5810 
5811 static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
5812 {
5813 #ifdef CONFIG_DEBUG_PREEMPT
5814 	return p->preempt_disable_ip;
5815 #else
5816 	return 0;
5817 #endif
5818 }
5819 
5820 /*
5821  * Print scheduling while atomic bug:
5822  */
5823 static noinline void __schedule_bug(struct task_struct *prev)
5824 {
5825 	/* Save this before calling printk(), since that will clobber it */
5826 	unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
5827 
5828 	if (oops_in_progress)
5829 		return;
5830 
5831 	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
5832 		prev->comm, prev->pid, preempt_count());
5833 
5834 	debug_show_held_locks(prev);
5835 	print_modules();
5836 	if (irqs_disabled())
5837 		print_irqtrace_events(prev);
5838 	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
5839 		pr_err("Preemption disabled at:");
5840 		print_ip_sym(KERN_ERR, preempt_disable_ip);
5841 	}
5842 	check_panic_on_warn("scheduling while atomic");
5843 
5844 	dump_stack();
5845 	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
5846 }
5847 
5848 /*
5849  * Various schedule()-time debugging checks and statistics:
5850  */
5851 static inline void schedule_debug(struct task_struct *prev, bool preempt)
5852 {
5853 #ifdef CONFIG_SCHED_STACK_END_CHECK
5854 	if (task_stack_end_corrupted(prev))
5855 		panic("corrupted stack end detected inside scheduler\n");
5856 
5857 	if (task_scs_end_corrupted(prev))
5858 		panic("corrupted shadow stack detected inside scheduler\n");
5859 #endif
5860 
5861 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
5862 	if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) {
5863 		printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
5864 			prev->comm, prev->pid, prev->non_block_count);
5865 		dump_stack();
5866 		add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
5867 	}
5868 #endif
5869 
5870 	if (unlikely(in_atomic_preempt_off())) {
5871 		__schedule_bug(prev);
5872 		preempt_count_set(PREEMPT_DISABLED);
5873 	}
5874 	rcu_sleep_check();
5875 	WARN_ON_ONCE(ct_state() == CT_STATE_USER);
5876 
5877 	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
5878 
5879 	schedstat_inc(this_rq()->sched_count);
5880 }
5881 
5882 static void prev_balance(struct rq *rq, struct task_struct *prev,
5883 			 struct rq_flags *rf)
5884 {
5885 	const struct sched_class *start_class = prev->sched_class;
5886 	const struct sched_class *class;
5887 
5888 	/*
5889 	 * We must do the balancing pass before put_prev_task(), such
5890 	 * that when we release the rq->lock the task is in the same
5891 	 * state as before we took rq->lock.
5892 	 *
5893 	 * We can terminate the balance pass as soon as we know there is
5894 	 * a runnable task of @class priority or higher.
5895 	 */
5896 	for_active_class_range(class, start_class, &idle_sched_class) {
5897 		if (class->balance && class->balance(rq, prev, rf))
5898 			break;
5899 	}
5900 }
5901 
5902 /*
5903  * Pick up the highest-prio task:
5904  */
5905 static inline struct task_struct *
5906 __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
5907 	__must_hold(__rq_lockp(rq))
5908 {
5909 	const struct sched_class *class;
5910 	struct task_struct *p;
5911 
5912 	rq->dl_server = NULL;
5913 
5914 	if (scx_enabled())
5915 		goto restart;
5916 
5917 	/*
5918 	 * Optimization: we know that if all tasks are in the fair class we can
5919 	 * call that function directly, but only if the @prev task wasn't of a
5920 	 * higher scheduling class, because otherwise those lose the
5921 	 * opportunity to pull in more work from other CPUs.
5922 	 */
5923 	if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) &&
5924 		   rq->nr_running == rq->cfs.h_nr_queued)) {
5925 
5926 		p = pick_next_task_fair(rq, prev, rf);
5927 		if (unlikely(p == RETRY_TASK))
5928 			goto restart;
5929 
5930 		/* Assume the next prioritized class is idle_sched_class */
5931 		if (!p) {
5932 			p = pick_task_idle(rq, rf);
5933 			put_prev_set_next_task(rq, prev, p);
5934 		}
5935 
5936 		return p;
5937 	}
5938 
5939 restart:
5940 	prev_balance(rq, prev, rf);
5941 
5942 	for_each_active_class(class) {
5943 		if (class->pick_next_task) {
5944 			p = class->pick_next_task(rq, prev, rf);
5945 			if (unlikely(p == RETRY_TASK))
5946 				goto restart;
5947 			if (p)
5948 				return p;
5949 		} else {
5950 			p = class->pick_task(rq, rf);
5951 			if (unlikely(p == RETRY_TASK))
5952 				goto restart;
5953 			if (p) {
5954 				put_prev_set_next_task(rq, prev, p);
5955 				return p;
5956 			}
5957 		}
5958 	}
5959 
5960 	BUG(); /* The idle class should always have a runnable task. */
5961 }
5962 
5963 #ifdef CONFIG_SCHED_CORE
5964 static inline bool is_task_rq_idle(struct task_struct *t)
5965 {
5966 	return (task_rq(t)->idle == t);
5967 }
5968 
5969 static inline bool cookie_equals(struct task_struct *a, unsigned long cookie)
5970 {
5971 	return is_task_rq_idle(a) || (a->core_cookie == cookie);
5972 }
5973 
5974 static inline bool cookie_match(struct task_struct *a, struct task_struct *b)
5975 {
5976 	if (is_task_rq_idle(a) || is_task_rq_idle(b))
5977 		return true;
5978 
5979 	return a->core_cookie == b->core_cookie;
5980 }
5981 
5982 /*
5983  * Careful; this can return RETRY_TASK, it does not include the retry-loop
5984  * itself due to the whole SMT pick retry thing below.
5985  */
5986 static inline struct task_struct *pick_task(struct rq *rq, struct rq_flags *rf)
5987 {
5988 	const struct sched_class *class;
5989 	struct task_struct *p;
5990 
5991 	rq->dl_server = NULL;
5992 
5993 	for_each_active_class(class) {
5994 		p = class->pick_task(rq, rf);
5995 		if (p)
5996 			return p;
5997 	}
5998 
5999 	BUG(); /* The idle class should always have a runnable task. */
6000 }
6001 
6002 extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
6003 
6004 static void queue_core_balance(struct rq *rq);
6005 
6006 static struct task_struct *
6007 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6008 	__must_hold(__rq_lockp(rq))
6009 {
6010 	struct task_struct *next, *p, *max;
6011 	const struct cpumask *smt_mask;
6012 	bool fi_before = false;
6013 	bool core_clock_updated = (rq == rq->core);
6014 	unsigned long cookie;
6015 	int i, cpu, occ = 0;
6016 	struct rq *rq_i;
6017 	bool need_sync;
6018 
6019 	if (!sched_core_enabled(rq))
6020 		return __pick_next_task(rq, prev, rf);
6021 
6022 	cpu = cpu_of(rq);
6023 
6024 	/* Stopper task is switching into idle, no need core-wide selection. */
6025 	if (cpu_is_offline(cpu)) {
6026 		/*
6027 		 * Reset core_pick so that we don't enter the fastpath when
6028 		 * coming online. core_pick would already be migrated to
6029 		 * another cpu during offline.
6030 		 */
6031 		rq->core_pick = NULL;
6032 		rq->core_dl_server = NULL;
6033 		return __pick_next_task(rq, prev, rf);
6034 	}
6035 
6036 	/*
6037 	 * If there were no {en,de}queues since we picked (IOW, the task
6038 	 * pointers are all still valid), and we haven't scheduled the last
6039 	 * pick yet, do so now.
6040 	 *
6041 	 * rq->core_pick can be NULL if no selection was made for a CPU because
6042 	 * it was either offline or went offline during a sibling's core-wide
6043 	 * selection. In this case, do a core-wide selection.
6044 	 */
6045 	if (rq->core->core_pick_seq == rq->core->core_task_seq &&
6046 	    rq->core->core_pick_seq != rq->core_sched_seq &&
6047 	    rq->core_pick) {
6048 		WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq);
6049 
6050 		next = rq->core_pick;
6051 		rq->dl_server = rq->core_dl_server;
6052 		rq->core_pick = NULL;
6053 		rq->core_dl_server = NULL;
6054 		goto out_set_next;
6055 	}
6056 
6057 	prev_balance(rq, prev, rf);
6058 
6059 	smt_mask = cpu_smt_mask(cpu);
6060 	need_sync = !!rq->core->core_cookie;
6061 
6062 	/* reset state */
6063 	rq->core->core_cookie = 0UL;
6064 	if (rq->core->core_forceidle_count) {
6065 		if (!core_clock_updated) {
6066 			update_rq_clock(rq->core);
6067 			core_clock_updated = true;
6068 		}
6069 		sched_core_account_forceidle(rq);
6070 		/* reset after accounting force idle */
6071 		rq->core->core_forceidle_start = 0;
6072 		rq->core->core_forceidle_count = 0;
6073 		rq->core->core_forceidle_occupation = 0;
6074 		need_sync = true;
6075 		fi_before = true;
6076 	}
6077 
6078 	/*
6079 	 * core->core_task_seq, core->core_pick_seq, rq->core_sched_seq
6080 	 *
6081 	 * @task_seq guards the task state ({en,de}queues)
6082 	 * @pick_seq is the @task_seq we did a selection on
6083 	 * @sched_seq is the @pick_seq we scheduled
6084 	 *
6085 	 * However, preemptions can cause multiple picks on the same task set.
6086 	 * 'Fix' this by also increasing @task_seq for every pick.
6087 	 */
6088 	rq->core->core_task_seq++;
6089 
6090 	/*
6091 	 * Optimize for common case where this CPU has no cookies
6092 	 * and there are no cookied tasks running on siblings.
6093 	 */
6094 	if (!need_sync) {
6095 restart_single:
6096 		next = pick_task(rq, rf);
6097 		if (unlikely(next == RETRY_TASK))
6098 			goto restart_single;
6099 		if (!next->core_cookie) {
6100 			rq->core_pick = NULL;
6101 			rq->core_dl_server = NULL;
6102 			/*
6103 			 * For robustness, update the min_vruntime_fi for
6104 			 * unconstrained picks as well.
6105 			 */
6106 			WARN_ON_ONCE(fi_before);
6107 			task_vruntime_update(rq, next, false);
6108 			goto out_set_next;
6109 		}
6110 	}
6111 
6112 	/*
6113 	 * For each thread: do the regular task pick and find the max prio task
6114 	 * amongst them.
6115 	 *
6116 	 * Tie-break prio towards the current CPU
6117 	 */
6118 restart_multi:
6119 	max = NULL;
6120 	for_each_cpu_wrap(i, smt_mask, cpu) {
6121 		rq_i = cpu_rq(i);
6122 
6123 		/*
6124 		 * Current cpu always has its clock updated on entrance to
6125 		 * pick_next_task(). If the current cpu is not the core,
6126 		 * the core may also have been updated above.
6127 		 */
6128 		if (i != cpu && (rq_i != rq->core || !core_clock_updated))
6129 			update_rq_clock(rq_i);
6130 
6131 		p = pick_task(rq_i, rf);
6132 		if (unlikely(p == RETRY_TASK))
6133 			goto restart_multi;
6134 
6135 		rq_i->core_pick = p;
6136 		rq_i->core_dl_server = rq_i->dl_server;
6137 
6138 		if (!max || prio_less(max, p, fi_before))
6139 			max = p;
6140 	}
6141 
6142 	cookie = rq->core->core_cookie = max->core_cookie;
6143 
6144 	/*
6145 	 * For each thread: try and find a runnable task that matches @max or
6146 	 * force idle.
6147 	 */
6148 	for_each_cpu(i, smt_mask) {
6149 		rq_i = cpu_rq(i);
6150 		p = rq_i->core_pick;
6151 
6152 		if (!cookie_equals(p, cookie)) {
6153 			p = NULL;
6154 			if (cookie)
6155 				p = sched_core_find(rq_i, cookie);
6156 			if (!p)
6157 				p = idle_sched_class.pick_task(rq_i, rf);
6158 		}
6159 
6160 		rq_i->core_pick = p;
6161 		rq_i->core_dl_server = NULL;
6162 
6163 		if (p == rq_i->idle) {
6164 			if (rq_i->nr_running) {
6165 				rq->core->core_forceidle_count++;
6166 				if (!fi_before)
6167 					rq->core->core_forceidle_seq++;
6168 			}
6169 		} else {
6170 			occ++;
6171 		}
6172 	}
6173 
6174 	if (schedstat_enabled() && rq->core->core_forceidle_count) {
6175 		rq->core->core_forceidle_start = rq_clock(rq->core);
6176 		rq->core->core_forceidle_occupation = occ;
6177 	}
6178 
6179 	rq->core->core_pick_seq = rq->core->core_task_seq;
6180 	next = rq->core_pick;
6181 	rq->core_sched_seq = rq->core->core_pick_seq;
6182 
6183 	/* Something should have been selected for current CPU */
6184 	WARN_ON_ONCE(!next);
6185 
6186 	/*
6187 	 * Reschedule siblings
6188 	 *
6189 	 * NOTE: L1TF -- at this point we're no longer running the old task and
6190 	 * sending an IPI (below) ensures the sibling will no longer be running
6191 	 * their task. This ensures there is no inter-sibling overlap between
6192 	 * non-matching user state.
6193 	 */
6194 	for_each_cpu(i, smt_mask) {
6195 		rq_i = cpu_rq(i);
6196 
6197 		/*
6198 		 * An online sibling might have gone offline before a task
6199 		 * could be picked for it, or it might be offline but later
6200 		 * happen to come online, but its too late and nothing was
6201 		 * picked for it.  That's Ok - it will pick tasks for itself,
6202 		 * so ignore it.
6203 		 */
6204 		if (!rq_i->core_pick)
6205 			continue;
6206 
6207 		/*
6208 		 * Update for new !FI->FI transitions, or if continuing to be in !FI:
6209 		 * fi_before     fi      update?
6210 		 *  0            0       1
6211 		 *  0            1       1
6212 		 *  1            0       1
6213 		 *  1            1       0
6214 		 */
6215 		if (!(fi_before && rq->core->core_forceidle_count))
6216 			task_vruntime_update(rq_i, rq_i->core_pick, !!rq->core->core_forceidle_count);
6217 
6218 		rq_i->core_pick->core_occupation = occ;
6219 
6220 		if (i == cpu) {
6221 			rq_i->core_pick = NULL;
6222 			rq_i->core_dl_server = NULL;
6223 			continue;
6224 		}
6225 
6226 		/* Did we break L1TF mitigation requirements? */
6227 		WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick));
6228 
6229 		if (rq_i->curr == rq_i->core_pick) {
6230 			rq_i->core_pick = NULL;
6231 			rq_i->core_dl_server = NULL;
6232 			continue;
6233 		}
6234 
6235 		resched_curr(rq_i);
6236 	}
6237 
6238 out_set_next:
6239 	put_prev_set_next_task(rq, prev, next);
6240 	if (rq->core->core_forceidle_count && next == rq->idle)
6241 		queue_core_balance(rq);
6242 
6243 	return next;
6244 }
6245 
6246 static bool try_steal_cookie(int this, int that)
6247 {
6248 	struct rq *dst = cpu_rq(this), *src = cpu_rq(that);
6249 	struct task_struct *p;
6250 	unsigned long cookie;
6251 	bool success = false;
6252 
6253 	guard(irq)();
6254 	guard(double_rq_lock)(dst, src);
6255 
6256 	cookie = dst->core->core_cookie;
6257 	if (!cookie)
6258 		return false;
6259 
6260 	if (dst->curr != dst->idle)
6261 		return false;
6262 
6263 	p = sched_core_find(src, cookie);
6264 	if (!p)
6265 		return false;
6266 
6267 	do {
6268 		if (p == src->core_pick || p == src->curr)
6269 			goto next;
6270 
6271 		if (!is_cpu_allowed(p, this))
6272 			goto next;
6273 
6274 		if (p->core_occupation > dst->idle->core_occupation)
6275 			goto next;
6276 		/*
6277 		 * sched_core_find() and sched_core_next() will ensure
6278 		 * that task @p is not throttled now, we also need to
6279 		 * check whether the runqueue of the destination CPU is
6280 		 * being throttled.
6281 		 */
6282 		if (sched_task_is_throttled(p, this))
6283 			goto next;
6284 
6285 		move_queued_task_locked(src, dst, p);
6286 		resched_curr(dst);
6287 
6288 		success = true;
6289 		break;
6290 
6291 next:
6292 		p = sched_core_next(p, cookie);
6293 	} while (p);
6294 
6295 	return success;
6296 }
6297 
6298 static bool steal_cookie_task(int cpu, struct sched_domain *sd)
6299 {
6300 	int i;
6301 
6302 	for_each_cpu_wrap(i, sched_domain_span(sd), cpu + 1) {
6303 		if (i == cpu)
6304 			continue;
6305 
6306 		if (need_resched())
6307 			break;
6308 
6309 		if (try_steal_cookie(cpu, i))
6310 			return true;
6311 	}
6312 
6313 	return false;
6314 }
6315 
6316 static void sched_core_balance(struct rq *rq)
6317 	__must_hold(__rq_lockp(rq))
6318 {
6319 	struct sched_domain *sd;
6320 	int cpu = cpu_of(rq);
6321 
6322 	guard(preempt)();
6323 	guard(rcu)();
6324 
6325 	raw_spin_rq_unlock_irq(rq);
6326 	for_each_domain(cpu, sd) {
6327 		if (need_resched())
6328 			break;
6329 
6330 		if (steal_cookie_task(cpu, sd))
6331 			break;
6332 	}
6333 	raw_spin_rq_lock_irq(rq);
6334 }
6335 
6336 static DEFINE_PER_CPU(struct balance_callback, core_balance_head);
6337 
6338 static void queue_core_balance(struct rq *rq)
6339 {
6340 	if (!sched_core_enabled(rq))
6341 		return;
6342 
6343 	if (!rq->core->core_cookie)
6344 		return;
6345 
6346 	if (!rq->nr_running) /* not forced idle */
6347 		return;
6348 
6349 	queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance);
6350 }
6351 
6352 DEFINE_LOCK_GUARD_1(core_lock, int,
6353 		    sched_core_lock(*_T->lock, &_T->flags),
6354 		    sched_core_unlock(*_T->lock, &_T->flags),
6355 		    unsigned long flags)
6356 
6357 static void sched_core_cpu_starting(unsigned int cpu)
6358 {
6359 	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
6360 	struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
6361 	int t;
6362 
6363 	guard(core_lock)(&cpu);
6364 
6365 	WARN_ON_ONCE(rq->core != rq);
6366 
6367 	/* if we're the first, we'll be our own leader */
6368 	if (cpumask_weight(smt_mask) == 1)
6369 		return;
6370 
6371 	/* find the leader */
6372 	for_each_cpu(t, smt_mask) {
6373 		if (t == cpu)
6374 			continue;
6375 		rq = cpu_rq(t);
6376 		if (rq->core == rq) {
6377 			core_rq = rq;
6378 			break;
6379 		}
6380 	}
6381 
6382 	if (WARN_ON_ONCE(!core_rq)) /* whoopsie */
6383 		return;
6384 
6385 	/* install and validate core_rq */
6386 	for_each_cpu(t, smt_mask) {
6387 		rq = cpu_rq(t);
6388 
6389 		if (t == cpu)
6390 			rq->core = core_rq;
6391 
6392 		WARN_ON_ONCE(rq->core != core_rq);
6393 	}
6394 }
6395 
6396 static void sched_core_cpu_deactivate(unsigned int cpu)
6397 {
6398 	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
6399 	struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
6400 	int t;
6401 
6402 	guard(core_lock)(&cpu);
6403 
6404 	/* if we're the last man standing, nothing to do */
6405 	if (cpumask_weight(smt_mask) == 1) {
6406 		WARN_ON_ONCE(rq->core != rq);
6407 		return;
6408 	}
6409 
6410 	/* if we're not the leader, nothing to do */
6411 	if (rq->core != rq)
6412 		return;
6413 
6414 	/* find a new leader */
6415 	for_each_cpu(t, smt_mask) {
6416 		if (t == cpu)
6417 			continue;
6418 		core_rq = cpu_rq(t);
6419 		break;
6420 	}
6421 
6422 	if (WARN_ON_ONCE(!core_rq)) /* impossible */
6423 		return;
6424 
6425 	/* copy the shared state to the new leader */
6426 	core_rq->core_task_seq             = rq->core_task_seq;
6427 	core_rq->core_pick_seq             = rq->core_pick_seq;
6428 	core_rq->core_cookie               = rq->core_cookie;
6429 	core_rq->core_forceidle_count      = rq->core_forceidle_count;
6430 	core_rq->core_forceidle_seq        = rq->core_forceidle_seq;
6431 	core_rq->core_forceidle_occupation = rq->core_forceidle_occupation;
6432 
6433 	/*
6434 	 * Accounting edge for forced idle is handled in pick_next_task().
6435 	 * Don't need another one here, since the hotplug thread shouldn't
6436 	 * have a cookie.
6437 	 */
6438 	core_rq->core_forceidle_start = 0;
6439 
6440 	/* install new leader */
6441 	for_each_cpu(t, smt_mask) {
6442 		rq = cpu_rq(t);
6443 		rq->core = core_rq;
6444 	}
6445 }
6446 
6447 static inline void sched_core_cpu_dying(unsigned int cpu)
6448 {
6449 	struct rq *rq = cpu_rq(cpu);
6450 
6451 	if (rq->core != rq)
6452 		rq->core = rq;
6453 }
6454 
6455 #else /* !CONFIG_SCHED_CORE: */
6456 
6457 static inline void sched_core_cpu_starting(unsigned int cpu) {}
6458 static inline void sched_core_cpu_deactivate(unsigned int cpu) {}
6459 static inline void sched_core_cpu_dying(unsigned int cpu) {}
6460 
6461 static struct task_struct *
6462 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6463 	__must_hold(__rq_lockp(rq))
6464 {
6465 	return __pick_next_task(rq, prev, rf);
6466 }
6467 
6468 #endif /* !CONFIG_SCHED_CORE */
6469 
6470 /*
6471  * Constants for the sched_mode argument of __schedule().
6472  *
6473  * The mode argument allows RT enabled kernels to differentiate a
6474  * preemption from blocking on an 'sleeping' spin/rwlock.
6475  */
6476 #define SM_IDLE			(-1)
6477 #define SM_NONE			0
6478 #define SM_PREEMPT		1
6479 #define SM_RTLOCK_WAIT		2
6480 
6481 /*
6482  * Helper function for __schedule()
6483  *
6484  * Tries to deactivate the task, unless the should_block arg
6485  * is false or if a signal is pending. In the case a signal
6486  * is pending, marks the task's __state as RUNNING (and clear
6487  * blocked_on).
6488  */
6489 static bool try_to_block_task(struct rq *rq, struct task_struct *p,
6490 			      unsigned long *task_state_p, bool should_block)
6491 {
6492 	unsigned long task_state = *task_state_p;
6493 	int flags = DEQUEUE_NOCLOCK;
6494 
6495 	if (signal_pending_state(task_state, p)) {
6496 		WRITE_ONCE(p->__state, TASK_RUNNING);
6497 		*task_state_p = TASK_RUNNING;
6498 		return false;
6499 	}
6500 
6501 	/*
6502 	 * We check should_block after signal_pending because we
6503 	 * will want to wake the task in that case. But if
6504 	 * should_block is false, its likely due to the task being
6505 	 * blocked on a mutex, and we want to keep it on the runqueue
6506 	 * to be selectable for proxy-execution.
6507 	 */
6508 	if (!should_block)
6509 		return false;
6510 
6511 	p->sched_contributes_to_load =
6512 		(task_state & TASK_UNINTERRUPTIBLE) &&
6513 		!(task_state & TASK_NOLOAD) &&
6514 		!(task_state & TASK_FROZEN);
6515 
6516 	if (unlikely(is_special_task_state(task_state)))
6517 		flags |= DEQUEUE_SPECIAL;
6518 
6519 	/*
6520 	 * __schedule()			ttwu()
6521 	 *   prev_state = prev->state;    if (p->on_rq && ...)
6522 	 *   if (prev_state)		    goto out;
6523 	 *     p->on_rq = 0;		  smp_acquire__after_ctrl_dep();
6524 	 *				  p->state = TASK_WAKING
6525 	 *
6526 	 * Where __schedule() and ttwu() have matching control dependencies.
6527 	 *
6528 	 * After this, schedule() must not care about p->state any more.
6529 	 */
6530 	block_task(rq, p, flags);
6531 	return true;
6532 }
6533 
6534 #ifdef CONFIG_SCHED_PROXY_EXEC
6535 static inline struct task_struct *proxy_resched_idle(struct rq *rq)
6536 {
6537 	put_prev_set_next_task(rq, rq->donor, rq->idle);
6538 	rq_set_donor(rq, rq->idle);
6539 	set_tsk_need_resched(rq->idle);
6540 	return rq->idle;
6541 }
6542 
6543 static bool __proxy_deactivate(struct rq *rq, struct task_struct *donor)
6544 {
6545 	unsigned long state = READ_ONCE(donor->__state);
6546 
6547 	/* Don't deactivate if the state has been changed to TASK_RUNNING */
6548 	if (state == TASK_RUNNING)
6549 		return false;
6550 	/*
6551 	 * Because we got donor from pick_next_task(), it is *crucial*
6552 	 * that we call proxy_resched_idle() before we deactivate it.
6553 	 * As once we deactivate donor, donor->on_rq is set to zero,
6554 	 * which allows ttwu() to immediately try to wake the task on
6555 	 * another rq. So we cannot use *any* references to donor
6556 	 * after that point. So things like cfs_rq->curr or rq->donor
6557 	 * need to be changed from next *before* we deactivate.
6558 	 */
6559 	proxy_resched_idle(rq);
6560 	return try_to_block_task(rq, donor, &state, true);
6561 }
6562 
6563 static struct task_struct *proxy_deactivate(struct rq *rq, struct task_struct *donor)
6564 {
6565 	if (!__proxy_deactivate(rq, donor)) {
6566 		/*
6567 		 * XXX: For now, if deactivation failed, set donor
6568 		 * as unblocked, as we aren't doing proxy-migrations
6569 		 * yet (more logic will be needed then).
6570 		 */
6571 		donor->blocked_on = NULL;
6572 	}
6573 	return NULL;
6574 }
6575 
6576 /*
6577  * Find runnable lock owner to proxy for mutex blocked donor
6578  *
6579  * Follow the blocked-on relation:
6580  *   task->blocked_on -> mutex->owner -> task...
6581  *
6582  * Lock order:
6583  *
6584  *   p->pi_lock
6585  *     rq->lock
6586  *       mutex->wait_lock
6587  *
6588  * Returns the task that is going to be used as execution context (the one
6589  * that is actually going to be run on cpu_of(rq)).
6590  */
6591 static struct task_struct *
6592 find_proxy_task(struct rq *rq, struct task_struct *donor, struct rq_flags *rf)
6593 {
6594 	struct task_struct *owner = NULL;
6595 	int this_cpu = cpu_of(rq);
6596 	struct task_struct *p;
6597 	struct mutex *mutex;
6598 
6599 	/* Follow blocked_on chain. */
6600 	for (p = donor; task_is_blocked(p); p = owner) {
6601 		mutex = p->blocked_on;
6602 		/* Something changed in the chain, so pick again */
6603 		if (!mutex)
6604 			return NULL;
6605 		/*
6606 		 * By taking mutex->wait_lock we hold off concurrent mutex_unlock()
6607 		 * and ensure @owner sticks around.
6608 		 */
6609 		guard(raw_spinlock)(&mutex->wait_lock);
6610 
6611 		/* Check again that p is blocked with wait_lock held */
6612 		if (mutex != __get_task_blocked_on(p)) {
6613 			/*
6614 			 * Something changed in the blocked_on chain and
6615 			 * we don't know if only at this level. So, let's
6616 			 * just bail out completely and let __schedule()
6617 			 * figure things out (pick_again loop).
6618 			 */
6619 			return NULL;
6620 		}
6621 
6622 		owner = __mutex_owner(mutex);
6623 		if (!owner) {
6624 			__clear_task_blocked_on(p, mutex);
6625 			return p;
6626 		}
6627 
6628 		if (!READ_ONCE(owner->on_rq) || owner->se.sched_delayed) {
6629 			/* XXX Don't handle blocked owners/delayed dequeue yet */
6630 			return proxy_deactivate(rq, donor);
6631 		}
6632 
6633 		if (task_cpu(owner) != this_cpu) {
6634 			/* XXX Don't handle migrations yet */
6635 			return proxy_deactivate(rq, donor);
6636 		}
6637 
6638 		if (task_on_rq_migrating(owner)) {
6639 			/*
6640 			 * One of the chain of mutex owners is currently migrating to this
6641 			 * CPU, but has not yet been enqueued because we are holding the
6642 			 * rq lock. As a simple solution, just schedule rq->idle to give
6643 			 * the migration a chance to complete. Much like the migrate_task
6644 			 * case we should end up back in find_proxy_task(), this time
6645 			 * hopefully with all relevant tasks already enqueued.
6646 			 */
6647 			return proxy_resched_idle(rq);
6648 		}
6649 
6650 		/*
6651 		 * Its possible to race where after we check owner->on_rq
6652 		 * but before we check (owner_cpu != this_cpu) that the
6653 		 * task on another cpu was migrated back to this cpu. In
6654 		 * that case it could slip by our  checks. So double check
6655 		 * we are still on this cpu and not migrating. If we get
6656 		 * inconsistent results, try again.
6657 		 */
6658 		if (!task_on_rq_queued(owner) || task_cpu(owner) != this_cpu)
6659 			return NULL;
6660 
6661 		if (owner == p) {
6662 			/*
6663 			 * It's possible we interleave with mutex_unlock like:
6664 			 *
6665 			 *				lock(&rq->lock);
6666 			 *				  find_proxy_task()
6667 			 * mutex_unlock()
6668 			 *   lock(&wait_lock);
6669 			 *   donor(owner) = current->blocked_donor;
6670 			 *   unlock(&wait_lock);
6671 			 *
6672 			 *   wake_up_q();
6673 			 *     ...
6674 			 *       ttwu_runnable()
6675 			 *         __task_rq_lock()
6676 			 *				  lock(&wait_lock);
6677 			 *				  owner == p
6678 			 *
6679 			 * Which leaves us to finish the ttwu_runnable() and make it go.
6680 			 *
6681 			 * So schedule rq->idle so that ttwu_runnable() can get the rq
6682 			 * lock and mark owner as running.
6683 			 */
6684 			return proxy_resched_idle(rq);
6685 		}
6686 		/*
6687 		 * OK, now we're absolutely sure @owner is on this
6688 		 * rq, therefore holding @rq->lock is sufficient to
6689 		 * guarantee its existence, as per ttwu_remote().
6690 		 */
6691 	}
6692 
6693 	WARN_ON_ONCE(owner && !owner->on_rq);
6694 	return owner;
6695 }
6696 #else /* SCHED_PROXY_EXEC */
6697 static struct task_struct *
6698 find_proxy_task(struct rq *rq, struct task_struct *donor, struct rq_flags *rf)
6699 {
6700 	WARN_ONCE(1, "This should never be called in the !SCHED_PROXY_EXEC case\n");
6701 	return donor;
6702 }
6703 #endif /* SCHED_PROXY_EXEC */
6704 
6705 static inline void proxy_tag_curr(struct rq *rq, struct task_struct *owner)
6706 {
6707 	if (!sched_proxy_exec())
6708 		return;
6709 	/*
6710 	 * pick_next_task() calls set_next_task() on the chosen task
6711 	 * at some point, which ensures it is not push/pullable.
6712 	 * However, the chosen/donor task *and* the mutex owner form an
6713 	 * atomic pair wrt push/pull.
6714 	 *
6715 	 * Make sure owner we run is not pushable. Unfortunately we can
6716 	 * only deal with that by means of a dequeue/enqueue cycle. :-/
6717 	 */
6718 	dequeue_task(rq, owner, DEQUEUE_NOCLOCK | DEQUEUE_SAVE);
6719 	enqueue_task(rq, owner, ENQUEUE_NOCLOCK | ENQUEUE_RESTORE);
6720 }
6721 
6722 /*
6723  * __schedule() is the main scheduler function.
6724  *
6725  * The main means of driving the scheduler and thus entering this function are:
6726  *
6727  *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
6728  *
6729  *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
6730  *      paths. For example, see arch/x86/entry_64.S.
6731  *
6732  *      To drive preemption between tasks, the scheduler sets the flag in timer
6733  *      interrupt handler sched_tick().
6734  *
6735  *   3. Wakeups don't really cause entry into schedule(). They add a
6736  *      task to the run-queue and that's it.
6737  *
6738  *      Now, if the new task added to the run-queue preempts the current
6739  *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
6740  *      called on the nearest possible occasion:
6741  *
6742  *       - If the kernel is preemptible (CONFIG_PREEMPTION=y):
6743  *
6744  *         - in syscall or exception context, at the next outmost
6745  *           preempt_enable(). (this might be as soon as the wake_up()'s
6746  *           spin_unlock()!)
6747  *
6748  *         - in IRQ context, return from interrupt-handler to
6749  *           preemptible context
6750  *
6751  *       - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
6752  *         then at the next:
6753  *
6754  *          - cond_resched() call
6755  *          - explicit schedule() call
6756  *          - return from syscall or exception to user-space
6757  *          - return from interrupt-handler to user-space
6758  *
6759  * WARNING: must be called with preemption disabled!
6760  */
6761 static void __sched notrace __schedule(int sched_mode)
6762 {
6763 	struct task_struct *prev, *next;
6764 	/*
6765 	 * On PREEMPT_RT kernel, SM_RTLOCK_WAIT is noted
6766 	 * as a preemption by schedule_debug() and RCU.
6767 	 */
6768 	bool preempt = sched_mode > SM_NONE;
6769 	bool is_switch = false;
6770 	unsigned long *switch_count;
6771 	unsigned long prev_state;
6772 	struct rq_flags rf;
6773 	struct rq *rq;
6774 	int cpu;
6775 
6776 	/* Trace preemptions consistently with task switches */
6777 	trace_sched_entry_tp(sched_mode == SM_PREEMPT);
6778 
6779 	cpu = smp_processor_id();
6780 	rq = cpu_rq(cpu);
6781 	prev = rq->curr;
6782 
6783 	schedule_debug(prev, preempt);
6784 
6785 	if (sched_feat(HRTICK) || sched_feat(HRTICK_DL))
6786 		hrtick_clear(rq);
6787 
6788 	klp_sched_try_switch(prev);
6789 
6790 	local_irq_disable();
6791 	rcu_note_context_switch(preempt);
6792 	migrate_disable_switch(rq, prev);
6793 
6794 	/*
6795 	 * Make sure that signal_pending_state()->signal_pending() below
6796 	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
6797 	 * done by the caller to avoid the race with signal_wake_up():
6798 	 *
6799 	 * __set_current_state(@state)		signal_wake_up()
6800 	 * schedule()				  set_tsk_thread_flag(p, TIF_SIGPENDING)
6801 	 *					  wake_up_state(p, state)
6802 	 *   LOCK rq->lock			    LOCK p->pi_state
6803 	 *   smp_mb__after_spinlock()		    smp_mb__after_spinlock()
6804 	 *     if (signal_pending_state())	    if (p->state & @state)
6805 	 *
6806 	 * Also, the membarrier system call requires a full memory barrier
6807 	 * after coming from user-space, before storing to rq->curr; this
6808 	 * barrier matches a full barrier in the proximity of the membarrier
6809 	 * system call exit.
6810 	 */
6811 	rq_lock(rq, &rf);
6812 	smp_mb__after_spinlock();
6813 
6814 	/* Promote REQ to ACT */
6815 	rq->clock_update_flags <<= 1;
6816 	update_rq_clock(rq);
6817 	rq->clock_update_flags = RQCF_UPDATED;
6818 
6819 	switch_count = &prev->nivcsw;
6820 
6821 	/* Task state changes only considers SM_PREEMPT as preemption */
6822 	preempt = sched_mode == SM_PREEMPT;
6823 
6824 	/*
6825 	 * We must load prev->state once (task_struct::state is volatile), such
6826 	 * that we form a control dependency vs deactivate_task() below.
6827 	 */
6828 	prev_state = READ_ONCE(prev->__state);
6829 	if (sched_mode == SM_IDLE) {
6830 		/* SCX must consult the BPF scheduler to tell if rq is empty */
6831 		if (!rq->nr_running && !scx_enabled()) {
6832 			next = prev;
6833 			goto picked;
6834 		}
6835 	} else if (!preempt && prev_state) {
6836 		/*
6837 		 * We pass task_is_blocked() as the should_block arg
6838 		 * in order to keep mutex-blocked tasks on the runqueue
6839 		 * for slection with proxy-exec (without proxy-exec
6840 		 * task_is_blocked() will always be false).
6841 		 */
6842 		try_to_block_task(rq, prev, &prev_state,
6843 				  !task_is_blocked(prev));
6844 		switch_count = &prev->nvcsw;
6845 	}
6846 
6847 pick_again:
6848 	next = pick_next_task(rq, rq->donor, &rf);
6849 	rq_set_donor(rq, next);
6850 	rq->next_class = next->sched_class;
6851 	if (unlikely(task_is_blocked(next))) {
6852 		next = find_proxy_task(rq, next, &rf);
6853 		if (!next)
6854 			goto pick_again;
6855 		if (next == rq->idle)
6856 			goto keep_resched;
6857 	}
6858 picked:
6859 	clear_tsk_need_resched(prev);
6860 	clear_preempt_need_resched();
6861 keep_resched:
6862 	rq->last_seen_need_resched_ns = 0;
6863 
6864 	is_switch = prev != next;
6865 	if (likely(is_switch)) {
6866 		rq->nr_switches++;
6867 		/*
6868 		 * RCU users of rcu_dereference(rq->curr) may not see
6869 		 * changes to task_struct made by pick_next_task().
6870 		 */
6871 		RCU_INIT_POINTER(rq->curr, next);
6872 
6873 		if (!task_current_donor(rq, next))
6874 			proxy_tag_curr(rq, next);
6875 
6876 		/*
6877 		 * The membarrier system call requires each architecture
6878 		 * to have a full memory barrier after updating
6879 		 * rq->curr, before returning to user-space.
6880 		 *
6881 		 * Here are the schemes providing that barrier on the
6882 		 * various architectures:
6883 		 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC,
6884 		 *   RISC-V.  switch_mm() relies on membarrier_arch_switch_mm()
6885 		 *   on PowerPC and on RISC-V.
6886 		 * - finish_lock_switch() for weakly-ordered
6887 		 *   architectures where spin_unlock is a full barrier,
6888 		 * - switch_to() for arm64 (weakly-ordered, spin_unlock
6889 		 *   is a RELEASE barrier),
6890 		 *
6891 		 * The barrier matches a full barrier in the proximity of
6892 		 * the membarrier system call entry.
6893 		 *
6894 		 * On RISC-V, this barrier pairing is also needed for the
6895 		 * SYNC_CORE command when switching between processes, cf.
6896 		 * the inline comments in membarrier_arch_switch_mm().
6897 		 */
6898 		++*switch_count;
6899 
6900 		psi_account_irqtime(rq, prev, next);
6901 		psi_sched_switch(prev, next, !task_on_rq_queued(prev) ||
6902 					     prev->se.sched_delayed);
6903 
6904 		trace_sched_switch(preempt, prev, next, prev_state);
6905 
6906 		/* Also unlocks the rq: */
6907 		rq = context_switch(rq, prev, next, &rf);
6908 	} else {
6909 		/* In case next was already curr but just got blocked_donor */
6910 		if (!task_current_donor(rq, next))
6911 			proxy_tag_curr(rq, next);
6912 
6913 		rq_unpin_lock(rq, &rf);
6914 		__balance_callbacks(rq, NULL);
6915 		raw_spin_rq_unlock_irq(rq);
6916 	}
6917 	trace_sched_exit_tp(is_switch);
6918 }
6919 
6920 void __noreturn do_task_dead(void)
6921 {
6922 	/* Causes final put_task_struct in finish_task_switch(): */
6923 	set_special_state(TASK_DEAD);
6924 
6925 	/* Tell freezer to ignore us: */
6926 	current->flags |= PF_NOFREEZE;
6927 
6928 	__schedule(SM_NONE);
6929 	BUG();
6930 
6931 	/* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
6932 	for (;;)
6933 		cpu_relax();
6934 }
6935 
6936 static inline void sched_submit_work(struct task_struct *tsk)
6937 {
6938 	static DEFINE_WAIT_OVERRIDE_MAP(sched_map, LD_WAIT_CONFIG);
6939 	unsigned int task_flags;
6940 
6941 	/*
6942 	 * Establish LD_WAIT_CONFIG context to ensure none of the code called
6943 	 * will use a blocking primitive -- which would lead to recursion.
6944 	 */
6945 	lock_map_acquire_try(&sched_map);
6946 
6947 	task_flags = tsk->flags;
6948 	/*
6949 	 * If a worker goes to sleep, notify and ask workqueue whether it
6950 	 * wants to wake up a task to maintain concurrency.
6951 	 */
6952 	if (task_flags & PF_WQ_WORKER)
6953 		wq_worker_sleeping(tsk);
6954 	else if (task_flags & PF_IO_WORKER)
6955 		io_wq_worker_sleeping(tsk);
6956 
6957 	/*
6958 	 * spinlock and rwlock must not flush block requests.  This will
6959 	 * deadlock if the callback attempts to acquire a lock which is
6960 	 * already acquired.
6961 	 */
6962 	WARN_ON_ONCE(current->__state & TASK_RTLOCK_WAIT);
6963 
6964 	/*
6965 	 * If we are going to sleep and we have plugged IO queued,
6966 	 * make sure to submit it to avoid deadlocks.
6967 	 */
6968 	blk_flush_plug(tsk->plug, true);
6969 
6970 	lock_map_release(&sched_map);
6971 }
6972 
6973 static void sched_update_worker(struct task_struct *tsk)
6974 {
6975 	if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER | PF_BLOCK_TS)) {
6976 		if (tsk->flags & PF_BLOCK_TS)
6977 			blk_plug_invalidate_ts(tsk);
6978 		if (tsk->flags & PF_WQ_WORKER)
6979 			wq_worker_running(tsk);
6980 		else if (tsk->flags & PF_IO_WORKER)
6981 			io_wq_worker_running(tsk);
6982 	}
6983 }
6984 
6985 static __always_inline void __schedule_loop(int sched_mode)
6986 {
6987 	do {
6988 		preempt_disable();
6989 		__schedule(sched_mode);
6990 		sched_preempt_enable_no_resched();
6991 	} while (need_resched());
6992 }
6993 
6994 asmlinkage __visible void __sched schedule(void)
6995 {
6996 	struct task_struct *tsk = current;
6997 
6998 #ifdef CONFIG_RT_MUTEXES
6999 	lockdep_assert(!tsk->sched_rt_mutex);
7000 #endif
7001 
7002 	if (!task_is_running(tsk))
7003 		sched_submit_work(tsk);
7004 	__schedule_loop(SM_NONE);
7005 	sched_update_worker(tsk);
7006 }
7007 EXPORT_SYMBOL(schedule);
7008 
7009 /*
7010  * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
7011  * state (have scheduled out non-voluntarily) by making sure that all
7012  * tasks have either left the run queue or have gone into user space.
7013  * As idle tasks do not do either, they must not ever be preempted
7014  * (schedule out non-voluntarily).
7015  *
7016  * schedule_idle() is similar to schedule_preempt_disable() except that it
7017  * never enables preemption because it does not call sched_submit_work().
7018  */
7019 void __sched schedule_idle(void)
7020 {
7021 	/*
7022 	 * As this skips calling sched_submit_work(), which the idle task does
7023 	 * regardless because that function is a NOP when the task is in a
7024 	 * TASK_RUNNING state, make sure this isn't used someplace that the
7025 	 * current task can be in any other state. Note, idle is always in the
7026 	 * TASK_RUNNING state.
7027 	 */
7028 	WARN_ON_ONCE(current->__state);
7029 	do {
7030 		__schedule(SM_IDLE);
7031 	} while (need_resched());
7032 }
7033 
7034 #if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK)
7035 asmlinkage __visible void __sched schedule_user(void)
7036 {
7037 	/*
7038 	 * If we come here after a random call to set_need_resched(),
7039 	 * or we have been woken up remotely but the IPI has not yet arrived,
7040 	 * we haven't yet exited the RCU idle mode. Do it here manually until
7041 	 * we find a better solution.
7042 	 *
7043 	 * NB: There are buggy callers of this function.  Ideally we
7044 	 * should warn if prev_state != CT_STATE_USER, but that will trigger
7045 	 * too frequently to make sense yet.
7046 	 */
7047 	enum ctx_state prev_state = exception_enter();
7048 	schedule();
7049 	exception_exit(prev_state);
7050 }
7051 #endif
7052 
7053 /**
7054  * schedule_preempt_disabled - called with preemption disabled
7055  *
7056  * Returns with preemption disabled. Note: preempt_count must be 1
7057  */
7058 void __sched schedule_preempt_disabled(void)
7059 {
7060 	sched_preempt_enable_no_resched();
7061 	schedule();
7062 	preempt_disable();
7063 }
7064 
7065 #ifdef CONFIG_PREEMPT_RT
7066 void __sched notrace schedule_rtlock(void)
7067 {
7068 	__schedule_loop(SM_RTLOCK_WAIT);
7069 }
7070 NOKPROBE_SYMBOL(schedule_rtlock);
7071 #endif
7072 
7073 static void __sched notrace preempt_schedule_common(void)
7074 {
7075 	do {
7076 		/*
7077 		 * Because the function tracer can trace preempt_count_sub()
7078 		 * and it also uses preempt_enable/disable_notrace(), if
7079 		 * NEED_RESCHED is set, the preempt_enable_notrace() called
7080 		 * by the function tracer will call this function again and
7081 		 * cause infinite recursion.
7082 		 *
7083 		 * Preemption must be disabled here before the function
7084 		 * tracer can trace. Break up preempt_disable() into two
7085 		 * calls. One to disable preemption without fear of being
7086 		 * traced. The other to still record the preemption latency,
7087 		 * which can also be traced by the function tracer.
7088 		 */
7089 		preempt_disable_notrace();
7090 		preempt_latency_start(1);
7091 		__schedule(SM_PREEMPT);
7092 		preempt_latency_stop(1);
7093 		preempt_enable_no_resched_notrace();
7094 
7095 		/*
7096 		 * Check again in case we missed a preemption opportunity
7097 		 * between schedule and now.
7098 		 */
7099 	} while (need_resched());
7100 }
7101 
7102 #ifdef CONFIG_PREEMPTION
7103 /*
7104  * This is the entry point to schedule() from in-kernel preemption
7105  * off of preempt_enable.
7106  */
7107 asmlinkage __visible void __sched notrace preempt_schedule(void)
7108 {
7109 	/*
7110 	 * If there is a non-zero preempt_count or interrupts are disabled,
7111 	 * we do not want to preempt the current task. Just return..
7112 	 */
7113 	if (likely(!preemptible()))
7114 		return;
7115 	preempt_schedule_common();
7116 }
7117 NOKPROBE_SYMBOL(preempt_schedule);
7118 EXPORT_SYMBOL(preempt_schedule);
7119 
7120 #ifdef CONFIG_PREEMPT_DYNAMIC
7121 # ifdef CONFIG_HAVE_PREEMPT_DYNAMIC_CALL
7122 #  ifndef preempt_schedule_dynamic_enabled
7123 #   define preempt_schedule_dynamic_enabled	preempt_schedule
7124 #   define preempt_schedule_dynamic_disabled	NULL
7125 #  endif
7126 DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
7127 EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
7128 # elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7129 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);
7130 void __sched notrace dynamic_preempt_schedule(void)
7131 {
7132 	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
7133 		return;
7134 	preempt_schedule();
7135 }
7136 NOKPROBE_SYMBOL(dynamic_preempt_schedule);
7137 EXPORT_SYMBOL(dynamic_preempt_schedule);
7138 # endif
7139 #endif /* CONFIG_PREEMPT_DYNAMIC */
7140 
7141 /**
7142  * preempt_schedule_notrace - preempt_schedule called by tracing
7143  *
7144  * The tracing infrastructure uses preempt_enable_notrace to prevent
7145  * recursion and tracing preempt enabling caused by the tracing
7146  * infrastructure itself. But as tracing can happen in areas coming
7147  * from userspace or just about to enter userspace, a preempt enable
7148  * can occur before user_exit() is called. This will cause the scheduler
7149  * to be called when the system is still in usermode.
7150  *
7151  * To prevent this, the preempt_enable_notrace will use this function
7152  * instead of preempt_schedule() to exit user context if needed before
7153  * calling the scheduler.
7154  */
7155 asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
7156 {
7157 	enum ctx_state prev_ctx;
7158 
7159 	if (likely(!preemptible()))
7160 		return;
7161 
7162 	do {
7163 		/*
7164 		 * Because the function tracer can trace preempt_count_sub()
7165 		 * and it also uses preempt_enable/disable_notrace(), if
7166 		 * NEED_RESCHED is set, the preempt_enable_notrace() called
7167 		 * by the function tracer will call this function again and
7168 		 * cause infinite recursion.
7169 		 *
7170 		 * Preemption must be disabled here before the function
7171 		 * tracer can trace. Break up preempt_disable() into two
7172 		 * calls. One to disable preemption without fear of being
7173 		 * traced. The other to still record the preemption latency,
7174 		 * which can also be traced by the function tracer.
7175 		 */
7176 		preempt_disable_notrace();
7177 		preempt_latency_start(1);
7178 		/*
7179 		 * Needs preempt disabled in case user_exit() is traced
7180 		 * and the tracer calls preempt_enable_notrace() causing
7181 		 * an infinite recursion.
7182 		 */
7183 		prev_ctx = exception_enter();
7184 		__schedule(SM_PREEMPT);
7185 		exception_exit(prev_ctx);
7186 
7187 		preempt_latency_stop(1);
7188 		preempt_enable_no_resched_notrace();
7189 	} while (need_resched());
7190 }
7191 EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
7192 
7193 #ifdef CONFIG_PREEMPT_DYNAMIC
7194 # if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
7195 #  ifndef preempt_schedule_notrace_dynamic_enabled
7196 #   define preempt_schedule_notrace_dynamic_enabled	preempt_schedule_notrace
7197 #   define preempt_schedule_notrace_dynamic_disabled	NULL
7198 #  endif
7199 DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
7200 EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
7201 # elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7202 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace);
7203 void __sched notrace dynamic_preempt_schedule_notrace(void)
7204 {
7205 	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace))
7206 		return;
7207 	preempt_schedule_notrace();
7208 }
7209 NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace);
7210 EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
7211 # endif
7212 #endif
7213 
7214 #endif /* CONFIG_PREEMPTION */
7215 
7216 /*
7217  * This is the entry point to schedule() from kernel preemption
7218  * off of IRQ context.
7219  * Note, that this is called and return with IRQs disabled. This will
7220  * protect us against recursive calling from IRQ contexts.
7221  */
7222 asmlinkage __visible void __sched preempt_schedule_irq(void)
7223 {
7224 	enum ctx_state prev_state;
7225 
7226 	/* Catch callers which need to be fixed */
7227 	BUG_ON(preempt_count() || !irqs_disabled());
7228 
7229 	prev_state = exception_enter();
7230 
7231 	do {
7232 		preempt_disable();
7233 		local_irq_enable();
7234 		__schedule(SM_PREEMPT);
7235 		local_irq_disable();
7236 		sched_preempt_enable_no_resched();
7237 	} while (need_resched());
7238 
7239 	exception_exit(prev_state);
7240 }
7241 
7242 int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
7243 			  void *key)
7244 {
7245 	WARN_ON_ONCE(wake_flags & ~(WF_SYNC|WF_CURRENT_CPU));
7246 	return try_to_wake_up(curr->private, mode, wake_flags);
7247 }
7248 EXPORT_SYMBOL(default_wake_function);
7249 
7250 const struct sched_class *__setscheduler_class(int policy, int prio)
7251 {
7252 	if (dl_prio(prio))
7253 		return &dl_sched_class;
7254 
7255 	if (rt_prio(prio))
7256 		return &rt_sched_class;
7257 
7258 #ifdef CONFIG_SCHED_CLASS_EXT
7259 	if (task_should_scx(policy))
7260 		return &ext_sched_class;
7261 #endif
7262 
7263 	return &fair_sched_class;
7264 }
7265 
7266 #ifdef CONFIG_RT_MUTEXES
7267 
7268 /*
7269  * Would be more useful with typeof()/auto_type but they don't mix with
7270  * bit-fields. Since it's a local thing, use int. Keep the generic sounding
7271  * name such that if someone were to implement this function we get to compare
7272  * notes.
7273  */
7274 #define fetch_and_set(x, v) ({ int _x = (x); (x) = (v); _x; })
7275 
7276 void rt_mutex_pre_schedule(void)
7277 {
7278 	lockdep_assert(!fetch_and_set(current->sched_rt_mutex, 1));
7279 	sched_submit_work(current);
7280 }
7281 
7282 void rt_mutex_schedule(void)
7283 {
7284 	lockdep_assert(current->sched_rt_mutex);
7285 	__schedule_loop(SM_NONE);
7286 }
7287 
7288 void rt_mutex_post_schedule(void)
7289 {
7290 	sched_update_worker(current);
7291 	lockdep_assert(fetch_and_set(current->sched_rt_mutex, 0));
7292 }
7293 
7294 /*
7295  * rt_mutex_setprio - set the current priority of a task
7296  * @p: task to boost
7297  * @pi_task: donor task
7298  *
7299  * This function changes the 'effective' priority of a task. It does
7300  * not touch ->normal_prio like __setscheduler().
7301  *
7302  * Used by the rt_mutex code to implement priority inheritance
7303  * logic. Call site only calls if the priority of the task changed.
7304  */
7305 void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
7306 {
7307 	int prio, oldprio, queue_flag =
7308 		DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
7309 	const struct sched_class *prev_class, *next_class;
7310 	struct rq_flags rf;
7311 	struct rq *rq;
7312 
7313 	/* XXX used to be waiter->prio, not waiter->task->prio */
7314 	prio = __rt_effective_prio(pi_task, p->normal_prio);
7315 
7316 	/*
7317 	 * If nothing changed; bail early.
7318 	 */
7319 	if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio))
7320 		return;
7321 
7322 	rq = __task_rq_lock(p, &rf);
7323 	update_rq_clock(rq);
7324 	/*
7325 	 * Set under pi_lock && rq->lock, such that the value can be used under
7326 	 * either lock.
7327 	 *
7328 	 * Note that there is loads of tricky to make this pointer cache work
7329 	 * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
7330 	 * ensure a task is de-boosted (pi_task is set to NULL) before the
7331 	 * task is allowed to run again (and can exit). This ensures the pointer
7332 	 * points to a blocked task -- which guarantees the task is present.
7333 	 */
7334 	p->pi_top_task = pi_task;
7335 
7336 	/*
7337 	 * For FIFO/RR we only need to set prio, if that matches we're done.
7338 	 */
7339 	if (prio == p->prio && !dl_prio(prio))
7340 		goto out_unlock;
7341 
7342 	/*
7343 	 * Idle task boosting is a no-no in general. There is one
7344 	 * exception, when PREEMPT_RT and NOHZ is active:
7345 	 *
7346 	 * The idle task calls get_next_timer_interrupt() and holds
7347 	 * the timer wheel base->lock on the CPU and another CPU wants
7348 	 * to access the timer (probably to cancel it). We can safely
7349 	 * ignore the boosting request, as the idle CPU runs this code
7350 	 * with interrupts disabled and will complete the lock
7351 	 * protected section without being interrupted. So there is no
7352 	 * real need to boost.
7353 	 */
7354 	if (unlikely(p == rq->idle)) {
7355 		WARN_ON(p != rq->curr);
7356 		WARN_ON(p->pi_blocked_on);
7357 		goto out_unlock;
7358 	}
7359 
7360 	trace_sched_pi_setprio(p, pi_task);
7361 	oldprio = p->prio;
7362 
7363 	if (oldprio == prio && !dl_prio(prio))
7364 		queue_flag &= ~DEQUEUE_MOVE;
7365 
7366 	prev_class = p->sched_class;
7367 	next_class = __setscheduler_class(p->policy, prio);
7368 
7369 	if (prev_class != next_class)
7370 		queue_flag |= DEQUEUE_CLASS;
7371 
7372 	scoped_guard (sched_change, p, queue_flag) {
7373 		/*
7374 		 * Boosting condition are:
7375 		 * 1. -rt task is running and holds mutex A
7376 		 *      --> -dl task blocks on mutex A
7377 		 *
7378 		 * 2. -dl task is running and holds mutex A
7379 		 *      --> -dl task blocks on mutex A and could preempt the
7380 		 *          running task
7381 		 */
7382 		if (dl_prio(prio)) {
7383 			if (!dl_prio(p->normal_prio) ||
7384 			    (pi_task && dl_prio(pi_task->prio) &&
7385 			     dl_entity_preempt(&pi_task->dl, &p->dl))) {
7386 				p->dl.pi_se = pi_task->dl.pi_se;
7387 				scope->flags |= ENQUEUE_REPLENISH;
7388 			} else {
7389 				p->dl.pi_se = &p->dl;
7390 			}
7391 		} else if (rt_prio(prio)) {
7392 			if (dl_prio(oldprio))
7393 				p->dl.pi_se = &p->dl;
7394 			if (oldprio < prio)
7395 				scope->flags |= ENQUEUE_HEAD;
7396 		} else {
7397 			if (dl_prio(oldprio))
7398 				p->dl.pi_se = &p->dl;
7399 			if (rt_prio(oldprio))
7400 				p->rt.timeout = 0;
7401 		}
7402 
7403 		p->sched_class = next_class;
7404 		p->prio = prio;
7405 	}
7406 out_unlock:
7407 	/* Caller holds task_struct::pi_lock, IRQs are still disabled */
7408 
7409 	__balance_callbacks(rq, &rf);
7410 	__task_rq_unlock(rq, p, &rf);
7411 }
7412 #endif /* CONFIG_RT_MUTEXES */
7413 
7414 #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
7415 int __sched __cond_resched(void)
7416 {
7417 	if (should_resched(0) && !irqs_disabled()) {
7418 		preempt_schedule_common();
7419 		return 1;
7420 	}
7421 	/*
7422 	 * In PREEMPT_RCU kernels, ->rcu_read_lock_nesting tells the tick
7423 	 * whether the current CPU is in an RCU read-side critical section,
7424 	 * so the tick can report quiescent states even for CPUs looping
7425 	 * in kernel context.  In contrast, in non-preemptible kernels,
7426 	 * RCU readers leave no in-memory hints, which means that CPU-bound
7427 	 * processes executing in kernel context might never report an
7428 	 * RCU quiescent state.  Therefore, the following code causes
7429 	 * cond_resched() to report a quiescent state, but only when RCU
7430 	 * is in urgent need of one.
7431 	 * A third case, preemptible, but non-PREEMPT_RCU provides for
7432 	 * urgently needed quiescent states via rcu_flavor_sched_clock_irq().
7433 	 */
7434 #ifndef CONFIG_PREEMPT_RCU
7435 	rcu_all_qs();
7436 #endif
7437 	return 0;
7438 }
7439 EXPORT_SYMBOL(__cond_resched);
7440 #endif
7441 
7442 #ifdef CONFIG_PREEMPT_DYNAMIC
7443 # ifdef CONFIG_HAVE_PREEMPT_DYNAMIC_CALL
7444 #  define cond_resched_dynamic_enabled	__cond_resched
7445 #  define cond_resched_dynamic_disabled	((void *)&__static_call_return0)
7446 DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
7447 EXPORT_STATIC_CALL_TRAMP(cond_resched);
7448 
7449 #  define might_resched_dynamic_enabled	__cond_resched
7450 #  define might_resched_dynamic_disabled ((void *)&__static_call_return0)
7451 DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
7452 EXPORT_STATIC_CALL_TRAMP(might_resched);
7453 # elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7454 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
7455 int __sched dynamic_cond_resched(void)
7456 {
7457 	if (!static_branch_unlikely(&sk_dynamic_cond_resched))
7458 		return 0;
7459 	return __cond_resched();
7460 }
7461 EXPORT_SYMBOL(dynamic_cond_resched);
7462 
7463 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched);
7464 int __sched dynamic_might_resched(void)
7465 {
7466 	if (!static_branch_unlikely(&sk_dynamic_might_resched))
7467 		return 0;
7468 	return __cond_resched();
7469 }
7470 EXPORT_SYMBOL(dynamic_might_resched);
7471 # endif
7472 #endif /* CONFIG_PREEMPT_DYNAMIC */
7473 
7474 /*
7475  * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
7476  * call schedule, and on return reacquire the lock.
7477  *
7478  * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
7479  * operations here to prevent schedule() from being called twice (once via
7480  * spin_unlock(), once by hand).
7481  */
7482 int __cond_resched_lock(spinlock_t *lock)
7483 {
7484 	int resched = should_resched(PREEMPT_LOCK_OFFSET);
7485 	int ret = 0;
7486 
7487 	lockdep_assert_held(lock);
7488 
7489 	if (spin_needbreak(lock) || resched) {
7490 		spin_unlock(lock);
7491 		if (!_cond_resched())
7492 			cpu_relax();
7493 		ret = 1;
7494 		spin_lock(lock);
7495 	}
7496 	return ret;
7497 }
7498 EXPORT_SYMBOL(__cond_resched_lock);
7499 
7500 int __cond_resched_rwlock_read(rwlock_t *lock)
7501 {
7502 	int resched = should_resched(PREEMPT_LOCK_OFFSET);
7503 	int ret = 0;
7504 
7505 	lockdep_assert_held_read(lock);
7506 
7507 	if (rwlock_needbreak(lock) || resched) {
7508 		read_unlock(lock);
7509 		if (!_cond_resched())
7510 			cpu_relax();
7511 		ret = 1;
7512 		read_lock(lock);
7513 	}
7514 	return ret;
7515 }
7516 EXPORT_SYMBOL(__cond_resched_rwlock_read);
7517 
7518 int __cond_resched_rwlock_write(rwlock_t *lock)
7519 {
7520 	int resched = should_resched(PREEMPT_LOCK_OFFSET);
7521 	int ret = 0;
7522 
7523 	lockdep_assert_held_write(lock);
7524 
7525 	if (rwlock_needbreak(lock) || resched) {
7526 		write_unlock(lock);
7527 		if (!_cond_resched())
7528 			cpu_relax();
7529 		ret = 1;
7530 		write_lock(lock);
7531 	}
7532 	return ret;
7533 }
7534 EXPORT_SYMBOL(__cond_resched_rwlock_write);
7535 
7536 #ifdef CONFIG_PREEMPT_DYNAMIC
7537 
7538 # ifdef CONFIG_GENERIC_IRQ_ENTRY
7539 #  include <linux/irq-entry-common.h>
7540 # endif
7541 
7542 /*
7543  * SC:cond_resched
7544  * SC:might_resched
7545  * SC:preempt_schedule
7546  * SC:preempt_schedule_notrace
7547  * SC:irqentry_exit_cond_resched
7548  *
7549  *
7550  * NONE:
7551  *   cond_resched               <- __cond_resched
7552  *   might_resched              <- RET0
7553  *   preempt_schedule           <- NOP
7554  *   preempt_schedule_notrace   <- NOP
7555  *   irqentry_exit_cond_resched <- NOP
7556  *   dynamic_preempt_lazy       <- false
7557  *
7558  * VOLUNTARY:
7559  *   cond_resched               <- __cond_resched
7560  *   might_resched              <- __cond_resched
7561  *   preempt_schedule           <- NOP
7562  *   preempt_schedule_notrace   <- NOP
7563  *   irqentry_exit_cond_resched <- NOP
7564  *   dynamic_preempt_lazy       <- false
7565  *
7566  * FULL:
7567  *   cond_resched               <- RET0
7568  *   might_resched              <- RET0
7569  *   preempt_schedule           <- preempt_schedule
7570  *   preempt_schedule_notrace   <- preempt_schedule_notrace
7571  *   irqentry_exit_cond_resched <- irqentry_exit_cond_resched
7572  *   dynamic_preempt_lazy       <- false
7573  *
7574  * LAZY:
7575  *   cond_resched               <- RET0
7576  *   might_resched              <- RET0
7577  *   preempt_schedule           <- preempt_schedule
7578  *   preempt_schedule_notrace   <- preempt_schedule_notrace
7579  *   irqentry_exit_cond_resched <- irqentry_exit_cond_resched
7580  *   dynamic_preempt_lazy       <- true
7581  */
7582 
7583 enum {
7584 	preempt_dynamic_undefined = -1,
7585 	preempt_dynamic_none,
7586 	preempt_dynamic_voluntary,
7587 	preempt_dynamic_full,
7588 	preempt_dynamic_lazy,
7589 };
7590 
7591 int preempt_dynamic_mode = preempt_dynamic_undefined;
7592 
7593 int sched_dynamic_mode(const char *str)
7594 {
7595 # if !(defined(CONFIG_PREEMPT_RT) || defined(CONFIG_ARCH_HAS_PREEMPT_LAZY))
7596 	if (!strcmp(str, "none"))
7597 		return preempt_dynamic_none;
7598 
7599 	if (!strcmp(str, "voluntary"))
7600 		return preempt_dynamic_voluntary;
7601 # endif
7602 
7603 	if (!strcmp(str, "full"))
7604 		return preempt_dynamic_full;
7605 
7606 # ifdef CONFIG_ARCH_HAS_PREEMPT_LAZY
7607 	if (!strcmp(str, "lazy"))
7608 		return preempt_dynamic_lazy;
7609 # endif
7610 
7611 	return -EINVAL;
7612 }
7613 
7614 # define preempt_dynamic_key_enable(f)	static_key_enable(&sk_dynamic_##f.key)
7615 # define preempt_dynamic_key_disable(f)	static_key_disable(&sk_dynamic_##f.key)
7616 
7617 # if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
7618 #  define preempt_dynamic_enable(f)	static_call_update(f, f##_dynamic_enabled)
7619 #  define preempt_dynamic_disable(f)	static_call_update(f, f##_dynamic_disabled)
7620 # elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7621 #  define preempt_dynamic_enable(f)	preempt_dynamic_key_enable(f)
7622 #  define preempt_dynamic_disable(f)	preempt_dynamic_key_disable(f)
7623 # else
7624 #  error "Unsupported PREEMPT_DYNAMIC mechanism"
7625 # endif
7626 
7627 static DEFINE_MUTEX(sched_dynamic_mutex);
7628 
7629 static void __sched_dynamic_update(int mode)
7630 {
7631 	/*
7632 	 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
7633 	 * the ZERO state, which is invalid.
7634 	 */
7635 	preempt_dynamic_enable(cond_resched);
7636 	preempt_dynamic_enable(might_resched);
7637 	preempt_dynamic_enable(preempt_schedule);
7638 	preempt_dynamic_enable(preempt_schedule_notrace);
7639 	preempt_dynamic_enable(irqentry_exit_cond_resched);
7640 	preempt_dynamic_key_disable(preempt_lazy);
7641 
7642 	switch (mode) {
7643 	case preempt_dynamic_none:
7644 		preempt_dynamic_enable(cond_resched);
7645 		preempt_dynamic_disable(might_resched);
7646 		preempt_dynamic_disable(preempt_schedule);
7647 		preempt_dynamic_disable(preempt_schedule_notrace);
7648 		preempt_dynamic_disable(irqentry_exit_cond_resched);
7649 		preempt_dynamic_key_disable(preempt_lazy);
7650 		if (mode != preempt_dynamic_mode)
7651 			pr_info("Dynamic Preempt: none\n");
7652 		break;
7653 
7654 	case preempt_dynamic_voluntary:
7655 		preempt_dynamic_enable(cond_resched);
7656 		preempt_dynamic_enable(might_resched);
7657 		preempt_dynamic_disable(preempt_schedule);
7658 		preempt_dynamic_disable(preempt_schedule_notrace);
7659 		preempt_dynamic_disable(irqentry_exit_cond_resched);
7660 		preempt_dynamic_key_disable(preempt_lazy);
7661 		if (mode != preempt_dynamic_mode)
7662 			pr_info("Dynamic Preempt: voluntary\n");
7663 		break;
7664 
7665 	case preempt_dynamic_full:
7666 		preempt_dynamic_disable(cond_resched);
7667 		preempt_dynamic_disable(might_resched);
7668 		preempt_dynamic_enable(preempt_schedule);
7669 		preempt_dynamic_enable(preempt_schedule_notrace);
7670 		preempt_dynamic_enable(irqentry_exit_cond_resched);
7671 		preempt_dynamic_key_disable(preempt_lazy);
7672 		if (mode != preempt_dynamic_mode)
7673 			pr_info("Dynamic Preempt: full\n");
7674 		break;
7675 
7676 	case preempt_dynamic_lazy:
7677 		preempt_dynamic_disable(cond_resched);
7678 		preempt_dynamic_disable(might_resched);
7679 		preempt_dynamic_enable(preempt_schedule);
7680 		preempt_dynamic_enable(preempt_schedule_notrace);
7681 		preempt_dynamic_enable(irqentry_exit_cond_resched);
7682 		preempt_dynamic_key_enable(preempt_lazy);
7683 		if (mode != preempt_dynamic_mode)
7684 			pr_info("Dynamic Preempt: lazy\n");
7685 		break;
7686 	}
7687 
7688 	preempt_dynamic_mode = mode;
7689 }
7690 
7691 void sched_dynamic_update(int mode)
7692 {
7693 	mutex_lock(&sched_dynamic_mutex);
7694 	__sched_dynamic_update(mode);
7695 	mutex_unlock(&sched_dynamic_mutex);
7696 }
7697 
7698 static int __init setup_preempt_mode(char *str)
7699 {
7700 	int mode = sched_dynamic_mode(str);
7701 	if (mode < 0) {
7702 		pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
7703 		return 0;
7704 	}
7705 
7706 	sched_dynamic_update(mode);
7707 	return 1;
7708 }
7709 __setup("preempt=", setup_preempt_mode);
7710 
7711 static void __init preempt_dynamic_init(void)
7712 {
7713 	if (preempt_dynamic_mode == preempt_dynamic_undefined) {
7714 		if (IS_ENABLED(CONFIG_PREEMPT_NONE)) {
7715 			sched_dynamic_update(preempt_dynamic_none);
7716 		} else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) {
7717 			sched_dynamic_update(preempt_dynamic_voluntary);
7718 		} else if (IS_ENABLED(CONFIG_PREEMPT_LAZY)) {
7719 			sched_dynamic_update(preempt_dynamic_lazy);
7720 		} else {
7721 			/* Default static call setting, nothing to do */
7722 			WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT));
7723 			preempt_dynamic_mode = preempt_dynamic_full;
7724 			pr_info("Dynamic Preempt: full\n");
7725 		}
7726 	}
7727 }
7728 
7729 # define PREEMPT_MODEL_ACCESSOR(mode) \
7730 	bool preempt_model_##mode(void)						 \
7731 	{									 \
7732 		WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \
7733 		return preempt_dynamic_mode == preempt_dynamic_##mode;		 \
7734 	}									 \
7735 	EXPORT_SYMBOL_GPL(preempt_model_##mode)
7736 
7737 PREEMPT_MODEL_ACCESSOR(none);
7738 PREEMPT_MODEL_ACCESSOR(voluntary);
7739 PREEMPT_MODEL_ACCESSOR(full);
7740 PREEMPT_MODEL_ACCESSOR(lazy);
7741 
7742 #else /* !CONFIG_PREEMPT_DYNAMIC: */
7743 
7744 #define preempt_dynamic_mode -1
7745 
7746 static inline void preempt_dynamic_init(void) { }
7747 
7748 #endif /* CONFIG_PREEMPT_DYNAMIC */
7749 
7750 const char *preempt_modes[] = {
7751 	"none", "voluntary", "full", "lazy", NULL,
7752 };
7753 
7754 const char *preempt_model_str(void)
7755 {
7756 	bool brace = IS_ENABLED(CONFIG_PREEMPT_RT) &&
7757 		(IS_ENABLED(CONFIG_PREEMPT_DYNAMIC) ||
7758 		 IS_ENABLED(CONFIG_PREEMPT_LAZY));
7759 	static char buf[128];
7760 
7761 	if (IS_ENABLED(CONFIG_PREEMPT_BUILD)) {
7762 		struct seq_buf s;
7763 
7764 		seq_buf_init(&s, buf, sizeof(buf));
7765 		seq_buf_puts(&s, "PREEMPT");
7766 
7767 		if (IS_ENABLED(CONFIG_PREEMPT_RT))
7768 			seq_buf_printf(&s, "%sRT%s",
7769 				       brace ? "_{" : "_",
7770 				       brace ? "," : "");
7771 
7772 		if (IS_ENABLED(CONFIG_PREEMPT_DYNAMIC)) {
7773 			seq_buf_printf(&s, "(%s)%s",
7774 				       preempt_dynamic_mode >= 0 ?
7775 				       preempt_modes[preempt_dynamic_mode] : "undef",
7776 				       brace ? "}" : "");
7777 			return seq_buf_str(&s);
7778 		}
7779 
7780 		if (IS_ENABLED(CONFIG_PREEMPT_LAZY)) {
7781 			seq_buf_printf(&s, "LAZY%s",
7782 				       brace ? "}" : "");
7783 			return seq_buf_str(&s);
7784 		}
7785 
7786 		return seq_buf_str(&s);
7787 	}
7788 
7789 	if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY_BUILD))
7790 		return "VOLUNTARY";
7791 
7792 	return "NONE";
7793 }
7794 
7795 int io_schedule_prepare(void)
7796 {
7797 	int old_iowait = current->in_iowait;
7798 
7799 	current->in_iowait = 1;
7800 	blk_flush_plug(current->plug, true);
7801 	return old_iowait;
7802 }
7803 
7804 void io_schedule_finish(int token)
7805 {
7806 	current->in_iowait = token;
7807 }
7808 
7809 /*
7810  * This task is about to go to sleep on IO. Increment rq->nr_iowait so
7811  * that process accounting knows that this is a task in IO wait state.
7812  */
7813 long __sched io_schedule_timeout(long timeout)
7814 {
7815 	int token;
7816 	long ret;
7817 
7818 	token = io_schedule_prepare();
7819 	ret = schedule_timeout(timeout);
7820 	io_schedule_finish(token);
7821 
7822 	return ret;
7823 }
7824 EXPORT_SYMBOL(io_schedule_timeout);
7825 
7826 void __sched io_schedule(void)
7827 {
7828 	int token;
7829 
7830 	token = io_schedule_prepare();
7831 	schedule();
7832 	io_schedule_finish(token);
7833 }
7834 EXPORT_SYMBOL(io_schedule);
7835 
7836 void sched_show_task(struct task_struct *p)
7837 {
7838 	unsigned long free;
7839 	int ppid;
7840 
7841 	if (!try_get_task_stack(p))
7842 		return;
7843 
7844 	pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
7845 
7846 	if (task_is_running(p))
7847 		pr_cont("  running task    ");
7848 	free = stack_not_used(p);
7849 	ppid = 0;
7850 	rcu_read_lock();
7851 	if (pid_alive(p))
7852 		ppid = task_pid_nr(rcu_dereference(p->real_parent));
7853 	rcu_read_unlock();
7854 	pr_cont(" stack:%-5lu pid:%-5d tgid:%-5d ppid:%-6d task_flags:0x%04x flags:0x%08lx\n",
7855 		free, task_pid_nr(p), task_tgid_nr(p),
7856 		ppid, p->flags, read_task_thread_flags(p));
7857 
7858 	print_worker_info(KERN_INFO, p);
7859 	print_stop_info(KERN_INFO, p);
7860 	print_scx_info(KERN_INFO, p);
7861 	show_stack(p, NULL, KERN_INFO);
7862 	put_task_stack(p);
7863 }
7864 EXPORT_SYMBOL_GPL(sched_show_task);
7865 
7866 static inline bool
7867 state_filter_match(unsigned long state_filter, struct task_struct *p)
7868 {
7869 	unsigned int state = READ_ONCE(p->__state);
7870 
7871 	/* no filter, everything matches */
7872 	if (!state_filter)
7873 		return true;
7874 
7875 	/* filter, but doesn't match */
7876 	if (!(state & state_filter))
7877 		return false;
7878 
7879 	/*
7880 	 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
7881 	 * TASK_KILLABLE).
7882 	 */
7883 	if (state_filter == TASK_UNINTERRUPTIBLE && (state & TASK_NOLOAD))
7884 		return false;
7885 
7886 	return true;
7887 }
7888 
7889 
7890 void show_state_filter(unsigned int state_filter)
7891 {
7892 	struct task_struct *g, *p;
7893 
7894 	rcu_read_lock();
7895 	for_each_process_thread(g, p) {
7896 		/*
7897 		 * reset the NMI-timeout, listing all files on a slow
7898 		 * console might take a lot of time:
7899 		 * Also, reset softlockup watchdogs on all CPUs, because
7900 		 * another CPU might be blocked waiting for us to process
7901 		 * an IPI.
7902 		 */
7903 		touch_nmi_watchdog();
7904 		touch_all_softlockup_watchdogs();
7905 		if (state_filter_match(state_filter, p))
7906 			sched_show_task(p);
7907 	}
7908 
7909 	if (!state_filter)
7910 		sysrq_sched_debug_show();
7911 
7912 	rcu_read_unlock();
7913 	/*
7914 	 * Only show locks if all tasks are dumped:
7915 	 */
7916 	if (!state_filter)
7917 		debug_show_all_locks();
7918 }
7919 
7920 /**
7921  * init_idle - set up an idle thread for a given CPU
7922  * @idle: task in question
7923  * @cpu: CPU the idle task belongs to
7924  *
7925  * NOTE: this function does not set the idle thread's NEED_RESCHED
7926  * flag, to make booting more robust.
7927  */
7928 void __init init_idle(struct task_struct *idle, int cpu)
7929 {
7930 	struct affinity_context ac = (struct affinity_context) {
7931 		.new_mask  = cpumask_of(cpu),
7932 		.flags     = 0,
7933 	};
7934 	struct rq *rq = cpu_rq(cpu);
7935 	unsigned long flags;
7936 
7937 	raw_spin_lock_irqsave(&idle->pi_lock, flags);
7938 	raw_spin_rq_lock(rq);
7939 
7940 	idle->__state = TASK_RUNNING;
7941 	idle->se.exec_start = sched_clock();
7942 	/*
7943 	 * PF_KTHREAD should already be set at this point; regardless, make it
7944 	 * look like a proper per-CPU kthread.
7945 	 */
7946 	idle->flags |= PF_KTHREAD | PF_NO_SETAFFINITY;
7947 	kthread_set_per_cpu(idle, cpu);
7948 
7949 	/*
7950 	 * No validation and serialization required at boot time and for
7951 	 * setting up the idle tasks of not yet online CPUs.
7952 	 */
7953 	set_cpus_allowed_common(idle, &ac);
7954 	/*
7955 	 * We're having a chicken and egg problem, even though we are
7956 	 * holding rq->lock, the CPU isn't yet set to this CPU so the
7957 	 * lockdep check in task_group() will fail.
7958 	 *
7959 	 * Similar case to sched_fork(). / Alternatively we could
7960 	 * use task_rq_lock() here and obtain the other rq->lock.
7961 	 *
7962 	 * Silence PROVE_RCU
7963 	 */
7964 	rcu_read_lock();
7965 	__set_task_cpu(idle, cpu);
7966 	rcu_read_unlock();
7967 
7968 	rq->idle = idle;
7969 	rq_set_donor(rq, idle);
7970 	rcu_assign_pointer(rq->curr, idle);
7971 	idle->on_rq = TASK_ON_RQ_QUEUED;
7972 	idle->on_cpu = 1;
7973 	raw_spin_rq_unlock(rq);
7974 	raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
7975 
7976 	/* Set the preempt count _outside_ the spinlocks! */
7977 	init_idle_preempt_count(idle, cpu);
7978 
7979 	/*
7980 	 * The idle tasks have their own, simple scheduling class:
7981 	 */
7982 	idle->sched_class = &idle_sched_class;
7983 	ftrace_graph_init_idle_task(idle, cpu);
7984 	vtime_init_idle(idle, cpu);
7985 	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
7986 }
7987 
7988 int cpuset_cpumask_can_shrink(const struct cpumask *cur,
7989 			      const struct cpumask *trial)
7990 {
7991 	int ret = 1;
7992 
7993 	if (cpumask_empty(cur))
7994 		return ret;
7995 
7996 	ret = dl_cpuset_cpumask_can_shrink(cur, trial);
7997 
7998 	return ret;
7999 }
8000 
8001 int task_can_attach(struct task_struct *p)
8002 {
8003 	int ret = 0;
8004 
8005 	/*
8006 	 * Kthreads which disallow setaffinity shouldn't be moved
8007 	 * to a new cpuset; we don't want to change their CPU
8008 	 * affinity and isolating such threads by their set of
8009 	 * allowed nodes is unnecessary.  Thus, cpusets are not
8010 	 * applicable for such threads.  This prevents checking for
8011 	 * success of set_cpus_allowed_ptr() on all attached tasks
8012 	 * before cpus_mask may be changed.
8013 	 */
8014 	if (p->flags & PF_NO_SETAFFINITY)
8015 		ret = -EINVAL;
8016 
8017 	return ret;
8018 }
8019 
8020 bool sched_smp_initialized __read_mostly;
8021 
8022 #ifdef CONFIG_NUMA_BALANCING
8023 /* Migrate current task p to target_cpu */
8024 int migrate_task_to(struct task_struct *p, int target_cpu)
8025 {
8026 	struct migration_arg arg = { p, target_cpu };
8027 	int curr_cpu = task_cpu(p);
8028 
8029 	if (curr_cpu == target_cpu)
8030 		return 0;
8031 
8032 	if (!cpumask_test_cpu(target_cpu, p->cpus_ptr))
8033 		return -EINVAL;
8034 
8035 	/* TODO: This is not properly updating schedstats */
8036 
8037 	trace_sched_move_numa(p, curr_cpu, target_cpu);
8038 	return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
8039 }
8040 
8041 /*
8042  * Requeue a task on a given node and accurately track the number of NUMA
8043  * tasks on the runqueues
8044  */
8045 void sched_setnuma(struct task_struct *p, int nid)
8046 {
8047 	guard(task_rq_lock)(p);
8048 	scoped_guard (sched_change, p, DEQUEUE_SAVE)
8049 		p->numa_preferred_nid = nid;
8050 }
8051 #endif /* CONFIG_NUMA_BALANCING */
8052 
8053 #ifdef CONFIG_HOTPLUG_CPU
8054 /*
8055  * Invoked on the outgoing CPU in context of the CPU hotplug thread
8056  * after ensuring that there are no user space tasks left on the CPU.
8057  *
8058  * If there is a lazy mm in use on the hotplug thread, drop it and
8059  * switch to init_mm.
8060  *
8061  * The reference count on init_mm is dropped in finish_cpu().
8062  */
8063 static void sched_force_init_mm(void)
8064 {
8065 	struct mm_struct *mm = current->active_mm;
8066 
8067 	if (mm != &init_mm) {
8068 		mmgrab_lazy_tlb(&init_mm);
8069 		local_irq_disable();
8070 		current->active_mm = &init_mm;
8071 		switch_mm_irqs_off(mm, &init_mm, current);
8072 		local_irq_enable();
8073 		finish_arch_post_lock_switch();
8074 		mmdrop_lazy_tlb(mm);
8075 	}
8076 
8077 	/* finish_cpu(), as ran on the BP, will clean up the active_mm state */
8078 }
8079 
8080 static int __balance_push_cpu_stop(void *arg)
8081 {
8082 	struct task_struct *p = arg;
8083 	struct rq *rq = this_rq();
8084 	struct rq_flags rf;
8085 	int cpu;
8086 
8087 	scoped_guard (raw_spinlock_irq, &p->pi_lock) {
8088 		/*
8089 		 * We may change the underlying rq, but the locks held will
8090 		 * appropriately be "transferred" when switching.
8091 		 */
8092 		context_unsafe_alias(rq);
8093 
8094 		cpu = select_fallback_rq(rq->cpu, p);
8095 
8096 		rq_lock(rq, &rf);
8097 		update_rq_clock(rq);
8098 		if (task_rq(p) == rq && task_on_rq_queued(p))
8099 			rq = __migrate_task(rq, &rf, p, cpu);
8100 		rq_unlock(rq, &rf);
8101 	}
8102 
8103 	put_task_struct(p);
8104 
8105 	return 0;
8106 }
8107 
8108 static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
8109 
8110 /*
8111  * Ensure we only run per-cpu kthreads once the CPU goes !active.
8112  *
8113  * This is enabled below SCHED_AP_ACTIVE; when !cpu_active(), but only
8114  * effective when the hotplug motion is down.
8115  */
8116 static void balance_push(struct rq *rq)
8117 	__must_hold(__rq_lockp(rq))
8118 {
8119 	struct task_struct *push_task = rq->curr;
8120 
8121 	lockdep_assert_rq_held(rq);
8122 
8123 	/*
8124 	 * Ensure the thing is persistent until balance_push_set(.on = false);
8125 	 */
8126 	rq->balance_callback = &balance_push_callback;
8127 
8128 	/*
8129 	 * Only active while going offline and when invoked on the outgoing
8130 	 * CPU.
8131 	 */
8132 	if (!cpu_dying(rq->cpu) || rq != this_rq())
8133 		return;
8134 
8135 	/*
8136 	 * Both the cpu-hotplug and stop task are in this case and are
8137 	 * required to complete the hotplug process.
8138 	 */
8139 	if (kthread_is_per_cpu(push_task) ||
8140 	    is_migration_disabled(push_task)) {
8141 
8142 		/*
8143 		 * If this is the idle task on the outgoing CPU try to wake
8144 		 * up the hotplug control thread which might wait for the
8145 		 * last task to vanish. The rcuwait_active() check is
8146 		 * accurate here because the waiter is pinned on this CPU
8147 		 * and can't obviously be running in parallel.
8148 		 *
8149 		 * On RT kernels this also has to check whether there are
8150 		 * pinned and scheduled out tasks on the runqueue. They
8151 		 * need to leave the migrate disabled section first.
8152 		 */
8153 		if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
8154 		    rcuwait_active(&rq->hotplug_wait)) {
8155 			raw_spin_rq_unlock(rq);
8156 			rcuwait_wake_up(&rq->hotplug_wait);
8157 			raw_spin_rq_lock(rq);
8158 		}
8159 		return;
8160 	}
8161 
8162 	get_task_struct(push_task);
8163 	/*
8164 	 * Temporarily drop rq->lock such that we can wake-up the stop task.
8165 	 * Both preemption and IRQs are still disabled.
8166 	 */
8167 	preempt_disable();
8168 	raw_spin_rq_unlock(rq);
8169 	stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
8170 			    this_cpu_ptr(&push_work));
8171 	preempt_enable();
8172 	/*
8173 	 * At this point need_resched() is true and we'll take the loop in
8174 	 * schedule(). The next pick is obviously going to be the stop task
8175 	 * which kthread_is_per_cpu() and will push this task away.
8176 	 */
8177 	raw_spin_rq_lock(rq);
8178 }
8179 
8180 static void balance_push_set(int cpu, bool on)
8181 {
8182 	struct rq *rq = cpu_rq(cpu);
8183 	struct rq_flags rf;
8184 
8185 	rq_lock_irqsave(rq, &rf);
8186 	if (on) {
8187 		WARN_ON_ONCE(rq->balance_callback);
8188 		rq->balance_callback = &balance_push_callback;
8189 	} else if (rq->balance_callback == &balance_push_callback) {
8190 		rq->balance_callback = NULL;
8191 	}
8192 	rq_unlock_irqrestore(rq, &rf);
8193 }
8194 
8195 /*
8196  * Invoked from a CPUs hotplug control thread after the CPU has been marked
8197  * inactive. All tasks which are not per CPU kernel threads are either
8198  * pushed off this CPU now via balance_push() or placed on a different CPU
8199  * during wakeup. Wait until the CPU is quiescent.
8200  */
8201 static void balance_hotplug_wait(void)
8202 {
8203 	struct rq *rq = this_rq();
8204 
8205 	rcuwait_wait_event(&rq->hotplug_wait,
8206 			   rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
8207 			   TASK_UNINTERRUPTIBLE);
8208 }
8209 
8210 #else /* !CONFIG_HOTPLUG_CPU: */
8211 
8212 static inline void balance_push(struct rq *rq)
8213 {
8214 }
8215 
8216 static inline void balance_push_set(int cpu, bool on)
8217 {
8218 }
8219 
8220 static inline void balance_hotplug_wait(void)
8221 {
8222 }
8223 
8224 #endif /* !CONFIG_HOTPLUG_CPU */
8225 
8226 void set_rq_online(struct rq *rq)
8227 {
8228 	if (!rq->online) {
8229 		const struct sched_class *class;
8230 
8231 		cpumask_set_cpu(rq->cpu, rq->rd->online);
8232 		rq->online = 1;
8233 
8234 		for_each_class(class) {
8235 			if (class->rq_online)
8236 				class->rq_online(rq);
8237 		}
8238 	}
8239 }
8240 
8241 void set_rq_offline(struct rq *rq)
8242 {
8243 	if (rq->online) {
8244 		const struct sched_class *class;
8245 
8246 		update_rq_clock(rq);
8247 		for_each_class(class) {
8248 			if (class->rq_offline)
8249 				class->rq_offline(rq);
8250 		}
8251 
8252 		cpumask_clear_cpu(rq->cpu, rq->rd->online);
8253 		rq->online = 0;
8254 	}
8255 }
8256 
8257 static inline void sched_set_rq_online(struct rq *rq, int cpu)
8258 {
8259 	struct rq_flags rf;
8260 
8261 	rq_lock_irqsave(rq, &rf);
8262 	if (rq->rd) {
8263 		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
8264 		set_rq_online(rq);
8265 	}
8266 	rq_unlock_irqrestore(rq, &rf);
8267 }
8268 
8269 static inline void sched_set_rq_offline(struct rq *rq, int cpu)
8270 {
8271 	struct rq_flags rf;
8272 
8273 	rq_lock_irqsave(rq, &rf);
8274 	if (rq->rd) {
8275 		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
8276 		set_rq_offline(rq);
8277 	}
8278 	rq_unlock_irqrestore(rq, &rf);
8279 }
8280 
8281 /*
8282  * used to mark begin/end of suspend/resume:
8283  */
8284 static int num_cpus_frozen;
8285 
8286 /*
8287  * Update cpusets according to cpu_active mask.  If cpusets are
8288  * disabled, cpuset_update_active_cpus() becomes a simple wrapper
8289  * around partition_sched_domains().
8290  *
8291  * If we come here as part of a suspend/resume, don't touch cpusets because we
8292  * want to restore it back to its original state upon resume anyway.
8293  */
8294 static void cpuset_cpu_active(void)
8295 {
8296 	if (cpuhp_tasks_frozen) {
8297 		/*
8298 		 * num_cpus_frozen tracks how many CPUs are involved in suspend
8299 		 * resume sequence. As long as this is not the last online
8300 		 * operation in the resume sequence, just build a single sched
8301 		 * domain, ignoring cpusets.
8302 		 */
8303 		cpuset_reset_sched_domains();
8304 		if (--num_cpus_frozen)
8305 			return;
8306 		/*
8307 		 * This is the last CPU online operation. So fall through and
8308 		 * restore the original sched domains by considering the
8309 		 * cpuset configurations.
8310 		 */
8311 		cpuset_force_rebuild();
8312 	}
8313 	cpuset_update_active_cpus();
8314 }
8315 
8316 static void cpuset_cpu_inactive(unsigned int cpu)
8317 {
8318 	if (!cpuhp_tasks_frozen) {
8319 		cpuset_update_active_cpus();
8320 	} else {
8321 		num_cpus_frozen++;
8322 		cpuset_reset_sched_domains();
8323 	}
8324 }
8325 
8326 static inline void sched_smt_present_inc(int cpu)
8327 {
8328 #ifdef CONFIG_SCHED_SMT
8329 	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
8330 		static_branch_inc_cpuslocked(&sched_smt_present);
8331 #endif
8332 }
8333 
8334 static inline void sched_smt_present_dec(int cpu)
8335 {
8336 #ifdef CONFIG_SCHED_SMT
8337 	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
8338 		static_branch_dec_cpuslocked(&sched_smt_present);
8339 #endif
8340 }
8341 
8342 int sched_cpu_activate(unsigned int cpu)
8343 {
8344 	struct rq *rq = cpu_rq(cpu);
8345 
8346 	/*
8347 	 * Clear the balance_push callback and prepare to schedule
8348 	 * regular tasks.
8349 	 */
8350 	balance_push_set(cpu, false);
8351 
8352 	/*
8353 	 * When going up, increment the number of cores with SMT present.
8354 	 */
8355 	sched_smt_present_inc(cpu);
8356 	set_cpu_active(cpu, true);
8357 
8358 	if (sched_smp_initialized) {
8359 		sched_update_numa(cpu, true);
8360 		sched_domains_numa_masks_set(cpu);
8361 		cpuset_cpu_active();
8362 	}
8363 
8364 	scx_rq_activate(rq);
8365 
8366 	/*
8367 	 * Put the rq online, if not already. This happens:
8368 	 *
8369 	 * 1) In the early boot process, because we build the real domains
8370 	 *    after all CPUs have been brought up.
8371 	 *
8372 	 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
8373 	 *    domains.
8374 	 */
8375 	sched_set_rq_online(rq, cpu);
8376 
8377 	return 0;
8378 }
8379 
8380 int sched_cpu_deactivate(unsigned int cpu)
8381 {
8382 	struct rq *rq = cpu_rq(cpu);
8383 	int ret;
8384 
8385 	ret = dl_bw_deactivate(cpu);
8386 
8387 	if (ret)
8388 		return ret;
8389 
8390 	/*
8391 	 * Remove CPU from nohz.idle_cpus_mask to prevent participating in
8392 	 * load balancing when not active
8393 	 */
8394 	nohz_balance_exit_idle(rq);
8395 
8396 	set_cpu_active(cpu, false);
8397 
8398 	/*
8399 	 * From this point forward, this CPU will refuse to run any task that
8400 	 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
8401 	 * push those tasks away until this gets cleared, see
8402 	 * sched_cpu_dying().
8403 	 */
8404 	balance_push_set(cpu, true);
8405 
8406 	/*
8407 	 * We've cleared cpu_active_mask / set balance_push, wait for all
8408 	 * preempt-disabled and RCU users of this state to go away such that
8409 	 * all new such users will observe it.
8410 	 *
8411 	 * Specifically, we rely on ttwu to no longer target this CPU, see
8412 	 * ttwu_queue_cond() and is_cpu_allowed().
8413 	 *
8414 	 * Do sync before park smpboot threads to take care the RCU boost case.
8415 	 */
8416 	synchronize_rcu();
8417 
8418 	sched_set_rq_offline(rq, cpu);
8419 
8420 	scx_rq_deactivate(rq);
8421 
8422 	/*
8423 	 * When going down, decrement the number of cores with SMT present.
8424 	 */
8425 	sched_smt_present_dec(cpu);
8426 
8427 #ifdef CONFIG_SCHED_SMT
8428 	sched_core_cpu_deactivate(cpu);
8429 #endif
8430 
8431 	if (!sched_smp_initialized)
8432 		return 0;
8433 
8434 	sched_update_numa(cpu, false);
8435 	cpuset_cpu_inactive(cpu);
8436 	sched_domains_numa_masks_clear(cpu);
8437 	return 0;
8438 }
8439 
8440 static void sched_rq_cpu_starting(unsigned int cpu)
8441 {
8442 	struct rq *rq = cpu_rq(cpu);
8443 
8444 	rq->calc_load_update = calc_load_update;
8445 	update_max_interval();
8446 }
8447 
8448 int sched_cpu_starting(unsigned int cpu)
8449 {
8450 	sched_core_cpu_starting(cpu);
8451 	sched_rq_cpu_starting(cpu);
8452 	sched_tick_start(cpu);
8453 	return 0;
8454 }
8455 
8456 #ifdef CONFIG_HOTPLUG_CPU
8457 
8458 /*
8459  * Invoked immediately before the stopper thread is invoked to bring the
8460  * CPU down completely. At this point all per CPU kthreads except the
8461  * hotplug thread (current) and the stopper thread (inactive) have been
8462  * either parked or have been unbound from the outgoing CPU. Ensure that
8463  * any of those which might be on the way out are gone.
8464  *
8465  * If after this point a bound task is being woken on this CPU then the
8466  * responsible hotplug callback has failed to do it's job.
8467  * sched_cpu_dying() will catch it with the appropriate fireworks.
8468  */
8469 int sched_cpu_wait_empty(unsigned int cpu)
8470 {
8471 	balance_hotplug_wait();
8472 	sched_force_init_mm();
8473 	return 0;
8474 }
8475 
8476 /*
8477  * Since this CPU is going 'away' for a while, fold any nr_active delta we
8478  * might have. Called from the CPU stopper task after ensuring that the
8479  * stopper is the last running task on the CPU, so nr_active count is
8480  * stable. We need to take the tear-down thread which is calling this into
8481  * account, so we hand in adjust = 1 to the load calculation.
8482  *
8483  * Also see the comment "Global load-average calculations".
8484  */
8485 static void calc_load_migrate(struct rq *rq)
8486 {
8487 	long delta = calc_load_fold_active(rq, 1);
8488 
8489 	if (delta)
8490 		atomic_long_add(delta, &calc_load_tasks);
8491 }
8492 
8493 static void dump_rq_tasks(struct rq *rq, const char *loglvl)
8494 {
8495 	struct task_struct *g, *p;
8496 	int cpu = cpu_of(rq);
8497 
8498 	lockdep_assert_rq_held(rq);
8499 
8500 	printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
8501 	for_each_process_thread(g, p) {
8502 		if (task_cpu(p) != cpu)
8503 			continue;
8504 
8505 		if (!task_on_rq_queued(p))
8506 			continue;
8507 
8508 		printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
8509 	}
8510 }
8511 
8512 int sched_cpu_dying(unsigned int cpu)
8513 {
8514 	struct rq *rq = cpu_rq(cpu);
8515 	struct rq_flags rf;
8516 
8517 	/* Handle pending wakeups and then migrate everything off */
8518 	sched_tick_stop(cpu);
8519 
8520 	rq_lock_irqsave(rq, &rf);
8521 	update_rq_clock(rq);
8522 	if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
8523 		WARN(true, "Dying CPU not properly vacated!");
8524 		dump_rq_tasks(rq, KERN_WARNING);
8525 	}
8526 	dl_server_stop(&rq->fair_server);
8527 #ifdef CONFIG_SCHED_CLASS_EXT
8528 	dl_server_stop(&rq->ext_server);
8529 #endif
8530 	rq_unlock_irqrestore(rq, &rf);
8531 
8532 	calc_load_migrate(rq);
8533 	update_max_interval();
8534 	hrtick_clear(rq);
8535 	sched_core_cpu_dying(cpu);
8536 	return 0;
8537 }
8538 #endif /* CONFIG_HOTPLUG_CPU */
8539 
8540 void __init sched_init_smp(void)
8541 {
8542 	sched_init_numa(NUMA_NO_NODE);
8543 
8544 	prandom_init_once(&sched_rnd_state);
8545 
8546 	/*
8547 	 * There's no userspace yet to cause hotplug operations; hence all the
8548 	 * CPU masks are stable and all blatant races in the below code cannot
8549 	 * happen.
8550 	 */
8551 	sched_domains_mutex_lock();
8552 	sched_init_domains(cpu_active_mask);
8553 	sched_domains_mutex_unlock();
8554 
8555 	/* Move init over to a non-isolated CPU */
8556 	if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0)
8557 		BUG();
8558 	current->flags &= ~PF_NO_SETAFFINITY;
8559 	sched_init_granularity();
8560 
8561 	init_sched_rt_class();
8562 	init_sched_dl_class();
8563 
8564 	sched_init_dl_servers();
8565 
8566 	sched_smp_initialized = true;
8567 }
8568 
8569 static int __init migration_init(void)
8570 {
8571 	sched_cpu_starting(smp_processor_id());
8572 	return 0;
8573 }
8574 early_initcall(migration_init);
8575 
8576 int in_sched_functions(unsigned long addr)
8577 {
8578 	return in_lock_functions(addr) ||
8579 		(addr >= (unsigned long)__sched_text_start
8580 		&& addr < (unsigned long)__sched_text_end);
8581 }
8582 
8583 #ifdef CONFIG_CGROUP_SCHED
8584 /*
8585  * Default task group.
8586  * Every task in system belongs to this group at bootup.
8587  */
8588 struct task_group root_task_group;
8589 LIST_HEAD(task_groups);
8590 
8591 /* Cacheline aligned slab cache for task_group */
8592 static struct kmem_cache *task_group_cache __ro_after_init;
8593 #endif
8594 
8595 void __init sched_init(void)
8596 {
8597 	unsigned long ptr = 0;
8598 	int i;
8599 
8600 	/* Make sure the linker didn't screw up */
8601 	BUG_ON(!sched_class_above(&stop_sched_class, &dl_sched_class));
8602 	BUG_ON(!sched_class_above(&dl_sched_class, &rt_sched_class));
8603 	BUG_ON(!sched_class_above(&rt_sched_class, &fair_sched_class));
8604 	BUG_ON(!sched_class_above(&fair_sched_class, &idle_sched_class));
8605 #ifdef CONFIG_SCHED_CLASS_EXT
8606 	BUG_ON(!sched_class_above(&fair_sched_class, &ext_sched_class));
8607 	BUG_ON(!sched_class_above(&ext_sched_class, &idle_sched_class));
8608 #endif
8609 
8610 	wait_bit_init();
8611 
8612 #ifdef CONFIG_FAIR_GROUP_SCHED
8613 	ptr += 2 * nr_cpu_ids * sizeof(void **);
8614 #endif
8615 #ifdef CONFIG_RT_GROUP_SCHED
8616 	ptr += 2 * nr_cpu_ids * sizeof(void **);
8617 #endif
8618 	if (ptr) {
8619 		ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT);
8620 
8621 #ifdef CONFIG_FAIR_GROUP_SCHED
8622 		root_task_group.se = (struct sched_entity **)ptr;
8623 		ptr += nr_cpu_ids * sizeof(void **);
8624 
8625 		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
8626 		ptr += nr_cpu_ids * sizeof(void **);
8627 
8628 		root_task_group.shares = ROOT_TASK_GROUP_LOAD;
8629 		init_cfs_bandwidth(&root_task_group.cfs_bandwidth, NULL);
8630 #endif /* CONFIG_FAIR_GROUP_SCHED */
8631 #ifdef CONFIG_EXT_GROUP_SCHED
8632 		scx_tg_init(&root_task_group);
8633 #endif /* CONFIG_EXT_GROUP_SCHED */
8634 #ifdef CONFIG_RT_GROUP_SCHED
8635 		root_task_group.rt_se = (struct sched_rt_entity **)ptr;
8636 		ptr += nr_cpu_ids * sizeof(void **);
8637 
8638 		root_task_group.rt_rq = (struct rt_rq **)ptr;
8639 		ptr += nr_cpu_ids * sizeof(void **);
8640 
8641 #endif /* CONFIG_RT_GROUP_SCHED */
8642 	}
8643 
8644 	init_defrootdomain();
8645 
8646 #ifdef CONFIG_RT_GROUP_SCHED
8647 	init_rt_bandwidth(&root_task_group.rt_bandwidth,
8648 			global_rt_period(), global_rt_runtime());
8649 #endif /* CONFIG_RT_GROUP_SCHED */
8650 
8651 #ifdef CONFIG_CGROUP_SCHED
8652 	task_group_cache = KMEM_CACHE(task_group, 0);
8653 
8654 	list_add(&root_task_group.list, &task_groups);
8655 	INIT_LIST_HEAD(&root_task_group.children);
8656 	INIT_LIST_HEAD(&root_task_group.siblings);
8657 	autogroup_init(&init_task);
8658 #endif /* CONFIG_CGROUP_SCHED */
8659 
8660 	for_each_possible_cpu(i) {
8661 		struct rq *rq;
8662 
8663 		rq = cpu_rq(i);
8664 		raw_spin_lock_init(&rq->__lock);
8665 		rq->nr_running = 0;
8666 		rq->calc_load_active = 0;
8667 		rq->calc_load_update = jiffies + LOAD_FREQ;
8668 		init_cfs_rq(&rq->cfs);
8669 		init_rt_rq(&rq->rt);
8670 		init_dl_rq(&rq->dl);
8671 #ifdef CONFIG_FAIR_GROUP_SCHED
8672 		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
8673 		rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
8674 		/*
8675 		 * How much CPU bandwidth does root_task_group get?
8676 		 *
8677 		 * In case of task-groups formed through the cgroup filesystem, it
8678 		 * gets 100% of the CPU resources in the system. This overall
8679 		 * system CPU resource is divided among the tasks of
8680 		 * root_task_group and its child task-groups in a fair manner,
8681 		 * based on each entity's (task or task-group's) weight
8682 		 * (se->load.weight).
8683 		 *
8684 		 * In other words, if root_task_group has 10 tasks of weight
8685 		 * 1024) and two child groups A0 and A1 (of weight 1024 each),
8686 		 * then A0's share of the CPU resource is:
8687 		 *
8688 		 *	A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
8689 		 *
8690 		 * We achieve this by letting root_task_group's tasks sit
8691 		 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
8692 		 */
8693 		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
8694 #endif /* CONFIG_FAIR_GROUP_SCHED */
8695 
8696 #ifdef CONFIG_RT_GROUP_SCHED
8697 		/*
8698 		 * This is required for init cpu because rt.c:__enable_runtime()
8699 		 * starts working after scheduler_running, which is not the case
8700 		 * yet.
8701 		 */
8702 		rq->rt.rt_runtime = global_rt_runtime();
8703 		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
8704 #endif
8705 		rq->next_class = &idle_sched_class;
8706 
8707 		rq->sd = NULL;
8708 		rq->rd = NULL;
8709 		rq->cpu_capacity = SCHED_CAPACITY_SCALE;
8710 		rq->balance_callback = &balance_push_callback;
8711 		rq->active_balance = 0;
8712 		rq->next_balance = jiffies;
8713 		rq->push_cpu = 0;
8714 		rq->cpu = i;
8715 		rq->online = 0;
8716 		rq->idle_stamp = 0;
8717 		rq->avg_idle = 2*sysctl_sched_migration_cost;
8718 		rq->max_idle_balance_cost = sysctl_sched_migration_cost;
8719 
8720 		INIT_LIST_HEAD(&rq->cfs_tasks);
8721 
8722 		rq_attach_root(rq, &def_root_domain);
8723 #ifdef CONFIG_NO_HZ_COMMON
8724 		rq->last_blocked_load_update_tick = jiffies;
8725 		atomic_set(&rq->nohz_flags, 0);
8726 
8727 		INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
8728 #endif
8729 #ifdef CONFIG_HOTPLUG_CPU
8730 		rcuwait_init(&rq->hotplug_wait);
8731 #endif
8732 		hrtick_rq_init(rq);
8733 		atomic_set(&rq->nr_iowait, 0);
8734 		fair_server_init(rq);
8735 #ifdef CONFIG_SCHED_CLASS_EXT
8736 		ext_server_init(rq);
8737 #endif
8738 
8739 #ifdef CONFIG_SCHED_CORE
8740 		rq->core = rq;
8741 		rq->core_pick = NULL;
8742 		rq->core_dl_server = NULL;
8743 		rq->core_enabled = 0;
8744 		rq->core_tree = RB_ROOT;
8745 		rq->core_forceidle_count = 0;
8746 		rq->core_forceidle_occupation = 0;
8747 		rq->core_forceidle_start = 0;
8748 
8749 		rq->core_cookie = 0UL;
8750 #endif
8751 		zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i));
8752 	}
8753 
8754 	set_load_weight(&init_task, false);
8755 	init_task.se.slice = sysctl_sched_base_slice,
8756 
8757 	/*
8758 	 * The boot idle thread does lazy MMU switching as well:
8759 	 */
8760 	mmgrab_lazy_tlb(&init_mm);
8761 	enter_lazy_tlb(&init_mm, current);
8762 
8763 	/*
8764 	 * The idle task doesn't need the kthread struct to function, but it
8765 	 * is dressed up as a per-CPU kthread and thus needs to play the part
8766 	 * if we want to avoid special-casing it in code that deals with per-CPU
8767 	 * kthreads.
8768 	 */
8769 	WARN_ON(!set_kthread_struct(current));
8770 
8771 	/*
8772 	 * Make us the idle thread. Technically, schedule() should not be
8773 	 * called from this thread, however somewhere below it might be,
8774 	 * but because we are the idle thread, we just pick up running again
8775 	 * when this runqueue becomes "idle".
8776 	 */
8777 	__sched_fork(0, current);
8778 	init_idle(current, smp_processor_id());
8779 
8780 	calc_load_update = jiffies + LOAD_FREQ;
8781 
8782 	idle_thread_set_boot_cpu();
8783 
8784 	balance_push_set(smp_processor_id(), false);
8785 	init_sched_fair_class();
8786 	init_sched_ext_class();
8787 
8788 	psi_init();
8789 
8790 	init_uclamp();
8791 
8792 	preempt_dynamic_init();
8793 
8794 	scheduler_running = 1;
8795 }
8796 
8797 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
8798 
8799 void __might_sleep(const char *file, int line)
8800 {
8801 	unsigned int state = get_current_state();
8802 	/*
8803 	 * Blocking primitives will set (and therefore destroy) current->state,
8804 	 * since we will exit with TASK_RUNNING make sure we enter with it,
8805 	 * otherwise we will destroy state.
8806 	 */
8807 	WARN_ONCE(state != TASK_RUNNING && current->task_state_change,
8808 			"do not call blocking ops when !TASK_RUNNING; "
8809 			"state=%x set at [<%p>] %pS\n", state,
8810 			(void *)current->task_state_change,
8811 			(void *)current->task_state_change);
8812 
8813 	__might_resched(file, line, 0);
8814 }
8815 EXPORT_SYMBOL(__might_sleep);
8816 
8817 static void print_preempt_disable_ip(int preempt_offset, unsigned long ip)
8818 {
8819 	if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT))
8820 		return;
8821 
8822 	if (preempt_count() == preempt_offset)
8823 		return;
8824 
8825 	pr_err("Preemption disabled at:");
8826 	print_ip_sym(KERN_ERR, ip);
8827 }
8828 
8829 static inline bool resched_offsets_ok(unsigned int offsets)
8830 {
8831 	unsigned int nested = preempt_count();
8832 
8833 	nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT;
8834 
8835 	return nested == offsets;
8836 }
8837 
8838 void __might_resched(const char *file, int line, unsigned int offsets)
8839 {
8840 	/* Ratelimiting timestamp: */
8841 	static unsigned long prev_jiffy;
8842 
8843 	unsigned long preempt_disable_ip;
8844 
8845 	/* WARN_ON_ONCE() by default, no rate limit required: */
8846 	rcu_sleep_check();
8847 
8848 	if ((resched_offsets_ok(offsets) && !irqs_disabled() &&
8849 	     !is_idle_task(current) && !current->non_block_count) ||
8850 	    system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
8851 	    oops_in_progress)
8852 		return;
8853 
8854 	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8855 		return;
8856 	prev_jiffy = jiffies;
8857 
8858 	/* Save this before calling printk(), since that will clobber it: */
8859 	preempt_disable_ip = get_preempt_disable_ip(current);
8860 
8861 	pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
8862 	       file, line);
8863 	pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
8864 	       in_atomic(), irqs_disabled(), current->non_block_count,
8865 	       current->pid, current->comm);
8866 	pr_err("preempt_count: %x, expected: %x\n", preempt_count(),
8867 	       offsets & MIGHT_RESCHED_PREEMPT_MASK);
8868 
8869 	if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
8870 		pr_err("RCU nest depth: %d, expected: %u\n",
8871 		       rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT);
8872 	}
8873 
8874 	if (task_stack_end_corrupted(current))
8875 		pr_emerg("Thread overran stack, or stack corrupted\n");
8876 
8877 	debug_show_held_locks(current);
8878 	if (irqs_disabled())
8879 		print_irqtrace_events(current);
8880 
8881 	print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK,
8882 				 preempt_disable_ip);
8883 
8884 	dump_stack();
8885 	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
8886 }
8887 EXPORT_SYMBOL(__might_resched);
8888 
8889 void __cant_sleep(const char *file, int line, int preempt_offset)
8890 {
8891 	static unsigned long prev_jiffy;
8892 
8893 	if (irqs_disabled())
8894 		return;
8895 
8896 	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
8897 		return;
8898 
8899 	if (preempt_count() > preempt_offset)
8900 		return;
8901 
8902 	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8903 		return;
8904 	prev_jiffy = jiffies;
8905 
8906 	printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
8907 	printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
8908 			in_atomic(), irqs_disabled(),
8909 			current->pid, current->comm);
8910 
8911 	debug_show_held_locks(current);
8912 	dump_stack();
8913 	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
8914 }
8915 EXPORT_SYMBOL_GPL(__cant_sleep);
8916 
8917 # ifdef CONFIG_SMP
8918 void __cant_migrate(const char *file, int line)
8919 {
8920 	static unsigned long prev_jiffy;
8921 
8922 	if (irqs_disabled())
8923 		return;
8924 
8925 	if (is_migration_disabled(current))
8926 		return;
8927 
8928 	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
8929 		return;
8930 
8931 	if (preempt_count() > 0)
8932 		return;
8933 
8934 	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8935 		return;
8936 	prev_jiffy = jiffies;
8937 
8938 	pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
8939 	pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
8940 	       in_atomic(), irqs_disabled(), is_migration_disabled(current),
8941 	       current->pid, current->comm);
8942 
8943 	debug_show_held_locks(current);
8944 	dump_stack();
8945 	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
8946 }
8947 EXPORT_SYMBOL_GPL(__cant_migrate);
8948 # endif /* CONFIG_SMP */
8949 #endif /* CONFIG_DEBUG_ATOMIC_SLEEP */
8950 
8951 #ifdef CONFIG_MAGIC_SYSRQ
8952 void normalize_rt_tasks(void)
8953 {
8954 	struct task_struct *g, *p;
8955 	struct sched_attr attr = {
8956 		.sched_policy = SCHED_NORMAL,
8957 	};
8958 
8959 	read_lock(&tasklist_lock);
8960 	for_each_process_thread(g, p) {
8961 		/*
8962 		 * Only normalize user tasks:
8963 		 */
8964 		if (p->flags & PF_KTHREAD)
8965 			continue;
8966 
8967 		p->se.exec_start = 0;
8968 		schedstat_set(p->stats.wait_start,  0);
8969 		schedstat_set(p->stats.sleep_start, 0);
8970 		schedstat_set(p->stats.block_start, 0);
8971 
8972 		if (!rt_or_dl_task(p)) {
8973 			/*
8974 			 * Renice negative nice level userspace
8975 			 * tasks back to 0:
8976 			 */
8977 			if (task_nice(p) < 0)
8978 				set_user_nice(p, 0);
8979 			continue;
8980 		}
8981 
8982 		__sched_setscheduler(p, &attr, false, false);
8983 	}
8984 	read_unlock(&tasklist_lock);
8985 }
8986 
8987 #endif /* CONFIG_MAGIC_SYSRQ */
8988 
8989 #ifdef CONFIG_KGDB_KDB
8990 /*
8991  * These functions are only useful for KDB.
8992  *
8993  * They can only be called when the whole system has been
8994  * stopped - every CPU needs to be quiescent, and no scheduling
8995  * activity can take place. Using them for anything else would
8996  * be a serious bug, and as a result, they aren't even visible
8997  * under any other configuration.
8998  */
8999 
9000 /**
9001  * curr_task - return the current task for a given CPU.
9002  * @cpu: the processor in question.
9003  *
9004  * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
9005  *
9006  * Return: The current task for @cpu.
9007  */
9008 struct task_struct *curr_task(int cpu)
9009 {
9010 	return cpu_curr(cpu);
9011 }
9012 
9013 #endif /* CONFIG_KGDB_KDB */
9014 
9015 #ifdef CONFIG_CGROUP_SCHED
9016 /* task_group_lock serializes the addition/removal of task groups */
9017 static DEFINE_SPINLOCK(task_group_lock);
9018 
9019 static inline void alloc_uclamp_sched_group(struct task_group *tg,
9020 					    struct task_group *parent)
9021 {
9022 #ifdef CONFIG_UCLAMP_TASK_GROUP
9023 	enum uclamp_id clamp_id;
9024 
9025 	for_each_clamp_id(clamp_id) {
9026 		uclamp_se_set(&tg->uclamp_req[clamp_id],
9027 			      uclamp_none(clamp_id), false);
9028 		tg->uclamp[clamp_id] = parent->uclamp[clamp_id];
9029 	}
9030 #endif
9031 }
9032 
9033 static void sched_free_group(struct task_group *tg)
9034 {
9035 	free_fair_sched_group(tg);
9036 	free_rt_sched_group(tg);
9037 	autogroup_free(tg);
9038 	kmem_cache_free(task_group_cache, tg);
9039 }
9040 
9041 static void sched_free_group_rcu(struct rcu_head *rcu)
9042 {
9043 	sched_free_group(container_of(rcu, struct task_group, rcu));
9044 }
9045 
9046 static void sched_unregister_group(struct task_group *tg)
9047 {
9048 	unregister_fair_sched_group(tg);
9049 	unregister_rt_sched_group(tg);
9050 	/*
9051 	 * We have to wait for yet another RCU grace period to expire, as
9052 	 * print_cfs_stats() might run concurrently.
9053 	 */
9054 	call_rcu(&tg->rcu, sched_free_group_rcu);
9055 }
9056 
9057 /* allocate runqueue etc for a new task group */
9058 struct task_group *sched_create_group(struct task_group *parent)
9059 {
9060 	struct task_group *tg;
9061 
9062 	tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
9063 	if (!tg)
9064 		return ERR_PTR(-ENOMEM);
9065 
9066 	if (!alloc_fair_sched_group(tg, parent))
9067 		goto err;
9068 
9069 	if (!alloc_rt_sched_group(tg, parent))
9070 		goto err;
9071 
9072 	scx_tg_init(tg);
9073 	alloc_uclamp_sched_group(tg, parent);
9074 
9075 	return tg;
9076 
9077 err:
9078 	sched_free_group(tg);
9079 	return ERR_PTR(-ENOMEM);
9080 }
9081 
9082 void sched_online_group(struct task_group *tg, struct task_group *parent)
9083 {
9084 	unsigned long flags;
9085 
9086 	spin_lock_irqsave(&task_group_lock, flags);
9087 	list_add_tail_rcu(&tg->list, &task_groups);
9088 
9089 	/* Root should already exist: */
9090 	WARN_ON(!parent);
9091 
9092 	tg->parent = parent;
9093 	INIT_LIST_HEAD(&tg->children);
9094 	list_add_rcu(&tg->siblings, &parent->children);
9095 	spin_unlock_irqrestore(&task_group_lock, flags);
9096 
9097 	online_fair_sched_group(tg);
9098 }
9099 
9100 /* RCU callback to free various structures associated with a task group */
9101 static void sched_unregister_group_rcu(struct rcu_head *rhp)
9102 {
9103 	/* Now it should be safe to free those cfs_rqs: */
9104 	sched_unregister_group(container_of(rhp, struct task_group, rcu));
9105 }
9106 
9107 void sched_destroy_group(struct task_group *tg)
9108 {
9109 	/* Wait for possible concurrent references to cfs_rqs complete: */
9110 	call_rcu(&tg->rcu, sched_unregister_group_rcu);
9111 }
9112 
9113 void sched_release_group(struct task_group *tg)
9114 {
9115 	unsigned long flags;
9116 
9117 	/*
9118 	 * Unlink first, to avoid walk_tg_tree_from() from finding us (via
9119 	 * sched_cfs_period_timer()).
9120 	 *
9121 	 * For this to be effective, we have to wait for all pending users of
9122 	 * this task group to leave their RCU critical section to ensure no new
9123 	 * user will see our dying task group any more. Specifically ensure
9124 	 * that tg_unthrottle_up() won't add decayed cfs_rq's to it.
9125 	 *
9126 	 * We therefore defer calling unregister_fair_sched_group() to
9127 	 * sched_unregister_group() which is guarantied to get called only after the
9128 	 * current RCU grace period has expired.
9129 	 */
9130 	spin_lock_irqsave(&task_group_lock, flags);
9131 	list_del_rcu(&tg->list);
9132 	list_del_rcu(&tg->siblings);
9133 	spin_unlock_irqrestore(&task_group_lock, flags);
9134 }
9135 
9136 static void sched_change_group(struct task_struct *tsk)
9137 {
9138 	struct task_group *tg;
9139 
9140 	/*
9141 	 * All callers are synchronized by task_rq_lock(); we do not use RCU
9142 	 * which is pointless here. Thus, we pass "true" to task_css_check()
9143 	 * to prevent lockdep warnings.
9144 	 */
9145 	tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
9146 			  struct task_group, css);
9147 	tg = autogroup_task_group(tsk, tg);
9148 	tsk->sched_task_group = tg;
9149 
9150 #ifdef CONFIG_FAIR_GROUP_SCHED
9151 	if (tsk->sched_class->task_change_group)
9152 		tsk->sched_class->task_change_group(tsk);
9153 	else
9154 #endif
9155 		set_task_rq(tsk, task_cpu(tsk));
9156 }
9157 
9158 /*
9159  * Change task's runqueue when it moves between groups.
9160  *
9161  * The caller of this function should have put the task in its new group by
9162  * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
9163  * its new group.
9164  */
9165 void sched_move_task(struct task_struct *tsk, bool for_autogroup)
9166 {
9167 	unsigned int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE;
9168 	bool resched = false;
9169 	bool queued = false;
9170 	struct rq *rq;
9171 
9172 	CLASS(task_rq_lock, rq_guard)(tsk);
9173 	rq = rq_guard.rq;
9174 
9175 	scoped_guard (sched_change, tsk, queue_flags) {
9176 		sched_change_group(tsk);
9177 		if (!for_autogroup)
9178 			scx_cgroup_move_task(tsk);
9179 		if (scope->running)
9180 			resched = true;
9181 		queued = scope->queued;
9182 	}
9183 
9184 	if (resched)
9185 		resched_curr(rq);
9186 	else if (queued)
9187 		wakeup_preempt(rq, tsk, 0);
9188 
9189 	__balance_callbacks(rq, &rq_guard.rf);
9190 }
9191 
9192 static struct cgroup_subsys_state *
9193 cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
9194 {
9195 	struct task_group *parent = css_tg(parent_css);
9196 	struct task_group *tg;
9197 
9198 	if (!parent) {
9199 		/* This is early initialization for the top cgroup */
9200 		return &root_task_group.css;
9201 	}
9202 
9203 	tg = sched_create_group(parent);
9204 	if (IS_ERR(tg))
9205 		return ERR_PTR(-ENOMEM);
9206 
9207 	return &tg->css;
9208 }
9209 
9210 /* Expose task group only after completing cgroup initialization */
9211 static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
9212 {
9213 	struct task_group *tg = css_tg(css);
9214 	struct task_group *parent = css_tg(css->parent);
9215 	int ret;
9216 
9217 	ret = scx_tg_online(tg);
9218 	if (ret)
9219 		return ret;
9220 
9221 	if (parent)
9222 		sched_online_group(tg, parent);
9223 
9224 #ifdef CONFIG_UCLAMP_TASK_GROUP
9225 	/* Propagate the effective uclamp value for the new group */
9226 	guard(mutex)(&uclamp_mutex);
9227 	guard(rcu)();
9228 	cpu_util_update_eff(css);
9229 #endif
9230 
9231 	return 0;
9232 }
9233 
9234 static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
9235 {
9236 	struct task_group *tg = css_tg(css);
9237 
9238 	scx_tg_offline(tg);
9239 }
9240 
9241 static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
9242 {
9243 	struct task_group *tg = css_tg(css);
9244 
9245 	sched_release_group(tg);
9246 }
9247 
9248 static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
9249 {
9250 	struct task_group *tg = css_tg(css);
9251 
9252 	/*
9253 	 * Relies on the RCU grace period between css_released() and this.
9254 	 */
9255 	sched_unregister_group(tg);
9256 }
9257 
9258 static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
9259 {
9260 #ifdef CONFIG_RT_GROUP_SCHED
9261 	struct task_struct *task;
9262 	struct cgroup_subsys_state *css;
9263 
9264 	if (!rt_group_sched_enabled())
9265 		goto scx_check;
9266 
9267 	cgroup_taskset_for_each(task, css, tset) {
9268 		if (!sched_rt_can_attach(css_tg(css), task))
9269 			return -EINVAL;
9270 	}
9271 scx_check:
9272 #endif /* CONFIG_RT_GROUP_SCHED */
9273 	return scx_cgroup_can_attach(tset);
9274 }
9275 
9276 static void cpu_cgroup_attach(struct cgroup_taskset *tset)
9277 {
9278 	struct task_struct *task;
9279 	struct cgroup_subsys_state *css;
9280 
9281 	cgroup_taskset_for_each(task, css, tset)
9282 		sched_move_task(task, false);
9283 }
9284 
9285 static void cpu_cgroup_cancel_attach(struct cgroup_taskset *tset)
9286 {
9287 	scx_cgroup_cancel_attach(tset);
9288 }
9289 
9290 #ifdef CONFIG_UCLAMP_TASK_GROUP
9291 static void cpu_util_update_eff(struct cgroup_subsys_state *css)
9292 {
9293 	struct cgroup_subsys_state *top_css = css;
9294 	struct uclamp_se *uc_parent = NULL;
9295 	struct uclamp_se *uc_se = NULL;
9296 	unsigned int eff[UCLAMP_CNT];
9297 	enum uclamp_id clamp_id;
9298 	unsigned int clamps;
9299 
9300 	lockdep_assert_held(&uclamp_mutex);
9301 	WARN_ON_ONCE(!rcu_read_lock_held());
9302 
9303 	css_for_each_descendant_pre(css, top_css) {
9304 		uc_parent = css_tg(css)->parent
9305 			? css_tg(css)->parent->uclamp : NULL;
9306 
9307 		for_each_clamp_id(clamp_id) {
9308 			/* Assume effective clamps matches requested clamps */
9309 			eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value;
9310 			/* Cap effective clamps with parent's effective clamps */
9311 			if (uc_parent &&
9312 			    eff[clamp_id] > uc_parent[clamp_id].value) {
9313 				eff[clamp_id] = uc_parent[clamp_id].value;
9314 			}
9315 		}
9316 		/* Ensure protection is always capped by limit */
9317 		eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]);
9318 
9319 		/* Propagate most restrictive effective clamps */
9320 		clamps = 0x0;
9321 		uc_se = css_tg(css)->uclamp;
9322 		for_each_clamp_id(clamp_id) {
9323 			if (eff[clamp_id] == uc_se[clamp_id].value)
9324 				continue;
9325 			uc_se[clamp_id].value = eff[clamp_id];
9326 			uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]);
9327 			clamps |= (0x1 << clamp_id);
9328 		}
9329 		if (!clamps) {
9330 			css = css_rightmost_descendant(css);
9331 			continue;
9332 		}
9333 
9334 		/* Immediately update descendants RUNNABLE tasks */
9335 		uclamp_update_active_tasks(css);
9336 	}
9337 }
9338 
9339 /*
9340  * Integer 10^N with a given N exponent by casting to integer the literal "1eN"
9341  * C expression. Since there is no way to convert a macro argument (N) into a
9342  * character constant, use two levels of macros.
9343  */
9344 #define _POW10(exp) ((unsigned int)1e##exp)
9345 #define POW10(exp) _POW10(exp)
9346 
9347 struct uclamp_request {
9348 #define UCLAMP_PERCENT_SHIFT	2
9349 #define UCLAMP_PERCENT_SCALE	(100 * POW10(UCLAMP_PERCENT_SHIFT))
9350 	s64 percent;
9351 	u64 util;
9352 	int ret;
9353 };
9354 
9355 static inline struct uclamp_request
9356 capacity_from_percent(char *buf)
9357 {
9358 	struct uclamp_request req = {
9359 		.percent = UCLAMP_PERCENT_SCALE,
9360 		.util = SCHED_CAPACITY_SCALE,
9361 		.ret = 0,
9362 	};
9363 
9364 	buf = strim(buf);
9365 	if (strcmp(buf, "max")) {
9366 		req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT,
9367 					     &req.percent);
9368 		if (req.ret)
9369 			return req;
9370 		if ((u64)req.percent > UCLAMP_PERCENT_SCALE) {
9371 			req.ret = -ERANGE;
9372 			return req;
9373 		}
9374 
9375 		req.util = req.percent << SCHED_CAPACITY_SHIFT;
9376 		req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE);
9377 	}
9378 
9379 	return req;
9380 }
9381 
9382 static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf,
9383 				size_t nbytes, loff_t off,
9384 				enum uclamp_id clamp_id)
9385 {
9386 	struct uclamp_request req;
9387 	struct task_group *tg;
9388 
9389 	req = capacity_from_percent(buf);
9390 	if (req.ret)
9391 		return req.ret;
9392 
9393 	sched_uclamp_enable();
9394 
9395 	guard(mutex)(&uclamp_mutex);
9396 	guard(rcu)();
9397 
9398 	tg = css_tg(of_css(of));
9399 	if (tg->uclamp_req[clamp_id].value != req.util)
9400 		uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false);
9401 
9402 	/*
9403 	 * Because of not recoverable conversion rounding we keep track of the
9404 	 * exact requested value
9405 	 */
9406 	tg->uclamp_pct[clamp_id] = req.percent;
9407 
9408 	/* Update effective clamps to track the most restrictive value */
9409 	cpu_util_update_eff(of_css(of));
9410 
9411 	return nbytes;
9412 }
9413 
9414 static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of,
9415 				    char *buf, size_t nbytes,
9416 				    loff_t off)
9417 {
9418 	return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MIN);
9419 }
9420 
9421 static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of,
9422 				    char *buf, size_t nbytes,
9423 				    loff_t off)
9424 {
9425 	return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MAX);
9426 }
9427 
9428 static inline void cpu_uclamp_print(struct seq_file *sf,
9429 				    enum uclamp_id clamp_id)
9430 {
9431 	struct task_group *tg;
9432 	u64 util_clamp;
9433 	u64 percent;
9434 	u32 rem;
9435 
9436 	scoped_guard (rcu) {
9437 		tg = css_tg(seq_css(sf));
9438 		util_clamp = tg->uclamp_req[clamp_id].value;
9439 	}
9440 
9441 	if (util_clamp == SCHED_CAPACITY_SCALE) {
9442 		seq_puts(sf, "max\n");
9443 		return;
9444 	}
9445 
9446 	percent = tg->uclamp_pct[clamp_id];
9447 	percent = div_u64_rem(percent, POW10(UCLAMP_PERCENT_SHIFT), &rem);
9448 	seq_printf(sf, "%llu.%0*u\n", percent, UCLAMP_PERCENT_SHIFT, rem);
9449 }
9450 
9451 static int cpu_uclamp_min_show(struct seq_file *sf, void *v)
9452 {
9453 	cpu_uclamp_print(sf, UCLAMP_MIN);
9454 	return 0;
9455 }
9456 
9457 static int cpu_uclamp_max_show(struct seq_file *sf, void *v)
9458 {
9459 	cpu_uclamp_print(sf, UCLAMP_MAX);
9460 	return 0;
9461 }
9462 #endif /* CONFIG_UCLAMP_TASK_GROUP */
9463 
9464 #ifdef CONFIG_GROUP_SCHED_WEIGHT
9465 static unsigned long tg_weight(struct task_group *tg)
9466 {
9467 #ifdef CONFIG_FAIR_GROUP_SCHED
9468 	return scale_load_down(tg->shares);
9469 #else
9470 	return sched_weight_from_cgroup(tg->scx.weight);
9471 #endif
9472 }
9473 
9474 static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
9475 				struct cftype *cftype, u64 shareval)
9476 {
9477 	int ret;
9478 
9479 	if (shareval > scale_load_down(ULONG_MAX))
9480 		shareval = MAX_SHARES;
9481 	ret = sched_group_set_shares(css_tg(css), scale_load(shareval));
9482 	if (!ret)
9483 		scx_group_set_weight(css_tg(css),
9484 				     sched_weight_to_cgroup(shareval));
9485 	return ret;
9486 }
9487 
9488 static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
9489 			       struct cftype *cft)
9490 {
9491 	return tg_weight(css_tg(css));
9492 }
9493 #endif /* CONFIG_GROUP_SCHED_WEIGHT */
9494 
9495 #ifdef CONFIG_CFS_BANDWIDTH
9496 static DEFINE_MUTEX(cfs_constraints_mutex);
9497 
9498 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
9499 
9500 static int tg_set_cfs_bandwidth(struct task_group *tg,
9501 				u64 period_us, u64 quota_us, u64 burst_us)
9502 {
9503 	int i, ret = 0, runtime_enabled, runtime_was_enabled;
9504 	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9505 	u64 period, quota, burst;
9506 
9507 	period = (u64)period_us * NSEC_PER_USEC;
9508 
9509 	if (quota_us == RUNTIME_INF)
9510 		quota = RUNTIME_INF;
9511 	else
9512 		quota = (u64)quota_us * NSEC_PER_USEC;
9513 
9514 	burst = (u64)burst_us * NSEC_PER_USEC;
9515 
9516 	/*
9517 	 * Prevent race between setting of cfs_rq->runtime_enabled and
9518 	 * unthrottle_offline_cfs_rqs().
9519 	 */
9520 	guard(cpus_read_lock)();
9521 	guard(mutex)(&cfs_constraints_mutex);
9522 
9523 	ret = __cfs_schedulable(tg, period, quota);
9524 	if (ret)
9525 		return ret;
9526 
9527 	runtime_enabled = quota != RUNTIME_INF;
9528 	runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
9529 	/*
9530 	 * If we need to toggle cfs_bandwidth_used, off->on must occur
9531 	 * before making related changes, and on->off must occur afterwards
9532 	 */
9533 	if (runtime_enabled && !runtime_was_enabled)
9534 		cfs_bandwidth_usage_inc();
9535 
9536 	scoped_guard (raw_spinlock_irq, &cfs_b->lock) {
9537 		cfs_b->period = ns_to_ktime(period);
9538 		cfs_b->quota = quota;
9539 		cfs_b->burst = burst;
9540 
9541 		__refill_cfs_bandwidth_runtime(cfs_b);
9542 
9543 		/*
9544 		 * Restart the period timer (if active) to handle new
9545 		 * period expiry:
9546 		 */
9547 		if (runtime_enabled)
9548 			start_cfs_bandwidth(cfs_b);
9549 	}
9550 
9551 	for_each_online_cpu(i) {
9552 		struct cfs_rq *cfs_rq = tg->cfs_rq[i];
9553 		struct rq *rq = cfs_rq->rq;
9554 
9555 		guard(rq_lock_irq)(rq);
9556 		cfs_rq->runtime_enabled = runtime_enabled;
9557 		cfs_rq->runtime_remaining = 1;
9558 
9559 		if (cfs_rq->throttled)
9560 			unthrottle_cfs_rq(cfs_rq);
9561 	}
9562 
9563 	if (runtime_was_enabled && !runtime_enabled)
9564 		cfs_bandwidth_usage_dec();
9565 
9566 	return 0;
9567 }
9568 
9569 static u64 tg_get_cfs_period(struct task_group *tg)
9570 {
9571 	u64 cfs_period_us;
9572 
9573 	cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
9574 	do_div(cfs_period_us, NSEC_PER_USEC);
9575 
9576 	return cfs_period_us;
9577 }
9578 
9579 static u64 tg_get_cfs_quota(struct task_group *tg)
9580 {
9581 	u64 quota_us;
9582 
9583 	if (tg->cfs_bandwidth.quota == RUNTIME_INF)
9584 		return RUNTIME_INF;
9585 
9586 	quota_us = tg->cfs_bandwidth.quota;
9587 	do_div(quota_us, NSEC_PER_USEC);
9588 
9589 	return quota_us;
9590 }
9591 
9592 static u64 tg_get_cfs_burst(struct task_group *tg)
9593 {
9594 	u64 burst_us;
9595 
9596 	burst_us = tg->cfs_bandwidth.burst;
9597 	do_div(burst_us, NSEC_PER_USEC);
9598 
9599 	return burst_us;
9600 }
9601 
9602 struct cfs_schedulable_data {
9603 	struct task_group *tg;
9604 	u64 period, quota;
9605 };
9606 
9607 /*
9608  * normalize group quota/period to be quota/max_period
9609  * note: units are usecs
9610  */
9611 static u64 normalize_cfs_quota(struct task_group *tg,
9612 			       struct cfs_schedulable_data *d)
9613 {
9614 	u64 quota, period;
9615 
9616 	if (tg == d->tg) {
9617 		period = d->period;
9618 		quota = d->quota;
9619 	} else {
9620 		period = tg_get_cfs_period(tg);
9621 		quota = tg_get_cfs_quota(tg);
9622 	}
9623 
9624 	/* note: these should typically be equivalent */
9625 	if (quota == RUNTIME_INF || quota == -1)
9626 		return RUNTIME_INF;
9627 
9628 	return to_ratio(period, quota);
9629 }
9630 
9631 static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
9632 {
9633 	struct cfs_schedulable_data *d = data;
9634 	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9635 	s64 quota = 0, parent_quota = -1;
9636 
9637 	if (!tg->parent) {
9638 		quota = RUNTIME_INF;
9639 	} else {
9640 		struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
9641 
9642 		quota = normalize_cfs_quota(tg, d);
9643 		parent_quota = parent_b->hierarchical_quota;
9644 
9645 		/*
9646 		 * Ensure max(child_quota) <= parent_quota.  On cgroup2,
9647 		 * always take the non-RUNTIME_INF min.  On cgroup1, only
9648 		 * inherit when no limit is set. In both cases this is used
9649 		 * by the scheduler to determine if a given CFS task has a
9650 		 * bandwidth constraint at some higher level.
9651 		 */
9652 		if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) {
9653 			if (quota == RUNTIME_INF)
9654 				quota = parent_quota;
9655 			else if (parent_quota != RUNTIME_INF)
9656 				quota = min(quota, parent_quota);
9657 		} else {
9658 			if (quota == RUNTIME_INF)
9659 				quota = parent_quota;
9660 			else if (parent_quota != RUNTIME_INF && quota > parent_quota)
9661 				return -EINVAL;
9662 		}
9663 	}
9664 	cfs_b->hierarchical_quota = quota;
9665 
9666 	return 0;
9667 }
9668 
9669 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
9670 {
9671 	struct cfs_schedulable_data data = {
9672 		.tg = tg,
9673 		.period = period,
9674 		.quota = quota,
9675 	};
9676 
9677 	if (quota != RUNTIME_INF) {
9678 		do_div(data.period, NSEC_PER_USEC);
9679 		do_div(data.quota, NSEC_PER_USEC);
9680 	}
9681 
9682 	guard(rcu)();
9683 	return walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
9684 }
9685 
9686 static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
9687 {
9688 	struct task_group *tg = css_tg(seq_css(sf));
9689 	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9690 
9691 	seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
9692 	seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
9693 	seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
9694 
9695 	if (schedstat_enabled() && tg != &root_task_group) {
9696 		struct sched_statistics *stats;
9697 		u64 ws = 0;
9698 		int i;
9699 
9700 		for_each_possible_cpu(i) {
9701 			stats = __schedstats_from_se(tg->se[i]);
9702 			ws += schedstat_val(stats->wait_sum);
9703 		}
9704 
9705 		seq_printf(sf, "wait_sum %llu\n", ws);
9706 	}
9707 
9708 	seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst);
9709 	seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time);
9710 
9711 	return 0;
9712 }
9713 
9714 static u64 throttled_time_self(struct task_group *tg)
9715 {
9716 	int i;
9717 	u64 total = 0;
9718 
9719 	for_each_possible_cpu(i) {
9720 		total += READ_ONCE(tg->cfs_rq[i]->throttled_clock_self_time);
9721 	}
9722 
9723 	return total;
9724 }
9725 
9726 static int cpu_cfs_local_stat_show(struct seq_file *sf, void *v)
9727 {
9728 	struct task_group *tg = css_tg(seq_css(sf));
9729 
9730 	seq_printf(sf, "throttled_time %llu\n", throttled_time_self(tg));
9731 
9732 	return 0;
9733 }
9734 #endif /* CONFIG_CFS_BANDWIDTH */
9735 
9736 #ifdef CONFIG_GROUP_SCHED_BANDWIDTH
9737 const u64 max_bw_quota_period_us = 1 * USEC_PER_SEC; /* 1s */
9738 static const u64 min_bw_quota_period_us = 1 * USEC_PER_MSEC; /* 1ms */
9739 /* More than 203 days if BW_SHIFT equals 20. */
9740 static const u64 max_bw_runtime_us = MAX_BW;
9741 
9742 static void tg_bandwidth(struct task_group *tg,
9743 			 u64 *period_us_p, u64 *quota_us_p, u64 *burst_us_p)
9744 {
9745 #ifdef CONFIG_CFS_BANDWIDTH
9746 	if (period_us_p)
9747 		*period_us_p = tg_get_cfs_period(tg);
9748 	if (quota_us_p)
9749 		*quota_us_p = tg_get_cfs_quota(tg);
9750 	if (burst_us_p)
9751 		*burst_us_p = tg_get_cfs_burst(tg);
9752 #else /* !CONFIG_CFS_BANDWIDTH */
9753 	if (period_us_p)
9754 		*period_us_p = tg->scx.bw_period_us;
9755 	if (quota_us_p)
9756 		*quota_us_p = tg->scx.bw_quota_us;
9757 	if (burst_us_p)
9758 		*burst_us_p = tg->scx.bw_burst_us;
9759 #endif /* CONFIG_CFS_BANDWIDTH */
9760 }
9761 
9762 static u64 cpu_period_read_u64(struct cgroup_subsys_state *css,
9763 			       struct cftype *cft)
9764 {
9765 	u64 period_us;
9766 
9767 	tg_bandwidth(css_tg(css), &period_us, NULL, NULL);
9768 	return period_us;
9769 }
9770 
9771 static int tg_set_bandwidth(struct task_group *tg,
9772 			    u64 period_us, u64 quota_us, u64 burst_us)
9773 {
9774 	const u64 max_usec = U64_MAX / NSEC_PER_USEC;
9775 	int ret = 0;
9776 
9777 	if (tg == &root_task_group)
9778 		return -EINVAL;
9779 
9780 	/* Values should survive translation to nsec */
9781 	if (period_us > max_usec ||
9782 	    (quota_us != RUNTIME_INF && quota_us > max_usec) ||
9783 	    burst_us > max_usec)
9784 		return -EINVAL;
9785 
9786 	/*
9787 	 * Ensure we have some amount of bandwidth every period. This is to
9788 	 * prevent reaching a state of large arrears when throttled via
9789 	 * entity_tick() resulting in prolonged exit starvation.
9790 	 */
9791 	if (quota_us < min_bw_quota_period_us ||
9792 	    period_us < min_bw_quota_period_us)
9793 		return -EINVAL;
9794 
9795 	/*
9796 	 * Likewise, bound things on the other side by preventing insane quota
9797 	 * periods.  This also allows us to normalize in computing quota
9798 	 * feasibility.
9799 	 */
9800 	if (period_us > max_bw_quota_period_us)
9801 		return -EINVAL;
9802 
9803 	/*
9804 	 * Bound quota to defend quota against overflow during bandwidth shift.
9805 	 */
9806 	if (quota_us != RUNTIME_INF && quota_us > max_bw_runtime_us)
9807 		return -EINVAL;
9808 
9809 	if (quota_us != RUNTIME_INF && (burst_us > quota_us ||
9810 					burst_us + quota_us > max_bw_runtime_us))
9811 		return -EINVAL;
9812 
9813 #ifdef CONFIG_CFS_BANDWIDTH
9814 	ret = tg_set_cfs_bandwidth(tg, period_us, quota_us, burst_us);
9815 #endif /* CONFIG_CFS_BANDWIDTH */
9816 	if (!ret)
9817 		scx_group_set_bandwidth(tg, period_us, quota_us, burst_us);
9818 	return ret;
9819 }
9820 
9821 static s64 cpu_quota_read_s64(struct cgroup_subsys_state *css,
9822 			      struct cftype *cft)
9823 {
9824 	u64 quota_us;
9825 
9826 	tg_bandwidth(css_tg(css), NULL, &quota_us, NULL);
9827 	return quota_us;	/* (s64)RUNTIME_INF becomes -1 */
9828 }
9829 
9830 static u64 cpu_burst_read_u64(struct cgroup_subsys_state *css,
9831 			      struct cftype *cft)
9832 {
9833 	u64 burst_us;
9834 
9835 	tg_bandwidth(css_tg(css), NULL, NULL, &burst_us);
9836 	return burst_us;
9837 }
9838 
9839 static int cpu_period_write_u64(struct cgroup_subsys_state *css,
9840 				struct cftype *cftype, u64 period_us)
9841 {
9842 	struct task_group *tg = css_tg(css);
9843 	u64 quota_us, burst_us;
9844 
9845 	tg_bandwidth(tg, NULL, &quota_us, &burst_us);
9846 	return tg_set_bandwidth(tg, period_us, quota_us, burst_us);
9847 }
9848 
9849 static int cpu_quota_write_s64(struct cgroup_subsys_state *css,
9850 			       struct cftype *cftype, s64 quota_us)
9851 {
9852 	struct task_group *tg = css_tg(css);
9853 	u64 period_us, burst_us;
9854 
9855 	if (quota_us < 0)
9856 		quota_us = RUNTIME_INF;
9857 
9858 	tg_bandwidth(tg, &period_us, NULL, &burst_us);
9859 	return tg_set_bandwidth(tg, period_us, quota_us, burst_us);
9860 }
9861 
9862 static int cpu_burst_write_u64(struct cgroup_subsys_state *css,
9863 			       struct cftype *cftype, u64 burst_us)
9864 {
9865 	struct task_group *tg = css_tg(css);
9866 	u64 period_us, quota_us;
9867 
9868 	tg_bandwidth(tg, &period_us, &quota_us, NULL);
9869 	return tg_set_bandwidth(tg, period_us, quota_us, burst_us);
9870 }
9871 #endif /* CONFIG_GROUP_SCHED_BANDWIDTH */
9872 
9873 #ifdef CONFIG_RT_GROUP_SCHED
9874 static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
9875 				struct cftype *cft, s64 val)
9876 {
9877 	return sched_group_set_rt_runtime(css_tg(css), val);
9878 }
9879 
9880 static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
9881 			       struct cftype *cft)
9882 {
9883 	return sched_group_rt_runtime(css_tg(css));
9884 }
9885 
9886 static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
9887 				    struct cftype *cftype, u64 rt_period_us)
9888 {
9889 	return sched_group_set_rt_period(css_tg(css), rt_period_us);
9890 }
9891 
9892 static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
9893 				   struct cftype *cft)
9894 {
9895 	return sched_group_rt_period(css_tg(css));
9896 }
9897 #endif /* CONFIG_RT_GROUP_SCHED */
9898 
9899 #ifdef CONFIG_GROUP_SCHED_WEIGHT
9900 static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css,
9901 			       struct cftype *cft)
9902 {
9903 	return css_tg(css)->idle;
9904 }
9905 
9906 static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
9907 				struct cftype *cft, s64 idle)
9908 {
9909 	int ret;
9910 
9911 	ret = sched_group_set_idle(css_tg(css), idle);
9912 	if (!ret)
9913 		scx_group_set_idle(css_tg(css), idle);
9914 	return ret;
9915 }
9916 #endif /* CONFIG_GROUP_SCHED_WEIGHT */
9917 
9918 static struct cftype cpu_legacy_files[] = {
9919 #ifdef CONFIG_GROUP_SCHED_WEIGHT
9920 	{
9921 		.name = "shares",
9922 		.read_u64 = cpu_shares_read_u64,
9923 		.write_u64 = cpu_shares_write_u64,
9924 	},
9925 	{
9926 		.name = "idle",
9927 		.read_s64 = cpu_idle_read_s64,
9928 		.write_s64 = cpu_idle_write_s64,
9929 	},
9930 #endif
9931 #ifdef CONFIG_GROUP_SCHED_BANDWIDTH
9932 	{
9933 		.name = "cfs_period_us",
9934 		.read_u64 = cpu_period_read_u64,
9935 		.write_u64 = cpu_period_write_u64,
9936 	},
9937 	{
9938 		.name = "cfs_quota_us",
9939 		.read_s64 = cpu_quota_read_s64,
9940 		.write_s64 = cpu_quota_write_s64,
9941 	},
9942 	{
9943 		.name = "cfs_burst_us",
9944 		.read_u64 = cpu_burst_read_u64,
9945 		.write_u64 = cpu_burst_write_u64,
9946 	},
9947 #endif
9948 #ifdef CONFIG_CFS_BANDWIDTH
9949 	{
9950 		.name = "stat",
9951 		.seq_show = cpu_cfs_stat_show,
9952 	},
9953 	{
9954 		.name = "stat.local",
9955 		.seq_show = cpu_cfs_local_stat_show,
9956 	},
9957 #endif
9958 #ifdef CONFIG_UCLAMP_TASK_GROUP
9959 	{
9960 		.name = "uclamp.min",
9961 		.flags = CFTYPE_NOT_ON_ROOT,
9962 		.seq_show = cpu_uclamp_min_show,
9963 		.write = cpu_uclamp_min_write,
9964 	},
9965 	{
9966 		.name = "uclamp.max",
9967 		.flags = CFTYPE_NOT_ON_ROOT,
9968 		.seq_show = cpu_uclamp_max_show,
9969 		.write = cpu_uclamp_max_write,
9970 	},
9971 #endif
9972 	{ }	/* Terminate */
9973 };
9974 
9975 #ifdef CONFIG_RT_GROUP_SCHED
9976 static struct cftype rt_group_files[] = {
9977 	{
9978 		.name = "rt_runtime_us",
9979 		.read_s64 = cpu_rt_runtime_read,
9980 		.write_s64 = cpu_rt_runtime_write,
9981 	},
9982 	{
9983 		.name = "rt_period_us",
9984 		.read_u64 = cpu_rt_period_read_uint,
9985 		.write_u64 = cpu_rt_period_write_uint,
9986 	},
9987 	{ }	/* Terminate */
9988 };
9989 
9990 # ifdef CONFIG_RT_GROUP_SCHED_DEFAULT_DISABLED
9991 DEFINE_STATIC_KEY_FALSE(rt_group_sched);
9992 # else
9993 DEFINE_STATIC_KEY_TRUE(rt_group_sched);
9994 # endif
9995 
9996 static int __init setup_rt_group_sched(char *str)
9997 {
9998 	long val;
9999 
10000 	if (kstrtol(str, 0, &val) || val < 0 || val > 1) {
10001 		pr_warn("Unable to set rt_group_sched\n");
10002 		return 1;
10003 	}
10004 	if (val)
10005 		static_branch_enable(&rt_group_sched);
10006 	else
10007 		static_branch_disable(&rt_group_sched);
10008 
10009 	return 1;
10010 }
10011 __setup("rt_group_sched=", setup_rt_group_sched);
10012 
10013 static int __init cpu_rt_group_init(void)
10014 {
10015 	if (!rt_group_sched_enabled())
10016 		return 0;
10017 
10018 	WARN_ON(cgroup_add_legacy_cftypes(&cpu_cgrp_subsys, rt_group_files));
10019 	return 0;
10020 }
10021 subsys_initcall(cpu_rt_group_init);
10022 #endif /* CONFIG_RT_GROUP_SCHED */
10023 
10024 static int cpu_extra_stat_show(struct seq_file *sf,
10025 			       struct cgroup_subsys_state *css)
10026 {
10027 #ifdef CONFIG_CFS_BANDWIDTH
10028 	{
10029 		struct task_group *tg = css_tg(css);
10030 		struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
10031 		u64 throttled_usec, burst_usec;
10032 
10033 		throttled_usec = cfs_b->throttled_time;
10034 		do_div(throttled_usec, NSEC_PER_USEC);
10035 		burst_usec = cfs_b->burst_time;
10036 		do_div(burst_usec, NSEC_PER_USEC);
10037 
10038 		seq_printf(sf, "nr_periods %d\n"
10039 			   "nr_throttled %d\n"
10040 			   "throttled_usec %llu\n"
10041 			   "nr_bursts %d\n"
10042 			   "burst_usec %llu\n",
10043 			   cfs_b->nr_periods, cfs_b->nr_throttled,
10044 			   throttled_usec, cfs_b->nr_burst, burst_usec);
10045 	}
10046 #endif /* CONFIG_CFS_BANDWIDTH */
10047 	return 0;
10048 }
10049 
10050 static int cpu_local_stat_show(struct seq_file *sf,
10051 			       struct cgroup_subsys_state *css)
10052 {
10053 #ifdef CONFIG_CFS_BANDWIDTH
10054 	{
10055 		struct task_group *tg = css_tg(css);
10056 		u64 throttled_self_usec;
10057 
10058 		throttled_self_usec = throttled_time_self(tg);
10059 		do_div(throttled_self_usec, NSEC_PER_USEC);
10060 
10061 		seq_printf(sf, "throttled_usec %llu\n",
10062 			   throttled_self_usec);
10063 	}
10064 #endif
10065 	return 0;
10066 }
10067 
10068 #ifdef CONFIG_GROUP_SCHED_WEIGHT
10069 
10070 static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
10071 			       struct cftype *cft)
10072 {
10073 	return sched_weight_to_cgroup(tg_weight(css_tg(css)));
10074 }
10075 
10076 static int cpu_weight_write_u64(struct cgroup_subsys_state *css,
10077 				struct cftype *cft, u64 cgrp_weight)
10078 {
10079 	unsigned long weight;
10080 	int ret;
10081 
10082 	if (cgrp_weight < CGROUP_WEIGHT_MIN || cgrp_weight > CGROUP_WEIGHT_MAX)
10083 		return -ERANGE;
10084 
10085 	weight = sched_weight_from_cgroup(cgrp_weight);
10086 
10087 	ret = sched_group_set_shares(css_tg(css), scale_load(weight));
10088 	if (!ret)
10089 		scx_group_set_weight(css_tg(css), cgrp_weight);
10090 	return ret;
10091 }
10092 
10093 static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css,
10094 				    struct cftype *cft)
10095 {
10096 	unsigned long weight = tg_weight(css_tg(css));
10097 	int last_delta = INT_MAX;
10098 	int prio, delta;
10099 
10100 	/* find the closest nice value to the current weight */
10101 	for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) {
10102 		delta = abs(sched_prio_to_weight[prio] - weight);
10103 		if (delta >= last_delta)
10104 			break;
10105 		last_delta = delta;
10106 	}
10107 
10108 	return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO);
10109 }
10110 
10111 static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
10112 				     struct cftype *cft, s64 nice)
10113 {
10114 	unsigned long weight;
10115 	int idx, ret;
10116 
10117 	if (nice < MIN_NICE || nice > MAX_NICE)
10118 		return -ERANGE;
10119 
10120 	idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO;
10121 	idx = array_index_nospec(idx, 40);
10122 	weight = sched_prio_to_weight[idx];
10123 
10124 	ret = sched_group_set_shares(css_tg(css), scale_load(weight));
10125 	if (!ret)
10126 		scx_group_set_weight(css_tg(css),
10127 				     sched_weight_to_cgroup(weight));
10128 	return ret;
10129 }
10130 #endif /* CONFIG_GROUP_SCHED_WEIGHT */
10131 
10132 static void __maybe_unused cpu_period_quota_print(struct seq_file *sf,
10133 						  long period, long quota)
10134 {
10135 	if (quota < 0)
10136 		seq_puts(sf, "max");
10137 	else
10138 		seq_printf(sf, "%ld", quota);
10139 
10140 	seq_printf(sf, " %ld\n", period);
10141 }
10142 
10143 /* caller should put the current value in *@periodp before calling */
10144 static int __maybe_unused cpu_period_quota_parse(char *buf, u64 *period_us_p,
10145 						 u64 *quota_us_p)
10146 {
10147 	char tok[21];	/* U64_MAX */
10148 
10149 	if (sscanf(buf, "%20s %llu", tok, period_us_p) < 1)
10150 		return -EINVAL;
10151 
10152 	if (sscanf(tok, "%llu", quota_us_p) < 1) {
10153 		if (!strcmp(tok, "max"))
10154 			*quota_us_p = RUNTIME_INF;
10155 		else
10156 			return -EINVAL;
10157 	}
10158 
10159 	return 0;
10160 }
10161 
10162 #ifdef CONFIG_GROUP_SCHED_BANDWIDTH
10163 static int cpu_max_show(struct seq_file *sf, void *v)
10164 {
10165 	struct task_group *tg = css_tg(seq_css(sf));
10166 	u64 period_us, quota_us;
10167 
10168 	tg_bandwidth(tg, &period_us, &quota_us, NULL);
10169 	cpu_period_quota_print(sf, period_us, quota_us);
10170 	return 0;
10171 }
10172 
10173 static ssize_t cpu_max_write(struct kernfs_open_file *of,
10174 			     char *buf, size_t nbytes, loff_t off)
10175 {
10176 	struct task_group *tg = css_tg(of_css(of));
10177 	u64 period_us, quota_us, burst_us;
10178 	int ret;
10179 
10180 	tg_bandwidth(tg, &period_us, NULL, &burst_us);
10181 	ret = cpu_period_quota_parse(buf, &period_us, &quota_us);
10182 	if (!ret)
10183 		ret = tg_set_bandwidth(tg, period_us, quota_us, burst_us);
10184 	return ret ?: nbytes;
10185 }
10186 #endif /* CONFIG_CFS_BANDWIDTH */
10187 
10188 static struct cftype cpu_files[] = {
10189 #ifdef CONFIG_GROUP_SCHED_WEIGHT
10190 	{
10191 		.name = "weight",
10192 		.flags = CFTYPE_NOT_ON_ROOT,
10193 		.read_u64 = cpu_weight_read_u64,
10194 		.write_u64 = cpu_weight_write_u64,
10195 	},
10196 	{
10197 		.name = "weight.nice",
10198 		.flags = CFTYPE_NOT_ON_ROOT,
10199 		.read_s64 = cpu_weight_nice_read_s64,
10200 		.write_s64 = cpu_weight_nice_write_s64,
10201 	},
10202 	{
10203 		.name = "idle",
10204 		.flags = CFTYPE_NOT_ON_ROOT,
10205 		.read_s64 = cpu_idle_read_s64,
10206 		.write_s64 = cpu_idle_write_s64,
10207 	},
10208 #endif
10209 #ifdef CONFIG_GROUP_SCHED_BANDWIDTH
10210 	{
10211 		.name = "max",
10212 		.flags = CFTYPE_NOT_ON_ROOT,
10213 		.seq_show = cpu_max_show,
10214 		.write = cpu_max_write,
10215 	},
10216 	{
10217 		.name = "max.burst",
10218 		.flags = CFTYPE_NOT_ON_ROOT,
10219 		.read_u64 = cpu_burst_read_u64,
10220 		.write_u64 = cpu_burst_write_u64,
10221 	},
10222 #endif /* CONFIG_CFS_BANDWIDTH */
10223 #ifdef CONFIG_UCLAMP_TASK_GROUP
10224 	{
10225 		.name = "uclamp.min",
10226 		.flags = CFTYPE_NOT_ON_ROOT,
10227 		.seq_show = cpu_uclamp_min_show,
10228 		.write = cpu_uclamp_min_write,
10229 	},
10230 	{
10231 		.name = "uclamp.max",
10232 		.flags = CFTYPE_NOT_ON_ROOT,
10233 		.seq_show = cpu_uclamp_max_show,
10234 		.write = cpu_uclamp_max_write,
10235 	},
10236 #endif /* CONFIG_UCLAMP_TASK_GROUP */
10237 	{ }	/* terminate */
10238 };
10239 
10240 struct cgroup_subsys cpu_cgrp_subsys = {
10241 	.css_alloc	= cpu_cgroup_css_alloc,
10242 	.css_online	= cpu_cgroup_css_online,
10243 	.css_offline	= cpu_cgroup_css_offline,
10244 	.css_released	= cpu_cgroup_css_released,
10245 	.css_free	= cpu_cgroup_css_free,
10246 	.css_extra_stat_show = cpu_extra_stat_show,
10247 	.css_local_stat_show = cpu_local_stat_show,
10248 	.can_attach	= cpu_cgroup_can_attach,
10249 	.attach		= cpu_cgroup_attach,
10250 	.cancel_attach	= cpu_cgroup_cancel_attach,
10251 	.legacy_cftypes	= cpu_legacy_files,
10252 	.dfl_cftypes	= cpu_files,
10253 	.early_init	= true,
10254 	.threaded	= true,
10255 };
10256 
10257 #endif /* CONFIG_CGROUP_SCHED */
10258 
10259 void dump_cpu_task(int cpu)
10260 {
10261 	if (in_hardirq() && cpu == smp_processor_id()) {
10262 		struct pt_regs *regs;
10263 
10264 		regs = get_irq_regs();
10265 		if (regs) {
10266 			show_regs(regs);
10267 			return;
10268 		}
10269 	}
10270 
10271 	if (trigger_single_cpu_backtrace(cpu))
10272 		return;
10273 
10274 	pr_info("Task dump for CPU %d:\n", cpu);
10275 	sched_show_task(cpu_curr(cpu));
10276 }
10277 
10278 /*
10279  * Nice levels are multiplicative, with a gentle 10% change for every
10280  * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
10281  * nice 1, it will get ~10% less CPU time than another CPU-bound task
10282  * that remained on nice 0.
10283  *
10284  * The "10% effect" is relative and cumulative: from _any_ nice level,
10285  * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
10286  * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
10287  * If a task goes up by ~10% and another task goes down by ~10% then
10288  * the relative distance between them is ~25%.)
10289  */
10290 const int sched_prio_to_weight[40] = {
10291  /* -20 */     88761,     71755,     56483,     46273,     36291,
10292  /* -15 */     29154,     23254,     18705,     14949,     11916,
10293  /* -10 */      9548,      7620,      6100,      4904,      3906,
10294  /*  -5 */      3121,      2501,      1991,      1586,      1277,
10295  /*   0 */      1024,       820,       655,       526,       423,
10296  /*   5 */       335,       272,       215,       172,       137,
10297  /*  10 */       110,        87,        70,        56,        45,
10298  /*  15 */        36,        29,        23,        18,        15,
10299 };
10300 
10301 /*
10302  * Inverse (2^32/x) values of the sched_prio_to_weight[] array, pre-calculated.
10303  *
10304  * In cases where the weight does not change often, we can use the
10305  * pre-calculated inverse to speed up arithmetics by turning divisions
10306  * into multiplications:
10307  */
10308 const u32 sched_prio_to_wmult[40] = {
10309  /* -20 */     48388,     59856,     76040,     92818,    118348,
10310  /* -15 */    147320,    184698,    229616,    287308,    360437,
10311  /* -10 */    449829,    563644,    704093,    875809,   1099582,
10312  /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
10313  /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
10314  /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
10315  /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
10316  /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
10317 };
10318 
10319 void call_trace_sched_update_nr_running(struct rq *rq, int count)
10320 {
10321         trace_sched_update_nr_running_tp(rq, count);
10322 }
10323 
10324 #ifdef CONFIG_SCHED_MM_CID
10325 /*
10326  * Concurrency IDentifier management
10327  *
10328  * Serialization rules:
10329  *
10330  * mm::mm_cid::mutex:	Serializes fork() and exit() and therefore
10331  *			protects mm::mm_cid::users and mode switch
10332  *			transitions
10333  *
10334  * mm::mm_cid::lock:	Serializes mm_update_max_cids() and
10335  *			mm_update_cpus_allowed(). Nests in mm_cid::mutex
10336  *			and runqueue lock.
10337  *
10338  * The mm_cidmask bitmap is not protected by any of the mm::mm_cid locks
10339  * and can only be modified with atomic operations.
10340  *
10341  * The mm::mm_cid:pcpu per CPU storage is protected by the CPUs runqueue
10342  * lock.
10343  *
10344  * CID ownership:
10345  *
10346  * A CID is either owned by a task (stored in task_struct::mm_cid.cid) or
10347  * by a CPU (stored in mm::mm_cid.pcpu::cid). CIDs owned by CPUs have the
10348  * MM_CID_ONCPU bit set.
10349  *
10350  * During the transition of ownership mode, the MM_CID_TRANSIT bit is set
10351  * on the CIDs. When this bit is set the tasks drop the CID back into the
10352  * pool when scheduling out.
10353  *
10354  * Both bits (ONCPU and TRANSIT) are filtered out by task_cid() when the
10355  * CID is actually handed over to user space in the RSEQ memory.
10356  *
10357  * Mode switching:
10358  *
10359  * The ownership mode is per process and stored in mm:mm_cid::mode with the
10360  * following possible states:
10361  *
10362  *	0:				Per task ownership
10363  *	0 | MM_CID_TRANSIT:		Transition from per CPU to per task
10364  *	MM_CID_ONCPU:			Per CPU ownership
10365  *	MM_CID_ONCPU | MM_CID_TRANSIT:	Transition from per task to per CPU
10366  *
10367  * All transitions of ownership mode happen in two phases:
10368  *
10369  *  1) mm:mm_cid::mode has the MM_CID_TRANSIT bit set. This is OR'ed on the
10370  *     CIDs and denotes that the CID is only temporarily owned by a
10371  *     task. When the task schedules out it drops the CID back into the
10372  *     pool if this bit is set.
10373  *
10374  *  2) The initiating context walks the per CPU space or the tasks to fixup
10375  *     or drop the CIDs and after completion it clears MM_CID_TRANSIT in
10376  *     mm:mm_cid::mode. After that point the CIDs are strictly task or CPU
10377  *     owned again.
10378  *
10379  * This two phase transition is required to prevent CID space exhaustion
10380  * during the transition as a direct transfer of ownership would fail:
10381  *
10382  *   - On task to CPU mode switch if a task is scheduled in on one CPU and
10383  *     then migrated to another CPU before the fixup freed enough per task
10384  *     CIDs.
10385  *
10386  *   - On CPU to task mode switch if two tasks are scheduled in on the same
10387  *     CPU before the fixup freed per CPU CIDs.
10388  *
10389  *   Both scenarios can result in a live lock because sched_in() is invoked
10390  *   with runqueue lock held and loops in search of a CID and the fixup
10391  *   thread can't make progress freeing them up because it is stuck on the
10392  *   same runqueue lock.
10393  *
10394  * While MM_CID_TRANSIT is active during the transition phase the MM_CID
10395  * bitmap can be contended, but that's a temporary contention bound to the
10396  * transition period. After that everything goes back into steady state and
10397  * nothing except fork() and exit() will touch the bitmap. This is an
10398  * acceptable tradeoff as it completely avoids complex serialization,
10399  * memory barriers and atomic operations for the common case.
10400  *
10401  * Aside of that this mechanism also ensures RT compability:
10402  *
10403  *   - The task which runs the fixup is fully preemptible except for the
10404  *     short runqueue lock held sections.
10405  *
10406  *   - The transient impact of the bitmap contention is only problematic
10407  *     when there is a thundering herd scenario of tasks scheduling in and
10408  *     out concurrently. There is not much which can be done about that
10409  *     except for avoiding mode switching by a proper overall system
10410  *     configuration.
10411  *
10412  * Switching to per CPU mode happens when the user count becomes greater
10413  * than the maximum number of CIDs, which is calculated by:
10414  *
10415  *	opt_cids = min(mm_cid::nr_cpus_allowed, mm_cid::users);
10416  *	max_cids = min(1.25 * opt_cids, num_possible_cpus());
10417  *
10418  * The +25% allowance is useful for tight CPU masks in scenarios where only
10419  * a few threads are created and destroyed to avoid frequent mode
10420  * switches. Though this allowance shrinks, the closer opt_cids becomes to
10421  * num_possible_cpus(), which is the (unfortunate) hard ABI limit.
10422  *
10423  * At the point of switching to per CPU mode the new user is not yet
10424  * visible in the system, so the task which initiated the fork() runs the
10425  * fixup function. mm_cid_fixup_tasks_to_cpu() walks the thread list and
10426  * either marks each task owned CID with MM_CID_TRANSIT if the task is
10427  * running on a CPU or drops it into the CID pool if a task is not on a
10428  * CPU. Tasks which schedule in before the task walk reaches them do the
10429  * handover in mm_cid_schedin(). When mm_cid_fixup_tasks_to_cpus()
10430  * completes it is guaranteed that no task related to that MM owns a CID
10431  * anymore.
10432  *
10433  * Switching back to task mode happens when the user count goes below the
10434  * threshold which was recorded on the per CPU mode switch:
10435  *
10436  *	pcpu_thrs = min(opt_cids - (opt_cids / 4), num_possible_cpus() / 2);
10437  *
10438  * This threshold is updated when a affinity change increases the number of
10439  * allowed CPUs for the MM, which might cause a switch back to per task
10440  * mode.
10441  *
10442  * If the switch back was initiated by a exiting task, then that task runs
10443  * the fixup function. If it was initiated by a affinity change, then it's
10444  * run either in the deferred update function in context of a workqueue or
10445  * by a task which forks a new one or by a task which exits. Whatever
10446  * happens first. mm_cid_fixup_cpus_to_task() walks through the possible
10447  * CPUs and either marks the CPU owned CIDs with MM_CID_TRANSIT if a
10448  * related task is running on the CPU or drops it into the pool. Tasks
10449  * which are scheduled in before the fixup covered them do the handover
10450  * themself. When mm_cid_fixup_cpus_to_tasks() completes it is guaranteed
10451  * that no CID related to that MM is owned by a CPU anymore.
10452  */
10453 
10454 /*
10455  * Update the CID range properties when the constraints change. Invoked via
10456  * fork(), exit() and affinity changes
10457  */
10458 static void __mm_update_max_cids(struct mm_mm_cid *mc)
10459 {
10460 	unsigned int opt_cids, max_cids;
10461 
10462 	/* Calculate the new optimal constraint */
10463 	opt_cids = min(mc->nr_cpus_allowed, mc->users);
10464 
10465 	/* Adjust the maximum CIDs to +25% limited by the number of possible CPUs */
10466 	max_cids = min(opt_cids + (opt_cids / 4), num_possible_cpus());
10467 	WRITE_ONCE(mc->max_cids, max_cids);
10468 }
10469 
10470 static inline unsigned int mm_cid_calc_pcpu_thrs(struct mm_mm_cid *mc)
10471 {
10472 	unsigned int opt_cids;
10473 
10474 	opt_cids = min(mc->nr_cpus_allowed, mc->users);
10475 	/* Has to be at least 1 because 0 indicates PCPU mode off */
10476 	return max(min(opt_cids - opt_cids / 4, num_possible_cpus() / 2), 1);
10477 }
10478 
10479 static bool mm_update_max_cids(struct mm_struct *mm)
10480 {
10481 	struct mm_mm_cid *mc = &mm->mm_cid;
10482 	bool percpu = cid_on_cpu(mc->mode);
10483 
10484 	lockdep_assert_held(&mm->mm_cid.lock);
10485 
10486 	/* Clear deferred mode switch flag. A change is handled by the caller */
10487 	mc->update_deferred = false;
10488 	__mm_update_max_cids(mc);
10489 
10490 	/* Check whether owner mode must be changed */
10491 	if (!percpu) {
10492 		/* Enable per CPU mode when the number of users is above max_cids */
10493 		if (mc->users > mc->max_cids)
10494 			mc->pcpu_thrs = mm_cid_calc_pcpu_thrs(mc);
10495 	} else {
10496 		/* Switch back to per task if user count under threshold */
10497 		if (mc->users < mc->pcpu_thrs)
10498 			mc->pcpu_thrs = 0;
10499 	}
10500 
10501 	/* Mode change required? */
10502 	if (percpu == !!mc->pcpu_thrs)
10503 		return false;
10504 
10505 	/* Flip the mode and set the transition flag to bridge the transfer */
10506 	WRITE_ONCE(mc->mode, mc->mode ^ (MM_CID_TRANSIT | MM_CID_ONCPU));
10507 	/*
10508 	 * Order the store against the subsequent fixups so that
10509 	 * acquire(rq::lock) cannot be reordered by the CPU before the
10510 	 * store.
10511 	 */
10512 	smp_mb();
10513 	return true;
10514 }
10515 
10516 static inline void mm_update_cpus_allowed(struct mm_struct *mm, const struct cpumask *affmsk)
10517 {
10518 	struct cpumask *mm_allowed;
10519 	struct mm_mm_cid *mc;
10520 	unsigned int weight;
10521 
10522 	if (!mm || !READ_ONCE(mm->mm_cid.users))
10523 		return;
10524 	/*
10525 	 * mm::mm_cid::mm_cpus_allowed is the superset of each threads
10526 	 * allowed CPUs mask which means it can only grow.
10527 	 */
10528 	mc = &mm->mm_cid;
10529 	guard(raw_spinlock)(&mc->lock);
10530 	mm_allowed = mm_cpus_allowed(mm);
10531 	weight = cpumask_weighted_or(mm_allowed, mm_allowed, affmsk);
10532 	if (weight == mc->nr_cpus_allowed)
10533 		return;
10534 
10535 	WRITE_ONCE(mc->nr_cpus_allowed, weight);
10536 	__mm_update_max_cids(mc);
10537 	if (!cid_on_cpu(mc->mode))
10538 		return;
10539 
10540 	/* Adjust the threshold to the wider set */
10541 	mc->pcpu_thrs = mm_cid_calc_pcpu_thrs(mc);
10542 	/* Switch back to per task mode? */
10543 	if (mc->users >= mc->pcpu_thrs)
10544 		return;
10545 
10546 	/* Don't queue twice */
10547 	if (mc->update_deferred)
10548 		return;
10549 
10550 	/* Queue the irq work, which schedules the real work */
10551 	mc->update_deferred = true;
10552 	irq_work_queue(&mc->irq_work);
10553 }
10554 
10555 static inline void mm_cid_complete_transit(struct mm_struct *mm, unsigned int mode)
10556 {
10557 	/*
10558 	 * Ensure that the store removing the TRANSIT bit cannot be
10559 	 * reordered by the CPU before the fixups have been completed.
10560 	 */
10561 	smp_mb();
10562 	WRITE_ONCE(mm->mm_cid.mode, mode);
10563 }
10564 
10565 static inline void mm_cid_transit_to_task(struct task_struct *t, struct mm_cid_pcpu *pcp)
10566 {
10567 	if (cid_on_cpu(t->mm_cid.cid)) {
10568 		unsigned int cid = cpu_cid_to_cid(t->mm_cid.cid);
10569 
10570 		t->mm_cid.cid = cid_to_transit_cid(cid);
10571 		pcp->cid = t->mm_cid.cid;
10572 	}
10573 }
10574 
10575 static void mm_cid_fixup_cpus_to_tasks(struct mm_struct *mm)
10576 {
10577 	unsigned int cpu;
10578 
10579 	/* Walk the CPUs and fixup all stale CIDs */
10580 	for_each_possible_cpu(cpu) {
10581 		struct mm_cid_pcpu *pcp = per_cpu_ptr(mm->mm_cid.pcpu, cpu);
10582 		struct rq *rq = cpu_rq(cpu);
10583 
10584 		/* Remote access to mm::mm_cid::pcpu requires rq_lock */
10585 		guard(rq_lock_irq)(rq);
10586 		/* Is the CID still owned by the CPU? */
10587 		if (cid_on_cpu(pcp->cid)) {
10588 			/*
10589 			 * If rq->curr has @mm, transfer it with the
10590 			 * transition bit set. Otherwise drop it.
10591 			 */
10592 			if (rq->curr->mm == mm && rq->curr->mm_cid.active)
10593 				mm_cid_transit_to_task(rq->curr, pcp);
10594 			else
10595 				mm_drop_cid_on_cpu(mm, pcp);
10596 
10597 		} else if (rq->curr->mm == mm && rq->curr->mm_cid.active) {
10598 			unsigned int cid = rq->curr->mm_cid.cid;
10599 
10600 			/* Ensure it has the transition bit set */
10601 			if (!cid_in_transit(cid)) {
10602 				cid = cid_to_transit_cid(cid);
10603 				rq->curr->mm_cid.cid = cid;
10604 				pcp->cid = cid;
10605 			}
10606 		}
10607 	}
10608 	mm_cid_complete_transit(mm, 0);
10609 }
10610 
10611 static inline void mm_cid_transit_to_cpu(struct task_struct *t, struct mm_cid_pcpu *pcp)
10612 {
10613 	if (cid_on_task(t->mm_cid.cid)) {
10614 		t->mm_cid.cid = cid_to_transit_cid(t->mm_cid.cid);
10615 		pcp->cid = t->mm_cid.cid;
10616 	}
10617 }
10618 
10619 static bool mm_cid_fixup_task_to_cpu(struct task_struct *t, struct mm_struct *mm)
10620 {
10621 	/* Remote access to mm::mm_cid::pcpu requires rq_lock */
10622 	guard(task_rq_lock)(t);
10623 	/* If the task is not active it is not in the users count */
10624 	if (!t->mm_cid.active)
10625 		return false;
10626 	if (cid_on_task(t->mm_cid.cid)) {
10627 		/* If running on the CPU, put the CID in transit mode, otherwise drop it */
10628 		if (task_rq(t)->curr == t)
10629 			mm_cid_transit_to_cpu(t, per_cpu_ptr(mm->mm_cid.pcpu, task_cpu(t)));
10630 		else
10631 			mm_unset_cid_on_task(t);
10632 	}
10633 	return true;
10634 }
10635 
10636 static void mm_cid_do_fixup_tasks_to_cpus(struct mm_struct *mm)
10637 {
10638 	struct task_struct *p, *t;
10639 	unsigned int users;
10640 
10641 	/*
10642 	 * This can obviously race with a concurrent affinity change, which
10643 	 * increases the number of allowed CPUs for this mm, but that does
10644 	 * not affect the mode and only changes the CID constraints. A
10645 	 * possible switch back to per task mode happens either in the
10646 	 * deferred handler function or in the next fork()/exit().
10647 	 *
10648 	 * The caller has already transferred. The newly incoming task is
10649 	 * already accounted for, but not yet visible.
10650 	 */
10651 	users = mm->mm_cid.users - 2;
10652 	if (!users)
10653 		return;
10654 
10655 	guard(rcu)();
10656 	for_other_threads(current, t) {
10657 		if (mm_cid_fixup_task_to_cpu(t, mm))
10658 			users--;
10659 	}
10660 
10661 	if (!users)
10662 		return;
10663 
10664 	/* Happens only for VM_CLONE processes. */
10665 	for_each_process_thread(p, t) {
10666 		if (t == current || t->mm != mm)
10667 			continue;
10668 		if (mm_cid_fixup_task_to_cpu(t, mm)) {
10669 			if (--users == 0)
10670 				return;
10671 		}
10672 	}
10673 }
10674 
10675 static void mm_cid_fixup_tasks_to_cpus(void)
10676 {
10677 	struct mm_struct *mm = current->mm;
10678 
10679 	mm_cid_do_fixup_tasks_to_cpus(mm);
10680 	mm_cid_complete_transit(mm, MM_CID_ONCPU);
10681 }
10682 
10683 static bool sched_mm_cid_add_user(struct task_struct *t, struct mm_struct *mm)
10684 {
10685 	t->mm_cid.active = 1;
10686 	mm->mm_cid.users++;
10687 	return mm_update_max_cids(mm);
10688 }
10689 
10690 void sched_mm_cid_fork(struct task_struct *t)
10691 {
10692 	struct mm_struct *mm = t->mm;
10693 	bool percpu;
10694 
10695 	WARN_ON_ONCE(!mm || t->mm_cid.cid != MM_CID_UNSET);
10696 
10697 	guard(mutex)(&mm->mm_cid.mutex);
10698 	scoped_guard(raw_spinlock_irq, &mm->mm_cid.lock) {
10699 		struct mm_cid_pcpu *pcp = this_cpu_ptr(mm->mm_cid.pcpu);
10700 
10701 		/* First user ? */
10702 		if (!mm->mm_cid.users) {
10703 			sched_mm_cid_add_user(t, mm);
10704 			t->mm_cid.cid = mm_get_cid(mm);
10705 			/* Required for execve() */
10706 			pcp->cid = t->mm_cid.cid;
10707 			return;
10708 		}
10709 
10710 		if (!sched_mm_cid_add_user(t, mm)) {
10711 			if (!cid_on_cpu(mm->mm_cid.mode))
10712 				t->mm_cid.cid = mm_get_cid(mm);
10713 			return;
10714 		}
10715 
10716 		/* Handle the mode change and transfer current's CID */
10717 		percpu = cid_on_cpu(mm->mm_cid.mode);
10718 		if (!percpu)
10719 			mm_cid_transit_to_task(current, pcp);
10720 		else
10721 			mm_cid_transit_to_cpu(current, pcp);
10722 	}
10723 
10724 	if (percpu) {
10725 		mm_cid_fixup_tasks_to_cpus();
10726 	} else {
10727 		mm_cid_fixup_cpus_to_tasks(mm);
10728 		t->mm_cid.cid = mm_get_cid(mm);
10729 	}
10730 }
10731 
10732 static bool sched_mm_cid_remove_user(struct task_struct *t)
10733 {
10734 	t->mm_cid.active = 0;
10735 	scoped_guard(preempt) {
10736 		/* Clear the transition bit */
10737 		t->mm_cid.cid = cid_from_transit_cid(t->mm_cid.cid);
10738 		mm_unset_cid_on_task(t);
10739 	}
10740 	t->mm->mm_cid.users--;
10741 	return mm_update_max_cids(t->mm);
10742 }
10743 
10744 static bool __sched_mm_cid_exit(struct task_struct *t)
10745 {
10746 	struct mm_struct *mm = t->mm;
10747 
10748 	if (!sched_mm_cid_remove_user(t))
10749 		return false;
10750 	/*
10751 	 * Contrary to fork() this only deals with a switch back to per
10752 	 * task mode either because the above decreased users or an
10753 	 * affinity change increased the number of allowed CPUs and the
10754 	 * deferred fixup did not run yet.
10755 	 */
10756 	if (WARN_ON_ONCE(cid_on_cpu(mm->mm_cid.mode)))
10757 		return false;
10758 	/*
10759 	 * A failed fork(2) cleanup never gets here, so @current must have
10760 	 * the same MM as @t. That's true for exit() and the failed
10761 	 * pthread_create() cleanup case.
10762 	 */
10763 	if (WARN_ON_ONCE(current->mm != mm))
10764 		return false;
10765 	return true;
10766 }
10767 
10768 /*
10769  * When a task exits, the MM CID held by the task is not longer required as
10770  * the task cannot return to user space.
10771  */
10772 void sched_mm_cid_exit(struct task_struct *t)
10773 {
10774 	struct mm_struct *mm = t->mm;
10775 
10776 	if (!mm || !t->mm_cid.active)
10777 		return;
10778 	/*
10779 	 * Ensure that only one instance is doing MM CID operations within
10780 	 * a MM. The common case is uncontended. The rare fixup case adds
10781 	 * some overhead.
10782 	 */
10783 	scoped_guard(mutex, &mm->mm_cid.mutex) {
10784 		/* mm_cid::mutex is sufficient to protect mm_cid::users */
10785 		if (likely(mm->mm_cid.users > 1)) {
10786 			scoped_guard(raw_spinlock_irq, &mm->mm_cid.lock) {
10787 				if (!__sched_mm_cid_exit(t))
10788 					return;
10789 				/*
10790 				 * Mode change. The task has the CID unset
10791 				 * already and dealt with an eventually set
10792 				 * TRANSIT bit. If the CID is owned by the CPU
10793 				 * then drop it.
10794 				 */
10795 				mm_drop_cid_on_cpu(mm, this_cpu_ptr(mm->mm_cid.pcpu));
10796 			}
10797 			mm_cid_fixup_cpus_to_tasks(mm);
10798 			return;
10799 		}
10800 		/* Last user */
10801 		scoped_guard(raw_spinlock_irq, &mm->mm_cid.lock) {
10802 			/* Required across execve() */
10803 			if (t == current)
10804 				mm_cid_transit_to_task(t, this_cpu_ptr(mm->mm_cid.pcpu));
10805 			/* Ignore mode change. There is nothing to do. */
10806 			sched_mm_cid_remove_user(t);
10807 		}
10808 	}
10809 
10810 	/*
10811 	 * As this is the last user (execve(), process exit or failed
10812 	 * fork(2)) there is no concurrency anymore.
10813 	 *
10814 	 * Synchronize eventually pending work to ensure that there are no
10815 	 * dangling references left. @t->mm_cid.users is zero so nothing
10816 	 * can queue this work anymore.
10817 	 */
10818 	irq_work_sync(&mm->mm_cid.irq_work);
10819 	cancel_work_sync(&mm->mm_cid.work);
10820 }
10821 
10822 /* Deactivate MM CID allocation across execve() */
10823 void sched_mm_cid_before_execve(struct task_struct *t)
10824 {
10825 	sched_mm_cid_exit(t);
10826 }
10827 
10828 /* Reactivate MM CID after execve() */
10829 void sched_mm_cid_after_execve(struct task_struct *t)
10830 {
10831 	if (t->mm)
10832 		sched_mm_cid_fork(t);
10833 }
10834 
10835 static void mm_cid_work_fn(struct work_struct *work)
10836 {
10837 	struct mm_struct *mm = container_of(work, struct mm_struct, mm_cid.work);
10838 
10839 	guard(mutex)(&mm->mm_cid.mutex);
10840 	/* Did the last user task exit already? */
10841 	if (!mm->mm_cid.users)
10842 		return;
10843 
10844 	scoped_guard(raw_spinlock_irq, &mm->mm_cid.lock) {
10845 		/* Have fork() or exit() handled it already? */
10846 		if (!mm->mm_cid.update_deferred)
10847 			return;
10848 		/* This clears mm_cid::update_deferred */
10849 		if (!mm_update_max_cids(mm))
10850 			return;
10851 		/* Affinity changes can only switch back to task mode */
10852 		if (WARN_ON_ONCE(cid_on_cpu(mm->mm_cid.mode)))
10853 			return;
10854 	}
10855 	mm_cid_fixup_cpus_to_tasks(mm);
10856 }
10857 
10858 static void mm_cid_irq_work(struct irq_work *work)
10859 {
10860 	struct mm_struct *mm = container_of(work, struct mm_struct, mm_cid.irq_work);
10861 
10862 	/*
10863 	 * Needs to be unconditional because mm_cid::lock cannot be held
10864 	 * when scheduling work as mm_update_cpus_allowed() nests inside
10865 	 * rq::lock and schedule_work() might end up in wakeup...
10866 	 */
10867 	schedule_work(&mm->mm_cid.work);
10868 }
10869 
10870 void mm_init_cid(struct mm_struct *mm, struct task_struct *p)
10871 {
10872 	mm->mm_cid.max_cids = 0;
10873 	mm->mm_cid.mode = 0;
10874 	mm->mm_cid.nr_cpus_allowed = p->nr_cpus_allowed;
10875 	mm->mm_cid.users = 0;
10876 	mm->mm_cid.pcpu_thrs = 0;
10877 	mm->mm_cid.update_deferred = 0;
10878 	raw_spin_lock_init(&mm->mm_cid.lock);
10879 	mutex_init(&mm->mm_cid.mutex);
10880 	mm->mm_cid.irq_work = IRQ_WORK_INIT_HARD(mm_cid_irq_work);
10881 	INIT_WORK(&mm->mm_cid.work, mm_cid_work_fn);
10882 	cpumask_copy(mm_cpus_allowed(mm), &p->cpus_mask);
10883 	bitmap_zero(mm_cidmask(mm), num_possible_cpus());
10884 }
10885 #else /* CONFIG_SCHED_MM_CID */
10886 static inline void mm_update_cpus_allowed(struct mm_struct *mm, const struct cpumask *affmsk) { }
10887 #endif /* !CONFIG_SCHED_MM_CID */
10888 
10889 static DEFINE_PER_CPU(struct sched_change_ctx, sched_change_ctx);
10890 
10891 struct sched_change_ctx *sched_change_begin(struct task_struct *p, unsigned int flags)
10892 {
10893 	struct sched_change_ctx *ctx = this_cpu_ptr(&sched_change_ctx);
10894 	struct rq *rq = task_rq(p);
10895 
10896 	/*
10897 	 * Must exclusively use matched flags since this is both dequeue and
10898 	 * enqueue.
10899 	 */
10900 	WARN_ON_ONCE(flags & 0xFFFF0000);
10901 
10902 	lockdep_assert_rq_held(rq);
10903 
10904 	if (!(flags & DEQUEUE_NOCLOCK)) {
10905 		update_rq_clock(rq);
10906 		flags |= DEQUEUE_NOCLOCK;
10907 	}
10908 
10909 	if ((flags & DEQUEUE_CLASS) && p->sched_class->switching_from)
10910 		p->sched_class->switching_from(rq, p);
10911 
10912 	*ctx = (struct sched_change_ctx){
10913 		.p = p,
10914 		.class = p->sched_class,
10915 		.flags = flags,
10916 		.queued = task_on_rq_queued(p),
10917 		.running = task_current_donor(rq, p),
10918 	};
10919 
10920 	if (!(flags & DEQUEUE_CLASS)) {
10921 		if (p->sched_class->get_prio)
10922 			ctx->prio = p->sched_class->get_prio(rq, p);
10923 		else
10924 			ctx->prio = p->prio;
10925 	}
10926 
10927 	if (ctx->queued)
10928 		dequeue_task(rq, p, flags);
10929 	if (ctx->running)
10930 		put_prev_task(rq, p);
10931 
10932 	if ((flags & DEQUEUE_CLASS) && p->sched_class->switched_from)
10933 		p->sched_class->switched_from(rq, p);
10934 
10935 	return ctx;
10936 }
10937 
10938 void sched_change_end(struct sched_change_ctx *ctx)
10939 {
10940 	struct task_struct *p = ctx->p;
10941 	struct rq *rq = task_rq(p);
10942 
10943 	lockdep_assert_rq_held(rq);
10944 
10945 	/*
10946 	 * Changing class without *QUEUE_CLASS is bad.
10947 	 */
10948 	WARN_ON_ONCE(p->sched_class != ctx->class && !(ctx->flags & ENQUEUE_CLASS));
10949 
10950 	if ((ctx->flags & ENQUEUE_CLASS) && p->sched_class->switching_to)
10951 		p->sched_class->switching_to(rq, p);
10952 
10953 	if (ctx->queued)
10954 		enqueue_task(rq, p, ctx->flags);
10955 	if (ctx->running)
10956 		set_next_task(rq, p);
10957 
10958 	if (ctx->flags & ENQUEUE_CLASS) {
10959 		if (p->sched_class->switched_to)
10960 			p->sched_class->switched_to(rq, p);
10961 
10962 		if (ctx->running) {
10963 			/*
10964 			 * If this was a class promotion; let the old class
10965 			 * know it got preempted. Note that none of the
10966 			 * switch*_from() methods know the new class and none
10967 			 * of the switch*_to() methods know the old class.
10968 			 */
10969 			if (sched_class_above(p->sched_class, ctx->class)) {
10970 				rq->next_class->wakeup_preempt(rq, p, 0);
10971 				rq->next_class = p->sched_class;
10972 			}
10973 			/*
10974 			 * If this was a degradation in class; make sure to
10975 			 * reschedule.
10976 			 */
10977 			if (sched_class_above(ctx->class, p->sched_class))
10978 				resched_curr(rq);
10979 		}
10980 	} else {
10981 		p->sched_class->prio_changed(rq, p, ctx->prio);
10982 	}
10983 }
10984