xref: /linux/kernel/sched/core.c (revision 4b2bdc22210e39a02b3dc984cb8eb6b3293a56a7)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  kernel/sched/core.c
4  *
5  *  Core kernel CPU scheduler code
6  *
7  *  Copyright (C) 1991-2002  Linus Torvalds
8  *  Copyright (C) 1998-2024  Ingo Molnar, Red Hat
9  */
10 #define INSTANTIATE_EXPORTED_MIGRATE_DISABLE
11 #include <linux/sched.h>
12 #include <linux/highmem.h>
13 #include <linux/hrtimer_api.h>
14 #include <linux/ktime_api.h>
15 #include <linux/sched/signal.h>
16 #include <linux/syscalls_api.h>
17 #include <linux/debug_locks.h>
18 #include <linux/prefetch.h>
19 #include <linux/capability.h>
20 #include <linux/pgtable_api.h>
21 #include <linux/wait_bit.h>
22 #include <linux/jiffies.h>
23 #include <linux/spinlock_api.h>
24 #include <linux/cpumask_api.h>
25 #include <linux/lockdep_api.h>
26 #include <linux/hardirq.h>
27 #include <linux/softirq.h>
28 #include <linux/refcount_api.h>
29 #include <linux/topology.h>
30 #include <linux/sched/clock.h>
31 #include <linux/sched/cond_resched.h>
32 #include <linux/sched/cputime.h>
33 #include <linux/sched/debug.h>
34 #include <linux/sched/hotplug.h>
35 #include <linux/sched/init.h>
36 #include <linux/sched/isolation.h>
37 #include <linux/sched/loadavg.h>
38 #include <linux/sched/mm.h>
39 #include <linux/sched/nohz.h>
40 #include <linux/sched/rseq_api.h>
41 #include <linux/sched/rt.h>
42 
43 #include <linux/blkdev.h>
44 #include <linux/context_tracking.h>
45 #include <linux/cpuset.h>
46 #include <linux/delayacct.h>
47 #include <linux/init_task.h>
48 #include <linux/interrupt.h>
49 #include <linux/ioprio.h>
50 #include <linux/kallsyms.h>
51 #include <linux/kcov.h>
52 #include <linux/kprobes.h>
53 #include <linux/llist_api.h>
54 #include <linux/mmu_context.h>
55 #include <linux/mmzone.h>
56 #include <linux/mutex_api.h>
57 #include <linux/nmi.h>
58 #include <linux/nospec.h>
59 #include <linux/perf_event_api.h>
60 #include <linux/profile.h>
61 #include <linux/psi.h>
62 #include <linux/rcuwait_api.h>
63 #include <linux/rseq.h>
64 #include <linux/sched/wake_q.h>
65 #include <linux/scs.h>
66 #include <linux/slab.h>
67 #include <linux/syscalls.h>
68 #include <linux/vtime.h>
69 #include <linux/wait_api.h>
70 #include <linux/workqueue_api.h>
71 #include <linux/livepatch_sched.h>
72 
73 #ifdef CONFIG_PREEMPT_DYNAMIC
74 # ifdef CONFIG_GENERIC_IRQ_ENTRY
75 #  include <linux/irq-entry-common.h>
76 # endif
77 #endif
78 
79 #include <uapi/linux/sched/types.h>
80 
81 #include <asm/irq_regs.h>
82 #include <asm/switch_to.h>
83 #include <asm/tlb.h>
84 
85 #define CREATE_TRACE_POINTS
86 #include <linux/sched/rseq_api.h>
87 #include <trace/events/sched.h>
88 #include <trace/events/ipi.h>
89 #undef CREATE_TRACE_POINTS
90 
91 #include "sched.h"
92 #include "stats.h"
93 
94 #include "autogroup.h"
95 #include "pelt.h"
96 #include "smp.h"
97 
98 #include "../workqueue_internal.h"
99 #include "../../io_uring/io-wq.h"
100 #include "../smpboot.h"
101 #include "../locking/mutex.h"
102 
103 EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
104 EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);
105 
106 /*
107  * Export tracepoints that act as a bare tracehook (ie: have no trace event
108  * associated with them) to allow external modules to probe them.
109  */
110 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp);
111 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
112 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
113 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
114 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
115 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_hw_tp);
116 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp);
117 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
118 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
119 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
120 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
121 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_compute_energy_tp);
122 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_entry_tp);
123 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_exit_tp);
124 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_set_need_resched_tp);
125 
126 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
127 DEFINE_PER_CPU(struct rnd_state, sched_rnd_state);
128 
129 #ifdef CONFIG_SCHED_PROXY_EXEC
130 DEFINE_STATIC_KEY_TRUE(__sched_proxy_exec);
131 static int __init setup_proxy_exec(char *str)
132 {
133 	bool proxy_enable = true;
134 
135 	if (*str && kstrtobool(str + 1, &proxy_enable)) {
136 		pr_warn("Unable to parse sched_proxy_exec=\n");
137 		return 0;
138 	}
139 
140 	if (proxy_enable) {
141 		pr_info("sched_proxy_exec enabled via boot arg\n");
142 		static_branch_enable(&__sched_proxy_exec);
143 	} else {
144 		pr_info("sched_proxy_exec disabled via boot arg\n");
145 		static_branch_disable(&__sched_proxy_exec);
146 	}
147 	return 1;
148 }
149 #else
150 static int __init setup_proxy_exec(char *str)
151 {
152 	pr_warn("CONFIG_SCHED_PROXY_EXEC=n, so it cannot be enabled or disabled at boot time\n");
153 	return 0;
154 }
155 #endif
156 __setup("sched_proxy_exec", setup_proxy_exec);
157 
158 /*
159  * Debugging: various feature bits
160  *
161  * If SCHED_DEBUG is disabled, each compilation unit has its own copy of
162  * sysctl_sched_features, defined in sched.h, to allow constants propagation
163  * at compile time and compiler optimization based on features default.
164  */
165 #define SCHED_FEAT(name, enabled)	\
166 	(1UL << __SCHED_FEAT_##name) * enabled |
167 __read_mostly unsigned int sysctl_sched_features =
168 #include "features.h"
169 	0;
170 #undef SCHED_FEAT
171 
172 /*
173  * Print a warning if need_resched is set for the given duration (if
174  * LATENCY_WARN is enabled).
175  *
176  * If sysctl_resched_latency_warn_once is set, only one warning will be shown
177  * per boot.
178  */
179 __read_mostly int sysctl_resched_latency_warn_ms = 100;
180 __read_mostly int sysctl_resched_latency_warn_once = 1;
181 
182 /*
183  * Number of tasks to iterate in a single balance run.
184  * Limited because this is done with IRQs disabled.
185  */
186 __read_mostly unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;
187 
188 __read_mostly int scheduler_running;
189 
190 #ifdef CONFIG_SCHED_CORE
191 
192 DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);
193 
194 /* kernel prio, less is more */
195 static inline int __task_prio(const struct task_struct *p)
196 {
197 	if (p->sched_class == &stop_sched_class) /* trumps deadline */
198 		return -2;
199 
200 	if (p->dl_server)
201 		return -1; /* deadline */
202 
203 	if (rt_or_dl_prio(p->prio))
204 		return p->prio; /* [-1, 99] */
205 
206 	if (p->sched_class == &idle_sched_class)
207 		return MAX_RT_PRIO + NICE_WIDTH; /* 140 */
208 
209 	if (task_on_scx(p))
210 		return MAX_RT_PRIO + MAX_NICE + 1; /* 120, squash ext */
211 
212 	return MAX_RT_PRIO + MAX_NICE; /* 119, squash fair */
213 }
214 
215 /*
216  * l(a,b)
217  * le(a,b) := !l(b,a)
218  * g(a,b)  := l(b,a)
219  * ge(a,b) := !l(a,b)
220  */
221 
222 /* real prio, less is less */
223 static inline bool prio_less(const struct task_struct *a,
224 			     const struct task_struct *b, bool in_fi)
225 {
226 
227 	int pa = __task_prio(a), pb = __task_prio(b);
228 
229 	if (-pa < -pb)
230 		return true;
231 
232 	if (-pb < -pa)
233 		return false;
234 
235 	if (pa == -1) { /* dl_prio() doesn't work because of stop_class above */
236 		const struct sched_dl_entity *a_dl, *b_dl;
237 
238 		a_dl = &a->dl;
239 		/*
240 		 * Since,'a' and 'b' can be CFS tasks served by DL server,
241 		 * __task_prio() can return -1 (for DL) even for those. In that
242 		 * case, get to the dl_server's DL entity.
243 		 */
244 		if (a->dl_server)
245 			a_dl = a->dl_server;
246 
247 		b_dl = &b->dl;
248 		if (b->dl_server)
249 			b_dl = b->dl_server;
250 
251 		return !dl_time_before(a_dl->deadline, b_dl->deadline);
252 	}
253 
254 	if (pa == MAX_RT_PRIO + MAX_NICE)	/* fair */
255 		return cfs_prio_less(a, b, in_fi);
256 
257 #ifdef CONFIG_SCHED_CLASS_EXT
258 	if (pa == MAX_RT_PRIO + MAX_NICE + 1)	/* ext */
259 		return scx_prio_less(a, b, in_fi);
260 #endif
261 
262 	return false;
263 }
264 
265 static inline bool __sched_core_less(const struct task_struct *a,
266 				     const struct task_struct *b)
267 {
268 	if (a->core_cookie < b->core_cookie)
269 		return true;
270 
271 	if (a->core_cookie > b->core_cookie)
272 		return false;
273 
274 	/* flip prio, so high prio is leftmost */
275 	if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count))
276 		return true;
277 
278 	return false;
279 }
280 
281 #define __node_2_sc(node) rb_entry((node), struct task_struct, core_node)
282 
283 static inline bool rb_sched_core_less(struct rb_node *a, const struct rb_node *b)
284 {
285 	return __sched_core_less(__node_2_sc(a), __node_2_sc(b));
286 }
287 
288 static inline int rb_sched_core_cmp(const void *key, const struct rb_node *node)
289 {
290 	const struct task_struct *p = __node_2_sc(node);
291 	unsigned long cookie = (unsigned long)key;
292 
293 	if (cookie < p->core_cookie)
294 		return -1;
295 
296 	if (cookie > p->core_cookie)
297 		return 1;
298 
299 	return 0;
300 }
301 
302 void sched_core_enqueue(struct rq *rq, struct task_struct *p)
303 {
304 	if (p->se.sched_delayed)
305 		return;
306 
307 	rq->core->core_task_seq++;
308 
309 	if (!p->core_cookie)
310 		return;
311 
312 	rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);
313 }
314 
315 void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags)
316 {
317 	if (p->se.sched_delayed)
318 		return;
319 
320 	rq->core->core_task_seq++;
321 
322 	if (sched_core_enqueued(p)) {
323 		rb_erase(&p->core_node, &rq->core_tree);
324 		RB_CLEAR_NODE(&p->core_node);
325 	}
326 
327 	/*
328 	 * Migrating the last task off the cpu, with the cpu in forced idle
329 	 * state. Reschedule to create an accounting edge for forced idle,
330 	 * and re-examine whether the core is still in forced idle state.
331 	 */
332 	if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 &&
333 	    rq->core->core_forceidle_count && rq->curr == rq->idle)
334 		resched_curr(rq);
335 }
336 
337 static int sched_task_is_throttled(struct task_struct *p, int cpu)
338 {
339 	if (p->sched_class->task_is_throttled)
340 		return p->sched_class->task_is_throttled(p, cpu);
341 
342 	return 0;
343 }
344 
345 static struct task_struct *sched_core_next(struct task_struct *p, unsigned long cookie)
346 {
347 	struct rb_node *node = &p->core_node;
348 	int cpu = task_cpu(p);
349 
350 	do {
351 		node = rb_next(node);
352 		if (!node)
353 			return NULL;
354 
355 		p = __node_2_sc(node);
356 		if (p->core_cookie != cookie)
357 			return NULL;
358 
359 	} while (sched_task_is_throttled(p, cpu));
360 
361 	return p;
362 }
363 
364 /*
365  * Find left-most (aka, highest priority) and unthrottled task matching @cookie.
366  * If no suitable task is found, NULL will be returned.
367  */
368 static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie)
369 {
370 	struct task_struct *p;
371 	struct rb_node *node;
372 
373 	node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp);
374 	if (!node)
375 		return NULL;
376 
377 	p = __node_2_sc(node);
378 	if (!sched_task_is_throttled(p, rq->cpu))
379 		return p;
380 
381 	return sched_core_next(p, cookie);
382 }
383 
384 /*
385  * Magic required such that:
386  *
387  *	raw_spin_rq_lock(rq);
388  *	...
389  *	raw_spin_rq_unlock(rq);
390  *
391  * ends up locking and unlocking the _same_ lock, and all CPUs
392  * always agree on what rq has what lock.
393  *
394  * XXX entirely possible to selectively enable cores, don't bother for now.
395  */
396 
397 static DEFINE_MUTEX(sched_core_mutex);
398 static atomic_t sched_core_count;
399 static struct cpumask sched_core_mask;
400 
401 static void sched_core_lock(int cpu, unsigned long *flags)
402 	__context_unsafe(/* acquires multiple */)
403 	__acquires(&runqueues.__lock) /* overapproximation */
404 {
405 	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
406 	int t, i = 0;
407 
408 	local_irq_save(*flags);
409 	for_each_cpu(t, smt_mask)
410 		raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);
411 }
412 
413 static void sched_core_unlock(int cpu, unsigned long *flags)
414 	__context_unsafe(/* releases multiple */)
415 	__releases(&runqueues.__lock) /* overapproximation */
416 {
417 	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
418 	int t;
419 
420 	for_each_cpu(t, smt_mask)
421 		raw_spin_unlock(&cpu_rq(t)->__lock);
422 	local_irq_restore(*flags);
423 }
424 
425 static void __sched_core_flip(bool enabled)
426 {
427 	unsigned long flags;
428 	int cpu, t;
429 
430 	cpus_read_lock();
431 
432 	/*
433 	 * Toggle the online cores, one by one.
434 	 */
435 	cpumask_copy(&sched_core_mask, cpu_online_mask);
436 	for_each_cpu(cpu, &sched_core_mask) {
437 		const struct cpumask *smt_mask = cpu_smt_mask(cpu);
438 
439 		sched_core_lock(cpu, &flags);
440 
441 		for_each_cpu(t, smt_mask)
442 			cpu_rq(t)->core_enabled = enabled;
443 
444 		cpu_rq(cpu)->core->core_forceidle_start = 0;
445 
446 		sched_core_unlock(cpu, &flags);
447 
448 		cpumask_andnot(&sched_core_mask, &sched_core_mask, smt_mask);
449 	}
450 
451 	/*
452 	 * Toggle the offline CPUs.
453 	 */
454 	for_each_cpu_andnot(cpu, cpu_possible_mask, cpu_online_mask)
455 		cpu_rq(cpu)->core_enabled = enabled;
456 
457 	cpus_read_unlock();
458 }
459 
460 static void sched_core_assert_empty(void)
461 {
462 	int cpu;
463 
464 	for_each_possible_cpu(cpu)
465 		WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree));
466 }
467 
468 static void __sched_core_enable(void)
469 {
470 	static_branch_enable(&__sched_core_enabled);
471 	/*
472 	 * Ensure all previous instances of raw_spin_rq_*lock() have finished
473 	 * and future ones will observe !sched_core_disabled().
474 	 */
475 	synchronize_rcu();
476 	__sched_core_flip(true);
477 	sched_core_assert_empty();
478 }
479 
480 static void __sched_core_disable(void)
481 {
482 	sched_core_assert_empty();
483 	__sched_core_flip(false);
484 	static_branch_disable(&__sched_core_enabled);
485 }
486 
487 void sched_core_get(void)
488 {
489 	if (atomic_inc_not_zero(&sched_core_count))
490 		return;
491 
492 	mutex_lock(&sched_core_mutex);
493 	if (!atomic_read(&sched_core_count))
494 		__sched_core_enable();
495 
496 	smp_mb__before_atomic();
497 	atomic_inc(&sched_core_count);
498 	mutex_unlock(&sched_core_mutex);
499 }
500 
501 static void __sched_core_put(struct work_struct *work)
502 {
503 	if (atomic_dec_and_mutex_lock(&sched_core_count, &sched_core_mutex)) {
504 		__sched_core_disable();
505 		mutex_unlock(&sched_core_mutex);
506 	}
507 }
508 
509 void sched_core_put(void)
510 {
511 	static DECLARE_WORK(_work, __sched_core_put);
512 
513 	/*
514 	 * "There can be only one"
515 	 *
516 	 * Either this is the last one, or we don't actually need to do any
517 	 * 'work'. If it is the last *again*, we rely on
518 	 * WORK_STRUCT_PENDING_BIT.
519 	 */
520 	if (!atomic_add_unless(&sched_core_count, -1, 1))
521 		schedule_work(&_work);
522 }
523 
524 #else /* !CONFIG_SCHED_CORE: */
525 
526 static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { }
527 static inline void
528 sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { }
529 
530 #endif /* !CONFIG_SCHED_CORE */
531 
532 /* need a wrapper since we may need to trace from modules */
533 EXPORT_TRACEPOINT_SYMBOL(sched_set_state_tp);
534 
535 /* Call via the helper macro trace_set_current_state. */
536 void __trace_set_current_state(int state_value)
537 {
538 	trace_sched_set_state_tp(current, state_value);
539 }
540 EXPORT_SYMBOL(__trace_set_current_state);
541 
542 /*
543  * Serialization rules:
544  *
545  * Lock order:
546  *
547  *   p->pi_lock
548  *     rq->lock
549  *       hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
550  *
551  *  rq1->lock
552  *    rq2->lock  where: rq1 < rq2
553  *
554  * Regular state:
555  *
556  * Normal scheduling state is serialized by rq->lock. __schedule() takes the
557  * local CPU's rq->lock, it optionally removes the task from the runqueue and
558  * always looks at the local rq data structures to find the most eligible task
559  * to run next.
560  *
561  * Task enqueue is also under rq->lock, possibly taken from another CPU.
562  * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
563  * the local CPU to avoid bouncing the runqueue state around [ see
564  * ttwu_queue_wakelist() ]
565  *
566  * Task wakeup, specifically wakeups that involve migration, are horribly
567  * complicated to avoid having to take two rq->locks.
568  *
569  * Special state:
570  *
571  * System-calls and anything external will use task_rq_lock() which acquires
572  * both p->pi_lock and rq->lock. As a consequence the state they change is
573  * stable while holding either lock:
574  *
575  *  - sched_setaffinity()/
576  *    set_cpus_allowed_ptr():	p->cpus_ptr, p->nr_cpus_allowed
577  *  - set_user_nice():		p->se.load, p->*prio
578  *  - __sched_setscheduler():	p->sched_class, p->policy, p->*prio,
579  *				p->se.load, p->rt_priority,
580  *				p->dl.dl_{runtime, deadline, period, flags, bw, density}
581  *  - sched_setnuma():		p->numa_preferred_nid
582  *  - sched_move_task():	p->sched_task_group
583  *  - uclamp_update_active()	p->uclamp*
584  *
585  * p->state <- TASK_*:
586  *
587  *   is changed locklessly using set_current_state(), __set_current_state() or
588  *   set_special_state(), see their respective comments, or by
589  *   try_to_wake_up(). This latter uses p->pi_lock to serialize against
590  *   concurrent self.
591  *
592  * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
593  *
594  *   is set by activate_task() and cleared by deactivate_task()/block_task(),
595  *   under rq->lock. Non-zero indicates the task is runnable, the special
596  *   ON_RQ_MIGRATING state is used for migration without holding both
597  *   rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
598  *
599  *   Additionally it is possible to be ->on_rq but still be considered not
600  *   runnable when p->se.sched_delayed is true. These tasks are on the runqueue
601  *   but will be dequeued as soon as they get picked again. See the
602  *   task_is_runnable() helper.
603  *
604  * p->on_cpu <- { 0, 1 }:
605  *
606  *   is set by prepare_task() and cleared by finish_task() such that it will be
607  *   set before p is scheduled-in and cleared after p is scheduled-out, both
608  *   under rq->lock. Non-zero indicates the task is running on its CPU.
609  *
610  *   [ The astute reader will observe that it is possible for two tasks on one
611  *     CPU to have ->on_cpu = 1 at the same time. ]
612  *
613  * task_cpu(p): is changed by set_task_cpu(), the rules are:
614  *
615  *  - Don't call set_task_cpu() on a blocked task:
616  *
617  *    We don't care what CPU we're not running on, this simplifies hotplug,
618  *    the CPU assignment of blocked tasks isn't required to be valid.
619  *
620  *  - for try_to_wake_up(), called under p->pi_lock:
621  *
622  *    This allows try_to_wake_up() to only take one rq->lock, see its comment.
623  *
624  *  - for migration called under rq->lock:
625  *    [ see task_on_rq_migrating() in task_rq_lock() ]
626  *
627  *    o move_queued_task()
628  *    o detach_task()
629  *
630  *  - for migration called under double_rq_lock():
631  *
632  *    o __migrate_swap_task()
633  *    o push_rt_task() / pull_rt_task()
634  *    o push_dl_task() / pull_dl_task()
635  *    o dl_task_offline_migration()
636  *
637  */
638 
639 void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
640 	__context_unsafe()
641 {
642 	raw_spinlock_t *lock;
643 
644 	/* Matches synchronize_rcu() in __sched_core_enable() */
645 	preempt_disable();
646 	if (sched_core_disabled()) {
647 		raw_spin_lock_nested(&rq->__lock, subclass);
648 		/* preempt_count *MUST* be > 1 */
649 		preempt_enable_no_resched();
650 		return;
651 	}
652 
653 	for (;;) {
654 		lock = __rq_lockp(rq);
655 		raw_spin_lock_nested(lock, subclass);
656 		if (likely(lock == __rq_lockp(rq))) {
657 			/* preempt_count *MUST* be > 1 */
658 			preempt_enable_no_resched();
659 			return;
660 		}
661 		raw_spin_unlock(lock);
662 	}
663 }
664 
665 bool raw_spin_rq_trylock(struct rq *rq)
666 	__context_unsafe()
667 {
668 	raw_spinlock_t *lock;
669 	bool ret;
670 
671 	/* Matches synchronize_rcu() in __sched_core_enable() */
672 	preempt_disable();
673 	if (sched_core_disabled()) {
674 		ret = raw_spin_trylock(&rq->__lock);
675 		preempt_enable();
676 		return ret;
677 	}
678 
679 	for (;;) {
680 		lock = __rq_lockp(rq);
681 		ret = raw_spin_trylock(lock);
682 		if (!ret || (likely(lock == __rq_lockp(rq)))) {
683 			preempt_enable();
684 			return ret;
685 		}
686 		raw_spin_unlock(lock);
687 	}
688 }
689 
690 void raw_spin_rq_unlock(struct rq *rq)
691 {
692 	raw_spin_unlock(rq_lockp(rq));
693 }
694 
695 /*
696  * double_rq_lock - safely lock two runqueues
697  */
698 void double_rq_lock(struct rq *rq1, struct rq *rq2)
699 {
700 	lockdep_assert_irqs_disabled();
701 
702 	if (rq_order_less(rq2, rq1))
703 		swap(rq1, rq2);
704 
705 	raw_spin_rq_lock(rq1);
706 	if (__rq_lockp(rq1) != __rq_lockp(rq2))
707 		raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);
708 	else
709 		__acquire_ctx_lock(__rq_lockp(rq2)); /* fake acquire */
710 
711 	double_rq_clock_clear_update(rq1, rq2);
712 }
713 
714 /*
715  * ___task_rq_lock - lock the rq @p resides on.
716  */
717 struct rq *___task_rq_lock(struct task_struct *p, struct rq_flags *rf)
718 {
719 	struct rq *rq;
720 
721 	lockdep_assert_held(&p->pi_lock);
722 
723 	for (;;) {
724 		rq = task_rq(p);
725 		raw_spin_rq_lock(rq);
726 		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
727 			rq_pin_lock(rq, rf);
728 			return rq;
729 		}
730 		raw_spin_rq_unlock(rq);
731 
732 		while (unlikely(task_on_rq_migrating(p)))
733 			cpu_relax();
734 	}
735 }
736 
737 /*
738  * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
739  */
740 struct rq *_task_rq_lock(struct task_struct *p, struct rq_flags *rf)
741 {
742 	struct rq *rq;
743 
744 	for (;;) {
745 		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
746 		rq = task_rq(p);
747 		raw_spin_rq_lock(rq);
748 		/*
749 		 *	move_queued_task()		task_rq_lock()
750 		 *
751 		 *	ACQUIRE (rq->lock)
752 		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
753 		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
754 		 *	[S] ->cpu = new_cpu		[L] task_rq()
755 		 *					[L] ->on_rq
756 		 *	RELEASE (rq->lock)
757 		 *
758 		 * If we observe the old CPU in task_rq_lock(), the acquire of
759 		 * the old rq->lock will fully serialize against the stores.
760 		 *
761 		 * If we observe the new CPU in task_rq_lock(), the address
762 		 * dependency headed by '[L] rq = task_rq()' and the acquire
763 		 * will pair with the WMB to ensure we then also see migrating.
764 		 */
765 		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
766 			rq_pin_lock(rq, rf);
767 			return rq;
768 		}
769 		raw_spin_rq_unlock(rq);
770 		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
771 
772 		while (unlikely(task_on_rq_migrating(p)))
773 			cpu_relax();
774 	}
775 }
776 
777 /*
778  * RQ-clock updating methods:
779  */
780 
781 /* Use CONFIG_PARAVIRT as this will avoid more #ifdef in arch code. */
782 #ifdef CONFIG_PARAVIRT
783 struct static_key paravirt_steal_rq_enabled;
784 #endif
785 
786 static void update_rq_clock_task(struct rq *rq, s64 delta)
787 {
788 /*
789  * In theory, the compile should just see 0 here, and optimize out the call
790  * to sched_rt_avg_update. But I don't trust it...
791  */
792 	s64 __maybe_unused steal = 0, irq_delta = 0;
793 
794 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
795 	if (irqtime_enabled()) {
796 		irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
797 
798 		/*
799 		 * Since irq_time is only updated on {soft,}irq_exit, we might run into
800 		 * this case when a previous update_rq_clock() happened inside a
801 		 * {soft,}IRQ region.
802 		 *
803 		 * When this happens, we stop ->clock_task and only update the
804 		 * prev_irq_time stamp to account for the part that fit, so that a next
805 		 * update will consume the rest. This ensures ->clock_task is
806 		 * monotonic.
807 		 *
808 		 * It does however cause some slight miss-attribution of {soft,}IRQ
809 		 * time, a more accurate solution would be to update the irq_time using
810 		 * the current rq->clock timestamp, except that would require using
811 		 * atomic ops.
812 		 */
813 		if (irq_delta > delta)
814 			irq_delta = delta;
815 
816 		rq->prev_irq_time += irq_delta;
817 		delta -= irq_delta;
818 		delayacct_irq(rq->curr, irq_delta);
819 	}
820 #endif
821 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
822 	if (static_key_false((&paravirt_steal_rq_enabled))) {
823 		u64 prev_steal;
824 
825 		steal = prev_steal = paravirt_steal_clock(cpu_of(rq));
826 		steal -= rq->prev_steal_time_rq;
827 
828 		if (unlikely(steal > delta))
829 			steal = delta;
830 
831 		rq->prev_steal_time_rq = prev_steal;
832 		delta -= steal;
833 	}
834 #endif
835 
836 	rq->clock_task += delta;
837 
838 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
839 	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
840 		update_irq_load_avg(rq, irq_delta + steal);
841 #endif
842 	update_rq_clock_pelt(rq, delta);
843 }
844 
845 void update_rq_clock(struct rq *rq)
846 {
847 	s64 delta;
848 	u64 clock;
849 
850 	lockdep_assert_rq_held(rq);
851 
852 	if (rq->clock_update_flags & RQCF_ACT_SKIP)
853 		return;
854 
855 	if (sched_feat(WARN_DOUBLE_CLOCK))
856 		WARN_ON_ONCE(rq->clock_update_flags & RQCF_UPDATED);
857 	rq->clock_update_flags |= RQCF_UPDATED;
858 
859 	clock = sched_clock_cpu(cpu_of(rq));
860 	scx_rq_clock_update(rq, clock);
861 
862 	delta = clock - rq->clock;
863 	if (delta < 0)
864 		return;
865 	rq->clock += delta;
866 
867 	update_rq_clock_task(rq, delta);
868 }
869 
870 #ifdef CONFIG_SCHED_HRTICK
871 /*
872  * Use HR-timers to deliver accurate preemption points.
873  */
874 
875 enum {
876 	HRTICK_SCHED_NONE		= 0,
877 	HRTICK_SCHED_DEFER		= BIT(1),
878 	HRTICK_SCHED_START		= BIT(2),
879 	HRTICK_SCHED_REARM_HRTIMER	= BIT(3)
880 };
881 
882 static void __used hrtick_clear(struct rq *rq)
883 {
884 	if (hrtimer_active(&rq->hrtick_timer))
885 		hrtimer_cancel(&rq->hrtick_timer);
886 }
887 
888 /*
889  * High-resolution timer tick.
890  * Runs from hardirq context with interrupts disabled.
891  */
892 static enum hrtimer_restart hrtick(struct hrtimer *timer)
893 {
894 	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
895 	struct rq_flags rf;
896 
897 	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
898 
899 	rq_lock(rq, &rf);
900 	update_rq_clock(rq);
901 	rq->donor->sched_class->task_tick(rq, rq->donor, 1);
902 	rq_unlock(rq, &rf);
903 
904 	return HRTIMER_NORESTART;
905 }
906 
907 static inline bool hrtick_needs_rearm(struct hrtimer *timer, ktime_t expires)
908 {
909 	/*
910 	 * Queued is false when the timer is not started or currently
911 	 * running the callback. In both cases, restart. If queued check
912 	 * whether the expiry time actually changes substantially.
913 	 */
914 	return !hrtimer_is_queued(timer) ||
915 		abs(expires - hrtimer_get_expires(timer)) > 5000;
916 }
917 
918 static void hrtick_cond_restart(struct rq *rq)
919 {
920 	struct hrtimer *timer = &rq->hrtick_timer;
921 	ktime_t time = rq->hrtick_time;
922 
923 	if (hrtick_needs_rearm(timer, time))
924 		hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
925 }
926 
927 /*
928  * called from hardirq (IPI) context
929  */
930 static void __hrtick_start(void *arg)
931 {
932 	struct rq *rq = arg;
933 	struct rq_flags rf;
934 
935 	rq_lock(rq, &rf);
936 	hrtick_cond_restart(rq);
937 	rq_unlock(rq, &rf);
938 }
939 
940 /*
941  * Called to set the hrtick timer state.
942  *
943  * called with rq->lock held and IRQs disabled
944  */
945 void hrtick_start(struct rq *rq, u64 delay)
946 {
947 	s64 delta;
948 
949 	/*
950 	 * Don't schedule slices shorter than 10000ns, that just
951 	 * doesn't make sense and can cause timer DoS.
952 	 */
953 	delta = max_t(s64, delay, 10000LL);
954 
955 	/*
956 	 * If this is in the middle of schedule() only note the delay
957 	 * and let hrtick_schedule_exit() deal with it.
958 	 */
959 	if (rq->hrtick_sched) {
960 		rq->hrtick_sched |= HRTICK_SCHED_START;
961 		rq->hrtick_delay = delta;
962 		return;
963 	}
964 
965 	rq->hrtick_time = ktime_add_ns(ktime_get(), delta);
966 	if (!hrtick_needs_rearm(&rq->hrtick_timer, rq->hrtick_time))
967 		return;
968 
969 	if (rq == this_rq())
970 		hrtimer_start(&rq->hrtick_timer, rq->hrtick_time, HRTIMER_MODE_ABS_PINNED_HARD);
971 	else
972 		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
973 }
974 
975 static inline void hrtick_schedule_enter(struct rq *rq)
976 {
977 	rq->hrtick_sched = HRTICK_SCHED_DEFER;
978 	if (hrtimer_test_and_clear_rearm_deferred())
979 		rq->hrtick_sched |= HRTICK_SCHED_REARM_HRTIMER;
980 }
981 
982 static inline void hrtick_schedule_exit(struct rq *rq)
983 {
984 	if (rq->hrtick_sched & HRTICK_SCHED_START) {
985 		rq->hrtick_time = ktime_add_ns(ktime_get(), rq->hrtick_delay);
986 		hrtick_cond_restart(rq);
987 	} else if (idle_rq(rq)) {
988 		/*
989 		 * No need for using hrtimer_is_active(). The timer is CPU local
990 		 * and interrupts are disabled, so the callback cannot be
991 		 * running and the queued state is valid.
992 		 */
993 		if (hrtimer_is_queued(&rq->hrtick_timer))
994 			hrtimer_cancel(&rq->hrtick_timer);
995 	}
996 
997 	if (rq->hrtick_sched & HRTICK_SCHED_REARM_HRTIMER)
998 		__hrtimer_rearm_deferred();
999 
1000 	rq->hrtick_sched = HRTICK_SCHED_NONE;
1001 }
1002 
1003 static void hrtick_rq_init(struct rq *rq)
1004 {
1005 	INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
1006 	rq->hrtick_sched = HRTICK_SCHED_NONE;
1007 	hrtimer_setup(&rq->hrtick_timer, hrtick, CLOCK_MONOTONIC,
1008 		      HRTIMER_MODE_REL_HARD | HRTIMER_MODE_LAZY_REARM);
1009 }
1010 #else /* !CONFIG_SCHED_HRTICK: */
1011 static inline void hrtick_clear(struct rq *rq) { }
1012 static inline void hrtick_rq_init(struct rq *rq) { }
1013 static inline void hrtick_schedule_enter(struct rq *rq) { }
1014 static inline void hrtick_schedule_exit(struct rq *rq) { }
1015 #endif /* !CONFIG_SCHED_HRTICK */
1016 
1017 /*
1018  * try_cmpxchg based fetch_or() macro so it works for different integer types:
1019  */
1020 #define fetch_or(ptr, mask)						\
1021 	({								\
1022 		typeof(ptr) _ptr = (ptr);				\
1023 		typeof(mask) _mask = (mask);				\
1024 		typeof(*_ptr) _val = *_ptr;				\
1025 									\
1026 		do {							\
1027 		} while (!try_cmpxchg(_ptr, &_val, _val | _mask));	\
1028 	_val;								\
1029 })
1030 
1031 #ifdef TIF_POLLING_NRFLAG
1032 /*
1033  * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
1034  * this avoids any races wrt polling state changes and thereby avoids
1035  * spurious IPIs.
1036  */
1037 static inline bool set_nr_and_not_polling(struct thread_info *ti, int tif)
1038 {
1039 	return !(fetch_or(&ti->flags, 1 << tif) & _TIF_POLLING_NRFLAG);
1040 }
1041 
1042 /*
1043  * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
1044  *
1045  * If this returns true, then the idle task promises to call
1046  * sched_ttwu_pending() and reschedule soon.
1047  */
1048 static bool set_nr_if_polling(struct task_struct *p)
1049 {
1050 	struct thread_info *ti = task_thread_info(p);
1051 	typeof(ti->flags) val = READ_ONCE(ti->flags);
1052 
1053 	do {
1054 		if (!(val & _TIF_POLLING_NRFLAG))
1055 			return false;
1056 		if (val & _TIF_NEED_RESCHED)
1057 			return true;
1058 	} while (!try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED));
1059 
1060 	return true;
1061 }
1062 
1063 #else
1064 static inline bool set_nr_and_not_polling(struct thread_info *ti, int tif)
1065 {
1066 	set_ti_thread_flag(ti, tif);
1067 	return true;
1068 }
1069 
1070 static inline bool set_nr_if_polling(struct task_struct *p)
1071 {
1072 	return false;
1073 }
1074 #endif
1075 
1076 static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
1077 {
1078 	struct wake_q_node *node = &task->wake_q;
1079 
1080 	/*
1081 	 * Atomically grab the task, if ->wake_q is !nil already it means
1082 	 * it's already queued (either by us or someone else) and will get the
1083 	 * wakeup due to that.
1084 	 *
1085 	 * In order to ensure that a pending wakeup will observe our pending
1086 	 * state, even in the failed case, an explicit smp_mb() must be used.
1087 	 */
1088 	smp_mb__before_atomic();
1089 	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
1090 		return false;
1091 
1092 	/*
1093 	 * The head is context local, there can be no concurrency.
1094 	 */
1095 	*head->lastp = node;
1096 	head->lastp = &node->next;
1097 	return true;
1098 }
1099 
1100 /**
1101  * wake_q_add() - queue a wakeup for 'later' waking.
1102  * @head: the wake_q_head to add @task to
1103  * @task: the task to queue for 'later' wakeup
1104  *
1105  * Queue a task for later wakeup, most likely by the wake_up_q() call in the
1106  * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
1107  * instantly.
1108  *
1109  * This function must be used as-if it were wake_up_process(); IOW the task
1110  * must be ready to be woken at this location.
1111  */
1112 void wake_q_add(struct wake_q_head *head, struct task_struct *task)
1113 {
1114 	if (__wake_q_add(head, task))
1115 		get_task_struct(task);
1116 }
1117 
1118 /**
1119  * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
1120  * @head: the wake_q_head to add @task to
1121  * @task: the task to queue for 'later' wakeup
1122  *
1123  * Queue a task for later wakeup, most likely by the wake_up_q() call in the
1124  * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
1125  * instantly.
1126  *
1127  * This function must be used as-if it were wake_up_process(); IOW the task
1128  * must be ready to be woken at this location.
1129  *
1130  * This function is essentially a task-safe equivalent to wake_q_add(). Callers
1131  * that already hold reference to @task can call the 'safe' version and trust
1132  * wake_q to do the right thing depending whether or not the @task is already
1133  * queued for wakeup.
1134  */
1135 void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
1136 {
1137 	if (!__wake_q_add(head, task))
1138 		put_task_struct(task);
1139 }
1140 
1141 void wake_up_q(struct wake_q_head *head)
1142 {
1143 	struct wake_q_node *node = head->first;
1144 
1145 	while (node != WAKE_Q_TAIL) {
1146 		struct task_struct *task;
1147 
1148 		task = container_of(node, struct task_struct, wake_q);
1149 		node = node->next;
1150 		/* pairs with cmpxchg_relaxed() in __wake_q_add() */
1151 		WRITE_ONCE(task->wake_q.next, NULL);
1152 		/* Task can safely be re-inserted now. */
1153 
1154 		/*
1155 		 * wake_up_process() executes a full barrier, which pairs with
1156 		 * the queueing in wake_q_add() so as not to miss wakeups.
1157 		 */
1158 		wake_up_process(task);
1159 		put_task_struct(task);
1160 	}
1161 }
1162 
1163 /*
1164  * resched_curr - mark rq's current task 'to be rescheduled now'.
1165  *
1166  * On UP this means the setting of the need_resched flag, on SMP it
1167  * might also involve a cross-CPU call to trigger the scheduler on
1168  * the target CPU.
1169  */
1170 static void __resched_curr(struct rq *rq, int tif)
1171 {
1172 	struct task_struct *curr = rq->curr;
1173 	struct thread_info *cti = task_thread_info(curr);
1174 	int cpu;
1175 
1176 	lockdep_assert_rq_held(rq);
1177 
1178 	/*
1179 	 * Always immediately preempt the idle task; no point in delaying doing
1180 	 * actual work.
1181 	 */
1182 	if (is_idle_task(curr) && tif == TIF_NEED_RESCHED_LAZY)
1183 		tif = TIF_NEED_RESCHED;
1184 
1185 	if (cti->flags & ((1 << tif) | _TIF_NEED_RESCHED))
1186 		return;
1187 
1188 	cpu = cpu_of(rq);
1189 
1190 	trace_sched_set_need_resched_tp(curr, cpu, tif);
1191 	if (cpu == smp_processor_id()) {
1192 		set_ti_thread_flag(cti, tif);
1193 		if (tif == TIF_NEED_RESCHED)
1194 			set_preempt_need_resched();
1195 		return;
1196 	}
1197 
1198 	if (set_nr_and_not_polling(cti, tif)) {
1199 		if (tif == TIF_NEED_RESCHED)
1200 			smp_send_reschedule(cpu);
1201 	} else {
1202 		trace_sched_wake_idle_without_ipi(cpu);
1203 	}
1204 }
1205 
1206 void __trace_set_need_resched(struct task_struct *curr, int tif)
1207 {
1208 	trace_sched_set_need_resched_tp(curr, smp_processor_id(), tif);
1209 }
1210 EXPORT_SYMBOL_GPL(__trace_set_need_resched);
1211 
1212 void resched_curr(struct rq *rq)
1213 {
1214 	__resched_curr(rq, TIF_NEED_RESCHED);
1215 }
1216 
1217 #ifdef CONFIG_PREEMPT_DYNAMIC
1218 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_preempt_lazy);
1219 static __always_inline bool dynamic_preempt_lazy(void)
1220 {
1221 	return static_branch_unlikely(&sk_dynamic_preempt_lazy);
1222 }
1223 #else
1224 static __always_inline bool dynamic_preempt_lazy(void)
1225 {
1226 	return IS_ENABLED(CONFIG_PREEMPT_LAZY);
1227 }
1228 #endif
1229 
1230 static __always_inline int get_lazy_tif_bit(void)
1231 {
1232 	if (dynamic_preempt_lazy())
1233 		return TIF_NEED_RESCHED_LAZY;
1234 
1235 	return TIF_NEED_RESCHED;
1236 }
1237 
1238 void resched_curr_lazy(struct rq *rq)
1239 {
1240 	__resched_curr(rq, get_lazy_tif_bit());
1241 }
1242 
1243 void resched_cpu(int cpu)
1244 {
1245 	struct rq *rq = cpu_rq(cpu);
1246 	unsigned long flags;
1247 
1248 	raw_spin_rq_lock_irqsave(rq, flags);
1249 	if (cpu_online(cpu) || cpu == smp_processor_id())
1250 		resched_curr(rq);
1251 	raw_spin_rq_unlock_irqrestore(rq, flags);
1252 }
1253 
1254 #ifdef CONFIG_NO_HZ_COMMON
1255 /*
1256  * In the semi idle case, use the nearest busy CPU for migrating timers
1257  * from an idle CPU.  This is good for power-savings.
1258  *
1259  * We don't do similar optimization for completely idle system, as
1260  * selecting an idle CPU will add more delays to the timers than intended
1261  * (as that CPU's timer base may not be up to date wrt jiffies etc).
1262  */
1263 int get_nohz_timer_target(void)
1264 {
1265 	int i, cpu = smp_processor_id(), default_cpu = -1;
1266 	struct sched_domain *sd;
1267 	const struct cpumask *hk_mask;
1268 
1269 	if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE)) {
1270 		if (!idle_cpu(cpu))
1271 			return cpu;
1272 		default_cpu = cpu;
1273 	}
1274 
1275 	hk_mask = housekeeping_cpumask(HK_TYPE_KERNEL_NOISE);
1276 
1277 	guard(rcu)();
1278 
1279 	for_each_domain(cpu, sd) {
1280 		for_each_cpu_and(i, sched_domain_span(sd), hk_mask) {
1281 			if (cpu == i)
1282 				continue;
1283 
1284 			if (!idle_cpu(i))
1285 				return i;
1286 		}
1287 	}
1288 
1289 	if (default_cpu == -1)
1290 		default_cpu = housekeeping_any_cpu(HK_TYPE_KERNEL_NOISE);
1291 
1292 	return default_cpu;
1293 }
1294 
1295 /*
1296  * When add_timer_on() enqueues a timer into the timer wheel of an
1297  * idle CPU then this timer might expire before the next timer event
1298  * which is scheduled to wake up that CPU. In case of a completely
1299  * idle system the next event might even be infinite time into the
1300  * future. wake_up_idle_cpu() ensures that the CPU is woken up and
1301  * leaves the inner idle loop so the newly added timer is taken into
1302  * account when the CPU goes back to idle and evaluates the timer
1303  * wheel for the next timer event.
1304  */
1305 static void wake_up_idle_cpu(int cpu)
1306 {
1307 	struct rq *rq = cpu_rq(cpu);
1308 
1309 	if (cpu == smp_processor_id())
1310 		return;
1311 
1312 	/*
1313 	 * Set TIF_NEED_RESCHED and send an IPI if in the non-polling
1314 	 * part of the idle loop. This forces an exit from the idle loop
1315 	 * and a round trip to schedule(). Now this could be optimized
1316 	 * because a simple new idle loop iteration is enough to
1317 	 * re-evaluate the next tick. Provided some re-ordering of tick
1318 	 * nohz functions that would need to follow TIF_NR_POLLING
1319 	 * clearing:
1320 	 *
1321 	 * - On most architectures, a simple fetch_or on ti::flags with a
1322 	 *   "0" value would be enough to know if an IPI needs to be sent.
1323 	 *
1324 	 * - x86 needs to perform a last need_resched() check between
1325 	 *   monitor and mwait which doesn't take timers into account.
1326 	 *   There a dedicated TIF_TIMER flag would be required to
1327 	 *   fetch_or here and be checked along with TIF_NEED_RESCHED
1328 	 *   before mwait().
1329 	 *
1330 	 * However, remote timer enqueue is not such a frequent event
1331 	 * and testing of the above solutions didn't appear to report
1332 	 * much benefits.
1333 	 */
1334 	if (set_nr_and_not_polling(task_thread_info(rq->idle), TIF_NEED_RESCHED))
1335 		smp_send_reschedule(cpu);
1336 	else
1337 		trace_sched_wake_idle_without_ipi(cpu);
1338 }
1339 
1340 static bool wake_up_full_nohz_cpu(int cpu)
1341 {
1342 	/*
1343 	 * We just need the target to call irq_exit() and re-evaluate
1344 	 * the next tick. The nohz full kick at least implies that.
1345 	 * If needed we can still optimize that later with an
1346 	 * empty IRQ.
1347 	 */
1348 	if (cpu_is_offline(cpu))
1349 		return true;  /* Don't try to wake offline CPUs. */
1350 	if (tick_nohz_full_cpu(cpu)) {
1351 		if (cpu != smp_processor_id() ||
1352 		    tick_nohz_tick_stopped())
1353 			tick_nohz_full_kick_cpu(cpu);
1354 		return true;
1355 	}
1356 
1357 	return false;
1358 }
1359 
1360 /*
1361  * Wake up the specified CPU.  If the CPU is going offline, it is the
1362  * caller's responsibility to deal with the lost wakeup, for example,
1363  * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
1364  */
1365 void wake_up_nohz_cpu(int cpu)
1366 {
1367 	if (!wake_up_full_nohz_cpu(cpu))
1368 		wake_up_idle_cpu(cpu);
1369 }
1370 
1371 static void nohz_csd_func(void *info)
1372 {
1373 	struct rq *rq = info;
1374 	int cpu = cpu_of(rq);
1375 	unsigned int flags;
1376 
1377 	/*
1378 	 * Release the rq::nohz_csd.
1379 	 */
1380 	flags = atomic_fetch_andnot(NOHZ_KICK_MASK | NOHZ_NEWILB_KICK, nohz_flags(cpu));
1381 	WARN_ON(!(flags & NOHZ_KICK_MASK));
1382 
1383 	rq->idle_balance = idle_cpu(cpu);
1384 	if (rq->idle_balance) {
1385 		rq->nohz_idle_balance = flags;
1386 		__raise_softirq_irqoff(SCHED_SOFTIRQ);
1387 	}
1388 }
1389 
1390 #endif /* CONFIG_NO_HZ_COMMON */
1391 
1392 #ifdef CONFIG_NO_HZ_FULL
1393 static inline bool __need_bw_check(struct rq *rq, struct task_struct *p)
1394 {
1395 	if (rq->nr_running != 1)
1396 		return false;
1397 
1398 	if (p->sched_class != &fair_sched_class)
1399 		return false;
1400 
1401 	if (!task_on_rq_queued(p))
1402 		return false;
1403 
1404 	return true;
1405 }
1406 
1407 bool sched_can_stop_tick(struct rq *rq)
1408 {
1409 	int fifo_nr_running;
1410 
1411 	/* Deadline tasks, even if single, need the tick */
1412 	if (rq->dl.dl_nr_running)
1413 		return false;
1414 
1415 	/*
1416 	 * If there are more than one RR tasks, we need the tick to affect the
1417 	 * actual RR behaviour.
1418 	 */
1419 	if (rq->rt.rr_nr_running) {
1420 		if (rq->rt.rr_nr_running == 1)
1421 			return true;
1422 		else
1423 			return false;
1424 	}
1425 
1426 	/*
1427 	 * If there's no RR tasks, but FIFO tasks, we can skip the tick, no
1428 	 * forced preemption between FIFO tasks.
1429 	 */
1430 	fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
1431 	if (fifo_nr_running)
1432 		return true;
1433 
1434 	/*
1435 	 * If there are no DL,RR/FIFO tasks, there must only be CFS or SCX tasks
1436 	 * left. For CFS, if there's more than one we need the tick for
1437 	 * involuntary preemption. For SCX, ask.
1438 	 */
1439 	if (scx_enabled() && !scx_can_stop_tick(rq))
1440 		return false;
1441 
1442 	if (rq->cfs.h_nr_queued > 1)
1443 		return false;
1444 
1445 	/*
1446 	 * If there is one task and it has CFS runtime bandwidth constraints
1447 	 * and it's on the cpu now we don't want to stop the tick.
1448 	 * This check prevents clearing the bit if a newly enqueued task here is
1449 	 * dequeued by migrating while the constrained task continues to run.
1450 	 * E.g. going from 2->1 without going through pick_next_task().
1451 	 */
1452 	if (__need_bw_check(rq, rq->curr)) {
1453 		if (cfs_task_bw_constrained(rq->curr))
1454 			return false;
1455 	}
1456 
1457 	return true;
1458 }
1459 #endif /* CONFIG_NO_HZ_FULL */
1460 
1461 #if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_FAIR_GROUP_SCHED)
1462 /*
1463  * Iterate task_group tree rooted at *from, calling @down when first entering a
1464  * node and @up when leaving it for the final time.
1465  *
1466  * Caller must hold rcu_lock or sufficient equivalent.
1467  */
1468 int walk_tg_tree_from(struct task_group *from,
1469 			     tg_visitor down, tg_visitor up, void *data)
1470 {
1471 	struct task_group *parent, *child;
1472 	int ret;
1473 
1474 	parent = from;
1475 
1476 down:
1477 	ret = (*down)(parent, data);
1478 	if (ret)
1479 		goto out;
1480 	list_for_each_entry_rcu(child, &parent->children, siblings) {
1481 		parent = child;
1482 		goto down;
1483 
1484 up:
1485 		continue;
1486 	}
1487 	ret = (*up)(parent, data);
1488 	if (ret || parent == from)
1489 		goto out;
1490 
1491 	child = parent;
1492 	parent = parent->parent;
1493 	if (parent)
1494 		goto up;
1495 out:
1496 	return ret;
1497 }
1498 
1499 int tg_nop(struct task_group *tg, void *data)
1500 {
1501 	return 0;
1502 }
1503 #endif
1504 
1505 void set_load_weight(struct task_struct *p, bool update_load)
1506 {
1507 	int prio = p->static_prio - MAX_RT_PRIO;
1508 	struct load_weight lw;
1509 
1510 	if (task_has_idle_policy(p)) {
1511 		lw.weight = scale_load(WEIGHT_IDLEPRIO);
1512 		lw.inv_weight = WMULT_IDLEPRIO;
1513 	} else {
1514 		lw.weight = scale_load(sched_prio_to_weight[prio]);
1515 		lw.inv_weight = sched_prio_to_wmult[prio];
1516 	}
1517 
1518 	/*
1519 	 * SCHED_OTHER tasks have to update their load when changing their
1520 	 * weight
1521 	 */
1522 	if (update_load && p->sched_class->reweight_task)
1523 		p->sched_class->reweight_task(task_rq(p), p, &lw);
1524 	else
1525 		p->se.load = lw;
1526 }
1527 
1528 #ifdef CONFIG_UCLAMP_TASK
1529 /*
1530  * Serializes updates of utilization clamp values
1531  *
1532  * The (slow-path) user-space triggers utilization clamp value updates which
1533  * can require updates on (fast-path) scheduler's data structures used to
1534  * support enqueue/dequeue operations.
1535  * While the per-CPU rq lock protects fast-path update operations, user-space
1536  * requests are serialized using a mutex to reduce the risk of conflicting
1537  * updates or API abuses.
1538  */
1539 static __maybe_unused DEFINE_MUTEX(uclamp_mutex);
1540 
1541 /* Max allowed minimum utilization */
1542 static unsigned int __maybe_unused sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;
1543 
1544 /* Max allowed maximum utilization */
1545 static unsigned int __maybe_unused sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;
1546 
1547 /*
1548  * By default RT tasks run at the maximum performance point/capacity of the
1549  * system. Uclamp enforces this by always setting UCLAMP_MIN of RT tasks to
1550  * SCHED_CAPACITY_SCALE.
1551  *
1552  * This knob allows admins to change the default behavior when uclamp is being
1553  * used. In battery powered devices, particularly, running at the maximum
1554  * capacity and frequency will increase energy consumption and shorten the
1555  * battery life.
1556  *
1557  * This knob only affects RT tasks that their uclamp_se->user_defined == false.
1558  *
1559  * This knob will not override the system default sched_util_clamp_min defined
1560  * above.
1561  */
1562 unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE;
1563 
1564 /* All clamps are required to be less or equal than these values */
1565 static struct uclamp_se uclamp_default[UCLAMP_CNT];
1566 
1567 /*
1568  * This static key is used to reduce the uclamp overhead in the fast path. It
1569  * primarily disables the call to uclamp_rq_{inc, dec}() in
1570  * enqueue/dequeue_task().
1571  *
1572  * This allows users to continue to enable uclamp in their kernel config with
1573  * minimum uclamp overhead in the fast path.
1574  *
1575  * As soon as userspace modifies any of the uclamp knobs, the static key is
1576  * enabled, since we have an actual users that make use of uclamp
1577  * functionality.
1578  *
1579  * The knobs that would enable this static key are:
1580  *
1581  *   * A task modifying its uclamp value with sched_setattr().
1582  *   * An admin modifying the sysctl_sched_uclamp_{min, max} via procfs.
1583  *   * An admin modifying the cgroup cpu.uclamp.{min, max}
1584  */
1585 DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);
1586 
1587 static inline unsigned int
1588 uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
1589 		  unsigned int clamp_value)
1590 {
1591 	/*
1592 	 * Avoid blocked utilization pushing up the frequency when we go
1593 	 * idle (which drops the max-clamp) by retaining the last known
1594 	 * max-clamp.
1595 	 */
1596 	if (clamp_id == UCLAMP_MAX) {
1597 		rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
1598 		return clamp_value;
1599 	}
1600 
1601 	return uclamp_none(UCLAMP_MIN);
1602 }
1603 
1604 static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
1605 				     unsigned int clamp_value)
1606 {
1607 	/* Reset max-clamp retention only on idle exit */
1608 	if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
1609 		return;
1610 
1611 	uclamp_rq_set(rq, clamp_id, clamp_value);
1612 }
1613 
1614 static inline
1615 unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
1616 				   unsigned int clamp_value)
1617 {
1618 	struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
1619 	int bucket_id = UCLAMP_BUCKETS - 1;
1620 
1621 	/*
1622 	 * Since both min and max clamps are max aggregated, find the
1623 	 * top most bucket with tasks in.
1624 	 */
1625 	for ( ; bucket_id >= 0; bucket_id--) {
1626 		if (!bucket[bucket_id].tasks)
1627 			continue;
1628 		return bucket[bucket_id].value;
1629 	}
1630 
1631 	/* No tasks -- default clamp values */
1632 	return uclamp_idle_value(rq, clamp_id, clamp_value);
1633 }
1634 
1635 static void __uclamp_update_util_min_rt_default(struct task_struct *p)
1636 {
1637 	unsigned int default_util_min;
1638 	struct uclamp_se *uc_se;
1639 
1640 	lockdep_assert_held(&p->pi_lock);
1641 
1642 	uc_se = &p->uclamp_req[UCLAMP_MIN];
1643 
1644 	/* Only sync if user didn't override the default */
1645 	if (uc_se->user_defined)
1646 		return;
1647 
1648 	default_util_min = sysctl_sched_uclamp_util_min_rt_default;
1649 	uclamp_se_set(uc_se, default_util_min, false);
1650 }
1651 
1652 static void uclamp_update_util_min_rt_default(struct task_struct *p)
1653 {
1654 	if (!rt_task(p))
1655 		return;
1656 
1657 	/* Protect updates to p->uclamp_* */
1658 	guard(task_rq_lock)(p);
1659 	__uclamp_update_util_min_rt_default(p);
1660 }
1661 
1662 static inline struct uclamp_se
1663 uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
1664 {
1665 	/* Copy by value as we could modify it */
1666 	struct uclamp_se uc_req = p->uclamp_req[clamp_id];
1667 #ifdef CONFIG_UCLAMP_TASK_GROUP
1668 	unsigned int tg_min, tg_max, value;
1669 
1670 	/*
1671 	 * Tasks in autogroups or root task group will be
1672 	 * restricted by system defaults.
1673 	 */
1674 	if (task_group_is_autogroup(task_group(p)))
1675 		return uc_req;
1676 	if (task_group(p) == &root_task_group)
1677 		return uc_req;
1678 
1679 	tg_min = task_group(p)->uclamp[UCLAMP_MIN].value;
1680 	tg_max = task_group(p)->uclamp[UCLAMP_MAX].value;
1681 	value = uc_req.value;
1682 	value = clamp(value, tg_min, tg_max);
1683 	uclamp_se_set(&uc_req, value, false);
1684 #endif
1685 
1686 	return uc_req;
1687 }
1688 
1689 /*
1690  * The effective clamp bucket index of a task depends on, by increasing
1691  * priority:
1692  * - the task specific clamp value, when explicitly requested from userspace
1693  * - the task group effective clamp value, for tasks not either in the root
1694  *   group or in an autogroup
1695  * - the system default clamp value, defined by the sysadmin
1696  */
1697 static inline struct uclamp_se
1698 uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
1699 {
1700 	struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id);
1701 	struct uclamp_se uc_max = uclamp_default[clamp_id];
1702 
1703 	/* System default restrictions always apply */
1704 	if (unlikely(uc_req.value > uc_max.value))
1705 		return uc_max;
1706 
1707 	return uc_req;
1708 }
1709 
1710 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
1711 {
1712 	struct uclamp_se uc_eff;
1713 
1714 	/* Task currently refcounted: use back-annotated (effective) value */
1715 	if (p->uclamp[clamp_id].active)
1716 		return (unsigned long)p->uclamp[clamp_id].value;
1717 
1718 	uc_eff = uclamp_eff_get(p, clamp_id);
1719 
1720 	return (unsigned long)uc_eff.value;
1721 }
1722 
1723 /*
1724  * When a task is enqueued on a rq, the clamp bucket currently defined by the
1725  * task's uclamp::bucket_id is refcounted on that rq. This also immediately
1726  * updates the rq's clamp value if required.
1727  *
1728  * Tasks can have a task-specific value requested from user-space, track
1729  * within each bucket the maximum value for tasks refcounted in it.
1730  * This "local max aggregation" allows to track the exact "requested" value
1731  * for each bucket when all its RUNNABLE tasks require the same clamp.
1732  */
1733 static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
1734 				    enum uclamp_id clamp_id)
1735 {
1736 	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1737 	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1738 	struct uclamp_bucket *bucket;
1739 
1740 	lockdep_assert_rq_held(rq);
1741 
1742 	/* Update task effective clamp */
1743 	p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);
1744 
1745 	bucket = &uc_rq->bucket[uc_se->bucket_id];
1746 	bucket->tasks++;
1747 	uc_se->active = true;
1748 
1749 	uclamp_idle_reset(rq, clamp_id, uc_se->value);
1750 
1751 	/*
1752 	 * Local max aggregation: rq buckets always track the max
1753 	 * "requested" clamp value of its RUNNABLE tasks.
1754 	 */
1755 	if (bucket->tasks == 1 || uc_se->value > bucket->value)
1756 		bucket->value = uc_se->value;
1757 
1758 	if (uc_se->value > uclamp_rq_get(rq, clamp_id))
1759 		uclamp_rq_set(rq, clamp_id, uc_se->value);
1760 }
1761 
1762 /*
1763  * When a task is dequeued from a rq, the clamp bucket refcounted by the task
1764  * is released. If this is the last task reference counting the rq's max
1765  * active clamp value, then the rq's clamp value is updated.
1766  *
1767  * Both refcounted tasks and rq's cached clamp values are expected to be
1768  * always valid. If it's detected they are not, as defensive programming,
1769  * enforce the expected state and warn.
1770  */
1771 static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
1772 				    enum uclamp_id clamp_id)
1773 {
1774 	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1775 	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1776 	struct uclamp_bucket *bucket;
1777 	unsigned int bkt_clamp;
1778 	unsigned int rq_clamp;
1779 
1780 	lockdep_assert_rq_held(rq);
1781 
1782 	/*
1783 	 * If sched_uclamp_used was enabled after task @p was enqueued,
1784 	 * we could end up with unbalanced call to uclamp_rq_dec_id().
1785 	 *
1786 	 * In this case the uc_se->active flag should be false since no uclamp
1787 	 * accounting was performed at enqueue time and we can just return
1788 	 * here.
1789 	 *
1790 	 * Need to be careful of the following enqueue/dequeue ordering
1791 	 * problem too
1792 	 *
1793 	 *	enqueue(taskA)
1794 	 *	// sched_uclamp_used gets enabled
1795 	 *	enqueue(taskB)
1796 	 *	dequeue(taskA)
1797 	 *	// Must not decrement bucket->tasks here
1798 	 *	dequeue(taskB)
1799 	 *
1800 	 * where we could end up with stale data in uc_se and
1801 	 * bucket[uc_se->bucket_id].
1802 	 *
1803 	 * The following check here eliminates the possibility of such race.
1804 	 */
1805 	if (unlikely(!uc_se->active))
1806 		return;
1807 
1808 	bucket = &uc_rq->bucket[uc_se->bucket_id];
1809 
1810 	WARN_ON_ONCE(!bucket->tasks);
1811 	if (likely(bucket->tasks))
1812 		bucket->tasks--;
1813 
1814 	uc_se->active = false;
1815 
1816 	/*
1817 	 * Keep "local max aggregation" simple and accept to (possibly)
1818 	 * overboost some RUNNABLE tasks in the same bucket.
1819 	 * The rq clamp bucket value is reset to its base value whenever
1820 	 * there are no more RUNNABLE tasks refcounting it.
1821 	 */
1822 	if (likely(bucket->tasks))
1823 		return;
1824 
1825 	rq_clamp = uclamp_rq_get(rq, clamp_id);
1826 	/*
1827 	 * Defensive programming: this should never happen. If it happens,
1828 	 * e.g. due to future modification, warn and fix up the expected value.
1829 	 */
1830 	WARN_ON_ONCE(bucket->value > rq_clamp);
1831 	if (bucket->value >= rq_clamp) {
1832 		bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
1833 		uclamp_rq_set(rq, clamp_id, bkt_clamp);
1834 	}
1835 }
1836 
1837 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p, int flags)
1838 {
1839 	enum uclamp_id clamp_id;
1840 
1841 	/*
1842 	 * Avoid any overhead until uclamp is actually used by the userspace.
1843 	 *
1844 	 * The condition is constructed such that a NOP is generated when
1845 	 * sched_uclamp_used is disabled.
1846 	 */
1847 	if (!uclamp_is_used())
1848 		return;
1849 
1850 	if (unlikely(!p->sched_class->uclamp_enabled))
1851 		return;
1852 
1853 	/* Only inc the delayed task which being woken up. */
1854 	if (p->se.sched_delayed && !(flags & ENQUEUE_DELAYED))
1855 		return;
1856 
1857 	for_each_clamp_id(clamp_id)
1858 		uclamp_rq_inc_id(rq, p, clamp_id);
1859 
1860 	/* Reset clamp idle holding when there is one RUNNABLE task */
1861 	if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
1862 		rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
1863 }
1864 
1865 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
1866 {
1867 	enum uclamp_id clamp_id;
1868 
1869 	/*
1870 	 * Avoid any overhead until uclamp is actually used by the userspace.
1871 	 *
1872 	 * The condition is constructed such that a NOP is generated when
1873 	 * sched_uclamp_used is disabled.
1874 	 */
1875 	if (!uclamp_is_used())
1876 		return;
1877 
1878 	if (unlikely(!p->sched_class->uclamp_enabled))
1879 		return;
1880 
1881 	if (p->se.sched_delayed)
1882 		return;
1883 
1884 	for_each_clamp_id(clamp_id)
1885 		uclamp_rq_dec_id(rq, p, clamp_id);
1886 }
1887 
1888 static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p,
1889 				      enum uclamp_id clamp_id)
1890 {
1891 	if (!p->uclamp[clamp_id].active)
1892 		return;
1893 
1894 	uclamp_rq_dec_id(rq, p, clamp_id);
1895 	uclamp_rq_inc_id(rq, p, clamp_id);
1896 
1897 	/*
1898 	 * Make sure to clear the idle flag if we've transiently reached 0
1899 	 * active tasks on rq.
1900 	 */
1901 	if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE))
1902 		rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
1903 }
1904 
1905 static inline void
1906 uclamp_update_active(struct task_struct *p)
1907 {
1908 	enum uclamp_id clamp_id;
1909 	struct rq_flags rf;
1910 	struct rq *rq;
1911 
1912 	/*
1913 	 * Lock the task and the rq where the task is (or was) queued.
1914 	 *
1915 	 * We might lock the (previous) rq of a !RUNNABLE task, but that's the
1916 	 * price to pay to safely serialize util_{min,max} updates with
1917 	 * enqueues, dequeues and migration operations.
1918 	 * This is the same locking schema used by __set_cpus_allowed_ptr().
1919 	 */
1920 	rq = task_rq_lock(p, &rf);
1921 
1922 	/*
1923 	 * Setting the clamp bucket is serialized by task_rq_lock().
1924 	 * If the task is not yet RUNNABLE and its task_struct is not
1925 	 * affecting a valid clamp bucket, the next time it's enqueued,
1926 	 * it will already see the updated clamp bucket value.
1927 	 */
1928 	for_each_clamp_id(clamp_id)
1929 		uclamp_rq_reinc_id(rq, p, clamp_id);
1930 
1931 	task_rq_unlock(rq, p, &rf);
1932 }
1933 
1934 #ifdef CONFIG_UCLAMP_TASK_GROUP
1935 static inline void
1936 uclamp_update_active_tasks(struct cgroup_subsys_state *css)
1937 {
1938 	struct css_task_iter it;
1939 	struct task_struct *p;
1940 
1941 	css_task_iter_start(css, 0, &it);
1942 	while ((p = css_task_iter_next(&it)))
1943 		uclamp_update_active(p);
1944 	css_task_iter_end(&it);
1945 }
1946 
1947 static void cpu_util_update_eff(struct cgroup_subsys_state *css);
1948 #endif
1949 
1950 #ifdef CONFIG_SYSCTL
1951 #ifdef CONFIG_UCLAMP_TASK_GROUP
1952 static void uclamp_update_root_tg(void)
1953 {
1954 	struct task_group *tg = &root_task_group;
1955 
1956 	uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN],
1957 		      sysctl_sched_uclamp_util_min, false);
1958 	uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
1959 		      sysctl_sched_uclamp_util_max, false);
1960 
1961 	guard(rcu)();
1962 	cpu_util_update_eff(&root_task_group.css);
1963 }
1964 #else
1965 static void uclamp_update_root_tg(void) { }
1966 #endif
1967 
1968 static void uclamp_sync_util_min_rt_default(void)
1969 {
1970 	struct task_struct *g, *p;
1971 
1972 	/*
1973 	 * copy_process()			sysctl_uclamp
1974 	 *					  uclamp_min_rt = X;
1975 	 *   write_lock(&tasklist_lock)		  read_lock(&tasklist_lock)
1976 	 *   // link thread			  smp_mb__after_spinlock()
1977 	 *   write_unlock(&tasklist_lock)	  read_unlock(&tasklist_lock);
1978 	 *   sched_post_fork()			  for_each_process_thread()
1979 	 *     __uclamp_sync_rt()		    __uclamp_sync_rt()
1980 	 *
1981 	 * Ensures that either sched_post_fork() will observe the new
1982 	 * uclamp_min_rt or for_each_process_thread() will observe the new
1983 	 * task.
1984 	 */
1985 	read_lock(&tasklist_lock);
1986 	smp_mb__after_spinlock();
1987 	read_unlock(&tasklist_lock);
1988 
1989 	guard(rcu)();
1990 	for_each_process_thread(g, p)
1991 		uclamp_update_util_min_rt_default(p);
1992 }
1993 
1994 static int sysctl_sched_uclamp_handler(const struct ctl_table *table, int write,
1995 				void *buffer, size_t *lenp, loff_t *ppos)
1996 {
1997 	bool update_root_tg = false;
1998 	int old_min, old_max, old_min_rt;
1999 	int result;
2000 
2001 	guard(mutex)(&uclamp_mutex);
2002 
2003 	old_min = sysctl_sched_uclamp_util_min;
2004 	old_max = sysctl_sched_uclamp_util_max;
2005 	old_min_rt = sysctl_sched_uclamp_util_min_rt_default;
2006 
2007 	result = proc_dointvec(table, write, buffer, lenp, ppos);
2008 	if (result)
2009 		goto undo;
2010 	if (!write)
2011 		return 0;
2012 
2013 	if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
2014 	    sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE	||
2015 	    sysctl_sched_uclamp_util_min_rt_default > SCHED_CAPACITY_SCALE) {
2016 
2017 		result = -EINVAL;
2018 		goto undo;
2019 	}
2020 
2021 	if (old_min != sysctl_sched_uclamp_util_min) {
2022 		uclamp_se_set(&uclamp_default[UCLAMP_MIN],
2023 			      sysctl_sched_uclamp_util_min, false);
2024 		update_root_tg = true;
2025 	}
2026 	if (old_max != sysctl_sched_uclamp_util_max) {
2027 		uclamp_se_set(&uclamp_default[UCLAMP_MAX],
2028 			      sysctl_sched_uclamp_util_max, false);
2029 		update_root_tg = true;
2030 	}
2031 
2032 	if (update_root_tg) {
2033 		sched_uclamp_enable();
2034 		uclamp_update_root_tg();
2035 	}
2036 
2037 	if (old_min_rt != sysctl_sched_uclamp_util_min_rt_default) {
2038 		sched_uclamp_enable();
2039 		uclamp_sync_util_min_rt_default();
2040 	}
2041 
2042 	/*
2043 	 * We update all RUNNABLE tasks only when task groups are in use.
2044 	 * Otherwise, keep it simple and do just a lazy update at each next
2045 	 * task enqueue time.
2046 	 */
2047 	return 0;
2048 
2049 undo:
2050 	sysctl_sched_uclamp_util_min = old_min;
2051 	sysctl_sched_uclamp_util_max = old_max;
2052 	sysctl_sched_uclamp_util_min_rt_default = old_min_rt;
2053 	return result;
2054 }
2055 #endif /* CONFIG_SYSCTL */
2056 
2057 static void uclamp_fork(struct task_struct *p)
2058 {
2059 	enum uclamp_id clamp_id;
2060 
2061 	/*
2062 	 * We don't need to hold task_rq_lock() when updating p->uclamp_* here
2063 	 * as the task is still at its early fork stages.
2064 	 */
2065 	for_each_clamp_id(clamp_id)
2066 		p->uclamp[clamp_id].active = false;
2067 
2068 	if (likely(!p->sched_reset_on_fork))
2069 		return;
2070 
2071 	for_each_clamp_id(clamp_id) {
2072 		uclamp_se_set(&p->uclamp_req[clamp_id],
2073 			      uclamp_none(clamp_id), false);
2074 	}
2075 }
2076 
2077 static void uclamp_post_fork(struct task_struct *p)
2078 {
2079 	uclamp_update_util_min_rt_default(p);
2080 }
2081 
2082 static void __init init_uclamp_rq(struct rq *rq)
2083 {
2084 	enum uclamp_id clamp_id;
2085 	struct uclamp_rq *uc_rq = rq->uclamp;
2086 
2087 	for_each_clamp_id(clamp_id) {
2088 		uc_rq[clamp_id] = (struct uclamp_rq) {
2089 			.value = uclamp_none(clamp_id)
2090 		};
2091 	}
2092 
2093 	rq->uclamp_flags = UCLAMP_FLAG_IDLE;
2094 }
2095 
2096 static void __init init_uclamp(void)
2097 {
2098 	struct uclamp_se uc_max = {};
2099 	enum uclamp_id clamp_id;
2100 	int cpu;
2101 
2102 	for_each_possible_cpu(cpu)
2103 		init_uclamp_rq(cpu_rq(cpu));
2104 
2105 	for_each_clamp_id(clamp_id) {
2106 		uclamp_se_set(&init_task.uclamp_req[clamp_id],
2107 			      uclamp_none(clamp_id), false);
2108 	}
2109 
2110 	/* System defaults allow max clamp values for both indexes */
2111 	uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false);
2112 	for_each_clamp_id(clamp_id) {
2113 		uclamp_default[clamp_id] = uc_max;
2114 #ifdef CONFIG_UCLAMP_TASK_GROUP
2115 		root_task_group.uclamp_req[clamp_id] = uc_max;
2116 		root_task_group.uclamp[clamp_id] = uc_max;
2117 #endif
2118 	}
2119 }
2120 
2121 #else /* !CONFIG_UCLAMP_TASK: */
2122 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p, int flags) { }
2123 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
2124 static inline void uclamp_fork(struct task_struct *p) { }
2125 static inline void uclamp_post_fork(struct task_struct *p) { }
2126 static inline void init_uclamp(void) { }
2127 #endif /* !CONFIG_UCLAMP_TASK */
2128 
2129 bool sched_task_on_rq(struct task_struct *p)
2130 {
2131 	return task_on_rq_queued(p);
2132 }
2133 
2134 unsigned long get_wchan(struct task_struct *p)
2135 {
2136 	unsigned long ip = 0;
2137 	unsigned int state;
2138 
2139 	if (!p || p == current)
2140 		return 0;
2141 
2142 	/* Only get wchan if task is blocked and we can keep it that way. */
2143 	raw_spin_lock_irq(&p->pi_lock);
2144 	state = READ_ONCE(p->__state);
2145 	smp_rmb(); /* see try_to_wake_up() */
2146 	if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
2147 		ip = __get_wchan(p);
2148 	raw_spin_unlock_irq(&p->pi_lock);
2149 
2150 	return ip;
2151 }
2152 
2153 void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
2154 {
2155 	if (!(flags & ENQUEUE_NOCLOCK))
2156 		update_rq_clock(rq);
2157 
2158 	/*
2159 	 * Can be before ->enqueue_task() because uclamp considers the
2160 	 * ENQUEUE_DELAYED task before its ->sched_delayed gets cleared
2161 	 * in ->enqueue_task().
2162 	 */
2163 	uclamp_rq_inc(rq, p, flags);
2164 
2165 	p->sched_class->enqueue_task(rq, p, flags);
2166 
2167 	psi_enqueue(p, flags);
2168 
2169 	if (!(flags & ENQUEUE_RESTORE))
2170 		sched_info_enqueue(rq, p);
2171 
2172 	if (sched_core_enabled(rq))
2173 		sched_core_enqueue(rq, p);
2174 }
2175 
2176 /*
2177  * Must only return false when DEQUEUE_SLEEP.
2178  */
2179 inline bool dequeue_task(struct rq *rq, struct task_struct *p, int flags)
2180 {
2181 	if (sched_core_enabled(rq))
2182 		sched_core_dequeue(rq, p, flags);
2183 
2184 	if (!(flags & DEQUEUE_NOCLOCK))
2185 		update_rq_clock(rq);
2186 
2187 	if (!(flags & DEQUEUE_SAVE))
2188 		sched_info_dequeue(rq, p);
2189 
2190 	psi_dequeue(p, flags);
2191 
2192 	/*
2193 	 * Must be before ->dequeue_task() because ->dequeue_task() can 'fail'
2194 	 * and mark the task ->sched_delayed.
2195 	 */
2196 	uclamp_rq_dec(rq, p);
2197 	return p->sched_class->dequeue_task(rq, p, flags);
2198 }
2199 
2200 void activate_task(struct rq *rq, struct task_struct *p, int flags)
2201 {
2202 	if (task_on_rq_migrating(p))
2203 		flags |= ENQUEUE_MIGRATED;
2204 
2205 	enqueue_task(rq, p, flags);
2206 
2207 	WRITE_ONCE(p->on_rq, TASK_ON_RQ_QUEUED);
2208 	ASSERT_EXCLUSIVE_WRITER(p->on_rq);
2209 }
2210 
2211 void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
2212 {
2213 	WARN_ON_ONCE(flags & DEQUEUE_SLEEP);
2214 
2215 	WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
2216 	ASSERT_EXCLUSIVE_WRITER(p->on_rq);
2217 
2218 	/*
2219 	 * Code explicitly relies on TASK_ON_RQ_MIGRATING begin set *before*
2220 	 * dequeue_task() and cleared *after* enqueue_task().
2221 	 */
2222 
2223 	dequeue_task(rq, p, flags);
2224 }
2225 
2226 static void block_task(struct rq *rq, struct task_struct *p, int flags)
2227 {
2228 	if (dequeue_task(rq, p, DEQUEUE_SLEEP | flags))
2229 		__block_task(rq, p);
2230 }
2231 
2232 /**
2233  * task_curr - is this task currently executing on a CPU?
2234  * @p: the task in question.
2235  *
2236  * Return: 1 if the task is currently executing. 0 otherwise.
2237  */
2238 inline int task_curr(const struct task_struct *p)
2239 {
2240 	return cpu_curr(task_cpu(p)) == p;
2241 }
2242 
2243 void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags)
2244 {
2245 	struct task_struct *donor = rq->donor;
2246 
2247 	if (p->sched_class == rq->next_class) {
2248 		rq->next_class->wakeup_preempt(rq, p, flags);
2249 
2250 	} else if (sched_class_above(p->sched_class, rq->next_class)) {
2251 		rq->next_class->wakeup_preempt(rq, p, flags);
2252 		resched_curr(rq);
2253 		rq->next_class = p->sched_class;
2254 	}
2255 
2256 	/*
2257 	 * A queue event has occurred, and we're going to schedule.  In
2258 	 * this case, we can save a useless back to back clock update.
2259 	 */
2260 	if (task_on_rq_queued(donor) && test_tsk_need_resched(rq->curr))
2261 		rq_clock_skip_update(rq);
2262 }
2263 
2264 static __always_inline
2265 int __task_state_match(struct task_struct *p, unsigned int state)
2266 {
2267 	if (READ_ONCE(p->__state) & state)
2268 		return 1;
2269 
2270 	if (READ_ONCE(p->saved_state) & state)
2271 		return -1;
2272 
2273 	return 0;
2274 }
2275 
2276 static __always_inline
2277 int task_state_match(struct task_struct *p, unsigned int state)
2278 {
2279 	/*
2280 	 * Serialize against current_save_and_set_rtlock_wait_state(),
2281 	 * current_restore_rtlock_saved_state(), and __refrigerator().
2282 	 */
2283 	guard(raw_spinlock_irq)(&p->pi_lock);
2284 	return __task_state_match(p, state);
2285 }
2286 
2287 /*
2288  * wait_task_inactive - wait for a thread to unschedule.
2289  *
2290  * Wait for the thread to block in any of the states set in @match_state.
2291  * If it changes, i.e. @p might have woken up, then return zero.  When we
2292  * succeed in waiting for @p to be off its CPU, we return a positive number
2293  * (its total switch count).  If a second call a short while later returns the
2294  * same number, the caller can be sure that @p has remained unscheduled the
2295  * whole time.
2296  *
2297  * The caller must ensure that the task *will* unschedule sometime soon,
2298  * else this function might spin for a *long* time. This function can't
2299  * be called with interrupts off, or it may introduce deadlock with
2300  * smp_call_function() if an IPI is sent by the same process we are
2301  * waiting to become inactive.
2302  */
2303 unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
2304 {
2305 	int running, queued, match;
2306 	struct rq_flags rf;
2307 	unsigned long ncsw;
2308 	struct rq *rq;
2309 
2310 	for (;;) {
2311 		/*
2312 		 * We do the initial early heuristics without holding
2313 		 * any task-queue locks at all. We'll only try to get
2314 		 * the runqueue lock when things look like they will
2315 		 * work out!
2316 		 */
2317 		rq = task_rq(p);
2318 
2319 		/*
2320 		 * If the task is actively running on another CPU
2321 		 * still, just relax and busy-wait without holding
2322 		 * any locks.
2323 		 *
2324 		 * NOTE! Since we don't hold any locks, it's not
2325 		 * even sure that "rq" stays as the right runqueue!
2326 		 * But we don't care, since "task_on_cpu()" will
2327 		 * return false if the runqueue has changed and p
2328 		 * is actually now running somewhere else!
2329 		 */
2330 		while (task_on_cpu(rq, p)) {
2331 			if (!task_state_match(p, match_state))
2332 				return 0;
2333 			cpu_relax();
2334 		}
2335 
2336 		/*
2337 		 * Ok, time to look more closely! We need the rq
2338 		 * lock now, to be *sure*. If we're wrong, we'll
2339 		 * just go back and repeat.
2340 		 */
2341 		rq = task_rq_lock(p, &rf);
2342 		/*
2343 		 * If task is sched_delayed, force dequeue it, to avoid always
2344 		 * hitting the tick timeout in the queued case
2345 		 */
2346 		if (p->se.sched_delayed)
2347 			dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
2348 		trace_sched_wait_task(p);
2349 		running = task_on_cpu(rq, p);
2350 		queued = task_on_rq_queued(p);
2351 		ncsw = 0;
2352 		if ((match = __task_state_match(p, match_state))) {
2353 			/*
2354 			 * When matching on p->saved_state, consider this task
2355 			 * still queued so it will wait.
2356 			 */
2357 			if (match < 0)
2358 				queued = 1;
2359 			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
2360 		}
2361 		task_rq_unlock(rq, p, &rf);
2362 
2363 		/*
2364 		 * If it changed from the expected state, bail out now.
2365 		 */
2366 		if (unlikely(!ncsw))
2367 			break;
2368 
2369 		/*
2370 		 * Was it really running after all now that we
2371 		 * checked with the proper locks actually held?
2372 		 *
2373 		 * Oops. Go back and try again..
2374 		 */
2375 		if (unlikely(running)) {
2376 			cpu_relax();
2377 			continue;
2378 		}
2379 
2380 		/*
2381 		 * It's not enough that it's not actively running,
2382 		 * it must be off the runqueue _entirely_, and not
2383 		 * preempted!
2384 		 *
2385 		 * So if it was still runnable (but just not actively
2386 		 * running right now), it's preempted, and we should
2387 		 * yield - it could be a while.
2388 		 */
2389 		if (unlikely(queued)) {
2390 			ktime_t to = NSEC_PER_SEC / HZ;
2391 
2392 			set_current_state(TASK_UNINTERRUPTIBLE);
2393 			schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD);
2394 			continue;
2395 		}
2396 
2397 		/*
2398 		 * Ahh, all good. It wasn't running, and it wasn't
2399 		 * runnable, which means that it will never become
2400 		 * running in the future either. We're all done!
2401 		 */
2402 		break;
2403 	}
2404 
2405 	return ncsw;
2406 }
2407 
2408 static void
2409 do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx);
2410 
2411 static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
2412 {
2413 	struct affinity_context ac = {
2414 		.new_mask  = cpumask_of(rq->cpu),
2415 		.flags     = SCA_MIGRATE_DISABLE,
2416 	};
2417 
2418 	if (likely(!p->migration_disabled))
2419 		return;
2420 
2421 	if (p->cpus_ptr != &p->cpus_mask)
2422 		return;
2423 
2424 	scoped_guard (task_rq_lock, p)
2425 		do_set_cpus_allowed(p, &ac);
2426 }
2427 
2428 void ___migrate_enable(void)
2429 {
2430 	struct task_struct *p = current;
2431 	struct affinity_context ac = {
2432 		.new_mask  = &p->cpus_mask,
2433 		.flags     = SCA_MIGRATE_ENABLE,
2434 	};
2435 
2436 	__set_cpus_allowed_ptr(p, &ac);
2437 }
2438 EXPORT_SYMBOL_GPL(___migrate_enable);
2439 
2440 void migrate_disable(void)
2441 {
2442 	__migrate_disable();
2443 }
2444 EXPORT_SYMBOL_GPL(migrate_disable);
2445 
2446 void migrate_enable(void)
2447 {
2448 	__migrate_enable();
2449 }
2450 EXPORT_SYMBOL_GPL(migrate_enable);
2451 
2452 static inline bool rq_has_pinned_tasks(struct rq *rq)
2453 {
2454 	return rq->nr_pinned;
2455 }
2456 
2457 /*
2458  * Per-CPU kthreads are allowed to run on !active && online CPUs, see
2459  * __set_cpus_allowed_ptr() and select_fallback_rq().
2460  */
2461 static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
2462 {
2463 	/* When not in the task's cpumask, no point in looking further. */
2464 	if (!task_allowed_on_cpu(p, cpu))
2465 		return false;
2466 
2467 	/* migrate_disabled() must be allowed to finish. */
2468 	if (is_migration_disabled(p))
2469 		return cpu_online(cpu);
2470 
2471 	/* Non kernel threads are not allowed during either online or offline. */
2472 	if (!(p->flags & PF_KTHREAD))
2473 		return cpu_active(cpu);
2474 
2475 	/* KTHREAD_IS_PER_CPU is always allowed. */
2476 	if (kthread_is_per_cpu(p))
2477 		return cpu_online(cpu);
2478 
2479 	/* Regular kernel threads don't get to stay during offline. */
2480 	if (cpu_dying(cpu))
2481 		return false;
2482 
2483 	/* But are allowed during online. */
2484 	return cpu_online(cpu);
2485 }
2486 
2487 /*
2488  * This is how migration works:
2489  *
2490  * 1) we invoke migration_cpu_stop() on the target CPU using
2491  *    stop_one_cpu().
2492  * 2) stopper starts to run (implicitly forcing the migrated thread
2493  *    off the CPU)
2494  * 3) it checks whether the migrated task is still in the wrong runqueue.
2495  * 4) if it's in the wrong runqueue then the migration thread removes
2496  *    it and puts it into the right queue.
2497  * 5) stopper completes and stop_one_cpu() returns and the migration
2498  *    is done.
2499  */
2500 
2501 /*
2502  * move_queued_task - move a queued task to new rq.
2503  *
2504  * Returns (locked) new rq. Old rq's lock is released.
2505  */
2506 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
2507 				   struct task_struct *p, int new_cpu)
2508 	__must_hold(__rq_lockp(rq))
2509 {
2510 	lockdep_assert_rq_held(rq);
2511 
2512 	deactivate_task(rq, p, DEQUEUE_NOCLOCK);
2513 	set_task_cpu(p, new_cpu);
2514 	rq_unlock(rq, rf);
2515 
2516 	rq = cpu_rq(new_cpu);
2517 
2518 	rq_lock(rq, rf);
2519 	WARN_ON_ONCE(task_cpu(p) != new_cpu);
2520 	activate_task(rq, p, 0);
2521 	wakeup_preempt(rq, p, 0);
2522 
2523 	return rq;
2524 }
2525 
2526 struct migration_arg {
2527 	struct task_struct		*task;
2528 	int				dest_cpu;
2529 	struct set_affinity_pending	*pending;
2530 };
2531 
2532 /*
2533  * @refs: number of wait_for_completion()
2534  * @stop_pending: is @stop_work in use
2535  */
2536 struct set_affinity_pending {
2537 	refcount_t		refs;
2538 	unsigned int		stop_pending;
2539 	struct completion	done;
2540 	struct cpu_stop_work	stop_work;
2541 	struct migration_arg	arg;
2542 };
2543 
2544 /*
2545  * Move (not current) task off this CPU, onto the destination CPU. We're doing
2546  * this because either it can't run here any more (set_cpus_allowed()
2547  * away from this CPU, or CPU going down), or because we're
2548  * attempting to rebalance this task on exec (sched_exec).
2549  *
2550  * So we race with normal scheduler movements, but that's OK, as long
2551  * as the task is no longer on this CPU.
2552  */
2553 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
2554 				 struct task_struct *p, int dest_cpu)
2555 	__must_hold(__rq_lockp(rq))
2556 {
2557 	/* Affinity changed (again). */
2558 	if (!is_cpu_allowed(p, dest_cpu))
2559 		return rq;
2560 
2561 	rq = move_queued_task(rq, rf, p, dest_cpu);
2562 
2563 	return rq;
2564 }
2565 
2566 /*
2567  * migration_cpu_stop - this will be executed by a high-prio stopper thread
2568  * and performs thread migration by bumping thread off CPU then
2569  * 'pushing' onto another runqueue.
2570  */
2571 static int migration_cpu_stop(void *data)
2572 {
2573 	struct migration_arg *arg = data;
2574 	struct set_affinity_pending *pending = arg->pending;
2575 	struct task_struct *p = arg->task;
2576 	struct rq *rq = this_rq();
2577 	bool complete = false;
2578 	struct rq_flags rf;
2579 
2580 	/*
2581 	 * The original target CPU might have gone down and we might
2582 	 * be on another CPU but it doesn't matter.
2583 	 */
2584 	local_irq_save(rf.flags);
2585 	/*
2586 	 * We need to explicitly wake pending tasks before running
2587 	 * __migrate_task() such that we will not miss enforcing cpus_ptr
2588 	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
2589 	 */
2590 	flush_smp_call_function_queue();
2591 
2592 	/*
2593 	 * We may change the underlying rq, but the locks held will
2594 	 * appropriately be "transferred" when switching.
2595 	 */
2596 	context_unsafe_alias(rq);
2597 
2598 	raw_spin_lock(&p->pi_lock);
2599 	rq_lock(rq, &rf);
2600 
2601 	/*
2602 	 * If we were passed a pending, then ->stop_pending was set, thus
2603 	 * p->migration_pending must have remained stable.
2604 	 */
2605 	WARN_ON_ONCE(pending && pending != p->migration_pending);
2606 
2607 	/*
2608 	 * If task_rq(p) != rq, it cannot be migrated here, because we're
2609 	 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
2610 	 * we're holding p->pi_lock.
2611 	 */
2612 	if (task_rq(p) == rq) {
2613 		if (is_migration_disabled(p))
2614 			goto out;
2615 
2616 		if (pending) {
2617 			p->migration_pending = NULL;
2618 			complete = true;
2619 
2620 			if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask))
2621 				goto out;
2622 		}
2623 
2624 		if (task_on_rq_queued(p)) {
2625 			update_rq_clock(rq);
2626 			rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
2627 		} else {
2628 			p->wake_cpu = arg->dest_cpu;
2629 		}
2630 
2631 		/*
2632 		 * XXX __migrate_task() can fail, at which point we might end
2633 		 * up running on a dodgy CPU, AFAICT this can only happen
2634 		 * during CPU hotplug, at which point we'll get pushed out
2635 		 * anyway, so it's probably not a big deal.
2636 		 */
2637 
2638 	} else if (pending) {
2639 		/*
2640 		 * This happens when we get migrated between migrate_enable()'s
2641 		 * preempt_enable() and scheduling the stopper task. At that
2642 		 * point we're a regular task again and not current anymore.
2643 		 *
2644 		 * A !PREEMPT kernel has a giant hole here, which makes it far
2645 		 * more likely.
2646 		 */
2647 
2648 		/*
2649 		 * The task moved before the stopper got to run. We're holding
2650 		 * ->pi_lock, so the allowed mask is stable - if it got
2651 		 * somewhere allowed, we're done.
2652 		 */
2653 		if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) {
2654 			p->migration_pending = NULL;
2655 			complete = true;
2656 			goto out;
2657 		}
2658 
2659 		/*
2660 		 * When migrate_enable() hits a rq mis-match we can't reliably
2661 		 * determine is_migration_disabled() and so have to chase after
2662 		 * it.
2663 		 */
2664 		WARN_ON_ONCE(!pending->stop_pending);
2665 		preempt_disable();
2666 		rq_unlock(rq, &rf);
2667 		raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
2668 		stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
2669 				    &pending->arg, &pending->stop_work);
2670 		preempt_enable();
2671 		return 0;
2672 	}
2673 out:
2674 	if (pending)
2675 		pending->stop_pending = false;
2676 	rq_unlock(rq, &rf);
2677 	raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
2678 
2679 	if (complete)
2680 		complete_all(&pending->done);
2681 
2682 	return 0;
2683 }
2684 
2685 int push_cpu_stop(void *arg)
2686 {
2687 	struct rq *lowest_rq = NULL, *rq = this_rq();
2688 	struct task_struct *p = arg;
2689 
2690 	raw_spin_lock_irq(&p->pi_lock);
2691 	raw_spin_rq_lock(rq);
2692 
2693 	if (task_rq(p) != rq)
2694 		goto out_unlock;
2695 
2696 	if (is_migration_disabled(p)) {
2697 		p->migration_flags |= MDF_PUSH;
2698 		goto out_unlock;
2699 	}
2700 
2701 	p->migration_flags &= ~MDF_PUSH;
2702 
2703 	if (p->sched_class->find_lock_rq)
2704 		lowest_rq = p->sched_class->find_lock_rq(p, rq);
2705 
2706 	if (!lowest_rq)
2707 		goto out_unlock;
2708 
2709 	lockdep_assert_rq_held(lowest_rq);
2710 
2711 	// XXX validate p is still the highest prio task
2712 	if (task_rq(p) == rq) {
2713 		move_queued_task_locked(rq, lowest_rq, p);
2714 		resched_curr(lowest_rq);
2715 	}
2716 
2717 	double_unlock_balance(rq, lowest_rq);
2718 
2719 out_unlock:
2720 	rq->push_busy = false;
2721 	raw_spin_rq_unlock(rq);
2722 	raw_spin_unlock_irq(&p->pi_lock);
2723 
2724 	put_task_struct(p);
2725 	return 0;
2726 }
2727 
2728 static inline void mm_update_cpus_allowed(struct mm_struct *mm, const cpumask_t *affmask);
2729 
2730 /*
2731  * sched_class::set_cpus_allowed must do the below, but is not required to
2732  * actually call this function.
2733  */
2734 void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx)
2735 {
2736 	if (ctx->flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) {
2737 		p->cpus_ptr = ctx->new_mask;
2738 		return;
2739 	}
2740 
2741 	cpumask_copy(&p->cpus_mask, ctx->new_mask);
2742 	p->nr_cpus_allowed = cpumask_weight(ctx->new_mask);
2743 	mm_update_cpus_allowed(p->mm, ctx->new_mask);
2744 
2745 	/*
2746 	 * Swap in a new user_cpus_ptr if SCA_USER flag set
2747 	 */
2748 	if (ctx->flags & SCA_USER)
2749 		swap(p->user_cpus_ptr, ctx->user_mask);
2750 }
2751 
2752 static void
2753 do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
2754 {
2755 	scoped_guard (sched_change, p, DEQUEUE_SAVE)
2756 		p->sched_class->set_cpus_allowed(p, ctx);
2757 }
2758 
2759 /*
2760  * Used for kthread_bind() and select_fallback_rq(), in both cases the user
2761  * affinity (if any) should be destroyed too.
2762  */
2763 void set_cpus_allowed_force(struct task_struct *p, const struct cpumask *new_mask)
2764 {
2765 	struct affinity_context ac = {
2766 		.new_mask  = new_mask,
2767 		.user_mask = NULL,
2768 		.flags     = SCA_USER,	/* clear the user requested mask */
2769 	};
2770 	union cpumask_rcuhead {
2771 		cpumask_t cpumask;
2772 		struct rcu_head rcu;
2773 	};
2774 
2775 	scoped_guard (__task_rq_lock, p)
2776 		do_set_cpus_allowed(p, &ac);
2777 
2778 	/*
2779 	 * Because this is called with p->pi_lock held, it is not possible
2780 	 * to use kfree() here (when PREEMPT_RT=y), therefore punt to using
2781 	 * kfree_rcu().
2782 	 */
2783 	kfree_rcu((union cpumask_rcuhead *)ac.user_mask, rcu);
2784 }
2785 
2786 int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
2787 		      int node)
2788 {
2789 	cpumask_t *user_mask;
2790 	unsigned long flags;
2791 
2792 	/*
2793 	 * Always clear dst->user_cpus_ptr first as their user_cpus_ptr's
2794 	 * may differ by now due to racing.
2795 	 */
2796 	dst->user_cpus_ptr = NULL;
2797 
2798 	/*
2799 	 * This check is racy and losing the race is a valid situation.
2800 	 * It is not worth the extra overhead of taking the pi_lock on
2801 	 * every fork/clone.
2802 	 */
2803 	if (data_race(!src->user_cpus_ptr))
2804 		return 0;
2805 
2806 	user_mask = alloc_user_cpus_ptr(node);
2807 	if (!user_mask)
2808 		return -ENOMEM;
2809 
2810 	/*
2811 	 * Use pi_lock to protect content of user_cpus_ptr
2812 	 *
2813 	 * Though unlikely, user_cpus_ptr can be reset to NULL by a concurrent
2814 	 * set_cpus_allowed_force().
2815 	 */
2816 	raw_spin_lock_irqsave(&src->pi_lock, flags);
2817 	if (src->user_cpus_ptr) {
2818 		swap(dst->user_cpus_ptr, user_mask);
2819 		cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
2820 	}
2821 	raw_spin_unlock_irqrestore(&src->pi_lock, flags);
2822 
2823 	if (unlikely(user_mask))
2824 		kfree(user_mask);
2825 
2826 	return 0;
2827 }
2828 
2829 static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p)
2830 {
2831 	struct cpumask *user_mask = NULL;
2832 
2833 	swap(p->user_cpus_ptr, user_mask);
2834 
2835 	return user_mask;
2836 }
2837 
2838 void release_user_cpus_ptr(struct task_struct *p)
2839 {
2840 	kfree(clear_user_cpus_ptr(p));
2841 }
2842 
2843 /*
2844  * This function is wildly self concurrent; here be dragons.
2845  *
2846  *
2847  * When given a valid mask, __set_cpus_allowed_ptr() must block until the
2848  * designated task is enqueued on an allowed CPU. If that task is currently
2849  * running, we have to kick it out using the CPU stopper.
2850  *
2851  * Migrate-Disable comes along and tramples all over our nice sandcastle.
2852  * Consider:
2853  *
2854  *     Initial conditions: P0->cpus_mask = [0, 1]
2855  *
2856  *     P0@CPU0                  P1
2857  *
2858  *     migrate_disable();
2859  *     <preempted>
2860  *                              set_cpus_allowed_ptr(P0, [1]);
2861  *
2862  * P1 *cannot* return from this set_cpus_allowed_ptr() call until P0 executes
2863  * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region).
2864  * This means we need the following scheme:
2865  *
2866  *     P0@CPU0                  P1
2867  *
2868  *     migrate_disable();
2869  *     <preempted>
2870  *                              set_cpus_allowed_ptr(P0, [1]);
2871  *                                <blocks>
2872  *     <resumes>
2873  *     migrate_enable();
2874  *       __set_cpus_allowed_ptr();
2875  *       <wakes local stopper>
2876  *                         `--> <woken on migration completion>
2877  *
2878  * Now the fun stuff: there may be several P1-like tasks, i.e. multiple
2879  * concurrent set_cpus_allowed_ptr(P0, [*]) calls. CPU affinity changes of any
2880  * task p are serialized by p->pi_lock, which we can leverage: the one that
2881  * should come into effect at the end of the Migrate-Disable region is the last
2882  * one. This means we only need to track a single cpumask (i.e. p->cpus_mask),
2883  * but we still need to properly signal those waiting tasks at the appropriate
2884  * moment.
2885  *
2886  * This is implemented using struct set_affinity_pending. The first
2887  * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will
2888  * setup an instance of that struct and install it on the targeted task_struct.
2889  * Any and all further callers will reuse that instance. Those then wait for
2890  * a completion signaled at the tail of the CPU stopper callback (1), triggered
2891  * on the end of the Migrate-Disable region (i.e. outermost migrate_enable()).
2892  *
2893  *
2894  * (1) In the cases covered above. There is one more where the completion is
2895  * signaled within affine_move_task() itself: when a subsequent affinity request
2896  * occurs after the stopper bailed out due to the targeted task still being
2897  * Migrate-Disable. Consider:
2898  *
2899  *     Initial conditions: P0->cpus_mask = [0, 1]
2900  *
2901  *     CPU0		  P1				P2
2902  *     <P0>
2903  *       migrate_disable();
2904  *       <preempted>
2905  *                        set_cpus_allowed_ptr(P0, [1]);
2906  *                          <blocks>
2907  *     <migration/0>
2908  *       migration_cpu_stop()
2909  *         is_migration_disabled()
2910  *           <bails>
2911  *                                                       set_cpus_allowed_ptr(P0, [0, 1]);
2912  *                                                         <signal completion>
2913  *                          <awakes>
2914  *
2915  * Note that the above is safe vs a concurrent migrate_enable(), as any
2916  * pending affinity completion is preceded by an uninstallation of
2917  * p->migration_pending done with p->pi_lock held.
2918  */
2919 static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf,
2920 			    int dest_cpu, unsigned int flags)
2921 	__releases(__rq_lockp(rq), &p->pi_lock)
2922 {
2923 	struct set_affinity_pending my_pending = { }, *pending = NULL;
2924 	bool stop_pending, complete = false;
2925 
2926 	/*
2927 	 * Can the task run on the task's current CPU? If so, we're done
2928 	 *
2929 	 * We are also done if the task is the current donor, boosting a lock-
2930 	 * holding proxy, (and potentially has been migrated outside its
2931 	 * current or previous affinity mask)
2932 	 */
2933 	if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask) ||
2934 	    (task_current_donor(rq, p) && !task_current(rq, p))) {
2935 		struct task_struct *push_task = NULL;
2936 
2937 		if ((flags & SCA_MIGRATE_ENABLE) &&
2938 		    (p->migration_flags & MDF_PUSH) && !rq->push_busy) {
2939 			rq->push_busy = true;
2940 			push_task = get_task_struct(p);
2941 		}
2942 
2943 		/*
2944 		 * If there are pending waiters, but no pending stop_work,
2945 		 * then complete now.
2946 		 */
2947 		pending = p->migration_pending;
2948 		if (pending && !pending->stop_pending) {
2949 			p->migration_pending = NULL;
2950 			complete = true;
2951 		}
2952 
2953 		preempt_disable();
2954 		task_rq_unlock(rq, p, rf);
2955 		if (push_task) {
2956 			stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
2957 					    p, &rq->push_work);
2958 		}
2959 		preempt_enable();
2960 
2961 		if (complete)
2962 			complete_all(&pending->done);
2963 
2964 		return 0;
2965 	}
2966 
2967 	if (!(flags & SCA_MIGRATE_ENABLE)) {
2968 		/* serialized by p->pi_lock */
2969 		if (!p->migration_pending) {
2970 			/* Install the request */
2971 			refcount_set(&my_pending.refs, 1);
2972 			init_completion(&my_pending.done);
2973 			my_pending.arg = (struct migration_arg) {
2974 				.task = p,
2975 				.dest_cpu = dest_cpu,
2976 				.pending = &my_pending,
2977 			};
2978 
2979 			p->migration_pending = &my_pending;
2980 		} else {
2981 			pending = p->migration_pending;
2982 			refcount_inc(&pending->refs);
2983 			/*
2984 			 * Affinity has changed, but we've already installed a
2985 			 * pending. migration_cpu_stop() *must* see this, else
2986 			 * we risk a completion of the pending despite having a
2987 			 * task on a disallowed CPU.
2988 			 *
2989 			 * Serialized by p->pi_lock, so this is safe.
2990 			 */
2991 			pending->arg.dest_cpu = dest_cpu;
2992 		}
2993 	}
2994 	pending = p->migration_pending;
2995 	/*
2996 	 * - !MIGRATE_ENABLE:
2997 	 *   we'll have installed a pending if there wasn't one already.
2998 	 *
2999 	 * - MIGRATE_ENABLE:
3000 	 *   we're here because the current CPU isn't matching anymore,
3001 	 *   the only way that can happen is because of a concurrent
3002 	 *   set_cpus_allowed_ptr() call, which should then still be
3003 	 *   pending completion.
3004 	 *
3005 	 * Either way, we really should have a @pending here.
3006 	 */
3007 	if (WARN_ON_ONCE(!pending)) {
3008 		task_rq_unlock(rq, p, rf);
3009 		return -EINVAL;
3010 	}
3011 
3012 	if (task_on_cpu(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) {
3013 		/*
3014 		 * MIGRATE_ENABLE gets here because 'p == current', but for
3015 		 * anything else we cannot do is_migration_disabled(), punt
3016 		 * and have the stopper function handle it all race-free.
3017 		 */
3018 		stop_pending = pending->stop_pending;
3019 		if (!stop_pending)
3020 			pending->stop_pending = true;
3021 
3022 		if (flags & SCA_MIGRATE_ENABLE)
3023 			p->migration_flags &= ~MDF_PUSH;
3024 
3025 		preempt_disable();
3026 		task_rq_unlock(rq, p, rf);
3027 		if (!stop_pending) {
3028 			stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
3029 					    &pending->arg, &pending->stop_work);
3030 		}
3031 		preempt_enable();
3032 
3033 		if (flags & SCA_MIGRATE_ENABLE)
3034 			return 0;
3035 	} else {
3036 
3037 		if (!is_migration_disabled(p)) {
3038 			if (task_on_rq_queued(p))
3039 				rq = move_queued_task(rq, rf, p, dest_cpu);
3040 
3041 			if (!pending->stop_pending) {
3042 				p->migration_pending = NULL;
3043 				complete = true;
3044 			}
3045 		}
3046 		task_rq_unlock(rq, p, rf);
3047 
3048 		if (complete)
3049 			complete_all(&pending->done);
3050 	}
3051 
3052 	wait_for_completion(&pending->done);
3053 
3054 	if (refcount_dec_and_test(&pending->refs))
3055 		wake_up_var(&pending->refs); /* No UaF, just an address */
3056 
3057 	/*
3058 	 * Block the original owner of &pending until all subsequent callers
3059 	 * have seen the completion and decremented the refcount
3060 	 */
3061 	wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs));
3062 
3063 	/* ARGH */
3064 	WARN_ON_ONCE(my_pending.stop_pending);
3065 
3066 	return 0;
3067 }
3068 
3069 /*
3070  * Called with both p->pi_lock and rq->lock held; drops both before returning.
3071  */
3072 static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
3073 					 struct affinity_context *ctx,
3074 					 struct rq *rq,
3075 					 struct rq_flags *rf)
3076 	__releases(__rq_lockp(rq), &p->pi_lock)
3077 {
3078 	const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
3079 	const struct cpumask *cpu_valid_mask = cpu_active_mask;
3080 	bool kthread = p->flags & PF_KTHREAD;
3081 	unsigned int dest_cpu;
3082 	int ret = 0;
3083 
3084 	if (kthread || is_migration_disabled(p)) {
3085 		/*
3086 		 * Kernel threads are allowed on online && !active CPUs,
3087 		 * however, during cpu-hot-unplug, even these might get pushed
3088 		 * away if not KTHREAD_IS_PER_CPU.
3089 		 *
3090 		 * Specifically, migration_disabled() tasks must not fail the
3091 		 * cpumask_any_and_distribute() pick below, esp. so on
3092 		 * SCA_MIGRATE_ENABLE, otherwise we'll not call
3093 		 * set_cpus_allowed_common() and actually reset p->cpus_ptr.
3094 		 */
3095 		cpu_valid_mask = cpu_online_mask;
3096 	}
3097 
3098 	if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) {
3099 		ret = -EINVAL;
3100 		goto out;
3101 	}
3102 
3103 	/*
3104 	 * Must re-check here, to close a race against __kthread_bind(),
3105 	 * sched_setaffinity() is not guaranteed to observe the flag.
3106 	 */
3107 	if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
3108 		ret = -EINVAL;
3109 		goto out;
3110 	}
3111 
3112 	if (!(ctx->flags & SCA_MIGRATE_ENABLE)) {
3113 		if (cpumask_equal(&p->cpus_mask, ctx->new_mask)) {
3114 			if (ctx->flags & SCA_USER)
3115 				swap(p->user_cpus_ptr, ctx->user_mask);
3116 			goto out;
3117 		}
3118 
3119 		if (WARN_ON_ONCE(p == current &&
3120 				 is_migration_disabled(p) &&
3121 				 !cpumask_test_cpu(task_cpu(p), ctx->new_mask))) {
3122 			ret = -EBUSY;
3123 			goto out;
3124 		}
3125 	}
3126 
3127 	/*
3128 	 * Picking a ~random cpu helps in cases where we are changing affinity
3129 	 * for groups of tasks (ie. cpuset), so that load balancing is not
3130 	 * immediately required to distribute the tasks within their new mask.
3131 	 */
3132 	dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, ctx->new_mask);
3133 	if (dest_cpu >= nr_cpu_ids) {
3134 		ret = -EINVAL;
3135 		goto out;
3136 	}
3137 
3138 	do_set_cpus_allowed(p, ctx);
3139 
3140 	return affine_move_task(rq, p, rf, dest_cpu, ctx->flags);
3141 
3142 out:
3143 	task_rq_unlock(rq, p, rf);
3144 
3145 	return ret;
3146 }
3147 
3148 /*
3149  * Change a given task's CPU affinity. Migrate the thread to a
3150  * proper CPU and schedule it away if the CPU it's executing on
3151  * is removed from the allowed bitmask.
3152  *
3153  * NOTE: the caller must have a valid reference to the task, the
3154  * task must not exit() & deallocate itself prematurely. The
3155  * call is not atomic; no spinlocks may be held.
3156  */
3157 int __set_cpus_allowed_ptr(struct task_struct *p, struct affinity_context *ctx)
3158 {
3159 	struct rq_flags rf;
3160 	struct rq *rq;
3161 
3162 	rq = task_rq_lock(p, &rf);
3163 	/*
3164 	 * Masking should be skipped if SCA_USER or any of the SCA_MIGRATE_*
3165 	 * flags are set.
3166 	 */
3167 	if (p->user_cpus_ptr &&
3168 	    !(ctx->flags & (SCA_USER | SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) &&
3169 	    cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr))
3170 		ctx->new_mask = rq->scratch_mask;
3171 
3172 	return __set_cpus_allowed_ptr_locked(p, ctx, rq, &rf);
3173 }
3174 
3175 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
3176 {
3177 	struct affinity_context ac = {
3178 		.new_mask  = new_mask,
3179 		.flags     = 0,
3180 	};
3181 
3182 	return __set_cpus_allowed_ptr(p, &ac);
3183 }
3184 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
3185 
3186 /*
3187  * Change a given task's CPU affinity to the intersection of its current
3188  * affinity mask and @subset_mask, writing the resulting mask to @new_mask.
3189  * If user_cpus_ptr is defined, use it as the basis for restricting CPU
3190  * affinity or use cpu_online_mask instead.
3191  *
3192  * If the resulting mask is empty, leave the affinity unchanged and return
3193  * -EINVAL.
3194  */
3195 static int restrict_cpus_allowed_ptr(struct task_struct *p,
3196 				     struct cpumask *new_mask,
3197 				     const struct cpumask *subset_mask)
3198 {
3199 	struct affinity_context ac = {
3200 		.new_mask  = new_mask,
3201 		.flags     = 0,
3202 	};
3203 	struct rq_flags rf;
3204 	struct rq *rq;
3205 	int err;
3206 
3207 	rq = task_rq_lock(p, &rf);
3208 
3209 	/*
3210 	 * Forcefully restricting the affinity of a deadline task is
3211 	 * likely to cause problems, so fail and noisily override the
3212 	 * mask entirely.
3213 	 */
3214 	if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
3215 		err = -EPERM;
3216 		goto err_unlock;
3217 	}
3218 
3219 	if (!cpumask_and(new_mask, task_user_cpus(p), subset_mask)) {
3220 		err = -EINVAL;
3221 		goto err_unlock;
3222 	}
3223 
3224 	return __set_cpus_allowed_ptr_locked(p, &ac, rq, &rf);
3225 
3226 err_unlock:
3227 	task_rq_unlock(rq, p, &rf);
3228 	return err;
3229 }
3230 
3231 /*
3232  * Restrict the CPU affinity of task @p so that it is a subset of
3233  * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
3234  * old affinity mask. If the resulting mask is empty, we warn and walk
3235  * up the cpuset hierarchy until we find a suitable mask.
3236  */
3237 void force_compatible_cpus_allowed_ptr(struct task_struct *p)
3238 {
3239 	cpumask_var_t new_mask;
3240 	const struct cpumask *override_mask = task_cpu_possible_mask(p);
3241 
3242 	alloc_cpumask_var(&new_mask, GFP_KERNEL);
3243 
3244 	/*
3245 	 * __migrate_task() can fail silently in the face of concurrent
3246 	 * offlining of the chosen destination CPU, so take the hotplug
3247 	 * lock to ensure that the migration succeeds.
3248 	 */
3249 	cpus_read_lock();
3250 	if (!cpumask_available(new_mask))
3251 		goto out_set_mask;
3252 
3253 	if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask))
3254 		goto out_free_mask;
3255 
3256 	/*
3257 	 * We failed to find a valid subset of the affinity mask for the
3258 	 * task, so override it based on its cpuset hierarchy.
3259 	 */
3260 	cpuset_cpus_allowed(p, new_mask);
3261 	override_mask = new_mask;
3262 
3263 out_set_mask:
3264 	if (printk_ratelimit()) {
3265 		printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
3266 				task_pid_nr(p), p->comm,
3267 				cpumask_pr_args(override_mask));
3268 	}
3269 
3270 	WARN_ON(set_cpus_allowed_ptr(p, override_mask));
3271 out_free_mask:
3272 	cpus_read_unlock();
3273 	free_cpumask_var(new_mask);
3274 }
3275 
3276 /*
3277  * Restore the affinity of a task @p which was previously restricted by a
3278  * call to force_compatible_cpus_allowed_ptr().
3279  *
3280  * It is the caller's responsibility to serialise this with any calls to
3281  * force_compatible_cpus_allowed_ptr(@p).
3282  */
3283 void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
3284 {
3285 	struct affinity_context ac = {
3286 		.new_mask  = task_user_cpus(p),
3287 		.flags     = 0,
3288 	};
3289 	int ret;
3290 
3291 	/*
3292 	 * Try to restore the old affinity mask with __sched_setaffinity().
3293 	 * Cpuset masking will be done there too.
3294 	 */
3295 	ret = __sched_setaffinity(p, &ac);
3296 	WARN_ON_ONCE(ret);
3297 }
3298 
3299 #ifdef CONFIG_SMP
3300 
3301 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
3302 {
3303 	unsigned int state = READ_ONCE(p->__state);
3304 
3305 	/*
3306 	 * We should never call set_task_cpu() on a blocked task,
3307 	 * ttwu() will sort out the placement.
3308 	 */
3309 	WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq);
3310 
3311 	/*
3312 	 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
3313 	 * because schedstat_wait_{start,end} rebase migrating task's wait_start
3314 	 * time relying on p->on_rq.
3315 	 */
3316 	WARN_ON_ONCE(state == TASK_RUNNING &&
3317 		     p->sched_class == &fair_sched_class &&
3318 		     (p->on_rq && !task_on_rq_migrating(p)));
3319 
3320 #ifdef CONFIG_LOCKDEP
3321 	/*
3322 	 * The caller should hold either p->pi_lock or rq->lock, when changing
3323 	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
3324 	 *
3325 	 * sched_move_task() holds both and thus holding either pins the cgroup,
3326 	 * see task_group().
3327 	 *
3328 	 * Furthermore, all task_rq users should acquire both locks, see
3329 	 * task_rq_lock().
3330 	 */
3331 	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
3332 				      lockdep_is_held(__rq_lockp(task_rq(p)))));
3333 #endif
3334 	/*
3335 	 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
3336 	 */
3337 	WARN_ON_ONCE(!cpu_online(new_cpu));
3338 
3339 	WARN_ON_ONCE(is_migration_disabled(p));
3340 
3341 	trace_sched_migrate_task(p, new_cpu);
3342 
3343 	if (task_cpu(p) != new_cpu) {
3344 		if (p->sched_class->migrate_task_rq)
3345 			p->sched_class->migrate_task_rq(p, new_cpu);
3346 		p->se.nr_migrations++;
3347 		perf_event_task_migrate(p);
3348 	}
3349 
3350 	__set_task_cpu(p, new_cpu);
3351 }
3352 #endif /* CONFIG_SMP */
3353 
3354 #ifdef CONFIG_NUMA_BALANCING
3355 static void __migrate_swap_task(struct task_struct *p, int cpu)
3356 {
3357 	if (task_on_rq_queued(p)) {
3358 		struct rq *src_rq, *dst_rq;
3359 		struct rq_flags srf, drf;
3360 
3361 		src_rq = task_rq(p);
3362 		dst_rq = cpu_rq(cpu);
3363 
3364 		rq_pin_lock(src_rq, &srf);
3365 		rq_pin_lock(dst_rq, &drf);
3366 
3367 		move_queued_task_locked(src_rq, dst_rq, p);
3368 		wakeup_preempt(dst_rq, p, 0);
3369 
3370 		rq_unpin_lock(dst_rq, &drf);
3371 		rq_unpin_lock(src_rq, &srf);
3372 
3373 	} else {
3374 		/*
3375 		 * Task isn't running anymore; make it appear like we migrated
3376 		 * it before it went to sleep. This means on wakeup we make the
3377 		 * previous CPU our target instead of where it really is.
3378 		 */
3379 		p->wake_cpu = cpu;
3380 	}
3381 }
3382 
3383 struct migration_swap_arg {
3384 	struct task_struct *src_task, *dst_task;
3385 	int src_cpu, dst_cpu;
3386 };
3387 
3388 static int migrate_swap_stop(void *data)
3389 {
3390 	struct migration_swap_arg *arg = data;
3391 	struct rq *src_rq, *dst_rq;
3392 
3393 	if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
3394 		return -EAGAIN;
3395 
3396 	src_rq = cpu_rq(arg->src_cpu);
3397 	dst_rq = cpu_rq(arg->dst_cpu);
3398 
3399 	guard(double_raw_spinlock)(&arg->src_task->pi_lock, &arg->dst_task->pi_lock);
3400 	guard(double_rq_lock)(src_rq, dst_rq);
3401 
3402 	if (task_cpu(arg->dst_task) != arg->dst_cpu)
3403 		return -EAGAIN;
3404 
3405 	if (task_cpu(arg->src_task) != arg->src_cpu)
3406 		return -EAGAIN;
3407 
3408 	if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr))
3409 		return -EAGAIN;
3410 
3411 	if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr))
3412 		return -EAGAIN;
3413 
3414 	__migrate_swap_task(arg->src_task, arg->dst_cpu);
3415 	__migrate_swap_task(arg->dst_task, arg->src_cpu);
3416 
3417 	return 0;
3418 }
3419 
3420 /*
3421  * Cross migrate two tasks
3422  */
3423 int migrate_swap(struct task_struct *cur, struct task_struct *p,
3424 		int target_cpu, int curr_cpu)
3425 {
3426 	struct migration_swap_arg arg;
3427 	int ret = -EINVAL;
3428 
3429 	arg = (struct migration_swap_arg){
3430 		.src_task = cur,
3431 		.src_cpu = curr_cpu,
3432 		.dst_task = p,
3433 		.dst_cpu = target_cpu,
3434 	};
3435 
3436 	if (arg.src_cpu == arg.dst_cpu)
3437 		goto out;
3438 
3439 	/*
3440 	 * These three tests are all lockless; this is OK since all of them
3441 	 * will be re-checked with proper locks held further down the line.
3442 	 */
3443 	if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
3444 		goto out;
3445 
3446 	if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr))
3447 		goto out;
3448 
3449 	if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr))
3450 		goto out;
3451 
3452 	trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
3453 	ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);
3454 
3455 out:
3456 	return ret;
3457 }
3458 #endif /* CONFIG_NUMA_BALANCING */
3459 
3460 /***
3461  * kick_process - kick a running thread to enter/exit the kernel
3462  * @p: the to-be-kicked thread
3463  *
3464  * Cause a process which is running on another CPU to enter
3465  * kernel-mode, without any delay. (to get signals handled.)
3466  *
3467  * NOTE: this function doesn't have to take the runqueue lock,
3468  * because all it wants to ensure is that the remote task enters
3469  * the kernel. If the IPI races and the task has been migrated
3470  * to another CPU then no harm is done and the purpose has been
3471  * achieved as well.
3472  */
3473 void kick_process(struct task_struct *p)
3474 {
3475 	guard(preempt)();
3476 	int cpu = task_cpu(p);
3477 
3478 	if ((cpu != smp_processor_id()) && task_curr(p))
3479 		smp_send_reschedule(cpu);
3480 }
3481 EXPORT_SYMBOL_GPL(kick_process);
3482 
3483 /*
3484  * ->cpus_ptr is protected by both rq->lock and p->pi_lock
3485  *
3486  * A few notes on cpu_active vs cpu_online:
3487  *
3488  *  - cpu_active must be a subset of cpu_online
3489  *
3490  *  - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
3491  *    see __set_cpus_allowed_ptr(). At this point the newly online
3492  *    CPU isn't yet part of the sched domains, and balancing will not
3493  *    see it.
3494  *
3495  *  - on CPU-down we clear cpu_active() to mask the sched domains and
3496  *    avoid the load balancer to place new tasks on the to be removed
3497  *    CPU. Existing tasks will remain running there and will be taken
3498  *    off.
3499  *
3500  * This means that fallback selection must not select !active CPUs.
3501  * And can assume that any active CPU must be online. Conversely
3502  * select_task_rq() below may allow selection of !active CPUs in order
3503  * to satisfy the above rules.
3504  */
3505 static int select_fallback_rq(int cpu, struct task_struct *p)
3506 {
3507 	int nid = cpu_to_node(cpu);
3508 	const struct cpumask *nodemask = NULL;
3509 	enum { cpuset, possible, fail } state = cpuset;
3510 	int dest_cpu;
3511 
3512 	/*
3513 	 * If the node that the CPU is on has been offlined, cpu_to_node()
3514 	 * will return -1. There is no CPU on the node, and we should
3515 	 * select the CPU on the other node.
3516 	 */
3517 	if (nid != -1) {
3518 		nodemask = cpumask_of_node(nid);
3519 
3520 		/* Look for allowed, online CPU in same node. */
3521 		for_each_cpu(dest_cpu, nodemask) {
3522 			if (is_cpu_allowed(p, dest_cpu))
3523 				return dest_cpu;
3524 		}
3525 	}
3526 
3527 	for (;;) {
3528 		/* Any allowed, online CPU? */
3529 		for_each_cpu(dest_cpu, p->cpus_ptr) {
3530 			if (!is_cpu_allowed(p, dest_cpu))
3531 				continue;
3532 
3533 			goto out;
3534 		}
3535 
3536 		/* No more Mr. Nice Guy. */
3537 		switch (state) {
3538 		case cpuset:
3539 			if (cpuset_cpus_allowed_fallback(p)) {
3540 				state = possible;
3541 				break;
3542 			}
3543 			fallthrough;
3544 		case possible:
3545 			set_cpus_allowed_force(p, task_cpu_fallback_mask(p));
3546 			state = fail;
3547 			break;
3548 		case fail:
3549 			BUG();
3550 			break;
3551 		}
3552 	}
3553 
3554 out:
3555 	if (state != cpuset) {
3556 		/*
3557 		 * Don't tell them about moving exiting tasks or
3558 		 * kernel threads (both mm NULL), since they never
3559 		 * leave kernel.
3560 		 */
3561 		if (p->mm && printk_ratelimit()) {
3562 			printk_deferred("process %d (%s) no longer affine to cpu%d\n",
3563 					task_pid_nr(p), p->comm, cpu);
3564 		}
3565 	}
3566 
3567 	return dest_cpu;
3568 }
3569 
3570 /*
3571  * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
3572  */
3573 static inline
3574 int select_task_rq(struct task_struct *p, int cpu, int *wake_flags)
3575 {
3576 	lockdep_assert_held(&p->pi_lock);
3577 
3578 	if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p)) {
3579 		cpu = p->sched_class->select_task_rq(p, cpu, *wake_flags);
3580 		*wake_flags |= WF_RQ_SELECTED;
3581 	} else {
3582 		cpu = cpumask_any(p->cpus_ptr);
3583 	}
3584 
3585 	/*
3586 	 * In order not to call set_task_cpu() on a blocking task we need
3587 	 * to rely on ttwu() to place the task on a valid ->cpus_ptr
3588 	 * CPU.
3589 	 *
3590 	 * Since this is common to all placement strategies, this lives here.
3591 	 *
3592 	 * [ this allows ->select_task() to simply return task_cpu(p) and
3593 	 *   not worry about this generic constraint ]
3594 	 */
3595 	if (unlikely(!is_cpu_allowed(p, cpu)))
3596 		cpu = select_fallback_rq(task_cpu(p), p);
3597 
3598 	return cpu;
3599 }
3600 
3601 void sched_set_stop_task(int cpu, struct task_struct *stop)
3602 {
3603 	static struct lock_class_key stop_pi_lock;
3604 	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
3605 	struct task_struct *old_stop = cpu_rq(cpu)->stop;
3606 
3607 	if (stop) {
3608 		/*
3609 		 * Make it appear like a SCHED_FIFO task, its something
3610 		 * userspace knows about and won't get confused about.
3611 		 *
3612 		 * Also, it will make PI more or less work without too
3613 		 * much confusion -- but then, stop work should not
3614 		 * rely on PI working anyway.
3615 		 */
3616 		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
3617 
3618 		stop->sched_class = &stop_sched_class;
3619 
3620 		/*
3621 		 * The PI code calls rt_mutex_setprio() with ->pi_lock held to
3622 		 * adjust the effective priority of a task. As a result,
3623 		 * rt_mutex_setprio() can trigger (RT) balancing operations,
3624 		 * which can then trigger wakeups of the stop thread to push
3625 		 * around the current task.
3626 		 *
3627 		 * The stop task itself will never be part of the PI-chain, it
3628 		 * never blocks, therefore that ->pi_lock recursion is safe.
3629 		 * Tell lockdep about this by placing the stop->pi_lock in its
3630 		 * own class.
3631 		 */
3632 		lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
3633 	}
3634 
3635 	cpu_rq(cpu)->stop = stop;
3636 
3637 	if (old_stop) {
3638 		/*
3639 		 * Reset it back to a normal scheduling class so that
3640 		 * it can die in pieces.
3641 		 */
3642 		old_stop->sched_class = &rt_sched_class;
3643 	}
3644 }
3645 
3646 static void
3647 ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
3648 {
3649 	struct rq *rq;
3650 
3651 	if (!schedstat_enabled())
3652 		return;
3653 
3654 	rq = this_rq();
3655 
3656 	if (cpu == rq->cpu) {
3657 		__schedstat_inc(rq->ttwu_local);
3658 		__schedstat_inc(p->stats.nr_wakeups_local);
3659 	} else {
3660 		struct sched_domain *sd;
3661 
3662 		__schedstat_inc(p->stats.nr_wakeups_remote);
3663 
3664 		guard(rcu)();
3665 		for_each_domain(rq->cpu, sd) {
3666 			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
3667 				__schedstat_inc(sd->ttwu_wake_remote);
3668 				break;
3669 			}
3670 		}
3671 	}
3672 
3673 	if (wake_flags & WF_MIGRATED)
3674 		__schedstat_inc(p->stats.nr_wakeups_migrate);
3675 
3676 	__schedstat_inc(rq->ttwu_count);
3677 	__schedstat_inc(p->stats.nr_wakeups);
3678 
3679 	if (wake_flags & WF_SYNC)
3680 		__schedstat_inc(p->stats.nr_wakeups_sync);
3681 }
3682 
3683 /*
3684  * Mark the task runnable.
3685  */
3686 static inline void ttwu_do_wakeup(struct task_struct *p)
3687 {
3688 	WRITE_ONCE(p->__state, TASK_RUNNING);
3689 	trace_sched_wakeup(p);
3690 }
3691 
3692 void update_rq_avg_idle(struct rq *rq)
3693 {
3694 	u64 delta = rq_clock(rq) - rq->idle_stamp;
3695 	u64 max = 2*rq->max_idle_balance_cost;
3696 
3697 	update_avg(&rq->avg_idle, delta);
3698 
3699 	if (rq->avg_idle > max)
3700 		rq->avg_idle = max;
3701 	rq->idle_stamp = 0;
3702 }
3703 
3704 static void
3705 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
3706 		 struct rq_flags *rf)
3707 {
3708 	int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
3709 
3710 	lockdep_assert_rq_held(rq);
3711 
3712 	if (p->sched_contributes_to_load)
3713 		rq->nr_uninterruptible--;
3714 
3715 	if (wake_flags & WF_RQ_SELECTED)
3716 		en_flags |= ENQUEUE_RQ_SELECTED;
3717 	if (wake_flags & WF_MIGRATED)
3718 		en_flags |= ENQUEUE_MIGRATED;
3719 	else
3720 	if (p->in_iowait) {
3721 		delayacct_blkio_end(p);
3722 		atomic_dec(&task_rq(p)->nr_iowait);
3723 	}
3724 
3725 	activate_task(rq, p, en_flags);
3726 	wakeup_preempt(rq, p, wake_flags);
3727 
3728 	ttwu_do_wakeup(p);
3729 
3730 	if (p->sched_class->task_woken) {
3731 		/*
3732 		 * Our task @p is fully woken up and running; so it's safe to
3733 		 * drop the rq->lock, hereafter rq is only used for statistics.
3734 		 */
3735 		rq_unpin_lock(rq, rf);
3736 		p->sched_class->task_woken(rq, p);
3737 		rq_repin_lock(rq, rf);
3738 	}
3739 }
3740 
3741 /*
3742  * Consider @p being inside a wait loop:
3743  *
3744  *   for (;;) {
3745  *      set_current_state(TASK_UNINTERRUPTIBLE);
3746  *
3747  *      if (CONDITION)
3748  *         break;
3749  *
3750  *      schedule();
3751  *   }
3752  *   __set_current_state(TASK_RUNNING);
3753  *
3754  * between set_current_state() and schedule(). In this case @p is still
3755  * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
3756  * an atomic manner.
3757  *
3758  * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
3759  * then schedule() must still happen and p->state can be changed to
3760  * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
3761  * need to do a full wakeup with enqueue.
3762  *
3763  * Returns: %true when the wakeup is done,
3764  *          %false otherwise.
3765  */
3766 static int ttwu_runnable(struct task_struct *p, int wake_flags)
3767 {
3768 	struct rq_flags rf;
3769 	struct rq *rq;
3770 	int ret = 0;
3771 
3772 	rq = __task_rq_lock(p, &rf);
3773 	if (task_on_rq_queued(p)) {
3774 		update_rq_clock(rq);
3775 		if (p->se.sched_delayed)
3776 			enqueue_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_DELAYED);
3777 		if (!task_on_cpu(rq, p)) {
3778 			/*
3779 			 * When on_rq && !on_cpu the task is preempted, see if
3780 			 * it should preempt the task that is current now.
3781 			 */
3782 			wakeup_preempt(rq, p, wake_flags);
3783 		}
3784 		ttwu_do_wakeup(p);
3785 		ret = 1;
3786 	}
3787 	__task_rq_unlock(rq, p, &rf);
3788 
3789 	return ret;
3790 }
3791 
3792 void sched_ttwu_pending(void *arg)
3793 {
3794 	struct llist_node *llist = arg;
3795 	struct rq *rq = this_rq();
3796 	struct task_struct *p, *t;
3797 	struct rq_flags rf;
3798 
3799 	if (!llist)
3800 		return;
3801 
3802 	rq_lock_irqsave(rq, &rf);
3803 	update_rq_clock(rq);
3804 
3805 	llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
3806 		if (WARN_ON_ONCE(p->on_cpu))
3807 			smp_cond_load_acquire(&p->on_cpu, !VAL);
3808 
3809 		if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
3810 			set_task_cpu(p, cpu_of(rq));
3811 
3812 		ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf);
3813 	}
3814 
3815 	/*
3816 	 * Must be after enqueueing at least once task such that
3817 	 * idle_cpu() does not observe a false-negative -- if it does,
3818 	 * it is possible for select_idle_siblings() to stack a number
3819 	 * of tasks on this CPU during that window.
3820 	 *
3821 	 * It is OK to clear ttwu_pending when another task pending.
3822 	 * We will receive IPI after local IRQ enabled and then enqueue it.
3823 	 * Since now nr_running > 0, idle_cpu() will always get correct result.
3824 	 */
3825 	WRITE_ONCE(rq->ttwu_pending, 0);
3826 	rq_unlock_irqrestore(rq, &rf);
3827 }
3828 
3829 /*
3830  * Prepare the scene for sending an IPI for a remote smp_call
3831  *
3832  * Returns true if the caller can proceed with sending the IPI.
3833  * Returns false otherwise.
3834  */
3835 bool call_function_single_prep_ipi(int cpu)
3836 {
3837 	if (set_nr_if_polling(cpu_rq(cpu)->idle)) {
3838 		trace_sched_wake_idle_without_ipi(cpu);
3839 		return false;
3840 	}
3841 
3842 	return true;
3843 }
3844 
3845 /*
3846  * Queue a task on the target CPUs wake_list and wake the CPU via IPI if
3847  * necessary. The wakee CPU on receipt of the IPI will queue the task
3848  * via sched_ttwu_wakeup() for activation so the wakee incurs the cost
3849  * of the wakeup instead of the waker.
3850  */
3851 static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3852 {
3853 	struct rq *rq = cpu_rq(cpu);
3854 
3855 	p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
3856 
3857 	WRITE_ONCE(rq->ttwu_pending, 1);
3858 #ifdef CONFIG_SMP
3859 	__smp_call_single_queue(cpu, &p->wake_entry.llist);
3860 #endif
3861 }
3862 
3863 void wake_up_if_idle(int cpu)
3864 {
3865 	struct rq *rq = cpu_rq(cpu);
3866 
3867 	guard(rcu)();
3868 	if (is_idle_task(rcu_dereference(rq->curr))) {
3869 		guard(rq_lock_irqsave)(rq);
3870 		if (is_idle_task(rq->curr))
3871 			resched_curr(rq);
3872 	}
3873 }
3874 
3875 bool cpus_equal_capacity(int this_cpu, int that_cpu)
3876 {
3877 	if (!sched_asym_cpucap_active())
3878 		return true;
3879 
3880 	if (this_cpu == that_cpu)
3881 		return true;
3882 
3883 	return arch_scale_cpu_capacity(this_cpu) == arch_scale_cpu_capacity(that_cpu);
3884 }
3885 
3886 bool cpus_share_cache(int this_cpu, int that_cpu)
3887 {
3888 	if (this_cpu == that_cpu)
3889 		return true;
3890 
3891 	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
3892 }
3893 
3894 /*
3895  * Whether CPUs are share cache resources, which means LLC on non-cluster
3896  * machines and LLC tag or L2 on machines with clusters.
3897  */
3898 bool cpus_share_resources(int this_cpu, int that_cpu)
3899 {
3900 	if (this_cpu == that_cpu)
3901 		return true;
3902 
3903 	return per_cpu(sd_share_id, this_cpu) == per_cpu(sd_share_id, that_cpu);
3904 }
3905 
3906 static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
3907 {
3908 	/* See SCX_OPS_ALLOW_QUEUED_WAKEUP. */
3909 	if (!scx_allow_ttwu_queue(p))
3910 		return false;
3911 
3912 #ifdef CONFIG_SMP
3913 	if (p->sched_class == &stop_sched_class)
3914 		return false;
3915 #endif
3916 
3917 	/*
3918 	 * Do not complicate things with the async wake_list while the CPU is
3919 	 * in hotplug state.
3920 	 */
3921 	if (!cpu_active(cpu))
3922 		return false;
3923 
3924 	/* Ensure the task will still be allowed to run on the CPU. */
3925 	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
3926 		return false;
3927 
3928 	/*
3929 	 * If the CPU does not share cache, then queue the task on the
3930 	 * remote rqs wakelist to avoid accessing remote data.
3931 	 */
3932 	if (!cpus_share_cache(smp_processor_id(), cpu))
3933 		return true;
3934 
3935 	if (cpu == smp_processor_id())
3936 		return false;
3937 
3938 	/*
3939 	 * If the wakee cpu is idle, or the task is descheduling and the
3940 	 * only running task on the CPU, then use the wakelist to offload
3941 	 * the task activation to the idle (or soon-to-be-idle) CPU as
3942 	 * the current CPU is likely busy. nr_running is checked to
3943 	 * avoid unnecessary task stacking.
3944 	 *
3945 	 * Note that we can only get here with (wakee) p->on_rq=0,
3946 	 * p->on_cpu can be whatever, we've done the dequeue, so
3947 	 * the wakee has been accounted out of ->nr_running.
3948 	 */
3949 	if (!cpu_rq(cpu)->nr_running)
3950 		return true;
3951 
3952 	return false;
3953 }
3954 
3955 static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3956 {
3957 	if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) {
3958 		sched_clock_cpu(cpu); /* Sync clocks across CPUs */
3959 		__ttwu_queue_wakelist(p, cpu, wake_flags);
3960 		return true;
3961 	}
3962 
3963 	return false;
3964 }
3965 
3966 static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
3967 {
3968 	struct rq *rq = cpu_rq(cpu);
3969 	struct rq_flags rf;
3970 
3971 	if (ttwu_queue_wakelist(p, cpu, wake_flags))
3972 		return;
3973 
3974 	rq_lock(rq, &rf);
3975 	update_rq_clock(rq);
3976 	ttwu_do_activate(rq, p, wake_flags, &rf);
3977 	rq_unlock(rq, &rf);
3978 }
3979 
3980 /*
3981  * Invoked from try_to_wake_up() to check whether the task can be woken up.
3982  *
3983  * The caller holds p::pi_lock if p != current or has preemption
3984  * disabled when p == current.
3985  *
3986  * The rules of saved_state:
3987  *
3988  *   The related locking code always holds p::pi_lock when updating
3989  *   p::saved_state, which means the code is fully serialized in both cases.
3990  *
3991  *   For PREEMPT_RT, the lock wait and lock wakeups happen via TASK_RTLOCK_WAIT.
3992  *   No other bits set. This allows to distinguish all wakeup scenarios.
3993  *
3994  *   For FREEZER, the wakeup happens via TASK_FROZEN. No other bits set. This
3995  *   allows us to prevent early wakeup of tasks before they can be run on
3996  *   asymmetric ISA architectures (eg ARMv9).
3997  */
3998 static __always_inline
3999 bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
4000 {
4001 	int match;
4002 
4003 	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
4004 		WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) &&
4005 			     state != TASK_RTLOCK_WAIT);
4006 	}
4007 
4008 	*success = !!(match = __task_state_match(p, state));
4009 
4010 	/*
4011 	 * Saved state preserves the task state across blocking on
4012 	 * an RT lock or TASK_FREEZABLE tasks.  If the state matches,
4013 	 * set p::saved_state to TASK_RUNNING, but do not wake the task
4014 	 * because it waits for a lock wakeup or __thaw_task(). Also
4015 	 * indicate success because from the regular waker's point of
4016 	 * view this has succeeded.
4017 	 *
4018 	 * After acquiring the lock the task will restore p::__state
4019 	 * from p::saved_state which ensures that the regular
4020 	 * wakeup is not lost. The restore will also set
4021 	 * p::saved_state to TASK_RUNNING so any further tests will
4022 	 * not result in false positives vs. @success
4023 	 */
4024 	if (match < 0)
4025 		p->saved_state = TASK_RUNNING;
4026 
4027 	return match > 0;
4028 }
4029 
4030 /*
4031  * Notes on Program-Order guarantees on SMP systems.
4032  *
4033  *  MIGRATION
4034  *
4035  * The basic program-order guarantee on SMP systems is that when a task [t]
4036  * migrates, all its activity on its old CPU [c0] happens-before any subsequent
4037  * execution on its new CPU [c1].
4038  *
4039  * For migration (of runnable tasks) this is provided by the following means:
4040  *
4041  *  A) UNLOCK of the rq(c0)->lock scheduling out task t
4042  *  B) migration for t is required to synchronize *both* rq(c0)->lock and
4043  *     rq(c1)->lock (if not at the same time, then in that order).
4044  *  C) LOCK of the rq(c1)->lock scheduling in task
4045  *
4046  * Release/acquire chaining guarantees that B happens after A and C after B.
4047  * Note: the CPU doing B need not be c0 or c1
4048  *
4049  * Example:
4050  *
4051  *   CPU0            CPU1            CPU2
4052  *
4053  *   LOCK rq(0)->lock
4054  *   sched-out X
4055  *   sched-in Y
4056  *   UNLOCK rq(0)->lock
4057  *
4058  *                                   LOCK rq(0)->lock // orders against CPU0
4059  *                                   dequeue X
4060  *                                   UNLOCK rq(0)->lock
4061  *
4062  *                                   LOCK rq(1)->lock
4063  *                                   enqueue X
4064  *                                   UNLOCK rq(1)->lock
4065  *
4066  *                   LOCK rq(1)->lock // orders against CPU2
4067  *                   sched-out Z
4068  *                   sched-in X
4069  *                   UNLOCK rq(1)->lock
4070  *
4071  *
4072  *  BLOCKING -- aka. SLEEP + WAKEUP
4073  *
4074  * For blocking we (obviously) need to provide the same guarantee as for
4075  * migration. However the means are completely different as there is no lock
4076  * chain to provide order. Instead we do:
4077  *
4078  *   1) smp_store_release(X->on_cpu, 0)   -- finish_task()
4079  *   2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
4080  *
4081  * Example:
4082  *
4083  *   CPU0 (schedule)  CPU1 (try_to_wake_up) CPU2 (schedule)
4084  *
4085  *   LOCK rq(0)->lock LOCK X->pi_lock
4086  *   dequeue X
4087  *   sched-out X
4088  *   smp_store_release(X->on_cpu, 0);
4089  *
4090  *                    smp_cond_load_acquire(&X->on_cpu, !VAL);
4091  *                    X->state = WAKING
4092  *                    set_task_cpu(X,2)
4093  *
4094  *                    LOCK rq(2)->lock
4095  *                    enqueue X
4096  *                    X->state = RUNNING
4097  *                    UNLOCK rq(2)->lock
4098  *
4099  *                                          LOCK rq(2)->lock // orders against CPU1
4100  *                                          sched-out Z
4101  *                                          sched-in X
4102  *                                          UNLOCK rq(2)->lock
4103  *
4104  *                    UNLOCK X->pi_lock
4105  *   UNLOCK rq(0)->lock
4106  *
4107  *
4108  * However, for wakeups there is a second guarantee we must provide, namely we
4109  * must ensure that CONDITION=1 done by the caller can not be reordered with
4110  * accesses to the task state; see try_to_wake_up() and set_current_state().
4111  */
4112 
4113 /**
4114  * try_to_wake_up - wake up a thread
4115  * @p: the thread to be awakened
4116  * @state: the mask of task states that can be woken
4117  * @wake_flags: wake modifier flags (WF_*)
4118  *
4119  * Conceptually does:
4120  *
4121  *   If (@state & @p->state) @p->state = TASK_RUNNING.
4122  *
4123  * If the task was not queued/runnable, also place it back on a runqueue.
4124  *
4125  * This function is atomic against schedule() which would dequeue the task.
4126  *
4127  * It issues a full memory barrier before accessing @p->state, see the comment
4128  * with set_current_state().
4129  *
4130  * Uses p->pi_lock to serialize against concurrent wake-ups.
4131  *
4132  * Relies on p->pi_lock stabilizing:
4133  *  - p->sched_class
4134  *  - p->cpus_ptr
4135  *  - p->sched_task_group
4136  * in order to do migration, see its use of select_task_rq()/set_task_cpu().
4137  *
4138  * Tries really hard to only take one task_rq(p)->lock for performance.
4139  * Takes rq->lock in:
4140  *  - ttwu_runnable()    -- old rq, unavoidable, see comment there;
4141  *  - ttwu_queue()       -- new rq, for enqueue of the task;
4142  *  - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
4143  *
4144  * As a consequence we race really badly with just about everything. See the
4145  * many memory barriers and their comments for details.
4146  *
4147  * Return: %true if @p->state changes (an actual wakeup was done),
4148  *	   %false otherwise.
4149  */
4150 int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
4151 {
4152 	guard(preempt)();
4153 	int cpu, success = 0;
4154 
4155 	wake_flags |= WF_TTWU;
4156 
4157 	if (p == current) {
4158 		/*
4159 		 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
4160 		 * == smp_processor_id()'. Together this means we can special
4161 		 * case the whole 'p->on_rq && ttwu_runnable()' case below
4162 		 * without taking any locks.
4163 		 *
4164 		 * Specifically, given current runs ttwu() we must be before
4165 		 * schedule()'s block_task(), as such this must not observe
4166 		 * sched_delayed.
4167 		 *
4168 		 * In particular:
4169 		 *  - we rely on Program-Order guarantees for all the ordering,
4170 		 *  - we're serialized against set_special_state() by virtue of
4171 		 *    it disabling IRQs (this allows not taking ->pi_lock).
4172 		 */
4173 		WARN_ON_ONCE(p->se.sched_delayed);
4174 		if (!ttwu_state_match(p, state, &success))
4175 			goto out;
4176 
4177 		trace_sched_waking(p);
4178 		ttwu_do_wakeup(p);
4179 		goto out;
4180 	}
4181 
4182 	/*
4183 	 * If we are going to wake up a thread waiting for CONDITION we
4184 	 * need to ensure that CONDITION=1 done by the caller can not be
4185 	 * reordered with p->state check below. This pairs with smp_store_mb()
4186 	 * in set_current_state() that the waiting thread does.
4187 	 */
4188 	scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
4189 		smp_mb__after_spinlock();
4190 		if (!ttwu_state_match(p, state, &success))
4191 			break;
4192 
4193 		trace_sched_waking(p);
4194 
4195 		/*
4196 		 * Ensure we load p->on_rq _after_ p->state, otherwise it would
4197 		 * be possible to, falsely, observe p->on_rq == 0 and get stuck
4198 		 * in smp_cond_load_acquire() below.
4199 		 *
4200 		 * sched_ttwu_pending()			try_to_wake_up()
4201 		 *   STORE p->on_rq = 1			  LOAD p->state
4202 		 *   UNLOCK rq->lock
4203 		 *
4204 		 * __schedule() (switch to task 'p')
4205 		 *   LOCK rq->lock			  smp_rmb();
4206 		 *   smp_mb__after_spinlock();
4207 		 *   UNLOCK rq->lock
4208 		 *
4209 		 * [task p]
4210 		 *   STORE p->state = UNINTERRUPTIBLE	  LOAD p->on_rq
4211 		 *
4212 		 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4213 		 * __schedule().  See the comment for smp_mb__after_spinlock().
4214 		 *
4215 		 * A similar smp_rmb() lives in __task_needs_rq_lock().
4216 		 */
4217 		smp_rmb();
4218 		if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
4219 			break;
4220 
4221 		/*
4222 		 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
4223 		 * possible to, falsely, observe p->on_cpu == 0.
4224 		 *
4225 		 * One must be running (->on_cpu == 1) in order to remove oneself
4226 		 * from the runqueue.
4227 		 *
4228 		 * __schedule() (switch to task 'p')	try_to_wake_up()
4229 		 *   STORE p->on_cpu = 1		  LOAD p->on_rq
4230 		 *   UNLOCK rq->lock
4231 		 *
4232 		 * __schedule() (put 'p' to sleep)
4233 		 *   LOCK rq->lock			  smp_rmb();
4234 		 *   smp_mb__after_spinlock();
4235 		 *   STORE p->on_rq = 0			  LOAD p->on_cpu
4236 		 *
4237 		 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4238 		 * __schedule().  See the comment for smp_mb__after_spinlock().
4239 		 *
4240 		 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
4241 		 * schedule()'s block_task() has 'happened' and p will no longer
4242 		 * care about it's own p->state. See the comment in __schedule().
4243 		 */
4244 		smp_acquire__after_ctrl_dep();
4245 
4246 		/*
4247 		 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
4248 		 * == 0), which means we need to do an enqueue, change p->state to
4249 		 * TASK_WAKING such that we can unlock p->pi_lock before doing the
4250 		 * enqueue, such as ttwu_queue_wakelist().
4251 		 */
4252 		WRITE_ONCE(p->__state, TASK_WAKING);
4253 
4254 		/*
4255 		 * If the owning (remote) CPU is still in the middle of schedule() with
4256 		 * this task as prev, considering queueing p on the remote CPUs wake_list
4257 		 * which potentially sends an IPI instead of spinning on p->on_cpu to
4258 		 * let the waker make forward progress. This is safe because IRQs are
4259 		 * disabled and the IPI will deliver after on_cpu is cleared.
4260 		 *
4261 		 * Ensure we load task_cpu(p) after p->on_cpu:
4262 		 *
4263 		 * set_task_cpu(p, cpu);
4264 		 *   STORE p->cpu = @cpu
4265 		 * __schedule() (switch to task 'p')
4266 		 *   LOCK rq->lock
4267 		 *   smp_mb__after_spin_lock()		smp_cond_load_acquire(&p->on_cpu)
4268 		 *   STORE p->on_cpu = 1		LOAD p->cpu
4269 		 *
4270 		 * to ensure we observe the correct CPU on which the task is currently
4271 		 * scheduling.
4272 		 */
4273 		if (smp_load_acquire(&p->on_cpu) &&
4274 		    ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
4275 			break;
4276 
4277 		/*
4278 		 * If the owning (remote) CPU is still in the middle of schedule() with
4279 		 * this task as prev, wait until it's done referencing the task.
4280 		 *
4281 		 * Pairs with the smp_store_release() in finish_task().
4282 		 *
4283 		 * This ensures that tasks getting woken will be fully ordered against
4284 		 * their previous state and preserve Program Order.
4285 		 */
4286 		smp_cond_load_acquire(&p->on_cpu, !VAL);
4287 
4288 		cpu = select_task_rq(p, p->wake_cpu, &wake_flags);
4289 		if (task_cpu(p) != cpu) {
4290 			if (p->in_iowait) {
4291 				delayacct_blkio_end(p);
4292 				atomic_dec(&task_rq(p)->nr_iowait);
4293 			}
4294 
4295 			wake_flags |= WF_MIGRATED;
4296 			psi_ttwu_dequeue(p);
4297 			set_task_cpu(p, cpu);
4298 		}
4299 
4300 		ttwu_queue(p, cpu, wake_flags);
4301 	}
4302 out:
4303 	if (success)
4304 		ttwu_stat(p, task_cpu(p), wake_flags);
4305 
4306 	return success;
4307 }
4308 
4309 static bool __task_needs_rq_lock(struct task_struct *p)
4310 {
4311 	unsigned int state = READ_ONCE(p->__state);
4312 
4313 	/*
4314 	 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when
4315 	 * the task is blocked. Make sure to check @state since ttwu() can drop
4316 	 * locks at the end, see ttwu_queue_wakelist().
4317 	 */
4318 	if (state == TASK_RUNNING || state == TASK_WAKING)
4319 		return true;
4320 
4321 	/*
4322 	 * Ensure we load p->on_rq after p->__state, otherwise it would be
4323 	 * possible to, falsely, observe p->on_rq == 0.
4324 	 *
4325 	 * See try_to_wake_up() for a longer comment.
4326 	 */
4327 	smp_rmb();
4328 	if (p->on_rq)
4329 		return true;
4330 
4331 	/*
4332 	 * Ensure the task has finished __schedule() and will not be referenced
4333 	 * anymore. Again, see try_to_wake_up() for a longer comment.
4334 	 */
4335 	smp_rmb();
4336 	smp_cond_load_acquire(&p->on_cpu, !VAL);
4337 
4338 	return false;
4339 }
4340 
4341 /**
4342  * task_call_func - Invoke a function on task in fixed state
4343  * @p: Process for which the function is to be invoked, can be @current.
4344  * @func: Function to invoke.
4345  * @arg: Argument to function.
4346  *
4347  * Fix the task in it's current state by avoiding wakeups and or rq operations
4348  * and call @func(@arg) on it.  This function can use task_is_runnable() and
4349  * task_curr() to work out what the state is, if required.  Given that @func
4350  * can be invoked with a runqueue lock held, it had better be quite
4351  * lightweight.
4352  *
4353  * Returns:
4354  *   Whatever @func returns
4355  */
4356 int task_call_func(struct task_struct *p, task_call_f func, void *arg)
4357 {
4358 	struct rq_flags rf;
4359 	int ret;
4360 
4361 	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
4362 
4363 	if (__task_needs_rq_lock(p)) {
4364 		struct rq *rq = __task_rq_lock(p, &rf);
4365 
4366 		/*
4367 		 * At this point the task is pinned; either:
4368 		 *  - blocked and we're holding off wakeups	 (pi->lock)
4369 		 *  - woken, and we're holding off enqueue	 (rq->lock)
4370 		 *  - queued, and we're holding off schedule	 (rq->lock)
4371 		 *  - running, and we're holding off de-schedule (rq->lock)
4372 		 *
4373 		 * The called function (@func) can use: task_curr(), p->on_rq and
4374 		 * p->__state to differentiate between these states.
4375 		 */
4376 		ret = func(p, arg);
4377 
4378 		__task_rq_unlock(rq, p, &rf);
4379 	} else {
4380 		ret = func(p, arg);
4381 	}
4382 
4383 	raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
4384 	return ret;
4385 }
4386 
4387 /**
4388  * cpu_curr_snapshot - Return a snapshot of the currently running task
4389  * @cpu: The CPU on which to snapshot the task.
4390  *
4391  * Returns the task_struct pointer of the task "currently" running on
4392  * the specified CPU.
4393  *
4394  * If the specified CPU was offline, the return value is whatever it
4395  * is, perhaps a pointer to the task_struct structure of that CPU's idle
4396  * task, but there is no guarantee.  Callers wishing a useful return
4397  * value must take some action to ensure that the specified CPU remains
4398  * online throughout.
4399  *
4400  * This function executes full memory barriers before and after fetching
4401  * the pointer, which permits the caller to confine this function's fetch
4402  * with respect to the caller's accesses to other shared variables.
4403  */
4404 struct task_struct *cpu_curr_snapshot(int cpu)
4405 {
4406 	struct rq *rq = cpu_rq(cpu);
4407 	struct task_struct *t;
4408 	struct rq_flags rf;
4409 
4410 	rq_lock_irqsave(rq, &rf);
4411 	smp_mb__after_spinlock(); /* Pairing determined by caller's synchronization design. */
4412 	t = rcu_dereference(cpu_curr(cpu));
4413 	rq_unlock_irqrestore(rq, &rf);
4414 	smp_mb(); /* Pairing determined by caller's synchronization design. */
4415 
4416 	return t;
4417 }
4418 
4419 /**
4420  * wake_up_process - Wake up a specific process
4421  * @p: The process to be woken up.
4422  *
4423  * Attempt to wake up the nominated process and move it to the set of runnable
4424  * processes.
4425  *
4426  * Return: 1 if the process was woken up, 0 if it was already running.
4427  *
4428  * This function executes a full memory barrier before accessing the task state.
4429  */
4430 int wake_up_process(struct task_struct *p)
4431 {
4432 	return try_to_wake_up(p, TASK_NORMAL, 0);
4433 }
4434 EXPORT_SYMBOL(wake_up_process);
4435 
4436 int wake_up_state(struct task_struct *p, unsigned int state)
4437 {
4438 	return try_to_wake_up(p, state, 0);
4439 }
4440 
4441 /*
4442  * Perform scheduler related setup for a newly forked process p.
4443  * p is forked by current.
4444  *
4445  * __sched_fork() is basic setup which is also used by sched_init() to
4446  * initialize the boot CPU's idle task.
4447  */
4448 static void __sched_fork(u64 clone_flags, struct task_struct *p)
4449 {
4450 	p->on_rq			= 0;
4451 
4452 	p->se.on_rq			= 0;
4453 	p->se.exec_start		= 0;
4454 	p->se.sum_exec_runtime		= 0;
4455 	p->se.prev_sum_exec_runtime	= 0;
4456 	p->se.nr_migrations		= 0;
4457 	p->se.vruntime			= 0;
4458 	p->se.vlag			= 0;
4459 	INIT_LIST_HEAD(&p->se.group_node);
4460 
4461 	/* A delayed task cannot be in clone(). */
4462 	WARN_ON_ONCE(p->se.sched_delayed);
4463 
4464 #ifdef CONFIG_FAIR_GROUP_SCHED
4465 	p->se.cfs_rq			= NULL;
4466 #ifdef CONFIG_CFS_BANDWIDTH
4467 	init_cfs_throttle_work(p);
4468 #endif
4469 #endif
4470 
4471 #ifdef CONFIG_SCHEDSTATS
4472 	/* Even if schedstat is disabled, there should not be garbage */
4473 	memset(&p->stats, 0, sizeof(p->stats));
4474 #endif
4475 
4476 	init_dl_entity(&p->dl);
4477 
4478 	INIT_LIST_HEAD(&p->rt.run_list);
4479 	p->rt.timeout		= 0;
4480 	p->rt.time_slice	= sched_rr_timeslice;
4481 	p->rt.on_rq		= 0;
4482 	p->rt.on_list		= 0;
4483 
4484 #ifdef CONFIG_SCHED_CLASS_EXT
4485 	init_scx_entity(&p->scx);
4486 #endif
4487 
4488 #ifdef CONFIG_PREEMPT_NOTIFIERS
4489 	INIT_HLIST_HEAD(&p->preempt_notifiers);
4490 #endif
4491 
4492 #ifdef CONFIG_COMPACTION
4493 	p->capture_control = NULL;
4494 #endif
4495 	init_numa_balancing(clone_flags, p);
4496 	p->wake_entry.u_flags = CSD_TYPE_TTWU;
4497 	p->migration_pending = NULL;
4498 }
4499 
4500 DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);
4501 
4502 #ifdef CONFIG_NUMA_BALANCING
4503 
4504 int sysctl_numa_balancing_mode;
4505 
4506 static void __set_numabalancing_state(bool enabled)
4507 {
4508 	if (enabled)
4509 		static_branch_enable(&sched_numa_balancing);
4510 	else
4511 		static_branch_disable(&sched_numa_balancing);
4512 }
4513 
4514 void set_numabalancing_state(bool enabled)
4515 {
4516 	if (enabled)
4517 		sysctl_numa_balancing_mode = NUMA_BALANCING_NORMAL;
4518 	else
4519 		sysctl_numa_balancing_mode = NUMA_BALANCING_DISABLED;
4520 	__set_numabalancing_state(enabled);
4521 }
4522 
4523 #ifdef CONFIG_PROC_SYSCTL
4524 static void reset_memory_tiering(void)
4525 {
4526 	struct pglist_data *pgdat;
4527 
4528 	for_each_online_pgdat(pgdat) {
4529 		pgdat->nbp_threshold = 0;
4530 		pgdat->nbp_th_nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE);
4531 		pgdat->nbp_th_start = jiffies_to_msecs(jiffies);
4532 	}
4533 }
4534 
4535 static int sysctl_numa_balancing(const struct ctl_table *table, int write,
4536 			  void *buffer, size_t *lenp, loff_t *ppos)
4537 {
4538 	struct ctl_table t;
4539 	int err;
4540 	int state = sysctl_numa_balancing_mode;
4541 
4542 	if (write && !capable(CAP_SYS_ADMIN))
4543 		return -EPERM;
4544 
4545 	t = *table;
4546 	t.data = &state;
4547 	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
4548 	if (err < 0)
4549 		return err;
4550 	if (write) {
4551 		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) &&
4552 		    (state & NUMA_BALANCING_MEMORY_TIERING))
4553 			reset_memory_tiering();
4554 		sysctl_numa_balancing_mode = state;
4555 		__set_numabalancing_state(state);
4556 	}
4557 	return err;
4558 }
4559 #endif /* CONFIG_PROC_SYSCTL */
4560 #endif /* CONFIG_NUMA_BALANCING */
4561 
4562 #ifdef CONFIG_SCHEDSTATS
4563 
4564 DEFINE_STATIC_KEY_FALSE(sched_schedstats);
4565 
4566 static void set_schedstats(bool enabled)
4567 {
4568 	if (enabled)
4569 		static_branch_enable(&sched_schedstats);
4570 	else
4571 		static_branch_disable(&sched_schedstats);
4572 }
4573 
4574 void force_schedstat_enabled(void)
4575 {
4576 	if (!schedstat_enabled()) {
4577 		pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
4578 		static_branch_enable(&sched_schedstats);
4579 	}
4580 }
4581 
4582 static int __init setup_schedstats(char *str)
4583 {
4584 	int ret = 0;
4585 	if (!str)
4586 		goto out;
4587 
4588 	if (!strcmp(str, "enable")) {
4589 		set_schedstats(true);
4590 		ret = 1;
4591 	} else if (!strcmp(str, "disable")) {
4592 		set_schedstats(false);
4593 		ret = 1;
4594 	}
4595 out:
4596 	if (!ret)
4597 		pr_warn("Unable to parse schedstats=\n");
4598 
4599 	return ret;
4600 }
4601 __setup("schedstats=", setup_schedstats);
4602 
4603 #ifdef CONFIG_PROC_SYSCTL
4604 static int sysctl_schedstats(const struct ctl_table *table, int write, void *buffer,
4605 		size_t *lenp, loff_t *ppos)
4606 {
4607 	struct ctl_table t;
4608 	int err;
4609 	int state = static_branch_likely(&sched_schedstats);
4610 
4611 	if (write && !capable(CAP_SYS_ADMIN))
4612 		return -EPERM;
4613 
4614 	t = *table;
4615 	t.data = &state;
4616 	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
4617 	if (err < 0)
4618 		return err;
4619 	if (write)
4620 		set_schedstats(state);
4621 	return err;
4622 }
4623 #endif /* CONFIG_PROC_SYSCTL */
4624 #endif /* CONFIG_SCHEDSTATS */
4625 
4626 #ifdef CONFIG_SYSCTL
4627 static const struct ctl_table sched_core_sysctls[] = {
4628 #ifdef CONFIG_SCHEDSTATS
4629 	{
4630 		.procname       = "sched_schedstats",
4631 		.data           = NULL,
4632 		.maxlen         = sizeof(unsigned int),
4633 		.mode           = 0644,
4634 		.proc_handler   = sysctl_schedstats,
4635 		.extra1         = SYSCTL_ZERO,
4636 		.extra2         = SYSCTL_ONE,
4637 	},
4638 #endif /* CONFIG_SCHEDSTATS */
4639 #ifdef CONFIG_UCLAMP_TASK
4640 	{
4641 		.procname       = "sched_util_clamp_min",
4642 		.data           = &sysctl_sched_uclamp_util_min,
4643 		.maxlen         = sizeof(unsigned int),
4644 		.mode           = 0644,
4645 		.proc_handler   = sysctl_sched_uclamp_handler,
4646 	},
4647 	{
4648 		.procname       = "sched_util_clamp_max",
4649 		.data           = &sysctl_sched_uclamp_util_max,
4650 		.maxlen         = sizeof(unsigned int),
4651 		.mode           = 0644,
4652 		.proc_handler   = sysctl_sched_uclamp_handler,
4653 	},
4654 	{
4655 		.procname       = "sched_util_clamp_min_rt_default",
4656 		.data           = &sysctl_sched_uclamp_util_min_rt_default,
4657 		.maxlen         = sizeof(unsigned int),
4658 		.mode           = 0644,
4659 		.proc_handler   = sysctl_sched_uclamp_handler,
4660 	},
4661 #endif /* CONFIG_UCLAMP_TASK */
4662 #ifdef CONFIG_NUMA_BALANCING
4663 	{
4664 		.procname	= "numa_balancing",
4665 		.data		= NULL, /* filled in by handler */
4666 		.maxlen		= sizeof(unsigned int),
4667 		.mode		= 0644,
4668 		.proc_handler	= sysctl_numa_balancing,
4669 		.extra1		= SYSCTL_ZERO,
4670 		.extra2		= SYSCTL_FOUR,
4671 	},
4672 #endif /* CONFIG_NUMA_BALANCING */
4673 };
4674 static int __init sched_core_sysctl_init(void)
4675 {
4676 	register_sysctl_init("kernel", sched_core_sysctls);
4677 	return 0;
4678 }
4679 late_initcall(sched_core_sysctl_init);
4680 #endif /* CONFIG_SYSCTL */
4681 
4682 /*
4683  * fork()/clone()-time setup:
4684  */
4685 int sched_fork(u64 clone_flags, struct task_struct *p)
4686 {
4687 	__sched_fork(clone_flags, p);
4688 	/*
4689 	 * We mark the process as NEW here. This guarantees that
4690 	 * nobody will actually run it, and a signal or other external
4691 	 * event cannot wake it up and insert it on the runqueue either.
4692 	 */
4693 	p->__state = TASK_NEW;
4694 
4695 	/*
4696 	 * Make sure we do not leak PI boosting priority to the child.
4697 	 */
4698 	p->prio = current->normal_prio;
4699 
4700 	uclamp_fork(p);
4701 
4702 	/*
4703 	 * Revert to default priority/policy on fork if requested.
4704 	 */
4705 	if (unlikely(p->sched_reset_on_fork)) {
4706 		if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
4707 			p->policy = SCHED_NORMAL;
4708 			p->static_prio = NICE_TO_PRIO(0);
4709 			p->rt_priority = 0;
4710 		} else if (PRIO_TO_NICE(p->static_prio) < 0)
4711 			p->static_prio = NICE_TO_PRIO(0);
4712 
4713 		p->prio = p->normal_prio = p->static_prio;
4714 		set_load_weight(p, false);
4715 		p->se.custom_slice = 0;
4716 		p->se.slice = sysctl_sched_base_slice;
4717 
4718 		/*
4719 		 * We don't need the reset flag anymore after the fork. It has
4720 		 * fulfilled its duty:
4721 		 */
4722 		p->sched_reset_on_fork = 0;
4723 	}
4724 
4725 	if (dl_prio(p->prio))
4726 		return -EAGAIN;
4727 
4728 	scx_pre_fork(p);
4729 
4730 	if (rt_prio(p->prio)) {
4731 		p->sched_class = &rt_sched_class;
4732 #ifdef CONFIG_SCHED_CLASS_EXT
4733 	} else if (task_should_scx(p->policy)) {
4734 		p->sched_class = &ext_sched_class;
4735 #endif
4736 	} else {
4737 		p->sched_class = &fair_sched_class;
4738 	}
4739 
4740 	init_entity_runnable_average(&p->se);
4741 
4742 
4743 #ifdef CONFIG_SCHED_INFO
4744 	if (likely(sched_info_on()))
4745 		memset(&p->sched_info, 0, sizeof(p->sched_info));
4746 #endif
4747 	p->on_cpu = 0;
4748 	init_task_preempt_count(p);
4749 	plist_node_init(&p->pushable_tasks, MAX_PRIO);
4750 	RB_CLEAR_NODE(&p->pushable_dl_tasks);
4751 
4752 	return 0;
4753 }
4754 
4755 int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
4756 {
4757 	unsigned long flags;
4758 
4759 	/*
4760 	 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
4761 	 * required yet, but lockdep gets upset if rules are violated.
4762 	 */
4763 	raw_spin_lock_irqsave(&p->pi_lock, flags);
4764 #ifdef CONFIG_CGROUP_SCHED
4765 	if (1) {
4766 		struct task_group *tg;
4767 		tg = container_of(kargs->cset->subsys[cpu_cgrp_id],
4768 				  struct task_group, css);
4769 		tg = autogroup_task_group(p, tg);
4770 		p->sched_task_group = tg;
4771 	}
4772 #endif
4773 	/*
4774 	 * We're setting the CPU for the first time, we don't migrate,
4775 	 * so use __set_task_cpu().
4776 	 */
4777 	__set_task_cpu(p, smp_processor_id());
4778 	if (p->sched_class->task_fork)
4779 		p->sched_class->task_fork(p);
4780 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4781 
4782 	return scx_fork(p);
4783 }
4784 
4785 void sched_cancel_fork(struct task_struct *p)
4786 {
4787 	scx_cancel_fork(p);
4788 }
4789 
4790 static void sched_mm_cid_fork(struct task_struct *t);
4791 
4792 void sched_post_fork(struct task_struct *p)
4793 {
4794 	sched_mm_cid_fork(p);
4795 	uclamp_post_fork(p);
4796 	scx_post_fork(p);
4797 }
4798 
4799 unsigned long to_ratio(u64 period, u64 runtime)
4800 {
4801 	if (runtime == RUNTIME_INF)
4802 		return BW_UNIT;
4803 
4804 	/*
4805 	 * Doing this here saves a lot of checks in all
4806 	 * the calling paths, and returning zero seems
4807 	 * safe for them anyway.
4808 	 */
4809 	if (period == 0)
4810 		return 0;
4811 
4812 	return div64_u64(runtime << BW_SHIFT, period);
4813 }
4814 
4815 /*
4816  * wake_up_new_task - wake up a newly created task for the first time.
4817  *
4818  * This function will do some initial scheduler statistics housekeeping
4819  * that must be done for every newly created context, then puts the task
4820  * on the runqueue and wakes it.
4821  */
4822 void wake_up_new_task(struct task_struct *p)
4823 {
4824 	struct rq_flags rf;
4825 	struct rq *rq;
4826 	int wake_flags = WF_FORK;
4827 
4828 	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
4829 	WRITE_ONCE(p->__state, TASK_RUNNING);
4830 	/*
4831 	 * Fork balancing, do it here and not earlier because:
4832 	 *  - cpus_ptr can change in the fork path
4833 	 *  - any previously selected CPU might disappear through hotplug
4834 	 *
4835 	 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
4836 	 * as we're not fully set-up yet.
4837 	 */
4838 	p->recent_used_cpu = task_cpu(p);
4839 	__set_task_cpu(p, select_task_rq(p, task_cpu(p), &wake_flags));
4840 	rq = __task_rq_lock(p, &rf);
4841 	update_rq_clock(rq);
4842 	post_init_entity_util_avg(p);
4843 
4844 	activate_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_INITIAL);
4845 	trace_sched_wakeup_new(p);
4846 	wakeup_preempt(rq, p, wake_flags);
4847 	if (p->sched_class->task_woken) {
4848 		/*
4849 		 * Nothing relies on rq->lock after this, so it's fine to
4850 		 * drop it.
4851 		 */
4852 		rq_unpin_lock(rq, &rf);
4853 		p->sched_class->task_woken(rq, p);
4854 		rq_repin_lock(rq, &rf);
4855 	}
4856 	task_rq_unlock(rq, p, &rf);
4857 }
4858 
4859 #ifdef CONFIG_PREEMPT_NOTIFIERS
4860 
4861 static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
4862 
4863 void preempt_notifier_inc(void)
4864 {
4865 	static_branch_inc(&preempt_notifier_key);
4866 }
4867 EXPORT_SYMBOL_GPL(preempt_notifier_inc);
4868 
4869 void preempt_notifier_dec(void)
4870 {
4871 	static_branch_dec(&preempt_notifier_key);
4872 }
4873 EXPORT_SYMBOL_GPL(preempt_notifier_dec);
4874 
4875 /**
4876  * preempt_notifier_register - tell me when current is being preempted & rescheduled
4877  * @notifier: notifier struct to register
4878  */
4879 void preempt_notifier_register(struct preempt_notifier *notifier)
4880 {
4881 	if (!static_branch_unlikely(&preempt_notifier_key))
4882 		WARN(1, "registering preempt_notifier while notifiers disabled\n");
4883 
4884 	hlist_add_head(&notifier->link, &current->preempt_notifiers);
4885 }
4886 EXPORT_SYMBOL_GPL(preempt_notifier_register);
4887 
4888 /**
4889  * preempt_notifier_unregister - no longer interested in preemption notifications
4890  * @notifier: notifier struct to unregister
4891  *
4892  * This is *not* safe to call from within a preemption notifier.
4893  */
4894 void preempt_notifier_unregister(struct preempt_notifier *notifier)
4895 {
4896 	hlist_del(&notifier->link);
4897 }
4898 EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
4899 
4900 static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
4901 {
4902 	struct preempt_notifier *notifier;
4903 
4904 	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
4905 		notifier->ops->sched_in(notifier, raw_smp_processor_id());
4906 }
4907 
4908 static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
4909 {
4910 	if (static_branch_unlikely(&preempt_notifier_key))
4911 		__fire_sched_in_preempt_notifiers(curr);
4912 }
4913 
4914 static void
4915 __fire_sched_out_preempt_notifiers(struct task_struct *curr,
4916 				   struct task_struct *next)
4917 {
4918 	struct preempt_notifier *notifier;
4919 
4920 	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
4921 		notifier->ops->sched_out(notifier, next);
4922 }
4923 
4924 static __always_inline void
4925 fire_sched_out_preempt_notifiers(struct task_struct *curr,
4926 				 struct task_struct *next)
4927 {
4928 	if (static_branch_unlikely(&preempt_notifier_key))
4929 		__fire_sched_out_preempt_notifiers(curr, next);
4930 }
4931 
4932 #else /* !CONFIG_PREEMPT_NOTIFIERS: */
4933 
4934 static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
4935 {
4936 }
4937 
4938 static inline void
4939 fire_sched_out_preempt_notifiers(struct task_struct *curr,
4940 				 struct task_struct *next)
4941 {
4942 }
4943 
4944 #endif /* !CONFIG_PREEMPT_NOTIFIERS */
4945 
4946 static inline void prepare_task(struct task_struct *next)
4947 {
4948 	/*
4949 	 * Claim the task as running, we do this before switching to it
4950 	 * such that any running task will have this set.
4951 	 *
4952 	 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and
4953 	 * its ordering comment.
4954 	 */
4955 	WRITE_ONCE(next->on_cpu, 1);
4956 }
4957 
4958 static inline void finish_task(struct task_struct *prev)
4959 {
4960 	/*
4961 	 * This must be the very last reference to @prev from this CPU. After
4962 	 * p->on_cpu is cleared, the task can be moved to a different CPU. We
4963 	 * must ensure this doesn't happen until the switch is completely
4964 	 * finished.
4965 	 *
4966 	 * In particular, the load of prev->state in finish_task_switch() must
4967 	 * happen before this.
4968 	 *
4969 	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
4970 	 */
4971 	smp_store_release(&prev->on_cpu, 0);
4972 }
4973 
4974 static void do_balance_callbacks(struct rq *rq, struct balance_callback *head)
4975 {
4976 	void (*func)(struct rq *rq);
4977 	struct balance_callback *next;
4978 
4979 	lockdep_assert_rq_held(rq);
4980 
4981 	while (head) {
4982 		func = (void (*)(struct rq *))head->func;
4983 		next = head->next;
4984 		head->next = NULL;
4985 		head = next;
4986 
4987 		func(rq);
4988 	}
4989 }
4990 
4991 static void balance_push(struct rq *rq);
4992 
4993 /*
4994  * balance_push_callback is a right abuse of the callback interface and plays
4995  * by significantly different rules.
4996  *
4997  * Where the normal balance_callback's purpose is to be ran in the same context
4998  * that queued it (only later, when it's safe to drop rq->lock again),
4999  * balance_push_callback is specifically targeted at __schedule().
5000  *
5001  * This abuse is tolerated because it places all the unlikely/odd cases behind
5002  * a single test, namely: rq->balance_callback == NULL.
5003  */
5004 struct balance_callback balance_push_callback = {
5005 	.next = NULL,
5006 	.func = balance_push,
5007 };
5008 
5009 static inline struct balance_callback *
5010 __splice_balance_callbacks(struct rq *rq, bool split)
5011 {
5012 	struct balance_callback *head = rq->balance_callback;
5013 
5014 	if (likely(!head))
5015 		return NULL;
5016 
5017 	lockdep_assert_rq_held(rq);
5018 	/*
5019 	 * Must not take balance_push_callback off the list when
5020 	 * splice_balance_callbacks() and balance_callbacks() are not
5021 	 * in the same rq->lock section.
5022 	 *
5023 	 * In that case it would be possible for __schedule() to interleave
5024 	 * and observe the list empty.
5025 	 */
5026 	if (split && head == &balance_push_callback)
5027 		head = NULL;
5028 	else
5029 		rq->balance_callback = NULL;
5030 
5031 	return head;
5032 }
5033 
5034 struct balance_callback *splice_balance_callbacks(struct rq *rq)
5035 {
5036 	return __splice_balance_callbacks(rq, true);
5037 }
5038 
5039 void __balance_callbacks(struct rq *rq, struct rq_flags *rf)
5040 {
5041 	if (rf)
5042 		rq_unpin_lock(rq, rf);
5043 	do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
5044 	if (rf)
5045 		rq_repin_lock(rq, rf);
5046 }
5047 
5048 void balance_callbacks(struct rq *rq, struct balance_callback *head)
5049 {
5050 	unsigned long flags;
5051 
5052 	if (unlikely(head)) {
5053 		raw_spin_rq_lock_irqsave(rq, flags);
5054 		do_balance_callbacks(rq, head);
5055 		raw_spin_rq_unlock_irqrestore(rq, flags);
5056 	}
5057 }
5058 
5059 static inline void
5060 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
5061 	__releases(__rq_lockp(rq))
5062 	__acquires(__rq_lockp(this_rq()))
5063 {
5064 	/*
5065 	 * Since the runqueue lock will be released by the next
5066 	 * task (which is an invalid locking op but in the case
5067 	 * of the scheduler it's an obvious special-case), so we
5068 	 * do an early lockdep release here:
5069 	 */
5070 	rq_unpin_lock(rq, rf);
5071 	spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_);
5072 #ifdef CONFIG_DEBUG_SPINLOCK
5073 	/* this is a valid case when another task releases the spinlock */
5074 	rq_lockp(rq)->owner = next;
5075 #endif
5076 	/*
5077 	 * Model the rq reference switcheroo.
5078 	 */
5079 	__release(__rq_lockp(rq));
5080 	__acquire(__rq_lockp(this_rq()));
5081 }
5082 
5083 static inline void finish_lock_switch(struct rq *rq)
5084 	__releases(__rq_lockp(rq))
5085 {
5086 	/*
5087 	 * If we are tracking spinlock dependencies then we have to
5088 	 * fix up the runqueue lock - which gets 'carried over' from
5089 	 * prev into current:
5090 	 */
5091 	spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_);
5092 	__balance_callbacks(rq, NULL);
5093 	hrtick_schedule_exit(rq);
5094 	raw_spin_rq_unlock_irq(rq);
5095 }
5096 
5097 /*
5098  * NOP if the arch has not defined these:
5099  */
5100 
5101 #ifndef prepare_arch_switch
5102 # define prepare_arch_switch(next)	do { } while (0)
5103 #endif
5104 
5105 #ifndef finish_arch_post_lock_switch
5106 # define finish_arch_post_lock_switch()	do { } while (0)
5107 #endif
5108 
5109 static inline void kmap_local_sched_out(void)
5110 {
5111 #ifdef CONFIG_KMAP_LOCAL
5112 	if (unlikely(current->kmap_ctrl.idx))
5113 		__kmap_local_sched_out();
5114 #endif
5115 }
5116 
5117 static inline void kmap_local_sched_in(void)
5118 {
5119 #ifdef CONFIG_KMAP_LOCAL
5120 	if (unlikely(current->kmap_ctrl.idx))
5121 		__kmap_local_sched_in();
5122 #endif
5123 }
5124 
5125 /**
5126  * prepare_task_switch - prepare to switch tasks
5127  * @rq: the runqueue preparing to switch
5128  * @prev: the current task that is being switched out
5129  * @next: the task we are going to switch to.
5130  *
5131  * This is called with the rq lock held and interrupts off. It must
5132  * be paired with a subsequent finish_task_switch after the context
5133  * switch.
5134  *
5135  * prepare_task_switch sets up locking and calls architecture specific
5136  * hooks.
5137  */
5138 static inline void
5139 prepare_task_switch(struct rq *rq, struct task_struct *prev,
5140 		    struct task_struct *next)
5141 	__must_hold(__rq_lockp(rq))
5142 {
5143 	kcov_prepare_switch(prev);
5144 	sched_info_switch(rq, prev, next);
5145 	perf_event_task_sched_out(prev, next);
5146 	fire_sched_out_preempt_notifiers(prev, next);
5147 	kmap_local_sched_out();
5148 	prepare_task(next);
5149 	prepare_arch_switch(next);
5150 }
5151 
5152 /**
5153  * finish_task_switch - clean up after a task-switch
5154  * @prev: the thread we just switched away from.
5155  *
5156  * finish_task_switch must be called after the context switch, paired
5157  * with a prepare_task_switch call before the context switch.
5158  * finish_task_switch will reconcile locking set up by prepare_task_switch,
5159  * and do any other architecture-specific cleanup actions.
5160  *
5161  * Note that we may have delayed dropping an mm in context_switch(). If
5162  * so, we finish that here outside of the runqueue lock. (Doing it
5163  * with the lock held can cause deadlocks; see schedule() for
5164  * details.)
5165  *
5166  * The context switch have flipped the stack from under us and restored the
5167  * local variables which were saved when this task called schedule() in the
5168  * past. 'prev == current' is still correct but we need to recalculate this_rq
5169  * because prev may have moved to another CPU.
5170  */
5171 static struct rq *finish_task_switch(struct task_struct *prev)
5172 	__releases(__rq_lockp(this_rq()))
5173 {
5174 	struct rq *rq = this_rq();
5175 	struct mm_struct *mm = rq->prev_mm;
5176 	unsigned int prev_state;
5177 
5178 	/*
5179 	 * The previous task will have left us with a preempt_count of 2
5180 	 * because it left us after:
5181 	 *
5182 	 *	schedule()
5183 	 *	  preempt_disable();			// 1
5184 	 *	  __schedule()
5185 	 *	    raw_spin_lock_irq(&rq->lock)	// 2
5186 	 *
5187 	 * Also, see FORK_PREEMPT_COUNT.
5188 	 */
5189 	if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
5190 		      "corrupted preempt_count: %s/%d/0x%x\n",
5191 		      current->comm, current->pid, preempt_count()))
5192 		preempt_count_set(FORK_PREEMPT_COUNT);
5193 
5194 	rq->prev_mm = NULL;
5195 
5196 	/*
5197 	 * A task struct has one reference for the use as "current".
5198 	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
5199 	 * schedule one last time. The schedule call will never return, and
5200 	 * the scheduled task must drop that reference.
5201 	 *
5202 	 * We must observe prev->state before clearing prev->on_cpu (in
5203 	 * finish_task), otherwise a concurrent wakeup can get prev
5204 	 * running on another CPU and we could rave with its RUNNING -> DEAD
5205 	 * transition, resulting in a double drop.
5206 	 */
5207 	prev_state = READ_ONCE(prev->__state);
5208 	vtime_task_switch(prev);
5209 	perf_event_task_sched_in(prev, current);
5210 	finish_task(prev);
5211 	tick_nohz_task_switch();
5212 	finish_lock_switch(rq);
5213 	finish_arch_post_lock_switch();
5214 	kcov_finish_switch(current);
5215 	/*
5216 	 * kmap_local_sched_out() is invoked with rq::lock held and
5217 	 * interrupts disabled. There is no requirement for that, but the
5218 	 * sched out code does not have an interrupt enabled section.
5219 	 * Restoring the maps on sched in does not require interrupts being
5220 	 * disabled either.
5221 	 */
5222 	kmap_local_sched_in();
5223 
5224 	fire_sched_in_preempt_notifiers(current);
5225 	/*
5226 	 * When switching through a kernel thread, the loop in
5227 	 * membarrier_{private,global}_expedited() may have observed that
5228 	 * kernel thread and not issued an IPI. It is therefore possible to
5229 	 * schedule between user->kernel->user threads without passing though
5230 	 * switch_mm(). Membarrier requires a barrier after storing to
5231 	 * rq->curr, before returning to userspace, so provide them here:
5232 	 *
5233 	 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
5234 	 *   provided by mmdrop_lazy_tlb(),
5235 	 * - a sync_core for SYNC_CORE.
5236 	 */
5237 	if (mm) {
5238 		membarrier_mm_sync_core_before_usermode(mm);
5239 		mmdrop_lazy_tlb_sched(mm);
5240 	}
5241 
5242 	if (unlikely(prev_state == TASK_DEAD)) {
5243 		if (prev->sched_class->task_dead)
5244 			prev->sched_class->task_dead(prev);
5245 
5246 		/*
5247 		 * sched_ext_dead() must come before cgroup_task_dead() to
5248 		 * prevent cgroups from being removed while its member tasks are
5249 		 * visible to SCX schedulers.
5250 		 */
5251 		sched_ext_dead(prev);
5252 		cgroup_task_dead(prev);
5253 
5254 		/* Task is done with its stack. */
5255 		put_task_stack(prev);
5256 
5257 		put_task_struct_rcu_user(prev);
5258 	}
5259 
5260 	return rq;
5261 }
5262 
5263 /**
5264  * schedule_tail - first thing a freshly forked thread must call.
5265  * @prev: the thread we just switched away from.
5266  */
5267 asmlinkage __visible void schedule_tail(struct task_struct *prev)
5268 	__releases(__rq_lockp(this_rq()))
5269 {
5270 	/*
5271 	 * New tasks start with FORK_PREEMPT_COUNT, see there and
5272 	 * finish_task_switch() for details.
5273 	 *
5274 	 * finish_task_switch() will drop rq->lock() and lower preempt_count
5275 	 * and the preempt_enable() will end up enabling preemption (on
5276 	 * PREEMPT_COUNT kernels).
5277 	 */
5278 
5279 	finish_task_switch(prev);
5280 	/*
5281 	 * This is a special case: the newly created task has just
5282 	 * switched the context for the first time. It is returning from
5283 	 * schedule for the first time in this path.
5284 	 */
5285 	trace_sched_exit_tp(true);
5286 	preempt_enable();
5287 
5288 	if (current->set_child_tid)
5289 		put_user(task_pid_vnr(current), current->set_child_tid);
5290 
5291 	calculate_sigpending();
5292 }
5293 
5294 /*
5295  * context_switch - switch to the new MM and the new thread's register state.
5296  */
5297 static __always_inline struct rq *
5298 context_switch(struct rq *rq, struct task_struct *prev,
5299 	       struct task_struct *next, struct rq_flags *rf)
5300 	__releases(__rq_lockp(rq))
5301 {
5302 	prepare_task_switch(rq, prev, next);
5303 
5304 	/*
5305 	 * For paravirt, this is coupled with an exit in switch_to to
5306 	 * combine the page table reload and the switch backend into
5307 	 * one hypercall.
5308 	 */
5309 	arch_start_context_switch(prev);
5310 
5311 	/*
5312 	 * kernel -> kernel   lazy + transfer active
5313 	 *   user -> kernel   lazy + mmgrab_lazy_tlb() active
5314 	 *
5315 	 * kernel ->   user   switch + mmdrop_lazy_tlb() active
5316 	 *   user ->   user   switch
5317 	 */
5318 	if (!next->mm) {				// to kernel
5319 		enter_lazy_tlb(prev->active_mm, next);
5320 
5321 		next->active_mm = prev->active_mm;
5322 		if (prev->mm)				// from user
5323 			mmgrab_lazy_tlb(prev->active_mm);
5324 		else
5325 			prev->active_mm = NULL;
5326 	} else {					// to user
5327 		membarrier_switch_mm(rq, prev->active_mm, next->mm);
5328 		/*
5329 		 * sys_membarrier() requires an smp_mb() between setting
5330 		 * rq->curr / membarrier_switch_mm() and returning to userspace.
5331 		 *
5332 		 * The below provides this either through switch_mm(), or in
5333 		 * case 'prev->active_mm == next->mm' through
5334 		 * finish_task_switch()'s mmdrop().
5335 		 */
5336 		switch_mm_irqs_off(prev->active_mm, next->mm, next);
5337 		lru_gen_use_mm(next->mm);
5338 
5339 		if (!prev->mm) {			// from kernel
5340 			/* will mmdrop_lazy_tlb() in finish_task_switch(). */
5341 			rq->prev_mm = prev->active_mm;
5342 			prev->active_mm = NULL;
5343 		}
5344 	}
5345 
5346 	mm_cid_switch_to(prev, next);
5347 
5348 	/*
5349 	 * Tell rseq that the task was scheduled in. Must be after
5350 	 * switch_mm_cid() to get the TIF flag set.
5351 	 */
5352 	rseq_sched_switch_event(next);
5353 
5354 	prepare_lock_switch(rq, next, rf);
5355 
5356 	/* Here we just switch the register state and the stack. */
5357 	switch_to(prev, next, prev);
5358 	barrier();
5359 
5360 	return finish_task_switch(prev);
5361 }
5362 
5363 /*
5364  * nr_running and nr_context_switches:
5365  *
5366  * externally visible scheduler statistics: current number of runnable
5367  * threads, total number of context switches performed since bootup.
5368  */
5369 unsigned int nr_running(void)
5370 {
5371 	unsigned int i, sum = 0;
5372 
5373 	for_each_online_cpu(i)
5374 		sum += cpu_rq(i)->nr_running;
5375 
5376 	return sum;
5377 }
5378 
5379 /*
5380  * Check if only the current task is running on the CPU.
5381  *
5382  * Caution: this function does not check that the caller has disabled
5383  * preemption, thus the result might have a time-of-check-to-time-of-use
5384  * race.  The caller is responsible to use it correctly, for example:
5385  *
5386  * - from a non-preemptible section (of course)
5387  *
5388  * - from a thread that is bound to a single CPU
5389  *
5390  * - in a loop with very short iterations (e.g. a polling loop)
5391  */
5392 bool single_task_running(void)
5393 {
5394 	return raw_rq()->nr_running == 1;
5395 }
5396 EXPORT_SYMBOL(single_task_running);
5397 
5398 unsigned long long nr_context_switches_cpu(int cpu)
5399 {
5400 	return cpu_rq(cpu)->nr_switches;
5401 }
5402 
5403 unsigned long long nr_context_switches(void)
5404 {
5405 	int i;
5406 	unsigned long long sum = 0;
5407 
5408 	for_each_possible_cpu(i)
5409 		sum += cpu_rq(i)->nr_switches;
5410 
5411 	return sum;
5412 }
5413 
5414 /*
5415  * Consumers of these two interfaces, like for example the cpuidle menu
5416  * governor, are using nonsensical data. Preferring shallow idle state selection
5417  * for a CPU that has IO-wait which might not even end up running the task when
5418  * it does become runnable.
5419  */
5420 
5421 unsigned int nr_iowait_cpu(int cpu)
5422 {
5423 	return atomic_read(&cpu_rq(cpu)->nr_iowait);
5424 }
5425 
5426 /*
5427  * IO-wait accounting, and how it's mostly bollocks (on SMP).
5428  *
5429  * The idea behind IO-wait account is to account the idle time that we could
5430  * have spend running if it were not for IO. That is, if we were to improve the
5431  * storage performance, we'd have a proportional reduction in IO-wait time.
5432  *
5433  * This all works nicely on UP, where, when a task blocks on IO, we account
5434  * idle time as IO-wait, because if the storage were faster, it could've been
5435  * running and we'd not be idle.
5436  *
5437  * This has been extended to SMP, by doing the same for each CPU. This however
5438  * is broken.
5439  *
5440  * Imagine for instance the case where two tasks block on one CPU, only the one
5441  * CPU will have IO-wait accounted, while the other has regular idle. Even
5442  * though, if the storage were faster, both could've ran at the same time,
5443  * utilising both CPUs.
5444  *
5445  * This means, that when looking globally, the current IO-wait accounting on
5446  * SMP is a lower bound, by reason of under accounting.
5447  *
5448  * Worse, since the numbers are provided per CPU, they are sometimes
5449  * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
5450  * associated with any one particular CPU, it can wake to another CPU than it
5451  * blocked on. This means the per CPU IO-wait number is meaningless.
5452  *
5453  * Task CPU affinities can make all that even more 'interesting'.
5454  */
5455 
5456 unsigned int nr_iowait(void)
5457 {
5458 	unsigned int i, sum = 0;
5459 
5460 	for_each_possible_cpu(i)
5461 		sum += nr_iowait_cpu(i);
5462 
5463 	return sum;
5464 }
5465 
5466 /*
5467  * sched_exec - execve() is a valuable balancing opportunity, because at
5468  * this point the task has the smallest effective memory and cache footprint.
5469  */
5470 void sched_exec(void)
5471 {
5472 	struct task_struct *p = current;
5473 	struct migration_arg arg;
5474 	int dest_cpu;
5475 
5476 	scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
5477 		dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC);
5478 		if (dest_cpu == smp_processor_id())
5479 			return;
5480 
5481 		if (unlikely(!cpu_active(dest_cpu)))
5482 			return;
5483 
5484 		arg = (struct migration_arg){ p, dest_cpu };
5485 	}
5486 	stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
5487 }
5488 
5489 DEFINE_PER_CPU(struct kernel_stat, kstat);
5490 DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
5491 
5492 EXPORT_PER_CPU_SYMBOL(kstat);
5493 EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
5494 
5495 /*
5496  * The function fair_sched_class.update_curr accesses the struct curr
5497  * and its field curr->exec_start; when called from task_sched_runtime(),
5498  * we observe a high rate of cache misses in practice.
5499  * Prefetching this data results in improved performance.
5500  */
5501 static inline void prefetch_curr_exec_start(struct task_struct *p)
5502 {
5503 #ifdef CONFIG_FAIR_GROUP_SCHED
5504 	struct sched_entity *curr = p->se.cfs_rq->curr;
5505 #else
5506 	struct sched_entity *curr = task_rq(p)->cfs.curr;
5507 #endif
5508 	prefetch(curr);
5509 	prefetch(&curr->exec_start);
5510 }
5511 
5512 /*
5513  * Return accounted runtime for the task.
5514  * In case the task is currently running, return the runtime plus current's
5515  * pending runtime that have not been accounted yet.
5516  */
5517 unsigned long long task_sched_runtime(struct task_struct *p)
5518 {
5519 	struct rq_flags rf;
5520 	struct rq *rq;
5521 	u64 ns;
5522 
5523 #ifdef CONFIG_64BIT
5524 	/*
5525 	 * 64-bit doesn't need locks to atomically read a 64-bit value.
5526 	 * So we have a optimization chance when the task's delta_exec is 0.
5527 	 * Reading ->on_cpu is racy, but this is OK.
5528 	 *
5529 	 * If we race with it leaving CPU, we'll take a lock. So we're correct.
5530 	 * If we race with it entering CPU, unaccounted time is 0. This is
5531 	 * indistinguishable from the read occurring a few cycles earlier.
5532 	 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
5533 	 * been accounted, so we're correct here as well.
5534 	 */
5535 	if (!p->on_cpu || !task_on_rq_queued(p))
5536 		return p->se.sum_exec_runtime;
5537 #endif
5538 
5539 	rq = task_rq_lock(p, &rf);
5540 	/*
5541 	 * Must be ->curr _and_ ->on_rq.  If dequeued, we would
5542 	 * project cycles that may never be accounted to this
5543 	 * thread, breaking clock_gettime().
5544 	 */
5545 	if (task_current_donor(rq, p) && task_on_rq_queued(p)) {
5546 		prefetch_curr_exec_start(p);
5547 		update_rq_clock(rq);
5548 		p->sched_class->update_curr(rq);
5549 	}
5550 	ns = p->se.sum_exec_runtime;
5551 	task_rq_unlock(rq, p, &rf);
5552 
5553 	return ns;
5554 }
5555 
5556 static u64 cpu_resched_latency(struct rq *rq)
5557 {
5558 	int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms);
5559 	u64 resched_latency, now = rq_clock(rq);
5560 	static bool warned_once;
5561 
5562 	if (sysctl_resched_latency_warn_once && warned_once)
5563 		return 0;
5564 
5565 	if (!need_resched() || !latency_warn_ms)
5566 		return 0;
5567 
5568 	if (system_state == SYSTEM_BOOTING)
5569 		return 0;
5570 
5571 	if (!rq->last_seen_need_resched_ns) {
5572 		rq->last_seen_need_resched_ns = now;
5573 		rq->ticks_without_resched = 0;
5574 		return 0;
5575 	}
5576 
5577 	rq->ticks_without_resched++;
5578 	resched_latency = now - rq->last_seen_need_resched_ns;
5579 	if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC)
5580 		return 0;
5581 
5582 	warned_once = true;
5583 
5584 	return resched_latency;
5585 }
5586 
5587 static int __init setup_resched_latency_warn_ms(char *str)
5588 {
5589 	long val;
5590 
5591 	if ((kstrtol(str, 0, &val))) {
5592 		pr_warn("Unable to set resched_latency_warn_ms\n");
5593 		return 1;
5594 	}
5595 
5596 	sysctl_resched_latency_warn_ms = val;
5597 	return 1;
5598 }
5599 __setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms);
5600 
5601 /*
5602  * This function gets called by the timer code, with HZ frequency.
5603  * We call it with interrupts disabled.
5604  */
5605 void sched_tick(void)
5606 {
5607 	int cpu = smp_processor_id();
5608 	struct rq *rq = cpu_rq(cpu);
5609 	/* accounting goes to the donor task */
5610 	struct task_struct *donor;
5611 	struct rq_flags rf;
5612 	unsigned long hw_pressure;
5613 	u64 resched_latency;
5614 
5615 	if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE))
5616 		arch_scale_freq_tick();
5617 
5618 	sched_clock_tick();
5619 
5620 	rq_lock(rq, &rf);
5621 	donor = rq->donor;
5622 
5623 	psi_account_irqtime(rq, donor, NULL);
5624 
5625 	update_rq_clock(rq);
5626 	hw_pressure = arch_scale_hw_pressure(cpu_of(rq));
5627 	update_hw_load_avg(rq_clock_task(rq), rq, hw_pressure);
5628 
5629 	if (dynamic_preempt_lazy() && tif_test_bit(TIF_NEED_RESCHED_LAZY))
5630 		resched_curr(rq);
5631 
5632 	donor->sched_class->task_tick(rq, donor, 0);
5633 	if (sched_feat(LATENCY_WARN))
5634 		resched_latency = cpu_resched_latency(rq);
5635 	calc_global_load_tick(rq);
5636 	sched_core_tick(rq);
5637 	scx_tick(rq);
5638 
5639 	rq_unlock(rq, &rf);
5640 
5641 	if (sched_feat(LATENCY_WARN) && resched_latency)
5642 		resched_latency_warn(cpu, resched_latency);
5643 
5644 	perf_event_task_tick();
5645 
5646 	if (donor->flags & PF_WQ_WORKER)
5647 		wq_worker_tick(donor);
5648 
5649 	if (!scx_switched_all()) {
5650 		rq->idle_balance = idle_cpu(cpu);
5651 		sched_balance_trigger(rq);
5652 	}
5653 }
5654 
5655 #ifdef CONFIG_NO_HZ_FULL
5656 
5657 struct tick_work {
5658 	int			cpu;
5659 	atomic_t		state;
5660 	struct delayed_work	work;
5661 };
5662 /* Values for ->state, see diagram below. */
5663 #define TICK_SCHED_REMOTE_OFFLINE	0
5664 #define TICK_SCHED_REMOTE_OFFLINING	1
5665 #define TICK_SCHED_REMOTE_RUNNING	2
5666 
5667 /*
5668  * State diagram for ->state:
5669  *
5670  *
5671  *          TICK_SCHED_REMOTE_OFFLINE
5672  *                    |   ^
5673  *                    |   |
5674  *                    |   | sched_tick_remote()
5675  *                    |   |
5676  *                    |   |
5677  *                    +--TICK_SCHED_REMOTE_OFFLINING
5678  *                    |   ^
5679  *                    |   |
5680  * sched_tick_start() |   | sched_tick_stop()
5681  *                    |   |
5682  *                    V   |
5683  *          TICK_SCHED_REMOTE_RUNNING
5684  *
5685  *
5686  * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
5687  * and sched_tick_start() are happy to leave the state in RUNNING.
5688  */
5689 
5690 static struct tick_work __percpu *tick_work_cpu;
5691 
5692 static void sched_tick_remote(struct work_struct *work)
5693 {
5694 	struct delayed_work *dwork = to_delayed_work(work);
5695 	struct tick_work *twork = container_of(dwork, struct tick_work, work);
5696 	int cpu = twork->cpu;
5697 	struct rq *rq = cpu_rq(cpu);
5698 	int os;
5699 
5700 	/*
5701 	 * Handle the tick only if it appears the remote CPU is running in full
5702 	 * dynticks mode. The check is racy by nature, but missing a tick or
5703 	 * having one too much is no big deal because the scheduler tick updates
5704 	 * statistics and checks timeslices in a time-independent way, regardless
5705 	 * of when exactly it is running.
5706 	 */
5707 	if (tick_nohz_tick_stopped_cpu(cpu)) {
5708 		guard(rq_lock_irq)(rq);
5709 		struct task_struct *curr = rq->curr;
5710 
5711 		if (cpu_online(cpu)) {
5712 			/*
5713 			 * Since this is a remote tick for full dynticks mode,
5714 			 * we are always sure that there is no proxy (only a
5715 			 * single task is running).
5716 			 */
5717 			WARN_ON_ONCE(rq->curr != rq->donor);
5718 			update_rq_clock(rq);
5719 
5720 			if (!is_idle_task(curr)) {
5721 				/*
5722 				 * Make sure the next tick runs within a
5723 				 * reasonable amount of time.
5724 				 */
5725 				u64 delta = rq_clock_task(rq) - curr->se.exec_start;
5726 				WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 30);
5727 			}
5728 			curr->sched_class->task_tick(rq, curr, 0);
5729 
5730 			calc_load_nohz_remote(rq);
5731 		}
5732 	}
5733 
5734 	/*
5735 	 * Run the remote tick once per second (1Hz). This arbitrary
5736 	 * frequency is large enough to avoid overload but short enough
5737 	 * to keep scheduler internal stats reasonably up to date.  But
5738 	 * first update state to reflect hotplug activity if required.
5739 	 */
5740 	os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
5741 	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
5742 	if (os == TICK_SCHED_REMOTE_RUNNING)
5743 		queue_delayed_work(system_unbound_wq, dwork, HZ);
5744 }
5745 
5746 static void sched_tick_start(int cpu)
5747 {
5748 	int os;
5749 	struct tick_work *twork;
5750 
5751 	if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE))
5752 		return;
5753 
5754 	WARN_ON_ONCE(!tick_work_cpu);
5755 
5756 	twork = per_cpu_ptr(tick_work_cpu, cpu);
5757 	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
5758 	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
5759 	if (os == TICK_SCHED_REMOTE_OFFLINE) {
5760 		twork->cpu = cpu;
5761 		INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
5762 		queue_delayed_work(system_unbound_wq, &twork->work, HZ);
5763 	}
5764 }
5765 
5766 #ifdef CONFIG_HOTPLUG_CPU
5767 static void sched_tick_stop(int cpu)
5768 {
5769 	struct tick_work *twork;
5770 	int os;
5771 
5772 	if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE))
5773 		return;
5774 
5775 	WARN_ON_ONCE(!tick_work_cpu);
5776 
5777 	twork = per_cpu_ptr(tick_work_cpu, cpu);
5778 	/* There cannot be competing actions, but don't rely on stop-machine. */
5779 	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
5780 	WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
5781 	/* Don't cancel, as this would mess up the state machine. */
5782 }
5783 #endif /* CONFIG_HOTPLUG_CPU */
5784 
5785 int __init sched_tick_offload_init(void)
5786 {
5787 	tick_work_cpu = alloc_percpu(struct tick_work);
5788 	BUG_ON(!tick_work_cpu);
5789 	return 0;
5790 }
5791 
5792 #else /* !CONFIG_NO_HZ_FULL: */
5793 static inline void sched_tick_start(int cpu) { }
5794 static inline void sched_tick_stop(int cpu) { }
5795 #endif /* !CONFIG_NO_HZ_FULL */
5796 
5797 #if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
5798 				defined(CONFIG_TRACE_PREEMPT_TOGGLE))
5799 /*
5800  * If the value passed in is equal to the current preempt count
5801  * then we just disabled preemption. Start timing the latency.
5802  */
5803 static inline void preempt_latency_start(int val)
5804 {
5805 	if (preempt_count() == val) {
5806 		unsigned long ip = get_lock_parent_ip();
5807 #ifdef CONFIG_DEBUG_PREEMPT
5808 		current->preempt_disable_ip = ip;
5809 #endif
5810 		trace_preempt_off(CALLER_ADDR0, ip);
5811 	}
5812 }
5813 
5814 void preempt_count_add(int val)
5815 {
5816 #ifdef CONFIG_DEBUG_PREEMPT
5817 	/*
5818 	 * Underflow?
5819 	 */
5820 	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
5821 		return;
5822 #endif
5823 	__preempt_count_add(val);
5824 #ifdef CONFIG_DEBUG_PREEMPT
5825 	/*
5826 	 * Spinlock count overflowing soon?
5827 	 */
5828 	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
5829 				PREEMPT_MASK - 10);
5830 #endif
5831 	preempt_latency_start(val);
5832 }
5833 EXPORT_SYMBOL(preempt_count_add);
5834 NOKPROBE_SYMBOL(preempt_count_add);
5835 
5836 /*
5837  * If the value passed in equals to the current preempt count
5838  * then we just enabled preemption. Stop timing the latency.
5839  */
5840 static inline void preempt_latency_stop(int val)
5841 {
5842 	if (preempt_count() == val)
5843 		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
5844 }
5845 
5846 void preempt_count_sub(int val)
5847 {
5848 #ifdef CONFIG_DEBUG_PREEMPT
5849 	/*
5850 	 * Underflow?
5851 	 */
5852 	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
5853 		return;
5854 	/*
5855 	 * Is the spinlock portion underflowing?
5856 	 */
5857 	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
5858 			!(preempt_count() & PREEMPT_MASK)))
5859 		return;
5860 #endif
5861 
5862 	preempt_latency_stop(val);
5863 	__preempt_count_sub(val);
5864 }
5865 EXPORT_SYMBOL(preempt_count_sub);
5866 NOKPROBE_SYMBOL(preempt_count_sub);
5867 
5868 #else
5869 static inline void preempt_latency_start(int val) { }
5870 static inline void preempt_latency_stop(int val) { }
5871 #endif
5872 
5873 static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
5874 {
5875 #ifdef CONFIG_DEBUG_PREEMPT
5876 	return p->preempt_disable_ip;
5877 #else
5878 	return 0;
5879 #endif
5880 }
5881 
5882 /*
5883  * Print scheduling while atomic bug:
5884  */
5885 static noinline void __schedule_bug(struct task_struct *prev)
5886 {
5887 	/* Save this before calling printk(), since that will clobber it */
5888 	unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
5889 
5890 	if (oops_in_progress)
5891 		return;
5892 
5893 	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
5894 		prev->comm, prev->pid, preempt_count());
5895 
5896 	debug_show_held_locks(prev);
5897 	print_modules();
5898 	if (irqs_disabled())
5899 		print_irqtrace_events(prev);
5900 	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
5901 		pr_err("Preemption disabled at:");
5902 		print_ip_sym(KERN_ERR, preempt_disable_ip);
5903 	}
5904 	check_panic_on_warn("scheduling while atomic");
5905 
5906 	dump_stack();
5907 	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
5908 }
5909 
5910 /*
5911  * Various schedule()-time debugging checks and statistics:
5912  */
5913 static inline void schedule_debug(struct task_struct *prev, bool preempt)
5914 {
5915 #ifdef CONFIG_SCHED_STACK_END_CHECK
5916 	if (task_stack_end_corrupted(prev))
5917 		panic("corrupted stack end detected inside scheduler\n");
5918 
5919 	if (task_scs_end_corrupted(prev))
5920 		panic("corrupted shadow stack detected inside scheduler\n");
5921 #endif
5922 
5923 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
5924 	if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) {
5925 		printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
5926 			prev->comm, prev->pid, prev->non_block_count);
5927 		dump_stack();
5928 		add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
5929 	}
5930 #endif
5931 
5932 	if (unlikely(in_atomic_preempt_off())) {
5933 		__schedule_bug(prev);
5934 		preempt_count_set(PREEMPT_DISABLED);
5935 	}
5936 	rcu_sleep_check();
5937 	WARN_ON_ONCE(ct_state() == CT_STATE_USER);
5938 
5939 	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
5940 
5941 	schedstat_inc(this_rq()->sched_count);
5942 }
5943 
5944 static void prev_balance(struct rq *rq, struct task_struct *prev,
5945 			 struct rq_flags *rf)
5946 {
5947 	const struct sched_class *start_class = prev->sched_class;
5948 	const struct sched_class *class;
5949 
5950 	/*
5951 	 * We must do the balancing pass before put_prev_task(), such
5952 	 * that when we release the rq->lock the task is in the same
5953 	 * state as before we took rq->lock.
5954 	 *
5955 	 * We can terminate the balance pass as soon as we know there is
5956 	 * a runnable task of @class priority or higher.
5957 	 */
5958 	for_active_class_range(class, start_class, &idle_sched_class) {
5959 		if (class->balance && class->balance(rq, prev, rf))
5960 			break;
5961 	}
5962 }
5963 
5964 /*
5965  * Pick up the highest-prio task:
5966  */
5967 static inline struct task_struct *
5968 __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
5969 	__must_hold(__rq_lockp(rq))
5970 {
5971 	const struct sched_class *class;
5972 	struct task_struct *p;
5973 
5974 	rq->dl_server = NULL;
5975 
5976 	if (scx_enabled())
5977 		goto restart;
5978 
5979 	/*
5980 	 * Optimization: we know that if all tasks are in the fair class we can
5981 	 * call that function directly, but only if the @prev task wasn't of a
5982 	 * higher scheduling class, because otherwise those lose the
5983 	 * opportunity to pull in more work from other CPUs.
5984 	 */
5985 	if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) &&
5986 		   rq->nr_running == rq->cfs.h_nr_queued)) {
5987 
5988 		p = pick_next_task_fair(rq, prev, rf);
5989 		if (unlikely(p == RETRY_TASK))
5990 			goto restart;
5991 
5992 		/* Assume the next prioritized class is idle_sched_class */
5993 		if (!p) {
5994 			p = pick_task_idle(rq, rf);
5995 			put_prev_set_next_task(rq, prev, p);
5996 		}
5997 
5998 		return p;
5999 	}
6000 
6001 restart:
6002 	prev_balance(rq, prev, rf);
6003 
6004 	for_each_active_class(class) {
6005 		if (class->pick_next_task) {
6006 			p = class->pick_next_task(rq, prev, rf);
6007 			if (unlikely(p == RETRY_TASK))
6008 				goto restart;
6009 			if (p)
6010 				return p;
6011 		} else {
6012 			p = class->pick_task(rq, rf);
6013 			if (unlikely(p == RETRY_TASK))
6014 				goto restart;
6015 			if (p) {
6016 				put_prev_set_next_task(rq, prev, p);
6017 				return p;
6018 			}
6019 		}
6020 	}
6021 
6022 	BUG(); /* The idle class should always have a runnable task. */
6023 }
6024 
6025 #ifdef CONFIG_SCHED_CORE
6026 static inline bool is_task_rq_idle(struct task_struct *t)
6027 {
6028 	return (task_rq(t)->idle == t);
6029 }
6030 
6031 static inline bool cookie_equals(struct task_struct *a, unsigned long cookie)
6032 {
6033 	return is_task_rq_idle(a) || (a->core_cookie == cookie);
6034 }
6035 
6036 static inline bool cookie_match(struct task_struct *a, struct task_struct *b)
6037 {
6038 	if (is_task_rq_idle(a) || is_task_rq_idle(b))
6039 		return true;
6040 
6041 	return a->core_cookie == b->core_cookie;
6042 }
6043 
6044 /*
6045  * Careful; this can return RETRY_TASK, it does not include the retry-loop
6046  * itself due to the whole SMT pick retry thing below.
6047  */
6048 static inline struct task_struct *pick_task(struct rq *rq, struct rq_flags *rf)
6049 {
6050 	const struct sched_class *class;
6051 	struct task_struct *p;
6052 
6053 	rq->dl_server = NULL;
6054 
6055 	for_each_active_class(class) {
6056 		p = class->pick_task(rq, rf);
6057 		if (p)
6058 			return p;
6059 	}
6060 
6061 	BUG(); /* The idle class should always have a runnable task. */
6062 }
6063 
6064 extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
6065 
6066 static void queue_core_balance(struct rq *rq);
6067 
6068 static struct task_struct *
6069 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6070 	__must_hold(__rq_lockp(rq))
6071 {
6072 	struct task_struct *next, *p, *max;
6073 	const struct cpumask *smt_mask;
6074 	bool fi_before = false;
6075 	bool core_clock_updated = (rq == rq->core);
6076 	unsigned long cookie;
6077 	int i, cpu, occ = 0;
6078 	struct rq *rq_i;
6079 	bool need_sync;
6080 
6081 	if (!sched_core_enabled(rq))
6082 		return __pick_next_task(rq, prev, rf);
6083 
6084 	cpu = cpu_of(rq);
6085 
6086 	/* Stopper task is switching into idle, no need core-wide selection. */
6087 	if (cpu_is_offline(cpu)) {
6088 		/*
6089 		 * Reset core_pick so that we don't enter the fastpath when
6090 		 * coming online. core_pick would already be migrated to
6091 		 * another cpu during offline.
6092 		 */
6093 		rq->core_pick = NULL;
6094 		rq->core_dl_server = NULL;
6095 		return __pick_next_task(rq, prev, rf);
6096 	}
6097 
6098 	/*
6099 	 * If there were no {en,de}queues since we picked (IOW, the task
6100 	 * pointers are all still valid), and we haven't scheduled the last
6101 	 * pick yet, do so now.
6102 	 *
6103 	 * rq->core_pick can be NULL if no selection was made for a CPU because
6104 	 * it was either offline or went offline during a sibling's core-wide
6105 	 * selection. In this case, do a core-wide selection.
6106 	 */
6107 	if (rq->core->core_pick_seq == rq->core->core_task_seq &&
6108 	    rq->core->core_pick_seq != rq->core_sched_seq &&
6109 	    rq->core_pick) {
6110 		WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq);
6111 
6112 		next = rq->core_pick;
6113 		rq->dl_server = rq->core_dl_server;
6114 		rq->core_pick = NULL;
6115 		rq->core_dl_server = NULL;
6116 		goto out_set_next;
6117 	}
6118 
6119 	prev_balance(rq, prev, rf);
6120 
6121 	smt_mask = cpu_smt_mask(cpu);
6122 	need_sync = !!rq->core->core_cookie;
6123 
6124 	/* reset state */
6125 	rq->core->core_cookie = 0UL;
6126 	if (rq->core->core_forceidle_count) {
6127 		if (!core_clock_updated) {
6128 			update_rq_clock(rq->core);
6129 			core_clock_updated = true;
6130 		}
6131 		sched_core_account_forceidle(rq);
6132 		/* reset after accounting force idle */
6133 		rq->core->core_forceidle_start = 0;
6134 		rq->core->core_forceidle_count = 0;
6135 		rq->core->core_forceidle_occupation = 0;
6136 		need_sync = true;
6137 		fi_before = true;
6138 	}
6139 
6140 	/*
6141 	 * core->core_task_seq, core->core_pick_seq, rq->core_sched_seq
6142 	 *
6143 	 * @task_seq guards the task state ({en,de}queues)
6144 	 * @pick_seq is the @task_seq we did a selection on
6145 	 * @sched_seq is the @pick_seq we scheduled
6146 	 *
6147 	 * However, preemptions can cause multiple picks on the same task set.
6148 	 * 'Fix' this by also increasing @task_seq for every pick.
6149 	 */
6150 	rq->core->core_task_seq++;
6151 
6152 	/*
6153 	 * Optimize for common case where this CPU has no cookies
6154 	 * and there are no cookied tasks running on siblings.
6155 	 */
6156 	if (!need_sync) {
6157 restart_single:
6158 		next = pick_task(rq, rf);
6159 		if (unlikely(next == RETRY_TASK))
6160 			goto restart_single;
6161 		if (!next->core_cookie) {
6162 			rq->core_pick = NULL;
6163 			rq->core_dl_server = NULL;
6164 			/*
6165 			 * For robustness, update the min_vruntime_fi for
6166 			 * unconstrained picks as well.
6167 			 */
6168 			WARN_ON_ONCE(fi_before);
6169 			task_vruntime_update(rq, next, false);
6170 			goto out_set_next;
6171 		}
6172 	}
6173 
6174 	/*
6175 	 * For each thread: do the regular task pick and find the max prio task
6176 	 * amongst them.
6177 	 *
6178 	 * Tie-break prio towards the current CPU
6179 	 */
6180 restart_multi:
6181 	max = NULL;
6182 	for_each_cpu_wrap(i, smt_mask, cpu) {
6183 		rq_i = cpu_rq(i);
6184 
6185 		/*
6186 		 * Current cpu always has its clock updated on entrance to
6187 		 * pick_next_task(). If the current cpu is not the core,
6188 		 * the core may also have been updated above.
6189 		 */
6190 		if (i != cpu && (rq_i != rq->core || !core_clock_updated))
6191 			update_rq_clock(rq_i);
6192 
6193 		p = pick_task(rq_i, rf);
6194 		if (unlikely(p == RETRY_TASK))
6195 			goto restart_multi;
6196 
6197 		rq_i->core_pick = p;
6198 		rq_i->core_dl_server = rq_i->dl_server;
6199 
6200 		if (!max || prio_less(max, p, fi_before))
6201 			max = p;
6202 	}
6203 
6204 	cookie = rq->core->core_cookie = max->core_cookie;
6205 
6206 	/*
6207 	 * For each thread: try and find a runnable task that matches @max or
6208 	 * force idle.
6209 	 */
6210 	for_each_cpu(i, smt_mask) {
6211 		rq_i = cpu_rq(i);
6212 		p = rq_i->core_pick;
6213 
6214 		if (!cookie_equals(p, cookie)) {
6215 			p = NULL;
6216 			if (cookie)
6217 				p = sched_core_find(rq_i, cookie);
6218 			if (!p)
6219 				p = idle_sched_class.pick_task(rq_i, rf);
6220 		}
6221 
6222 		rq_i->core_pick = p;
6223 		rq_i->core_dl_server = NULL;
6224 
6225 		if (p == rq_i->idle) {
6226 			if (rq_i->nr_running) {
6227 				rq->core->core_forceidle_count++;
6228 				if (!fi_before)
6229 					rq->core->core_forceidle_seq++;
6230 			}
6231 		} else {
6232 			occ++;
6233 		}
6234 	}
6235 
6236 	if (schedstat_enabled() && rq->core->core_forceidle_count) {
6237 		rq->core->core_forceidle_start = rq_clock(rq->core);
6238 		rq->core->core_forceidle_occupation = occ;
6239 	}
6240 
6241 	rq->core->core_pick_seq = rq->core->core_task_seq;
6242 	next = rq->core_pick;
6243 	rq->core_sched_seq = rq->core->core_pick_seq;
6244 
6245 	/* Something should have been selected for current CPU */
6246 	WARN_ON_ONCE(!next);
6247 
6248 	/*
6249 	 * Reschedule siblings
6250 	 *
6251 	 * NOTE: L1TF -- at this point we're no longer running the old task and
6252 	 * sending an IPI (below) ensures the sibling will no longer be running
6253 	 * their task. This ensures there is no inter-sibling overlap between
6254 	 * non-matching user state.
6255 	 */
6256 	for_each_cpu(i, smt_mask) {
6257 		rq_i = cpu_rq(i);
6258 
6259 		/*
6260 		 * An online sibling might have gone offline before a task
6261 		 * could be picked for it, or it might be offline but later
6262 		 * happen to come online, but its too late and nothing was
6263 		 * picked for it.  That's Ok - it will pick tasks for itself,
6264 		 * so ignore it.
6265 		 */
6266 		if (!rq_i->core_pick)
6267 			continue;
6268 
6269 		/*
6270 		 * Update for new !FI->FI transitions, or if continuing to be in !FI:
6271 		 * fi_before     fi      update?
6272 		 *  0            0       1
6273 		 *  0            1       1
6274 		 *  1            0       1
6275 		 *  1            1       0
6276 		 */
6277 		if (!(fi_before && rq->core->core_forceidle_count))
6278 			task_vruntime_update(rq_i, rq_i->core_pick, !!rq->core->core_forceidle_count);
6279 
6280 		rq_i->core_pick->core_occupation = occ;
6281 
6282 		if (i == cpu) {
6283 			rq_i->core_pick = NULL;
6284 			rq_i->core_dl_server = NULL;
6285 			continue;
6286 		}
6287 
6288 		/* Did we break L1TF mitigation requirements? */
6289 		WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick));
6290 
6291 		if (rq_i->curr == rq_i->core_pick) {
6292 			rq_i->core_pick = NULL;
6293 			rq_i->core_dl_server = NULL;
6294 			continue;
6295 		}
6296 
6297 		resched_curr(rq_i);
6298 	}
6299 
6300 out_set_next:
6301 	put_prev_set_next_task(rq, prev, next);
6302 	if (rq->core->core_forceidle_count && next == rq->idle)
6303 		queue_core_balance(rq);
6304 
6305 	return next;
6306 }
6307 
6308 static bool try_steal_cookie(int this, int that)
6309 {
6310 	struct rq *dst = cpu_rq(this), *src = cpu_rq(that);
6311 	struct task_struct *p;
6312 	unsigned long cookie;
6313 	bool success = false;
6314 
6315 	guard(irq)();
6316 	guard(double_rq_lock)(dst, src);
6317 
6318 	cookie = dst->core->core_cookie;
6319 	if (!cookie)
6320 		return false;
6321 
6322 	if (dst->curr != dst->idle)
6323 		return false;
6324 
6325 	p = sched_core_find(src, cookie);
6326 	if (!p)
6327 		return false;
6328 
6329 	do {
6330 		if (p == src->core_pick || p == src->curr)
6331 			goto next;
6332 
6333 		if (!is_cpu_allowed(p, this))
6334 			goto next;
6335 
6336 		if (p->core_occupation > dst->idle->core_occupation)
6337 			goto next;
6338 		/*
6339 		 * sched_core_find() and sched_core_next() will ensure
6340 		 * that task @p is not throttled now, we also need to
6341 		 * check whether the runqueue of the destination CPU is
6342 		 * being throttled.
6343 		 */
6344 		if (sched_task_is_throttled(p, this))
6345 			goto next;
6346 
6347 		move_queued_task_locked(src, dst, p);
6348 		resched_curr(dst);
6349 
6350 		success = true;
6351 		break;
6352 
6353 next:
6354 		p = sched_core_next(p, cookie);
6355 	} while (p);
6356 
6357 	return success;
6358 }
6359 
6360 static bool steal_cookie_task(int cpu, struct sched_domain *sd)
6361 {
6362 	int i;
6363 
6364 	for_each_cpu_wrap(i, sched_domain_span(sd), cpu + 1) {
6365 		if (i == cpu)
6366 			continue;
6367 
6368 		if (need_resched())
6369 			break;
6370 
6371 		if (try_steal_cookie(cpu, i))
6372 			return true;
6373 	}
6374 
6375 	return false;
6376 }
6377 
6378 static void sched_core_balance(struct rq *rq)
6379 	__must_hold(__rq_lockp(rq))
6380 {
6381 	struct sched_domain *sd;
6382 	int cpu = cpu_of(rq);
6383 
6384 	guard(preempt)();
6385 	guard(rcu)();
6386 
6387 	raw_spin_rq_unlock_irq(rq);
6388 	for_each_domain(cpu, sd) {
6389 		if (need_resched())
6390 			break;
6391 
6392 		if (steal_cookie_task(cpu, sd))
6393 			break;
6394 	}
6395 	raw_spin_rq_lock_irq(rq);
6396 }
6397 
6398 static DEFINE_PER_CPU(struct balance_callback, core_balance_head);
6399 
6400 static void queue_core_balance(struct rq *rq)
6401 {
6402 	if (!sched_core_enabled(rq))
6403 		return;
6404 
6405 	if (!rq->core->core_cookie)
6406 		return;
6407 
6408 	if (!rq->nr_running) /* not forced idle */
6409 		return;
6410 
6411 	queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance);
6412 }
6413 
6414 DEFINE_LOCK_GUARD_1(core_lock, int,
6415 		    sched_core_lock(*_T->lock, &_T->flags),
6416 		    sched_core_unlock(*_T->lock, &_T->flags),
6417 		    unsigned long flags)
6418 
6419 static void sched_core_cpu_starting(unsigned int cpu)
6420 {
6421 	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
6422 	struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
6423 	int t;
6424 
6425 	guard(core_lock)(&cpu);
6426 
6427 	WARN_ON_ONCE(rq->core != rq);
6428 
6429 	/* if we're the first, we'll be our own leader */
6430 	if (cpumask_weight(smt_mask) == 1)
6431 		return;
6432 
6433 	/* find the leader */
6434 	for_each_cpu(t, smt_mask) {
6435 		if (t == cpu)
6436 			continue;
6437 		rq = cpu_rq(t);
6438 		if (rq->core == rq) {
6439 			core_rq = rq;
6440 			break;
6441 		}
6442 	}
6443 
6444 	if (WARN_ON_ONCE(!core_rq)) /* whoopsie */
6445 		return;
6446 
6447 	/* install and validate core_rq */
6448 	for_each_cpu(t, smt_mask) {
6449 		rq = cpu_rq(t);
6450 
6451 		if (t == cpu)
6452 			rq->core = core_rq;
6453 
6454 		WARN_ON_ONCE(rq->core != core_rq);
6455 	}
6456 }
6457 
6458 static void sched_core_cpu_deactivate(unsigned int cpu)
6459 {
6460 	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
6461 	struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
6462 	int t;
6463 
6464 	guard(core_lock)(&cpu);
6465 
6466 	/* if we're the last man standing, nothing to do */
6467 	if (cpumask_weight(smt_mask) == 1) {
6468 		WARN_ON_ONCE(rq->core != rq);
6469 		return;
6470 	}
6471 
6472 	/* if we're not the leader, nothing to do */
6473 	if (rq->core != rq)
6474 		return;
6475 
6476 	/* find a new leader */
6477 	for_each_cpu(t, smt_mask) {
6478 		if (t == cpu)
6479 			continue;
6480 		core_rq = cpu_rq(t);
6481 		break;
6482 	}
6483 
6484 	if (WARN_ON_ONCE(!core_rq)) /* impossible */
6485 		return;
6486 
6487 	/* copy the shared state to the new leader */
6488 	core_rq->core_task_seq             = rq->core_task_seq;
6489 	core_rq->core_pick_seq             = rq->core_pick_seq;
6490 	core_rq->core_cookie               = rq->core_cookie;
6491 	core_rq->core_forceidle_count      = rq->core_forceidle_count;
6492 	core_rq->core_forceidle_seq        = rq->core_forceidle_seq;
6493 	core_rq->core_forceidle_occupation = rq->core_forceidle_occupation;
6494 
6495 	/*
6496 	 * Accounting edge for forced idle is handled in pick_next_task().
6497 	 * Don't need another one here, since the hotplug thread shouldn't
6498 	 * have a cookie.
6499 	 */
6500 	core_rq->core_forceidle_start = 0;
6501 
6502 	/* install new leader */
6503 	for_each_cpu(t, smt_mask) {
6504 		rq = cpu_rq(t);
6505 		rq->core = core_rq;
6506 	}
6507 }
6508 
6509 static inline void sched_core_cpu_dying(unsigned int cpu)
6510 {
6511 	struct rq *rq = cpu_rq(cpu);
6512 
6513 	if (rq->core != rq)
6514 		rq->core = rq;
6515 }
6516 
6517 #else /* !CONFIG_SCHED_CORE: */
6518 
6519 static inline void sched_core_cpu_starting(unsigned int cpu) {}
6520 static inline void sched_core_cpu_deactivate(unsigned int cpu) {}
6521 static inline void sched_core_cpu_dying(unsigned int cpu) {}
6522 
6523 static struct task_struct *
6524 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6525 	__must_hold(__rq_lockp(rq))
6526 {
6527 	return __pick_next_task(rq, prev, rf);
6528 }
6529 
6530 #endif /* !CONFIG_SCHED_CORE */
6531 
6532 /*
6533  * Constants for the sched_mode argument of __schedule().
6534  *
6535  * The mode argument allows RT enabled kernels to differentiate a
6536  * preemption from blocking on an 'sleeping' spin/rwlock.
6537  */
6538 #define SM_IDLE			(-1)
6539 #define SM_NONE			0
6540 #define SM_PREEMPT		1
6541 #define SM_RTLOCK_WAIT		2
6542 
6543 /*
6544  * Helper function for __schedule()
6545  *
6546  * Tries to deactivate the task, unless the should_block arg
6547  * is false or if a signal is pending. In the case a signal
6548  * is pending, marks the task's __state as RUNNING (and clear
6549  * blocked_on).
6550  */
6551 static bool try_to_block_task(struct rq *rq, struct task_struct *p,
6552 			      unsigned long *task_state_p, bool should_block)
6553 {
6554 	unsigned long task_state = *task_state_p;
6555 	int flags = DEQUEUE_NOCLOCK;
6556 
6557 	if (signal_pending_state(task_state, p)) {
6558 		WRITE_ONCE(p->__state, TASK_RUNNING);
6559 		*task_state_p = TASK_RUNNING;
6560 		return false;
6561 	}
6562 
6563 	/*
6564 	 * We check should_block after signal_pending because we
6565 	 * will want to wake the task in that case. But if
6566 	 * should_block is false, its likely due to the task being
6567 	 * blocked on a mutex, and we want to keep it on the runqueue
6568 	 * to be selectable for proxy-execution.
6569 	 */
6570 	if (!should_block)
6571 		return false;
6572 
6573 	p->sched_contributes_to_load =
6574 		(task_state & TASK_UNINTERRUPTIBLE) &&
6575 		!(task_state & TASK_NOLOAD) &&
6576 		!(task_state & TASK_FROZEN);
6577 
6578 	if (unlikely(is_special_task_state(task_state)))
6579 		flags |= DEQUEUE_SPECIAL;
6580 
6581 	/*
6582 	 * __schedule()			ttwu()
6583 	 *   prev_state = prev->state;    if (p->on_rq && ...)
6584 	 *   if (prev_state)		    goto out;
6585 	 *     p->on_rq = 0;		  smp_acquire__after_ctrl_dep();
6586 	 *				  p->state = TASK_WAKING
6587 	 *
6588 	 * Where __schedule() and ttwu() have matching control dependencies.
6589 	 *
6590 	 * After this, schedule() must not care about p->state any more.
6591 	 */
6592 	block_task(rq, p, flags);
6593 	return true;
6594 }
6595 
6596 #ifdef CONFIG_SCHED_PROXY_EXEC
6597 static inline struct task_struct *proxy_resched_idle(struct rq *rq)
6598 {
6599 	put_prev_set_next_task(rq, rq->donor, rq->idle);
6600 	rq_set_donor(rq, rq->idle);
6601 	set_tsk_need_resched(rq->idle);
6602 	return rq->idle;
6603 }
6604 
6605 static bool __proxy_deactivate(struct rq *rq, struct task_struct *donor)
6606 {
6607 	unsigned long state = READ_ONCE(donor->__state);
6608 
6609 	/* Don't deactivate if the state has been changed to TASK_RUNNING */
6610 	if (state == TASK_RUNNING)
6611 		return false;
6612 	/*
6613 	 * Because we got donor from pick_next_task(), it is *crucial*
6614 	 * that we call proxy_resched_idle() before we deactivate it.
6615 	 * As once we deactivate donor, donor->on_rq is set to zero,
6616 	 * which allows ttwu() to immediately try to wake the task on
6617 	 * another rq. So we cannot use *any* references to donor
6618 	 * after that point. So things like cfs_rq->curr or rq->donor
6619 	 * need to be changed from next *before* we deactivate.
6620 	 */
6621 	proxy_resched_idle(rq);
6622 	return try_to_block_task(rq, donor, &state, true);
6623 }
6624 
6625 static struct task_struct *proxy_deactivate(struct rq *rq, struct task_struct *donor)
6626 {
6627 	if (!__proxy_deactivate(rq, donor)) {
6628 		/*
6629 		 * XXX: For now, if deactivation failed, set donor
6630 		 * as unblocked, as we aren't doing proxy-migrations
6631 		 * yet (more logic will be needed then).
6632 		 */
6633 		donor->blocked_on = NULL;
6634 	}
6635 	return NULL;
6636 }
6637 
6638 /*
6639  * Find runnable lock owner to proxy for mutex blocked donor
6640  *
6641  * Follow the blocked-on relation:
6642  *   task->blocked_on -> mutex->owner -> task...
6643  *
6644  * Lock order:
6645  *
6646  *   p->pi_lock
6647  *     rq->lock
6648  *       mutex->wait_lock
6649  *
6650  * Returns the task that is going to be used as execution context (the one
6651  * that is actually going to be run on cpu_of(rq)).
6652  */
6653 static struct task_struct *
6654 find_proxy_task(struct rq *rq, struct task_struct *donor, struct rq_flags *rf)
6655 {
6656 	struct task_struct *owner = NULL;
6657 	int this_cpu = cpu_of(rq);
6658 	struct task_struct *p;
6659 	struct mutex *mutex;
6660 
6661 	/* Follow blocked_on chain. */
6662 	for (p = donor; task_is_blocked(p); p = owner) {
6663 		mutex = p->blocked_on;
6664 		/* Something changed in the chain, so pick again */
6665 		if (!mutex)
6666 			return NULL;
6667 		/*
6668 		 * By taking mutex->wait_lock we hold off concurrent mutex_unlock()
6669 		 * and ensure @owner sticks around.
6670 		 */
6671 		guard(raw_spinlock)(&mutex->wait_lock);
6672 
6673 		/* Check again that p is blocked with wait_lock held */
6674 		if (mutex != __get_task_blocked_on(p)) {
6675 			/*
6676 			 * Something changed in the blocked_on chain and
6677 			 * we don't know if only at this level. So, let's
6678 			 * just bail out completely and let __schedule()
6679 			 * figure things out (pick_again loop).
6680 			 */
6681 			return NULL;
6682 		}
6683 
6684 		owner = __mutex_owner(mutex);
6685 		if (!owner) {
6686 			__clear_task_blocked_on(p, mutex);
6687 			return p;
6688 		}
6689 
6690 		if (!READ_ONCE(owner->on_rq) || owner->se.sched_delayed) {
6691 			/* XXX Don't handle blocked owners/delayed dequeue yet */
6692 			return proxy_deactivate(rq, donor);
6693 		}
6694 
6695 		if (task_cpu(owner) != this_cpu) {
6696 			/* XXX Don't handle migrations yet */
6697 			return proxy_deactivate(rq, donor);
6698 		}
6699 
6700 		if (task_on_rq_migrating(owner)) {
6701 			/*
6702 			 * One of the chain of mutex owners is currently migrating to this
6703 			 * CPU, but has not yet been enqueued because we are holding the
6704 			 * rq lock. As a simple solution, just schedule rq->idle to give
6705 			 * the migration a chance to complete. Much like the migrate_task
6706 			 * case we should end up back in find_proxy_task(), this time
6707 			 * hopefully with all relevant tasks already enqueued.
6708 			 */
6709 			return proxy_resched_idle(rq);
6710 		}
6711 
6712 		/*
6713 		 * Its possible to race where after we check owner->on_rq
6714 		 * but before we check (owner_cpu != this_cpu) that the
6715 		 * task on another cpu was migrated back to this cpu. In
6716 		 * that case it could slip by our  checks. So double check
6717 		 * we are still on this cpu and not migrating. If we get
6718 		 * inconsistent results, try again.
6719 		 */
6720 		if (!task_on_rq_queued(owner) || task_cpu(owner) != this_cpu)
6721 			return NULL;
6722 
6723 		if (owner == p) {
6724 			/*
6725 			 * It's possible we interleave with mutex_unlock like:
6726 			 *
6727 			 *				lock(&rq->lock);
6728 			 *				  find_proxy_task()
6729 			 * mutex_unlock()
6730 			 *   lock(&wait_lock);
6731 			 *   donor(owner) = current->blocked_donor;
6732 			 *   unlock(&wait_lock);
6733 			 *
6734 			 *   wake_up_q();
6735 			 *     ...
6736 			 *       ttwu_runnable()
6737 			 *         __task_rq_lock()
6738 			 *				  lock(&wait_lock);
6739 			 *				  owner == p
6740 			 *
6741 			 * Which leaves us to finish the ttwu_runnable() and make it go.
6742 			 *
6743 			 * So schedule rq->idle so that ttwu_runnable() can get the rq
6744 			 * lock and mark owner as running.
6745 			 */
6746 			return proxy_resched_idle(rq);
6747 		}
6748 		/*
6749 		 * OK, now we're absolutely sure @owner is on this
6750 		 * rq, therefore holding @rq->lock is sufficient to
6751 		 * guarantee its existence, as per ttwu_remote().
6752 		 */
6753 	}
6754 
6755 	WARN_ON_ONCE(owner && !owner->on_rq);
6756 	return owner;
6757 }
6758 #else /* SCHED_PROXY_EXEC */
6759 static struct task_struct *
6760 find_proxy_task(struct rq *rq, struct task_struct *donor, struct rq_flags *rf)
6761 {
6762 	WARN_ONCE(1, "This should never be called in the !SCHED_PROXY_EXEC case\n");
6763 	return donor;
6764 }
6765 #endif /* SCHED_PROXY_EXEC */
6766 
6767 static inline void proxy_tag_curr(struct rq *rq, struct task_struct *owner)
6768 {
6769 	if (!sched_proxy_exec())
6770 		return;
6771 	/*
6772 	 * pick_next_task() calls set_next_task() on the chosen task
6773 	 * at some point, which ensures it is not push/pullable.
6774 	 * However, the chosen/donor task *and* the mutex owner form an
6775 	 * atomic pair wrt push/pull.
6776 	 *
6777 	 * Make sure owner we run is not pushable. Unfortunately we can
6778 	 * only deal with that by means of a dequeue/enqueue cycle. :-/
6779 	 */
6780 	dequeue_task(rq, owner, DEQUEUE_NOCLOCK | DEQUEUE_SAVE);
6781 	enqueue_task(rq, owner, ENQUEUE_NOCLOCK | ENQUEUE_RESTORE);
6782 }
6783 
6784 /*
6785  * __schedule() is the main scheduler function.
6786  *
6787  * The main means of driving the scheduler and thus entering this function are:
6788  *
6789  *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
6790  *
6791  *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
6792  *      paths. For example, see arch/x86/entry_64.S.
6793  *
6794  *      To drive preemption between tasks, the scheduler sets the flag in timer
6795  *      interrupt handler sched_tick().
6796  *
6797  *   3. Wakeups don't really cause entry into schedule(). They add a
6798  *      task to the run-queue and that's it.
6799  *
6800  *      Now, if the new task added to the run-queue preempts the current
6801  *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
6802  *      called on the nearest possible occasion:
6803  *
6804  *       - If the kernel is preemptible (CONFIG_PREEMPTION=y):
6805  *
6806  *         - in syscall or exception context, at the next outmost
6807  *           preempt_enable(). (this might be as soon as the wake_up()'s
6808  *           spin_unlock()!)
6809  *
6810  *         - in IRQ context, return from interrupt-handler to
6811  *           preemptible context
6812  *
6813  *       - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
6814  *         then at the next:
6815  *
6816  *          - cond_resched() call
6817  *          - explicit schedule() call
6818  *          - return from syscall or exception to user-space
6819  *          - return from interrupt-handler to user-space
6820  *
6821  * WARNING: must be called with preemption disabled!
6822  */
6823 static void __sched notrace __schedule(int sched_mode)
6824 {
6825 	struct task_struct *prev, *next;
6826 	/*
6827 	 * On PREEMPT_RT kernel, SM_RTLOCK_WAIT is noted
6828 	 * as a preemption by schedule_debug() and RCU.
6829 	 */
6830 	bool preempt = sched_mode > SM_NONE;
6831 	bool is_switch = false;
6832 	unsigned long *switch_count;
6833 	unsigned long prev_state;
6834 	struct rq_flags rf;
6835 	struct rq *rq;
6836 	int cpu;
6837 
6838 	/* Trace preemptions consistently with task switches */
6839 	trace_sched_entry_tp(sched_mode == SM_PREEMPT);
6840 
6841 	cpu = smp_processor_id();
6842 	rq = cpu_rq(cpu);
6843 	prev = rq->curr;
6844 
6845 	schedule_debug(prev, preempt);
6846 
6847 	klp_sched_try_switch(prev);
6848 
6849 	local_irq_disable();
6850 	rcu_note_context_switch(preempt);
6851 	migrate_disable_switch(rq, prev);
6852 
6853 	/*
6854 	 * Make sure that signal_pending_state()->signal_pending() below
6855 	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
6856 	 * done by the caller to avoid the race with signal_wake_up():
6857 	 *
6858 	 * __set_current_state(@state)		signal_wake_up()
6859 	 * schedule()				  set_tsk_thread_flag(p, TIF_SIGPENDING)
6860 	 *					  wake_up_state(p, state)
6861 	 *   LOCK rq->lock			    LOCK p->pi_state
6862 	 *   smp_mb__after_spinlock()		    smp_mb__after_spinlock()
6863 	 *     if (signal_pending_state())	    if (p->state & @state)
6864 	 *
6865 	 * Also, the membarrier system call requires a full memory barrier
6866 	 * after coming from user-space, before storing to rq->curr; this
6867 	 * barrier matches a full barrier in the proximity of the membarrier
6868 	 * system call exit.
6869 	 */
6870 	rq_lock(rq, &rf);
6871 	smp_mb__after_spinlock();
6872 
6873 	hrtick_schedule_enter(rq);
6874 
6875 	/* Promote REQ to ACT */
6876 	rq->clock_update_flags <<= 1;
6877 	update_rq_clock(rq);
6878 	rq->clock_update_flags = RQCF_UPDATED;
6879 
6880 	switch_count = &prev->nivcsw;
6881 
6882 	/* Task state changes only considers SM_PREEMPT as preemption */
6883 	preempt = sched_mode == SM_PREEMPT;
6884 
6885 	/*
6886 	 * We must load prev->state once (task_struct::state is volatile), such
6887 	 * that we form a control dependency vs deactivate_task() below.
6888 	 */
6889 	prev_state = READ_ONCE(prev->__state);
6890 	if (sched_mode == SM_IDLE) {
6891 		/* SCX must consult the BPF scheduler to tell if rq is empty */
6892 		if (!rq->nr_running && !scx_enabled()) {
6893 			next = prev;
6894 			rq->next_class = &idle_sched_class;
6895 			goto picked;
6896 		}
6897 	} else if (!preempt && prev_state) {
6898 		/*
6899 		 * We pass task_is_blocked() as the should_block arg
6900 		 * in order to keep mutex-blocked tasks on the runqueue
6901 		 * for slection with proxy-exec (without proxy-exec
6902 		 * task_is_blocked() will always be false).
6903 		 */
6904 		try_to_block_task(rq, prev, &prev_state,
6905 				  !task_is_blocked(prev));
6906 		switch_count = &prev->nvcsw;
6907 	}
6908 
6909 pick_again:
6910 	next = pick_next_task(rq, rq->donor, &rf);
6911 	rq_set_donor(rq, next);
6912 	rq->next_class = next->sched_class;
6913 	if (unlikely(task_is_blocked(next))) {
6914 		next = find_proxy_task(rq, next, &rf);
6915 		if (!next)
6916 			goto pick_again;
6917 		if (next == rq->idle)
6918 			goto keep_resched;
6919 	}
6920 picked:
6921 	clear_tsk_need_resched(prev);
6922 	clear_preempt_need_resched();
6923 keep_resched:
6924 	rq->last_seen_need_resched_ns = 0;
6925 
6926 	is_switch = prev != next;
6927 	if (likely(is_switch)) {
6928 		rq->nr_switches++;
6929 		/*
6930 		 * RCU users of rcu_dereference(rq->curr) may not see
6931 		 * changes to task_struct made by pick_next_task().
6932 		 */
6933 		RCU_INIT_POINTER(rq->curr, next);
6934 
6935 		if (!task_current_donor(rq, next))
6936 			proxy_tag_curr(rq, next);
6937 
6938 		/*
6939 		 * The membarrier system call requires each architecture
6940 		 * to have a full memory barrier after updating
6941 		 * rq->curr, before returning to user-space.
6942 		 *
6943 		 * Here are the schemes providing that barrier on the
6944 		 * various architectures:
6945 		 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC,
6946 		 *   RISC-V.  switch_mm() relies on membarrier_arch_switch_mm()
6947 		 *   on PowerPC and on RISC-V.
6948 		 * - finish_lock_switch() for weakly-ordered
6949 		 *   architectures where spin_unlock is a full barrier,
6950 		 * - switch_to() for arm64 (weakly-ordered, spin_unlock
6951 		 *   is a RELEASE barrier),
6952 		 *
6953 		 * The barrier matches a full barrier in the proximity of
6954 		 * the membarrier system call entry.
6955 		 *
6956 		 * On RISC-V, this barrier pairing is also needed for the
6957 		 * SYNC_CORE command when switching between processes, cf.
6958 		 * the inline comments in membarrier_arch_switch_mm().
6959 		 */
6960 		++*switch_count;
6961 
6962 		psi_account_irqtime(rq, prev, next);
6963 		psi_sched_switch(prev, next, !task_on_rq_queued(prev) ||
6964 					     prev->se.sched_delayed);
6965 
6966 		trace_sched_switch(preempt, prev, next, prev_state);
6967 
6968 		/* Also unlocks the rq: */
6969 		rq = context_switch(rq, prev, next, &rf);
6970 	} else {
6971 		/* In case next was already curr but just got blocked_donor */
6972 		if (!task_current_donor(rq, next))
6973 			proxy_tag_curr(rq, next);
6974 
6975 		rq_unpin_lock(rq, &rf);
6976 		__balance_callbacks(rq, NULL);
6977 		hrtick_schedule_exit(rq);
6978 		raw_spin_rq_unlock_irq(rq);
6979 	}
6980 	trace_sched_exit_tp(is_switch);
6981 }
6982 
6983 void __noreturn do_task_dead(void)
6984 {
6985 	/* Causes final put_task_struct in finish_task_switch(): */
6986 	set_special_state(TASK_DEAD);
6987 
6988 	/* Tell freezer to ignore us: */
6989 	current->flags |= PF_NOFREEZE;
6990 
6991 	__schedule(SM_NONE);
6992 	BUG();
6993 
6994 	/* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
6995 	for (;;)
6996 		cpu_relax();
6997 }
6998 
6999 static inline void sched_submit_work(struct task_struct *tsk)
7000 {
7001 	static DEFINE_WAIT_OVERRIDE_MAP(sched_map, LD_WAIT_CONFIG);
7002 	unsigned int task_flags;
7003 
7004 	/*
7005 	 * Establish LD_WAIT_CONFIG context to ensure none of the code called
7006 	 * will use a blocking primitive -- which would lead to recursion.
7007 	 */
7008 	lock_map_acquire_try(&sched_map);
7009 
7010 	task_flags = tsk->flags;
7011 	/*
7012 	 * If a worker goes to sleep, notify and ask workqueue whether it
7013 	 * wants to wake up a task to maintain concurrency.
7014 	 */
7015 	if (task_flags & PF_WQ_WORKER)
7016 		wq_worker_sleeping(tsk);
7017 	else if (task_flags & PF_IO_WORKER)
7018 		io_wq_worker_sleeping(tsk);
7019 
7020 	/*
7021 	 * spinlock and rwlock must not flush block requests.  This will
7022 	 * deadlock if the callback attempts to acquire a lock which is
7023 	 * already acquired.
7024 	 */
7025 	WARN_ON_ONCE(current->__state & TASK_RTLOCK_WAIT);
7026 
7027 	/*
7028 	 * If we are going to sleep and we have plugged IO queued,
7029 	 * make sure to submit it to avoid deadlocks.
7030 	 */
7031 	blk_flush_plug(tsk->plug, true);
7032 
7033 	lock_map_release(&sched_map);
7034 }
7035 
7036 static void sched_update_worker(struct task_struct *tsk)
7037 {
7038 	if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER | PF_BLOCK_TS)) {
7039 		if (tsk->flags & PF_BLOCK_TS)
7040 			blk_plug_invalidate_ts(tsk);
7041 		if (tsk->flags & PF_WQ_WORKER)
7042 			wq_worker_running(tsk);
7043 		else if (tsk->flags & PF_IO_WORKER)
7044 			io_wq_worker_running(tsk);
7045 	}
7046 }
7047 
7048 static __always_inline void __schedule_loop(int sched_mode)
7049 {
7050 	do {
7051 		preempt_disable();
7052 		__schedule(sched_mode);
7053 		sched_preempt_enable_no_resched();
7054 	} while (need_resched());
7055 }
7056 
7057 asmlinkage __visible void __sched schedule(void)
7058 {
7059 	struct task_struct *tsk = current;
7060 
7061 #ifdef CONFIG_RT_MUTEXES
7062 	lockdep_assert(!tsk->sched_rt_mutex);
7063 #endif
7064 
7065 	if (!task_is_running(tsk))
7066 		sched_submit_work(tsk);
7067 	__schedule_loop(SM_NONE);
7068 	sched_update_worker(tsk);
7069 }
7070 EXPORT_SYMBOL(schedule);
7071 
7072 /*
7073  * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
7074  * state (have scheduled out non-voluntarily) by making sure that all
7075  * tasks have either left the run queue or have gone into user space.
7076  * As idle tasks do not do either, they must not ever be preempted
7077  * (schedule out non-voluntarily).
7078  *
7079  * schedule_idle() is similar to schedule_preempt_disable() except that it
7080  * never enables preemption because it does not call sched_submit_work().
7081  */
7082 void __sched schedule_idle(void)
7083 {
7084 	/*
7085 	 * As this skips calling sched_submit_work(), which the idle task does
7086 	 * regardless because that function is a NOP when the task is in a
7087 	 * TASK_RUNNING state, make sure this isn't used someplace that the
7088 	 * current task can be in any other state. Note, idle is always in the
7089 	 * TASK_RUNNING state.
7090 	 */
7091 	WARN_ON_ONCE(current->__state);
7092 	do {
7093 		__schedule(SM_IDLE);
7094 	} while (need_resched());
7095 }
7096 
7097 #if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK)
7098 asmlinkage __visible void __sched schedule_user(void)
7099 {
7100 	/*
7101 	 * If we come here after a random call to set_need_resched(),
7102 	 * or we have been woken up remotely but the IPI has not yet arrived,
7103 	 * we haven't yet exited the RCU idle mode. Do it here manually until
7104 	 * we find a better solution.
7105 	 *
7106 	 * NB: There are buggy callers of this function.  Ideally we
7107 	 * should warn if prev_state != CT_STATE_USER, but that will trigger
7108 	 * too frequently to make sense yet.
7109 	 */
7110 	enum ctx_state prev_state = exception_enter();
7111 	schedule();
7112 	exception_exit(prev_state);
7113 }
7114 #endif
7115 
7116 /**
7117  * schedule_preempt_disabled - called with preemption disabled
7118  *
7119  * Returns with preemption disabled. Note: preempt_count must be 1
7120  */
7121 void __sched schedule_preempt_disabled(void)
7122 {
7123 	sched_preempt_enable_no_resched();
7124 	schedule();
7125 	preempt_disable();
7126 }
7127 
7128 #ifdef CONFIG_PREEMPT_RT
7129 void __sched notrace schedule_rtlock(void)
7130 {
7131 	__schedule_loop(SM_RTLOCK_WAIT);
7132 }
7133 NOKPROBE_SYMBOL(schedule_rtlock);
7134 #endif
7135 
7136 static void __sched notrace preempt_schedule_common(void)
7137 {
7138 	do {
7139 		/*
7140 		 * Because the function tracer can trace preempt_count_sub()
7141 		 * and it also uses preempt_enable/disable_notrace(), if
7142 		 * NEED_RESCHED is set, the preempt_enable_notrace() called
7143 		 * by the function tracer will call this function again and
7144 		 * cause infinite recursion.
7145 		 *
7146 		 * Preemption must be disabled here before the function
7147 		 * tracer can trace. Break up preempt_disable() into two
7148 		 * calls. One to disable preemption without fear of being
7149 		 * traced. The other to still record the preemption latency,
7150 		 * which can also be traced by the function tracer.
7151 		 */
7152 		preempt_disable_notrace();
7153 		preempt_latency_start(1);
7154 		__schedule(SM_PREEMPT);
7155 		preempt_latency_stop(1);
7156 		preempt_enable_no_resched_notrace();
7157 
7158 		/*
7159 		 * Check again in case we missed a preemption opportunity
7160 		 * between schedule and now.
7161 		 */
7162 	} while (need_resched());
7163 }
7164 
7165 #ifdef CONFIG_PREEMPTION
7166 /*
7167  * This is the entry point to schedule() from in-kernel preemption
7168  * off of preempt_enable.
7169  */
7170 asmlinkage __visible void __sched notrace preempt_schedule(void)
7171 {
7172 	/*
7173 	 * If there is a non-zero preempt_count or interrupts are disabled,
7174 	 * we do not want to preempt the current task. Just return..
7175 	 */
7176 	if (likely(!preemptible()))
7177 		return;
7178 	preempt_schedule_common();
7179 }
7180 NOKPROBE_SYMBOL(preempt_schedule);
7181 EXPORT_SYMBOL(preempt_schedule);
7182 
7183 #ifdef CONFIG_PREEMPT_DYNAMIC
7184 # ifdef CONFIG_HAVE_PREEMPT_DYNAMIC_CALL
7185 #  ifndef preempt_schedule_dynamic_enabled
7186 #   define preempt_schedule_dynamic_enabled	preempt_schedule
7187 #   define preempt_schedule_dynamic_disabled	NULL
7188 #  endif
7189 DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
7190 EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
7191 # elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7192 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);
7193 void __sched notrace dynamic_preempt_schedule(void)
7194 {
7195 	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
7196 		return;
7197 	preempt_schedule();
7198 }
7199 NOKPROBE_SYMBOL(dynamic_preempt_schedule);
7200 EXPORT_SYMBOL(dynamic_preempt_schedule);
7201 # endif
7202 #endif /* CONFIG_PREEMPT_DYNAMIC */
7203 
7204 /**
7205  * preempt_schedule_notrace - preempt_schedule called by tracing
7206  *
7207  * The tracing infrastructure uses preempt_enable_notrace to prevent
7208  * recursion and tracing preempt enabling caused by the tracing
7209  * infrastructure itself. But as tracing can happen in areas coming
7210  * from userspace or just about to enter userspace, a preempt enable
7211  * can occur before user_exit() is called. This will cause the scheduler
7212  * to be called when the system is still in usermode.
7213  *
7214  * To prevent this, the preempt_enable_notrace will use this function
7215  * instead of preempt_schedule() to exit user context if needed before
7216  * calling the scheduler.
7217  */
7218 asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
7219 {
7220 	enum ctx_state prev_ctx;
7221 
7222 	if (likely(!preemptible()))
7223 		return;
7224 
7225 	do {
7226 		/*
7227 		 * Because the function tracer can trace preempt_count_sub()
7228 		 * and it also uses preempt_enable/disable_notrace(), if
7229 		 * NEED_RESCHED is set, the preempt_enable_notrace() called
7230 		 * by the function tracer will call this function again and
7231 		 * cause infinite recursion.
7232 		 *
7233 		 * Preemption must be disabled here before the function
7234 		 * tracer can trace. Break up preempt_disable() into two
7235 		 * calls. One to disable preemption without fear of being
7236 		 * traced. The other to still record the preemption latency,
7237 		 * which can also be traced by the function tracer.
7238 		 */
7239 		preempt_disable_notrace();
7240 		preempt_latency_start(1);
7241 		/*
7242 		 * Needs preempt disabled in case user_exit() is traced
7243 		 * and the tracer calls preempt_enable_notrace() causing
7244 		 * an infinite recursion.
7245 		 */
7246 		prev_ctx = exception_enter();
7247 		__schedule(SM_PREEMPT);
7248 		exception_exit(prev_ctx);
7249 
7250 		preempt_latency_stop(1);
7251 		preempt_enable_no_resched_notrace();
7252 	} while (need_resched());
7253 }
7254 EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
7255 
7256 #ifdef CONFIG_PREEMPT_DYNAMIC
7257 # if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
7258 #  ifndef preempt_schedule_notrace_dynamic_enabled
7259 #   define preempt_schedule_notrace_dynamic_enabled	preempt_schedule_notrace
7260 #   define preempt_schedule_notrace_dynamic_disabled	NULL
7261 #  endif
7262 DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
7263 EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
7264 # elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7265 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace);
7266 void __sched notrace dynamic_preempt_schedule_notrace(void)
7267 {
7268 	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace))
7269 		return;
7270 	preempt_schedule_notrace();
7271 }
7272 NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace);
7273 EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
7274 # endif
7275 #endif
7276 
7277 #endif /* CONFIG_PREEMPTION */
7278 
7279 /*
7280  * This is the entry point to schedule() from kernel preemption
7281  * off of IRQ context.
7282  * Note, that this is called and return with IRQs disabled. This will
7283  * protect us against recursive calling from IRQ contexts.
7284  */
7285 asmlinkage __visible void __sched preempt_schedule_irq(void)
7286 {
7287 	enum ctx_state prev_state;
7288 
7289 	/* Catch callers which need to be fixed */
7290 	BUG_ON(preempt_count() || !irqs_disabled());
7291 
7292 	prev_state = exception_enter();
7293 
7294 	do {
7295 		preempt_disable();
7296 		local_irq_enable();
7297 		__schedule(SM_PREEMPT);
7298 		local_irq_disable();
7299 		sched_preempt_enable_no_resched();
7300 	} while (need_resched());
7301 
7302 	exception_exit(prev_state);
7303 }
7304 
7305 int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
7306 			  void *key)
7307 {
7308 	WARN_ON_ONCE(wake_flags & ~(WF_SYNC|WF_CURRENT_CPU));
7309 	return try_to_wake_up(curr->private, mode, wake_flags);
7310 }
7311 EXPORT_SYMBOL(default_wake_function);
7312 
7313 const struct sched_class *__setscheduler_class(int policy, int prio)
7314 {
7315 	if (dl_prio(prio))
7316 		return &dl_sched_class;
7317 
7318 	if (rt_prio(prio))
7319 		return &rt_sched_class;
7320 
7321 #ifdef CONFIG_SCHED_CLASS_EXT
7322 	if (task_should_scx(policy))
7323 		return &ext_sched_class;
7324 #endif
7325 
7326 	return &fair_sched_class;
7327 }
7328 
7329 #ifdef CONFIG_RT_MUTEXES
7330 
7331 /*
7332  * Would be more useful with typeof()/auto_type but they don't mix with
7333  * bit-fields. Since it's a local thing, use int. Keep the generic sounding
7334  * name such that if someone were to implement this function we get to compare
7335  * notes.
7336  */
7337 #define fetch_and_set(x, v) ({ int _x = (x); (x) = (v); _x; })
7338 
7339 void rt_mutex_pre_schedule(void)
7340 {
7341 	lockdep_assert(!fetch_and_set(current->sched_rt_mutex, 1));
7342 	sched_submit_work(current);
7343 }
7344 
7345 void rt_mutex_schedule(void)
7346 {
7347 	lockdep_assert(current->sched_rt_mutex);
7348 	__schedule_loop(SM_NONE);
7349 }
7350 
7351 void rt_mutex_post_schedule(void)
7352 {
7353 	sched_update_worker(current);
7354 	lockdep_assert(fetch_and_set(current->sched_rt_mutex, 0));
7355 }
7356 
7357 /*
7358  * rt_mutex_setprio - set the current priority of a task
7359  * @p: task to boost
7360  * @pi_task: donor task
7361  *
7362  * This function changes the 'effective' priority of a task. It does
7363  * not touch ->normal_prio like __setscheduler().
7364  *
7365  * Used by the rt_mutex code to implement priority inheritance
7366  * logic. Call site only calls if the priority of the task changed.
7367  */
7368 void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
7369 {
7370 	int prio, oldprio, queue_flag =
7371 		DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
7372 	const struct sched_class *prev_class, *next_class;
7373 	struct rq_flags rf;
7374 	struct rq *rq;
7375 
7376 	/* XXX used to be waiter->prio, not waiter->task->prio */
7377 	prio = __rt_effective_prio(pi_task, p->normal_prio);
7378 
7379 	/*
7380 	 * If nothing changed; bail early.
7381 	 */
7382 	if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio))
7383 		return;
7384 
7385 	rq = __task_rq_lock(p, &rf);
7386 	update_rq_clock(rq);
7387 	/*
7388 	 * Set under pi_lock && rq->lock, such that the value can be used under
7389 	 * either lock.
7390 	 *
7391 	 * Note that there is loads of tricky to make this pointer cache work
7392 	 * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
7393 	 * ensure a task is de-boosted (pi_task is set to NULL) before the
7394 	 * task is allowed to run again (and can exit). This ensures the pointer
7395 	 * points to a blocked task -- which guarantees the task is present.
7396 	 */
7397 	p->pi_top_task = pi_task;
7398 
7399 	/*
7400 	 * For FIFO/RR we only need to set prio, if that matches we're done.
7401 	 */
7402 	if (prio == p->prio && !dl_prio(prio))
7403 		goto out_unlock;
7404 
7405 	/*
7406 	 * Idle task boosting is a no-no in general. There is one
7407 	 * exception, when PREEMPT_RT and NOHZ is active:
7408 	 *
7409 	 * The idle task calls get_next_timer_interrupt() and holds
7410 	 * the timer wheel base->lock on the CPU and another CPU wants
7411 	 * to access the timer (probably to cancel it). We can safely
7412 	 * ignore the boosting request, as the idle CPU runs this code
7413 	 * with interrupts disabled and will complete the lock
7414 	 * protected section without being interrupted. So there is no
7415 	 * real need to boost.
7416 	 */
7417 	if (unlikely(p == rq->idle)) {
7418 		WARN_ON(p != rq->curr);
7419 		WARN_ON(p->pi_blocked_on);
7420 		goto out_unlock;
7421 	}
7422 
7423 	trace_sched_pi_setprio(p, pi_task);
7424 	oldprio = p->prio;
7425 
7426 	if (oldprio == prio && !dl_prio(prio))
7427 		queue_flag &= ~DEQUEUE_MOVE;
7428 
7429 	prev_class = p->sched_class;
7430 	next_class = __setscheduler_class(p->policy, prio);
7431 
7432 	if (prev_class != next_class)
7433 		queue_flag |= DEQUEUE_CLASS;
7434 
7435 	scoped_guard (sched_change, p, queue_flag) {
7436 		/*
7437 		 * Boosting condition are:
7438 		 * 1. -rt task is running and holds mutex A
7439 		 *      --> -dl task blocks on mutex A
7440 		 *
7441 		 * 2. -dl task is running and holds mutex A
7442 		 *      --> -dl task blocks on mutex A and could preempt the
7443 		 *          running task
7444 		 */
7445 		if (dl_prio(prio)) {
7446 			if (!dl_prio(p->normal_prio) ||
7447 			    (pi_task && dl_prio(pi_task->prio) &&
7448 			     dl_entity_preempt(&pi_task->dl, &p->dl))) {
7449 				p->dl.pi_se = pi_task->dl.pi_se;
7450 				scope->flags |= ENQUEUE_REPLENISH;
7451 			} else {
7452 				p->dl.pi_se = &p->dl;
7453 			}
7454 		} else if (rt_prio(prio)) {
7455 			if (dl_prio(oldprio))
7456 				p->dl.pi_se = &p->dl;
7457 			if (oldprio < prio)
7458 				scope->flags |= ENQUEUE_HEAD;
7459 		} else {
7460 			if (dl_prio(oldprio))
7461 				p->dl.pi_se = &p->dl;
7462 			if (rt_prio(oldprio))
7463 				p->rt.timeout = 0;
7464 		}
7465 
7466 		p->sched_class = next_class;
7467 		p->prio = prio;
7468 	}
7469 out_unlock:
7470 	/* Caller holds task_struct::pi_lock, IRQs are still disabled */
7471 
7472 	__balance_callbacks(rq, &rf);
7473 	__task_rq_unlock(rq, p, &rf);
7474 }
7475 #endif /* CONFIG_RT_MUTEXES */
7476 
7477 #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
7478 int __sched __cond_resched(void)
7479 {
7480 	if (should_resched(0) && !irqs_disabled()) {
7481 		preempt_schedule_common();
7482 		return 1;
7483 	}
7484 	/*
7485 	 * In PREEMPT_RCU kernels, ->rcu_read_lock_nesting tells the tick
7486 	 * whether the current CPU is in an RCU read-side critical section,
7487 	 * so the tick can report quiescent states even for CPUs looping
7488 	 * in kernel context.  In contrast, in non-preemptible kernels,
7489 	 * RCU readers leave no in-memory hints, which means that CPU-bound
7490 	 * processes executing in kernel context might never report an
7491 	 * RCU quiescent state.  Therefore, the following code causes
7492 	 * cond_resched() to report a quiescent state, but only when RCU
7493 	 * is in urgent need of one.
7494 	 * A third case, preemptible, but non-PREEMPT_RCU provides for
7495 	 * urgently needed quiescent states via rcu_flavor_sched_clock_irq().
7496 	 */
7497 #ifndef CONFIG_PREEMPT_RCU
7498 	rcu_all_qs();
7499 #endif
7500 	return 0;
7501 }
7502 EXPORT_SYMBOL(__cond_resched);
7503 #endif
7504 
7505 #ifdef CONFIG_PREEMPT_DYNAMIC
7506 # ifdef CONFIG_HAVE_PREEMPT_DYNAMIC_CALL
7507 #  define cond_resched_dynamic_enabled	__cond_resched
7508 #  define cond_resched_dynamic_disabled	((void *)&__static_call_return0)
7509 DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
7510 EXPORT_STATIC_CALL_TRAMP(cond_resched);
7511 
7512 #  define might_resched_dynamic_enabled	__cond_resched
7513 #  define might_resched_dynamic_disabled ((void *)&__static_call_return0)
7514 DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
7515 EXPORT_STATIC_CALL_TRAMP(might_resched);
7516 # elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7517 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
7518 int __sched dynamic_cond_resched(void)
7519 {
7520 	if (!static_branch_unlikely(&sk_dynamic_cond_resched))
7521 		return 0;
7522 	return __cond_resched();
7523 }
7524 EXPORT_SYMBOL(dynamic_cond_resched);
7525 
7526 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched);
7527 int __sched dynamic_might_resched(void)
7528 {
7529 	if (!static_branch_unlikely(&sk_dynamic_might_resched))
7530 		return 0;
7531 	return __cond_resched();
7532 }
7533 EXPORT_SYMBOL(dynamic_might_resched);
7534 # endif
7535 #endif /* CONFIG_PREEMPT_DYNAMIC */
7536 
7537 /*
7538  * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
7539  * call schedule, and on return reacquire the lock.
7540  *
7541  * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
7542  * operations here to prevent schedule() from being called twice (once via
7543  * spin_unlock(), once by hand).
7544  */
7545 int __cond_resched_lock(spinlock_t *lock)
7546 {
7547 	int resched = should_resched(PREEMPT_LOCK_OFFSET);
7548 	int ret = 0;
7549 
7550 	lockdep_assert_held(lock);
7551 
7552 	if (spin_needbreak(lock) || resched) {
7553 		spin_unlock(lock);
7554 		if (!_cond_resched())
7555 			cpu_relax();
7556 		ret = 1;
7557 		spin_lock(lock);
7558 	}
7559 	return ret;
7560 }
7561 EXPORT_SYMBOL(__cond_resched_lock);
7562 
7563 int __cond_resched_rwlock_read(rwlock_t *lock)
7564 {
7565 	int resched = should_resched(PREEMPT_LOCK_OFFSET);
7566 	int ret = 0;
7567 
7568 	lockdep_assert_held_read(lock);
7569 
7570 	if (rwlock_needbreak(lock) || resched) {
7571 		read_unlock(lock);
7572 		if (!_cond_resched())
7573 			cpu_relax();
7574 		ret = 1;
7575 		read_lock(lock);
7576 	}
7577 	return ret;
7578 }
7579 EXPORT_SYMBOL(__cond_resched_rwlock_read);
7580 
7581 int __cond_resched_rwlock_write(rwlock_t *lock)
7582 {
7583 	int resched = should_resched(PREEMPT_LOCK_OFFSET);
7584 	int ret = 0;
7585 
7586 	lockdep_assert_held_write(lock);
7587 
7588 	if (rwlock_needbreak(lock) || resched) {
7589 		write_unlock(lock);
7590 		if (!_cond_resched())
7591 			cpu_relax();
7592 		ret = 1;
7593 		write_lock(lock);
7594 	}
7595 	return ret;
7596 }
7597 EXPORT_SYMBOL(__cond_resched_rwlock_write);
7598 
7599 #ifdef CONFIG_PREEMPT_DYNAMIC
7600 
7601 # ifdef CONFIG_GENERIC_IRQ_ENTRY
7602 #  include <linux/irq-entry-common.h>
7603 # endif
7604 
7605 /*
7606  * SC:cond_resched
7607  * SC:might_resched
7608  * SC:preempt_schedule
7609  * SC:preempt_schedule_notrace
7610  * SC:irqentry_exit_cond_resched
7611  *
7612  *
7613  * NONE:
7614  *   cond_resched               <- __cond_resched
7615  *   might_resched              <- RET0
7616  *   preempt_schedule           <- NOP
7617  *   preempt_schedule_notrace   <- NOP
7618  *   irqentry_exit_cond_resched <- NOP
7619  *   dynamic_preempt_lazy       <- false
7620  *
7621  * VOLUNTARY:
7622  *   cond_resched               <- __cond_resched
7623  *   might_resched              <- __cond_resched
7624  *   preempt_schedule           <- NOP
7625  *   preempt_schedule_notrace   <- NOP
7626  *   irqentry_exit_cond_resched <- NOP
7627  *   dynamic_preempt_lazy       <- false
7628  *
7629  * FULL:
7630  *   cond_resched               <- RET0
7631  *   might_resched              <- RET0
7632  *   preempt_schedule           <- preempt_schedule
7633  *   preempt_schedule_notrace   <- preempt_schedule_notrace
7634  *   irqentry_exit_cond_resched <- irqentry_exit_cond_resched
7635  *   dynamic_preempt_lazy       <- false
7636  *
7637  * LAZY:
7638  *   cond_resched               <- RET0
7639  *   might_resched              <- RET0
7640  *   preempt_schedule           <- preempt_schedule
7641  *   preempt_schedule_notrace   <- preempt_schedule_notrace
7642  *   irqentry_exit_cond_resched <- irqentry_exit_cond_resched
7643  *   dynamic_preempt_lazy       <- true
7644  */
7645 
7646 enum {
7647 	preempt_dynamic_undefined = -1,
7648 	preempt_dynamic_none,
7649 	preempt_dynamic_voluntary,
7650 	preempt_dynamic_full,
7651 	preempt_dynamic_lazy,
7652 };
7653 
7654 int preempt_dynamic_mode = preempt_dynamic_undefined;
7655 
7656 int sched_dynamic_mode(const char *str)
7657 {
7658 # if !(defined(CONFIG_PREEMPT_RT) || defined(CONFIG_ARCH_HAS_PREEMPT_LAZY))
7659 	if (!strcmp(str, "none"))
7660 		return preempt_dynamic_none;
7661 
7662 	if (!strcmp(str, "voluntary"))
7663 		return preempt_dynamic_voluntary;
7664 # endif
7665 
7666 	if (!strcmp(str, "full"))
7667 		return preempt_dynamic_full;
7668 
7669 # ifdef CONFIG_ARCH_HAS_PREEMPT_LAZY
7670 	if (!strcmp(str, "lazy"))
7671 		return preempt_dynamic_lazy;
7672 # endif
7673 
7674 	return -EINVAL;
7675 }
7676 
7677 # define preempt_dynamic_key_enable(f)	static_key_enable(&sk_dynamic_##f.key)
7678 # define preempt_dynamic_key_disable(f)	static_key_disable(&sk_dynamic_##f.key)
7679 
7680 # if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
7681 #  define preempt_dynamic_enable(f)	static_call_update(f, f##_dynamic_enabled)
7682 #  define preempt_dynamic_disable(f)	static_call_update(f, f##_dynamic_disabled)
7683 # elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7684 #  define preempt_dynamic_enable(f)	preempt_dynamic_key_enable(f)
7685 #  define preempt_dynamic_disable(f)	preempt_dynamic_key_disable(f)
7686 # else
7687 #  error "Unsupported PREEMPT_DYNAMIC mechanism"
7688 # endif
7689 
7690 static DEFINE_MUTEX(sched_dynamic_mutex);
7691 
7692 static void __sched_dynamic_update(int mode)
7693 {
7694 	/*
7695 	 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
7696 	 * the ZERO state, which is invalid.
7697 	 */
7698 	preempt_dynamic_enable(cond_resched);
7699 	preempt_dynamic_enable(might_resched);
7700 	preempt_dynamic_enable(preempt_schedule);
7701 	preempt_dynamic_enable(preempt_schedule_notrace);
7702 	preempt_dynamic_enable(irqentry_exit_cond_resched);
7703 	preempt_dynamic_key_disable(preempt_lazy);
7704 
7705 	switch (mode) {
7706 	case preempt_dynamic_none:
7707 		preempt_dynamic_enable(cond_resched);
7708 		preempt_dynamic_disable(might_resched);
7709 		preempt_dynamic_disable(preempt_schedule);
7710 		preempt_dynamic_disable(preempt_schedule_notrace);
7711 		preempt_dynamic_disable(irqentry_exit_cond_resched);
7712 		preempt_dynamic_key_disable(preempt_lazy);
7713 		if (mode != preempt_dynamic_mode)
7714 			pr_info("Dynamic Preempt: none\n");
7715 		break;
7716 
7717 	case preempt_dynamic_voluntary:
7718 		preempt_dynamic_enable(cond_resched);
7719 		preempt_dynamic_enable(might_resched);
7720 		preempt_dynamic_disable(preempt_schedule);
7721 		preempt_dynamic_disable(preempt_schedule_notrace);
7722 		preempt_dynamic_disable(irqentry_exit_cond_resched);
7723 		preempt_dynamic_key_disable(preempt_lazy);
7724 		if (mode != preempt_dynamic_mode)
7725 			pr_info("Dynamic Preempt: voluntary\n");
7726 		break;
7727 
7728 	case preempt_dynamic_full:
7729 		preempt_dynamic_disable(cond_resched);
7730 		preempt_dynamic_disable(might_resched);
7731 		preempt_dynamic_enable(preempt_schedule);
7732 		preempt_dynamic_enable(preempt_schedule_notrace);
7733 		preempt_dynamic_enable(irqentry_exit_cond_resched);
7734 		preempt_dynamic_key_disable(preempt_lazy);
7735 		if (mode != preempt_dynamic_mode)
7736 			pr_info("Dynamic Preempt: full\n");
7737 		break;
7738 
7739 	case preempt_dynamic_lazy:
7740 		preempt_dynamic_disable(cond_resched);
7741 		preempt_dynamic_disable(might_resched);
7742 		preempt_dynamic_enable(preempt_schedule);
7743 		preempt_dynamic_enable(preempt_schedule_notrace);
7744 		preempt_dynamic_enable(irqentry_exit_cond_resched);
7745 		preempt_dynamic_key_enable(preempt_lazy);
7746 		if (mode != preempt_dynamic_mode)
7747 			pr_info("Dynamic Preempt: lazy\n");
7748 		break;
7749 	}
7750 
7751 	preempt_dynamic_mode = mode;
7752 }
7753 
7754 void sched_dynamic_update(int mode)
7755 {
7756 	mutex_lock(&sched_dynamic_mutex);
7757 	__sched_dynamic_update(mode);
7758 	mutex_unlock(&sched_dynamic_mutex);
7759 }
7760 
7761 static int __init setup_preempt_mode(char *str)
7762 {
7763 	int mode = sched_dynamic_mode(str);
7764 	if (mode < 0) {
7765 		pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
7766 		return 0;
7767 	}
7768 
7769 	sched_dynamic_update(mode);
7770 	return 1;
7771 }
7772 __setup("preempt=", setup_preempt_mode);
7773 
7774 static void __init preempt_dynamic_init(void)
7775 {
7776 	if (preempt_dynamic_mode == preempt_dynamic_undefined) {
7777 		if (IS_ENABLED(CONFIG_PREEMPT_NONE)) {
7778 			sched_dynamic_update(preempt_dynamic_none);
7779 		} else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) {
7780 			sched_dynamic_update(preempt_dynamic_voluntary);
7781 		} else if (IS_ENABLED(CONFIG_PREEMPT_LAZY)) {
7782 			sched_dynamic_update(preempt_dynamic_lazy);
7783 		} else {
7784 			/* Default static call setting, nothing to do */
7785 			WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT));
7786 			preempt_dynamic_mode = preempt_dynamic_full;
7787 			pr_info("Dynamic Preempt: full\n");
7788 		}
7789 	}
7790 }
7791 
7792 # define PREEMPT_MODEL_ACCESSOR(mode) \
7793 	bool preempt_model_##mode(void)						 \
7794 	{									 \
7795 		WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \
7796 		return preempt_dynamic_mode == preempt_dynamic_##mode;		 \
7797 	}									 \
7798 	EXPORT_SYMBOL_GPL(preempt_model_##mode)
7799 
7800 PREEMPT_MODEL_ACCESSOR(none);
7801 PREEMPT_MODEL_ACCESSOR(voluntary);
7802 PREEMPT_MODEL_ACCESSOR(full);
7803 PREEMPT_MODEL_ACCESSOR(lazy);
7804 
7805 #else /* !CONFIG_PREEMPT_DYNAMIC: */
7806 
7807 #define preempt_dynamic_mode -1
7808 
7809 static inline void preempt_dynamic_init(void) { }
7810 
7811 #endif /* CONFIG_PREEMPT_DYNAMIC */
7812 
7813 const char *preempt_modes[] = {
7814 	"none", "voluntary", "full", "lazy", NULL,
7815 };
7816 
7817 const char *preempt_model_str(void)
7818 {
7819 	bool brace = IS_ENABLED(CONFIG_PREEMPT_RT) &&
7820 		(IS_ENABLED(CONFIG_PREEMPT_DYNAMIC) ||
7821 		 IS_ENABLED(CONFIG_PREEMPT_LAZY));
7822 	static char buf[128];
7823 
7824 	if (IS_ENABLED(CONFIG_PREEMPT_BUILD)) {
7825 		struct seq_buf s;
7826 
7827 		seq_buf_init(&s, buf, sizeof(buf));
7828 		seq_buf_puts(&s, "PREEMPT");
7829 
7830 		if (IS_ENABLED(CONFIG_PREEMPT_RT))
7831 			seq_buf_printf(&s, "%sRT%s",
7832 				       brace ? "_{" : "_",
7833 				       brace ? "," : "");
7834 
7835 		if (IS_ENABLED(CONFIG_PREEMPT_DYNAMIC)) {
7836 			seq_buf_printf(&s, "(%s)%s",
7837 				       preempt_dynamic_mode >= 0 ?
7838 				       preempt_modes[preempt_dynamic_mode] : "undef",
7839 				       brace ? "}" : "");
7840 			return seq_buf_str(&s);
7841 		}
7842 
7843 		if (IS_ENABLED(CONFIG_PREEMPT_LAZY)) {
7844 			seq_buf_printf(&s, "LAZY%s",
7845 				       brace ? "}" : "");
7846 			return seq_buf_str(&s);
7847 		}
7848 
7849 		return seq_buf_str(&s);
7850 	}
7851 
7852 	if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY_BUILD))
7853 		return "VOLUNTARY";
7854 
7855 	return "NONE";
7856 }
7857 
7858 int io_schedule_prepare(void)
7859 {
7860 	int old_iowait = current->in_iowait;
7861 
7862 	current->in_iowait = 1;
7863 	blk_flush_plug(current->plug, true);
7864 	return old_iowait;
7865 }
7866 
7867 void io_schedule_finish(int token)
7868 {
7869 	current->in_iowait = token;
7870 }
7871 
7872 /*
7873  * This task is about to go to sleep on IO. Increment rq->nr_iowait so
7874  * that process accounting knows that this is a task in IO wait state.
7875  */
7876 long __sched io_schedule_timeout(long timeout)
7877 {
7878 	int token;
7879 	long ret;
7880 
7881 	token = io_schedule_prepare();
7882 	ret = schedule_timeout(timeout);
7883 	io_schedule_finish(token);
7884 
7885 	return ret;
7886 }
7887 EXPORT_SYMBOL(io_schedule_timeout);
7888 
7889 void __sched io_schedule(void)
7890 {
7891 	int token;
7892 
7893 	token = io_schedule_prepare();
7894 	schedule();
7895 	io_schedule_finish(token);
7896 }
7897 EXPORT_SYMBOL(io_schedule);
7898 
7899 void sched_show_task(struct task_struct *p)
7900 {
7901 	unsigned long free;
7902 	int ppid;
7903 
7904 	if (!try_get_task_stack(p))
7905 		return;
7906 
7907 	pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
7908 
7909 	if (task_is_running(p))
7910 		pr_cont("  running task    ");
7911 	free = stack_not_used(p);
7912 	ppid = 0;
7913 	rcu_read_lock();
7914 	if (pid_alive(p))
7915 		ppid = task_pid_nr(rcu_dereference(p->real_parent));
7916 	rcu_read_unlock();
7917 	pr_cont(" stack:%-5lu pid:%-5d tgid:%-5d ppid:%-6d task_flags:0x%04x flags:0x%08lx\n",
7918 		free, task_pid_nr(p), task_tgid_nr(p),
7919 		ppid, p->flags, read_task_thread_flags(p));
7920 
7921 	print_worker_info(KERN_INFO, p);
7922 	print_stop_info(KERN_INFO, p);
7923 	print_scx_info(KERN_INFO, p);
7924 	show_stack(p, NULL, KERN_INFO);
7925 	put_task_stack(p);
7926 }
7927 EXPORT_SYMBOL_GPL(sched_show_task);
7928 
7929 static inline bool
7930 state_filter_match(unsigned long state_filter, struct task_struct *p)
7931 {
7932 	unsigned int state = READ_ONCE(p->__state);
7933 
7934 	/* no filter, everything matches */
7935 	if (!state_filter)
7936 		return true;
7937 
7938 	/* filter, but doesn't match */
7939 	if (!(state & state_filter))
7940 		return false;
7941 
7942 	/*
7943 	 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
7944 	 * TASK_KILLABLE).
7945 	 */
7946 	if (state_filter == TASK_UNINTERRUPTIBLE && (state & TASK_NOLOAD))
7947 		return false;
7948 
7949 	return true;
7950 }
7951 
7952 
7953 void show_state_filter(unsigned int state_filter)
7954 {
7955 	struct task_struct *g, *p;
7956 
7957 	rcu_read_lock();
7958 	for_each_process_thread(g, p) {
7959 		/*
7960 		 * reset the NMI-timeout, listing all files on a slow
7961 		 * console might take a lot of time:
7962 		 * Also, reset softlockup watchdogs on all CPUs, because
7963 		 * another CPU might be blocked waiting for us to process
7964 		 * an IPI.
7965 		 */
7966 		touch_nmi_watchdog();
7967 		touch_all_softlockup_watchdogs();
7968 		if (state_filter_match(state_filter, p))
7969 			sched_show_task(p);
7970 	}
7971 
7972 	if (!state_filter)
7973 		sysrq_sched_debug_show();
7974 
7975 	rcu_read_unlock();
7976 	/*
7977 	 * Only show locks if all tasks are dumped:
7978 	 */
7979 	if (!state_filter)
7980 		debug_show_all_locks();
7981 }
7982 
7983 /**
7984  * init_idle - set up an idle thread for a given CPU
7985  * @idle: task in question
7986  * @cpu: CPU the idle task belongs to
7987  *
7988  * NOTE: this function does not set the idle thread's NEED_RESCHED
7989  * flag, to make booting more robust.
7990  */
7991 void __init init_idle(struct task_struct *idle, int cpu)
7992 {
7993 	struct affinity_context ac = (struct affinity_context) {
7994 		.new_mask  = cpumask_of(cpu),
7995 		.flags     = 0,
7996 	};
7997 	struct rq *rq = cpu_rq(cpu);
7998 	unsigned long flags;
7999 
8000 	raw_spin_lock_irqsave(&idle->pi_lock, flags);
8001 	raw_spin_rq_lock(rq);
8002 
8003 	idle->__state = TASK_RUNNING;
8004 	idle->se.exec_start = sched_clock();
8005 	/*
8006 	 * PF_KTHREAD should already be set at this point; regardless, make it
8007 	 * look like a proper per-CPU kthread.
8008 	 */
8009 	idle->flags |= PF_KTHREAD | PF_NO_SETAFFINITY;
8010 	kthread_set_per_cpu(idle, cpu);
8011 
8012 	/*
8013 	 * No validation and serialization required at boot time and for
8014 	 * setting up the idle tasks of not yet online CPUs.
8015 	 */
8016 	set_cpus_allowed_common(idle, &ac);
8017 	/*
8018 	 * We're having a chicken and egg problem, even though we are
8019 	 * holding rq->lock, the CPU isn't yet set to this CPU so the
8020 	 * lockdep check in task_group() will fail.
8021 	 *
8022 	 * Similar case to sched_fork(). / Alternatively we could
8023 	 * use task_rq_lock() here and obtain the other rq->lock.
8024 	 *
8025 	 * Silence PROVE_RCU
8026 	 */
8027 	rcu_read_lock();
8028 	__set_task_cpu(idle, cpu);
8029 	rcu_read_unlock();
8030 
8031 	rq->idle = idle;
8032 	rq_set_donor(rq, idle);
8033 	rcu_assign_pointer(rq->curr, idle);
8034 	idle->on_rq = TASK_ON_RQ_QUEUED;
8035 	idle->on_cpu = 1;
8036 	raw_spin_rq_unlock(rq);
8037 	raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
8038 
8039 	/* Set the preempt count _outside_ the spinlocks! */
8040 	init_idle_preempt_count(idle, cpu);
8041 
8042 	/*
8043 	 * The idle tasks have their own, simple scheduling class:
8044 	 */
8045 	idle->sched_class = &idle_sched_class;
8046 	ftrace_graph_init_idle_task(idle, cpu);
8047 	vtime_init_idle(idle, cpu);
8048 	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
8049 }
8050 
8051 int cpuset_cpumask_can_shrink(const struct cpumask *cur,
8052 			      const struct cpumask *trial)
8053 {
8054 	int ret = 1;
8055 
8056 	if (cpumask_empty(cur))
8057 		return ret;
8058 
8059 	ret = dl_cpuset_cpumask_can_shrink(cur, trial);
8060 
8061 	return ret;
8062 }
8063 
8064 int task_can_attach(struct task_struct *p)
8065 {
8066 	int ret = 0;
8067 
8068 	/*
8069 	 * Kthreads which disallow setaffinity shouldn't be moved
8070 	 * to a new cpuset; we don't want to change their CPU
8071 	 * affinity and isolating such threads by their set of
8072 	 * allowed nodes is unnecessary.  Thus, cpusets are not
8073 	 * applicable for such threads.  This prevents checking for
8074 	 * success of set_cpus_allowed_ptr() on all attached tasks
8075 	 * before cpus_mask may be changed.
8076 	 */
8077 	if (p->flags & PF_NO_SETAFFINITY)
8078 		ret = -EINVAL;
8079 
8080 	return ret;
8081 }
8082 
8083 bool sched_smp_initialized __read_mostly;
8084 
8085 #ifdef CONFIG_NUMA_BALANCING
8086 /* Migrate current task p to target_cpu */
8087 int migrate_task_to(struct task_struct *p, int target_cpu)
8088 {
8089 	struct migration_arg arg = { p, target_cpu };
8090 	int curr_cpu = task_cpu(p);
8091 
8092 	if (curr_cpu == target_cpu)
8093 		return 0;
8094 
8095 	if (!cpumask_test_cpu(target_cpu, p->cpus_ptr))
8096 		return -EINVAL;
8097 
8098 	/* TODO: This is not properly updating schedstats */
8099 
8100 	trace_sched_move_numa(p, curr_cpu, target_cpu);
8101 	return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
8102 }
8103 
8104 /*
8105  * Requeue a task on a given node and accurately track the number of NUMA
8106  * tasks on the runqueues
8107  */
8108 void sched_setnuma(struct task_struct *p, int nid)
8109 {
8110 	guard(task_rq_lock)(p);
8111 	scoped_guard (sched_change, p, DEQUEUE_SAVE)
8112 		p->numa_preferred_nid = nid;
8113 }
8114 #endif /* CONFIG_NUMA_BALANCING */
8115 
8116 #ifdef CONFIG_HOTPLUG_CPU
8117 /*
8118  * Invoked on the outgoing CPU in context of the CPU hotplug thread
8119  * after ensuring that there are no user space tasks left on the CPU.
8120  *
8121  * If there is a lazy mm in use on the hotplug thread, drop it and
8122  * switch to init_mm.
8123  *
8124  * The reference count on init_mm is dropped in finish_cpu().
8125  */
8126 static void sched_force_init_mm(void)
8127 {
8128 	struct mm_struct *mm = current->active_mm;
8129 
8130 	if (mm != &init_mm) {
8131 		mmgrab_lazy_tlb(&init_mm);
8132 		local_irq_disable();
8133 		current->active_mm = &init_mm;
8134 		switch_mm_irqs_off(mm, &init_mm, current);
8135 		local_irq_enable();
8136 		finish_arch_post_lock_switch();
8137 		mmdrop_lazy_tlb(mm);
8138 	}
8139 
8140 	/* finish_cpu(), as ran on the BP, will clean up the active_mm state */
8141 }
8142 
8143 static int __balance_push_cpu_stop(void *arg)
8144 {
8145 	struct task_struct *p = arg;
8146 	struct rq *rq = this_rq();
8147 	struct rq_flags rf;
8148 	int cpu;
8149 
8150 	scoped_guard (raw_spinlock_irq, &p->pi_lock) {
8151 		/*
8152 		 * We may change the underlying rq, but the locks held will
8153 		 * appropriately be "transferred" when switching.
8154 		 */
8155 		context_unsafe_alias(rq);
8156 
8157 		cpu = select_fallback_rq(rq->cpu, p);
8158 
8159 		rq_lock(rq, &rf);
8160 		update_rq_clock(rq);
8161 		if (task_rq(p) == rq && task_on_rq_queued(p))
8162 			rq = __migrate_task(rq, &rf, p, cpu);
8163 		rq_unlock(rq, &rf);
8164 	}
8165 
8166 	put_task_struct(p);
8167 
8168 	return 0;
8169 }
8170 
8171 static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
8172 
8173 /*
8174  * Ensure we only run per-cpu kthreads once the CPU goes !active.
8175  *
8176  * This is enabled below SCHED_AP_ACTIVE; when !cpu_active(), but only
8177  * effective when the hotplug motion is down.
8178  */
8179 static void balance_push(struct rq *rq)
8180 	__must_hold(__rq_lockp(rq))
8181 {
8182 	struct task_struct *push_task = rq->curr;
8183 
8184 	lockdep_assert_rq_held(rq);
8185 
8186 	/*
8187 	 * Ensure the thing is persistent until balance_push_set(.on = false);
8188 	 */
8189 	rq->balance_callback = &balance_push_callback;
8190 
8191 	/*
8192 	 * Only active while going offline and when invoked on the outgoing
8193 	 * CPU.
8194 	 */
8195 	if (!cpu_dying(rq->cpu) || rq != this_rq())
8196 		return;
8197 
8198 	/*
8199 	 * Both the cpu-hotplug and stop task are in this case and are
8200 	 * required to complete the hotplug process.
8201 	 */
8202 	if (kthread_is_per_cpu(push_task) ||
8203 	    is_migration_disabled(push_task)) {
8204 
8205 		/*
8206 		 * If this is the idle task on the outgoing CPU try to wake
8207 		 * up the hotplug control thread which might wait for the
8208 		 * last task to vanish. The rcuwait_active() check is
8209 		 * accurate here because the waiter is pinned on this CPU
8210 		 * and can't obviously be running in parallel.
8211 		 *
8212 		 * On RT kernels this also has to check whether there are
8213 		 * pinned and scheduled out tasks on the runqueue. They
8214 		 * need to leave the migrate disabled section first.
8215 		 */
8216 		if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
8217 		    rcuwait_active(&rq->hotplug_wait)) {
8218 			raw_spin_rq_unlock(rq);
8219 			rcuwait_wake_up(&rq->hotplug_wait);
8220 			raw_spin_rq_lock(rq);
8221 		}
8222 		return;
8223 	}
8224 
8225 	get_task_struct(push_task);
8226 	/*
8227 	 * Temporarily drop rq->lock such that we can wake-up the stop task.
8228 	 * Both preemption and IRQs are still disabled.
8229 	 */
8230 	preempt_disable();
8231 	raw_spin_rq_unlock(rq);
8232 	stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
8233 			    this_cpu_ptr(&push_work));
8234 	preempt_enable();
8235 	/*
8236 	 * At this point need_resched() is true and we'll take the loop in
8237 	 * schedule(). The next pick is obviously going to be the stop task
8238 	 * which kthread_is_per_cpu() and will push this task away.
8239 	 */
8240 	raw_spin_rq_lock(rq);
8241 }
8242 
8243 static void balance_push_set(int cpu, bool on)
8244 {
8245 	struct rq *rq = cpu_rq(cpu);
8246 	struct rq_flags rf;
8247 
8248 	rq_lock_irqsave(rq, &rf);
8249 	if (on) {
8250 		WARN_ON_ONCE(rq->balance_callback);
8251 		rq->balance_callback = &balance_push_callback;
8252 	} else if (rq->balance_callback == &balance_push_callback) {
8253 		rq->balance_callback = NULL;
8254 	}
8255 	rq_unlock_irqrestore(rq, &rf);
8256 }
8257 
8258 /*
8259  * Invoked from a CPUs hotplug control thread after the CPU has been marked
8260  * inactive. All tasks which are not per CPU kernel threads are either
8261  * pushed off this CPU now via balance_push() or placed on a different CPU
8262  * during wakeup. Wait until the CPU is quiescent.
8263  */
8264 static void balance_hotplug_wait(void)
8265 {
8266 	struct rq *rq = this_rq();
8267 
8268 	rcuwait_wait_event(&rq->hotplug_wait,
8269 			   rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
8270 			   TASK_UNINTERRUPTIBLE);
8271 }
8272 
8273 #else /* !CONFIG_HOTPLUG_CPU: */
8274 
8275 static inline void balance_push(struct rq *rq)
8276 {
8277 }
8278 
8279 static inline void balance_push_set(int cpu, bool on)
8280 {
8281 }
8282 
8283 static inline void balance_hotplug_wait(void)
8284 {
8285 }
8286 
8287 #endif /* !CONFIG_HOTPLUG_CPU */
8288 
8289 void set_rq_online(struct rq *rq)
8290 {
8291 	if (!rq->online) {
8292 		const struct sched_class *class;
8293 
8294 		cpumask_set_cpu(rq->cpu, rq->rd->online);
8295 		rq->online = 1;
8296 
8297 		for_each_class(class) {
8298 			if (class->rq_online)
8299 				class->rq_online(rq);
8300 		}
8301 	}
8302 }
8303 
8304 void set_rq_offline(struct rq *rq)
8305 {
8306 	if (rq->online) {
8307 		const struct sched_class *class;
8308 
8309 		update_rq_clock(rq);
8310 		for_each_class(class) {
8311 			if (class->rq_offline)
8312 				class->rq_offline(rq);
8313 		}
8314 
8315 		cpumask_clear_cpu(rq->cpu, rq->rd->online);
8316 		rq->online = 0;
8317 	}
8318 }
8319 
8320 static inline void sched_set_rq_online(struct rq *rq, int cpu)
8321 {
8322 	struct rq_flags rf;
8323 
8324 	rq_lock_irqsave(rq, &rf);
8325 	if (rq->rd) {
8326 		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
8327 		set_rq_online(rq);
8328 	}
8329 	rq_unlock_irqrestore(rq, &rf);
8330 }
8331 
8332 static inline void sched_set_rq_offline(struct rq *rq, int cpu)
8333 {
8334 	struct rq_flags rf;
8335 
8336 	rq_lock_irqsave(rq, &rf);
8337 	if (rq->rd) {
8338 		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
8339 		set_rq_offline(rq);
8340 	}
8341 	rq_unlock_irqrestore(rq, &rf);
8342 }
8343 
8344 /*
8345  * used to mark begin/end of suspend/resume:
8346  */
8347 static int num_cpus_frozen;
8348 
8349 /*
8350  * Update cpusets according to cpu_active mask.  If cpusets are
8351  * disabled, cpuset_update_active_cpus() becomes a simple wrapper
8352  * around partition_sched_domains().
8353  *
8354  * If we come here as part of a suspend/resume, don't touch cpusets because we
8355  * want to restore it back to its original state upon resume anyway.
8356  */
8357 static void cpuset_cpu_active(void)
8358 {
8359 	if (cpuhp_tasks_frozen) {
8360 		/*
8361 		 * num_cpus_frozen tracks how many CPUs are involved in suspend
8362 		 * resume sequence. As long as this is not the last online
8363 		 * operation in the resume sequence, just build a single sched
8364 		 * domain, ignoring cpusets.
8365 		 */
8366 		cpuset_reset_sched_domains();
8367 		if (--num_cpus_frozen)
8368 			return;
8369 		/*
8370 		 * This is the last CPU online operation. So fall through and
8371 		 * restore the original sched domains by considering the
8372 		 * cpuset configurations.
8373 		 */
8374 		cpuset_force_rebuild();
8375 	}
8376 	cpuset_update_active_cpus();
8377 }
8378 
8379 static void cpuset_cpu_inactive(unsigned int cpu)
8380 {
8381 	if (!cpuhp_tasks_frozen) {
8382 		cpuset_update_active_cpus();
8383 	} else {
8384 		num_cpus_frozen++;
8385 		cpuset_reset_sched_domains();
8386 	}
8387 }
8388 
8389 static inline void sched_smt_present_inc(int cpu)
8390 {
8391 #ifdef CONFIG_SCHED_SMT
8392 	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
8393 		static_branch_inc_cpuslocked(&sched_smt_present);
8394 #endif
8395 }
8396 
8397 static inline void sched_smt_present_dec(int cpu)
8398 {
8399 #ifdef CONFIG_SCHED_SMT
8400 	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
8401 		static_branch_dec_cpuslocked(&sched_smt_present);
8402 #endif
8403 }
8404 
8405 int sched_cpu_activate(unsigned int cpu)
8406 {
8407 	struct rq *rq = cpu_rq(cpu);
8408 
8409 	/*
8410 	 * Clear the balance_push callback and prepare to schedule
8411 	 * regular tasks.
8412 	 */
8413 	balance_push_set(cpu, false);
8414 
8415 	/*
8416 	 * When going up, increment the number of cores with SMT present.
8417 	 */
8418 	sched_smt_present_inc(cpu);
8419 	set_cpu_active(cpu, true);
8420 
8421 	if (sched_smp_initialized) {
8422 		sched_update_numa(cpu, true);
8423 		sched_domains_numa_masks_set(cpu);
8424 		cpuset_cpu_active();
8425 	}
8426 
8427 	scx_rq_activate(rq);
8428 
8429 	/*
8430 	 * Put the rq online, if not already. This happens:
8431 	 *
8432 	 * 1) In the early boot process, because we build the real domains
8433 	 *    after all CPUs have been brought up.
8434 	 *
8435 	 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
8436 	 *    domains.
8437 	 */
8438 	sched_set_rq_online(rq, cpu);
8439 
8440 	return 0;
8441 }
8442 
8443 int sched_cpu_deactivate(unsigned int cpu)
8444 {
8445 	struct rq *rq = cpu_rq(cpu);
8446 	int ret;
8447 
8448 	ret = dl_bw_deactivate(cpu);
8449 
8450 	if (ret)
8451 		return ret;
8452 
8453 	/*
8454 	 * Remove CPU from nohz.idle_cpus_mask to prevent participating in
8455 	 * load balancing when not active
8456 	 */
8457 	nohz_balance_exit_idle(rq);
8458 
8459 	set_cpu_active(cpu, false);
8460 
8461 	/*
8462 	 * From this point forward, this CPU will refuse to run any task that
8463 	 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
8464 	 * push those tasks away until this gets cleared, see
8465 	 * sched_cpu_dying().
8466 	 */
8467 	balance_push_set(cpu, true);
8468 
8469 	/*
8470 	 * We've cleared cpu_active_mask / set balance_push, wait for all
8471 	 * preempt-disabled and RCU users of this state to go away such that
8472 	 * all new such users will observe it.
8473 	 *
8474 	 * Specifically, we rely on ttwu to no longer target this CPU, see
8475 	 * ttwu_queue_cond() and is_cpu_allowed().
8476 	 *
8477 	 * Do sync before park smpboot threads to take care the RCU boost case.
8478 	 */
8479 	synchronize_rcu();
8480 
8481 	sched_set_rq_offline(rq, cpu);
8482 
8483 	scx_rq_deactivate(rq);
8484 
8485 	/*
8486 	 * When going down, decrement the number of cores with SMT present.
8487 	 */
8488 	sched_smt_present_dec(cpu);
8489 
8490 #ifdef CONFIG_SCHED_SMT
8491 	sched_core_cpu_deactivate(cpu);
8492 #endif
8493 
8494 	if (!sched_smp_initialized)
8495 		return 0;
8496 
8497 	sched_update_numa(cpu, false);
8498 	cpuset_cpu_inactive(cpu);
8499 	sched_domains_numa_masks_clear(cpu);
8500 	return 0;
8501 }
8502 
8503 static void sched_rq_cpu_starting(unsigned int cpu)
8504 {
8505 	struct rq *rq = cpu_rq(cpu);
8506 
8507 	rq->calc_load_update = calc_load_update;
8508 	update_max_interval();
8509 }
8510 
8511 int sched_cpu_starting(unsigned int cpu)
8512 {
8513 	sched_core_cpu_starting(cpu);
8514 	sched_rq_cpu_starting(cpu);
8515 	sched_tick_start(cpu);
8516 	return 0;
8517 }
8518 
8519 #ifdef CONFIG_HOTPLUG_CPU
8520 
8521 /*
8522  * Invoked immediately before the stopper thread is invoked to bring the
8523  * CPU down completely. At this point all per CPU kthreads except the
8524  * hotplug thread (current) and the stopper thread (inactive) have been
8525  * either parked or have been unbound from the outgoing CPU. Ensure that
8526  * any of those which might be on the way out are gone.
8527  *
8528  * If after this point a bound task is being woken on this CPU then the
8529  * responsible hotplug callback has failed to do it's job.
8530  * sched_cpu_dying() will catch it with the appropriate fireworks.
8531  */
8532 int sched_cpu_wait_empty(unsigned int cpu)
8533 {
8534 	balance_hotplug_wait();
8535 	sched_force_init_mm();
8536 	return 0;
8537 }
8538 
8539 /*
8540  * Since this CPU is going 'away' for a while, fold any nr_active delta we
8541  * might have. Called from the CPU stopper task after ensuring that the
8542  * stopper is the last running task on the CPU, so nr_active count is
8543  * stable. We need to take the tear-down thread which is calling this into
8544  * account, so we hand in adjust = 1 to the load calculation.
8545  *
8546  * Also see the comment "Global load-average calculations".
8547  */
8548 static void calc_load_migrate(struct rq *rq)
8549 {
8550 	long delta = calc_load_fold_active(rq, 1);
8551 
8552 	if (delta)
8553 		atomic_long_add(delta, &calc_load_tasks);
8554 }
8555 
8556 static void dump_rq_tasks(struct rq *rq, const char *loglvl)
8557 {
8558 	struct task_struct *g, *p;
8559 	int cpu = cpu_of(rq);
8560 
8561 	lockdep_assert_rq_held(rq);
8562 
8563 	printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
8564 	for_each_process_thread(g, p) {
8565 		if (task_cpu(p) != cpu)
8566 			continue;
8567 
8568 		if (!task_on_rq_queued(p))
8569 			continue;
8570 
8571 		printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
8572 	}
8573 }
8574 
8575 int sched_cpu_dying(unsigned int cpu)
8576 {
8577 	struct rq *rq = cpu_rq(cpu);
8578 	struct rq_flags rf;
8579 
8580 	/* Handle pending wakeups and then migrate everything off */
8581 	sched_tick_stop(cpu);
8582 
8583 	rq_lock_irqsave(rq, &rf);
8584 	update_rq_clock(rq);
8585 	if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
8586 		WARN(true, "Dying CPU not properly vacated!");
8587 		dump_rq_tasks(rq, KERN_WARNING);
8588 	}
8589 	dl_server_stop(&rq->fair_server);
8590 #ifdef CONFIG_SCHED_CLASS_EXT
8591 	dl_server_stop(&rq->ext_server);
8592 #endif
8593 	rq_unlock_irqrestore(rq, &rf);
8594 
8595 	calc_load_migrate(rq);
8596 	update_max_interval();
8597 	hrtick_clear(rq);
8598 	sched_core_cpu_dying(cpu);
8599 	return 0;
8600 }
8601 #endif /* CONFIG_HOTPLUG_CPU */
8602 
8603 void __init sched_init_smp(void)
8604 {
8605 	sched_init_numa(NUMA_NO_NODE);
8606 
8607 	prandom_init_once(&sched_rnd_state);
8608 
8609 	/*
8610 	 * There's no userspace yet to cause hotplug operations; hence all the
8611 	 * CPU masks are stable and all blatant races in the below code cannot
8612 	 * happen.
8613 	 */
8614 	sched_domains_mutex_lock();
8615 	sched_init_domains(cpu_active_mask);
8616 	sched_domains_mutex_unlock();
8617 
8618 	/* Move init over to a non-isolated CPU */
8619 	if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0)
8620 		BUG();
8621 	current->flags &= ~PF_NO_SETAFFINITY;
8622 	sched_init_granularity();
8623 
8624 	init_sched_rt_class();
8625 	init_sched_dl_class();
8626 
8627 	sched_init_dl_servers();
8628 
8629 	sched_smp_initialized = true;
8630 }
8631 
8632 static int __init migration_init(void)
8633 {
8634 	sched_cpu_starting(smp_processor_id());
8635 	return 0;
8636 }
8637 early_initcall(migration_init);
8638 
8639 int in_sched_functions(unsigned long addr)
8640 {
8641 	return in_lock_functions(addr) ||
8642 		(addr >= (unsigned long)__sched_text_start
8643 		&& addr < (unsigned long)__sched_text_end);
8644 }
8645 
8646 #ifdef CONFIG_CGROUP_SCHED
8647 /*
8648  * Default task group.
8649  * Every task in system belongs to this group at bootup.
8650  */
8651 struct task_group root_task_group;
8652 LIST_HEAD(task_groups);
8653 
8654 /* Cacheline aligned slab cache for task_group */
8655 static struct kmem_cache *task_group_cache __ro_after_init;
8656 #endif
8657 
8658 void __init sched_init(void)
8659 {
8660 	unsigned long ptr = 0;
8661 	int i;
8662 
8663 	/* Make sure the linker didn't screw up */
8664 	BUG_ON(!sched_class_above(&stop_sched_class, &dl_sched_class));
8665 	BUG_ON(!sched_class_above(&dl_sched_class, &rt_sched_class));
8666 	BUG_ON(!sched_class_above(&rt_sched_class, &fair_sched_class));
8667 	BUG_ON(!sched_class_above(&fair_sched_class, &idle_sched_class));
8668 #ifdef CONFIG_SCHED_CLASS_EXT
8669 	BUG_ON(!sched_class_above(&fair_sched_class, &ext_sched_class));
8670 	BUG_ON(!sched_class_above(&ext_sched_class, &idle_sched_class));
8671 #endif
8672 
8673 	wait_bit_init();
8674 
8675 #ifdef CONFIG_FAIR_GROUP_SCHED
8676 	ptr += 2 * nr_cpu_ids * sizeof(void **);
8677 #endif
8678 #ifdef CONFIG_RT_GROUP_SCHED
8679 	ptr += 2 * nr_cpu_ids * sizeof(void **);
8680 #endif
8681 	if (ptr) {
8682 		ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT);
8683 
8684 #ifdef CONFIG_FAIR_GROUP_SCHED
8685 		root_task_group.se = (struct sched_entity **)ptr;
8686 		ptr += nr_cpu_ids * sizeof(void **);
8687 
8688 		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
8689 		ptr += nr_cpu_ids * sizeof(void **);
8690 
8691 		root_task_group.shares = ROOT_TASK_GROUP_LOAD;
8692 		init_cfs_bandwidth(&root_task_group.cfs_bandwidth, NULL);
8693 #endif /* CONFIG_FAIR_GROUP_SCHED */
8694 #ifdef CONFIG_EXT_GROUP_SCHED
8695 		scx_tg_init(&root_task_group);
8696 #endif /* CONFIG_EXT_GROUP_SCHED */
8697 #ifdef CONFIG_RT_GROUP_SCHED
8698 		root_task_group.rt_se = (struct sched_rt_entity **)ptr;
8699 		ptr += nr_cpu_ids * sizeof(void **);
8700 
8701 		root_task_group.rt_rq = (struct rt_rq **)ptr;
8702 		ptr += nr_cpu_ids * sizeof(void **);
8703 
8704 #endif /* CONFIG_RT_GROUP_SCHED */
8705 	}
8706 
8707 	init_defrootdomain();
8708 
8709 #ifdef CONFIG_RT_GROUP_SCHED
8710 	init_rt_bandwidth(&root_task_group.rt_bandwidth,
8711 			global_rt_period(), global_rt_runtime());
8712 #endif /* CONFIG_RT_GROUP_SCHED */
8713 
8714 #ifdef CONFIG_CGROUP_SCHED
8715 	task_group_cache = KMEM_CACHE(task_group, 0);
8716 
8717 	list_add(&root_task_group.list, &task_groups);
8718 	INIT_LIST_HEAD(&root_task_group.children);
8719 	INIT_LIST_HEAD(&root_task_group.siblings);
8720 	autogroup_init(&init_task);
8721 #endif /* CONFIG_CGROUP_SCHED */
8722 
8723 	for_each_possible_cpu(i) {
8724 		struct rq *rq;
8725 
8726 		rq = cpu_rq(i);
8727 		raw_spin_lock_init(&rq->__lock);
8728 		rq->nr_running = 0;
8729 		rq->calc_load_active = 0;
8730 		rq->calc_load_update = jiffies + LOAD_FREQ;
8731 		init_cfs_rq(&rq->cfs);
8732 		init_rt_rq(&rq->rt);
8733 		init_dl_rq(&rq->dl);
8734 #ifdef CONFIG_FAIR_GROUP_SCHED
8735 		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
8736 		rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
8737 		/*
8738 		 * How much CPU bandwidth does root_task_group get?
8739 		 *
8740 		 * In case of task-groups formed through the cgroup filesystem, it
8741 		 * gets 100% of the CPU resources in the system. This overall
8742 		 * system CPU resource is divided among the tasks of
8743 		 * root_task_group and its child task-groups in a fair manner,
8744 		 * based on each entity's (task or task-group's) weight
8745 		 * (se->load.weight).
8746 		 *
8747 		 * In other words, if root_task_group has 10 tasks of weight
8748 		 * 1024) and two child groups A0 and A1 (of weight 1024 each),
8749 		 * then A0's share of the CPU resource is:
8750 		 *
8751 		 *	A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
8752 		 *
8753 		 * We achieve this by letting root_task_group's tasks sit
8754 		 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
8755 		 */
8756 		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
8757 #endif /* CONFIG_FAIR_GROUP_SCHED */
8758 
8759 #ifdef CONFIG_RT_GROUP_SCHED
8760 		/*
8761 		 * This is required for init cpu because rt.c:__enable_runtime()
8762 		 * starts working after scheduler_running, which is not the case
8763 		 * yet.
8764 		 */
8765 		rq->rt.rt_runtime = global_rt_runtime();
8766 		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
8767 #endif
8768 		rq->next_class = &idle_sched_class;
8769 
8770 		rq->sd = NULL;
8771 		rq->rd = NULL;
8772 		rq->cpu_capacity = SCHED_CAPACITY_SCALE;
8773 		rq->balance_callback = &balance_push_callback;
8774 		rq->active_balance = 0;
8775 		rq->next_balance = jiffies;
8776 		rq->push_cpu = 0;
8777 		rq->cpu = i;
8778 		rq->online = 0;
8779 		rq->idle_stamp = 0;
8780 		rq->avg_idle = 2*sysctl_sched_migration_cost;
8781 		rq->max_idle_balance_cost = sysctl_sched_migration_cost;
8782 
8783 		INIT_LIST_HEAD(&rq->cfs_tasks);
8784 
8785 		rq_attach_root(rq, &def_root_domain);
8786 #ifdef CONFIG_NO_HZ_COMMON
8787 		rq->last_blocked_load_update_tick = jiffies;
8788 		atomic_set(&rq->nohz_flags, 0);
8789 
8790 		INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
8791 #endif
8792 #ifdef CONFIG_HOTPLUG_CPU
8793 		rcuwait_init(&rq->hotplug_wait);
8794 #endif
8795 		hrtick_rq_init(rq);
8796 		atomic_set(&rq->nr_iowait, 0);
8797 		fair_server_init(rq);
8798 #ifdef CONFIG_SCHED_CLASS_EXT
8799 		ext_server_init(rq);
8800 #endif
8801 
8802 #ifdef CONFIG_SCHED_CORE
8803 		rq->core = rq;
8804 		rq->core_pick = NULL;
8805 		rq->core_dl_server = NULL;
8806 		rq->core_enabled = 0;
8807 		rq->core_tree = RB_ROOT;
8808 		rq->core_forceidle_count = 0;
8809 		rq->core_forceidle_occupation = 0;
8810 		rq->core_forceidle_start = 0;
8811 
8812 		rq->core_cookie = 0UL;
8813 #endif
8814 		zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i));
8815 	}
8816 
8817 	set_load_weight(&init_task, false);
8818 	init_task.se.slice = sysctl_sched_base_slice,
8819 
8820 	/*
8821 	 * The boot idle thread does lazy MMU switching as well:
8822 	 */
8823 	mmgrab_lazy_tlb(&init_mm);
8824 	enter_lazy_tlb(&init_mm, current);
8825 
8826 	/*
8827 	 * The idle task doesn't need the kthread struct to function, but it
8828 	 * is dressed up as a per-CPU kthread and thus needs to play the part
8829 	 * if we want to avoid special-casing it in code that deals with per-CPU
8830 	 * kthreads.
8831 	 */
8832 	WARN_ON(!set_kthread_struct(current));
8833 
8834 	/*
8835 	 * Make us the idle thread. Technically, schedule() should not be
8836 	 * called from this thread, however somewhere below it might be,
8837 	 * but because we are the idle thread, we just pick up running again
8838 	 * when this runqueue becomes "idle".
8839 	 */
8840 	__sched_fork(0, current);
8841 	init_idle(current, smp_processor_id());
8842 
8843 	calc_load_update = jiffies + LOAD_FREQ;
8844 
8845 	idle_thread_set_boot_cpu();
8846 
8847 	balance_push_set(smp_processor_id(), false);
8848 	init_sched_fair_class();
8849 	init_sched_ext_class();
8850 
8851 	psi_init();
8852 
8853 	init_uclamp();
8854 
8855 	preempt_dynamic_init();
8856 
8857 	scheduler_running = 1;
8858 }
8859 
8860 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
8861 
8862 void __might_sleep(const char *file, int line)
8863 {
8864 	unsigned int state = get_current_state();
8865 	/*
8866 	 * Blocking primitives will set (and therefore destroy) current->state,
8867 	 * since we will exit with TASK_RUNNING make sure we enter with it,
8868 	 * otherwise we will destroy state.
8869 	 */
8870 	WARN_ONCE(state != TASK_RUNNING && current->task_state_change,
8871 			"do not call blocking ops when !TASK_RUNNING; "
8872 			"state=%x set at [<%p>] %pS\n", state,
8873 			(void *)current->task_state_change,
8874 			(void *)current->task_state_change);
8875 
8876 	__might_resched(file, line, 0);
8877 }
8878 EXPORT_SYMBOL(__might_sleep);
8879 
8880 static void print_preempt_disable_ip(int preempt_offset, unsigned long ip)
8881 {
8882 	if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT))
8883 		return;
8884 
8885 	if (preempt_count() == preempt_offset)
8886 		return;
8887 
8888 	pr_err("Preemption disabled at:");
8889 	print_ip_sym(KERN_ERR, ip);
8890 }
8891 
8892 static inline bool resched_offsets_ok(unsigned int offsets)
8893 {
8894 	unsigned int nested = preempt_count();
8895 
8896 	nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT;
8897 
8898 	return nested == offsets;
8899 }
8900 
8901 void __might_resched(const char *file, int line, unsigned int offsets)
8902 {
8903 	/* Ratelimiting timestamp: */
8904 	static unsigned long prev_jiffy;
8905 
8906 	unsigned long preempt_disable_ip;
8907 
8908 	/* WARN_ON_ONCE() by default, no rate limit required: */
8909 	rcu_sleep_check();
8910 
8911 	if ((resched_offsets_ok(offsets) && !irqs_disabled() &&
8912 	     !is_idle_task(current) && !current->non_block_count) ||
8913 	    system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
8914 	    oops_in_progress)
8915 		return;
8916 
8917 	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8918 		return;
8919 	prev_jiffy = jiffies;
8920 
8921 	/* Save this before calling printk(), since that will clobber it: */
8922 	preempt_disable_ip = get_preempt_disable_ip(current);
8923 
8924 	pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
8925 	       file, line);
8926 	pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
8927 	       in_atomic(), irqs_disabled(), current->non_block_count,
8928 	       current->pid, current->comm);
8929 	pr_err("preempt_count: %x, expected: %x\n", preempt_count(),
8930 	       offsets & MIGHT_RESCHED_PREEMPT_MASK);
8931 
8932 	if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
8933 		pr_err("RCU nest depth: %d, expected: %u\n",
8934 		       rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT);
8935 	}
8936 
8937 	if (task_stack_end_corrupted(current))
8938 		pr_emerg("Thread overran stack, or stack corrupted\n");
8939 
8940 	debug_show_held_locks(current);
8941 	if (irqs_disabled())
8942 		print_irqtrace_events(current);
8943 
8944 	print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK,
8945 				 preempt_disable_ip);
8946 
8947 	dump_stack();
8948 	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
8949 }
8950 EXPORT_SYMBOL(__might_resched);
8951 
8952 void __cant_sleep(const char *file, int line, int preempt_offset)
8953 {
8954 	static unsigned long prev_jiffy;
8955 
8956 	if (irqs_disabled())
8957 		return;
8958 
8959 	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
8960 		return;
8961 
8962 	if (preempt_count() > preempt_offset)
8963 		return;
8964 
8965 	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8966 		return;
8967 	prev_jiffy = jiffies;
8968 
8969 	printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
8970 	printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
8971 			in_atomic(), irqs_disabled(),
8972 			current->pid, current->comm);
8973 
8974 	debug_show_held_locks(current);
8975 	dump_stack();
8976 	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
8977 }
8978 EXPORT_SYMBOL_GPL(__cant_sleep);
8979 
8980 # ifdef CONFIG_SMP
8981 void __cant_migrate(const char *file, int line)
8982 {
8983 	static unsigned long prev_jiffy;
8984 
8985 	if (irqs_disabled())
8986 		return;
8987 
8988 	if (is_migration_disabled(current))
8989 		return;
8990 
8991 	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
8992 		return;
8993 
8994 	if (preempt_count() > 0)
8995 		return;
8996 
8997 	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8998 		return;
8999 	prev_jiffy = jiffies;
9000 
9001 	pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
9002 	pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
9003 	       in_atomic(), irqs_disabled(), is_migration_disabled(current),
9004 	       current->pid, current->comm);
9005 
9006 	debug_show_held_locks(current);
9007 	dump_stack();
9008 	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
9009 }
9010 EXPORT_SYMBOL_GPL(__cant_migrate);
9011 # endif /* CONFIG_SMP */
9012 #endif /* CONFIG_DEBUG_ATOMIC_SLEEP */
9013 
9014 #ifdef CONFIG_MAGIC_SYSRQ
9015 void normalize_rt_tasks(void)
9016 {
9017 	struct task_struct *g, *p;
9018 	struct sched_attr attr = {
9019 		.sched_policy = SCHED_NORMAL,
9020 	};
9021 
9022 	read_lock(&tasklist_lock);
9023 	for_each_process_thread(g, p) {
9024 		/*
9025 		 * Only normalize user tasks:
9026 		 */
9027 		if (p->flags & PF_KTHREAD)
9028 			continue;
9029 
9030 		p->se.exec_start = 0;
9031 		schedstat_set(p->stats.wait_start,  0);
9032 		schedstat_set(p->stats.sleep_start, 0);
9033 		schedstat_set(p->stats.block_start, 0);
9034 
9035 		if (!rt_or_dl_task(p)) {
9036 			/*
9037 			 * Renice negative nice level userspace
9038 			 * tasks back to 0:
9039 			 */
9040 			if (task_nice(p) < 0)
9041 				set_user_nice(p, 0);
9042 			continue;
9043 		}
9044 
9045 		__sched_setscheduler(p, &attr, false, false);
9046 	}
9047 	read_unlock(&tasklist_lock);
9048 }
9049 
9050 #endif /* CONFIG_MAGIC_SYSRQ */
9051 
9052 #ifdef CONFIG_KGDB_KDB
9053 /*
9054  * These functions are only useful for KDB.
9055  *
9056  * They can only be called when the whole system has been
9057  * stopped - every CPU needs to be quiescent, and no scheduling
9058  * activity can take place. Using them for anything else would
9059  * be a serious bug, and as a result, they aren't even visible
9060  * under any other configuration.
9061  */
9062 
9063 /**
9064  * curr_task - return the current task for a given CPU.
9065  * @cpu: the processor in question.
9066  *
9067  * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
9068  *
9069  * Return: The current task for @cpu.
9070  */
9071 struct task_struct *curr_task(int cpu)
9072 {
9073 	return cpu_curr(cpu);
9074 }
9075 
9076 #endif /* CONFIG_KGDB_KDB */
9077 
9078 #ifdef CONFIG_CGROUP_SCHED
9079 /* task_group_lock serializes the addition/removal of task groups */
9080 static DEFINE_SPINLOCK(task_group_lock);
9081 
9082 static inline void alloc_uclamp_sched_group(struct task_group *tg,
9083 					    struct task_group *parent)
9084 {
9085 #ifdef CONFIG_UCLAMP_TASK_GROUP
9086 	enum uclamp_id clamp_id;
9087 
9088 	for_each_clamp_id(clamp_id) {
9089 		uclamp_se_set(&tg->uclamp_req[clamp_id],
9090 			      uclamp_none(clamp_id), false);
9091 		tg->uclamp[clamp_id] = parent->uclamp[clamp_id];
9092 	}
9093 #endif
9094 }
9095 
9096 static void sched_free_group(struct task_group *tg)
9097 {
9098 	free_fair_sched_group(tg);
9099 	free_rt_sched_group(tg);
9100 	autogroup_free(tg);
9101 	kmem_cache_free(task_group_cache, tg);
9102 }
9103 
9104 static void sched_free_group_rcu(struct rcu_head *rcu)
9105 {
9106 	sched_free_group(container_of(rcu, struct task_group, rcu));
9107 }
9108 
9109 static void sched_unregister_group(struct task_group *tg)
9110 {
9111 	unregister_fair_sched_group(tg);
9112 	unregister_rt_sched_group(tg);
9113 	/*
9114 	 * We have to wait for yet another RCU grace period to expire, as
9115 	 * print_cfs_stats() might run concurrently.
9116 	 */
9117 	call_rcu(&tg->rcu, sched_free_group_rcu);
9118 }
9119 
9120 /* allocate runqueue etc for a new task group */
9121 struct task_group *sched_create_group(struct task_group *parent)
9122 {
9123 	struct task_group *tg;
9124 
9125 	tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
9126 	if (!tg)
9127 		return ERR_PTR(-ENOMEM);
9128 
9129 	if (!alloc_fair_sched_group(tg, parent))
9130 		goto err;
9131 
9132 	if (!alloc_rt_sched_group(tg, parent))
9133 		goto err;
9134 
9135 	scx_tg_init(tg);
9136 	alloc_uclamp_sched_group(tg, parent);
9137 
9138 	return tg;
9139 
9140 err:
9141 	sched_free_group(tg);
9142 	return ERR_PTR(-ENOMEM);
9143 }
9144 
9145 void sched_online_group(struct task_group *tg, struct task_group *parent)
9146 {
9147 	unsigned long flags;
9148 
9149 	spin_lock_irqsave(&task_group_lock, flags);
9150 	list_add_tail_rcu(&tg->list, &task_groups);
9151 
9152 	/* Root should already exist: */
9153 	WARN_ON(!parent);
9154 
9155 	tg->parent = parent;
9156 	INIT_LIST_HEAD(&tg->children);
9157 	list_add_rcu(&tg->siblings, &parent->children);
9158 	spin_unlock_irqrestore(&task_group_lock, flags);
9159 
9160 	online_fair_sched_group(tg);
9161 }
9162 
9163 /* RCU callback to free various structures associated with a task group */
9164 static void sched_unregister_group_rcu(struct rcu_head *rhp)
9165 {
9166 	/* Now it should be safe to free those cfs_rqs: */
9167 	sched_unregister_group(container_of(rhp, struct task_group, rcu));
9168 }
9169 
9170 void sched_destroy_group(struct task_group *tg)
9171 {
9172 	/* Wait for possible concurrent references to cfs_rqs complete: */
9173 	call_rcu(&tg->rcu, sched_unregister_group_rcu);
9174 }
9175 
9176 void sched_release_group(struct task_group *tg)
9177 {
9178 	unsigned long flags;
9179 
9180 	/*
9181 	 * Unlink first, to avoid walk_tg_tree_from() from finding us (via
9182 	 * sched_cfs_period_timer()).
9183 	 *
9184 	 * For this to be effective, we have to wait for all pending users of
9185 	 * this task group to leave their RCU critical section to ensure no new
9186 	 * user will see our dying task group any more. Specifically ensure
9187 	 * that tg_unthrottle_up() won't add decayed cfs_rq's to it.
9188 	 *
9189 	 * We therefore defer calling unregister_fair_sched_group() to
9190 	 * sched_unregister_group() which is guarantied to get called only after the
9191 	 * current RCU grace period has expired.
9192 	 */
9193 	spin_lock_irqsave(&task_group_lock, flags);
9194 	list_del_rcu(&tg->list);
9195 	list_del_rcu(&tg->siblings);
9196 	spin_unlock_irqrestore(&task_group_lock, flags);
9197 }
9198 
9199 static void sched_change_group(struct task_struct *tsk)
9200 {
9201 	struct task_group *tg;
9202 
9203 	/*
9204 	 * All callers are synchronized by task_rq_lock(); we do not use RCU
9205 	 * which is pointless here. Thus, we pass "true" to task_css_check()
9206 	 * to prevent lockdep warnings.
9207 	 */
9208 	tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
9209 			  struct task_group, css);
9210 	tg = autogroup_task_group(tsk, tg);
9211 	tsk->sched_task_group = tg;
9212 
9213 #ifdef CONFIG_FAIR_GROUP_SCHED
9214 	if (tsk->sched_class->task_change_group)
9215 		tsk->sched_class->task_change_group(tsk);
9216 	else
9217 #endif
9218 		set_task_rq(tsk, task_cpu(tsk));
9219 }
9220 
9221 /*
9222  * Change task's runqueue when it moves between groups.
9223  *
9224  * The caller of this function should have put the task in its new group by
9225  * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
9226  * its new group.
9227  */
9228 void sched_move_task(struct task_struct *tsk, bool for_autogroup)
9229 {
9230 	unsigned int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE;
9231 	bool resched = false;
9232 	bool queued = false;
9233 	struct rq *rq;
9234 
9235 	CLASS(task_rq_lock, rq_guard)(tsk);
9236 	rq = rq_guard.rq;
9237 
9238 	scoped_guard (sched_change, tsk, queue_flags) {
9239 		sched_change_group(tsk);
9240 		if (!for_autogroup)
9241 			scx_cgroup_move_task(tsk);
9242 		if (scope->running)
9243 			resched = true;
9244 		queued = scope->queued;
9245 	}
9246 
9247 	if (resched)
9248 		resched_curr(rq);
9249 	else if (queued)
9250 		wakeup_preempt(rq, tsk, 0);
9251 
9252 	__balance_callbacks(rq, &rq_guard.rf);
9253 }
9254 
9255 static struct cgroup_subsys_state *
9256 cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
9257 {
9258 	struct task_group *parent = css_tg(parent_css);
9259 	struct task_group *tg;
9260 
9261 	if (!parent) {
9262 		/* This is early initialization for the top cgroup */
9263 		return &root_task_group.css;
9264 	}
9265 
9266 	tg = sched_create_group(parent);
9267 	if (IS_ERR(tg))
9268 		return ERR_PTR(-ENOMEM);
9269 
9270 	return &tg->css;
9271 }
9272 
9273 /* Expose task group only after completing cgroup initialization */
9274 static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
9275 {
9276 	struct task_group *tg = css_tg(css);
9277 	struct task_group *parent = css_tg(css->parent);
9278 	int ret;
9279 
9280 	ret = scx_tg_online(tg);
9281 	if (ret)
9282 		return ret;
9283 
9284 	if (parent)
9285 		sched_online_group(tg, parent);
9286 
9287 #ifdef CONFIG_UCLAMP_TASK_GROUP
9288 	/* Propagate the effective uclamp value for the new group */
9289 	guard(mutex)(&uclamp_mutex);
9290 	guard(rcu)();
9291 	cpu_util_update_eff(css);
9292 #endif
9293 
9294 	return 0;
9295 }
9296 
9297 static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
9298 {
9299 	struct task_group *tg = css_tg(css);
9300 
9301 	scx_tg_offline(tg);
9302 }
9303 
9304 static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
9305 {
9306 	struct task_group *tg = css_tg(css);
9307 
9308 	sched_release_group(tg);
9309 }
9310 
9311 static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
9312 {
9313 	struct task_group *tg = css_tg(css);
9314 
9315 	/*
9316 	 * Relies on the RCU grace period between css_released() and this.
9317 	 */
9318 	sched_unregister_group(tg);
9319 }
9320 
9321 static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
9322 {
9323 #ifdef CONFIG_RT_GROUP_SCHED
9324 	struct task_struct *task;
9325 	struct cgroup_subsys_state *css;
9326 
9327 	if (!rt_group_sched_enabled())
9328 		goto scx_check;
9329 
9330 	cgroup_taskset_for_each(task, css, tset) {
9331 		if (!sched_rt_can_attach(css_tg(css), task))
9332 			return -EINVAL;
9333 	}
9334 scx_check:
9335 #endif /* CONFIG_RT_GROUP_SCHED */
9336 	return scx_cgroup_can_attach(tset);
9337 }
9338 
9339 static void cpu_cgroup_attach(struct cgroup_taskset *tset)
9340 {
9341 	struct task_struct *task;
9342 	struct cgroup_subsys_state *css;
9343 
9344 	cgroup_taskset_for_each(task, css, tset)
9345 		sched_move_task(task, false);
9346 }
9347 
9348 static void cpu_cgroup_cancel_attach(struct cgroup_taskset *tset)
9349 {
9350 	scx_cgroup_cancel_attach(tset);
9351 }
9352 
9353 #ifdef CONFIG_UCLAMP_TASK_GROUP
9354 static void cpu_util_update_eff(struct cgroup_subsys_state *css)
9355 {
9356 	struct cgroup_subsys_state *top_css = css;
9357 	struct uclamp_se *uc_parent = NULL;
9358 	struct uclamp_se *uc_se = NULL;
9359 	unsigned int eff[UCLAMP_CNT];
9360 	enum uclamp_id clamp_id;
9361 	unsigned int clamps;
9362 
9363 	lockdep_assert_held(&uclamp_mutex);
9364 	WARN_ON_ONCE(!rcu_read_lock_held());
9365 
9366 	css_for_each_descendant_pre(css, top_css) {
9367 		uc_parent = css_tg(css)->parent
9368 			? css_tg(css)->parent->uclamp : NULL;
9369 
9370 		for_each_clamp_id(clamp_id) {
9371 			/* Assume effective clamps matches requested clamps */
9372 			eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value;
9373 			/* Cap effective clamps with parent's effective clamps */
9374 			if (uc_parent &&
9375 			    eff[clamp_id] > uc_parent[clamp_id].value) {
9376 				eff[clamp_id] = uc_parent[clamp_id].value;
9377 			}
9378 		}
9379 		/* Ensure protection is always capped by limit */
9380 		eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]);
9381 
9382 		/* Propagate most restrictive effective clamps */
9383 		clamps = 0x0;
9384 		uc_se = css_tg(css)->uclamp;
9385 		for_each_clamp_id(clamp_id) {
9386 			if (eff[clamp_id] == uc_se[clamp_id].value)
9387 				continue;
9388 			uc_se[clamp_id].value = eff[clamp_id];
9389 			uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]);
9390 			clamps |= (0x1 << clamp_id);
9391 		}
9392 		if (!clamps) {
9393 			css = css_rightmost_descendant(css);
9394 			continue;
9395 		}
9396 
9397 		/* Immediately update descendants RUNNABLE tasks */
9398 		uclamp_update_active_tasks(css);
9399 	}
9400 }
9401 
9402 /*
9403  * Integer 10^N with a given N exponent by casting to integer the literal "1eN"
9404  * C expression. Since there is no way to convert a macro argument (N) into a
9405  * character constant, use two levels of macros.
9406  */
9407 #define _POW10(exp) ((unsigned int)1e##exp)
9408 #define POW10(exp) _POW10(exp)
9409 
9410 struct uclamp_request {
9411 #define UCLAMP_PERCENT_SHIFT	2
9412 #define UCLAMP_PERCENT_SCALE	(100 * POW10(UCLAMP_PERCENT_SHIFT))
9413 	s64 percent;
9414 	u64 util;
9415 	int ret;
9416 };
9417 
9418 static inline struct uclamp_request
9419 capacity_from_percent(char *buf)
9420 {
9421 	struct uclamp_request req = {
9422 		.percent = UCLAMP_PERCENT_SCALE,
9423 		.util = SCHED_CAPACITY_SCALE,
9424 		.ret = 0,
9425 	};
9426 
9427 	buf = strim(buf);
9428 	if (strcmp(buf, "max")) {
9429 		req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT,
9430 					     &req.percent);
9431 		if (req.ret)
9432 			return req;
9433 		if ((u64)req.percent > UCLAMP_PERCENT_SCALE) {
9434 			req.ret = -ERANGE;
9435 			return req;
9436 		}
9437 
9438 		req.util = req.percent << SCHED_CAPACITY_SHIFT;
9439 		req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE);
9440 	}
9441 
9442 	return req;
9443 }
9444 
9445 static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf,
9446 				size_t nbytes, loff_t off,
9447 				enum uclamp_id clamp_id)
9448 {
9449 	struct uclamp_request req;
9450 	struct task_group *tg;
9451 
9452 	req = capacity_from_percent(buf);
9453 	if (req.ret)
9454 		return req.ret;
9455 
9456 	sched_uclamp_enable();
9457 
9458 	guard(mutex)(&uclamp_mutex);
9459 	guard(rcu)();
9460 
9461 	tg = css_tg(of_css(of));
9462 	if (tg->uclamp_req[clamp_id].value != req.util)
9463 		uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false);
9464 
9465 	/*
9466 	 * Because of not recoverable conversion rounding we keep track of the
9467 	 * exact requested value
9468 	 */
9469 	tg->uclamp_pct[clamp_id] = req.percent;
9470 
9471 	/* Update effective clamps to track the most restrictive value */
9472 	cpu_util_update_eff(of_css(of));
9473 
9474 	return nbytes;
9475 }
9476 
9477 static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of,
9478 				    char *buf, size_t nbytes,
9479 				    loff_t off)
9480 {
9481 	return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MIN);
9482 }
9483 
9484 static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of,
9485 				    char *buf, size_t nbytes,
9486 				    loff_t off)
9487 {
9488 	return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MAX);
9489 }
9490 
9491 static inline void cpu_uclamp_print(struct seq_file *sf,
9492 				    enum uclamp_id clamp_id)
9493 {
9494 	struct task_group *tg;
9495 	u64 util_clamp;
9496 	u64 percent;
9497 	u32 rem;
9498 
9499 	scoped_guard (rcu) {
9500 		tg = css_tg(seq_css(sf));
9501 		util_clamp = tg->uclamp_req[clamp_id].value;
9502 	}
9503 
9504 	if (util_clamp == SCHED_CAPACITY_SCALE) {
9505 		seq_puts(sf, "max\n");
9506 		return;
9507 	}
9508 
9509 	percent = tg->uclamp_pct[clamp_id];
9510 	percent = div_u64_rem(percent, POW10(UCLAMP_PERCENT_SHIFT), &rem);
9511 	seq_printf(sf, "%llu.%0*u\n", percent, UCLAMP_PERCENT_SHIFT, rem);
9512 }
9513 
9514 static int cpu_uclamp_min_show(struct seq_file *sf, void *v)
9515 {
9516 	cpu_uclamp_print(sf, UCLAMP_MIN);
9517 	return 0;
9518 }
9519 
9520 static int cpu_uclamp_max_show(struct seq_file *sf, void *v)
9521 {
9522 	cpu_uclamp_print(sf, UCLAMP_MAX);
9523 	return 0;
9524 }
9525 #endif /* CONFIG_UCLAMP_TASK_GROUP */
9526 
9527 #ifdef CONFIG_GROUP_SCHED_WEIGHT
9528 static unsigned long tg_weight(struct task_group *tg)
9529 {
9530 #ifdef CONFIG_FAIR_GROUP_SCHED
9531 	return scale_load_down(tg->shares);
9532 #else
9533 	return sched_weight_from_cgroup(tg->scx.weight);
9534 #endif
9535 }
9536 
9537 static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
9538 				struct cftype *cftype, u64 shareval)
9539 {
9540 	int ret;
9541 
9542 	if (shareval > scale_load_down(ULONG_MAX))
9543 		shareval = MAX_SHARES;
9544 	ret = sched_group_set_shares(css_tg(css), scale_load(shareval));
9545 	if (!ret)
9546 		scx_group_set_weight(css_tg(css),
9547 				     sched_weight_to_cgroup(shareval));
9548 	return ret;
9549 }
9550 
9551 static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
9552 			       struct cftype *cft)
9553 {
9554 	return tg_weight(css_tg(css));
9555 }
9556 #endif /* CONFIG_GROUP_SCHED_WEIGHT */
9557 
9558 #ifdef CONFIG_CFS_BANDWIDTH
9559 static DEFINE_MUTEX(cfs_constraints_mutex);
9560 
9561 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
9562 
9563 static int tg_set_cfs_bandwidth(struct task_group *tg,
9564 				u64 period_us, u64 quota_us, u64 burst_us)
9565 {
9566 	int i, ret = 0, runtime_enabled, runtime_was_enabled;
9567 	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9568 	u64 period, quota, burst;
9569 
9570 	period = (u64)period_us * NSEC_PER_USEC;
9571 
9572 	if (quota_us == RUNTIME_INF)
9573 		quota = RUNTIME_INF;
9574 	else
9575 		quota = (u64)quota_us * NSEC_PER_USEC;
9576 
9577 	burst = (u64)burst_us * NSEC_PER_USEC;
9578 
9579 	/*
9580 	 * Prevent race between setting of cfs_rq->runtime_enabled and
9581 	 * unthrottle_offline_cfs_rqs().
9582 	 */
9583 	guard(cpus_read_lock)();
9584 	guard(mutex)(&cfs_constraints_mutex);
9585 
9586 	ret = __cfs_schedulable(tg, period, quota);
9587 	if (ret)
9588 		return ret;
9589 
9590 	runtime_enabled = quota != RUNTIME_INF;
9591 	runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
9592 	/*
9593 	 * If we need to toggle cfs_bandwidth_used, off->on must occur
9594 	 * before making related changes, and on->off must occur afterwards
9595 	 */
9596 	if (runtime_enabled && !runtime_was_enabled)
9597 		cfs_bandwidth_usage_inc();
9598 
9599 	scoped_guard (raw_spinlock_irq, &cfs_b->lock) {
9600 		cfs_b->period = ns_to_ktime(period);
9601 		cfs_b->quota = quota;
9602 		cfs_b->burst = burst;
9603 
9604 		__refill_cfs_bandwidth_runtime(cfs_b);
9605 
9606 		/*
9607 		 * Restart the period timer (if active) to handle new
9608 		 * period expiry:
9609 		 */
9610 		if (runtime_enabled)
9611 			start_cfs_bandwidth(cfs_b);
9612 	}
9613 
9614 	for_each_online_cpu(i) {
9615 		struct cfs_rq *cfs_rq = tg->cfs_rq[i];
9616 		struct rq *rq = cfs_rq->rq;
9617 
9618 		guard(rq_lock_irq)(rq);
9619 		cfs_rq->runtime_enabled = runtime_enabled;
9620 		cfs_rq->runtime_remaining = 1;
9621 
9622 		if (cfs_rq->throttled)
9623 			unthrottle_cfs_rq(cfs_rq);
9624 	}
9625 
9626 	if (runtime_was_enabled && !runtime_enabled)
9627 		cfs_bandwidth_usage_dec();
9628 
9629 	return 0;
9630 }
9631 
9632 static u64 tg_get_cfs_period(struct task_group *tg)
9633 {
9634 	u64 cfs_period_us;
9635 
9636 	cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
9637 	do_div(cfs_period_us, NSEC_PER_USEC);
9638 
9639 	return cfs_period_us;
9640 }
9641 
9642 static u64 tg_get_cfs_quota(struct task_group *tg)
9643 {
9644 	u64 quota_us;
9645 
9646 	if (tg->cfs_bandwidth.quota == RUNTIME_INF)
9647 		return RUNTIME_INF;
9648 
9649 	quota_us = tg->cfs_bandwidth.quota;
9650 	do_div(quota_us, NSEC_PER_USEC);
9651 
9652 	return quota_us;
9653 }
9654 
9655 static u64 tg_get_cfs_burst(struct task_group *tg)
9656 {
9657 	u64 burst_us;
9658 
9659 	burst_us = tg->cfs_bandwidth.burst;
9660 	do_div(burst_us, NSEC_PER_USEC);
9661 
9662 	return burst_us;
9663 }
9664 
9665 struct cfs_schedulable_data {
9666 	struct task_group *tg;
9667 	u64 period, quota;
9668 };
9669 
9670 /*
9671  * normalize group quota/period to be quota/max_period
9672  * note: units are usecs
9673  */
9674 static u64 normalize_cfs_quota(struct task_group *tg,
9675 			       struct cfs_schedulable_data *d)
9676 {
9677 	u64 quota, period;
9678 
9679 	if (tg == d->tg) {
9680 		period = d->period;
9681 		quota = d->quota;
9682 	} else {
9683 		period = tg_get_cfs_period(tg);
9684 		quota = tg_get_cfs_quota(tg);
9685 	}
9686 
9687 	/* note: these should typically be equivalent */
9688 	if (quota == RUNTIME_INF || quota == -1)
9689 		return RUNTIME_INF;
9690 
9691 	return to_ratio(period, quota);
9692 }
9693 
9694 static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
9695 {
9696 	struct cfs_schedulable_data *d = data;
9697 	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9698 	s64 quota = 0, parent_quota = -1;
9699 
9700 	if (!tg->parent) {
9701 		quota = RUNTIME_INF;
9702 	} else {
9703 		struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
9704 
9705 		quota = normalize_cfs_quota(tg, d);
9706 		parent_quota = parent_b->hierarchical_quota;
9707 
9708 		/*
9709 		 * Ensure max(child_quota) <= parent_quota.  On cgroup2,
9710 		 * always take the non-RUNTIME_INF min.  On cgroup1, only
9711 		 * inherit when no limit is set. In both cases this is used
9712 		 * by the scheduler to determine if a given CFS task has a
9713 		 * bandwidth constraint at some higher level.
9714 		 */
9715 		if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) {
9716 			if (quota == RUNTIME_INF)
9717 				quota = parent_quota;
9718 			else if (parent_quota != RUNTIME_INF)
9719 				quota = min(quota, parent_quota);
9720 		} else {
9721 			if (quota == RUNTIME_INF)
9722 				quota = parent_quota;
9723 			else if (parent_quota != RUNTIME_INF && quota > parent_quota)
9724 				return -EINVAL;
9725 		}
9726 	}
9727 	cfs_b->hierarchical_quota = quota;
9728 
9729 	return 0;
9730 }
9731 
9732 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
9733 {
9734 	struct cfs_schedulable_data data = {
9735 		.tg = tg,
9736 		.period = period,
9737 		.quota = quota,
9738 	};
9739 
9740 	if (quota != RUNTIME_INF) {
9741 		do_div(data.period, NSEC_PER_USEC);
9742 		do_div(data.quota, NSEC_PER_USEC);
9743 	}
9744 
9745 	guard(rcu)();
9746 	return walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
9747 }
9748 
9749 static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
9750 {
9751 	struct task_group *tg = css_tg(seq_css(sf));
9752 	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9753 
9754 	seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
9755 	seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
9756 	seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
9757 
9758 	if (schedstat_enabled() && tg != &root_task_group) {
9759 		struct sched_statistics *stats;
9760 		u64 ws = 0;
9761 		int i;
9762 
9763 		for_each_possible_cpu(i) {
9764 			stats = __schedstats_from_se(tg->se[i]);
9765 			ws += schedstat_val(stats->wait_sum);
9766 		}
9767 
9768 		seq_printf(sf, "wait_sum %llu\n", ws);
9769 	}
9770 
9771 	seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst);
9772 	seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time);
9773 
9774 	return 0;
9775 }
9776 
9777 static u64 throttled_time_self(struct task_group *tg)
9778 {
9779 	int i;
9780 	u64 total = 0;
9781 
9782 	for_each_possible_cpu(i) {
9783 		total += READ_ONCE(tg->cfs_rq[i]->throttled_clock_self_time);
9784 	}
9785 
9786 	return total;
9787 }
9788 
9789 static int cpu_cfs_local_stat_show(struct seq_file *sf, void *v)
9790 {
9791 	struct task_group *tg = css_tg(seq_css(sf));
9792 
9793 	seq_printf(sf, "throttled_time %llu\n", throttled_time_self(tg));
9794 
9795 	return 0;
9796 }
9797 #endif /* CONFIG_CFS_BANDWIDTH */
9798 
9799 #ifdef CONFIG_GROUP_SCHED_BANDWIDTH
9800 const u64 max_bw_quota_period_us = 1 * USEC_PER_SEC; /* 1s */
9801 static const u64 min_bw_quota_period_us = 1 * USEC_PER_MSEC; /* 1ms */
9802 /* More than 203 days if BW_SHIFT equals 20. */
9803 static const u64 max_bw_runtime_us = MAX_BW;
9804 
9805 static void tg_bandwidth(struct task_group *tg,
9806 			 u64 *period_us_p, u64 *quota_us_p, u64 *burst_us_p)
9807 {
9808 #ifdef CONFIG_CFS_BANDWIDTH
9809 	if (period_us_p)
9810 		*period_us_p = tg_get_cfs_period(tg);
9811 	if (quota_us_p)
9812 		*quota_us_p = tg_get_cfs_quota(tg);
9813 	if (burst_us_p)
9814 		*burst_us_p = tg_get_cfs_burst(tg);
9815 #else /* !CONFIG_CFS_BANDWIDTH */
9816 	if (period_us_p)
9817 		*period_us_p = tg->scx.bw_period_us;
9818 	if (quota_us_p)
9819 		*quota_us_p = tg->scx.bw_quota_us;
9820 	if (burst_us_p)
9821 		*burst_us_p = tg->scx.bw_burst_us;
9822 #endif /* CONFIG_CFS_BANDWIDTH */
9823 }
9824 
9825 static u64 cpu_period_read_u64(struct cgroup_subsys_state *css,
9826 			       struct cftype *cft)
9827 {
9828 	u64 period_us;
9829 
9830 	tg_bandwidth(css_tg(css), &period_us, NULL, NULL);
9831 	return period_us;
9832 }
9833 
9834 static int tg_set_bandwidth(struct task_group *tg,
9835 			    u64 period_us, u64 quota_us, u64 burst_us)
9836 {
9837 	const u64 max_usec = U64_MAX / NSEC_PER_USEC;
9838 	int ret = 0;
9839 
9840 	if (tg == &root_task_group)
9841 		return -EINVAL;
9842 
9843 	/* Values should survive translation to nsec */
9844 	if (period_us > max_usec ||
9845 	    (quota_us != RUNTIME_INF && quota_us > max_usec) ||
9846 	    burst_us > max_usec)
9847 		return -EINVAL;
9848 
9849 	/*
9850 	 * Ensure we have some amount of bandwidth every period. This is to
9851 	 * prevent reaching a state of large arrears when throttled via
9852 	 * entity_tick() resulting in prolonged exit starvation.
9853 	 */
9854 	if (quota_us < min_bw_quota_period_us ||
9855 	    period_us < min_bw_quota_period_us)
9856 		return -EINVAL;
9857 
9858 	/*
9859 	 * Likewise, bound things on the other side by preventing insane quota
9860 	 * periods.  This also allows us to normalize in computing quota
9861 	 * feasibility.
9862 	 */
9863 	if (period_us > max_bw_quota_period_us)
9864 		return -EINVAL;
9865 
9866 	/*
9867 	 * Bound quota to defend quota against overflow during bandwidth shift.
9868 	 */
9869 	if (quota_us != RUNTIME_INF && quota_us > max_bw_runtime_us)
9870 		return -EINVAL;
9871 
9872 	if (quota_us != RUNTIME_INF && (burst_us > quota_us ||
9873 					burst_us + quota_us > max_bw_runtime_us))
9874 		return -EINVAL;
9875 
9876 #ifdef CONFIG_CFS_BANDWIDTH
9877 	ret = tg_set_cfs_bandwidth(tg, period_us, quota_us, burst_us);
9878 #endif /* CONFIG_CFS_BANDWIDTH */
9879 	if (!ret)
9880 		scx_group_set_bandwidth(tg, period_us, quota_us, burst_us);
9881 	return ret;
9882 }
9883 
9884 static s64 cpu_quota_read_s64(struct cgroup_subsys_state *css,
9885 			      struct cftype *cft)
9886 {
9887 	u64 quota_us;
9888 
9889 	tg_bandwidth(css_tg(css), NULL, &quota_us, NULL);
9890 	return quota_us;	/* (s64)RUNTIME_INF becomes -1 */
9891 }
9892 
9893 static u64 cpu_burst_read_u64(struct cgroup_subsys_state *css,
9894 			      struct cftype *cft)
9895 {
9896 	u64 burst_us;
9897 
9898 	tg_bandwidth(css_tg(css), NULL, NULL, &burst_us);
9899 	return burst_us;
9900 }
9901 
9902 static int cpu_period_write_u64(struct cgroup_subsys_state *css,
9903 				struct cftype *cftype, u64 period_us)
9904 {
9905 	struct task_group *tg = css_tg(css);
9906 	u64 quota_us, burst_us;
9907 
9908 	tg_bandwidth(tg, NULL, &quota_us, &burst_us);
9909 	return tg_set_bandwidth(tg, period_us, quota_us, burst_us);
9910 }
9911 
9912 static int cpu_quota_write_s64(struct cgroup_subsys_state *css,
9913 			       struct cftype *cftype, s64 quota_us)
9914 {
9915 	struct task_group *tg = css_tg(css);
9916 	u64 period_us, burst_us;
9917 
9918 	if (quota_us < 0)
9919 		quota_us = RUNTIME_INF;
9920 
9921 	tg_bandwidth(tg, &period_us, NULL, &burst_us);
9922 	return tg_set_bandwidth(tg, period_us, quota_us, burst_us);
9923 }
9924 
9925 static int cpu_burst_write_u64(struct cgroup_subsys_state *css,
9926 			       struct cftype *cftype, u64 burst_us)
9927 {
9928 	struct task_group *tg = css_tg(css);
9929 	u64 period_us, quota_us;
9930 
9931 	tg_bandwidth(tg, &period_us, &quota_us, NULL);
9932 	return tg_set_bandwidth(tg, period_us, quota_us, burst_us);
9933 }
9934 #endif /* CONFIG_GROUP_SCHED_BANDWIDTH */
9935 
9936 #ifdef CONFIG_RT_GROUP_SCHED
9937 static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
9938 				struct cftype *cft, s64 val)
9939 {
9940 	return sched_group_set_rt_runtime(css_tg(css), val);
9941 }
9942 
9943 static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
9944 			       struct cftype *cft)
9945 {
9946 	return sched_group_rt_runtime(css_tg(css));
9947 }
9948 
9949 static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
9950 				    struct cftype *cftype, u64 rt_period_us)
9951 {
9952 	return sched_group_set_rt_period(css_tg(css), rt_period_us);
9953 }
9954 
9955 static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
9956 				   struct cftype *cft)
9957 {
9958 	return sched_group_rt_period(css_tg(css));
9959 }
9960 #endif /* CONFIG_RT_GROUP_SCHED */
9961 
9962 #ifdef CONFIG_GROUP_SCHED_WEIGHT
9963 static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css,
9964 			       struct cftype *cft)
9965 {
9966 	return css_tg(css)->idle;
9967 }
9968 
9969 static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
9970 				struct cftype *cft, s64 idle)
9971 {
9972 	int ret;
9973 
9974 	ret = sched_group_set_idle(css_tg(css), idle);
9975 	if (!ret)
9976 		scx_group_set_idle(css_tg(css), idle);
9977 	return ret;
9978 }
9979 #endif /* CONFIG_GROUP_SCHED_WEIGHT */
9980 
9981 static struct cftype cpu_legacy_files[] = {
9982 #ifdef CONFIG_GROUP_SCHED_WEIGHT
9983 	{
9984 		.name = "shares",
9985 		.read_u64 = cpu_shares_read_u64,
9986 		.write_u64 = cpu_shares_write_u64,
9987 	},
9988 	{
9989 		.name = "idle",
9990 		.read_s64 = cpu_idle_read_s64,
9991 		.write_s64 = cpu_idle_write_s64,
9992 	},
9993 #endif
9994 #ifdef CONFIG_GROUP_SCHED_BANDWIDTH
9995 	{
9996 		.name = "cfs_period_us",
9997 		.read_u64 = cpu_period_read_u64,
9998 		.write_u64 = cpu_period_write_u64,
9999 	},
10000 	{
10001 		.name = "cfs_quota_us",
10002 		.read_s64 = cpu_quota_read_s64,
10003 		.write_s64 = cpu_quota_write_s64,
10004 	},
10005 	{
10006 		.name = "cfs_burst_us",
10007 		.read_u64 = cpu_burst_read_u64,
10008 		.write_u64 = cpu_burst_write_u64,
10009 	},
10010 #endif
10011 #ifdef CONFIG_CFS_BANDWIDTH
10012 	{
10013 		.name = "stat",
10014 		.seq_show = cpu_cfs_stat_show,
10015 	},
10016 	{
10017 		.name = "stat.local",
10018 		.seq_show = cpu_cfs_local_stat_show,
10019 	},
10020 #endif
10021 #ifdef CONFIG_UCLAMP_TASK_GROUP
10022 	{
10023 		.name = "uclamp.min",
10024 		.flags = CFTYPE_NOT_ON_ROOT,
10025 		.seq_show = cpu_uclamp_min_show,
10026 		.write = cpu_uclamp_min_write,
10027 	},
10028 	{
10029 		.name = "uclamp.max",
10030 		.flags = CFTYPE_NOT_ON_ROOT,
10031 		.seq_show = cpu_uclamp_max_show,
10032 		.write = cpu_uclamp_max_write,
10033 	},
10034 #endif
10035 	{ }	/* Terminate */
10036 };
10037 
10038 #ifdef CONFIG_RT_GROUP_SCHED
10039 static struct cftype rt_group_files[] = {
10040 	{
10041 		.name = "rt_runtime_us",
10042 		.read_s64 = cpu_rt_runtime_read,
10043 		.write_s64 = cpu_rt_runtime_write,
10044 	},
10045 	{
10046 		.name = "rt_period_us",
10047 		.read_u64 = cpu_rt_period_read_uint,
10048 		.write_u64 = cpu_rt_period_write_uint,
10049 	},
10050 	{ }	/* Terminate */
10051 };
10052 
10053 # ifdef CONFIG_RT_GROUP_SCHED_DEFAULT_DISABLED
10054 DEFINE_STATIC_KEY_FALSE(rt_group_sched);
10055 # else
10056 DEFINE_STATIC_KEY_TRUE(rt_group_sched);
10057 # endif
10058 
10059 static int __init setup_rt_group_sched(char *str)
10060 {
10061 	long val;
10062 
10063 	if (kstrtol(str, 0, &val) || val < 0 || val > 1) {
10064 		pr_warn("Unable to set rt_group_sched\n");
10065 		return 1;
10066 	}
10067 	if (val)
10068 		static_branch_enable(&rt_group_sched);
10069 	else
10070 		static_branch_disable(&rt_group_sched);
10071 
10072 	return 1;
10073 }
10074 __setup("rt_group_sched=", setup_rt_group_sched);
10075 
10076 static int __init cpu_rt_group_init(void)
10077 {
10078 	if (!rt_group_sched_enabled())
10079 		return 0;
10080 
10081 	WARN_ON(cgroup_add_legacy_cftypes(&cpu_cgrp_subsys, rt_group_files));
10082 	return 0;
10083 }
10084 subsys_initcall(cpu_rt_group_init);
10085 #endif /* CONFIG_RT_GROUP_SCHED */
10086 
10087 static int cpu_extra_stat_show(struct seq_file *sf,
10088 			       struct cgroup_subsys_state *css)
10089 {
10090 #ifdef CONFIG_CFS_BANDWIDTH
10091 	{
10092 		struct task_group *tg = css_tg(css);
10093 		struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
10094 		u64 throttled_usec, burst_usec;
10095 
10096 		throttled_usec = cfs_b->throttled_time;
10097 		do_div(throttled_usec, NSEC_PER_USEC);
10098 		burst_usec = cfs_b->burst_time;
10099 		do_div(burst_usec, NSEC_PER_USEC);
10100 
10101 		seq_printf(sf, "nr_periods %d\n"
10102 			   "nr_throttled %d\n"
10103 			   "throttled_usec %llu\n"
10104 			   "nr_bursts %d\n"
10105 			   "burst_usec %llu\n",
10106 			   cfs_b->nr_periods, cfs_b->nr_throttled,
10107 			   throttled_usec, cfs_b->nr_burst, burst_usec);
10108 	}
10109 #endif /* CONFIG_CFS_BANDWIDTH */
10110 	return 0;
10111 }
10112 
10113 static int cpu_local_stat_show(struct seq_file *sf,
10114 			       struct cgroup_subsys_state *css)
10115 {
10116 #ifdef CONFIG_CFS_BANDWIDTH
10117 	{
10118 		struct task_group *tg = css_tg(css);
10119 		u64 throttled_self_usec;
10120 
10121 		throttled_self_usec = throttled_time_self(tg);
10122 		do_div(throttled_self_usec, NSEC_PER_USEC);
10123 
10124 		seq_printf(sf, "throttled_usec %llu\n",
10125 			   throttled_self_usec);
10126 	}
10127 #endif
10128 	return 0;
10129 }
10130 
10131 #ifdef CONFIG_GROUP_SCHED_WEIGHT
10132 
10133 static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
10134 			       struct cftype *cft)
10135 {
10136 	return sched_weight_to_cgroup(tg_weight(css_tg(css)));
10137 }
10138 
10139 static int cpu_weight_write_u64(struct cgroup_subsys_state *css,
10140 				struct cftype *cft, u64 cgrp_weight)
10141 {
10142 	unsigned long weight;
10143 	int ret;
10144 
10145 	if (cgrp_weight < CGROUP_WEIGHT_MIN || cgrp_weight > CGROUP_WEIGHT_MAX)
10146 		return -ERANGE;
10147 
10148 	weight = sched_weight_from_cgroup(cgrp_weight);
10149 
10150 	ret = sched_group_set_shares(css_tg(css), scale_load(weight));
10151 	if (!ret)
10152 		scx_group_set_weight(css_tg(css), cgrp_weight);
10153 	return ret;
10154 }
10155 
10156 static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css,
10157 				    struct cftype *cft)
10158 {
10159 	unsigned long weight = tg_weight(css_tg(css));
10160 	int last_delta = INT_MAX;
10161 	int prio, delta;
10162 
10163 	/* find the closest nice value to the current weight */
10164 	for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) {
10165 		delta = abs(sched_prio_to_weight[prio] - weight);
10166 		if (delta >= last_delta)
10167 			break;
10168 		last_delta = delta;
10169 	}
10170 
10171 	return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO);
10172 }
10173 
10174 static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
10175 				     struct cftype *cft, s64 nice)
10176 {
10177 	unsigned long weight;
10178 	int idx, ret;
10179 
10180 	if (nice < MIN_NICE || nice > MAX_NICE)
10181 		return -ERANGE;
10182 
10183 	idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO;
10184 	idx = array_index_nospec(idx, 40);
10185 	weight = sched_prio_to_weight[idx];
10186 
10187 	ret = sched_group_set_shares(css_tg(css), scale_load(weight));
10188 	if (!ret)
10189 		scx_group_set_weight(css_tg(css),
10190 				     sched_weight_to_cgroup(weight));
10191 	return ret;
10192 }
10193 #endif /* CONFIG_GROUP_SCHED_WEIGHT */
10194 
10195 static void __maybe_unused cpu_period_quota_print(struct seq_file *sf,
10196 						  long period, long quota)
10197 {
10198 	if (quota < 0)
10199 		seq_puts(sf, "max");
10200 	else
10201 		seq_printf(sf, "%ld", quota);
10202 
10203 	seq_printf(sf, " %ld\n", period);
10204 }
10205 
10206 /* caller should put the current value in *@periodp before calling */
10207 static int __maybe_unused cpu_period_quota_parse(char *buf, u64 *period_us_p,
10208 						 u64 *quota_us_p)
10209 {
10210 	char tok[21];	/* U64_MAX */
10211 
10212 	if (sscanf(buf, "%20s %llu", tok, period_us_p) < 1)
10213 		return -EINVAL;
10214 
10215 	if (sscanf(tok, "%llu", quota_us_p) < 1) {
10216 		if (!strcmp(tok, "max"))
10217 			*quota_us_p = RUNTIME_INF;
10218 		else
10219 			return -EINVAL;
10220 	}
10221 
10222 	return 0;
10223 }
10224 
10225 #ifdef CONFIG_GROUP_SCHED_BANDWIDTH
10226 static int cpu_max_show(struct seq_file *sf, void *v)
10227 {
10228 	struct task_group *tg = css_tg(seq_css(sf));
10229 	u64 period_us, quota_us;
10230 
10231 	tg_bandwidth(tg, &period_us, &quota_us, NULL);
10232 	cpu_period_quota_print(sf, period_us, quota_us);
10233 	return 0;
10234 }
10235 
10236 static ssize_t cpu_max_write(struct kernfs_open_file *of,
10237 			     char *buf, size_t nbytes, loff_t off)
10238 {
10239 	struct task_group *tg = css_tg(of_css(of));
10240 	u64 period_us, quota_us, burst_us;
10241 	int ret;
10242 
10243 	tg_bandwidth(tg, &period_us, NULL, &burst_us);
10244 	ret = cpu_period_quota_parse(buf, &period_us, &quota_us);
10245 	if (!ret)
10246 		ret = tg_set_bandwidth(tg, period_us, quota_us, burst_us);
10247 	return ret ?: nbytes;
10248 }
10249 #endif /* CONFIG_CFS_BANDWIDTH */
10250 
10251 static struct cftype cpu_files[] = {
10252 #ifdef CONFIG_GROUP_SCHED_WEIGHT
10253 	{
10254 		.name = "weight",
10255 		.flags = CFTYPE_NOT_ON_ROOT,
10256 		.read_u64 = cpu_weight_read_u64,
10257 		.write_u64 = cpu_weight_write_u64,
10258 	},
10259 	{
10260 		.name = "weight.nice",
10261 		.flags = CFTYPE_NOT_ON_ROOT,
10262 		.read_s64 = cpu_weight_nice_read_s64,
10263 		.write_s64 = cpu_weight_nice_write_s64,
10264 	},
10265 	{
10266 		.name = "idle",
10267 		.flags = CFTYPE_NOT_ON_ROOT,
10268 		.read_s64 = cpu_idle_read_s64,
10269 		.write_s64 = cpu_idle_write_s64,
10270 	},
10271 #endif
10272 #ifdef CONFIG_GROUP_SCHED_BANDWIDTH
10273 	{
10274 		.name = "max",
10275 		.flags = CFTYPE_NOT_ON_ROOT,
10276 		.seq_show = cpu_max_show,
10277 		.write = cpu_max_write,
10278 	},
10279 	{
10280 		.name = "max.burst",
10281 		.flags = CFTYPE_NOT_ON_ROOT,
10282 		.read_u64 = cpu_burst_read_u64,
10283 		.write_u64 = cpu_burst_write_u64,
10284 	},
10285 #endif /* CONFIG_CFS_BANDWIDTH */
10286 #ifdef CONFIG_UCLAMP_TASK_GROUP
10287 	{
10288 		.name = "uclamp.min",
10289 		.flags = CFTYPE_NOT_ON_ROOT,
10290 		.seq_show = cpu_uclamp_min_show,
10291 		.write = cpu_uclamp_min_write,
10292 	},
10293 	{
10294 		.name = "uclamp.max",
10295 		.flags = CFTYPE_NOT_ON_ROOT,
10296 		.seq_show = cpu_uclamp_max_show,
10297 		.write = cpu_uclamp_max_write,
10298 	},
10299 #endif /* CONFIG_UCLAMP_TASK_GROUP */
10300 	{ }	/* terminate */
10301 };
10302 
10303 struct cgroup_subsys cpu_cgrp_subsys = {
10304 	.css_alloc	= cpu_cgroup_css_alloc,
10305 	.css_online	= cpu_cgroup_css_online,
10306 	.css_offline	= cpu_cgroup_css_offline,
10307 	.css_released	= cpu_cgroup_css_released,
10308 	.css_free	= cpu_cgroup_css_free,
10309 	.css_extra_stat_show = cpu_extra_stat_show,
10310 	.css_local_stat_show = cpu_local_stat_show,
10311 	.can_attach	= cpu_cgroup_can_attach,
10312 	.attach		= cpu_cgroup_attach,
10313 	.cancel_attach	= cpu_cgroup_cancel_attach,
10314 	.legacy_cftypes	= cpu_legacy_files,
10315 	.dfl_cftypes	= cpu_files,
10316 	.early_init	= true,
10317 	.threaded	= true,
10318 };
10319 
10320 #endif /* CONFIG_CGROUP_SCHED */
10321 
10322 void dump_cpu_task(int cpu)
10323 {
10324 	if (in_hardirq() && cpu == smp_processor_id()) {
10325 		struct pt_regs *regs;
10326 
10327 		regs = get_irq_regs();
10328 		if (regs) {
10329 			show_regs(regs);
10330 			return;
10331 		}
10332 	}
10333 
10334 	if (trigger_single_cpu_backtrace(cpu))
10335 		return;
10336 
10337 	pr_info("Task dump for CPU %d:\n", cpu);
10338 	sched_show_task(cpu_curr(cpu));
10339 }
10340 
10341 /*
10342  * Nice levels are multiplicative, with a gentle 10% change for every
10343  * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
10344  * nice 1, it will get ~10% less CPU time than another CPU-bound task
10345  * that remained on nice 0.
10346  *
10347  * The "10% effect" is relative and cumulative: from _any_ nice level,
10348  * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
10349  * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
10350  * If a task goes up by ~10% and another task goes down by ~10% then
10351  * the relative distance between them is ~25%.)
10352  */
10353 const int sched_prio_to_weight[40] = {
10354  /* -20 */     88761,     71755,     56483,     46273,     36291,
10355  /* -15 */     29154,     23254,     18705,     14949,     11916,
10356  /* -10 */      9548,      7620,      6100,      4904,      3906,
10357  /*  -5 */      3121,      2501,      1991,      1586,      1277,
10358  /*   0 */      1024,       820,       655,       526,       423,
10359  /*   5 */       335,       272,       215,       172,       137,
10360  /*  10 */       110,        87,        70,        56,        45,
10361  /*  15 */        36,        29,        23,        18,        15,
10362 };
10363 
10364 /*
10365  * Inverse (2^32/x) values of the sched_prio_to_weight[] array, pre-calculated.
10366  *
10367  * In cases where the weight does not change often, we can use the
10368  * pre-calculated inverse to speed up arithmetics by turning divisions
10369  * into multiplications:
10370  */
10371 const u32 sched_prio_to_wmult[40] = {
10372  /* -20 */     48388,     59856,     76040,     92818,    118348,
10373  /* -15 */    147320,    184698,    229616,    287308,    360437,
10374  /* -10 */    449829,    563644,    704093,    875809,   1099582,
10375  /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
10376  /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
10377  /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
10378  /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
10379  /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
10380 };
10381 
10382 void call_trace_sched_update_nr_running(struct rq *rq, int count)
10383 {
10384         trace_sched_update_nr_running_tp(rq, count);
10385 }
10386 
10387 #ifdef CONFIG_SCHED_MM_CID
10388 /*
10389  * Concurrency IDentifier management
10390  *
10391  * Serialization rules:
10392  *
10393  * mm::mm_cid::mutex:	Serializes fork() and exit() and therefore
10394  *			protects mm::mm_cid::users and mode switch
10395  *			transitions
10396  *
10397  * mm::mm_cid::lock:	Serializes mm_update_max_cids() and
10398  *			mm_update_cpus_allowed(). Nests in mm_cid::mutex
10399  *			and runqueue lock.
10400  *
10401  * The mm_cidmask bitmap is not protected by any of the mm::mm_cid locks
10402  * and can only be modified with atomic operations.
10403  *
10404  * The mm::mm_cid:pcpu per CPU storage is protected by the CPUs runqueue
10405  * lock.
10406  *
10407  * CID ownership:
10408  *
10409  * A CID is either owned by a task (stored in task_struct::mm_cid.cid) or
10410  * by a CPU (stored in mm::mm_cid.pcpu::cid). CIDs owned by CPUs have the
10411  * MM_CID_ONCPU bit set.
10412  *
10413  * During the transition of ownership mode, the MM_CID_TRANSIT bit is set
10414  * on the CIDs. When this bit is set the tasks drop the CID back into the
10415  * pool when scheduling out.
10416  *
10417  * Both bits (ONCPU and TRANSIT) are filtered out by task_cid() when the
10418  * CID is actually handed over to user space in the RSEQ memory.
10419  *
10420  * Mode switching:
10421  *
10422  * The ownership mode is per process and stored in mm:mm_cid::mode with the
10423  * following possible states:
10424  *
10425  *	0:				Per task ownership
10426  *	0 | MM_CID_TRANSIT:		Transition from per CPU to per task
10427  *	MM_CID_ONCPU:			Per CPU ownership
10428  *	MM_CID_ONCPU | MM_CID_TRANSIT:	Transition from per task to per CPU
10429  *
10430  * All transitions of ownership mode happen in two phases:
10431  *
10432  *  1) mm:mm_cid::mode has the MM_CID_TRANSIT bit set. This is OR'ed on the
10433  *     CIDs and denotes that the CID is only temporarily owned by a
10434  *     task. When the task schedules out it drops the CID back into the
10435  *     pool if this bit is set.
10436  *
10437  *  2) The initiating context walks the per CPU space or the tasks to fixup
10438  *     or drop the CIDs and after completion it clears MM_CID_TRANSIT in
10439  *     mm:mm_cid::mode. After that point the CIDs are strictly task or CPU
10440  *     owned again.
10441  *
10442  * This two phase transition is required to prevent CID space exhaustion
10443  * during the transition as a direct transfer of ownership would fail:
10444  *
10445  *   - On task to CPU mode switch if a task is scheduled in on one CPU and
10446  *     then migrated to another CPU before the fixup freed enough per task
10447  *     CIDs.
10448  *
10449  *   - On CPU to task mode switch if two tasks are scheduled in on the same
10450  *     CPU before the fixup freed per CPU CIDs.
10451  *
10452  *   Both scenarios can result in a live lock because sched_in() is invoked
10453  *   with runqueue lock held and loops in search of a CID and the fixup
10454  *   thread can't make progress freeing them up because it is stuck on the
10455  *   same runqueue lock.
10456  *
10457  * While MM_CID_TRANSIT is active during the transition phase the MM_CID
10458  * bitmap can be contended, but that's a temporary contention bound to the
10459  * transition period. After that everything goes back into steady state and
10460  * nothing except fork() and exit() will touch the bitmap. This is an
10461  * acceptable tradeoff as it completely avoids complex serialization,
10462  * memory barriers and atomic operations for the common case.
10463  *
10464  * Aside of that this mechanism also ensures RT compability:
10465  *
10466  *   - The task which runs the fixup is fully preemptible except for the
10467  *     short runqueue lock held sections.
10468  *
10469  *   - The transient impact of the bitmap contention is only problematic
10470  *     when there is a thundering herd scenario of tasks scheduling in and
10471  *     out concurrently. There is not much which can be done about that
10472  *     except for avoiding mode switching by a proper overall system
10473  *     configuration.
10474  *
10475  * Switching to per CPU mode happens when the user count becomes greater
10476  * than the maximum number of CIDs, which is calculated by:
10477  *
10478  *	opt_cids = min(mm_cid::nr_cpus_allowed, mm_cid::users);
10479  *	max_cids = min(1.25 * opt_cids, num_possible_cpus());
10480  *
10481  * The +25% allowance is useful for tight CPU masks in scenarios where only
10482  * a few threads are created and destroyed to avoid frequent mode
10483  * switches. Though this allowance shrinks, the closer opt_cids becomes to
10484  * num_possible_cpus(), which is the (unfortunate) hard ABI limit.
10485  *
10486  * At the point of switching to per CPU mode the new user is not yet
10487  * visible in the system, so the task which initiated the fork() runs the
10488  * fixup function. mm_cid_fixup_tasks_to_cpu() walks the thread list and
10489  * either marks each task owned CID with MM_CID_TRANSIT if the task is
10490  * running on a CPU or drops it into the CID pool if a task is not on a
10491  * CPU. Tasks which schedule in before the task walk reaches them do the
10492  * handover in mm_cid_schedin(). When mm_cid_fixup_tasks_to_cpus()
10493  * completes it is guaranteed that no task related to that MM owns a CID
10494  * anymore.
10495  *
10496  * Switching back to task mode happens when the user count goes below the
10497  * threshold which was recorded on the per CPU mode switch:
10498  *
10499  *	pcpu_thrs = min(opt_cids - (opt_cids / 4), num_possible_cpus() / 2);
10500  *
10501  * This threshold is updated when a affinity change increases the number of
10502  * allowed CPUs for the MM, which might cause a switch back to per task
10503  * mode.
10504  *
10505  * If the switch back was initiated by a exiting task, then that task runs
10506  * the fixup function. If it was initiated by a affinity change, then it's
10507  * run either in the deferred update function in context of a workqueue or
10508  * by a task which forks a new one or by a task which exits. Whatever
10509  * happens first. mm_cid_fixup_cpus_to_task() walks through the possible
10510  * CPUs and either marks the CPU owned CIDs with MM_CID_TRANSIT if a
10511  * related task is running on the CPU or drops it into the pool. Tasks
10512  * which are scheduled in before the fixup covered them do the handover
10513  * themself. When mm_cid_fixup_cpus_to_tasks() completes it is guaranteed
10514  * that no CID related to that MM is owned by a CPU anymore.
10515  */
10516 
10517 /*
10518  * Update the CID range properties when the constraints change. Invoked via
10519  * fork(), exit() and affinity changes
10520  */
10521 static void __mm_update_max_cids(struct mm_mm_cid *mc)
10522 {
10523 	unsigned int opt_cids, max_cids;
10524 
10525 	/* Calculate the new optimal constraint */
10526 	opt_cids = min(mc->nr_cpus_allowed, mc->users);
10527 
10528 	/* Adjust the maximum CIDs to +25% limited by the number of possible CPUs */
10529 	max_cids = min(opt_cids + (opt_cids / 4), num_possible_cpus());
10530 	WRITE_ONCE(mc->max_cids, max_cids);
10531 }
10532 
10533 static inline unsigned int mm_cid_calc_pcpu_thrs(struct mm_mm_cid *mc)
10534 {
10535 	unsigned int opt_cids;
10536 
10537 	opt_cids = min(mc->nr_cpus_allowed, mc->users);
10538 	/* Has to be at least 1 because 0 indicates PCPU mode off */
10539 	return max(min(opt_cids - opt_cids / 4, num_possible_cpus() / 2), 1);
10540 }
10541 
10542 static bool mm_update_max_cids(struct mm_struct *mm)
10543 {
10544 	struct mm_mm_cid *mc = &mm->mm_cid;
10545 	bool percpu = cid_on_cpu(mc->mode);
10546 
10547 	lockdep_assert_held(&mm->mm_cid.lock);
10548 
10549 	/* Clear deferred mode switch flag. A change is handled by the caller */
10550 	mc->update_deferred = false;
10551 	__mm_update_max_cids(mc);
10552 
10553 	/* Check whether owner mode must be changed */
10554 	if (!percpu) {
10555 		/* Enable per CPU mode when the number of users is above max_cids */
10556 		if (mc->users > mc->max_cids)
10557 			mc->pcpu_thrs = mm_cid_calc_pcpu_thrs(mc);
10558 	} else {
10559 		/* Switch back to per task if user count under threshold */
10560 		if (mc->users < mc->pcpu_thrs)
10561 			mc->pcpu_thrs = 0;
10562 	}
10563 
10564 	/* Mode change required? */
10565 	if (percpu == !!mc->pcpu_thrs)
10566 		return false;
10567 
10568 	/* Flip the mode and set the transition flag to bridge the transfer */
10569 	WRITE_ONCE(mc->mode, mc->mode ^ (MM_CID_TRANSIT | MM_CID_ONCPU));
10570 	/*
10571 	 * Order the store against the subsequent fixups so that
10572 	 * acquire(rq::lock) cannot be reordered by the CPU before the
10573 	 * store.
10574 	 */
10575 	smp_mb();
10576 	return true;
10577 }
10578 
10579 static inline void mm_update_cpus_allowed(struct mm_struct *mm, const struct cpumask *affmsk)
10580 {
10581 	struct cpumask *mm_allowed;
10582 	struct mm_mm_cid *mc;
10583 	unsigned int weight;
10584 
10585 	if (!mm || !READ_ONCE(mm->mm_cid.users))
10586 		return;
10587 	/*
10588 	 * mm::mm_cid::mm_cpus_allowed is the superset of each threads
10589 	 * allowed CPUs mask which means it can only grow.
10590 	 */
10591 	mc = &mm->mm_cid;
10592 	guard(raw_spinlock)(&mc->lock);
10593 	mm_allowed = mm_cpus_allowed(mm);
10594 	weight = cpumask_weighted_or(mm_allowed, mm_allowed, affmsk);
10595 	if (weight == mc->nr_cpus_allowed)
10596 		return;
10597 
10598 	WRITE_ONCE(mc->nr_cpus_allowed, weight);
10599 	__mm_update_max_cids(mc);
10600 	if (!cid_on_cpu(mc->mode))
10601 		return;
10602 
10603 	/* Adjust the threshold to the wider set */
10604 	mc->pcpu_thrs = mm_cid_calc_pcpu_thrs(mc);
10605 	/* Switch back to per task mode? */
10606 	if (mc->users >= mc->pcpu_thrs)
10607 		return;
10608 
10609 	/* Don't queue twice */
10610 	if (mc->update_deferred)
10611 		return;
10612 
10613 	/* Queue the irq work, which schedules the real work */
10614 	mc->update_deferred = true;
10615 	irq_work_queue(&mc->irq_work);
10616 }
10617 
10618 static inline void mm_cid_complete_transit(struct mm_struct *mm, unsigned int mode)
10619 {
10620 	/*
10621 	 * Ensure that the store removing the TRANSIT bit cannot be
10622 	 * reordered by the CPU before the fixups have been completed.
10623 	 */
10624 	smp_mb();
10625 	WRITE_ONCE(mm->mm_cid.mode, mode);
10626 }
10627 
10628 static inline void mm_cid_transit_to_task(struct task_struct *t, struct mm_cid_pcpu *pcp)
10629 {
10630 	if (cid_on_cpu(t->mm_cid.cid)) {
10631 		unsigned int cid = cpu_cid_to_cid(t->mm_cid.cid);
10632 
10633 		t->mm_cid.cid = cid_to_transit_cid(cid);
10634 		pcp->cid = t->mm_cid.cid;
10635 	}
10636 }
10637 
10638 static void mm_cid_fixup_cpus_to_tasks(struct mm_struct *mm)
10639 {
10640 	unsigned int cpu;
10641 
10642 	/* Walk the CPUs and fixup all stale CIDs */
10643 	for_each_possible_cpu(cpu) {
10644 		struct mm_cid_pcpu *pcp = per_cpu_ptr(mm->mm_cid.pcpu, cpu);
10645 		struct rq *rq = cpu_rq(cpu);
10646 
10647 		/* Remote access to mm::mm_cid::pcpu requires rq_lock */
10648 		guard(rq_lock_irq)(rq);
10649 		/* Is the CID still owned by the CPU? */
10650 		if (cid_on_cpu(pcp->cid)) {
10651 			/*
10652 			 * If rq->curr has @mm, transfer it with the
10653 			 * transition bit set. Otherwise drop it.
10654 			 */
10655 			if (rq->curr->mm == mm && rq->curr->mm_cid.active)
10656 				mm_cid_transit_to_task(rq->curr, pcp);
10657 			else
10658 				mm_drop_cid_on_cpu(mm, pcp);
10659 
10660 		} else if (rq->curr->mm == mm && rq->curr->mm_cid.active) {
10661 			unsigned int cid = rq->curr->mm_cid.cid;
10662 
10663 			/* Ensure it has the transition bit set */
10664 			if (!cid_in_transit(cid)) {
10665 				cid = cid_to_transit_cid(cid);
10666 				rq->curr->mm_cid.cid = cid;
10667 				pcp->cid = cid;
10668 			}
10669 		}
10670 	}
10671 	mm_cid_complete_transit(mm, 0);
10672 }
10673 
10674 static inline void mm_cid_transit_to_cpu(struct task_struct *t, struct mm_cid_pcpu *pcp)
10675 {
10676 	if (cid_on_task(t->mm_cid.cid)) {
10677 		t->mm_cid.cid = cid_to_transit_cid(t->mm_cid.cid);
10678 		pcp->cid = t->mm_cid.cid;
10679 	}
10680 }
10681 
10682 static void mm_cid_fixup_task_to_cpu(struct task_struct *t, struct mm_struct *mm)
10683 {
10684 	/* Remote access to mm::mm_cid::pcpu requires rq_lock */
10685 	guard(task_rq_lock)(t);
10686 	if (cid_on_task(t->mm_cid.cid)) {
10687 		/* If running on the CPU, put the CID in transit mode, otherwise drop it */
10688 		if (task_rq(t)->curr == t)
10689 			mm_cid_transit_to_cpu(t, per_cpu_ptr(mm->mm_cid.pcpu, task_cpu(t)));
10690 		else
10691 			mm_unset_cid_on_task(t);
10692 	}
10693 }
10694 
10695 static void mm_cid_fixup_tasks_to_cpus(void)
10696 {
10697 	struct mm_struct *mm = current->mm;
10698 	struct task_struct *t;
10699 
10700 	lockdep_assert_held(&mm->mm_cid.mutex);
10701 
10702 	hlist_for_each_entry(t, &mm->mm_cid.user_list, mm_cid.node) {
10703 		/* Current has already transferred before invoking the fixup. */
10704 		if (t != current)
10705 			mm_cid_fixup_task_to_cpu(t, mm);
10706 	}
10707 
10708 	mm_cid_complete_transit(mm, MM_CID_ONCPU);
10709 }
10710 
10711 static bool sched_mm_cid_add_user(struct task_struct *t, struct mm_struct *mm)
10712 {
10713 	lockdep_assert_held(&mm->mm_cid.lock);
10714 
10715 	t->mm_cid.active = 1;
10716 	hlist_add_head(&t->mm_cid.node, &mm->mm_cid.user_list);
10717 	mm->mm_cid.users++;
10718 	return mm_update_max_cids(mm);
10719 }
10720 
10721 static void sched_mm_cid_fork(struct task_struct *t)
10722 {
10723 	struct mm_struct *mm = t->mm;
10724 	bool percpu;
10725 
10726 	if (!mm)
10727 		return;
10728 
10729 	WARN_ON_ONCE(t->mm_cid.cid != MM_CID_UNSET);
10730 
10731 	guard(mutex)(&mm->mm_cid.mutex);
10732 	scoped_guard(raw_spinlock_irq, &mm->mm_cid.lock) {
10733 		struct mm_cid_pcpu *pcp = this_cpu_ptr(mm->mm_cid.pcpu);
10734 
10735 		/* First user ? */
10736 		if (!mm->mm_cid.users) {
10737 			sched_mm_cid_add_user(t, mm);
10738 			t->mm_cid.cid = mm_get_cid(mm);
10739 			/* Required for execve() */
10740 			pcp->cid = t->mm_cid.cid;
10741 			return;
10742 		}
10743 
10744 		if (!sched_mm_cid_add_user(t, mm)) {
10745 			if (!cid_on_cpu(mm->mm_cid.mode))
10746 				t->mm_cid.cid = mm_get_cid(mm);
10747 			return;
10748 		}
10749 
10750 		/* Handle the mode change and transfer current's CID */
10751 		percpu = cid_on_cpu(mm->mm_cid.mode);
10752 		if (!percpu)
10753 			mm_cid_transit_to_task(current, pcp);
10754 		else
10755 			mm_cid_transit_to_cpu(current, pcp);
10756 	}
10757 
10758 	if (percpu) {
10759 		mm_cid_fixup_tasks_to_cpus();
10760 	} else {
10761 		mm_cid_fixup_cpus_to_tasks(mm);
10762 		t->mm_cid.cid = mm_get_cid(mm);
10763 	}
10764 }
10765 
10766 static bool sched_mm_cid_remove_user(struct task_struct *t)
10767 {
10768 	lockdep_assert_held(&t->mm->mm_cid.lock);
10769 
10770 	t->mm_cid.active = 0;
10771 	/* Clear the transition bit */
10772 	t->mm_cid.cid = cid_from_transit_cid(t->mm_cid.cid);
10773 	mm_unset_cid_on_task(t);
10774 	hlist_del_init(&t->mm_cid.node);
10775 	t->mm->mm_cid.users--;
10776 	return mm_update_max_cids(t->mm);
10777 }
10778 
10779 static bool __sched_mm_cid_exit(struct task_struct *t)
10780 {
10781 	struct mm_struct *mm = t->mm;
10782 
10783 	if (!sched_mm_cid_remove_user(t))
10784 		return false;
10785 	/*
10786 	 * Contrary to fork() this only deals with a switch back to per
10787 	 * task mode either because the above decreased users or an
10788 	 * affinity change increased the number of allowed CPUs and the
10789 	 * deferred fixup did not run yet.
10790 	 */
10791 	if (WARN_ON_ONCE(cid_on_cpu(mm->mm_cid.mode)))
10792 		return false;
10793 	/*
10794 	 * A failed fork(2) cleanup never gets here, so @current must have
10795 	 * the same MM as @t. That's true for exit() and the failed
10796 	 * pthread_create() cleanup case.
10797 	 */
10798 	if (WARN_ON_ONCE(current->mm != mm))
10799 		return false;
10800 	return true;
10801 }
10802 
10803 /*
10804  * When a task exits, the MM CID held by the task is not longer required as
10805  * the task cannot return to user space.
10806  */
10807 void sched_mm_cid_exit(struct task_struct *t)
10808 {
10809 	struct mm_struct *mm = t->mm;
10810 
10811 	if (!mm || !t->mm_cid.active)
10812 		return;
10813 	/*
10814 	 * Ensure that only one instance is doing MM CID operations within
10815 	 * a MM. The common case is uncontended. The rare fixup case adds
10816 	 * some overhead.
10817 	 */
10818 	scoped_guard(mutex, &mm->mm_cid.mutex) {
10819 		/* mm_cid::mutex is sufficient to protect mm_cid::users */
10820 		if (likely(mm->mm_cid.users > 1)) {
10821 			scoped_guard(raw_spinlock_irq, &mm->mm_cid.lock) {
10822 				if (!__sched_mm_cid_exit(t))
10823 					return;
10824 				/*
10825 				 * Mode change. The task has the CID unset
10826 				 * already and dealt with an eventually set
10827 				 * TRANSIT bit. If the CID is owned by the CPU
10828 				 * then drop it.
10829 				 */
10830 				mm_drop_cid_on_cpu(mm, this_cpu_ptr(mm->mm_cid.pcpu));
10831 			}
10832 			mm_cid_fixup_cpus_to_tasks(mm);
10833 			return;
10834 		}
10835 		/* Last user */
10836 		scoped_guard(raw_spinlock_irq, &mm->mm_cid.lock) {
10837 			/* Required across execve() */
10838 			if (t == current)
10839 				mm_cid_transit_to_task(t, this_cpu_ptr(mm->mm_cid.pcpu));
10840 			/* Ignore mode change. There is nothing to do. */
10841 			sched_mm_cid_remove_user(t);
10842 		}
10843 	}
10844 
10845 	/*
10846 	 * As this is the last user (execve(), process exit or failed
10847 	 * fork(2)) there is no concurrency anymore.
10848 	 *
10849 	 * Synchronize eventually pending work to ensure that there are no
10850 	 * dangling references left. @t->mm_cid.users is zero so nothing
10851 	 * can queue this work anymore.
10852 	 */
10853 	irq_work_sync(&mm->mm_cid.irq_work);
10854 	cancel_work_sync(&mm->mm_cid.work);
10855 }
10856 
10857 /* Deactivate MM CID allocation across execve() */
10858 void sched_mm_cid_before_execve(struct task_struct *t)
10859 {
10860 	sched_mm_cid_exit(t);
10861 }
10862 
10863 /* Reactivate MM CID after execve() */
10864 void sched_mm_cid_after_execve(struct task_struct *t)
10865 {
10866 	if (t->mm)
10867 		sched_mm_cid_fork(t);
10868 }
10869 
10870 static void mm_cid_work_fn(struct work_struct *work)
10871 {
10872 	struct mm_struct *mm = container_of(work, struct mm_struct, mm_cid.work);
10873 
10874 	guard(mutex)(&mm->mm_cid.mutex);
10875 	/* Did the last user task exit already? */
10876 	if (!mm->mm_cid.users)
10877 		return;
10878 
10879 	scoped_guard(raw_spinlock_irq, &mm->mm_cid.lock) {
10880 		/* Have fork() or exit() handled it already? */
10881 		if (!mm->mm_cid.update_deferred)
10882 			return;
10883 		/* This clears mm_cid::update_deferred */
10884 		if (!mm_update_max_cids(mm))
10885 			return;
10886 		/* Affinity changes can only switch back to task mode */
10887 		if (WARN_ON_ONCE(cid_on_cpu(mm->mm_cid.mode)))
10888 			return;
10889 	}
10890 	mm_cid_fixup_cpus_to_tasks(mm);
10891 }
10892 
10893 static void mm_cid_irq_work(struct irq_work *work)
10894 {
10895 	struct mm_struct *mm = container_of(work, struct mm_struct, mm_cid.irq_work);
10896 
10897 	/*
10898 	 * Needs to be unconditional because mm_cid::lock cannot be held
10899 	 * when scheduling work as mm_update_cpus_allowed() nests inside
10900 	 * rq::lock and schedule_work() might end up in wakeup...
10901 	 */
10902 	schedule_work(&mm->mm_cid.work);
10903 }
10904 
10905 void mm_init_cid(struct mm_struct *mm, struct task_struct *p)
10906 {
10907 	mm->mm_cid.max_cids = 0;
10908 	mm->mm_cid.mode = 0;
10909 	mm->mm_cid.nr_cpus_allowed = p->nr_cpus_allowed;
10910 	mm->mm_cid.users = 0;
10911 	mm->mm_cid.pcpu_thrs = 0;
10912 	mm->mm_cid.update_deferred = 0;
10913 	raw_spin_lock_init(&mm->mm_cid.lock);
10914 	mutex_init(&mm->mm_cid.mutex);
10915 	mm->mm_cid.irq_work = IRQ_WORK_INIT_HARD(mm_cid_irq_work);
10916 	INIT_WORK(&mm->mm_cid.work, mm_cid_work_fn);
10917 	INIT_HLIST_HEAD(&mm->mm_cid.user_list);
10918 	cpumask_copy(mm_cpus_allowed(mm), &p->cpus_mask);
10919 	bitmap_zero(mm_cidmask(mm), num_possible_cpus());
10920 }
10921 #else /* CONFIG_SCHED_MM_CID */
10922 static inline void mm_update_cpus_allowed(struct mm_struct *mm, const struct cpumask *affmsk) { }
10923 static inline void sched_mm_cid_fork(struct task_struct *t) { }
10924 #endif /* !CONFIG_SCHED_MM_CID */
10925 
10926 static DEFINE_PER_CPU(struct sched_change_ctx, sched_change_ctx);
10927 
10928 struct sched_change_ctx *sched_change_begin(struct task_struct *p, unsigned int flags)
10929 {
10930 	struct sched_change_ctx *ctx = this_cpu_ptr(&sched_change_ctx);
10931 	struct rq *rq = task_rq(p);
10932 
10933 	/*
10934 	 * Must exclusively use matched flags since this is both dequeue and
10935 	 * enqueue.
10936 	 */
10937 	WARN_ON_ONCE(flags & 0xFFFF0000);
10938 
10939 	lockdep_assert_rq_held(rq);
10940 
10941 	if (!(flags & DEQUEUE_NOCLOCK)) {
10942 		update_rq_clock(rq);
10943 		flags |= DEQUEUE_NOCLOCK;
10944 	}
10945 
10946 	if ((flags & DEQUEUE_CLASS) && p->sched_class->switching_from)
10947 		p->sched_class->switching_from(rq, p);
10948 
10949 	*ctx = (struct sched_change_ctx){
10950 		.p = p,
10951 		.class = p->sched_class,
10952 		.flags = flags,
10953 		.queued = task_on_rq_queued(p),
10954 		.running = task_current_donor(rq, p),
10955 	};
10956 
10957 	if (!(flags & DEQUEUE_CLASS)) {
10958 		if (p->sched_class->get_prio)
10959 			ctx->prio = p->sched_class->get_prio(rq, p);
10960 		else
10961 			ctx->prio = p->prio;
10962 	}
10963 
10964 	if (ctx->queued)
10965 		dequeue_task(rq, p, flags);
10966 	if (ctx->running)
10967 		put_prev_task(rq, p);
10968 
10969 	if ((flags & DEQUEUE_CLASS) && p->sched_class->switched_from)
10970 		p->sched_class->switched_from(rq, p);
10971 
10972 	return ctx;
10973 }
10974 
10975 void sched_change_end(struct sched_change_ctx *ctx)
10976 {
10977 	struct task_struct *p = ctx->p;
10978 	struct rq *rq = task_rq(p);
10979 
10980 	lockdep_assert_rq_held(rq);
10981 
10982 	/*
10983 	 * Changing class without *QUEUE_CLASS is bad.
10984 	 */
10985 	WARN_ON_ONCE(p->sched_class != ctx->class && !(ctx->flags & ENQUEUE_CLASS));
10986 
10987 	if ((ctx->flags & ENQUEUE_CLASS) && p->sched_class->switching_to)
10988 		p->sched_class->switching_to(rq, p);
10989 
10990 	if (ctx->queued)
10991 		enqueue_task(rq, p, ctx->flags);
10992 	if (ctx->running)
10993 		set_next_task(rq, p);
10994 
10995 	if (ctx->flags & ENQUEUE_CLASS) {
10996 		if (p->sched_class->switched_to)
10997 			p->sched_class->switched_to(rq, p);
10998 
10999 		if (ctx->running) {
11000 			/*
11001 			 * If this was a class promotion; let the old class
11002 			 * know it got preempted. Note that none of the
11003 			 * switch*_from() methods know the new class and none
11004 			 * of the switch*_to() methods know the old class.
11005 			 */
11006 			if (sched_class_above(p->sched_class, ctx->class)) {
11007 				rq->next_class->wakeup_preempt(rq, p, 0);
11008 				rq->next_class = p->sched_class;
11009 			}
11010 			/*
11011 			 * If this was a degradation in class; make sure to
11012 			 * reschedule.
11013 			 */
11014 			if (sched_class_above(ctx->class, p->sched_class))
11015 				resched_curr(rq);
11016 		}
11017 	} else {
11018 		p->sched_class->prio_changed(rq, p, ctx->prio);
11019 	}
11020 }
11021