xref: /linux/kernel/sched/core.c (revision a19ce320c379e0519b68178c596e43d1d5dda03b)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  kernel/sched/core.c
4  *
5  *  Core kernel scheduler code and related syscalls
6  *
7  *  Copyright (C) 1991-2002  Linus Torvalds
8  */
9 #include <linux/highmem.h>
10 #include <linux/hrtimer_api.h>
11 #include <linux/ktime_api.h>
12 #include <linux/sched/signal.h>
13 #include <linux/syscalls_api.h>
14 #include <linux/debug_locks.h>
15 #include <linux/prefetch.h>
16 #include <linux/capability.h>
17 #include <linux/pgtable_api.h>
18 #include <linux/wait_bit.h>
19 #include <linux/jiffies.h>
20 #include <linux/spinlock_api.h>
21 #include <linux/cpumask_api.h>
22 #include <linux/lockdep_api.h>
23 #include <linux/hardirq.h>
24 #include <linux/softirq.h>
25 #include <linux/refcount_api.h>
26 #include <linux/topology.h>
27 #include <linux/sched/clock.h>
28 #include <linux/sched/cond_resched.h>
29 #include <linux/sched/cputime.h>
30 #include <linux/sched/debug.h>
31 #include <linux/sched/hotplug.h>
32 #include <linux/sched/init.h>
33 #include <linux/sched/isolation.h>
34 #include <linux/sched/loadavg.h>
35 #include <linux/sched/mm.h>
36 #include <linux/sched/nohz.h>
37 #include <linux/sched/rseq_api.h>
38 #include <linux/sched/rt.h>
39 
40 #include <linux/blkdev.h>
41 #include <linux/context_tracking.h>
42 #include <linux/cpuset.h>
43 #include <linux/delayacct.h>
44 #include <linux/init_task.h>
45 #include <linux/interrupt.h>
46 #include <linux/ioprio.h>
47 #include <linux/kallsyms.h>
48 #include <linux/kcov.h>
49 #include <linux/kprobes.h>
50 #include <linux/llist_api.h>
51 #include <linux/mmu_context.h>
52 #include <linux/mmzone.h>
53 #include <linux/mutex_api.h>
54 #include <linux/nmi.h>
55 #include <linux/nospec.h>
56 #include <linux/perf_event_api.h>
57 #include <linux/profile.h>
58 #include <linux/psi.h>
59 #include <linux/rcuwait_api.h>
60 #include <linux/rseq.h>
61 #include <linux/sched/wake_q.h>
62 #include <linux/scs.h>
63 #include <linux/slab.h>
64 #include <linux/syscalls.h>
65 #include <linux/vtime.h>
66 #include <linux/wait_api.h>
67 #include <linux/workqueue_api.h>
68 
69 #ifdef CONFIG_PREEMPT_DYNAMIC
70 # ifdef CONFIG_GENERIC_ENTRY
71 #  include <linux/entry-common.h>
72 # endif
73 #endif
74 
75 #include <uapi/linux/sched/types.h>
76 
77 #include <asm/irq_regs.h>
78 #include <asm/switch_to.h>
79 #include <asm/tlb.h>
80 
81 #define CREATE_TRACE_POINTS
82 #include <linux/sched/rseq_api.h>
83 #include <trace/events/sched.h>
84 #include <trace/events/ipi.h>
85 #undef CREATE_TRACE_POINTS
86 
87 #include "sched.h"
88 #include "stats.h"
89 
90 #include "autogroup.h"
91 #include "pelt.h"
92 #include "smp.h"
93 #include "stats.h"
94 
95 #include "../workqueue_internal.h"
96 #include "../../io_uring/io-wq.h"
97 #include "../smpboot.h"
98 
99 EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
100 EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);
101 
102 /*
103  * Export tracepoints that act as a bare tracehook (i.e., have no trace event
104  * associated with them) to allow external modules to probe them.
105  */
106 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp);
107 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
108 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
109 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
110 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
111 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_hw_tp);
112 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp);
113 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
114 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
115 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
116 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
117 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_compute_energy_tp);
118 
119 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
120 
121 #ifdef CONFIG_SCHED_DEBUG
122 /*
123  * Debugging: various feature bits
124  *
125  * If SCHED_DEBUG is disabled, each compilation unit has its own copy of
126  * sysctl_sched_features, defined in sched.h, to allow constant propagation
127  * at compile time and compiler optimization based on the features' defaults.
128  */
129 #define SCHED_FEAT(name, enabled)	\
130 	(1UL << __SCHED_FEAT_##name) * enabled |
131 const_debug unsigned int sysctl_sched_features =
132 #include "features.h"
133 	0;
134 #undef SCHED_FEAT
135 
136 /*
137  * Print a warning if need_resched stays set for longer than the given duration (if
138  * LATENCY_WARN is enabled).
139  *
140  * If sysctl_resched_latency_warn_once is set, only one warning will be shown
141  * per boot.
142  */
143 __read_mostly int sysctl_resched_latency_warn_ms = 100;
144 __read_mostly int sysctl_resched_latency_warn_once = 1;
145 #endif /* CONFIG_SCHED_DEBUG */
146 
147 /*
148  * Number of tasks to iterate in a single balance run.
149  * Limited because this is done with IRQs disabled.
150  */
151 const_debug unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;
152 
153 __read_mostly int scheduler_running;
154 
155 #ifdef CONFIG_SCHED_CORE
156 
157 DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);
158 
159 /* kernel prio, less is more */
160 static inline int __task_prio(const struct task_struct *p)
161 {
162 	if (p->sched_class == &stop_sched_class) /* trumps deadline */
163 		return -2;
164 
165 	if (rt_prio(p->prio)) /* includes deadline */
166 		return p->prio; /* [-1, 99] */
167 
168 	if (p->sched_class == &idle_sched_class)
169 		return MAX_RT_PRIO + NICE_WIDTH; /* 140 */
170 
171 	return MAX_RT_PRIO + MAX_NICE; /* 120, squash fair */
172 }
173 
174 /*
175  * l(a,b)
176  * le(a,b) := !l(b,a)
177  * g(a,b)  := l(b,a)
178  * ge(a,b) := !l(a,b)
179  */
180 
181 /* real prio, less is less */
182 static inline bool prio_less(const struct task_struct *a,
183 			     const struct task_struct *b, bool in_fi)
184 {
185 
186 	int pa = __task_prio(a), pb = __task_prio(b);
187 
188 	if (-pa < -pb)
189 		return true;
190 
191 	if (-pb < -pa)
192 		return false;
193 
194 	if (pa == -1) /* dl_prio() doesn't work because of stop_class above */
195 		return !dl_time_before(a->dl.deadline, b->dl.deadline);
196 
197 	if (pa == MAX_RT_PRIO + MAX_NICE)	/* fair */
198 		return cfs_prio_less(a, b, in_fi);
199 
200 	return false;
201 }
202 
203 static inline bool __sched_core_less(const struct task_struct *a,
204 				     const struct task_struct *b)
205 {
206 	if (a->core_cookie < b->core_cookie)
207 		return true;
208 
209 	if (a->core_cookie > b->core_cookie)
210 		return false;
211 
212 	/* flip prio, so high prio is leftmost */
213 	if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count))
214 		return true;
215 
216 	return false;
217 }
218 
219 #define __node_2_sc(node) rb_entry((node), struct task_struct, core_node)
220 
221 static inline bool rb_sched_core_less(struct rb_node *a, const struct rb_node *b)
222 {
223 	return __sched_core_less(__node_2_sc(a), __node_2_sc(b));
224 }
225 
226 static inline int rb_sched_core_cmp(const void *key, const struct rb_node *node)
227 {
228 	const struct task_struct *p = __node_2_sc(node);
229 	unsigned long cookie = (unsigned long)key;
230 
231 	if (cookie < p->core_cookie)
232 		return -1;
233 
234 	if (cookie > p->core_cookie)
235 		return 1;
236 
237 	return 0;
238 }
239 
240 void sched_core_enqueue(struct rq *rq, struct task_struct *p)
241 {
242 	rq->core->core_task_seq++;
243 
244 	if (!p->core_cookie)
245 		return;
246 
247 	rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);
248 }
249 
250 void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags)
251 {
252 	rq->core->core_task_seq++;
253 
254 	if (sched_core_enqueued(p)) {
255 		rb_erase(&p->core_node, &rq->core_tree);
256 		RB_CLEAR_NODE(&p->core_node);
257 	}
258 
259 	/*
260 	 * Migrating the last task off the CPU, with the CPU in forced idle
261 	 * state. Reschedule to create an accounting edge for forced idle,
262 	 * and re-examine whether the core is still in forced idle state.
263 	 */
264 	if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 &&
265 	    rq->core->core_forceidle_count && rq->curr == rq->idle)
266 		resched_curr(rq);
267 }
268 
269 static int sched_task_is_throttled(struct task_struct *p, int cpu)
270 {
271 	if (p->sched_class->task_is_throttled)
272 		return p->sched_class->task_is_throttled(p, cpu);
273 
274 	return 0;
275 }
276 
277 static struct task_struct *sched_core_next(struct task_struct *p, unsigned long cookie)
278 {
279 	struct rb_node *node = &p->core_node;
280 	int cpu = task_cpu(p);
281 
282 	do {
283 		node = rb_next(node);
284 		if (!node)
285 			return NULL;
286 
287 		p = __node_2_sc(node);
288 		if (p->core_cookie != cookie)
289 			return NULL;
290 
291 	} while (sched_task_is_throttled(p, cpu));
292 
293 	return p;
294 }
295 
296 /*
297  * Find the leftmost (i.e., highest-priority) unthrottled task matching @cookie.
298  * If no suitable task is found, NULL will be returned.
299  */
300 static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie)
301 {
302 	struct task_struct *p;
303 	struct rb_node *node;
304 
305 	node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp);
306 	if (!node)
307 		return NULL;
308 
309 	p = __node_2_sc(node);
310 	if (!sched_task_is_throttled(p, rq->cpu))
311 		return p;
312 
313 	return sched_core_next(p, cookie);
314 }
315 
316 /*
317  * Magic required such that:
318  *
319  *	raw_spin_rq_lock(rq);
320  *	...
321  *	raw_spin_rq_unlock(rq);
322  *
323  * ends up locking and unlocking the _same_ lock, and all CPUs
324  * always agree on which rq has which lock.
325  *
326  * XXX entirely possible to selectively enable cores, don't bother for now.
327  */
328 
329 static DEFINE_MUTEX(sched_core_mutex);
330 static atomic_t sched_core_count;
331 static struct cpumask sched_core_mask;
332 
333 static void sched_core_lock(int cpu, unsigned long *flags)
334 {
335 	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
336 	int t, i = 0;
337 
338 	local_irq_save(*flags);
339 	for_each_cpu(t, smt_mask)
340 		raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);
341 }
342 
343 static void sched_core_unlock(int cpu, unsigned long *flags)
344 {
345 	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
346 	int t;
347 
348 	for_each_cpu(t, smt_mask)
349 		raw_spin_unlock(&cpu_rq(t)->__lock);
350 	local_irq_restore(*flags);
351 }
352 
353 static void __sched_core_flip(bool enabled)
354 {
355 	unsigned long flags;
356 	int cpu, t;
357 
358 	cpus_read_lock();
359 
360 	/*
361 	 * Toggle the online cores, one by one.
362 	 */
363 	cpumask_copy(&sched_core_mask, cpu_online_mask);
364 	for_each_cpu(cpu, &sched_core_mask) {
365 		const struct cpumask *smt_mask = cpu_smt_mask(cpu);
366 
367 		sched_core_lock(cpu, &flags);
368 
369 		for_each_cpu(t, smt_mask)
370 			cpu_rq(t)->core_enabled = enabled;
371 
372 		cpu_rq(cpu)->core->core_forceidle_start = 0;
373 
374 		sched_core_unlock(cpu, &flags);
375 
376 		cpumask_andnot(&sched_core_mask, &sched_core_mask, smt_mask);
377 	}
378 
379 	/*
380 	 * Toggle the offline CPUs.
381 	 */
382 	for_each_cpu_andnot(cpu, cpu_possible_mask, cpu_online_mask)
383 		cpu_rq(cpu)->core_enabled = enabled;
384 
385 	cpus_read_unlock();
386 }
387 
388 static void sched_core_assert_empty(void)
389 {
390 	int cpu;
391 
392 	for_each_possible_cpu(cpu)
393 		WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree));
394 }
395 
396 static void __sched_core_enable(void)
397 {
398 	static_branch_enable(&__sched_core_enabled);
399 	/*
400 	 * Ensure all previous instances of raw_spin_rq_*lock() have finished
401 	 * and future ones will observe !sched_core_disabled().
402 	 */
403 	synchronize_rcu();
404 	__sched_core_flip(true);
405 	sched_core_assert_empty();
406 }
407 
408 static void __sched_core_disable(void)
409 {
410 	sched_core_assert_empty();
411 	__sched_core_flip(false);
412 	static_branch_disable(&__sched_core_enabled);
413 }
414 
415 void sched_core_get(void)
416 {
417 	if (atomic_inc_not_zero(&sched_core_count))
418 		return;
419 
420 	mutex_lock(&sched_core_mutex);
421 	if (!atomic_read(&sched_core_count))
422 		__sched_core_enable();
423 
424 	smp_mb__before_atomic();
425 	atomic_inc(&sched_core_count);
426 	mutex_unlock(&sched_core_mutex);
427 }
428 
429 static void __sched_core_put(struct work_struct *work)
430 {
431 	if (atomic_dec_and_mutex_lock(&sched_core_count, &sched_core_mutex)) {
432 		__sched_core_disable();
433 		mutex_unlock(&sched_core_mutex);
434 	}
435 }
436 
437 void sched_core_put(void)
438 {
439 	static DECLARE_WORK(_work, __sched_core_put);
440 
441 	/*
442 	 * "There can be only one"
443 	 *
444 	 * Either this is the last one, or we don't actually need to do any
445 	 * 'work'. If it is the last *again*, we rely on
446 	 * WORK_STRUCT_PENDING_BIT.
447 	 */
448 	if (!atomic_add_unless(&sched_core_count, -1, 1))
449 		schedule_work(&_work);
450 }
451 
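/*
 * Illustrative usage sketch (not part of the kernel sources): users of core
 * scheduling keep a reference for as long as the feature is needed, e.g. for
 * the lifetime of a core-scheduling cookie:
 *
 *	sched_core_get();	// first reference enables the static key
 *	...			// create and use core cookies
 *	sched_core_put();	// last reference disables it again (via workqueue)
 */
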
452 #else /* !CONFIG_SCHED_CORE */
453 
454 static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { }
455 static inline void
456 sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { }
457 
458 #endif /* CONFIG_SCHED_CORE */
459 
460 /*
461  * Serialization rules:
462  *
463  * Lock order:
464  *
465  *   p->pi_lock
466  *     rq->lock
467  *       hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
468  *
469  *  rq1->lock
470  *    rq2->lock  where: rq1 < rq2
471  *
472  * Regular state:
473  *
474  * Normal scheduling state is serialized by rq->lock. __schedule() takes the
475  * local CPU's rq->lock, it optionally removes the task from the runqueue and
476  * always looks at the local rq data structures to find the most eligible task
477  * to run next.
478  *
479  * Task enqueue is also under rq->lock, possibly taken from another CPU.
480  * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
481  * the local CPU to avoid bouncing the runqueue state around [ see
482  * ttwu_queue_wakelist() ].
483  *
484  * Task wakeup, specifically wakeups that involve migration, are horribly
485  * complicated to avoid having to take two rq->locks.
486  *
487  * Special state:
488  *
489  * System-calls and anything external will use task_rq_lock() which acquires
490  * both p->pi_lock and rq->lock. As a consequence the state they change is
491  * stable while holding either lock:
492  *
493  *  - sched_setaffinity()/
494  *    set_cpus_allowed_ptr():	p->cpus_ptr, p->nr_cpus_allowed
495  *  - set_user_nice():		p->se.load, p->*prio
496  *  - __sched_setscheduler():	p->sched_class, p->policy, p->*prio,
497  *				p->se.load, p->rt_priority,
498  *				p->dl.dl_{runtime, deadline, period, flags, bw, density}
499  *  - sched_setnuma():		p->numa_preferred_nid
500  *  - sched_move_task():	p->sched_task_group
501  *  - uclamp_update_active()	p->uclamp*
502  *
503  * p->state <- TASK_*:
504  *
505  *   is changed locklessly using set_current_state(), __set_current_state() or
506  *   set_special_state(), see their respective comments, or by
507  *   try_to_wake_up(). This latter uses p->pi_lock to serialize against
508  *   try_to_wake_up(). The latter uses p->pi_lock to serialize against a
509  *   concurrent wakeup of the same task.
510  * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
511  *
512  *   is set by activate_task() and cleared by deactivate_task(), under
513  *   rq->lock. Non-zero indicates the task is runnable, the special
514  *   ON_RQ_MIGRATING state is used for migration without holding both
515  *   rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
516  *
517  * p->on_cpu <- { 0, 1 }:
518  *
519  *   is set by prepare_task() and cleared by finish_task() such that it will be
520  *   set before p is scheduled-in and cleared after p is scheduled-out, both
521  *   under rq->lock. Non-zero indicates the task is running on its CPU.
522  *
523  *   [ The astute reader will observe that it is possible for two tasks on one
524  *     CPU to have ->on_cpu = 1 at the same time. ]
525  *
526  * task_cpu(p): is changed by set_task_cpu(), the rules are:
527  *
528  *  - Don't call set_task_cpu() on a blocked task:
529  *
530  *    We don't care what CPU we're not running on; this simplifies hotplug,
531  *    as the CPU assignment of blocked tasks isn't required to be valid.
532  *
533  *  - for try_to_wake_up(), called under p->pi_lock:
534  *
535  *    This allows try_to_wake_up() to only take one rq->lock, see its comment.
536  *
537  *  - for migration called under rq->lock:
538  *    [ see task_on_rq_migrating() in task_rq_lock() ]
539  *
540  *    o move_queued_task()
541  *    o detach_task()
542  *
543  *  - for migration called under double_rq_lock():
544  *
545  *    o __migrate_swap_task()
546  *    o push_rt_task() / pull_rt_task()
547  *    o push_dl_task() / pull_dl_task()
548  *    o dl_task_offline_migration()
549  *
550  */
551 
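/*
 * Illustrative sketch (not part of the kernel sources) of the rules above, as
 * seen from "external" code that needs stable task state:
 *
 *	struct rq_flags rf;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &rf);	// acquires p->pi_lock, then rq->lock
 *	...				// p->policy, p->*prio, task_cpu(p)
 *					// etc. are stable here
 *	task_rq_unlock(rq, p, &rf);
 */
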
552 void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
553 {
554 	raw_spinlock_t *lock;
555 
556 	/* Matches synchronize_rcu() in __sched_core_enable() */
557 	preempt_disable();
558 	if (sched_core_disabled()) {
559 		raw_spin_lock_nested(&rq->__lock, subclass);
560 		/* preempt_count *MUST* be > 1 */
561 		preempt_enable_no_resched();
562 		return;
563 	}
564 
565 	for (;;) {
566 		lock = __rq_lockp(rq);
567 		raw_spin_lock_nested(lock, subclass);
568 		if (likely(lock == __rq_lockp(rq))) {
569 			/* preempt_count *MUST* be > 1 */
570 			preempt_enable_no_resched();
571 			return;
572 		}
573 		raw_spin_unlock(lock);
574 	}
575 }
576 
577 bool raw_spin_rq_trylock(struct rq *rq)
578 {
579 	raw_spinlock_t *lock;
580 	bool ret;
581 
582 	/* Matches synchronize_rcu() in __sched_core_enable() */
583 	preempt_disable();
584 	if (sched_core_disabled()) {
585 		ret = raw_spin_trylock(&rq->__lock);
586 		preempt_enable();
587 		return ret;
588 	}
589 
590 	for (;;) {
591 		lock = __rq_lockp(rq);
592 		ret = raw_spin_trylock(lock);
593 		if (!ret || (likely(lock == __rq_lockp(rq)))) {
594 			preempt_enable();
595 			return ret;
596 		}
597 		raw_spin_unlock(lock);
598 	}
599 }
600 
601 void raw_spin_rq_unlock(struct rq *rq)
602 {
603 	raw_spin_unlock(rq_lockp(rq));
604 }
605 
606 #ifdef CONFIG_SMP
607 /*
608  * double_rq_lock - safely lock two runqueues
609  */
610 void double_rq_lock(struct rq *rq1, struct rq *rq2)
611 {
612 	lockdep_assert_irqs_disabled();
613 
614 	if (rq_order_less(rq2, rq1))
615 		swap(rq1, rq2);
616 
617 	raw_spin_rq_lock(rq1);
618 	if (__rq_lockp(rq1) != __rq_lockp(rq2))
619 		raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);
620 
621 	double_rq_clock_clear_update(rq1, rq2);
622 }
623 #endif
624 
625 /*
626  * __task_rq_lock - lock the rq @p resides on.
627  */
628 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
629 	__acquires(rq->lock)
630 {
631 	struct rq *rq;
632 
633 	lockdep_assert_held(&p->pi_lock);
634 
635 	for (;;) {
636 		rq = task_rq(p);
637 		raw_spin_rq_lock(rq);
638 		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
639 			rq_pin_lock(rq, rf);
640 			return rq;
641 		}
642 		raw_spin_rq_unlock(rq);
643 
644 		while (unlikely(task_on_rq_migrating(p)))
645 			cpu_relax();
646 	}
647 }
648 
649 /*
650  * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
651  */
652 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
653 	__acquires(p->pi_lock)
654 	__acquires(rq->lock)
655 {
656 	struct rq *rq;
657 
658 	for (;;) {
659 		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
660 		rq = task_rq(p);
661 		raw_spin_rq_lock(rq);
662 		/*
663 		 *	move_queued_task()		task_rq_lock()
664 		 *
665 		 *	ACQUIRE (rq->lock)
666 		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
667 		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
668 		 *	[S] ->cpu = new_cpu		[L] task_rq()
669 		 *					[L] ->on_rq
670 		 *	RELEASE (rq->lock)
671 		 *
672 		 * If we observe the old CPU in task_rq_lock(), the acquire of
673 		 * the old rq->lock will fully serialize against the stores.
674 		 *
675 		 * If we observe the new CPU in task_rq_lock(), the address
676 		 * dependency headed by '[L] rq = task_rq()' and the acquire
677 		 * will pair with the WMB to ensure we then also see migrating.
678 		 */
679 		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
680 			rq_pin_lock(rq, rf);
681 			return rq;
682 		}
683 		raw_spin_rq_unlock(rq);
684 		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
685 
686 		while (unlikely(task_on_rq_migrating(p)))
687 			cpu_relax();
688 	}
689 }
690 
691 /*
692  * RQ-clock updating methods:
693  */
694 
695 static void update_rq_clock_task(struct rq *rq, s64 delta)
696 {
697 /*
698  * In theory, the compile should just see 0 here, and optimize out the call
699  * In theory, the compiler should just see 0 here, and optimize out the call
700  */
701 	s64 __maybe_unused steal = 0, irq_delta = 0;
702 
703 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
704 	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
705 
706 	/*
707 	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
708 	 * this case when a previous update_rq_clock() happened inside a
709 	 * {soft,}irq region.
710 	 *
711 	 * When this happens, we stop ->clock_task and only update the
712 	 * prev_irq_time stamp to account for the part that fit, so that a next
713 	 * update will consume the rest. This ensures ->clock_task is
714 	 * monotonic.
715 	 *
716 	 * It does however cause some slight misattribution of {soft,}irq
717 	 * time, a more accurate solution would be to update the irq_time using
718 	 * the current rq->clock timestamp, except that would require using
719 	 * atomic ops.
720 	 */
721 	if (irq_delta > delta)
722 		irq_delta = delta;
723 
724 	rq->prev_irq_time += irq_delta;
725 	delta -= irq_delta;
726 	delayacct_irq(rq->curr, irq_delta);
727 #endif
728 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
729 	if (static_key_false((&paravirt_steal_rq_enabled))) {
730 		steal = paravirt_steal_clock(cpu_of(rq));
731 		steal -= rq->prev_steal_time_rq;
732 
733 		if (unlikely(steal > delta))
734 			steal = delta;
735 
736 		rq->prev_steal_time_rq += steal;
737 		delta -= steal;
738 	}
739 #endif
740 
741 	rq->clock_task += delta;
742 
743 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
744 	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
745 		update_irq_load_avg(rq, irq_delta + steal);
746 #endif
747 	update_rq_clock_pelt(rq, delta);
748 }
749 
750 void update_rq_clock(struct rq *rq)
751 {
752 	s64 delta;
753 
754 	lockdep_assert_rq_held(rq);
755 
756 	if (rq->clock_update_flags & RQCF_ACT_SKIP)
757 		return;
758 
759 #ifdef CONFIG_SCHED_DEBUG
760 	if (sched_feat(WARN_DOUBLE_CLOCK))
761 		SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
762 	rq->clock_update_flags |= RQCF_UPDATED;
763 #endif
764 
765 	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
766 	if (delta < 0)
767 		return;
768 	rq->clock += delta;
769 	update_rq_clock_task(rq, delta);
770 }
771 
772 #ifdef CONFIG_SCHED_HRTICK
773 /*
774  * Use HR-timers to deliver accurate preemption points.
775  */
776 
777 static void hrtick_clear(struct rq *rq)
778 {
779 	if (hrtimer_active(&rq->hrtick_timer))
780 		hrtimer_cancel(&rq->hrtick_timer);
781 }
782 
783 /*
784  * High-resolution timer tick.
785  * Runs from hardirq context with interrupts disabled.
786  */
787 static enum hrtimer_restart hrtick(struct hrtimer *timer)
788 {
789 	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
790 	struct rq_flags rf;
791 
792 	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
793 
794 	rq_lock(rq, &rf);
795 	update_rq_clock(rq);
796 	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
797 	rq_unlock(rq, &rf);
798 
799 	return HRTIMER_NORESTART;
800 }
801 
802 #ifdef CONFIG_SMP
803 
804 static void __hrtick_restart(struct rq *rq)
805 {
806 	struct hrtimer *timer = &rq->hrtick_timer;
807 	ktime_t time = rq->hrtick_time;
808 
809 	hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
810 }
811 
812 /*
813  * called from hardirq (IPI) context
814  */
815 static void __hrtick_start(void *arg)
816 {
817 	struct rq *rq = arg;
818 	struct rq_flags rf;
819 
820 	rq_lock(rq, &rf);
821 	__hrtick_restart(rq);
822 	rq_unlock(rq, &rf);
823 }
824 
825 /*
826  * Called to set the hrtick timer state.
827  *
828  * called with rq->lock held and irqs disabled
829  */
830 void hrtick_start(struct rq *rq, u64 delay)
831 {
832 	struct hrtimer *timer = &rq->hrtick_timer;
833 	s64 delta;
834 
835 	/*
836 	 * Don't schedule slices shorter than 10000ns; that just
837 	 * doesn't make sense and can cause timer DoS.
838 	 */
839 	delta = max_t(s64, delay, 10000LL);
840 	rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);
841 
842 	if (rq == this_rq())
843 		__hrtick_restart(rq);
844 	else
845 		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
846 }
847 
848 #else
849 /*
850  * Called to set the hrtick timer state.
851  *
852  * called with rq->lock held and irqs disabled
853  */
854 void hrtick_start(struct rq *rq, u64 delay)
855 {
856 	/*
857 	 * Don't schedule slices shorter than 10000ns; that just
858 	 * doesn't make sense. Rely on vruntime for fairness.
859 	 */
860 	delay = max_t(u64, delay, 10000LL);
861 	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
862 		      HRTIMER_MODE_REL_PINNED_HARD);
863 }
864 
865 #endif /* CONFIG_SMP */
866 
867 static void hrtick_rq_init(struct rq *rq)
868 {
869 #ifdef CONFIG_SMP
870 	INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
871 #endif
872 	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
873 	rq->hrtick_timer.function = hrtick;
874 }
875 #else	/* CONFIG_SCHED_HRTICK */
876 static inline void hrtick_clear(struct rq *rq)
877 {
878 }
879 
880 static inline void hrtick_rq_init(struct rq *rq)
881 {
882 }
883 #endif	/* CONFIG_SCHED_HRTICK */
884 
885 /*
886  * cmpxchg-based fetch_or(); a macro so it works for different integer types
887  */
888 #define fetch_or(ptr, mask)						\
889 	({								\
890 		typeof(ptr) _ptr = (ptr);				\
891 		typeof(mask) _mask = (mask);				\
892 		typeof(*_ptr) _val = *_ptr;				\
893 									\
894 		do {							\
895 		} while (!try_cmpxchg(_ptr, &_val, _val | _mask));	\
896 	_val;								\
897 })
898 
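/*
 * Semantics sketch (illustrative, not part of the kernel sources): like
 * atomic_fetch_or(), fetch_or() returns the value that was in *ptr before the
 * OR. E.g. with *ptr == 0x01:
 *
 *	old = fetch_or(ptr, 0x02);	// *ptr becomes 0x03, old == 0x01
 */
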
899 #if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
900 /*
901  * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
902  * this avoids any races wrt polling state changes and thereby avoids
903  * spurious IPIs.
904  */
905 static inline bool set_nr_and_not_polling(struct task_struct *p)
906 {
907 	struct thread_info *ti = task_thread_info(p);
908 	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
909 }
910 
911 /*
912  * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
913  *
914  * If this returns true, then the idle task promises to call
915  * sched_ttwu_pending() and reschedule soon.
916  */
917 static bool set_nr_if_polling(struct task_struct *p)
918 {
919 	struct thread_info *ti = task_thread_info(p);
920 	typeof(ti->flags) val = READ_ONCE(ti->flags);
921 
922 	do {
923 		if (!(val & _TIF_POLLING_NRFLAG))
924 			return false;
925 		if (val & _TIF_NEED_RESCHED)
926 			return true;
927 	} while (!try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED));
928 
929 	return true;
930 }
931 
932 #else
933 static inline bool set_nr_and_not_polling(struct task_struct *p)
934 {
935 	set_tsk_need_resched(p);
936 	return true;
937 }
938 
939 #ifdef CONFIG_SMP
940 static inline bool set_nr_if_polling(struct task_struct *p)
941 {
942 	return false;
943 }
944 #endif /* CONFIG_SMP */
945 #endif /* CONFIG_SMP && TIF_POLLING_NRFLAG */
946 
947 static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
948 {
949 	struct wake_q_node *node = &task->wake_q;
950 
951 	/*
952 	 * Atomically grab the task; if ->wake_q is already non-NULL it means
953 	 * it's already queued (either by us or someone else) and will get the
954 	 * wakeup due to that.
955 	 *
956 	 * In order to ensure that a pending wakeup will observe our pending
957 	 * state, even in the failed case, an explicit smp_mb() must be used.
958 	 */
959 	smp_mb__before_atomic();
960 	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
961 		return false;
962 
963 	/*
964 	 * The head is context-local, so there can be no concurrency.
965 	 */
966 	*head->lastp = node;
967 	head->lastp = &node->next;
968 	return true;
969 }
970 
971 /**
972  * wake_q_add() - queue a wakeup for 'later' waking.
973  * @head: the wake_q_head to add @task to
974  * @task: the task to queue for 'later' wakeup
975  *
976  * Queue a task for later wakeup, most likely by the wake_up_q() call in the
977  * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
978  * instantly.
979  *
980  * This function must be used as-if it were wake_up_process(); IOW the task
981  * must be ready to be woken at this location.
982  */
983 void wake_q_add(struct wake_q_head *head, struct task_struct *task)
984 {
985 	if (__wake_q_add(head, task))
986 		get_task_struct(task);
987 }
988 
989 /**
990  * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
991  * @head: the wake_q_head to add @task to
992  * @task: the task to queue for 'later' wakeup
993  *
994  * Queue a task for later wakeup, most likely by the wake_up_q() call in the
995  * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
996  * instantly.
997  *
998  * This function must be used as-if it were wake_up_process(); IOW the task
999  * must be ready to be woken at this location.
1000  *
1001  * This function is essentially a task-safe equivalent to wake_q_add(). Callers
1002  * that already hold reference to @task can call the 'safe' version and trust
1003  * wake_q to do the right thing depending whether or not the @task is already
1004  * queued for wakeup.
1005  */
1006 void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
1007 {
1008 	if (!__wake_q_add(head, task))
1009 		put_task_struct(task);
1010 }
1011 
1012 void wake_up_q(struct wake_q_head *head)
1013 {
1014 	struct wake_q_node *node = head->first;
1015 
1016 	while (node != WAKE_Q_TAIL) {
1017 		struct task_struct *task;
1018 
1019 		task = container_of(node, struct task_struct, wake_q);
1020 		/* Task can safely be re-inserted now: */
1021 		node = node->next;
1022 		task->wake_q.next = NULL;
1023 
1024 		/*
1025 		 * wake_up_process() executes a full barrier, which pairs with
1026 		 * the queueing in wake_q_add() so as not to miss wakeups.
1027 		 */
1028 		wake_up_process(task);
1029 		put_task_struct(task);
1030 	}
1031 }
1032 
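/*
 * Typical usage sketch (illustrative; futex and rwsem are real users):
 * wakeups are collected while holding a lock ("some_lock" below is a
 * placeholder) and only issued after the lock is dropped:
 *
 *	DEFINE_WAKE_Q(wakeq);
 *
 *	spin_lock(&some_lock);
 *	wake_q_add(&wakeq, p);		// takes a reference on p
 *	spin_unlock(&some_lock);
 *
 *	wake_up_q(&wakeq);		// wakes p and drops the reference
 */
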
1033 /*
1034  * resched_curr - mark rq's current task 'to be rescheduled now'.
1035  *
1036  * On UP this means setting the need_resched flag; on SMP it
1037  * might also involve a cross-CPU call to trigger the scheduler on
1038  * the target CPU.
1039  */
1040 void resched_curr(struct rq *rq)
1041 {
1042 	struct task_struct *curr = rq->curr;
1043 	int cpu;
1044 
1045 	lockdep_assert_rq_held(rq);
1046 
1047 	if (test_tsk_need_resched(curr))
1048 		return;
1049 
1050 	cpu = cpu_of(rq);
1051 
1052 	if (cpu == smp_processor_id()) {
1053 		set_tsk_need_resched(curr);
1054 		set_preempt_need_resched();
1055 		return;
1056 	}
1057 
1058 	if (set_nr_and_not_polling(curr))
1059 		smp_send_reschedule(cpu);
1060 	else
1061 		trace_sched_wake_idle_without_ipi(cpu);
1062 }
1063 
1064 void resched_cpu(int cpu)
1065 {
1066 	struct rq *rq = cpu_rq(cpu);
1067 	unsigned long flags;
1068 
1069 	raw_spin_rq_lock_irqsave(rq, flags);
1070 	if (cpu_online(cpu) || cpu == smp_processor_id())
1071 		resched_curr(rq);
1072 	raw_spin_rq_unlock_irqrestore(rq, flags);
1073 }
1074 
1075 #ifdef CONFIG_SMP
1076 #ifdef CONFIG_NO_HZ_COMMON
1077 /*
1078  * In the semi idle case, use the nearest busy CPU for migrating timers
1079  * from an idle CPU.  This is good for power-savings.
1080  *
1081  * We don't do a similar optimization for a completely idle system, as
1082  * selecting an idle CPU would add more delay to the timers than intended
1083  * (as that CPU's timer base may not be up to date wrt jiffies etc.).
1084  */
1085 int get_nohz_timer_target(void)
1086 {
1087 	int i, cpu = smp_processor_id(), default_cpu = -1;
1088 	struct sched_domain *sd;
1089 	const struct cpumask *hk_mask;
1090 
1091 	if (housekeeping_cpu(cpu, HK_TYPE_TIMER)) {
1092 		if (!idle_cpu(cpu))
1093 			return cpu;
1094 		default_cpu = cpu;
1095 	}
1096 
1097 	hk_mask = housekeeping_cpumask(HK_TYPE_TIMER);
1098 
1099 	guard(rcu)();
1100 
1101 	for_each_domain(cpu, sd) {
1102 		for_each_cpu_and(i, sched_domain_span(sd), hk_mask) {
1103 			if (cpu == i)
1104 				continue;
1105 
1106 			if (!idle_cpu(i))
1107 				return i;
1108 		}
1109 	}
1110 
1111 	if (default_cpu == -1)
1112 		default_cpu = housekeeping_any_cpu(HK_TYPE_TIMER);
1113 
1114 	return default_cpu;
1115 }
1116 
1117 /*
1118  * When add_timer_on() enqueues a timer into the timer wheel of an
1119  * idle CPU then this timer might expire before the next timer event
1120  * which is scheduled to wake up that CPU. In case of a completely
1121  * idle system the next event might even be infinite time into the
1122  * future. wake_up_idle_cpu() ensures that the CPU is woken up and
1123  * leaves the inner idle loop so the newly added timer is taken into
1124  * account when the CPU goes back to idle and evaluates the timer
1125  * wheel for the next timer event.
1126  */
1127 static void wake_up_idle_cpu(int cpu)
1128 {
1129 	struct rq *rq = cpu_rq(cpu);
1130 
1131 	if (cpu == smp_processor_id())
1132 		return;
1133 
1134 	/*
1135 	 * Set TIF_NEED_RESCHED and send an IPI if in the non-polling
1136 	 * part of the idle loop. This forces an exit from the idle loop
1137 	 * and a round trip to schedule(). Now this could be optimized
1138 	 * because a simple new idle loop iteration is enough to
1139 	 * re-evaluate the next tick. Provided some re-ordering of tick
1140 	 * nohz functions that would need to follow TIF_POLLING_NRFLAG
1141 	 * clearing:
1142 	 *
1143 	 * - On most archs, a simple fetch_or on ti::flags with a
1144 	 *   "0" value would be enough to know if an IPI needs to be sent.
1145 	 *
1146 	 * - x86 needs to perform a last need_resched() check between
1147 	 *   monitor and mwait which doesn't take timers into account.
1148 	 *   There a dedicated TIF_TIMER flag would be required to
1149 	 *   fetch_or here and be checked along with TIF_NEED_RESCHED
1150 	 *   before mwait().
1151 	 *
1152 	 * However, remote timer enqueue is not such a frequent event
1153 	 * and testing of the above solutions didn't appear to show
1154 	 * much benefit.
1155 	 */
1156 	if (set_nr_and_not_polling(rq->idle))
1157 		smp_send_reschedule(cpu);
1158 	else
1159 		trace_sched_wake_idle_without_ipi(cpu);
1160 }
1161 
1162 static bool wake_up_full_nohz_cpu(int cpu)
1163 {
1164 	/*
1165 	 * We just need the target to call irq_exit() and re-evaluate
1166 	 * the next tick. The nohz full kick at least implies that.
1167 	 * If needed we can still optimize that later with an
1168 	 * empty IRQ.
1169 	 */
1170 	if (cpu_is_offline(cpu))
1171 		return true;  /* Don't try to wake offline CPUs. */
1172 	if (tick_nohz_full_cpu(cpu)) {
1173 		if (cpu != smp_processor_id() ||
1174 		    tick_nohz_tick_stopped())
1175 			tick_nohz_full_kick_cpu(cpu);
1176 		return true;
1177 	}
1178 
1179 	return false;
1180 }
1181 
1182 /*
1183  * Wake up the specified CPU.  If the CPU is going offline, it is the
1184  * caller's responsibility to deal with the lost wakeup, for example,
1185  * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
1186  */
1187 void wake_up_nohz_cpu(int cpu)
1188 {
1189 	if (!wake_up_full_nohz_cpu(cpu))
1190 		wake_up_idle_cpu(cpu);
1191 }
1192 
1193 static void nohz_csd_func(void *info)
1194 {
1195 	struct rq *rq = info;
1196 	int cpu = cpu_of(rq);
1197 	unsigned int flags;
1198 
1199 	/*
1200 	 * Release the rq::nohz_csd.
1201 	 */
1202 	flags = atomic_fetch_andnot(NOHZ_KICK_MASK | NOHZ_NEWILB_KICK, nohz_flags(cpu));
1203 	WARN_ON(!(flags & NOHZ_KICK_MASK));
1204 
1205 	rq->idle_balance = idle_cpu(cpu);
1206 	if (rq->idle_balance && !need_resched()) {
1207 		rq->nohz_idle_balance = flags;
1208 		raise_softirq_irqoff(SCHED_SOFTIRQ);
1209 	}
1210 }
1211 
1212 #endif /* CONFIG_NO_HZ_COMMON */
1213 
1214 #ifdef CONFIG_NO_HZ_FULL
1215 static inline bool __need_bw_check(struct rq *rq, struct task_struct *p)
1216 {
1217 	if (rq->nr_running != 1)
1218 		return false;
1219 
1220 	if (p->sched_class != &fair_sched_class)
1221 		return false;
1222 
1223 	if (!task_on_rq_queued(p))
1224 		return false;
1225 
1226 	return true;
1227 }
1228 
1229 bool sched_can_stop_tick(struct rq *rq)
1230 {
1231 	int fifo_nr_running;
1232 
1233 	/* Deadline tasks, even if single, need the tick */
1234 	if (rq->dl.dl_nr_running)
1235 		return false;
1236 
1237 	/*
1238 	 * If there is more than one RR task, we need the tick to affect the
1239 	 * actual RR behaviour.
1240 	 */
1241 	if (rq->rt.rr_nr_running) {
1242 		if (rq->rt.rr_nr_running == 1)
1243 			return true;
1244 		else
1245 			return false;
1246 	}
1247 
1248 	/*
1249 	 * If there are no RR tasks but there are FIFO tasks, we can skip the
1250 	 * tick: there is no forced preemption between FIFO tasks.
1251 	 */
1252 	fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
1253 	if (fifo_nr_running)
1254 		return true;
1255 
1256 	/*
1257 	 * If there are no DL, RR or FIFO tasks, there must only be CFS tasks left;
1258 	 * if there's more than one we need the tick for involuntary
1259 	 * preemption.
1260 	 */
1261 	if (rq->nr_running > 1)
1262 		return false;
1263 
1264 	/*
1265 	 * If there is one task and it has CFS runtime bandwidth constraints
1266 	 * and it's on the CPU now, we don't want to stop the tick.
1267 	 * This check prevents clearing the bit if a newly enqueued task here is
1268 	 * dequeued by migrating while the constrained task continues to run.
1269 	 * E.g. going from 2->1 without going through pick_next_task().
1270 	 */
1271 	if (sched_feat(HZ_BW) && __need_bw_check(rq, rq->curr)) {
1272 		if (cfs_task_bw_constrained(rq->curr))
1273 			return false;
1274 	}
1275 
1276 	return true;
1277 }
1278 #endif /* CONFIG_NO_HZ_FULL */
1279 #endif /* CONFIG_SMP */
1280 
1281 #if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
1282 			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
1283 /*
1284  * Iterate task_group tree rooted at *from, calling @down when first entering a
1285  * node and @up when leaving it for the final time.
1286  *
1287  * Caller must hold rcu_lock or sufficient equivalent.
1288  */
1289 int walk_tg_tree_from(struct task_group *from,
1290 			     tg_visitor down, tg_visitor up, void *data)
1291 {
1292 	struct task_group *parent, *child;
1293 	int ret;
1294 
1295 	parent = from;
1296 
1297 down:
1298 	ret = (*down)(parent, data);
1299 	if (ret)
1300 		goto out;
1301 	list_for_each_entry_rcu(child, &parent->children, siblings) {
1302 		parent = child;
1303 		goto down;
1304 
1305 up:
1306 		continue;
1307 	}
1308 	ret = (*up)(parent, data);
1309 	if (ret || parent == from)
1310 		goto out;
1311 
1312 	child = parent;
1313 	parent = parent->parent;
1314 	if (parent)
1315 		goto up;
1316 out:
1317 	return ret;
1318 }
1319 
1320 int tg_nop(struct task_group *tg, void *data)
1321 {
1322 	return 0;
1323 }
1324 #endif
1325 
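/*
 * Usage sketch (illustrative, not part of the kernel sources): visit every
 * task_group below the root, calling a hypothetical tg_visit_down() on the
 * way down and doing nothing on the way up ("data" is a placeholder too):
 *
 *	rcu_read_lock();
 *	walk_tg_tree_from(&root_task_group, tg_visit_down, tg_nop, data);
 *	rcu_read_unlock();
 */
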
1326 static void set_load_weight(struct task_struct *p, bool update_load)
1327 {
1328 	int prio = p->static_prio - MAX_RT_PRIO;
1329 	struct load_weight *load = &p->se.load;
1330 
1331 	/*
1332 	 * SCHED_IDLE tasks get minimal weight:
1333 	 */
1334 	if (task_has_idle_policy(p)) {
1335 		load->weight = scale_load(WEIGHT_IDLEPRIO);
1336 		load->inv_weight = WMULT_IDLEPRIO;
1337 		return;
1338 	}
1339 
1340 	/*
1341 	 * SCHED_OTHER tasks have to update their load when changing their
1342 	 * weight.
1343 	 */
1344 	if (update_load && p->sched_class == &fair_sched_class) {
1345 		reweight_task(p, prio);
1346 	} else {
1347 		load->weight = scale_load(sched_prio_to_weight[prio]);
1348 		load->inv_weight = sched_prio_to_wmult[prio];
1349 	}
1350 }
1351 
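/*
 * Worked example (illustrative): for a SCHED_OTHER task, prio indexes
 * sched_prio_to_weight[] as static_prio - MAX_RT_PRIO, i.e. nice + 20.
 * Nice 0 maps to a weight of 1024; each nice step scales the weight by
 * roughly 1.25x (nice -1 ~= 1277, nice +1 ~= 820), which is what produces
 * the familiar "~10% CPU per nice level" behaviour.
 */
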
1352 #ifdef CONFIG_UCLAMP_TASK
1353 /*
1354  * Serializes updates of utilization clamp values
1355  *
1356  * The (slow-path) user-space triggers utilization clamp value updates which
1357  * can require updates on (fast-path) scheduler's data structures used to
1358  * support enqueue/dequeue operations.
1359  * While the per-CPU rq lock protects fast-path update operations, user-space
1360  * requests are serialized using a mutex to reduce the risk of conflicting
1361  * updates or API abuses.
1362  */
1363 static DEFINE_MUTEX(uclamp_mutex);
1364 
1365 /* Max allowed minimum utilization */
1366 static unsigned int __maybe_unused sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;
1367 
1368 /* Max allowed maximum utilization */
1369 static unsigned int __maybe_unused sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;
1370 
1371 /*
1372  * By default RT tasks run at the maximum performance point/capacity of the
1373  * system. Uclamp enforces this by always setting UCLAMP_MIN of RT tasks to
1374  * SCHED_CAPACITY_SCALE.
1375  *
1376  * This knob allows admins to change the default behavior when uclamp is being
1377  * used. In battery powered devices, particularly, running at the maximum
1378  * capacity and frequency will increase energy consumption and shorten the
1379  * battery life.
1380  *
1381  * This knob only affects RT tasks whose uclamp_se->user_defined == false.
1382  *
1383  * This knob will not override the system default sched_util_clamp_min defined
1384  * above.
1385  */
1386 static unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE;
1387 
1388 /* All clamps are required to be less or equal than these values */
1389 static struct uclamp_se uclamp_default[UCLAMP_CNT];
1390 
1391 /*
1392  * This static key is used to reduce the uclamp overhead in the fast path. It
1393  * primarily disables the call to uclamp_rq_{inc, dec}() in
1394  * enqueue/dequeue_task().
1395  *
1396  * This allows users to continue to enable uclamp in their kernel config with
1397  * minimum uclamp overhead in the fast path.
1398  *
1399  * As soon as userspace modifies any of the uclamp knobs, the static key is
1400  * enabled, since we then have actual users that make use of uclamp
1401  * functionality.
1402  *
1403  * The knobs that would enable this static key are:
1404  *
1405  *   * A task modifying its uclamp value with sched_setattr().
1406  *   * An admin modifying the sysctl_sched_uclamp_{min, max} via procfs.
1407  *   * An admin modifying the cgroup cpu.uclamp.{min, max}
1408  */
1409 DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);
1410 
1411 /* Integer rounded range for each bucket */
1412 #define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)
1413 
1414 #define for_each_clamp_id(clamp_id) \
1415 	for ((clamp_id) = 0; (clamp_id) < UCLAMP_CNT; (clamp_id)++)
1416 
1417 static inline unsigned int uclamp_bucket_id(unsigned int clamp_value)
1418 {
1419 	return min_t(unsigned int, clamp_value / UCLAMP_BUCKET_DELTA, UCLAMP_BUCKETS - 1);
1420 }
1421 
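/*
 * Worked example (illustrative, assuming the default CONFIG_UCLAMP_BUCKETS_COUNT
 * of 5): UCLAMP_BUCKET_DELTA = DIV_ROUND_CLOSEST(1024, 5) = 205, so
 * uclamp_bucket_id(0) == 0, uclamp_bucket_id(512) == 2 and
 * uclamp_bucket_id(1024) == 4 (capped at UCLAMP_BUCKETS - 1).
 */
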
1422 static inline unsigned int uclamp_none(enum uclamp_id clamp_id)
1423 {
1424 	if (clamp_id == UCLAMP_MIN)
1425 		return 0;
1426 	return SCHED_CAPACITY_SCALE;
1427 }
1428 
1429 static inline void uclamp_se_set(struct uclamp_se *uc_se,
1430 				 unsigned int value, bool user_defined)
1431 {
1432 	uc_se->value = value;
1433 	uc_se->bucket_id = uclamp_bucket_id(value);
1434 	uc_se->user_defined = user_defined;
1435 }
1436 
1437 static inline unsigned int
1438 uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
1439 		  unsigned int clamp_value)
1440 {
1441 	/*
1442 	 * Avoid blocked utilization pushing up the frequency when we go
1443 	 * idle (which drops the max-clamp) by retaining the last known
1444 	 * max-clamp.
1445 	 */
1446 	if (clamp_id == UCLAMP_MAX) {
1447 		rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
1448 		return clamp_value;
1449 	}
1450 
1451 	return uclamp_none(UCLAMP_MIN);
1452 }
1453 
1454 static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
1455 				     unsigned int clamp_value)
1456 {
1457 	/* Reset max-clamp retention only on idle exit */
1458 	if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
1459 		return;
1460 
1461 	uclamp_rq_set(rq, clamp_id, clamp_value);
1462 }
1463 
1464 static inline
1465 unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
1466 				   unsigned int clamp_value)
1467 {
1468 	struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
1469 	int bucket_id = UCLAMP_BUCKETS - 1;
1470 
1471 	/*
1472 	 * Since both min and max clamps are max aggregated, find the
1473 	 * topmost bucket with tasks in it.
1474 	 */
1475 	for ( ; bucket_id >= 0; bucket_id--) {
1476 		if (!bucket[bucket_id].tasks)
1477 			continue;
1478 		return bucket[bucket_id].value;
1479 	}
1480 
1481 	/* No tasks -- default clamp values */
1482 	return uclamp_idle_value(rq, clamp_id, clamp_value);
1483 }
1484 
1485 static void __uclamp_update_util_min_rt_default(struct task_struct *p)
1486 {
1487 	unsigned int default_util_min;
1488 	struct uclamp_se *uc_se;
1489 
1490 	lockdep_assert_held(&p->pi_lock);
1491 
1492 	uc_se = &p->uclamp_req[UCLAMP_MIN];
1493 
1494 	/* Only sync if user didn't override the default */
1495 	if (uc_se->user_defined)
1496 		return;
1497 
1498 	default_util_min = sysctl_sched_uclamp_util_min_rt_default;
1499 	uclamp_se_set(uc_se, default_util_min, false);
1500 }
1501 
1502 static void uclamp_update_util_min_rt_default(struct task_struct *p)
1503 {
1504 	if (!rt_task(p))
1505 		return;
1506 
1507 	/* Protect updates to p->uclamp_* */
1508 	guard(task_rq_lock)(p);
1509 	__uclamp_update_util_min_rt_default(p);
1510 }
1511 
1512 static inline struct uclamp_se
1513 uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
1514 {
1515 	/* Copy by value as we could modify it */
1516 	struct uclamp_se uc_req = p->uclamp_req[clamp_id];
1517 #ifdef CONFIG_UCLAMP_TASK_GROUP
1518 	unsigned int tg_min, tg_max, value;
1519 
1520 	/*
1521 	 * Tasks in autogroups or the root task group will be
1522 	 * restricted by system defaults.
1523 	 */
1524 	if (task_group_is_autogroup(task_group(p)))
1525 		return uc_req;
1526 	if (task_group(p) == &root_task_group)
1527 		return uc_req;
1528 
1529 	tg_min = task_group(p)->uclamp[UCLAMP_MIN].value;
1530 	tg_max = task_group(p)->uclamp[UCLAMP_MAX].value;
1531 	value = uc_req.value;
1532 	value = clamp(value, tg_min, tg_max);
1533 	uclamp_se_set(&uc_req, value, false);
1534 #endif
1535 
1536 	return uc_req;
1537 }
1538 
1539 /*
1540  * The effective clamp bucket index of a task depends on, by increasing
1541  * priority:
1542  * - the task specific clamp value, when explicitly requested from userspace
1543  * - the task group effective clamp value, for tasks neither in the root
1544  *   group nor in an autogroup
1545  * - the system default clamp value, defined by the sysadmin
1546  */
1547 static inline struct uclamp_se
1548 uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
1549 {
1550 	struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id);
1551 	struct uclamp_se uc_max = uclamp_default[clamp_id];
1552 
1553 	/* System default restrictions always apply */
1554 	if (unlikely(uc_req.value > uc_max.value))
1555 		return uc_max;
1556 
1557 	return uc_req;
1558 }
1559 
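/*
 * Worked example (illustrative, assuming the default system-wide limits of
 * 1024): a task requesting UCLAMP_MIN = 512 while its task group's effective
 * range is [0, 300] is first restricted to 300 by uclamp_tg_restrict(); the
 * system default (1024) imposes no further limit, so the effective value is
 * 300.
 */
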
1560 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
1561 {
1562 	struct uclamp_se uc_eff;
1563 
1564 	/* Task currently refcounted: use back-annotated (effective) value */
1565 	if (p->uclamp[clamp_id].active)
1566 		return (unsigned long)p->uclamp[clamp_id].value;
1567 
1568 	uc_eff = uclamp_eff_get(p, clamp_id);
1569 
1570 	return (unsigned long)uc_eff.value;
1571 }
1572 
1573 /*
1574  * When a task is enqueued on a rq, the clamp bucket currently defined by the
1575  * task's uclamp::bucket_id is refcounted on that rq. This also immediately
1576  * updates the rq's clamp value if required.
1577  *
1578  * Tasks can have a task-specific value requested from user-space; within
1579  * each bucket we track the maximum value of the tasks refcounted in it.
1580  * This "local max aggregation" allows tracking the exact "requested" value
1581  * for each bucket when all its RUNNABLE tasks require the same clamp.
1582  */
1583 static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
1584 				    enum uclamp_id clamp_id)
1585 {
1586 	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1587 	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1588 	struct uclamp_bucket *bucket;
1589 
1590 	lockdep_assert_rq_held(rq);
1591 
1592 	/* Update task effective clamp */
1593 	p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);
1594 
1595 	bucket = &uc_rq->bucket[uc_se->bucket_id];
1596 	bucket->tasks++;
1597 	uc_se->active = true;
1598 
1599 	uclamp_idle_reset(rq, clamp_id, uc_se->value);
1600 
1601 	/*
1602 	 * Local max aggregation: rq buckets always track the max
1603 	 * "requested" clamp value of its RUNNABLE tasks.
1604 	 * "requested" clamp value of their RUNNABLE tasks.
1605 	if (bucket->tasks == 1 || uc_se->value > bucket->value)
1606 		bucket->value = uc_se->value;
1607 
1608 	if (uc_se->value > uclamp_rq_get(rq, clamp_id))
1609 		uclamp_rq_set(rq, clamp_id, uc_se->value);
1610 }
1611 
1612 /*
1613  * When a task is dequeued from a rq, the clamp bucket refcounted by the task
1614  * is released. If this is the last task reference counting the rq's max
1615  * active clamp value, then the rq's clamp value is updated.
1616  *
1617  * Both refcounted tasks and rq's cached clamp values are expected to be
1618  * always valid. If they are detected not to be, enforce the expected state
1619  * and warn, as defensive programming.
1620  */
1621 static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
1622 				    enum uclamp_id clamp_id)
1623 {
1624 	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1625 	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1626 	struct uclamp_bucket *bucket;
1627 	unsigned int bkt_clamp;
1628 	unsigned int rq_clamp;
1629 
1630 	lockdep_assert_rq_held(rq);
1631 
1632 	/*
1633 	 * If sched_uclamp_used was enabled after task @p was enqueued,
1634 	 * we could end up with an unbalanced call to uclamp_rq_dec_id().
1635 	 *
1636 	 * In this case the uc_se->active flag should be false since no uclamp
1637 	 * accounting was performed at enqueue time and we can just return
1638 	 * here.
1639 	 *
1640 	 * Need to be careful of the following enqueue/dequeue ordering
1641 	 * problem too:
1642 	 *
1643 	 *	enqueue(taskA)
1644 	 *	// sched_uclamp_used gets enabled
1645 	 *	enqueue(taskB)
1646 	 *	dequeue(taskA)
1647 	 *	// Must not decrement bucket->tasks here
1648 	 *	dequeue(taskB)
1649 	 *
1650 	 * where we could end up with stale data in uc_se and
1651 	 * bucket[uc_se->bucket_id].
1652 	 *
1653 	 * The following check here eliminates the possibility of such race.
1654 	 */
1655 	if (unlikely(!uc_se->active))
1656 		return;
1657 
1658 	bucket = &uc_rq->bucket[uc_se->bucket_id];
1659 
1660 	SCHED_WARN_ON(!bucket->tasks);
1661 	if (likely(bucket->tasks))
1662 		bucket->tasks--;
1663 
1664 	uc_se->active = false;
1665 
1666 	/*
1667 	 * Keep "local max aggregation" simple and accept (possibly)
1668 	 * overboosting some RUNNABLE tasks in the same bucket.
1669 	 * The rq clamp bucket value is reset to its base value whenever
1670 	 * there are no more RUNNABLE tasks refcounting it.
1671 	 */
1672 	if (likely(bucket->tasks))
1673 		return;
1674 
1675 	rq_clamp = uclamp_rq_get(rq, clamp_id);
1676 	/*
1677 	 * Defensive programming: this should never happen. If it happens,
1678 	 * e.g. due to future modification, warn and fixup the expected value.
1679 	 */
1680 	SCHED_WARN_ON(bucket->value > rq_clamp);
1681 	if (bucket->value >= rq_clamp) {
1682 		bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
1683 		uclamp_rq_set(rq, clamp_id, bkt_clamp);
1684 	}
1685 }
1686 
1687 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
1688 {
1689 	enum uclamp_id clamp_id;
1690 
1691 	/*
1692 	 * Avoid any overhead until uclamp is actually used by userspace.
1693 	 *
1694 	 * The condition is constructed such that a NOP is generated when
1695 	 * sched_uclamp_used is disabled.
1696 	 */
1697 	if (!static_branch_unlikely(&sched_uclamp_used))
1698 		return;
1699 
1700 	if (unlikely(!p->sched_class->uclamp_enabled))
1701 		return;
1702 
1703 	for_each_clamp_id(clamp_id)
1704 		uclamp_rq_inc_id(rq, p, clamp_id);
1705 
1706 	/* Reset clamp idle holding when there is one RUNNABLE task */
1707 	if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
1708 		rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
1709 }
1710 
1711 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
1712 {
1713 	enum uclamp_id clamp_id;
1714 
1715 	/*
1716 	 * Avoid any overhead until uclamp is actually used by userspace.
1717 	 *
1718 	 * The condition is constructed such that a NOP is generated when
1719 	 * sched_uclamp_used is disabled.
1720 	 */
1721 	if (!static_branch_unlikely(&sched_uclamp_used))
1722 		return;
1723 
1724 	if (unlikely(!p->sched_class->uclamp_enabled))
1725 		return;
1726 
1727 	for_each_clamp_id(clamp_id)
1728 		uclamp_rq_dec_id(rq, p, clamp_id);
1729 }
1730 
1731 static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p,
1732 				      enum uclamp_id clamp_id)
1733 {
1734 	if (!p->uclamp[clamp_id].active)
1735 		return;
1736 
1737 	uclamp_rq_dec_id(rq, p, clamp_id);
1738 	uclamp_rq_inc_id(rq, p, clamp_id);
1739 
1740 	/*
1741 	 * Make sure to clear the idle flag if we've transiently reached 0
1742 	 * active tasks on rq.
1743 	 */
1744 	if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE))
1745 		rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
1746 }
1747 
1748 static inline void
1749 uclamp_update_active(struct task_struct *p)
1750 {
1751 	enum uclamp_id clamp_id;
1752 	struct rq_flags rf;
1753 	struct rq *rq;
1754 
1755 	/*
1756 	 * Lock the task and the rq where the task is (or was) queued.
1757 	 *
1758 	 * We might lock the (previous) rq of a !RUNNABLE task, but that's the
1759 	 * price to pay to safely serialize util_{min,max} updates with
1760 	 * enqueues, dequeues and migration operations.
1761 	 * This is the same locking schema used by __set_cpus_allowed_ptr().
1762 	 */
1763 	rq = task_rq_lock(p, &rf);
1764 
1765 	/*
1766 	 * Setting the clamp bucket is serialized by task_rq_lock().
1767 	 * If the task is not yet RUNNABLE and its task_struct is not
1768 	 * affecting a valid clamp bucket, the next time it's enqueued,
1769 	 * it will already see the updated clamp bucket value.
1770 	 */
1771 	for_each_clamp_id(clamp_id)
1772 		uclamp_rq_reinc_id(rq, p, clamp_id);
1773 
1774 	task_rq_unlock(rq, p, &rf);
1775 }
1776 
1777 #ifdef CONFIG_UCLAMP_TASK_GROUP
1778 static inline void
1779 uclamp_update_active_tasks(struct cgroup_subsys_state *css)
1780 {
1781 	struct css_task_iter it;
1782 	struct task_struct *p;
1783 
1784 	css_task_iter_start(css, 0, &it);
1785 	while ((p = css_task_iter_next(&it)))
1786 		uclamp_update_active(p);
1787 	css_task_iter_end(&it);
1788 }
1789 
1790 static void cpu_util_update_eff(struct cgroup_subsys_state *css);
1791 #endif
1792 
1793 #ifdef CONFIG_SYSCTL
1794 #ifdef CONFIG_UCLAMP_TASK_GROUP
1795 static void uclamp_update_root_tg(void)
1796 {
1797 	struct task_group *tg = &root_task_group;
1798 
1799 	uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN],
1800 		      sysctl_sched_uclamp_util_min, false);
1801 	uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
1802 		      sysctl_sched_uclamp_util_max, false);
1803 
1804 	guard(rcu)();
1805 	cpu_util_update_eff(&root_task_group.css);
1806 }
1807 #else
1808 static void uclamp_update_root_tg(void) { }
1809 #endif
1810 
1811 static void uclamp_sync_util_min_rt_default(void)
1812 {
1813 	struct task_struct *g, *p;
1814 
1815 	/*
1816 	 * copy_process()			sysctl_uclamp
1817 	 *					  uclamp_min_rt = X;
1818 	 *   write_lock(&tasklist_lock)		  read_lock(&tasklist_lock)
1819 	 *   // link thread			  smp_mb__after_spinlock()
1820 	 *   write_unlock(&tasklist_lock)	  read_unlock(&tasklist_lock);
1821 	 *   sched_post_fork()			  for_each_process_thread()
1822 	 *     __uclamp_sync_rt()		    __uclamp_sync_rt()
1823 	 *
1824 	 * Ensures that either sched_post_fork() will observe the new
1825 	 * uclamp_min_rt or for_each_process_thread() will observe the new
1826 	 * task.
1827 	 */
1828 	read_lock(&tasklist_lock);
1829 	smp_mb__after_spinlock();
1830 	read_unlock(&tasklist_lock);
1831 
1832 	guard(rcu)();
1833 	for_each_process_thread(g, p)
1834 		uclamp_update_util_min_rt_default(p);
1835 }
1836 
1837 static int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
1838 				void *buffer, size_t *lenp, loff_t *ppos)
1839 {
1840 	bool update_root_tg = false;
1841 	int old_min, old_max, old_min_rt;
1842 	int result;
1843 
1844 	guard(mutex)(&uclamp_mutex);
1845 
1846 	old_min = sysctl_sched_uclamp_util_min;
1847 	old_max = sysctl_sched_uclamp_util_max;
1848 	old_min_rt = sysctl_sched_uclamp_util_min_rt_default;
1849 
1850 	result = proc_dointvec(table, write, buffer, lenp, ppos);
1851 	if (result)
1852 		goto undo;
1853 	if (!write)
1854 		return 0;
1855 
1856 	if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
1857 	    sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE	||
1858 	    sysctl_sched_uclamp_util_min_rt_default > SCHED_CAPACITY_SCALE) {
1859 
1860 		result = -EINVAL;
1861 		goto undo;
1862 	}
1863 
1864 	if (old_min != sysctl_sched_uclamp_util_min) {
1865 		uclamp_se_set(&uclamp_default[UCLAMP_MIN],
1866 			      sysctl_sched_uclamp_util_min, false);
1867 		update_root_tg = true;
1868 	}
1869 	if (old_max != sysctl_sched_uclamp_util_max) {
1870 		uclamp_se_set(&uclamp_default[UCLAMP_MAX],
1871 			      sysctl_sched_uclamp_util_max, false);
1872 		update_root_tg = true;
1873 	}
1874 
1875 	if (update_root_tg) {
1876 		static_branch_enable(&sched_uclamp_used);
1877 		uclamp_update_root_tg();
1878 	}
1879 
1880 	if (old_min_rt != sysctl_sched_uclamp_util_min_rt_default) {
1881 		static_branch_enable(&sched_uclamp_used);
1882 		uclamp_sync_util_min_rt_default();
1883 	}
1884 
1885 	/*
1886 	 * We update all RUNNABLE tasks only when task groups are in use.
1887 	 * Otherwise, keep it simple and do just a lazy update at each next
1888 	 * task enqueue time.
1889 	 */
1890 	return 0;
1891 
1892 undo:
1893 	sysctl_sched_uclamp_util_min = old_min;
1894 	sysctl_sched_uclamp_util_max = old_max;
1895 	sysctl_sched_uclamp_util_min_rt_default = old_min_rt;
1896 	return result;
1897 }
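
/*
 * Worked example of the constraints enforced above (values are in
 * capacity units, SCHED_CAPACITY_SCALE == 1024):
 *
 *   util_min = 512,  util_max = 1024  ->  accepted
 *   util_min = 800,  util_max = 512   ->  -EINVAL (min > max)
 *   util_max = 2048                   ->  -EINVAL (> SCHED_CAPACITY_SCALE)
 *
 * These knobs are typically exposed via procfs as (assuming the usual
 * sysctl names) /proc/sys/kernel/sched_util_clamp_{min,max} and
 * /proc/sys/kernel/sched_util_clamp_min_rt_default.
 */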
1898 #endif
1899 
1900 static int uclamp_validate(struct task_struct *p,
1901 			   const struct sched_attr *attr)
1902 {
1903 	int util_min = p->uclamp_req[UCLAMP_MIN].value;
1904 	int util_max = p->uclamp_req[UCLAMP_MAX].value;
1905 
1906 	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
1907 		util_min = attr->sched_util_min;
1908 
1909 		if (util_min + 1 > SCHED_CAPACITY_SCALE + 1)
1910 			return -EINVAL;
1911 	}
1912 
1913 	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {
1914 		util_max = attr->sched_util_max;
1915 
1916 		if (util_max + 1 > SCHED_CAPACITY_SCALE + 1)
1917 			return -EINVAL;
1918 	}
1919 
1920 	if (util_min != -1 && util_max != -1 && util_min > util_max)
1921 		return -EINVAL;
1922 
1923 	/*
1924 	 * We have valid uclamp attributes; make sure uclamp is enabled.
1925 	 *
1926 	 * We need to do that here, because enabling static branches is a
1927 	 * blocking operation which obviously cannot be done while holding
1928 	 * scheduler locks.
1929 	 */
1930 	static_branch_enable(&sched_uclamp_used);
1931 
1932 	return 0;
1933 }
1934 
1935 static bool uclamp_reset(const struct sched_attr *attr,
1936 			 enum uclamp_id clamp_id,
1937 			 struct uclamp_se *uc_se)
1938 {
1939 	/* Reset on sched class change for a non user-defined clamp value. */
1940 	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)) &&
1941 	    !uc_se->user_defined)
1942 		return true;
1943 
1944 	/* Reset on sched_util_{min,max} == -1. */
1945 	if (clamp_id == UCLAMP_MIN &&
1946 	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
1947 	    attr->sched_util_min == -1) {
1948 		return true;
1949 	}
1950 
1951 	if (clamp_id == UCLAMP_MAX &&
1952 	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
1953 	    attr->sched_util_max == -1) {
1954 		return true;
1955 	}
1956 
1957 	return false;
1958 }
1959 
1960 static void __setscheduler_uclamp(struct task_struct *p,
1961 				  const struct sched_attr *attr)
1962 {
1963 	enum uclamp_id clamp_id;
1964 
1965 	for_each_clamp_id(clamp_id) {
1966 		struct uclamp_se *uc_se = &p->uclamp_req[clamp_id];
1967 		unsigned int value;
1968 
1969 		if (!uclamp_reset(attr, clamp_id, uc_se))
1970 			continue;
1971 
1972 		/*
1973 		 * RT tasks by default have a 100% boost value that can be
1974 		 * modified at runtime.
1975 		 */
1976 		if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
1977 			value = sysctl_sched_uclamp_util_min_rt_default;
1978 		else
1979 			value = uclamp_none(clamp_id);
1980 
1981 		uclamp_se_set(uc_se, value, false);
1982 
1983 	}
1984 
1985 	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)))
1986 		return;
1987 
1988 	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
1989 	    attr->sched_util_min != -1) {
1990 		uclamp_se_set(&p->uclamp_req[UCLAMP_MIN],
1991 			      attr->sched_util_min, true);
1992 	}
1993 
1994 	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
1995 	    attr->sched_util_max != -1) {
1996 		uclamp_se_set(&p->uclamp_req[UCLAMP_MAX],
1997 			      attr->sched_util_max, true);
1998 	}
1999 }
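
/*
 * Sketch of how userspace requests the per-task clamps handled above,
 * via sched_setattr(2); there is no glibc wrapper, so a raw syscall()
 * is shown, and SCHED_FLAG_KEEP_ALL preserves the existing policy and
 * nice/priority values:
 *
 *	struct sched_attr attr = {
 *		.size           = sizeof(attr),
 *		.sched_flags    = SCHED_FLAG_KEEP_ALL |
 *				  SCHED_FLAG_UTIL_CLAMP_MIN |
 *				  SCHED_FLAG_UTIL_CLAMP_MAX,
 *		.sched_util_min = 256,
 *		.sched_util_max = 768,
 *	};
 *
 *	if (syscall(SYS_sched_setattr, pid, &attr, 0))
 *		perror("sched_setattr");
 *
 * Passing -1 for either value (with its flag set) resets it to the
 * non-user-defined default, per uclamp_reset() above.
 */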
2000 
2001 static void uclamp_fork(struct task_struct *p)
2002 {
2003 	enum uclamp_id clamp_id;
2004 
2005 	/*
2006 	 * We don't need to hold task_rq_lock() when updating p->uclamp_* here
2007 	 * as the task is still in its early fork stages.
2008 	 */
2009 	for_each_clamp_id(clamp_id)
2010 		p->uclamp[clamp_id].active = false;
2011 
2012 	if (likely(!p->sched_reset_on_fork))
2013 		return;
2014 
2015 	for_each_clamp_id(clamp_id) {
2016 		uclamp_se_set(&p->uclamp_req[clamp_id],
2017 			      uclamp_none(clamp_id), false);
2018 	}
2019 }
2020 
2021 static void uclamp_post_fork(struct task_struct *p)
2022 {
2023 	uclamp_update_util_min_rt_default(p);
2024 }
2025 
2026 static void __init init_uclamp_rq(struct rq *rq)
2027 {
2028 	enum uclamp_id clamp_id;
2029 	struct uclamp_rq *uc_rq = rq->uclamp;
2030 
2031 	for_each_clamp_id(clamp_id) {
2032 		uc_rq[clamp_id] = (struct uclamp_rq) {
2033 			.value = uclamp_none(clamp_id)
2034 		};
2035 	}
2036 
2037 	rq->uclamp_flags = UCLAMP_FLAG_IDLE;
2038 }
2039 
2040 static void __init init_uclamp(void)
2041 {
2042 	struct uclamp_se uc_max = {};
2043 	enum uclamp_id clamp_id;
2044 	int cpu;
2045 
2046 	for_each_possible_cpu(cpu)
2047 		init_uclamp_rq(cpu_rq(cpu));
2048 
2049 	for_each_clamp_id(clamp_id) {
2050 		uclamp_se_set(&init_task.uclamp_req[clamp_id],
2051 			      uclamp_none(clamp_id), false);
2052 	}
2053 
2054 	/* System defaults allow max clamp values for both indexes */
2055 	uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false);
2056 	for_each_clamp_id(clamp_id) {
2057 		uclamp_default[clamp_id] = uc_max;
2058 #ifdef CONFIG_UCLAMP_TASK_GROUP
2059 		root_task_group.uclamp_req[clamp_id] = uc_max;
2060 		root_task_group.uclamp[clamp_id] = uc_max;
2061 #endif
2062 	}
2063 }
2064 
2065 #else /* !CONFIG_UCLAMP_TASK */
2066 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { }
2067 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
2068 static inline int uclamp_validate(struct task_struct *p,
2069 				  const struct sched_attr *attr)
2070 {
2071 	return -EOPNOTSUPP;
2072 }
2073 static void __setscheduler_uclamp(struct task_struct *p,
2074 				  const struct sched_attr *attr) { }
2075 static inline void uclamp_fork(struct task_struct *p) { }
2076 static inline void uclamp_post_fork(struct task_struct *p) { }
2077 static inline void init_uclamp(void) { }
2078 #endif /* CONFIG_UCLAMP_TASK */
2079 
2080 bool sched_task_on_rq(struct task_struct *p)
2081 {
2082 	return task_on_rq_queued(p);
2083 }
2084 
2085 unsigned long get_wchan(struct task_struct *p)
2086 {
2087 	unsigned long ip = 0;
2088 	unsigned int state;
2089 
2090 	if (!p || p == current)
2091 		return 0;
2092 
2093 	/* Only get wchan if task is blocked and we can keep it that way. */
2094 	raw_spin_lock_irq(&p->pi_lock);
2095 	state = READ_ONCE(p->__state);
2096 	smp_rmb(); /* see try_to_wake_up() */
2097 	if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
2098 		ip = __get_wchan(p);
2099 	raw_spin_unlock_irq(&p->pi_lock);
2100 
2101 	return ip;
2102 }
2103 
2104 static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
2105 {
2106 	if (!(flags & ENQUEUE_NOCLOCK))
2107 		update_rq_clock(rq);
2108 
2109 	if (!(flags & ENQUEUE_RESTORE)) {
2110 		sched_info_enqueue(rq, p);
2111 		psi_enqueue(p, (flags & ENQUEUE_WAKEUP) && !(flags & ENQUEUE_MIGRATED));
2112 	}
2113 
2114 	uclamp_rq_inc(rq, p);
2115 	p->sched_class->enqueue_task(rq, p, flags);
2116 
2117 	if (sched_core_enabled(rq))
2118 		sched_core_enqueue(rq, p);
2119 }
2120 
2121 static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
2122 {
2123 	if (sched_core_enabled(rq))
2124 		sched_core_dequeue(rq, p, flags);
2125 
2126 	if (!(flags & DEQUEUE_NOCLOCK))
2127 		update_rq_clock(rq);
2128 
2129 	if (!(flags & DEQUEUE_SAVE)) {
2130 		sched_info_dequeue(rq, p);
2131 		psi_dequeue(p, flags & DEQUEUE_SLEEP);
2132 	}
2133 
2134 	uclamp_rq_dec(rq, p);
2135 	p->sched_class->dequeue_task(rq, p, flags);
2136 }
2137 
2138 void activate_task(struct rq *rq, struct task_struct *p, int flags)
2139 {
2140 	if (task_on_rq_migrating(p))
2141 		flags |= ENQUEUE_MIGRATED;
2142 	if (flags & ENQUEUE_MIGRATED)
2143 		sched_mm_cid_migrate_to(rq, p);
2144 
2145 	enqueue_task(rq, p, flags);
2146 
2147 	WRITE_ONCE(p->on_rq, TASK_ON_RQ_QUEUED);
2148 	ASSERT_EXCLUSIVE_WRITER(p->on_rq);
2149 }
2150 
2151 void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
2152 {
2153 	WRITE_ONCE(p->on_rq, (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING);
2154 	ASSERT_EXCLUSIVE_WRITER(p->on_rq);
2155 
2156 	dequeue_task(rq, p, flags);
2157 }
2158 
2159 static inline int __normal_prio(int policy, int rt_prio, int nice)
2160 {
2161 	int prio;
2162 
2163 	if (dl_policy(policy))
2164 		prio = MAX_DL_PRIO - 1;
2165 	else if (rt_policy(policy))
2166 		prio = MAX_RT_PRIO - 1 - rt_prio;
2167 	else
2168 		prio = NICE_TO_PRIO(nice);
2169 
2170 	return prio;
2171 }
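
/*
 * Worked examples of the mapping above (MAX_RT_PRIO == 100, MAX_DL_PRIO == 0,
 * NICE_TO_PRIO(n) == 120 + n); a lower numerical prio means a higher
 * scheduling priority:
 *
 *   SCHED_DEADLINE                  ->  prio = MAX_DL_PRIO - 1  =  -1
 *   SCHED_FIFO/RR, rt_priority 99   ->  prio = 99 - 99          =   0
 *   SCHED_FIFO/RR, rt_priority  1   ->  prio = 99 - 1           =  98
 *   SCHED_NORMAL,  nice -20         ->  prio = 120 + (-20)      = 100
 *   SCHED_NORMAL,  nice   0         ->  prio = 120 + 0          = 120
 *   SCHED_NORMAL,  nice  19         ->  prio = 120 + 19         = 139
 */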
2172 
2173 /*
2174  * Calculate the expected normal priority: i.e. priority
2175  * without taking RT-inheritance into account. Might be
2176  * boosted by interactivity modifiers. Changes upon fork,
2177  * setprio syscalls, and whenever the interactivity
2178  * estimator recalculates.
2179  */
2180 static inline int normal_prio(struct task_struct *p)
2181 {
2182 	return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio));
2183 }
2184 
2185 /*
2186  * Calculate the current priority, i.e. the priority
2187  * taken into account by the scheduler. This value might
2188  * be boosted by RT tasks, or might be boosted by
2189  * interactivity modifiers. Will be RT if the task got
2190  * RT-boosted. If not then it returns p->normal_prio.
2191  */
2192 static int effective_prio(struct task_struct *p)
2193 {
2194 	p->normal_prio = normal_prio(p);
2195 	/*
2196 	 * If the task is an RT task or was boosted to RT priority,
2197 	 * keep the priority unchanged. Otherwise, update priority
2198 	 * to the normal priority:
2199 	 */
2200 	if (!rt_prio(p->prio))
2201 		return p->normal_prio;
2202 	return p->prio;
2203 }
2204 
2205 /**
2206  * task_curr - is this task currently executing on a CPU?
2207  * @p: the task in question.
2208  *
2209  * Return: 1 if the task is currently executing. 0 otherwise.
2210  */
2211 inline int task_curr(const struct task_struct *p)
2212 {
2213 	return cpu_curr(task_cpu(p)) == p;
2214 }
2215 
2216 /*
2217  * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
2218  * use the balance_callback list if you want balancing.
2219  *
2220  * this means any call to check_class_changed() must be followed by a call to
2221  * balance_callback().
2222  */
2223 static inline void check_class_changed(struct rq *rq, struct task_struct *p,
2224 				       const struct sched_class *prev_class,
2225 				       int oldprio)
2226 {
2227 	if (prev_class != p->sched_class) {
2228 		if (prev_class->switched_from)
2229 			prev_class->switched_from(rq, p);
2230 
2231 		p->sched_class->switched_to(rq, p);
2232 	} else if (oldprio != p->prio || dl_task(p))
2233 		p->sched_class->prio_changed(rq, p, oldprio);
2234 }
2235 
2236 void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags)
2237 {
2238 	if (p->sched_class == rq->curr->sched_class)
2239 		rq->curr->sched_class->wakeup_preempt(rq, p, flags);
2240 	else if (sched_class_above(p->sched_class, rq->curr->sched_class))
2241 		resched_curr(rq);
2242 
2243 	/*
2244 	 * A queue event has occurred, and we're going to schedule.  In
2245 	 * this case, we can avoid a useless back-to-back clock update.
2246 	 */
2247 	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
2248 		rq_clock_skip_update(rq);
2249 }
2250 
2251 static __always_inline
2252 int __task_state_match(struct task_struct *p, unsigned int state)
2253 {
2254 	if (READ_ONCE(p->__state) & state)
2255 		return 1;
2256 
2257 	if (READ_ONCE(p->saved_state) & state)
2258 		return -1;
2259 
2260 	return 0;
2261 }
2262 
2263 static __always_inline
2264 int task_state_match(struct task_struct *p, unsigned int state)
2265 {
2266 	/*
2267 	 * Serialize against current_save_and_set_rtlock_wait_state(),
2268 	 * current_restore_rtlock_saved_state(), and __refrigerator().
2269 	 */
2270 	guard(raw_spinlock_irq)(&p->pi_lock);
2271 	return __task_state_match(p, state);
2272 }
2273 
2274 /*
2275  * wait_task_inactive - wait for a thread to unschedule.
2276  *
2277  * Wait for the thread to block in any of the states set in @match_state.
2278  * If it changes, i.e. @p might have woken up, then return zero.  When we
2279  * succeed in waiting for @p to be off its CPU, we return a positive number
2280  * (its total switch count).  If a second call a short while later returns the
2281  * same number, the caller can be sure that @p has remained unscheduled the
2282  * whole time.
2283  *
2284  * The caller must ensure that the task *will* unschedule sometime soon,
2285  * else this function might spin for a *long* time. This function can't
2286  * be called with interrupts off, or it may introduce deadlock with
2287  * smp_call_function() if an IPI is sent by the same process we are
2288  * waiting to become inactive.
2289  */
2290 unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
2291 {
2292 	int running, queued, match;
2293 	struct rq_flags rf;
2294 	unsigned long ncsw;
2295 	struct rq *rq;
2296 
2297 	for (;;) {
2298 		/*
2299 		 * We do the initial early heuristics without holding
2300 		 * any task-queue locks at all. We'll only try to get
2301 		 * the runqueue lock when things look like they will
2302 		 * work out!
2303 		 */
2304 		rq = task_rq(p);
2305 
2306 		/*
2307 		 * If the task is actively running on another CPU
2308 		 * still, just relax and busy-wait without holding
2309 		 * any locks.
2310 		 *
2311 		 * NOTE! Since we don't hold any locks, it's not
2312 		 * even sure that "rq" stays as the right runqueue!
2313 		 * But we don't care, since "task_on_cpu()" will
2314 		 * return false if the runqueue has changed and p
2315 		 * is actually now running somewhere else!
2316 		 */
2317 		while (task_on_cpu(rq, p)) {
2318 			if (!task_state_match(p, match_state))
2319 				return 0;
2320 			cpu_relax();
2321 		}
2322 
2323 		/*
2324 		 * Ok, time to look more closely! We need the rq
2325 		 * lock now, to be *sure*. If we're wrong, we'll
2326 		 * just go back and repeat.
2327 		 */
2328 		rq = task_rq_lock(p, &rf);
2329 		trace_sched_wait_task(p);
2330 		running = task_on_cpu(rq, p);
2331 		queued = task_on_rq_queued(p);
2332 		ncsw = 0;
2333 		if ((match = __task_state_match(p, match_state))) {
2334 			/*
2335 			 * When matching on p->saved_state, consider this task
2336 			 * still queued so it will wait.
2337 			 */
2338 			if (match < 0)
2339 				queued = 1;
2340 			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
2341 		}
2342 		task_rq_unlock(rq, p, &rf);
2343 
2344 		/*
2345 		 * If it changed from the expected state, bail out now.
2346 		 */
2347 		if (unlikely(!ncsw))
2348 			break;
2349 
2350 		/*
2351 		 * Was it really running after all now that we
2352 		 * checked with the proper locks actually held?
2353 		 *
2354 		 * Oops. Go back and try again..
2355 		 */
2356 		if (unlikely(running)) {
2357 			cpu_relax();
2358 			continue;
2359 		}
2360 
2361 		/*
2362 		 * It's not enough that it's not actively running,
2363 		 * it must be off the runqueue _entirely_, and not
2364 		 * preempted!
2365 		 *
2366 		 * So if it was still runnable (but just not actively
2367 		 * running right now), it's preempted, and we should
2368 		 * yield - it could be a while.
2369 		 */
2370 		if (unlikely(queued)) {
2371 			ktime_t to = NSEC_PER_SEC / HZ;
2372 
2373 			set_current_state(TASK_UNINTERRUPTIBLE);
2374 			schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD);
2375 			continue;
2376 		}
2377 
2378 		/*
2379 		 * Ahh, all good. It wasn't running, and it wasn't
2380 		 * runnable, which means that it will never become
2381 		 * running in the future either. We're all done!
2382 		 */
2383 		break;
2384 	}
2385 
2386 	return ncsw;
2387 }
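
/*
 * Minimal caller sketch of the ncsw contract documented above; the
 * inspect_blocked_task() helper is illustrative only:
 *
 *	unsigned long ncsw;
 *
 *	ncsw = wait_task_inactive(p, TASK_TRACED);
 *	if (!ncsw)
 *		return -ESRCH;		// state changed, e.g. @p woke up
 *	inspect_blocked_task(p);
 *	if (wait_task_inactive(p, TASK_TRACED) != ncsw)
 *		return -EAGAIN;		// @p ran in between, redo the work
 *
 * The MSB set in ncsw (via LONG_MIN) guarantees a non-zero "success"
 * value even when p->nvcsw happens to be 0.
 */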
2388 
2389 #ifdef CONFIG_SMP
2390 
2391 static void
2392 __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx);
2393 
2394 static int __set_cpus_allowed_ptr(struct task_struct *p,
2395 				  struct affinity_context *ctx);
2396 
2397 static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
2398 {
2399 	struct affinity_context ac = {
2400 		.new_mask  = cpumask_of(rq->cpu),
2401 		.flags     = SCA_MIGRATE_DISABLE,
2402 	};
2403 
2404 	if (likely(!p->migration_disabled))
2405 		return;
2406 
2407 	if (p->cpus_ptr != &p->cpus_mask)
2408 		return;
2409 
2410 	/*
2411 	 * Violates locking rules! see comment in __do_set_cpus_allowed().
2412 	 */
2413 	__do_set_cpus_allowed(p, &ac);
2414 }
2415 
2416 void migrate_disable(void)
2417 {
2418 	struct task_struct *p = current;
2419 
2420 	if (p->migration_disabled) {
2421 		p->migration_disabled++;
2422 		return;
2423 	}
2424 
2425 	guard(preempt)();
2426 	this_rq()->nr_pinned++;
2427 	p->migration_disabled = 1;
2428 }
2429 EXPORT_SYMBOL_GPL(migrate_disable);
2430 
2431 void migrate_enable(void)
2432 {
2433 	struct task_struct *p = current;
2434 	struct affinity_context ac = {
2435 		.new_mask  = &p->cpus_mask,
2436 		.flags     = SCA_MIGRATE_ENABLE,
2437 	};
2438 
2439 	if (p->migration_disabled > 1) {
2440 		p->migration_disabled--;
2441 		return;
2442 	}
2443 
2444 	if (WARN_ON_ONCE(!p->migration_disabled))
2445 		return;
2446 
2447 	/*
2448 	 * Ensure stop_task runs either before or after this, and that
2449 	 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
2450 	 */
2451 	guard(preempt)();
2452 	if (p->cpus_ptr != &p->cpus_mask)
2453 		__set_cpus_allowed_ptr(p, &ac);
2454 	/*
2455 	 * Mustn't clear migration_disabled() until cpus_ptr points back at the
2456 	 * regular cpus_mask, otherwise things that race (e.g.
2457 	 * select_fallback_rq) get confused.
2458 	 */
2459 	barrier();
2460 	p->migration_disabled = 0;
2461 	this_rq()->nr_pinned--;
2462 }
2463 EXPORT_SYMBOL_GPL(migrate_enable);
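
/*
 * Typical (illustrative) use of the pair above: keep the current task on
 * its CPU across a preemptible section, e.g. while operating on per-CPU
 * data in a section that may sleep on PREEMPT_RT; my_percpu_data and
 * do_work() are placeholders:
 *
 *	migrate_disable();
 *	data = this_cpu_ptr(&my_percpu_data);
 *	do_work(data);			// may be preempted, but not migrated
 *	migrate_enable();
 *
 * Nesting is allowed; only the outermost migrate_enable() re-enables
 * migration, per the migration_disabled counting above.
 */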
2464 
2465 static inline bool rq_has_pinned_tasks(struct rq *rq)
2466 {
2467 	return rq->nr_pinned;
2468 }
2469 
2470 /*
2471  * Per-CPU kthreads are allowed to run on !active && online CPUs, see
2472  * __set_cpus_allowed_ptr() and select_fallback_rq().
2473  */
2474 static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
2475 {
2476 	/* When not in the task's cpumask, no point in looking further. */
2477 	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
2478 		return false;
2479 
2480 	/* migrate_disabled() must be allowed to finish. */
2481 	if (is_migration_disabled(p))
2482 		return cpu_online(cpu);
2483 
2484 	/* Non-kernel threads are not allowed during either CPU online or offline transitions. */
2485 	if (!(p->flags & PF_KTHREAD))
2486 		return cpu_active(cpu) && task_cpu_possible(cpu, p);
2487 
2488 	/* KTHREAD_IS_PER_CPU is always allowed. */
2489 	if (kthread_is_per_cpu(p))
2490 		return cpu_online(cpu);
2491 
2492 	/* Regular kernel threads don't get to stay during offline. */
2493 	if (cpu_dying(cpu))
2494 		return false;
2495 
2496 	/* But are allowed during online. */
2497 	return cpu_online(cpu);
2498 }
2499 
2500 /*
2501  * This is how migration works:
2502  *
2503  * 1) we invoke migration_cpu_stop() on the target CPU using
2504  *    stop_one_cpu().
2505  * 2) stopper starts to run (implicitly forcing the migrated thread
2506  *    off the CPU)
2507  * 3) it checks whether the migrated task is still in the wrong runqueue.
2508  * 4) if it's in the wrong runqueue then the migration thread removes
2509  *    it and puts it into the right queue.
2510  * 5) stopper completes and stop_one_cpu() returns and the migration
2511  *    is done.
2512  */
2513 
2514 /*
2515  * move_queued_task - move a queued task to new rq.
2516  *
2517  * Returns (locked) new rq. Old rq's lock is released.
2518  */
2519 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
2520 				   struct task_struct *p, int new_cpu)
2521 {
2522 	lockdep_assert_rq_held(rq);
2523 
2524 	deactivate_task(rq, p, DEQUEUE_NOCLOCK);
2525 	set_task_cpu(p, new_cpu);
2526 	rq_unlock(rq, rf);
2527 
2528 	rq = cpu_rq(new_cpu);
2529 
2530 	rq_lock(rq, rf);
2531 	WARN_ON_ONCE(task_cpu(p) != new_cpu);
2532 	activate_task(rq, p, 0);
2533 	wakeup_preempt(rq, p, 0);
2534 
2535 	return rq;
2536 }
2537 
2538 struct migration_arg {
2539 	struct task_struct		*task;
2540 	int				dest_cpu;
2541 	struct set_affinity_pending	*pending;
2542 };
2543 
2544 /*
2545  * @refs: number of wait_for_completion()
2546  * @stop_pending: is @stop_work in use
2547  */
2548 struct set_affinity_pending {
2549 	refcount_t		refs;
2550 	unsigned int		stop_pending;
2551 	struct completion	done;
2552 	struct cpu_stop_work	stop_work;
2553 	struct migration_arg	arg;
2554 };
2555 
2556 /*
2557  * Move a (non-current) task off this CPU, onto the destination CPU. We're
2558  * doing this either because it can no longer run here (set_cpus_allowed()
2559  * moved it away from this CPU, or the CPU is going down), or because we're
2560  * attempting to rebalance this task on exec (sched_exec).
2561  *
2562  * So we race with normal scheduler movements, but that's OK, as long
2563  * as the task is no longer on this CPU.
2564  */
2565 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
2566 				 struct task_struct *p, int dest_cpu)
2567 {
2568 	/* Affinity changed (again). */
2569 	if (!is_cpu_allowed(p, dest_cpu))
2570 		return rq;
2571 
2572 	rq = move_queued_task(rq, rf, p, dest_cpu);
2573 
2574 	return rq;
2575 }
2576 
2577 /*
2578  * migration_cpu_stop - this will be executed by a highprio stopper thread
2579  * and performs thread migration by bumping thread off CPU then
2580  * 'pushing' onto another runqueue.
2581  */
2582 static int migration_cpu_stop(void *data)
2583 {
2584 	struct migration_arg *arg = data;
2585 	struct set_affinity_pending *pending = arg->pending;
2586 	struct task_struct *p = arg->task;
2587 	struct rq *rq = this_rq();
2588 	bool complete = false;
2589 	struct rq_flags rf;
2590 
2591 	/*
2592 	 * The original target CPU might have gone down and we might
2593 	 * be on another CPU but it doesn't matter.
2594 	 */
2595 	local_irq_save(rf.flags);
2596 	/*
2597 	 * We need to explicitly wake pending tasks before running
2598 	 * __migrate_task() such that we will not miss enforcing cpus_ptr
2599 	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
2600 	 */
2601 	flush_smp_call_function_queue();
2602 
2603 	raw_spin_lock(&p->pi_lock);
2604 	rq_lock(rq, &rf);
2605 
2606 	/*
2607 	 * If we were passed a pending, then ->stop_pending was set, thus
2608 	 * p->migration_pending must have remained stable.
2609 	 */
2610 	WARN_ON_ONCE(pending && pending != p->migration_pending);
2611 
2612 	/*
2613 	 * If task_rq(p) != rq, it cannot be migrated here, because we're
2614 	 * holding rq->lock; and if p->on_rq == 0 it cannot get enqueued because
2615 	 * we're holding p->pi_lock.
2616 	 */
2617 	if (task_rq(p) == rq) {
2618 		if (is_migration_disabled(p))
2619 			goto out;
2620 
2621 		if (pending) {
2622 			p->migration_pending = NULL;
2623 			complete = true;
2624 
2625 			if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask))
2626 				goto out;
2627 		}
2628 
2629 		if (task_on_rq_queued(p)) {
2630 			update_rq_clock(rq);
2631 			rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
2632 		} else {
2633 			p->wake_cpu = arg->dest_cpu;
2634 		}
2635 
2636 		/*
2637 		 * XXX __migrate_task() can fail, at which point we might end
2638 		 * up running on a dodgy CPU, AFAICT this can only happen
2639 		 * during CPU hotplug, at which point we'll get pushed out
2640 		 * anyway, so it's probably not a big deal.
2641 		 */
2642 
2643 	} else if (pending) {
2644 		/*
2645 		 * This happens when we get migrated between migrate_enable()'s
2646 		 * preempt_enable() and scheduling the stopper task. At that
2647 		 * point we're a regular task again and not current anymore.
2648 		 *
2649 		 * A !PREEMPT kernel has a giant hole here, which makes it far
2650 		 * more likely.
2651 		 */
2652 
2653 		/*
2654 		 * The task moved before the stopper got to run. We're holding
2655 		 * ->pi_lock, so the allowed mask is stable - if it got
2656 		 * somewhere allowed, we're done.
2657 		 */
2658 		if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) {
2659 			p->migration_pending = NULL;
2660 			complete = true;
2661 			goto out;
2662 		}
2663 
2664 		/*
2665 		 * When migrate_enable() hits a rq mis-match we can't reliably
2666 		 * determine is_migration_disabled() and so have to chase after
2667 		 * it.
2668 		 */
2669 		WARN_ON_ONCE(!pending->stop_pending);
2670 		preempt_disable();
2671 		task_rq_unlock(rq, p, &rf);
2672 		stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
2673 				    &pending->arg, &pending->stop_work);
2674 		preempt_enable();
2675 		return 0;
2676 	}
2677 out:
2678 	if (pending)
2679 		pending->stop_pending = false;
2680 	task_rq_unlock(rq, p, &rf);
2681 
2682 	if (complete)
2683 		complete_all(&pending->done);
2684 
2685 	return 0;
2686 }
2687 
2688 int push_cpu_stop(void *arg)
2689 {
2690 	struct rq *lowest_rq = NULL, *rq = this_rq();
2691 	struct task_struct *p = arg;
2692 
2693 	raw_spin_lock_irq(&p->pi_lock);
2694 	raw_spin_rq_lock(rq);
2695 
2696 	if (task_rq(p) != rq)
2697 		goto out_unlock;
2698 
2699 	if (is_migration_disabled(p)) {
2700 		p->migration_flags |= MDF_PUSH;
2701 		goto out_unlock;
2702 	}
2703 
2704 	p->migration_flags &= ~MDF_PUSH;
2705 
2706 	if (p->sched_class->find_lock_rq)
2707 		lowest_rq = p->sched_class->find_lock_rq(p, rq);
2708 
2709 	if (!lowest_rq)
2710 		goto out_unlock;
2711 
2712 	// XXX validate p is still the highest prio task
2713 	if (task_rq(p) == rq) {
2714 		deactivate_task(rq, p, 0);
2715 		set_task_cpu(p, lowest_rq->cpu);
2716 		activate_task(lowest_rq, p, 0);
2717 		resched_curr(lowest_rq);
2718 	}
2719 
2720 	double_unlock_balance(rq, lowest_rq);
2721 
2722 out_unlock:
2723 	rq->push_busy = false;
2724 	raw_spin_rq_unlock(rq);
2725 	raw_spin_unlock_irq(&p->pi_lock);
2726 
2727 	put_task_struct(p);
2728 	return 0;
2729 }
2730 
2731 /*
2732  * sched_class::set_cpus_allowed must do the below, but is not required to
2733  * actually call this function.
2734  */
2735 void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx)
2736 {
2737 	if (ctx->flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) {
2738 		p->cpus_ptr = ctx->new_mask;
2739 		return;
2740 	}
2741 
2742 	cpumask_copy(&p->cpus_mask, ctx->new_mask);
2743 	p->nr_cpus_allowed = cpumask_weight(ctx->new_mask);
2744 
2745 	/*
2746 	 * Swap in a new user_cpus_ptr if the SCA_USER flag is set.
2747 	 */
2748 	if (ctx->flags & SCA_USER)
2749 		swap(p->user_cpus_ptr, ctx->user_mask);
2750 }
2751 
2752 static void
2753 __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
2754 {
2755 	struct rq *rq = task_rq(p);
2756 	bool queued, running;
2757 
2758 	/*
2759 	 * This here violates the locking rules for affinity, since we're only
2760 	 * supposed to change these variables while holding both rq->lock and
2761 	 * p->pi_lock.
2762 	 *
2763 	 * HOWEVER, it magically works, because ttwu() is the only code that
2764 	 * accesses these variables under p->pi_lock and only does so after
2765 	 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
2766 	 * before finish_task().
2767 	 *
2768 	 * XXX do further audits, this smells like something putrid.
2769 	 */
2770 	if (ctx->flags & SCA_MIGRATE_DISABLE)
2771 		SCHED_WARN_ON(!p->on_cpu);
2772 	else
2773 		lockdep_assert_held(&p->pi_lock);
2774 
2775 	queued = task_on_rq_queued(p);
2776 	running = task_current(rq, p);
2777 
2778 	if (queued) {
2779 		/*
2780 		 * Because __kthread_bind() calls this on blocked tasks without
2781 		 * holding rq->lock.
2782 		 */
2783 		lockdep_assert_rq_held(rq);
2784 		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
2785 	}
2786 	if (running)
2787 		put_prev_task(rq, p);
2788 
2789 	p->sched_class->set_cpus_allowed(p, ctx);
2790 
2791 	if (queued)
2792 		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
2793 	if (running)
2794 		set_next_task(rq, p);
2795 }
2796 
2797 /*
2798  * Used for kthread_bind() and select_fallback_rq(), in both cases the user
2799  * affinity (if any) should be destroyed too.
2800  */
2801 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
2802 {
2803 	struct affinity_context ac = {
2804 		.new_mask  = new_mask,
2805 		.user_mask = NULL,
2806 		.flags     = SCA_USER,	/* clear the user requested mask */
2807 	};
2808 	union cpumask_rcuhead {
2809 		cpumask_t cpumask;
2810 		struct rcu_head rcu;
2811 	};
2812 
2813 	__do_set_cpus_allowed(p, &ac);
2814 
2815 	/*
2816 	 * Because this is called with p->pi_lock held, it is not possible
2817 	 * to use kfree() here (when PREEMPT_RT=y), therefore punt to using
2818 	 * kfree_rcu().
2819 	 */
2820 	kfree_rcu((union cpumask_rcuhead *)ac.user_mask, rcu);
2821 }
2822 
2823 static cpumask_t *alloc_user_cpus_ptr(int node)
2824 {
2825 	/*
2826 	 * See do_set_cpus_allowed() above for the rcu_head usage.
2827 	 */
2828 	int size = max_t(int, cpumask_size(), sizeof(struct rcu_head));
2829 
2830 	return kmalloc_node(size, GFP_KERNEL, node);
2831 }
2832 
2833 int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
2834 		      int node)
2835 {
2836 	cpumask_t *user_mask;
2837 	unsigned long flags;
2838 
2839 	/*
2840 	 * Always clear dst->user_cpus_ptr first, as src's and dst's user_cpus_ptr
2841 	 * may differ by now due to racing.
2842 	 */
2843 	dst->user_cpus_ptr = NULL;
2844 
2845 	/*
2846 	 * This check is racy and losing the race is a valid situation.
2847 	 * It is not worth the extra overhead of taking the pi_lock on
2848 	 * every fork/clone.
2849 	 */
2850 	if (data_race(!src->user_cpus_ptr))
2851 		return 0;
2852 
2853 	user_mask = alloc_user_cpus_ptr(node);
2854 	if (!user_mask)
2855 		return -ENOMEM;
2856 
2857 	/*
2858 	 * Use pi_lock to protect content of user_cpus_ptr
2859 	 *
2860 	 * Though unlikely, user_cpus_ptr can be reset to NULL by a concurrent
2861 	 * do_set_cpus_allowed().
2862 	 */
2863 	raw_spin_lock_irqsave(&src->pi_lock, flags);
2864 	if (src->user_cpus_ptr) {
2865 		swap(dst->user_cpus_ptr, user_mask);
2866 		cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
2867 	}
2868 	raw_spin_unlock_irqrestore(&src->pi_lock, flags);
2869 
2870 	if (unlikely(user_mask))
2871 		kfree(user_mask);
2872 
2873 	return 0;
2874 }
2875 
2876 static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p)
2877 {
2878 	struct cpumask *user_mask = NULL;
2879 
2880 	swap(p->user_cpus_ptr, user_mask);
2881 
2882 	return user_mask;
2883 }
2884 
2885 void release_user_cpus_ptr(struct task_struct *p)
2886 {
2887 	kfree(clear_user_cpus_ptr(p));
2888 }
2889 
2890 /*
2891  * This function is wildly self concurrent; here be dragons.
2892  *
2893  *
2894  * When given a valid mask, __set_cpus_allowed_ptr() must block until the
2895  * designated task is enqueued on an allowed CPU. If that task is currently
2896  * running, we have to kick it out using the CPU stopper.
2897  *
2898  * Migrate-Disable comes along and tramples all over our nice sandcastle.
2899  * Consider:
2900  *
2901  *     Initial conditions: P0->cpus_mask = [0, 1]
2902  *
2903  *     P0@CPU0                  P1
2904  *
2905  *     migrate_disable();
2906  *     <preempted>
2907  *                              set_cpus_allowed_ptr(P0, [1]);
2908  *
2909  * P1 *cannot* return from this set_cpus_allowed_ptr() call until P0 executes
2910  * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region).
2911  * This means we need the following scheme:
2912  *
2913  *     P0@CPU0                  P1
2914  *
2915  *     migrate_disable();
2916  *     <preempted>
2917  *                              set_cpus_allowed_ptr(P0, [1]);
2918  *                                <blocks>
2919  *     <resumes>
2920  *     migrate_enable();
2921  *       __set_cpus_allowed_ptr();
2922  *       <wakes local stopper>
2923  *                         `--> <woken on migration completion>
2924  *
2925  * Now the fun stuff: there may be several P1-like tasks, i.e. multiple
2926  * concurrent set_cpus_allowed_ptr(P0, [*]) calls. CPU affinity changes of any
2927  * task p are serialized by p->pi_lock, which we can leverage: the one that
2928  * should come into effect at the end of the Migrate-Disable region is the last
2929  * one. This means we only need to track a single cpumask (i.e. p->cpus_mask),
2930  * but we still need to properly signal those waiting tasks at the appropriate
2931  * moment.
2932  *
2933  * This is implemented using struct set_affinity_pending. The first
2934  * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will
2935  * setup an instance of that struct and install it on the targeted task_struct.
2936  * Any and all further callers will reuse that instance. Those then wait for
2937  * a completion signaled at the tail of the CPU stopper callback (1), triggered
2938  * on the end of the Migrate-Disable region (i.e. outermost migrate_enable()).
2939  *
2940  *
2941  * (1) In the cases covered above. There is one more where the completion is
2942  * signaled within affine_move_task() itself: when a subsequent affinity request
2943  * occurs after the stopper bailed out due to the targeted task still being
2944  * Migrate-Disable. Consider:
2945  *
2946  *     Initial conditions: P0->cpus_mask = [0, 1]
2947  *
2948  *     CPU0		  P1				P2
2949  *     <P0>
2950  *       migrate_disable();
2951  *       <preempted>
2952  *                        set_cpus_allowed_ptr(P0, [1]);
2953  *                          <blocks>
2954  *     <migration/0>
2955  *       migration_cpu_stop()
2956  *         is_migration_disabled()
2957  *           <bails>
2958  *                                                       set_cpus_allowed_ptr(P0, [0, 1]);
2959  *                                                         <signal completion>
2960  *                          <awakes>
2961  *
2962  * Note that the above is safe vs a concurrent migrate_enable(), as any
2963  * pending affinity completion is preceded by an uninstallation of
2964  * p->migration_pending done with p->pi_lock held.
2965  */
2966 static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf,
2967 			    int dest_cpu, unsigned int flags)
2968 	__releases(rq->lock)
2969 	__releases(p->pi_lock)
2970 {
2971 	struct set_affinity_pending my_pending = { }, *pending = NULL;
2972 	bool stop_pending, complete = false;
2973 
2974 	/* Can the task run on the task's current CPU? If so, we're done */
2975 	if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
2976 		struct task_struct *push_task = NULL;
2977 
2978 		if ((flags & SCA_MIGRATE_ENABLE) &&
2979 		    (p->migration_flags & MDF_PUSH) && !rq->push_busy) {
2980 			rq->push_busy = true;
2981 			push_task = get_task_struct(p);
2982 		}
2983 
2984 		/*
2985 		 * If there are pending waiters, but no pending stop_work,
2986 		 * then complete now.
2987 		 */
2988 		pending = p->migration_pending;
2989 		if (pending && !pending->stop_pending) {
2990 			p->migration_pending = NULL;
2991 			complete = true;
2992 		}
2993 
2994 		preempt_disable();
2995 		task_rq_unlock(rq, p, rf);
2996 		if (push_task) {
2997 			stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
2998 					    p, &rq->push_work);
2999 		}
3000 		preempt_enable();
3001 
3002 		if (complete)
3003 			complete_all(&pending->done);
3004 
3005 		return 0;
3006 	}
3007 
3008 	if (!(flags & SCA_MIGRATE_ENABLE)) {
3009 		/* serialized by p->pi_lock */
3010 		if (!p->migration_pending) {
3011 			/* Install the request */
3012 			refcount_set(&my_pending.refs, 1);
3013 			init_completion(&my_pending.done);
3014 			my_pending.arg = (struct migration_arg) {
3015 				.task = p,
3016 				.dest_cpu = dest_cpu,
3017 				.pending = &my_pending,
3018 			};
3019 
3020 			p->migration_pending = &my_pending;
3021 		} else {
3022 			pending = p->migration_pending;
3023 			refcount_inc(&pending->refs);
3024 			/*
3025 			 * Affinity has changed, but we've already installed a
3026 			 * pending. migration_cpu_stop() *must* see this, else
3027 			 * we risk a completion of the pending despite having a
3028 			 * task on a disallowed CPU.
3029 			 *
3030 			 * Serialized by p->pi_lock, so this is safe.
3031 			 */
3032 			pending->arg.dest_cpu = dest_cpu;
3033 		}
3034 	}
3035 	pending = p->migration_pending;
3036 	/*
3037 	 * - !MIGRATE_ENABLE:
3038 	 *   we'll have installed a pending if there wasn't one already.
3039 	 *
3040 	 * - MIGRATE_ENABLE:
3041 	 *   we're here because the current CPU isn't matching anymore,
3042 	 *   the only way that can happen is because of a concurrent
3043 	 *   set_cpus_allowed_ptr() call, which should then still be
3044 	 *   pending completion.
3045 	 *
3046 	 * Either way, we really should have a @pending here.
3047 	 */
3048 	if (WARN_ON_ONCE(!pending)) {
3049 		task_rq_unlock(rq, p, rf);
3050 		return -EINVAL;
3051 	}
3052 
3053 	if (task_on_cpu(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) {
3054 		/*
3055 		 * MIGRATE_ENABLE gets here because 'p == current', but for
3056 		 * anything else we cannot do is_migration_disabled(), so punt
3057 		 * and have the stopper function handle it all race-free.
3058 		 */
3059 		stop_pending = pending->stop_pending;
3060 		if (!stop_pending)
3061 			pending->stop_pending = true;
3062 
3063 		if (flags & SCA_MIGRATE_ENABLE)
3064 			p->migration_flags &= ~MDF_PUSH;
3065 
3066 		preempt_disable();
3067 		task_rq_unlock(rq, p, rf);
3068 		if (!stop_pending) {
3069 			stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
3070 					    &pending->arg, &pending->stop_work);
3071 		}
3072 		preempt_enable();
3073 
3074 		if (flags & SCA_MIGRATE_ENABLE)
3075 			return 0;
3076 	} else {
3077 
3078 		if (!is_migration_disabled(p)) {
3079 			if (task_on_rq_queued(p))
3080 				rq = move_queued_task(rq, rf, p, dest_cpu);
3081 
3082 			if (!pending->stop_pending) {
3083 				p->migration_pending = NULL;
3084 				complete = true;
3085 			}
3086 		}
3087 		task_rq_unlock(rq, p, rf);
3088 
3089 		if (complete)
3090 			complete_all(&pending->done);
3091 	}
3092 
3093 	wait_for_completion(&pending->done);
3094 
3095 	if (refcount_dec_and_test(&pending->refs))
3096 		wake_up_var(&pending->refs); /* No UaF, just an address */
3097 
3098 	/*
3099 	 * Block the original owner of &pending until all subsequent callers
3100 	 * have seen the completion and decremented the refcount
3101 	 */
3102 	wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs));
3103 
3104 	/* ARGH */
3105 	WARN_ON_ONCE(my_pending.stop_pending);
3106 
3107 	return 0;
3108 }
3109 
3110 /*
3111  * Called with both p->pi_lock and rq->lock held; drops both before returning.
3112  */
3113 static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
3114 					 struct affinity_context *ctx,
3115 					 struct rq *rq,
3116 					 struct rq_flags *rf)
3117 	__releases(rq->lock)
3118 	__releases(p->pi_lock)
3119 {
3120 	const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
3121 	const struct cpumask *cpu_valid_mask = cpu_active_mask;
3122 	bool kthread = p->flags & PF_KTHREAD;
3123 	unsigned int dest_cpu;
3124 	int ret = 0;
3125 
3126 	update_rq_clock(rq);
3127 
3128 	if (kthread || is_migration_disabled(p)) {
3129 		/*
3130 		 * Kernel threads are allowed on online && !active CPUs,
3131 		 * however, during cpu-hot-unplug, even these might get pushed
3132 		 * away if not KTHREAD_IS_PER_CPU.
3133 		 *
3134 		 * Specifically, migration_disabled() tasks must not fail the
3135 		 * cpumask_any_and_distribute() pick below, esp. so on
3136 		 * SCA_MIGRATE_ENABLE, otherwise we'll not call
3137 		 * set_cpus_allowed_common() and actually reset p->cpus_ptr.
3138 		 */
3139 		cpu_valid_mask = cpu_online_mask;
3140 	}
3141 
3142 	if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) {
3143 		ret = -EINVAL;
3144 		goto out;
3145 	}
3146 
3147 	/*
3148 	 * Must re-check here, to close a race against __kthread_bind(),
3149 	 * sched_setaffinity() is not guaranteed to observe the flag.
3150 	 */
3151 	if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
3152 		ret = -EINVAL;
3153 		goto out;
3154 	}
3155 
3156 	if (!(ctx->flags & SCA_MIGRATE_ENABLE)) {
3157 		if (cpumask_equal(&p->cpus_mask, ctx->new_mask)) {
3158 			if (ctx->flags & SCA_USER)
3159 				swap(p->user_cpus_ptr, ctx->user_mask);
3160 			goto out;
3161 		}
3162 
3163 		if (WARN_ON_ONCE(p == current &&
3164 				 is_migration_disabled(p) &&
3165 				 !cpumask_test_cpu(task_cpu(p), ctx->new_mask))) {
3166 			ret = -EBUSY;
3167 			goto out;
3168 		}
3169 	}
3170 
3171 	/*
3172 	 * Picking a ~random CPU helps in cases where we are changing affinity
3173 	 * for groups of tasks (i.e. cpuset), so that load balancing is not
3174 	 * immediately required to distribute the tasks within their new mask.
3175 	 */
3176 	dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, ctx->new_mask);
3177 	if (dest_cpu >= nr_cpu_ids) {
3178 		ret = -EINVAL;
3179 		goto out;
3180 	}
3181 
3182 	__do_set_cpus_allowed(p, ctx);
3183 
3184 	return affine_move_task(rq, p, rf, dest_cpu, ctx->flags);
3185 
3186 out:
3187 	task_rq_unlock(rq, p, rf);
3188 
3189 	return ret;
3190 }
3191 
3192 /*
3193  * Change a given task's CPU affinity. Migrate the thread to a
3194  * proper CPU and schedule it away if the CPU it's executing on
3195  * is removed from the allowed bitmask.
3196  *
3197  * NOTE: the caller must have a valid reference to the task, the
3198  * task must not exit() & deallocate itself prematurely. The
3199  * call is not atomic; no spinlocks may be held.
3200  */
3201 static int __set_cpus_allowed_ptr(struct task_struct *p,
3202 				  struct affinity_context *ctx)
3203 {
3204 	struct rq_flags rf;
3205 	struct rq *rq;
3206 
3207 	rq = task_rq_lock(p, &rf);
3208 	/*
3209 	 * Masking should be skipped if SCA_USER or any of the SCA_MIGRATE_*
3210 	 * flags are set.
3211 	 */
3212 	if (p->user_cpus_ptr &&
3213 	    !(ctx->flags & (SCA_USER | SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) &&
3214 	    cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr))
3215 		ctx->new_mask = rq->scratch_mask;
3216 
3217 	return __set_cpus_allowed_ptr_locked(p, ctx, rq, &rf);
3218 }
3219 
3220 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
3221 {
3222 	struct affinity_context ac = {
3223 		.new_mask  = new_mask,
3224 		.flags     = 0,
3225 	};
3226 
3227 	return __set_cpus_allowed_ptr(p, &ac);
3228 }
3229 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
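
/*
 * Illustrative in-kernel use of the exported helper above: pin a freshly
 * created kthread to one CPU before waking it (my_thread_fn and the error
 * handling are placeholders):
 *
 *	struct task_struct *tsk;
 *
 *	tsk = kthread_create(my_thread_fn, NULL, "my_worker/%d", cpu);
 *	if (!IS_ERR(tsk)) {
 *		set_cpus_allowed_ptr(tsk, cpumask_of(cpu));
 *		wake_up_process(tsk);
 *	}
 *
 * Strictly per-CPU kthreads normally use kthread_bind() instead, which
 * also sets PF_NO_SETAFFINITY.
 */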
3230 
3231 /*
3232  * Change a given task's CPU affinity to the intersection of its current
3233  * affinity mask and @subset_mask, writing the resulting mask to @new_mask.
3234  * If user_cpus_ptr is defined, use it as the basis for restricting CPU
3235  * affinity or use cpu_online_mask instead.
3236  *
3237  * If the resulting mask is empty, leave the affinity unchanged and return
3238  * -EINVAL.
3239  */
3240 static int restrict_cpus_allowed_ptr(struct task_struct *p,
3241 				     struct cpumask *new_mask,
3242 				     const struct cpumask *subset_mask)
3243 {
3244 	struct affinity_context ac = {
3245 		.new_mask  = new_mask,
3246 		.flags     = 0,
3247 	};
3248 	struct rq_flags rf;
3249 	struct rq *rq;
3250 	int err;
3251 
3252 	rq = task_rq_lock(p, &rf);
3253 
3254 	/*
3255 	 * Forcefully restricting the affinity of a deadline task is
3256 	 * likely to cause problems, so fail and noisily override the
3257 	 * mask entirely.
3258 	 */
3259 	if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
3260 		err = -EPERM;
3261 		goto err_unlock;
3262 	}
3263 
3264 	if (!cpumask_and(new_mask, task_user_cpus(p), subset_mask)) {
3265 		err = -EINVAL;
3266 		goto err_unlock;
3267 	}
3268 
3269 	return __set_cpus_allowed_ptr_locked(p, &ac, rq, &rf);
3270 
3271 err_unlock:
3272 	task_rq_unlock(rq, p, &rf);
3273 	return err;
3274 }
3275 
3276 /*
3277  * Restrict the CPU affinity of task @p so that it is a subset of
3278  * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
3279  * old affinity mask. If the resulting mask is empty, we warn and walk
3280  * up the cpuset hierarchy until we find a suitable mask.
3281  */
3282 void force_compatible_cpus_allowed_ptr(struct task_struct *p)
3283 {
3284 	cpumask_var_t new_mask;
3285 	const struct cpumask *override_mask = task_cpu_possible_mask(p);
3286 
3287 	alloc_cpumask_var(&new_mask, GFP_KERNEL);
3288 
3289 	/*
3290 	 * __migrate_task() can fail silently in the face of concurrent
3291 	 * offlining of the chosen destination CPU, so take the hotplug
3292 	 * lock to ensure that the migration succeeds.
3293 	 */
3294 	cpus_read_lock();
3295 	if (!cpumask_available(new_mask))
3296 		goto out_set_mask;
3297 
3298 	if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask))
3299 		goto out_free_mask;
3300 
3301 	/*
3302 	 * We failed to find a valid subset of the affinity mask for the
3303 	 * task, so override it based on its cpuset hierarchy.
3304 	 */
3305 	cpuset_cpus_allowed(p, new_mask);
3306 	override_mask = new_mask;
3307 
3308 out_set_mask:
3309 	if (printk_ratelimit()) {
3310 		printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
3311 				task_pid_nr(p), p->comm,
3312 				cpumask_pr_args(override_mask));
3313 	}
3314 
3315 	WARN_ON(set_cpus_allowed_ptr(p, override_mask));
3316 out_free_mask:
3317 	cpus_read_unlock();
3318 	free_cpumask_var(new_mask);
3319 }
3320 
3321 static int
3322 __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx);
3323 
3324 /*
3325  * Restore the affinity of a task @p which was previously restricted by a
3326  * call to force_compatible_cpus_allowed_ptr().
3327  *
3328  * It is the caller's responsibility to serialise this with any calls to
3329  * force_compatible_cpus_allowed_ptr(@p).
3330  */
3331 void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
3332 {
3333 	struct affinity_context ac = {
3334 		.new_mask  = task_user_cpus(p),
3335 		.flags     = 0,
3336 	};
3337 	int ret;
3338 
3339 	/*
3340 	 * Try to restore the old affinity mask with __sched_setaffinity().
3341 	 * Cpuset masking will be done there too.
3342 	 */
3343 	ret = __sched_setaffinity(p, &ac);
3344 	WARN_ON_ONCE(ret);
3345 }
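
/*
 * The two helpers above bracket code that can only run on a subset of
 * CPUs. A hypothetical sketch (run_narrow_affinity_work() is a
 * placeholder, not a function in this file):
 *
 *	force_compatible_cpus_allowed_ptr(p);	// clamp to task_cpu_possible_mask()
 *	run_narrow_affinity_work(p);
 *	relax_compatible_cpus_allowed_ptr(p);	// restore the user-requested mask
 *
 * As noted above, the caller must serialise the force/relax pair itself.
 */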
3346 
3347 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
3348 {
3349 #ifdef CONFIG_SCHED_DEBUG
3350 	unsigned int state = READ_ONCE(p->__state);
3351 
3352 	/*
3353 	 * We should never call set_task_cpu() on a blocked task,
3354 	 * ttwu() will sort out the placement.
3355 	 */
3356 	WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq);
3357 
3358 	/*
3359 	 * A migrating fair-class task must have p->on_rq == TASK_ON_RQ_MIGRATING,
3360 	 * because schedstat_wait_{start,end} rebase the migrating task's wait_start
3361 	 * time based on p->on_rq.
3362 	 */
3363 	WARN_ON_ONCE(state == TASK_RUNNING &&
3364 		     p->sched_class == &fair_sched_class &&
3365 		     (p->on_rq && !task_on_rq_migrating(p)));
3366 
3367 #ifdef CONFIG_LOCKDEP
3368 	/*
3369 	 * The caller should hold either p->pi_lock or rq->lock, when changing
3370 	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
3371 	 *
3372 	 * sched_move_task() holds both and thus holding either pins the cgroup,
3373 	 * see task_group().
3374 	 *
3375 	 * Furthermore, all task_rq users should acquire both locks, see
3376 	 * task_rq_lock().
3377 	 */
3378 	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
3379 				      lockdep_is_held(__rq_lockp(task_rq(p)))));
3380 #endif
3381 	/*
3382 	 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
3383 	 */
3384 	WARN_ON_ONCE(!cpu_online(new_cpu));
3385 
3386 	WARN_ON_ONCE(is_migration_disabled(p));
3387 #endif
3388 
3389 	trace_sched_migrate_task(p, new_cpu);
3390 
3391 	if (task_cpu(p) != new_cpu) {
3392 		if (p->sched_class->migrate_task_rq)
3393 			p->sched_class->migrate_task_rq(p, new_cpu);
3394 		p->se.nr_migrations++;
3395 		rseq_migrate(p);
3396 		sched_mm_cid_migrate_from(p);
3397 		perf_event_task_migrate(p);
3398 	}
3399 
3400 	__set_task_cpu(p, new_cpu);
3401 }
3402 
3403 #ifdef CONFIG_NUMA_BALANCING
3404 static void __migrate_swap_task(struct task_struct *p, int cpu)
3405 {
3406 	if (task_on_rq_queued(p)) {
3407 		struct rq *src_rq, *dst_rq;
3408 		struct rq_flags srf, drf;
3409 
3410 		src_rq = task_rq(p);
3411 		dst_rq = cpu_rq(cpu);
3412 
3413 		rq_pin_lock(src_rq, &srf);
3414 		rq_pin_lock(dst_rq, &drf);
3415 
3416 		deactivate_task(src_rq, p, 0);
3417 		set_task_cpu(p, cpu);
3418 		activate_task(dst_rq, p, 0);
3419 		wakeup_preempt(dst_rq, p, 0);
3420 
3421 		rq_unpin_lock(dst_rq, &drf);
3422 		rq_unpin_lock(src_rq, &srf);
3423 
3424 	} else {
3425 		/*
3426 		 * Task isn't running anymore; make it appear like we migrated
3427 		 * it before it went to sleep. This means on wakeup we make the
3428 		 * previous CPU our target instead of where it really is.
3429 		 */
3430 		p->wake_cpu = cpu;
3431 	}
3432 }
3433 
3434 struct migration_swap_arg {
3435 	struct task_struct *src_task, *dst_task;
3436 	int src_cpu, dst_cpu;
3437 };
3438 
3439 static int migrate_swap_stop(void *data)
3440 {
3441 	struct migration_swap_arg *arg = data;
3442 	struct rq *src_rq, *dst_rq;
3443 
3444 	if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
3445 		return -EAGAIN;
3446 
3447 	src_rq = cpu_rq(arg->src_cpu);
3448 	dst_rq = cpu_rq(arg->dst_cpu);
3449 
3450 	guard(double_raw_spinlock)(&arg->src_task->pi_lock, &arg->dst_task->pi_lock);
3451 	guard(double_rq_lock)(src_rq, dst_rq);
3452 
3453 	if (task_cpu(arg->dst_task) != arg->dst_cpu)
3454 		return -EAGAIN;
3455 
3456 	if (task_cpu(arg->src_task) != arg->src_cpu)
3457 		return -EAGAIN;
3458 
3459 	if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr))
3460 		return -EAGAIN;
3461 
3462 	if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr))
3463 		return -EAGAIN;
3464 
3465 	__migrate_swap_task(arg->src_task, arg->dst_cpu);
3466 	__migrate_swap_task(arg->dst_task, arg->src_cpu);
3467 
3468 	return 0;
3469 }
3470 
3471 /*
3472  * Cross migrate two tasks
3473  */
3474 int migrate_swap(struct task_struct *cur, struct task_struct *p,
3475 		int target_cpu, int curr_cpu)
3476 {
3477 	struct migration_swap_arg arg;
3478 	int ret = -EINVAL;
3479 
3480 	arg = (struct migration_swap_arg){
3481 		.src_task = cur,
3482 		.src_cpu = curr_cpu,
3483 		.dst_task = p,
3484 		.dst_cpu = target_cpu,
3485 	};
3486 
3487 	if (arg.src_cpu == arg.dst_cpu)
3488 		goto out;
3489 
3490 	/*
3491 	 * These three tests are all lockless; this is OK since all of them
3492 	 * will be re-checked with proper locks held further down the line.
3493 	 */
3494 	if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
3495 		goto out;
3496 
3497 	if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr))
3498 		goto out;
3499 
3500 	if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr))
3501 		goto out;
3502 
3503 	trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
3504 	ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);
3505 
3506 out:
3507 	return ret;
3508 }
3509 #endif /* CONFIG_NUMA_BALANCING */
3510 
3511 /***
3512  * kick_process - kick a running thread to enter/exit the kernel
3513  * @p: the to-be-kicked thread
3514  *
3515  * Cause a process which is running on another CPU to enter
3516  * kernel-mode, without any delay. (to get signals handled.)
3517  *
3518  * NOTE: this function doesn't have to take the runqueue lock,
3519  * because all it wants to ensure is that the remote task enters
3520  * the kernel. If the IPI races and the task has been migrated
3521  * to another CPU then no harm is done and the purpose has been
3522  * achieved as well.
3523  */
3524 void kick_process(struct task_struct *p)
3525 {
3526 	guard(preempt)();
3527 	int cpu = task_cpu(p);
3528 
3529 	if ((cpu != smp_processor_id()) && task_curr(p))
3530 		smp_send_reschedule(cpu);
3531 }
3532 EXPORT_SYMBOL_GPL(kick_process);
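
/*
 * Illustrative use of the pattern: after marking a signal pending, force
 * a running target through the kernel so the signal is noticed promptly.
 * This mirrors what signal delivery does; the actual call site lives
 * outside this file:
 *
 *	set_tsk_thread_flag(t, TIF_SIGPENDING);
 *	if (!wake_up_state(t, TASK_INTERRUPTIBLE))
 *		kick_process(t);
 */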
3533 
3534 /*
3535  * ->cpus_ptr is protected by both rq->lock and p->pi_lock
3536  *
3537  * A few notes on cpu_active vs cpu_online:
3538  *
3539  *  - cpu_active must be a subset of cpu_online
3540  *
3541  *  - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
3542  *    see __set_cpus_allowed_ptr(). At this point the newly online
3543  *    CPU isn't yet part of the sched domains, and balancing will not
3544  *    see it.
3545  *
3546  *  - on CPU-down we clear cpu_active() to mask the sched domains and
3547  *    prevent the load balancer from placing new tasks on the to-be-removed
3548  *    CPU. Existing tasks will remain running there and will be taken
3549  *    off.
3550  *
3551  * This means that fallback selection must not select !active CPUs,
3552  * and it can assume that any active CPU must be online. Conversely,
3553  * select_task_rq() below may allow selection of !active CPUs in order
3554  * to satisfy the above rules.
3555  */
3556 static int select_fallback_rq(int cpu, struct task_struct *p)
3557 {
3558 	int nid = cpu_to_node(cpu);
3559 	const struct cpumask *nodemask = NULL;
3560 	enum { cpuset, possible, fail } state = cpuset;
3561 	int dest_cpu;
3562 
3563 	/*
3564 	 * If the node that the CPU is on has been offlined, cpu_to_node()
3565 	 * will return -1. There is no CPU on the node, and we should
3566 	 * select a CPU from another node.
3567 	 */
3568 	if (nid != -1) {
3569 		nodemask = cpumask_of_node(nid);
3570 
3571 		/* Look for allowed, online CPU in same node. */
3572 		for_each_cpu(dest_cpu, nodemask) {
3573 			if (is_cpu_allowed(p, dest_cpu))
3574 				return dest_cpu;
3575 		}
3576 	}
3577 
3578 	for (;;) {
3579 		/* Any allowed, online CPU? */
3580 		for_each_cpu(dest_cpu, p->cpus_ptr) {
3581 			if (!is_cpu_allowed(p, dest_cpu))
3582 				continue;
3583 
3584 			goto out;
3585 		}
3586 
3587 		/* No more Mr. Nice Guy. */
3588 		switch (state) {
3589 		case cpuset:
3590 			if (cpuset_cpus_allowed_fallback(p)) {
3591 				state = possible;
3592 				break;
3593 			}
3594 			fallthrough;
3595 		case possible:
3596 			/*
3597 			 * XXX When called from select_task_rq() we only
3598 			 * hold p->pi_lock and again violate locking order.
3599 			 *
3600 			 * More yuck to audit.
3601 			 */
3602 			do_set_cpus_allowed(p, task_cpu_possible_mask(p));
3603 			state = fail;
3604 			break;
3605 		case fail:
3606 			BUG();
3607 			break;
3608 		}
3609 	}
3610 
3611 out:
3612 	if (state != cpuset) {
3613 		/*
3614 		 * Don't tell them about moving exiting tasks or
3615 		 * kernel threads (both mm NULL), since they never
3616 		 * leave the kernel.
3617 		 */
3618 		if (p->mm && printk_ratelimit()) {
3619 			printk_deferred("process %d (%s) no longer affine to cpu%d\n",
3620 					task_pid_nr(p), p->comm, cpu);
3621 		}
3622 	}
3623 
3624 	return dest_cpu;
3625 }
3626 
3627 /*
3628  * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
3629  */
3630 static inline
3631 int select_task_rq(struct task_struct *p, int cpu, int wake_flags)
3632 {
3633 	lockdep_assert_held(&p->pi_lock);
3634 
3635 	if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p))
3636 		cpu = p->sched_class->select_task_rq(p, cpu, wake_flags);
3637 	else
3638 		cpu = cpumask_any(p->cpus_ptr);
3639 
3640 	/*
3641 	 * In order not to call set_task_cpu() on a blocking task we need
3642 	 * to rely on ttwu() to place the task on a valid ->cpus_ptr
3643 	 * CPU.
3644 	 *
3645 	 * Since this is common to all placement strategies, this lives here.
3646 	 *
3647 	 * [ this allows ->select_task() to simply return task_cpu(p) and
3648 	 *   not worry about this generic constraint ]
3649 	 */
3650 	if (unlikely(!is_cpu_allowed(p, cpu)))
3651 		cpu = select_fallback_rq(task_cpu(p), p);
3652 
3653 	return cpu;
3654 }
3655 
3656 void sched_set_stop_task(int cpu, struct task_struct *stop)
3657 {
3658 	static struct lock_class_key stop_pi_lock;
3659 	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
3660 	struct task_struct *old_stop = cpu_rq(cpu)->stop;
3661 
3662 	if (stop) {
3663 		/*
3664 		 * Make it appear like a SCHED_FIFO task, it's something
3665 		 * userspace knows about and won't get confused by.
3666 		 *
3667 		 * Also, it will make PI more or less work without too
3668 		 * much confusion -- but then, stop work should not
3669 		 * rely on PI working anyway.
3670 		 */
3671 		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
3672 
3673 		stop->sched_class = &stop_sched_class;
3674 
3675 		/*
3676 		 * The PI code calls rt_mutex_setprio() with ->pi_lock held to
3677 		 * adjust the effective priority of a task. As a result,
3678 		 * rt_mutex_setprio() can trigger (RT) balancing operations,
3679 		 * which can then trigger wakeups of the stop thread to push
3680 		 * around the current task.
3681 		 *
3682 		 * The stop task itself will never be part of the PI-chain, it
3683 		 * never blocks, therefore that ->pi_lock recursion is safe.
3684 		 * Tell lockdep about this by placing the stop->pi_lock in its
3685 		 * own class.
3686 		 */
3687 		lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
3688 	}
3689 
3690 	cpu_rq(cpu)->stop = stop;
3691 
3692 	if (old_stop) {
3693 		/*
3694 		 * Reset it back to a normal scheduling class so that
3695 		 * it can die in pieces.
3696 		 */
3697 		old_stop->sched_class = &rt_sched_class;
3698 	}
3699 }
3700 
3701 #else /* CONFIG_SMP */
3702 
3703 static inline int __set_cpus_allowed_ptr(struct task_struct *p,
3704 					 struct affinity_context *ctx)
3705 {
3706 	return set_cpus_allowed_ptr(p, ctx->new_mask);
3707 }
3708 
3709 static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { }
3710 
3711 static inline bool rq_has_pinned_tasks(struct rq *rq)
3712 {
3713 	return false;
3714 }
3715 
3716 static inline cpumask_t *alloc_user_cpus_ptr(int node)
3717 {
3718 	return NULL;
3719 }
3720 
3721 #endif /* !CONFIG_SMP */
3722 
3723 static void
3724 ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
3725 {
3726 	struct rq *rq;
3727 
3728 	if (!schedstat_enabled())
3729 		return;
3730 
3731 	rq = this_rq();
3732 
3733 #ifdef CONFIG_SMP
3734 	if (cpu == rq->cpu) {
3735 		__schedstat_inc(rq->ttwu_local);
3736 		__schedstat_inc(p->stats.nr_wakeups_local);
3737 	} else {
3738 		struct sched_domain *sd;
3739 
3740 		__schedstat_inc(p->stats.nr_wakeups_remote);
3741 
3742 		guard(rcu)();
3743 		for_each_domain(rq->cpu, sd) {
3744 			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
3745 				__schedstat_inc(sd->ttwu_wake_remote);
3746 				break;
3747 			}
3748 		}
3749 	}
3750 
3751 	if (wake_flags & WF_MIGRATED)
3752 		__schedstat_inc(p->stats.nr_wakeups_migrate);
3753 #endif /* CONFIG_SMP */
3754 
3755 	__schedstat_inc(rq->ttwu_count);
3756 	__schedstat_inc(p->stats.nr_wakeups);
3757 
3758 	if (wake_flags & WF_SYNC)
3759 		__schedstat_inc(p->stats.nr_wakeups_sync);
3760 }
3761 
3762 /*
3763  * Mark the task runnable.
3764  */
3765 static inline void ttwu_do_wakeup(struct task_struct *p)
3766 {
3767 	WRITE_ONCE(p->__state, TASK_RUNNING);
3768 	trace_sched_wakeup(p);
3769 }
3770 
3771 static void
3772 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
3773 		 struct rq_flags *rf)
3774 {
3775 	int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
3776 
3777 	lockdep_assert_rq_held(rq);
3778 
3779 	if (p->sched_contributes_to_load)
3780 		rq->nr_uninterruptible--;
3781 
3782 #ifdef CONFIG_SMP
3783 	if (wake_flags & WF_MIGRATED)
3784 		en_flags |= ENQUEUE_MIGRATED;
3785 	else
3786 #endif
3787 	if (p->in_iowait) {
3788 		delayacct_blkio_end(p);
3789 		atomic_dec(&task_rq(p)->nr_iowait);
3790 	}
3791 
3792 	activate_task(rq, p, en_flags);
3793 	wakeup_preempt(rq, p, wake_flags);
3794 
3795 	ttwu_do_wakeup(p);
3796 
3797 #ifdef CONFIG_SMP
3798 	if (p->sched_class->task_woken) {
3799 		/*
3800 		 * Our task @p is fully woken up and running; so it's safe to
3801 		 * drop the rq->lock, hereafter rq is only used for statistics.
3802 		 */
3803 		rq_unpin_lock(rq, rf);
3804 		p->sched_class->task_woken(rq, p);
3805 		rq_repin_lock(rq, rf);
3806 	}
3807 
3808 	if (rq->idle_stamp) {
3809 		u64 delta = rq_clock(rq) - rq->idle_stamp;
3810 		u64 max = 2*rq->max_idle_balance_cost;
3811 
3812 		update_avg(&rq->avg_idle, delta);
3813 
3814 		if (rq->avg_idle > max)
3815 			rq->avg_idle = max;
3816 
3817 		rq->idle_stamp = 0;
3818 	}
3819 #endif
3820 
3821 	p->dl_server = NULL;
3822 }
3823 
3824 /*
3825  * Consider @p being inside a wait loop:
3826  *
3827  *   for (;;) {
3828  *      set_current_state(TASK_UNINTERRUPTIBLE);
3829  *
3830  *      if (CONDITION)
3831  *         break;
3832  *
3833  *      schedule();
3834  *   }
3835  *   __set_current_state(TASK_RUNNING);
3836  *
3837  * between set_current_state() and schedule(). In this case @p is still
3838  * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
3839  * an atomic manner.
3840  *
3841  * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
3842  * then schedule() must still happen and p->state can be changed to
3843  * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
3844  * need to do a full wakeup with enqueue.
3845  *
3846  * Returns: %true when the wakeup is done,
3847  *          %false otherwise.
3848  */
3849 static int ttwu_runnable(struct task_struct *p, int wake_flags)
3850 {
3851 	struct rq_flags rf;
3852 	struct rq *rq;
3853 	int ret = 0;
3854 
3855 	rq = __task_rq_lock(p, &rf);
3856 	if (task_on_rq_queued(p)) {
3857 		if (!task_on_cpu(rq, p)) {
3858 			/*
3859 			 * When on_rq && !on_cpu the task is preempted, see if
3860 			 * it should preempt the task that is current now.
3861 			 */
3862 			update_rq_clock(rq);
3863 			wakeup_preempt(rq, p, wake_flags);
3864 		}
3865 		ttwu_do_wakeup(p);
3866 		ret = 1;
3867 	}
3868 	__task_rq_unlock(rq, &rf);
3869 
3870 	return ret;
3871 }
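
/*
 * An illustrative sketch (not actual kernel code) of the waker side that
 * pairs with the wait loop shown above ttwu_runnable(); 'CONDITION' and
 * 'waiter' are placeholder names:
 *
 *   CONDITION = 1;
 *   wake_up_process(waiter);
 *
 * try_to_wake_up() issues a full memory barrier (smp_mb__after_spinlock()
 * under ->pi_lock) before reading waiter->state, so the store to CONDITION
 * cannot be reordered past that load; see the comment above
 * try_to_wake_up() further down.
 */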
3872 
3873 #ifdef CONFIG_SMP
3874 void sched_ttwu_pending(void *arg)
3875 {
3876 	struct llist_node *llist = arg;
3877 	struct rq *rq = this_rq();
3878 	struct task_struct *p, *t;
3879 	struct rq_flags rf;
3880 
3881 	if (!llist)
3882 		return;
3883 
3884 	rq_lock_irqsave(rq, &rf);
3885 	update_rq_clock(rq);
3886 
3887 	llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
3888 		if (WARN_ON_ONCE(p->on_cpu))
3889 			smp_cond_load_acquire(&p->on_cpu, !VAL);
3890 
3891 		if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
3892 			set_task_cpu(p, cpu_of(rq));
3893 
3894 		ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf);
3895 	}
3896 
3897 	/*
3898 	 * Must be after enqueueing at least one task such that
3899 	 * idle_cpu() does not observe a false-negative -- if it does,
3900 	 * it is possible for select_idle_siblings() to stack a number
3901 	 * of tasks on this CPU during that window.
3902 	 *
3903 	 * It is OK to clear ttwu_pending while another task is pending.
3904 	 * We will receive an IPI after local IRQs are enabled and then enqueue it.
3905 	 * Since nr_running > 0 by now, idle_cpu() will always get the correct result.
3906 	 */
3907 	WRITE_ONCE(rq->ttwu_pending, 0);
3908 	rq_unlock_irqrestore(rq, &rf);
3909 }
3910 
3911 /*
3912  * Prepare the scene for sending an IPI for a remote smp_call
3913  *
3914  * Returns true if the caller can proceed with sending the IPI.
3915  * Returns false otherwise.
3916  */
3917 bool call_function_single_prep_ipi(int cpu)
3918 {
3919 	if (set_nr_if_polling(cpu_rq(cpu)->idle)) {
3920 		trace_sched_wake_idle_without_ipi(cpu);
3921 		return false;
3922 	}
3923 
3924 	return true;
3925 }
3926 
3927 /*
3928  * Queue a task on the target CPU's wake_list and wake the CPU via IPI if
3929  * necessary. The wakee CPU, on receipt of the IPI, will queue the task
3930  * via sched_ttwu_pending() for activation so the wakee incurs the cost
3931  * of the wakeup instead of the waker.
3932  */
3933 static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3934 {
3935 	struct rq *rq = cpu_rq(cpu);
3936 
3937 	p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
3938 
3939 	WRITE_ONCE(rq->ttwu_pending, 1);
3940 	__smp_call_single_queue(cpu, &p->wake_entry.llist);
3941 }
3942 
3943 void wake_up_if_idle(int cpu)
3944 {
3945 	struct rq *rq = cpu_rq(cpu);
3946 
3947 	guard(rcu)();
3948 	if (is_idle_task(rcu_dereference(rq->curr))) {
3949 		guard(rq_lock_irqsave)(rq);
3950 		if (is_idle_task(rq->curr))
3951 			resched_curr(rq);
3952 	}
3953 }
3954 
3955 bool cpus_equal_capacity(int this_cpu, int that_cpu)
3956 {
3957 	if (!sched_asym_cpucap_active())
3958 		return true;
3959 
3960 	if (this_cpu == that_cpu)
3961 		return true;
3962 
3963 	return arch_scale_cpu_capacity(this_cpu) == arch_scale_cpu_capacity(that_cpu);
3964 }
3965 
3966 bool cpus_share_cache(int this_cpu, int that_cpu)
3967 {
3968 	if (this_cpu == that_cpu)
3969 		return true;
3970 
3971 	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
3972 }
3973 
3974 /*
3975  * Whether CPUs share cache resources, which means the LLC on non-cluster
3976  * machines and LLC tag or L2 on machines with clusters.
3977  */
3978 bool cpus_share_resources(int this_cpu, int that_cpu)
3979 {
3980 	if (this_cpu == that_cpu)
3981 		return true;
3982 
3983 	return per_cpu(sd_share_id, this_cpu) == per_cpu(sd_share_id, that_cpu);
3984 }
3985 
3986 static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
3987 {
3988 	/*
3989 	 * Do not complicate things with the async wake_list while the CPU is
3990 	 * in hotplug state.
3991 	 */
3992 	if (!cpu_active(cpu))
3993 		return false;
3994 
3995 	/* Ensure the task will still be allowed to run on the CPU. */
3996 	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
3997 		return false;
3998 
3999 	/*
4000 	 * If the CPU does not share cache, then queue the task on the
4001 	 * remote rq's wakelist to avoid accessing remote data.
4002 	 */
4003 	if (!cpus_share_cache(smp_processor_id(), cpu))
4004 		return true;
4005 
4006 	if (cpu == smp_processor_id())
4007 		return false;
4008 
4009 	/*
4010 	 * If the wakee CPU is idle, or the task is descheduling and the
4011 	 * only running task on the CPU, then use the wakelist to offload
4012 	 * the task activation to the idle (or soon-to-be-idle) CPU as
4013 	 * the current CPU is likely busy. nr_running is checked to
4014 	 * avoid unnecessary task stacking.
4015 	 *
4016 	 * Note that we can only get here with (wakee) p->on_rq=0,
4017 	 * p->on_cpu can be whatever, we've done the dequeue, so
4018 	 * the wakee has been accounted out of ->nr_running.
4019 	 */
4020 	if (!cpu_rq(cpu)->nr_running)
4021 		return true;
4022 
4023 	return false;
4024 }
4025 
4026 static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
4027 {
4028 	if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) {
4029 		sched_clock_cpu(cpu); /* Sync clocks across CPUs */
4030 		__ttwu_queue_wakelist(p, cpu, wake_flags);
4031 		return true;
4032 	}
4033 
4034 	return false;
4035 }
4036 
4037 #else /* !CONFIG_SMP */
4038 
4039 static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
4040 {
4041 	return false;
4042 }
4043 
4044 #endif /* CONFIG_SMP */
4045 
4046 static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
4047 {
4048 	struct rq *rq = cpu_rq(cpu);
4049 	struct rq_flags rf;
4050 
4051 	if (ttwu_queue_wakelist(p, cpu, wake_flags))
4052 		return;
4053 
4054 	rq_lock(rq, &rf);
4055 	update_rq_clock(rq);
4056 	ttwu_do_activate(rq, p, wake_flags, &rf);
4057 	rq_unlock(rq, &rf);
4058 }
4059 
4060 /*
4061  * Invoked from try_to_wake_up() to check whether the task can be woken up.
4062  *
4063  * The caller holds p::pi_lock if p != current or has preemption
4064  * disabled when p == current.
4065  *
4066  * The rules of saved_state:
4067  *
4068  *   The related locking code always holds p::pi_lock when updating
4069  *   p::saved_state, which means the code is fully serialized in both cases.
4070  *
4071  *   For PREEMPT_RT, the lock wait and lock wakeups happen via TASK_RTLOCK_WAIT.
4072  *   No other bits are set. This allows distinguishing all wakeup scenarios.
4073  *
4074  *   For FREEZER, the wakeup happens via TASK_FROZEN. No other bits are set. This
4075  *   allows us to prevent early wakeup of tasks before they can be run on
4076  *   asymmetric ISA architectures (e.g. ARMv9).
4077  */
4078 static __always_inline
4079 bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
4080 {
4081 	int match;
4082 
4083 	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
4084 		WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) &&
4085 			     state != TASK_RTLOCK_WAIT);
4086 	}
4087 
4088 	*success = !!(match = __task_state_match(p, state));
4089 
4090 	/*
4091 	 * Saved state preserves the task state across blocking on
4092 	 * an RT lock or TASK_FREEZABLE tasks.  If the state matches,
4093 	 * set p::saved_state to TASK_RUNNING, but do not wake the task
4094 	 * because it waits for a lock wakeup or __thaw_task(). Also
4095 	 * indicate success because from the regular waker's point of
4096 	 * view this has succeeded.
4097 	 *
4098 	 * After acquiring the lock the task will restore p::__state
4099 	 * from p::saved_state which ensures that the regular
4100 	 * wakeup is not lost. The restore will also set
4101 	 * p::saved_state to TASK_RUNNING so any further tests will
4102 	 * not result in false positives vs. @success
4103 	 */
4104 	if (match < 0)
4105 		p->saved_state = TASK_RUNNING;
4106 
4107 	return match > 0;
4108 }
4109 
4110 /*
4111  * Notes on Program-Order guarantees on SMP systems.
4112  *
4113  *  MIGRATION
4114  *
4115  * The basic program-order guarantee on SMP systems is that when a task [t]
4116  * migrates, all its activity on its old CPU [c0] happens-before any subsequent
4117  * execution on its new CPU [c1].
4118  *
4119  * For migration (of runnable tasks) this is provided by the following means:
4120  *
4121  *  A) UNLOCK of the rq(c0)->lock scheduling out task t
4122  *  B) migration for t is required to synchronize *both* rq(c0)->lock and
4123  *     rq(c1)->lock (if not at the same time, then in that order).
4124  *  C) LOCK of the rq(c1)->lock scheduling in task
4125  *
4126  * Release/acquire chaining guarantees that B happens after A and C after B.
4127  * Note: the CPU doing B need not be c0 or c1
4128  *
4129  * Example:
4130  *
4131  *   CPU0            CPU1            CPU2
4132  *
4133  *   LOCK rq(0)->lock
4134  *   sched-out X
4135  *   sched-in Y
4136  *   UNLOCK rq(0)->lock
4137  *
4138  *                                   LOCK rq(0)->lock // orders against CPU0
4139  *                                   dequeue X
4140  *                                   UNLOCK rq(0)->lock
4141  *
4142  *                                   LOCK rq(1)->lock
4143  *                                   enqueue X
4144  *                                   UNLOCK rq(1)->lock
4145  *
4146  *                   LOCK rq(1)->lock // orders against CPU2
4147  *                   sched-out Z
4148  *                   sched-in X
4149  *                   UNLOCK rq(1)->lock
4150  *
4151  *
4152  *  BLOCKING -- aka. SLEEP + WAKEUP
4153  *
4154  * For blocking we (obviously) need to provide the same guarantee as for
4155  * migration. However the means are completely different as there is no lock
4156  * chain to provide order. Instead we do:
4157  *
4158  *   1) smp_store_release(X->on_cpu, 0)   -- finish_task()
4159  *   2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
4160  *
4161  * Example:
4162  *
4163  *   CPU0 (schedule)  CPU1 (try_to_wake_up) CPU2 (schedule)
4164  *
4165  *   LOCK rq(0)->lock LOCK X->pi_lock
4166  *   dequeue X
4167  *   sched-out X
4168  *   smp_store_release(X->on_cpu, 0);
4169  *
4170  *                    smp_cond_load_acquire(&X->on_cpu, !VAL);
4171  *                    X->state = WAKING
4172  *                    set_task_cpu(X,2)
4173  *
4174  *                    LOCK rq(2)->lock
4175  *                    enqueue X
4176  *                    X->state = RUNNING
4177  *                    UNLOCK rq(2)->lock
4178  *
4179  *                                          LOCK rq(2)->lock // orders against CPU1
4180  *                                          sched-out Z
4181  *                                          sched-in X
4182  *                                          UNLOCK rq(2)->lock
4183  *
4184  *                    UNLOCK X->pi_lock
4185  *   UNLOCK rq(0)->lock
4186  *
4187  *
4188  * However, for wakeups there is a second guarantee we must provide, namely we
4189  * must ensure that CONDITION=1 done by the caller can not be reordered with
4190  * accesses to the task state; see try_to_wake_up() and set_current_state().
4191  */
4192 
4193 /**
4194  * try_to_wake_up - wake up a thread
4195  * @p: the thread to be awakened
4196  * @state: the mask of task states that can be woken
4197  * @wake_flags: wake modifier flags (WF_*)
4198  *
4199  * Conceptually does:
4200  *
4201  *   If (@state & @p->state) @p->state = TASK_RUNNING.
4202  *
4203  * If the task was not queued/runnable, also place it back on a runqueue.
4204  *
4205  * This function is atomic against schedule() which would dequeue the task.
4206  *
4207  * It issues a full memory barrier before accessing @p->state, see the comment
4208  * with set_current_state().
4209  *
4210  * Uses p->pi_lock to serialize against concurrent wake-ups.
4211  *
4212  * Relies on p->pi_lock stabilizing:
4213  *  - p->sched_class
4214  *  - p->cpus_ptr
4215  *  - p->sched_task_group
4216  * in order to do migration, see its use of select_task_rq()/set_task_cpu().
4217  *
4218  * Tries really hard to only take one task_rq(p)->lock for performance.
4219  * Takes rq->lock in:
4220  *  - ttwu_runnable()    -- old rq, unavoidable, see comment there;
4221  *  - ttwu_queue()       -- new rq, for enqueue of the task;
4222  *  - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
4223  *
4224  * As a consequence we race really badly with just about everything. See the
4225  * many memory barriers and their comments for details.
4226  *
4227  * Return: %true if @p->state changes (an actual wakeup was done),
4228  *	   %false otherwise.
4229  */
4230 int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
4231 {
4232 	guard(preempt)();
4233 	int cpu, success = 0;
4234 
4235 	if (p == current) {
4236 		/*
4237 		 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
4238 		 * == smp_processor_id()'. Together this means we can special
4239 		 * case the whole 'p->on_rq && ttwu_runnable()' case below
4240 		 * without taking any locks.
4241 		 *
4242 		 * In particular:
4243 		 *  - we rely on Program-Order guarantees for all the ordering,
4244 		 *  - we're serialized against set_special_state() by virtue of
4245 		 *    it disabling IRQs (this allows not taking ->pi_lock).
4246 		 */
4247 		if (!ttwu_state_match(p, state, &success))
4248 			goto out;
4249 
4250 		trace_sched_waking(p);
4251 		ttwu_do_wakeup(p);
4252 		goto out;
4253 	}
4254 
4255 	/*
4256 	 * If we are going to wake up a thread waiting for CONDITION we
4257 	 * need to ensure that CONDITION=1 done by the caller can not be
4258 	 * reordered with p->state check below. This pairs with smp_store_mb()
4259 	 * in set_current_state() that the waiting thread does.
4260 	 */
4261 	scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
4262 		smp_mb__after_spinlock();
4263 		if (!ttwu_state_match(p, state, &success))
4264 			break;
4265 
4266 		trace_sched_waking(p);
4267 
4268 		/*
4269 		 * Ensure we load p->on_rq _after_ p->state, otherwise it would
4270 		 * be possible to, falsely, observe p->on_rq == 0 and get stuck
4271 		 * in smp_cond_load_acquire() below.
4272 		 *
4273 		 * sched_ttwu_pending()			try_to_wake_up()
4274 		 *   STORE p->on_rq = 1			  LOAD p->state
4275 		 *   UNLOCK rq->lock
4276 		 *
4277 		 * __schedule() (switch to task 'p')
4278 		 *   LOCK rq->lock			  smp_rmb();
4279 		 *   smp_mb__after_spinlock();
4280 		 *   UNLOCK rq->lock
4281 		 *
4282 		 * [task p]
4283 		 *   STORE p->state = UNINTERRUPTIBLE	  LOAD p->on_rq
4284 		 *
4285 		 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4286 		 * __schedule().  See the comment for smp_mb__after_spinlock().
4287 		 *
4288 		 * A similar smp_rmb() lives in __task_needs_rq_lock().
4289 		 */
4290 		smp_rmb();
4291 		if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
4292 			break;
4293 
4294 #ifdef CONFIG_SMP
4295 		/*
4296 		 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
4297 		 * possible to, falsely, observe p->on_cpu == 0.
4298 		 *
4299 		 * One must be running (->on_cpu == 1) in order to remove oneself
4300 		 * from the runqueue.
4301 		 *
4302 		 * __schedule() (switch to task 'p')	try_to_wake_up()
4303 		 *   STORE p->on_cpu = 1		  LOAD p->on_rq
4304 		 *   UNLOCK rq->lock
4305 		 *
4306 		 * __schedule() (put 'p' to sleep)
4307 		 *   LOCK rq->lock			  smp_rmb();
4308 		 *   smp_mb__after_spinlock();
4309 		 *   STORE p->on_rq = 0			  LOAD p->on_cpu
4310 		 *
4311 		 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4312 		 * __schedule().  See the comment for smp_mb__after_spinlock().
4313 		 *
4314 		 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
4315 		 * schedule()'s deactivate_task() has 'happened' and p will no longer
4316 		 * care about its own p->state. See the comment in __schedule().
4317 		 */
4318 		smp_acquire__after_ctrl_dep();
4319 
4320 		/*
4321 		 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
4322 		 * == 0), which means we need to do an enqueue, change p->state to
4323 		 * TASK_WAKING such that we can unlock p->pi_lock before doing the
4324 		 * enqueue, such as ttwu_queue_wakelist().
4325 		 */
4326 		WRITE_ONCE(p->__state, TASK_WAKING);
4327 
4328 		/*
4329 		 * If the owning (remote) CPU is still in the middle of schedule() with
4330 		 * this task as prev, consider queueing p on the remote CPU's wake_list,
4331 		 * which potentially sends an IPI instead of spinning on p->on_cpu, to
4332 		 * let the waker make forward progress. This is safe because IRQs are
4333 		 * disabled and the IPI will deliver after on_cpu is cleared.
4334 		 *
4335 		 * Ensure we load task_cpu(p) after p->on_cpu:
4336 		 *
4337 		 * set_task_cpu(p, cpu);
4338 		 *   STORE p->cpu = @cpu
4339 		 * __schedule() (switch to task 'p')
4340 		 *   LOCK rq->lock
4341 		 *   smp_mb__after_spin_lock()		smp_cond_load_acquire(&p->on_cpu)
4342 		 *   STORE p->on_cpu = 1		LOAD p->cpu
4343 		 *
4344 		 * to ensure we observe the correct CPU on which the task is currently
4345 		 * scheduling.
4346 		 */
4347 		if (smp_load_acquire(&p->on_cpu) &&
4348 		    ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
4349 			break;
4350 
4351 		/*
4352 		 * If the owning (remote) CPU is still in the middle of schedule() with
4353 		 * this task as prev, wait until it's done referencing the task.
4354 		 *
4355 		 * Pairs with the smp_store_release() in finish_task().
4356 		 *
4357 		 * This ensures that tasks getting woken will be fully ordered against
4358 		 * their previous state and preserve Program Order.
4359 		 */
4360 		smp_cond_load_acquire(&p->on_cpu, !VAL);
4361 
4362 		cpu = select_task_rq(p, p->wake_cpu, wake_flags | WF_TTWU);
4363 		if (task_cpu(p) != cpu) {
4364 			if (p->in_iowait) {
4365 				delayacct_blkio_end(p);
4366 				atomic_dec(&task_rq(p)->nr_iowait);
4367 			}
4368 
4369 			wake_flags |= WF_MIGRATED;
4370 			psi_ttwu_dequeue(p);
4371 			set_task_cpu(p, cpu);
4372 		}
4373 #else
4374 		cpu = task_cpu(p);
4375 #endif /* CONFIG_SMP */
4376 
4377 		ttwu_queue(p, cpu, wake_flags);
4378 	}
4379 out:
4380 	if (success)
4381 		ttwu_stat(p, task_cpu(p), wake_flags);
4382 
4383 	return success;
4384 }
4385 
4386 static bool __task_needs_rq_lock(struct task_struct *p)
4387 {
4388 	unsigned int state = READ_ONCE(p->__state);
4389 
4390 	/*
4391 	 * Since p::pi_lock blocks try_to_wake_up(), we don't need rq->lock when
4392 	 * the task is blocked. Make sure to check @state since ttwu() can drop
4393 	 * locks at the end, see ttwu_queue_wakelist().
4394 	 */
4395 	if (state == TASK_RUNNING || state == TASK_WAKING)
4396 		return true;
4397 
4398 	/*
4399 	 * Ensure we load p->on_rq after p->__state, otherwise it would be
4400 	 * possible to, falsely, observe p->on_rq == 0.
4401 	 *
4402 	 * See try_to_wake_up() for a longer comment.
4403 	 */
4404 	smp_rmb();
4405 	if (p->on_rq)
4406 		return true;
4407 
4408 #ifdef CONFIG_SMP
4409 	/*
4410 	 * Ensure the task has finished __schedule() and will not be referenced
4411 	 * anymore. Again, see try_to_wake_up() for a longer comment.
4412 	 */
4413 	smp_rmb();
4414 	smp_cond_load_acquire(&p->on_cpu, !VAL);
4415 #endif
4416 
4417 	return false;
4418 }
4419 
4420 /**
4421  * task_call_func - Invoke a function on task in fixed state
4422  * @p: Process for which the function is to be invoked, can be @current.
4423  * @func: Function to invoke.
4424  * @arg: Argument to function.
4425  *
4426  * Fix the task in its current state by avoiding wakeups and/or rq operations
4427  * and call @func(@arg) on it.  This function can use ->on_rq and task_curr()
4428  * to work out what the state is, if required.  Given that @func can be invoked
4429  * with a runqueue lock held, it had better be quite lightweight.
4430  *
4431  * Returns:
4432  *   Whatever @func returns
4433  */
4434 int task_call_func(struct task_struct *p, task_call_f func, void *arg)
4435 {
4436 	struct rq *rq = NULL;
4437 	struct rq_flags rf;
4438 	int ret;
4439 
4440 	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
4441 
4442 	if (__task_needs_rq_lock(p))
4443 		rq = __task_rq_lock(p, &rf);
4444 
4445 	/*
4446 	 * At this point the task is pinned; either:
4447 	 *  - blocked and we're holding off wakeups	 (pi->lock)
4448 	 *  - woken, and we're holding off enqueue	 (rq->lock)
4449 	 *  - queued, and we're holding off schedule	 (rq->lock)
4450 	 *  - running, and we're holding off de-schedule (rq->lock)
4451 	 *
4452 	 * The called function (@func) can use: task_curr(), p->on_rq and
4453 	 * p->__state to differentiate between these states.
4454 	 */
4455 	ret = func(p, arg);
4456 
4457 	if (rq)
4458 		rq_unlock(rq, &rf);
4459 
4460 	raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
4461 	return ret;
4462 }
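
/*
 * A hypothetical task_call_f callback, sketched for illustration only (the
 * read_state_cb name and its purpose are made up, not kernel code): sample a
 * field of @p while the task is pinned in its current state.
 *
 *   static int read_state_cb(struct task_struct *p, void *arg)
 *   {
 *           *(unsigned int *)arg = READ_ONCE(p->__state);
 *           return task_curr(p);
 *   }
 *
 *   unsigned int state;
 *   int was_running = task_call_func(p, read_state_cb, &state);
 *
 * The callback may run under a runqueue lock, so it must stay lightweight
 * and must not sleep.
 */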
4463 
4464 /**
4465  * cpu_curr_snapshot - Return a snapshot of the currently running task
4466  * @cpu: The CPU on which to snapshot the task.
4467  *
4468  * Returns the task_struct pointer of the task "currently" running on
4469  * the specified CPU.
4470  *
4471  * If the specified CPU was offline, the return value is whatever it
4472  * is, perhaps a pointer to the task_struct structure of that CPU's idle
4473  * task, but there is no guarantee.  Callers wishing a useful return
4474  * value must take some action to ensure that the specified CPU remains
4475  * online throughout.
4476  *
4477  * This function executes full memory barriers before and after fetching
4478  * the pointer, which permits the caller to confine this function's fetch
4479  * with respect to the caller's accesses to other shared variables.
4480  */
4481 struct task_struct *cpu_curr_snapshot(int cpu)
4482 {
4483 	struct rq *rq = cpu_rq(cpu);
4484 	struct task_struct *t;
4485 	struct rq_flags rf;
4486 
4487 	rq_lock_irqsave(rq, &rf);
4488 	smp_mb__after_spinlock(); /* Pairing determined by caller's synchronization design. */
4489 	t = rcu_dereference(cpu_curr(cpu));
4490 	rq_unlock_irqrestore(rq, &rf);
4491 	smp_mb(); /* Pairing determined by caller's synchronization design. */
4492 
4493 	return t;
4494 }
4495 
4496 /**
4497  * wake_up_process - Wake up a specific process
4498  * @p: The process to be woken up.
4499  *
4500  * Attempt to wake up the nominated process and move it to the set of runnable
4501  * processes.
4502  *
4503  * Return: 1 if the process was woken up, 0 if it was already running.
4504  *
4505  * This function executes a full memory barrier before accessing the task state.
4506  */
4507 int wake_up_process(struct task_struct *p)
4508 {
4509 	return try_to_wake_up(p, TASK_NORMAL, 0);
4510 }
4511 EXPORT_SYMBOL(wake_up_process);
4512 
4513 int wake_up_state(struct task_struct *p, unsigned int state)
4514 {
4515 	return try_to_wake_up(p, state, 0);
4516 }
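
/*
 * A minimal usage sketch (illustrative only; 'waiter' is a placeholder):
 *
 *   wake_up_process(waiter);                    wakes TASK_NORMAL sleeps, i.e.
 *                                               INTERRUPTIBLE | UNINTERRUPTIBLE
 *   wake_up_state(waiter, TASK_INTERRUPTIBLE);  wakes only interruptible sleeps
 *
 * Both return 1 if @waiter's state was actually changed to TASK_RUNNING and
 * 0 if it was already running (or did not match @state).
 */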
4517 
4518 /*
4519  * Perform scheduler related setup for a newly forked process p.
4520  * p is forked by current.
4521  *
4522  * __sched_fork() is basic setup used by init_idle() too:
4523  */
4524 static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
4525 {
4526 	p->on_rq			= 0;
4527 
4528 	p->se.on_rq			= 0;
4529 	p->se.exec_start		= 0;
4530 	p->se.sum_exec_runtime		= 0;
4531 	p->se.prev_sum_exec_runtime	= 0;
4532 	p->se.nr_migrations		= 0;
4533 	p->se.vruntime			= 0;
4534 	p->se.vlag			= 0;
4535 	p->se.slice			= sysctl_sched_base_slice;
4536 	INIT_LIST_HEAD(&p->se.group_node);
4537 
4538 #ifdef CONFIG_FAIR_GROUP_SCHED
4539 	p->se.cfs_rq			= NULL;
4540 #endif
4541 
4542 #ifdef CONFIG_SCHEDSTATS
4543 	/* Even if schedstat is disabled, there should not be garbage */
4544 	memset(&p->stats, 0, sizeof(p->stats));
4545 #endif
4546 
4547 	init_dl_entity(&p->dl);
4548 
4549 	INIT_LIST_HEAD(&p->rt.run_list);
4550 	p->rt.timeout		= 0;
4551 	p->rt.time_slice	= sched_rr_timeslice;
4552 	p->rt.on_rq		= 0;
4553 	p->rt.on_list		= 0;
4554 
4555 #ifdef CONFIG_PREEMPT_NOTIFIERS
4556 	INIT_HLIST_HEAD(&p->preempt_notifiers);
4557 #endif
4558 
4559 #ifdef CONFIG_COMPACTION
4560 	p->capture_control = NULL;
4561 #endif
4562 	init_numa_balancing(clone_flags, p);
4563 #ifdef CONFIG_SMP
4564 	p->wake_entry.u_flags = CSD_TYPE_TTWU;
4565 	p->migration_pending = NULL;
4566 #endif
4567 	init_sched_mm_cid(p);
4568 }
4569 
4570 DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);
4571 
4572 #ifdef CONFIG_NUMA_BALANCING
4573 
4574 int sysctl_numa_balancing_mode;
4575 
4576 static void __set_numabalancing_state(bool enabled)
4577 {
4578 	if (enabled)
4579 		static_branch_enable(&sched_numa_balancing);
4580 	else
4581 		static_branch_disable(&sched_numa_balancing);
4582 }
4583 
4584 void set_numabalancing_state(bool enabled)
4585 {
4586 	if (enabled)
4587 		sysctl_numa_balancing_mode = NUMA_BALANCING_NORMAL;
4588 	else
4589 		sysctl_numa_balancing_mode = NUMA_BALANCING_DISABLED;
4590 	__set_numabalancing_state(enabled);
4591 }
4592 
4593 #ifdef CONFIG_PROC_SYSCTL
4594 static void reset_memory_tiering(void)
4595 {
4596 	struct pglist_data *pgdat;
4597 
4598 	for_each_online_pgdat(pgdat) {
4599 		pgdat->nbp_threshold = 0;
4600 		pgdat->nbp_th_nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE);
4601 		pgdat->nbp_th_start = jiffies_to_msecs(jiffies);
4602 	}
4603 }
4604 
4605 static int sysctl_numa_balancing(struct ctl_table *table, int write,
4606 			  void *buffer, size_t *lenp, loff_t *ppos)
4607 {
4608 	struct ctl_table t;
4609 	int err;
4610 	int state = sysctl_numa_balancing_mode;
4611 
4612 	if (write && !capable(CAP_SYS_ADMIN))
4613 		return -EPERM;
4614 
4615 	t = *table;
4616 	t.data = &state;
4617 	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
4618 	if (err < 0)
4619 		return err;
4620 	if (write) {
4621 		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) &&
4622 		    (state & NUMA_BALANCING_MEMORY_TIERING))
4623 			reset_memory_tiering();
4624 		sysctl_numa_balancing_mode = state;
4625 		__set_numabalancing_state(state);
4626 	}
4627 	return err;
4628 }
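
/*
 * For reference (assuming the usual sysctl plumbing), the mode can be set at
 * runtime, e.g.:
 *
 *   # sysctl kernel.numa_balancing=1     NUMA_BALANCING_NORMAL
 *   # sysctl kernel.numa_balancing=2     NUMA_BALANCING_MEMORY_TIERING
 *   # sysctl kernel.numa_balancing=0     NUMA_BALANCING_DISABLED
 *
 * Writing a value that newly sets the MEMORY_TIERING bit resets the per-node
 * promotion thresholds via reset_memory_tiering() above.
 */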
4629 #endif
4630 #endif
4631 
4632 #ifdef CONFIG_SCHEDSTATS
4633 
4634 DEFINE_STATIC_KEY_FALSE(sched_schedstats);
4635 
4636 static void set_schedstats(bool enabled)
4637 {
4638 	if (enabled)
4639 		static_branch_enable(&sched_schedstats);
4640 	else
4641 		static_branch_disable(&sched_schedstats);
4642 }
4643 
4644 void force_schedstat_enabled(void)
4645 {
4646 	if (!schedstat_enabled()) {
4647 		pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
4648 		static_branch_enable(&sched_schedstats);
4649 	}
4650 }
4651 
4652 static int __init setup_schedstats(char *str)
4653 {
4654 	int ret = 0;
4655 	if (!str)
4656 		goto out;
4657 
4658 	if (!strcmp(str, "enable")) {
4659 		set_schedstats(true);
4660 		ret = 1;
4661 	} else if (!strcmp(str, "disable")) {
4662 		set_schedstats(false);
4663 		ret = 1;
4664 	}
4665 out:
4666 	if (!ret)
4667 		pr_warn("Unable to parse schedstats=\n");
4668 
4669 	return ret;
4670 }
4671 __setup("schedstats=", setup_schedstats);
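
/*
 * Schedstats can thus be controlled both at boot and at runtime, for example
 * (the proc paths are shown for illustration):
 *
 *   schedstats=enable                    on the kernel command line
 *   # sysctl kernel.sched_schedstats=1   at runtime (see the sysctl below)
 *   # cat /proc/schedstat                to read the accumulated statistics
 */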
4672 
4673 #ifdef CONFIG_PROC_SYSCTL
4674 static int sysctl_schedstats(struct ctl_table *table, int write, void *buffer,
4675 		size_t *lenp, loff_t *ppos)
4676 {
4677 	struct ctl_table t;
4678 	int err;
4679 	int state = static_branch_likely(&sched_schedstats);
4680 
4681 	if (write && !capable(CAP_SYS_ADMIN))
4682 		return -EPERM;
4683 
4684 	t = *table;
4685 	t.data = &state;
4686 	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
4687 	if (err < 0)
4688 		return err;
4689 	if (write)
4690 		set_schedstats(state);
4691 	return err;
4692 }
4693 #endif /* CONFIG_PROC_SYSCTL */
4694 #endif /* CONFIG_SCHEDSTATS */
4695 
4696 #ifdef CONFIG_SYSCTL
4697 static struct ctl_table sched_core_sysctls[] = {
4698 #ifdef CONFIG_SCHEDSTATS
4699 	{
4700 		.procname       = "sched_schedstats",
4701 		.data           = NULL,
4702 		.maxlen         = sizeof(unsigned int),
4703 		.mode           = 0644,
4704 		.proc_handler   = sysctl_schedstats,
4705 		.extra1         = SYSCTL_ZERO,
4706 		.extra2         = SYSCTL_ONE,
4707 	},
4708 #endif /* CONFIG_SCHEDSTATS */
4709 #ifdef CONFIG_UCLAMP_TASK
4710 	{
4711 		.procname       = "sched_util_clamp_min",
4712 		.data           = &sysctl_sched_uclamp_util_min,
4713 		.maxlen         = sizeof(unsigned int),
4714 		.mode           = 0644,
4715 		.proc_handler   = sysctl_sched_uclamp_handler,
4716 	},
4717 	{
4718 		.procname       = "sched_util_clamp_max",
4719 		.data           = &sysctl_sched_uclamp_util_max,
4720 		.maxlen         = sizeof(unsigned int),
4721 		.mode           = 0644,
4722 		.proc_handler   = sysctl_sched_uclamp_handler,
4723 	},
4724 	{
4725 		.procname       = "sched_util_clamp_min_rt_default",
4726 		.data           = &sysctl_sched_uclamp_util_min_rt_default,
4727 		.maxlen         = sizeof(unsigned int),
4728 		.mode           = 0644,
4729 		.proc_handler   = sysctl_sched_uclamp_handler,
4730 	},
4731 #endif /* CONFIG_UCLAMP_TASK */
4732 #ifdef CONFIG_NUMA_BALANCING
4733 	{
4734 		.procname	= "numa_balancing",
4735 		.data		= NULL, /* filled in by handler */
4736 		.maxlen		= sizeof(unsigned int),
4737 		.mode		= 0644,
4738 		.proc_handler	= sysctl_numa_balancing,
4739 		.extra1		= SYSCTL_ZERO,
4740 		.extra2		= SYSCTL_FOUR,
4741 	},
4742 #endif /* CONFIG_NUMA_BALANCING */
4743 };
4744 static int __init sched_core_sysctl_init(void)
4745 {
4746 	register_sysctl_init("kernel", sched_core_sysctls);
4747 	return 0;
4748 }
4749 late_initcall(sched_core_sysctl_init);
4750 #endif /* CONFIG_SYSCTL */
4751 
4752 /*
4753  * fork()/clone()-time setup:
4754  */
4755 int sched_fork(unsigned long clone_flags, struct task_struct *p)
4756 {
4757 	__sched_fork(clone_flags, p);
4758 	/*
4759 	 * We mark the process as NEW here. This guarantees that
4760 	 * nobody will actually run it, and a signal or other external
4761 	 * event cannot wake it up and insert it on the runqueue either.
4762 	 */
4763 	p->__state = TASK_NEW;
4764 
4765 	/*
4766 	 * Make sure we do not leak PI boosting priority to the child.
4767 	 */
4768 	p->prio = current->normal_prio;
4769 
4770 	uclamp_fork(p);
4771 
4772 	/*
4773 	 * Revert to default priority/policy on fork if requested.
4774 	 */
4775 	if (unlikely(p->sched_reset_on_fork)) {
4776 		if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
4777 			p->policy = SCHED_NORMAL;
4778 			p->static_prio = NICE_TO_PRIO(0);
4779 			p->rt_priority = 0;
4780 		} else if (PRIO_TO_NICE(p->static_prio) < 0)
4781 			p->static_prio = NICE_TO_PRIO(0);
4782 
4783 		p->prio = p->normal_prio = p->static_prio;
4784 		set_load_weight(p, false);
4785 
4786 		/*
4787 		 * We don't need the reset flag anymore after the fork. It has
4788 		 * fulfilled its duty:
4789 		 */
4790 		p->sched_reset_on_fork = 0;
4791 	}
4792 
4793 	if (dl_prio(p->prio))
4794 		return -EAGAIN;
4795 	else if (rt_prio(p->prio))
4796 		p->sched_class = &rt_sched_class;
4797 	else
4798 		p->sched_class = &fair_sched_class;
4799 
4800 	init_entity_runnable_average(&p->se);
4801 
4802 
4803 #ifdef CONFIG_SCHED_INFO
4804 	if (likely(sched_info_on()))
4805 		memset(&p->sched_info, 0, sizeof(p->sched_info));
4806 #endif
4807 #if defined(CONFIG_SMP)
4808 	p->on_cpu = 0;
4809 #endif
4810 	init_task_preempt_count(p);
4811 #ifdef CONFIG_SMP
4812 	plist_node_init(&p->pushable_tasks, MAX_PRIO);
4813 	RB_CLEAR_NODE(&p->pushable_dl_tasks);
4814 #endif
4815 	return 0;
4816 }
4817 
4818 void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
4819 {
4820 	unsigned long flags;
4821 
4822 	/*
4823 	 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
4824 	 * required yet, but lockdep gets upset if rules are violated.
4825 	 */
4826 	raw_spin_lock_irqsave(&p->pi_lock, flags);
4827 #ifdef CONFIG_CGROUP_SCHED
4828 	if (1) {
4829 		struct task_group *tg;
4830 		tg = container_of(kargs->cset->subsys[cpu_cgrp_id],
4831 				  struct task_group, css);
4832 		tg = autogroup_task_group(p, tg);
4833 		p->sched_task_group = tg;
4834 	}
4835 #endif
4836 	rseq_migrate(p);
4837 	/*
4838 	 * We're setting the CPU for the first time, we don't migrate,
4839 	 * so use __set_task_cpu().
4840 	 */
4841 	__set_task_cpu(p, smp_processor_id());
4842 	if (p->sched_class->task_fork)
4843 		p->sched_class->task_fork(p);
4844 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4845 }
4846 
4847 void sched_post_fork(struct task_struct *p)
4848 {
4849 	uclamp_post_fork(p);
4850 }
4851 
4852 unsigned long to_ratio(u64 period, u64 runtime)
4853 {
4854 	if (runtime == RUNTIME_INF)
4855 		return BW_UNIT;
4856 
4857 	/*
4858 	 * Doing this here saves a lot of checks in all
4859 	 * the calling paths, and returning zero seems
4860 	 * safe for them anyway.
4861 	 */
4862 	if (period == 0)
4863 		return 0;
4864 
4865 	return div64_u64(runtime << BW_SHIFT, period);
4866 }
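
/*
 * A worked example: with BW_SHIFT = 20 (so BW_UNIT = 1 << 20 = 1048576),
 * a runtime of 50ms out of a 100ms period maps to
 *
 *   to_ratio(100 * NSEC_PER_MSEC, 50 * NSEC_PER_MSEC)
 *     = (50000000 << 20) / 100000000
 *     = 524288
 *     = BW_UNIT / 2
 *
 * i.e. half of the available bandwidth.
 */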
4867 
4868 /*
4869  * wake_up_new_task - wake up a newly created task for the first time.
4870  *
4871  * This function will do some initial scheduler statistics housekeeping
4872  * that must be done for every newly created context, then puts the task
4873  * on the runqueue and wakes it.
4874  */
4875 void wake_up_new_task(struct task_struct *p)
4876 {
4877 	struct rq_flags rf;
4878 	struct rq *rq;
4879 
4880 	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
4881 	WRITE_ONCE(p->__state, TASK_RUNNING);
4882 #ifdef CONFIG_SMP
4883 	/*
4884 	 * Fork balancing, do it here and not earlier because:
4885 	 *  - cpus_ptr can change in the fork path
4886 	 *  - any previously selected CPU might disappear through hotplug
4887 	 *
4888 	 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
4889 	 * as we're not fully set-up yet.
4890 	 */
4891 	p->recent_used_cpu = task_cpu(p);
4892 	rseq_migrate(p);
4893 	__set_task_cpu(p, select_task_rq(p, task_cpu(p), WF_FORK));
4894 #endif
4895 	rq = __task_rq_lock(p, &rf);
4896 	update_rq_clock(rq);
4897 	post_init_entity_util_avg(p);
4898 
4899 	activate_task(rq, p, ENQUEUE_NOCLOCK);
4900 	trace_sched_wakeup_new(p);
4901 	wakeup_preempt(rq, p, WF_FORK);
4902 #ifdef CONFIG_SMP
4903 	if (p->sched_class->task_woken) {
4904 		/*
4905 		 * Nothing relies on rq->lock after this, so it's fine to
4906 		 * drop it.
4907 		 */
4908 		rq_unpin_lock(rq, &rf);
4909 		p->sched_class->task_woken(rq, p);
4910 		rq_repin_lock(rq, &rf);
4911 	}
4912 #endif
4913 	task_rq_unlock(rq, p, &rf);
4914 }
4915 
4916 #ifdef CONFIG_PREEMPT_NOTIFIERS
4917 
4918 static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
4919 
4920 void preempt_notifier_inc(void)
4921 {
4922 	static_branch_inc(&preempt_notifier_key);
4923 }
4924 EXPORT_SYMBOL_GPL(preempt_notifier_inc);
4925 
4926 void preempt_notifier_dec(void)
4927 {
4928 	static_branch_dec(&preempt_notifier_key);
4929 }
4930 EXPORT_SYMBOL_GPL(preempt_notifier_dec);
4931 
4932 /**
4933  * preempt_notifier_register - tell me when current is being preempted & rescheduled
4934  * @notifier: notifier struct to register
4935  */
4936 void preempt_notifier_register(struct preempt_notifier *notifier)
4937 {
4938 	if (!static_branch_unlikely(&preempt_notifier_key))
4939 		WARN(1, "registering preempt_notifier while notifiers disabled\n");
4940 
4941 	hlist_add_head(&notifier->link, &current->preempt_notifiers);
4942 }
4943 EXPORT_SYMBOL_GPL(preempt_notifier_register);
4944 
4945 /**
4946  * preempt_notifier_unregister - no longer interested in preemption notifications
4947  * @notifier: notifier struct to unregister
4948  *
4949  * This is *not* safe to call from within a preemption notifier.
4950  */
4951 void preempt_notifier_unregister(struct preempt_notifier *notifier)
4952 {
4953 	hlist_del(&notifier->link);
4954 }
4955 EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
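
/*
 * A minimal usage sketch (illustrative only; my_ops, my_notifier and the two
 * callbacks are placeholder names, not kernel code). A user interested in its
 * own preemption typically does:
 *
 *   static void my_sched_in(struct preempt_notifier *pn, int cpu) { ... }
 *   static void my_sched_out(struct preempt_notifier *pn,
 *                            struct task_struct *next) { ... }
 *
 *   static struct preempt_ops my_ops = {
 *           .sched_in  = my_sched_in,
 *           .sched_out = my_sched_out,
 *   };
 *   struct preempt_notifier my_notifier;
 *
 *   preempt_notifier_inc();
 *   preempt_notifier_init(&my_notifier, &my_ops);
 *   preempt_notifier_register(&my_notifier);     // affects current only
 *   ...
 *   preempt_notifier_unregister(&my_notifier);
 *   preempt_notifier_dec();
 */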
4956 
4957 static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
4958 {
4959 	struct preempt_notifier *notifier;
4960 
4961 	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
4962 		notifier->ops->sched_in(notifier, raw_smp_processor_id());
4963 }
4964 
4965 static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
4966 {
4967 	if (static_branch_unlikely(&preempt_notifier_key))
4968 		__fire_sched_in_preempt_notifiers(curr);
4969 }
4970 
4971 static void
4972 __fire_sched_out_preempt_notifiers(struct task_struct *curr,
4973 				   struct task_struct *next)
4974 {
4975 	struct preempt_notifier *notifier;
4976 
4977 	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
4978 		notifier->ops->sched_out(notifier, next);
4979 }
4980 
4981 static __always_inline void
4982 fire_sched_out_preempt_notifiers(struct task_struct *curr,
4983 				 struct task_struct *next)
4984 {
4985 	if (static_branch_unlikely(&preempt_notifier_key))
4986 		__fire_sched_out_preempt_notifiers(curr, next);
4987 }
4988 
4989 #else /* !CONFIG_PREEMPT_NOTIFIERS */
4990 
4991 static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
4992 {
4993 }
4994 
4995 static inline void
4996 fire_sched_out_preempt_notifiers(struct task_struct *curr,
4997 				 struct task_struct *next)
4998 {
4999 }
5000 
5001 #endif /* CONFIG_PREEMPT_NOTIFIERS */
5002 
5003 static inline void prepare_task(struct task_struct *next)
5004 {
5005 #ifdef CONFIG_SMP
5006 	/*
5007 	 * Claim the task as running, we do this before switching to it
5008 	 * such that any running task will have this set.
5009 	 *
5010 	 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and
5011 	 * its ordering comment.
5012 	 */
5013 	WRITE_ONCE(next->on_cpu, 1);
5014 #endif
5015 }
5016 
5017 static inline void finish_task(struct task_struct *prev)
5018 {
5019 #ifdef CONFIG_SMP
5020 	/*
5021 	 * This must be the very last reference to @prev from this CPU. After
5022 	 * p->on_cpu is cleared, the task can be moved to a different CPU. We
5023 	 * must ensure this doesn't happen until the switch is completely
5024 	 * finished.
5025 	 *
5026 	 * In particular, the load of prev->state in finish_task_switch() must
5027 	 * happen before this.
5028 	 *
5029 	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
5030 	 */
5031 	smp_store_release(&prev->on_cpu, 0);
5032 #endif
5033 }
5034 
5035 #ifdef CONFIG_SMP
5036 
5037 static void do_balance_callbacks(struct rq *rq, struct balance_callback *head)
5038 {
5039 	void (*func)(struct rq *rq);
5040 	struct balance_callback *next;
5041 
5042 	lockdep_assert_rq_held(rq);
5043 
5044 	while (head) {
5045 		func = (void (*)(struct rq *))head->func;
5046 		next = head->next;
5047 		head->next = NULL;
5048 		head = next;
5049 
5050 		func(rq);
5051 	}
5052 }
5053 
5054 static void balance_push(struct rq *rq);
5055 
5056 /*
5057  * balance_push_callback is a right abuse of the callback interface and plays
5058  * by significantly different rules.
5059  *
5060  * Where the normal balance_callback's purpose is to be run in the same context
5061  * that queued it (only later, when it's safe to drop rq->lock again),
5062  * balance_push_callback is specifically targeted at __schedule().
5063  *
5064  * This abuse is tolerated because it places all the unlikely/odd cases behind
5065  * a single test, namely: rq->balance_callback == NULL.
5066  */
5067 struct balance_callback balance_push_callback = {
5068 	.next = NULL,
5069 	.func = balance_push,
5070 };
5071 
5072 static inline struct balance_callback *
5073 __splice_balance_callbacks(struct rq *rq, bool split)
5074 {
5075 	struct balance_callback *head = rq->balance_callback;
5076 
5077 	if (likely(!head))
5078 		return NULL;
5079 
5080 	lockdep_assert_rq_held(rq);
5081 	/*
5082 	 * Must not take balance_push_callback off the list when
5083 	 * splice_balance_callbacks() and balance_callbacks() are not
5084 	 * in the same rq->lock section.
5085 	 *
5086 	 * In that case it would be possible for __schedule() to interleave
5087 	 * and observe the list empty.
5088 	 */
5089 	if (split && head == &balance_push_callback)
5090 		head = NULL;
5091 	else
5092 		rq->balance_callback = NULL;
5093 
5094 	return head;
5095 }
5096 
5097 static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
5098 {
5099 	return __splice_balance_callbacks(rq, true);
5100 }
5101 
5102 static void __balance_callbacks(struct rq *rq)
5103 {
5104 	do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
5105 }
5106 
5107 static inline void balance_callbacks(struct rq *rq, struct balance_callback *head)
5108 {
5109 	unsigned long flags;
5110 
5111 	if (unlikely(head)) {
5112 		raw_spin_rq_lock_irqsave(rq, flags);
5113 		do_balance_callbacks(rq, head);
5114 		raw_spin_rq_unlock_irqrestore(rq, flags);
5115 	}
5116 }
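
/*
 * For reference, producers queue work through queue_balance_callback() in
 * sched.h; a sketch of the pattern (the names below are placeholders, the
 * real users live in rt.c and deadline.c):
 *
 *   static DEFINE_PER_CPU(struct balance_callback, my_push_head);
 *
 *   static void my_push_tasks(struct rq *rq) { ... }
 *
 *   // with rq->lock held:
 *   queue_balance_callback(rq, &per_cpu(my_push_head, rq->cpu), my_push_tasks);
 *
 * The callback then runs via __balance_callbacks() / balance_callbacks() once
 * it is safe to drop and retake rq->lock.
 */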
5117 
5118 #else
5119 
5120 static inline void __balance_callbacks(struct rq *rq)
5121 {
5122 }
5123 
5124 static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
5125 {
5126 	return NULL;
5127 }
5128 
5129 static inline void balance_callbacks(struct rq *rq, struct balance_callback *head)
5130 {
5131 }
5132 
5133 #endif
5134 
5135 static inline void
5136 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
5137 {
5138 	/*
5139 	 * The runqueue lock will be released by the next
5140 	 * task (which is an invalid locking op, but in the case
5141 	 * of the scheduler it's an obvious special-case), so we
5142 	 * do an early lockdep release here:
5143 	 */
5144 	rq_unpin_lock(rq, rf);
5145 	spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_);
5146 #ifdef CONFIG_DEBUG_SPINLOCK
5147 	/* this is a valid case when another task releases the spinlock */
5148 	rq_lockp(rq)->owner = next;
5149 #endif
5150 }
5151 
5152 static inline void finish_lock_switch(struct rq *rq)
5153 {
5154 	/*
5155 	 * If we are tracking spinlock dependencies then we have to
5156 	 * fix up the runqueue lock - which gets 'carried over' from
5157 	 * prev into current:
5158 	 */
5159 	spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_);
5160 	__balance_callbacks(rq);
5161 	raw_spin_rq_unlock_irq(rq);
5162 }
5163 
5164 /*
5165  * NOP if the arch has not defined these:
5166  */
5167 
5168 #ifndef prepare_arch_switch
5169 # define prepare_arch_switch(next)	do { } while (0)
5170 #endif
5171 
5172 #ifndef finish_arch_post_lock_switch
5173 # define finish_arch_post_lock_switch()	do { } while (0)
5174 #endif
5175 
5176 static inline void kmap_local_sched_out(void)
5177 {
5178 #ifdef CONFIG_KMAP_LOCAL
5179 	if (unlikely(current->kmap_ctrl.idx))
5180 		__kmap_local_sched_out();
5181 #endif
5182 }
5183 
5184 static inline void kmap_local_sched_in(void)
5185 {
5186 #ifdef CONFIG_KMAP_LOCAL
5187 	if (unlikely(current->kmap_ctrl.idx))
5188 		__kmap_local_sched_in();
5189 #endif
5190 }
5191 
5192 /**
5193  * prepare_task_switch - prepare to switch tasks
5194  * @rq: the runqueue preparing to switch
5195  * @prev: the current task that is being switched out
5196  * @next: the task we are going to switch to.
5197  *
5198  * This is called with the rq lock held and interrupts off. It must
5199  * be paired with a subsequent finish_task_switch after the context
5200  * switch.
5201  *
5202  * prepare_task_switch sets up locking and calls architecture specific
5203  * hooks.
5204  */
5205 static inline void
5206 prepare_task_switch(struct rq *rq, struct task_struct *prev,
5207 		    struct task_struct *next)
5208 {
5209 	kcov_prepare_switch(prev);
5210 	sched_info_switch(rq, prev, next);
5211 	perf_event_task_sched_out(prev, next);
5212 	rseq_preempt(prev);
5213 	fire_sched_out_preempt_notifiers(prev, next);
5214 	kmap_local_sched_out();
5215 	prepare_task(next);
5216 	prepare_arch_switch(next);
5217 }
5218 
5219 /**
5220  * finish_task_switch - clean up after a task-switch
5221  * @prev: the thread we just switched away from.
5222  *
5223  * finish_task_switch must be called after the context switch, paired
5224  * with a prepare_task_switch call before the context switch.
5225  * finish_task_switch will reconcile locking set up by prepare_task_switch,
5226  * and do any other architecture-specific cleanup actions.
5227  *
5228  * Note that we may have delayed dropping an mm in context_switch(). If
5229  * so, we finish that here outside of the runqueue lock. (Doing it
5230  * with the lock held can cause deadlocks; see schedule() for
5231  * details.)
5232  *
5233  * The context switch has flipped the stack from under us and restored the
5234  * local variables which were saved when this task called schedule() in the
5235  * past. prev == current is still correct but we need to recalculate this_rq
5236  * because prev may have moved to another CPU.
5237  */
5238 static struct rq *finish_task_switch(struct task_struct *prev)
5239 	__releases(rq->lock)
5240 {
5241 	struct rq *rq = this_rq();
5242 	struct mm_struct *mm = rq->prev_mm;
5243 	unsigned int prev_state;
5244 
5245 	/*
5246 	 * The previous task will have left us with a preempt_count of 2
5247 	 * because it left us after:
5248 	 *
5249 	 *	schedule()
5250 	 *	  preempt_disable();			// 1
5251 	 *	  __schedule()
5252 	 *	    raw_spin_lock_irq(&rq->lock)	// 2
5253 	 *
5254 	 * Also, see FORK_PREEMPT_COUNT.
5255 	 */
5256 	if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
5257 		      "corrupted preempt_count: %s/%d/0x%x\n",
5258 		      current->comm, current->pid, preempt_count()))
5259 		preempt_count_set(FORK_PREEMPT_COUNT);
5260 
5261 	rq->prev_mm = NULL;
5262 
5263 	/*
5264 	 * A task struct has one reference for the use as "current".
5265 	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
5266 	 * schedule one last time. The schedule call will never return, and
5267 	 * the scheduled task must drop that reference.
5268 	 *
5269 	 * We must observe prev->state before clearing prev->on_cpu (in
5270 	 * finish_task), otherwise a concurrent wakeup can get prev
5271 	 * running on another CPU and we could race with its RUNNING -> DEAD
5272 	 * transition, resulting in a double drop.
5273 	 */
5274 	prev_state = READ_ONCE(prev->__state);
5275 	vtime_task_switch(prev);
5276 	perf_event_task_sched_in(prev, current);
5277 	finish_task(prev);
5278 	tick_nohz_task_switch();
5279 	finish_lock_switch(rq);
5280 	finish_arch_post_lock_switch();
5281 	kcov_finish_switch(current);
5282 	/*
5283 	 * kmap_local_sched_out() is invoked with rq::lock held and
5284 	 * interrupts disabled. There is no requirement for that, but the
5285 	 * sched out code does not have an interrupt enabled section.
5286 	 * Restoring the maps on sched in does not require interrupts being
5287 	 * disabled either.
5288 	 */
5289 	kmap_local_sched_in();
5290 
5291 	fire_sched_in_preempt_notifiers(current);
5292 	/*
5293 	 * When switching through a kernel thread, the loop in
5294 	 * membarrier_{private,global}_expedited() may have observed that
5295 	 * kernel thread and not issued an IPI. It is therefore possible to
5296 	 * schedule between user->kernel->user threads without passing through
5297 	 * switch_mm(). Membarrier requires a barrier after storing to
5298 	 * rq->curr, before returning to userspace, so provide them here:
5299 	 *
5300 	 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
5301 	 *   provided by mmdrop_lazy_tlb(),
5302 	 * - a sync_core for SYNC_CORE.
5303 	 */
5304 	if (mm) {
5305 		membarrier_mm_sync_core_before_usermode(mm);
5306 		mmdrop_lazy_tlb_sched(mm);
5307 	}
5308 
5309 	if (unlikely(prev_state == TASK_DEAD)) {
5310 		if (prev->sched_class->task_dead)
5311 			prev->sched_class->task_dead(prev);
5312 
5313 		/* Task is done with its stack. */
5314 		put_task_stack(prev);
5315 
5316 		put_task_struct_rcu_user(prev);
5317 	}
5318 
5319 	return rq;
5320 }
5321 
5322 /**
5323  * schedule_tail - first thing a freshly forked thread must call.
5324  * @prev: the thread we just switched away from.
5325  */
5326 asmlinkage __visible void schedule_tail(struct task_struct *prev)
5327 	__releases(rq->lock)
5328 {
5329 	/*
5330 	 * New tasks start with FORK_PREEMPT_COUNT, see there and
5331 	 * finish_task_switch() for details.
5332 	 *
5333 	 * finish_task_switch() will drop rq->lock() and lower preempt_count
5334 	 * and the preempt_enable() will end up enabling preemption (on
5335 	 * PREEMPT_COUNT kernels).
5336 	 */
5337 
5338 	finish_task_switch(prev);
5339 	preempt_enable();
5340 
5341 	if (current->set_child_tid)
5342 		put_user(task_pid_vnr(current), current->set_child_tid);
5343 
5344 	calculate_sigpending();
5345 }
5346 
5347 /*
5348  * context_switch - switch to the new MM and the new thread's register state.
5349  */
5350 static __always_inline struct rq *
5351 context_switch(struct rq *rq, struct task_struct *prev,
5352 	       struct task_struct *next, struct rq_flags *rf)
5353 {
5354 	prepare_task_switch(rq, prev, next);
5355 
5356 	/*
5357 	 * For paravirt, this is coupled with an exit in switch_to to
5358 	 * combine the page table reload and the switch backend into
5359 	 * one hypercall.
5360 	 */
5361 	arch_start_context_switch(prev);
5362 
5363 	/*
5364 	 * kernel -> kernel   lazy + transfer active
5365 	 *   user -> kernel   lazy + mmgrab_lazy_tlb() active
5366 	 *
5367 	 * kernel ->   user   switch + mmdrop_lazy_tlb() active
5368 	 *   user ->   user   switch
5369 	 *
5370 	 * switch_mm_cid() needs to be updated if the barriers provided
5371 	 * by context_switch() are modified.
5372 	 */
5373 	if (!next->mm) {                                // to kernel
5374 		enter_lazy_tlb(prev->active_mm, next);
5375 
5376 		next->active_mm = prev->active_mm;
5377 		if (prev->mm)                           // from user
5378 			mmgrab_lazy_tlb(prev->active_mm);
5379 		else
5380 			prev->active_mm = NULL;
5381 	} else {                                        // to user
5382 		membarrier_switch_mm(rq, prev->active_mm, next->mm);
5383 		/*
5384 		 * sys_membarrier() requires an smp_mb() between setting
5385 		 * rq->curr / membarrier_switch_mm() and returning to userspace.
5386 		 *
5387 		 * The below provides this either through switch_mm(), or in
5388 		 * case 'prev->active_mm == next->mm' through
5389 		 * finish_task_switch()'s mmdrop().
5390 		 */
5391 		switch_mm_irqs_off(prev->active_mm, next->mm, next);
5392 		lru_gen_use_mm(next->mm);
5393 
5394 		if (!prev->mm) {                        // from kernel
5395 			/* will mmdrop_lazy_tlb() in finish_task_switch(). */
5396 			rq->prev_mm = prev->active_mm;
5397 			prev->active_mm = NULL;
5398 		}
5399 	}
5400 
5401 	/* switch_mm_cid() requires the memory barriers above. */
5402 	switch_mm_cid(rq, prev, next);
5403 
5404 	prepare_lock_switch(rq, next, rf);
5405 
5406 	/* Here we just switch the register state and the stack. */
5407 	switch_to(prev, next, prev);
5408 	barrier();
5409 
5410 	return finish_task_switch(prev);
5411 }
5412 
5413 /*
5414  * nr_running and nr_context_switches:
5415  *
5416  * externally visible scheduler statistics: current number of runnable
5417  * threads, total number of context switches performed since bootup.
5418  */
5419 unsigned int nr_running(void)
5420 {
5421 	unsigned int i, sum = 0;
5422 
5423 	for_each_online_cpu(i)
5424 		sum += cpu_rq(i)->nr_running;
5425 
5426 	return sum;
5427 }
5428 
5429 /*
5430  * Check if only the current task is running on the CPU.
5431  *
5432  * Caution: this function does not check that the caller has disabled
5433  * preemption, thus the result might have a time-of-check-to-time-of-use
5434  * race.  The caller is responsible for using it correctly, for example:
5435  *
5436  * - from a non-preemptible section (of course)
5437  *
5438  * - from a thread that is bound to a single CPU
5439  *
5440  * - in a loop with very short iterations (e.g. a polling loop)
5441  */
5442 bool single_task_running(void)
5443 {
5444 	return raw_rq()->nr_running == 1;
5445 }
5446 EXPORT_SYMBOL(single_task_running);
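
/*
 * Purely illustrative (not part of the scheduler): a caller is expected to
 * pin itself or disable preemption so that the result of
 * single_task_running() keeps referring to the same CPU, e.g.:
 *
 *	preempt_disable();
 *	while (single_task_running() && !my_stop_condition())
 *		my_poll_once();
 *	preempt_enable();
 *
 * my_stop_condition() and my_poll_once() are hypothetical helpers standing
 * in for the caller's own logic.
 */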
5447 
5448 unsigned long long nr_context_switches_cpu(int cpu)
5449 {
5450 	return cpu_rq(cpu)->nr_switches;
5451 }
5452 
5453 unsigned long long nr_context_switches(void)
5454 {
5455 	int i;
5456 	unsigned long long sum = 0;
5457 
5458 	for_each_possible_cpu(i)
5459 		sum += cpu_rq(i)->nr_switches;
5460 
5461 	return sum;
5462 }
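
/*
 * Illustrative consumers (informational, not a complete list): the "ctxt"
 * line in /proc/stat is fed by nr_context_switches(), and /proc/loadavg
 * reports nr_running() as the numerator of its "running/total" field
 * (e.g. the "2" in "0.42 0.35 0.30 2/513 12345").
 */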
5463 
5464 /*
 * Consumers of the two nr_iowait interfaces below, for example the cpuidle
 * menu governor, end up using nonsensical data: they prefer a shallow idle
 * state for a CPU with IO-wait pending, even though that CPU might not be
 * the one that ends up running the task once it becomes runnable.
5469  */
5470 
5471 unsigned int nr_iowait_cpu(int cpu)
5472 {
5473 	return atomic_read(&cpu_rq(cpu)->nr_iowait);
5474 }
5475 
5476 /*
5477  * IO-wait accounting, and how it's mostly bollocks (on SMP).
5478  *
 * The idea behind IO-wait accounting is to account the idle time that we could
 * have spent running if it were not for IO. That is, if we were to improve the
5481  * storage performance, we'd have a proportional reduction in IO-wait time.
5482  *
5483  * This all works nicely on UP, where, when a task blocks on IO, we account
5484  * idle time as IO-wait, because if the storage were faster, it could've been
5485  * running and we'd not be idle.
5486  *
5487  * This has been extended to SMP, by doing the same for each CPU. This however
5488  * is broken.
5489  *
 * Imagine for instance the case where two tasks block on one CPU; only that
 * CPU will have IO-wait accounted, while the other has regular idle. Even
 * though, if the storage were faster, both could've run at the same time,
5493  * utilising both CPUs.
5494  *
 * This means that, when looking globally, the current IO-wait accounting on
 * SMP is a lower bound, due to under-accounting.
5497  *
5498  * Worse, since the numbers are provided per CPU, they are sometimes
5499  * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
 * associated with any one particular CPU; it can wake up on a different CPU
 * than the one it blocked on. This means the per-CPU IO-wait number is
 * meaningless.
5502  *
5503  * Task CPU affinities can make all that even more 'interesting'.
5504  */
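
/*
 * A small worked example of the under-accounting described above, assuming
 * two tasks that last ran on CPU0 both block on IO while CPU1 is idle:
 *
 *	CPU0: 10ms idle, 2 blocked tasks -> 10ms accounted as IO-wait
 *	CPU1: 10ms idle, 0 blocked tasks -> 10ms accounted as plain idle
 *
 * With faster storage both tasks could have run concurrently, so up to 20ms
 * of "could have been running" time existed, yet only 10ms shows up as
 * IO-wait -- hence the global sum is only a lower bound.
 */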
5505 
5506 unsigned int nr_iowait(void)
5507 {
5508 	unsigned int i, sum = 0;
5509 
5510 	for_each_possible_cpu(i)
5511 		sum += nr_iowait_cpu(i);
5512 
5513 	return sum;
5514 }
5515 
5516 #ifdef CONFIG_SMP
5517 
5518 /*
5519  * sched_exec - execve() is a valuable balancing opportunity, because at
5520  * this point the task has the smallest effective memory and cache footprint.
5521  */
5522 void sched_exec(void)
5523 {
5524 	struct task_struct *p = current;
5525 	struct migration_arg arg;
5526 	int dest_cpu;
5527 
5528 	scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
5529 		dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC);
5530 		if (dest_cpu == smp_processor_id())
5531 			return;
5532 
5533 		if (unlikely(!cpu_active(dest_cpu)))
5534 			return;
5535 
5536 		arg = (struct migration_arg){ p, dest_cpu };
5537 	}
5538 	stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
5539 }
5540 
5541 #endif
5542 
5543 DEFINE_PER_CPU(struct kernel_stat, kstat);
5544 DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
5545 
5546 EXPORT_PER_CPU_SYMBOL(kstat);
5547 EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
5548 
5549 /*
5550  * The function fair_sched_class.update_curr accesses the struct curr
5551  * and its field curr->exec_start; when called from task_sched_runtime(),
5552  * we observe a high rate of cache misses in practice.
5553  * Prefetching this data results in improved performance.
5554  */
5555 static inline void prefetch_curr_exec_start(struct task_struct *p)
5556 {
5557 #ifdef CONFIG_FAIR_GROUP_SCHED
5558 	struct sched_entity *curr = (&p->se)->cfs_rq->curr;
5559 #else
5560 	struct sched_entity *curr = (&task_rq(p)->cfs)->curr;
5561 #endif
5562 	prefetch(curr);
5563 	prefetch(&curr->exec_start);
5564 }
5565 
5566 /*
5567  * Return accounted runtime for the task.
5568  * In case the task is currently running, return the runtime plus current's
 * pending runtime that has not been accounted yet.
5570  */
5571 unsigned long long task_sched_runtime(struct task_struct *p)
5572 {
5573 	struct rq_flags rf;
5574 	struct rq *rq;
5575 	u64 ns;
5576 
5577 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
5578 	/*
5579 	 * 64-bit doesn't need locks to atomically read a 64-bit value.
	 * So we have an optimization opportunity when the task's delta_exec is 0.
5581 	 * Reading ->on_cpu is racy, but this is ok.
5582 	 *
5583 	 * If we race with it leaving CPU, we'll take a lock. So we're correct.
5584 	 * If we race with it entering CPU, unaccounted time is 0. This is
5585 	 * indistinguishable from the read occurring a few cycles earlier.
5586 	 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
5587 	 * been accounted, so we're correct here as well.
5588 	 */
5589 	if (!p->on_cpu || !task_on_rq_queued(p))
5590 		return p->se.sum_exec_runtime;
5591 #endif
5592 
5593 	rq = task_rq_lock(p, &rf);
5594 	/*
5595 	 * Must be ->curr _and_ ->on_rq.  If dequeued, we would
5596 	 * project cycles that may never be accounted to this
5597 	 * thread, breaking clock_gettime().
5598 	 */
5599 	if (task_current(rq, p) && task_on_rq_queued(p)) {
5600 		prefetch_curr_exec_start(p);
5601 		update_rq_clock(rq);
5602 		p->sched_class->update_curr(rq);
5603 	}
5604 	ns = p->se.sum_exec_runtime;
5605 	task_rq_unlock(rq, p, &rf);
5606 
5607 	return ns;
5608 }
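
/*
 * Illustrative userspace view (not kernel code): this value ultimately backs
 * the POSIX per-thread CPU clock, so a consumer of the accounted runtime
 * would typically look like:
 *
 *	struct timespec ts;
 *
 *	clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
 *	// ts reflects sum_exec_runtime plus any not-yet-accounted delta
 *
 * This is a simplified sketch; the posix-cpu-timers code sits in between.
 */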
5609 
5610 #ifdef CONFIG_SCHED_DEBUG
5611 static u64 cpu_resched_latency(struct rq *rq)
5612 {
5613 	int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms);
5614 	u64 resched_latency, now = rq_clock(rq);
5615 	static bool warned_once;
5616 
5617 	if (sysctl_resched_latency_warn_once && warned_once)
5618 		return 0;
5619 
5620 	if (!need_resched() || !latency_warn_ms)
5621 		return 0;
5622 
5623 	if (system_state == SYSTEM_BOOTING)
5624 		return 0;
5625 
5626 	if (!rq->last_seen_need_resched_ns) {
5627 		rq->last_seen_need_resched_ns = now;
5628 		rq->ticks_without_resched = 0;
5629 		return 0;
5630 	}
5631 
5632 	rq->ticks_without_resched++;
5633 	resched_latency = now - rq->last_seen_need_resched_ns;
5634 	if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC)
5635 		return 0;
5636 
5637 	warned_once = true;
5638 
5639 	return resched_latency;
5640 }
5641 
5642 static int __init setup_resched_latency_warn_ms(char *str)
5643 {
5644 	long val;
5645 
5646 	if ((kstrtol(str, 0, &val))) {
5647 		pr_warn("Unable to set resched_latency_warn_ms\n");
5648 		return 1;
5649 	}
5650 
5651 	sysctl_resched_latency_warn_ms = val;
5652 	return 1;
5653 }
5654 __setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms);
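
/*
 * Example usage (illustrative): booting with
 *
 *	resched_latency_warn_ms=100
 *
 * on the kernel command line raises the warning threshold to 100ms; note
 * that the LATENCY_WARN scheduler feature still has to be enabled for
 * sched_tick() to call cpu_resched_latency() at all.
 */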
5655 #else
5656 static inline u64 cpu_resched_latency(struct rq *rq) { return 0; }
5657 #endif /* CONFIG_SCHED_DEBUG */
5658 
5659 /*
5660  * This function gets called by the timer code, with HZ frequency.
5661  * We call it with interrupts disabled.
5662  */
5663 void sched_tick(void)
5664 {
5665 	int cpu = smp_processor_id();
5666 	struct rq *rq = cpu_rq(cpu);
5667 	struct task_struct *curr;
5668 	struct rq_flags rf;
5669 	unsigned long hw_pressure;
5670 	u64 resched_latency;
5671 
5672 	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
5673 		arch_scale_freq_tick();
5674 
5675 	sched_clock_tick();
5676 
5677 	rq_lock(rq, &rf);
5678 
5679 	curr = rq->curr;
5680 	psi_account_irqtime(rq, curr, NULL);
5681 
5682 	update_rq_clock(rq);
5683 	hw_pressure = arch_scale_hw_pressure(cpu_of(rq));
5684 	update_hw_load_avg(rq_clock_task(rq), rq, hw_pressure);
5685 	curr->sched_class->task_tick(rq, curr, 0);
5686 	if (sched_feat(LATENCY_WARN))
5687 		resched_latency = cpu_resched_latency(rq);
5688 	calc_global_load_tick(rq);
5689 	sched_core_tick(rq);
5690 	task_tick_mm_cid(rq, curr);
5691 
5692 	rq_unlock(rq, &rf);
5693 
5694 	if (sched_feat(LATENCY_WARN) && resched_latency)
5695 		resched_latency_warn(cpu, resched_latency);
5696 
5697 	perf_event_task_tick();
5698 
5699 	if (curr->flags & PF_WQ_WORKER)
5700 		wq_worker_tick(curr);
5701 
5702 #ifdef CONFIG_SMP
5703 	rq->idle_balance = idle_cpu(cpu);
5704 	sched_balance_trigger(rq);
5705 #endif
5706 }
5707 
5708 #ifdef CONFIG_NO_HZ_FULL
5709 
5710 struct tick_work {
5711 	int			cpu;
5712 	atomic_t		state;
5713 	struct delayed_work	work;
5714 };
5715 /* Values for ->state, see diagram below. */
5716 #define TICK_SCHED_REMOTE_OFFLINE	0
5717 #define TICK_SCHED_REMOTE_OFFLINING	1
5718 #define TICK_SCHED_REMOTE_RUNNING	2
5719 
5720 /*
5721  * State diagram for ->state:
5722  *
5723  *
5724  *          TICK_SCHED_REMOTE_OFFLINE
5725  *                    |   ^
5726  *                    |   |
5727  *                    |   | sched_tick_remote()
5728  *                    |   |
5729  *                    |   |
5730  *                    +--TICK_SCHED_REMOTE_OFFLINING
5731  *                    |   ^
5732  *                    |   |
5733  * sched_tick_start() |   | sched_tick_stop()
5734  *                    |   |
5735  *                    V   |
5736  *          TICK_SCHED_REMOTE_RUNNING
5737  *
5738  *
5739  * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
5740  * and sched_tick_start() are happy to leave the state in RUNNING.
5741  */
5742 
5743 static struct tick_work __percpu *tick_work_cpu;
5744 
5745 static void sched_tick_remote(struct work_struct *work)
5746 {
5747 	struct delayed_work *dwork = to_delayed_work(work);
5748 	struct tick_work *twork = container_of(dwork, struct tick_work, work);
5749 	int cpu = twork->cpu;
5750 	struct rq *rq = cpu_rq(cpu);
5751 	int os;
5752 
5753 	/*
5754 	 * Handle the tick only if it appears the remote CPU is running in full
5755 	 * dynticks mode. The check is racy by nature, but missing a tick or
	 * having one too many is no big deal because the scheduler tick updates
5757 	 * statistics and checks timeslices in a time-independent way, regardless
5758 	 * of when exactly it is running.
5759 	 */
5760 	if (tick_nohz_tick_stopped_cpu(cpu)) {
5761 		guard(rq_lock_irq)(rq);
5762 		struct task_struct *curr = rq->curr;
5763 
5764 		if (cpu_online(cpu)) {
5765 			update_rq_clock(rq);
5766 
5767 			if (!is_idle_task(curr)) {
5768 				/*
5769 				 * Make sure the next tick runs within a
5770 				 * reasonable amount of time.
5771 				 */
5772 				u64 delta = rq_clock_task(rq) - curr->se.exec_start;
5773 				WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
5774 			}
5775 			curr->sched_class->task_tick(rq, curr, 0);
5776 
5777 			calc_load_nohz_remote(rq);
5778 		}
5779 	}
5780 
5781 	/*
	 * Run the remote tick once per second (1Hz). This arbitrary
	 * frequency is low enough to avoid overload but high enough to
	 * keep the scheduler-internal stats reasonably up to date.  But
5785 	 * first update state to reflect hotplug activity if required.
5786 	 */
5787 	os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
5788 	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
5789 	if (os == TICK_SCHED_REMOTE_RUNNING)
5790 		queue_delayed_work(system_unbound_wq, dwork, HZ);
5791 }
5792 
5793 static void sched_tick_start(int cpu)
5794 {
5795 	int os;
5796 	struct tick_work *twork;
5797 
5798 	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
5799 		return;
5800 
5801 	WARN_ON_ONCE(!tick_work_cpu);
5802 
5803 	twork = per_cpu_ptr(tick_work_cpu, cpu);
5804 	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
5805 	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
5806 	if (os == TICK_SCHED_REMOTE_OFFLINE) {
5807 		twork->cpu = cpu;
5808 		INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
5809 		queue_delayed_work(system_unbound_wq, &twork->work, HZ);
5810 	}
5811 }
5812 
5813 #ifdef CONFIG_HOTPLUG_CPU
5814 static void sched_tick_stop(int cpu)
5815 {
5816 	struct tick_work *twork;
5817 	int os;
5818 
5819 	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
5820 		return;
5821 
5822 	WARN_ON_ONCE(!tick_work_cpu);
5823 
5824 	twork = per_cpu_ptr(tick_work_cpu, cpu);
5825 	/* There cannot be competing actions, but don't rely on stop-machine. */
5826 	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
5827 	WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
5828 	/* Don't cancel, as this would mess up the state machine. */
5829 }
5830 #endif /* CONFIG_HOTPLUG_CPU */
5831 
5832 int __init sched_tick_offload_init(void)
5833 {
5834 	tick_work_cpu = alloc_percpu(struct tick_work);
5835 	BUG_ON(!tick_work_cpu);
5836 	return 0;
5837 }
5838 
5839 #else /* !CONFIG_NO_HZ_FULL */
5840 static inline void sched_tick_start(int cpu) { }
5841 static inline void sched_tick_stop(int cpu) { }
5842 #endif
5843 
5844 #if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
5845 				defined(CONFIG_TRACE_PREEMPT_TOGGLE))
5846 /*
5847  * If the value passed in is equal to the current preempt count
5848  * then we just disabled preemption. Start timing the latency.
5849  */
5850 static inline void preempt_latency_start(int val)
5851 {
5852 	if (preempt_count() == val) {
5853 		unsigned long ip = get_lock_parent_ip();
5854 #ifdef CONFIG_DEBUG_PREEMPT
5855 		current->preempt_disable_ip = ip;
5856 #endif
5857 		trace_preempt_off(CALLER_ADDR0, ip);
5858 	}
5859 }
5860 
5861 void preempt_count_add(int val)
5862 {
5863 #ifdef CONFIG_DEBUG_PREEMPT
5864 	/*
5865 	 * Underflow?
5866 	 */
5867 	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
5868 		return;
5869 #endif
5870 	__preempt_count_add(val);
5871 #ifdef CONFIG_DEBUG_PREEMPT
5872 	/*
5873 	 * Spinlock count overflowing soon?
5874 	 */
5875 	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
5876 				PREEMPT_MASK - 10);
5877 #endif
5878 	preempt_latency_start(val);
5879 }
5880 EXPORT_SYMBOL(preempt_count_add);
5881 NOKPROBE_SYMBOL(preempt_count_add);
5882 
5883 /*
 * If the value passed in is equal to the current preempt count
5885  * then we just enabled preemption. Stop timing the latency.
5886  */
5887 static inline void preempt_latency_stop(int val)
5888 {
5889 	if (preempt_count() == val)
5890 		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
5891 }
5892 
5893 void preempt_count_sub(int val)
5894 {
5895 #ifdef CONFIG_DEBUG_PREEMPT
5896 	/*
5897 	 * Underflow?
5898 	 */
5899 	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
5900 		return;
5901 	/*
5902 	 * Is the spinlock portion underflowing?
5903 	 */
5904 	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
5905 			!(preempt_count() & PREEMPT_MASK)))
5906 		return;
5907 #endif
5908 
5909 	preempt_latency_stop(val);
5910 	__preempt_count_sub(val);
5911 }
5912 EXPORT_SYMBOL(preempt_count_sub);
5913 NOKPROBE_SYMBOL(preempt_count_sub);
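
/*
 * Illustrative nesting example (not actual kernel code): only the outermost
 * disable/enable pair delimits the traced latency, because only there does
 * preempt_count() match the value passed to the helpers above:
 *
 *	preempt_disable();	// count 0 -> 1, preempt_latency_start() traces
 *	preempt_disable();	// count 1 -> 2, no trace
 *	preempt_enable();	// count 2 -> 1, no trace
 *	preempt_enable();	// count 1 -> 0, preempt_latency_stop() traces
 */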
5914 
5915 #else
5916 static inline void preempt_latency_start(int val) { }
5917 static inline void preempt_latency_stop(int val) { }
5918 #endif
5919 
5920 static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
5921 {
5922 #ifdef CONFIG_DEBUG_PREEMPT
5923 	return p->preempt_disable_ip;
5924 #else
5925 	return 0;
5926 #endif
5927 }
5928 
5929 /*
5930  * Print scheduling while atomic bug:
5931  */
5932 static noinline void __schedule_bug(struct task_struct *prev)
5933 {
5934 	/* Save this before calling printk(), since that will clobber it */
5935 	unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
5936 
5937 	if (oops_in_progress)
5938 		return;
5939 
5940 	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
5941 		prev->comm, prev->pid, preempt_count());
5942 
5943 	debug_show_held_locks(prev);
5944 	print_modules();
5945 	if (irqs_disabled())
5946 		print_irqtrace_events(prev);
5947 	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
5948 		pr_err("Preemption disabled at:");
5949 		print_ip_sym(KERN_ERR, preempt_disable_ip);
5950 	}
5951 	check_panic_on_warn("scheduling while atomic");
5952 
5953 	dump_stack();
5954 	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
5955 }
5956 
5957 /*
5958  * Various schedule()-time debugging checks and statistics:
5959  */
5960 static inline void schedule_debug(struct task_struct *prev, bool preempt)
5961 {
5962 #ifdef CONFIG_SCHED_STACK_END_CHECK
5963 	if (task_stack_end_corrupted(prev))
5964 		panic("corrupted stack end detected inside scheduler\n");
5965 
5966 	if (task_scs_end_corrupted(prev))
5967 		panic("corrupted shadow stack detected inside scheduler\n");
5968 #endif
5969 
5970 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
5971 	if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) {
5972 		printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
5973 			prev->comm, prev->pid, prev->non_block_count);
5974 		dump_stack();
5975 		add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
5976 	}
5977 #endif
5978 
5979 	if (unlikely(in_atomic_preempt_off())) {
5980 		__schedule_bug(prev);
5981 		preempt_count_set(PREEMPT_DISABLED);
5982 	}
5983 	rcu_sleep_check();
5984 	SCHED_WARN_ON(ct_state() == CONTEXT_USER);
5985 
5986 	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
5987 
5988 	schedstat_inc(this_rq()->sched_count);
5989 }
5990 
5991 static void put_prev_task_balance(struct rq *rq, struct task_struct *prev,
5992 				  struct rq_flags *rf)
5993 {
5994 #ifdef CONFIG_SMP
5995 	const struct sched_class *class;
5996 	/*
5997 	 * We must do the balancing pass before put_prev_task(), such
5998 	 * that when we release the rq->lock the task is in the same
5999 	 * state as before we took rq->lock.
6000 	 *
6001 	 * We can terminate the balance pass as soon as we know there is
6002 	 * a runnable task of @class priority or higher.
6003 	 */
6004 	for_class_range(class, prev->sched_class, &idle_sched_class) {
6005 		if (class->balance(rq, prev, rf))
6006 			break;
6007 	}
6008 #endif
6009 
6010 	put_prev_task(rq, prev);
6011 }
6012 
6013 /*
6014  * Pick up the highest-prio task:
6015  */
6016 static inline struct task_struct *
6017 __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6018 {
6019 	const struct sched_class *class;
6020 	struct task_struct *p;
6021 
6022 	/*
6023 	 * Optimization: we know that if all tasks are in the fair class we can
6024 	 * call that function directly, but only if the @prev task wasn't of a
	 * higher scheduling class, because otherwise those classes lose the
6026 	 * opportunity to pull in more work from other CPUs.
6027 	 */
6028 	if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) &&
6029 		   rq->nr_running == rq->cfs.h_nr_running)) {
6030 
6031 		p = pick_next_task_fair(rq, prev, rf);
6032 		if (unlikely(p == RETRY_TASK))
6033 			goto restart;
6034 
6035 		/* Assume the next prioritized class is idle_sched_class */
6036 		if (!p) {
6037 			put_prev_task(rq, prev);
6038 			p = pick_next_task_idle(rq);
6039 		}
6040 
6041 		/*
6042 		 * This is the fast path; it cannot be a DL server pick;
6043 		 * therefore even if @p == @prev, ->dl_server must be NULL.
6044 		 */
6045 		if (p->dl_server)
6046 			p->dl_server = NULL;
6047 
6048 		return p;
6049 	}
6050 
6051 restart:
6052 	put_prev_task_balance(rq, prev, rf);
6053 
6054 	/*
6055 	 * We've updated @prev and no longer need the server link, clear it.
6056 	 * Must be done before ->pick_next_task() because that can (re)set
6057 	 * ->dl_server.
6058 	 */
6059 	if (prev->dl_server)
6060 		prev->dl_server = NULL;
6061 
6062 	for_each_class(class) {
6063 		p = class->pick_next_task(rq);
6064 		if (p)
6065 			return p;
6066 	}
6067 
6068 	BUG(); /* The idle class should always have a runnable task. */
6069 }
6070 
6071 #ifdef CONFIG_SCHED_CORE
6072 static inline bool is_task_rq_idle(struct task_struct *t)
6073 {
6074 	return (task_rq(t)->idle == t);
6075 }
6076 
6077 static inline bool cookie_equals(struct task_struct *a, unsigned long cookie)
6078 {
6079 	return is_task_rq_idle(a) || (a->core_cookie == cookie);
6080 }
6081 
6082 static inline bool cookie_match(struct task_struct *a, struct task_struct *b)
6083 {
6084 	if (is_task_rq_idle(a) || is_task_rq_idle(b))
6085 		return true;
6086 
6087 	return a->core_cookie == b->core_cookie;
6088 }
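
/*
 * Cookies are normally assigned from userspace. An illustrative (userspace)
 * snippet using the core-scheduling prctl() interface, assuming the calling
 * thread should only share an SMT core with tasks carrying the same cookie:
 *
 *	prctl(PR_SCHED_CORE, PR_SCHED_CORE_CREATE, 0,
 *	      PR_SCHED_CORE_SCOPE_THREAD, 0);
 *
 * See Documentation/admin-guide/hw-vuln/core-scheduling.rst for the
 * authoritative description of this interface.
 */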
6089 
6090 static inline struct task_struct *pick_task(struct rq *rq)
6091 {
6092 	const struct sched_class *class;
6093 	struct task_struct *p;
6094 
6095 	for_each_class(class) {
6096 		p = class->pick_task(rq);
6097 		if (p)
6098 			return p;
6099 	}
6100 
6101 	BUG(); /* The idle class should always have a runnable task. */
6102 }
6103 
6104 extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
6105 
6106 static void queue_core_balance(struct rq *rq);
6107 
6108 static struct task_struct *
6109 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6110 {
6111 	struct task_struct *next, *p, *max = NULL;
6112 	const struct cpumask *smt_mask;
6113 	bool fi_before = false;
6114 	bool core_clock_updated = (rq == rq->core);
6115 	unsigned long cookie;
6116 	int i, cpu, occ = 0;
6117 	struct rq *rq_i;
6118 	bool need_sync;
6119 
6120 	if (!sched_core_enabled(rq))
6121 		return __pick_next_task(rq, prev, rf);
6122 
6123 	cpu = cpu_of(rq);
6124 
	/* Stopper task is switching into idle, no need for core-wide selection. */
6126 	if (cpu_is_offline(cpu)) {
6127 		/*
6128 		 * Reset core_pick so that we don't enter the fastpath when
6129 		 * coming online. core_pick would already be migrated to
6130 		 * another cpu during offline.
6131 		 */
6132 		rq->core_pick = NULL;
6133 		return __pick_next_task(rq, prev, rf);
6134 	}
6135 
6136 	/*
6137 	 * If there were no {en,de}queues since we picked (IOW, the task
6138 	 * pointers are all still valid), and we haven't scheduled the last
6139 	 * pick yet, do so now.
6140 	 *
6141 	 * rq->core_pick can be NULL if no selection was made for a CPU because
6142 	 * it was either offline or went offline during a sibling's core-wide
6143 	 * selection. In this case, do a core-wide selection.
6144 	 */
6145 	if (rq->core->core_pick_seq == rq->core->core_task_seq &&
6146 	    rq->core->core_pick_seq != rq->core_sched_seq &&
6147 	    rq->core_pick) {
6148 		WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq);
6149 
6150 		next = rq->core_pick;
6151 		if (next != prev) {
6152 			put_prev_task(rq, prev);
6153 			set_next_task(rq, next);
6154 		}
6155 
6156 		rq->core_pick = NULL;
6157 		goto out;
6158 	}
6159 
6160 	put_prev_task_balance(rq, prev, rf);
6161 
6162 	smt_mask = cpu_smt_mask(cpu);
6163 	need_sync = !!rq->core->core_cookie;
6164 
6165 	/* reset state */
6166 	rq->core->core_cookie = 0UL;
6167 	if (rq->core->core_forceidle_count) {
6168 		if (!core_clock_updated) {
6169 			update_rq_clock(rq->core);
6170 			core_clock_updated = true;
6171 		}
6172 		sched_core_account_forceidle(rq);
6173 		/* reset after accounting force idle */
6174 		rq->core->core_forceidle_start = 0;
6175 		rq->core->core_forceidle_count = 0;
6176 		rq->core->core_forceidle_occupation = 0;
6177 		need_sync = true;
6178 		fi_before = true;
6179 	}
6180 
6181 	/*
6182 	 * core->core_task_seq, core->core_pick_seq, rq->core_sched_seq
6183 	 *
6184 	 * @task_seq guards the task state ({en,de}queues)
6185 	 * @pick_seq is the @task_seq we did a selection on
6186 	 * @sched_seq is the @pick_seq we scheduled
6187 	 *
6188 	 * However, preemptions can cause multiple picks on the same task set.
6189 	 * 'Fix' this by also increasing @task_seq for every pick.
6190 	 */
6191 	rq->core->core_task_seq++;
6192 
6193 	/*
6194 	 * Optimize for common case where this CPU has no cookies
6195 	 * and there are no cookied tasks running on siblings.
6196 	 */
6197 	if (!need_sync) {
6198 		next = pick_task(rq);
6199 		if (!next->core_cookie) {
6200 			rq->core_pick = NULL;
6201 			/*
6202 			 * For robustness, update the min_vruntime_fi for
6203 			 * unconstrained picks as well.
6204 			 */
6205 			WARN_ON_ONCE(fi_before);
6206 			task_vruntime_update(rq, next, false);
6207 			goto out_set_next;
6208 		}
6209 	}
6210 
6211 	/*
6212 	 * For each thread: do the regular task pick and find the max prio task
6213 	 * amongst them.
6214 	 *
6215 	 * Tie-break prio towards the current CPU
6216 	 */
6217 	for_each_cpu_wrap(i, smt_mask, cpu) {
6218 		rq_i = cpu_rq(i);
6219 
6220 		/*
6221 		 * Current cpu always has its clock updated on entrance to
6222 		 * pick_next_task(). If the current cpu is not the core,
6223 		 * the core may also have been updated above.
6224 		 */
6225 		if (i != cpu && (rq_i != rq->core || !core_clock_updated))
6226 			update_rq_clock(rq_i);
6227 
6228 		p = rq_i->core_pick = pick_task(rq_i);
6229 		if (!max || prio_less(max, p, fi_before))
6230 			max = p;
6231 	}
6232 
6233 	cookie = rq->core->core_cookie = max->core_cookie;
6234 
6235 	/*
6236 	 * For each thread: try and find a runnable task that matches @max or
6237 	 * force idle.
6238 	 */
6239 	for_each_cpu(i, smt_mask) {
6240 		rq_i = cpu_rq(i);
6241 		p = rq_i->core_pick;
6242 
6243 		if (!cookie_equals(p, cookie)) {
6244 			p = NULL;
6245 			if (cookie)
6246 				p = sched_core_find(rq_i, cookie);
6247 			if (!p)
6248 				p = idle_sched_class.pick_task(rq_i);
6249 		}
6250 
6251 		rq_i->core_pick = p;
6252 
6253 		if (p == rq_i->idle) {
6254 			if (rq_i->nr_running) {
6255 				rq->core->core_forceidle_count++;
6256 				if (!fi_before)
6257 					rq->core->core_forceidle_seq++;
6258 			}
6259 		} else {
6260 			occ++;
6261 		}
6262 	}
6263 
6264 	if (schedstat_enabled() && rq->core->core_forceidle_count) {
6265 		rq->core->core_forceidle_start = rq_clock(rq->core);
6266 		rq->core->core_forceidle_occupation = occ;
6267 	}
6268 
6269 	rq->core->core_pick_seq = rq->core->core_task_seq;
6270 	next = rq->core_pick;
6271 	rq->core_sched_seq = rq->core->core_pick_seq;
6272 
6273 	/* Something should have been selected for current CPU */
6274 	WARN_ON_ONCE(!next);
6275 
6276 	/*
6277 	 * Reschedule siblings
6278 	 *
6279 	 * NOTE: L1TF -- at this point we're no longer running the old task and
6280 	 * sending an IPI (below) ensures the sibling will no longer be running
6281 	 * their task. This ensures there is no inter-sibling overlap between
6282 	 * non-matching user state.
6283 	 */
6284 	for_each_cpu(i, smt_mask) {
6285 		rq_i = cpu_rq(i);
6286 
6287 		/*
6288 		 * An online sibling might have gone offline before a task
6289 		 * could be picked for it, or it might be offline but later
		 * happen to come online, but it's too late and nothing was
		 * picked for it.  That's OK - it will pick tasks for itself,
6292 		 * so ignore it.
6293 		 */
6294 		if (!rq_i->core_pick)
6295 			continue;
6296 
6297 		/*
6298 		 * Update for new !FI->FI transitions, or if continuing to be in !FI:
6299 		 * fi_before     fi      update?
6300 		 *  0            0       1
6301 		 *  0            1       1
6302 		 *  1            0       1
6303 		 *  1            1       0
6304 		 */
6305 		if (!(fi_before && rq->core->core_forceidle_count))
6306 			task_vruntime_update(rq_i, rq_i->core_pick, !!rq->core->core_forceidle_count);
6307 
6308 		rq_i->core_pick->core_occupation = occ;
6309 
6310 		if (i == cpu) {
6311 			rq_i->core_pick = NULL;
6312 			continue;
6313 		}
6314 
6315 		/* Did we break L1TF mitigation requirements? */
6316 		WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick));
6317 
6318 		if (rq_i->curr == rq_i->core_pick) {
6319 			rq_i->core_pick = NULL;
6320 			continue;
6321 		}
6322 
6323 		resched_curr(rq_i);
6324 	}
6325 
6326 out_set_next:
6327 	set_next_task(rq, next);
6328 out:
6329 	if (rq->core->core_forceidle_count && next == rq->idle)
6330 		queue_core_balance(rq);
6331 
6332 	return next;
6333 }
6334 
6335 static bool try_steal_cookie(int this, int that)
6336 {
6337 	struct rq *dst = cpu_rq(this), *src = cpu_rq(that);
6338 	struct task_struct *p;
6339 	unsigned long cookie;
6340 	bool success = false;
6341 
6342 	guard(irq)();
6343 	guard(double_rq_lock)(dst, src);
6344 
6345 	cookie = dst->core->core_cookie;
6346 	if (!cookie)
6347 		return false;
6348 
6349 	if (dst->curr != dst->idle)
6350 		return false;
6351 
6352 	p = sched_core_find(src, cookie);
6353 	if (!p)
6354 		return false;
6355 
6356 	do {
6357 		if (p == src->core_pick || p == src->curr)
6358 			goto next;
6359 
6360 		if (!is_cpu_allowed(p, this))
6361 			goto next;
6362 
6363 		if (p->core_occupation > dst->idle->core_occupation)
6364 			goto next;
6365 		/*
6366 		 * sched_core_find() and sched_core_next() will ensure
		 * that task @p is not throttled now; we also need to
6368 		 * check whether the runqueue of the destination CPU is
6369 		 * being throttled.
6370 		 */
6371 		if (sched_task_is_throttled(p, this))
6372 			goto next;
6373 
6374 		deactivate_task(src, p, 0);
6375 		set_task_cpu(p, this);
6376 		activate_task(dst, p, 0);
6377 
6378 		resched_curr(dst);
6379 
6380 		success = true;
6381 		break;
6382 
6383 next:
6384 		p = sched_core_next(p, cookie);
6385 	} while (p);
6386 
6387 	return success;
6388 }
6389 
6390 static bool steal_cookie_task(int cpu, struct sched_domain *sd)
6391 {
6392 	int i;
6393 
6394 	for_each_cpu_wrap(i, sched_domain_span(sd), cpu + 1) {
6395 		if (i == cpu)
6396 			continue;
6397 
6398 		if (need_resched())
6399 			break;
6400 
6401 		if (try_steal_cookie(cpu, i))
6402 			return true;
6403 	}
6404 
6405 	return false;
6406 }
6407 
6408 static void sched_core_balance(struct rq *rq)
6409 {
6410 	struct sched_domain *sd;
6411 	int cpu = cpu_of(rq);
6412 
6413 	guard(preempt)();
6414 	guard(rcu)();
6415 
6416 	raw_spin_rq_unlock_irq(rq);
6417 	for_each_domain(cpu, sd) {
6418 		if (need_resched())
6419 			break;
6420 
6421 		if (steal_cookie_task(cpu, sd))
6422 			break;
6423 	}
6424 	raw_spin_rq_lock_irq(rq);
6425 }
6426 
6427 static DEFINE_PER_CPU(struct balance_callback, core_balance_head);
6428 
6429 static void queue_core_balance(struct rq *rq)
6430 {
6431 	if (!sched_core_enabled(rq))
6432 		return;
6433 
6434 	if (!rq->core->core_cookie)
6435 		return;
6436 
6437 	if (!rq->nr_running) /* not forced idle */
6438 		return;
6439 
6440 	queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance);
6441 }
6442 
6443 DEFINE_LOCK_GUARD_1(core_lock, int,
6444 		    sched_core_lock(*_T->lock, &_T->flags),
6445 		    sched_core_unlock(*_T->lock, &_T->flags),
6446 		    unsigned long flags)
6447 
6448 static void sched_core_cpu_starting(unsigned int cpu)
6449 {
6450 	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
6451 	struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
6452 	int t;
6453 
6454 	guard(core_lock)(&cpu);
6455 
6456 	WARN_ON_ONCE(rq->core != rq);
6457 
6458 	/* if we're the first, we'll be our own leader */
6459 	if (cpumask_weight(smt_mask) == 1)
6460 		return;
6461 
6462 	/* find the leader */
6463 	for_each_cpu(t, smt_mask) {
6464 		if (t == cpu)
6465 			continue;
6466 		rq = cpu_rq(t);
6467 		if (rq->core == rq) {
6468 			core_rq = rq;
6469 			break;
6470 		}
6471 	}
6472 
6473 	if (WARN_ON_ONCE(!core_rq)) /* whoopsie */
6474 		return;
6475 
6476 	/* install and validate core_rq */
6477 	for_each_cpu(t, smt_mask) {
6478 		rq = cpu_rq(t);
6479 
6480 		if (t == cpu)
6481 			rq->core = core_rq;
6482 
6483 		WARN_ON_ONCE(rq->core != core_rq);
6484 	}
6485 }
6486 
6487 static void sched_core_cpu_deactivate(unsigned int cpu)
6488 {
6489 	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
6490 	struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
6491 	int t;
6492 
6493 	guard(core_lock)(&cpu);
6494 
6495 	/* if we're the last man standing, nothing to do */
6496 	if (cpumask_weight(smt_mask) == 1) {
6497 		WARN_ON_ONCE(rq->core != rq);
6498 		return;
6499 	}
6500 
6501 	/* if we're not the leader, nothing to do */
6502 	if (rq->core != rq)
6503 		return;
6504 
6505 	/* find a new leader */
6506 	for_each_cpu(t, smt_mask) {
6507 		if (t == cpu)
6508 			continue;
6509 		core_rq = cpu_rq(t);
6510 		break;
6511 	}
6512 
6513 	if (WARN_ON_ONCE(!core_rq)) /* impossible */
6514 		return;
6515 
6516 	/* copy the shared state to the new leader */
6517 	core_rq->core_task_seq             = rq->core_task_seq;
6518 	core_rq->core_pick_seq             = rq->core_pick_seq;
6519 	core_rq->core_cookie               = rq->core_cookie;
6520 	core_rq->core_forceidle_count      = rq->core_forceidle_count;
6521 	core_rq->core_forceidle_seq        = rq->core_forceidle_seq;
6522 	core_rq->core_forceidle_occupation = rq->core_forceidle_occupation;
6523 
6524 	/*
6525 	 * Accounting edge for forced idle is handled in pick_next_task().
6526 	 * Don't need another one here, since the hotplug thread shouldn't
6527 	 * have a cookie.
6528 	 */
6529 	core_rq->core_forceidle_start = 0;
6530 
6531 	/* install new leader */
6532 	for_each_cpu(t, smt_mask) {
6533 		rq = cpu_rq(t);
6534 		rq->core = core_rq;
6535 	}
6536 }
6537 
6538 static inline void sched_core_cpu_dying(unsigned int cpu)
6539 {
6540 	struct rq *rq = cpu_rq(cpu);
6541 
6542 	if (rq->core != rq)
6543 		rq->core = rq;
6544 }
6545 
6546 #else /* !CONFIG_SCHED_CORE */
6547 
6548 static inline void sched_core_cpu_starting(unsigned int cpu) {}
6549 static inline void sched_core_cpu_deactivate(unsigned int cpu) {}
6550 static inline void sched_core_cpu_dying(unsigned int cpu) {}
6551 
6552 static struct task_struct *
6553 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6554 {
6555 	return __pick_next_task(rq, prev, rf);
6556 }
6557 
6558 #endif /* CONFIG_SCHED_CORE */
6559 
6560 /*
6561  * Constants for the sched_mode argument of __schedule().
6562  *
6563  * The mode argument allows RT enabled kernels to differentiate a
6564  * preemption from blocking on an 'sleeping' spin/rwlock. Note that
6565  * SM_MASK_PREEMPT for !RT has all bits set, which allows the compiler to
6566  * optimize the AND operation out and just check for zero.
6567  */
6568 #define SM_NONE			0x0
6569 #define SM_PREEMPT		0x1
6570 #define SM_RTLOCK_WAIT		0x2
6571 
6572 #ifndef CONFIG_PREEMPT_RT
6573 # define SM_MASK_PREEMPT	(~0U)
6574 #else
6575 # define SM_MASK_PREEMPT	SM_PREEMPT
6576 #endif
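
/*
 * Illustrative expansion of the test used in __schedule() below: on !RT,
 * SM_MASK_PREEMPT is ~0U, so
 *
 *	if (!(sched_mode & SM_MASK_PREEMPT) && prev_state)
 *
 * folds to a plain "if (!sched_mode && prev_state)", i.e. a zero check. On
 * PREEMPT_RT only the SM_PREEMPT bit is masked, so an SM_RTLOCK_WAIT
 * schedule still takes the blocking (deactivation) path.
 */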
6577 
6578 /*
6579  * __schedule() is the main scheduler function.
6580  *
6581  * The main means of driving the scheduler and thus entering this function are:
6582  *
6583  *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
6584  *
6585  *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
6586  *      paths. For example, see arch/x86/entry_64.S.
6587  *
6588  *      To drive preemption between tasks, the scheduler sets the flag in timer
6589  *      interrupt handler sched_tick().
6590  *
6591  *   3. Wakeups don't really cause entry into schedule(). They add a
6592  *      task to the run-queue and that's it.
6593  *
6594  *      Now, if the new task added to the run-queue preempts the current
6595  *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
6596  *      called on the nearest possible occasion:
6597  *
6598  *       - If the kernel is preemptible (CONFIG_PREEMPTION=y):
6599  *
 *         - in syscall or exception context, at the next outermost
6601  *           preempt_enable(). (this might be as soon as the wake_up()'s
6602  *           spin_unlock()!)
6603  *
6604  *         - in IRQ context, return from interrupt-handler to
6605  *           preemptible context
6606  *
6607  *       - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
6608  *         then at the next:
6609  *
6610  *          - cond_resched() call
6611  *          - explicit schedule() call
6612  *          - return from syscall or exception to user-space
6613  *          - return from interrupt-handler to user-space
6614  *
6615  * WARNING: must be called with preemption disabled!
6616  */
6617 static void __sched notrace __schedule(unsigned int sched_mode)
6618 {
6619 	struct task_struct *prev, *next;
6620 	unsigned long *switch_count;
6621 	unsigned long prev_state;
6622 	struct rq_flags rf;
6623 	struct rq *rq;
6624 	int cpu;
6625 
6626 	cpu = smp_processor_id();
6627 	rq = cpu_rq(cpu);
6628 	prev = rq->curr;
6629 
6630 	schedule_debug(prev, !!sched_mode);
6631 
6632 	if (sched_feat(HRTICK) || sched_feat(HRTICK_DL))
6633 		hrtick_clear(rq);
6634 
6635 	local_irq_disable();
6636 	rcu_note_context_switch(!!sched_mode);
6637 
6638 	/*
6639 	 * Make sure that signal_pending_state()->signal_pending() below
6640 	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
6641 	 * done by the caller to avoid the race with signal_wake_up():
6642 	 *
6643 	 * __set_current_state(@state)		signal_wake_up()
6644 	 * schedule()				  set_tsk_thread_flag(p, TIF_SIGPENDING)
6645 	 *					  wake_up_state(p, state)
6646 	 *   LOCK rq->lock			    LOCK p->pi_state
6647 	 *   smp_mb__after_spinlock()		    smp_mb__after_spinlock()
6648 	 *     if (signal_pending_state())	    if (p->state & @state)
6649 	 *
6650 	 * Also, the membarrier system call requires a full memory barrier
6651 	 * after coming from user-space, before storing to rq->curr; this
6652 	 * barrier matches a full barrier in the proximity of the membarrier
6653 	 * system call exit.
6654 	 */
6655 	rq_lock(rq, &rf);
6656 	smp_mb__after_spinlock();
6657 
6658 	/* Promote REQ to ACT */
6659 	rq->clock_update_flags <<= 1;
6660 	update_rq_clock(rq);
6661 	rq->clock_update_flags = RQCF_UPDATED;
6662 
6663 	switch_count = &prev->nivcsw;
6664 
6665 	/*
6666 	 * We must load prev->state once (task_struct::state is volatile), such
6667 	 * that we form a control dependency vs deactivate_task() below.
6668 	 */
6669 	prev_state = READ_ONCE(prev->__state);
6670 	if (!(sched_mode & SM_MASK_PREEMPT) && prev_state) {
6671 		if (signal_pending_state(prev_state, prev)) {
6672 			WRITE_ONCE(prev->__state, TASK_RUNNING);
6673 		} else {
6674 			prev->sched_contributes_to_load =
6675 				(prev_state & TASK_UNINTERRUPTIBLE) &&
6676 				!(prev_state & TASK_NOLOAD) &&
6677 				!(prev_state & TASK_FROZEN);
6678 
6679 			if (prev->sched_contributes_to_load)
6680 				rq->nr_uninterruptible++;
6681 
6682 			/*
6683 			 * __schedule()			ttwu()
6684 			 *   prev_state = prev->state;    if (p->on_rq && ...)
6685 			 *   if (prev_state)		    goto out;
6686 			 *     p->on_rq = 0;		  smp_acquire__after_ctrl_dep();
6687 			 *				  p->state = TASK_WAKING
6688 			 *
6689 			 * Where __schedule() and ttwu() have matching control dependencies.
6690 			 *
6691 			 * After this, schedule() must not care about p->state any more.
6692 			 */
6693 			deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK);
6694 
6695 			if (prev->in_iowait) {
6696 				atomic_inc(&rq->nr_iowait);
6697 				delayacct_blkio_start();
6698 			}
6699 		}
6700 		switch_count = &prev->nvcsw;
6701 	}
6702 
6703 	next = pick_next_task(rq, prev, &rf);
6704 	clear_tsk_need_resched(prev);
6705 	clear_preempt_need_resched();
6706 #ifdef CONFIG_SCHED_DEBUG
6707 	rq->last_seen_need_resched_ns = 0;
6708 #endif
6709 
6710 	if (likely(prev != next)) {
6711 		rq->nr_switches++;
6712 		/*
6713 		 * RCU users of rcu_dereference(rq->curr) may not see
6714 		 * changes to task_struct made by pick_next_task().
6715 		 */
6716 		RCU_INIT_POINTER(rq->curr, next);
6717 		/*
6718 		 * The membarrier system call requires each architecture
6719 		 * to have a full memory barrier after updating
6720 		 * rq->curr, before returning to user-space.
6721 		 *
6722 		 * Here are the schemes providing that barrier on the
6723 		 * various architectures:
6724 		 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC,
6725 		 *   RISC-V.  switch_mm() relies on membarrier_arch_switch_mm()
6726 		 *   on PowerPC and on RISC-V.
6727 		 * - finish_lock_switch() for weakly-ordered
6728 		 *   architectures where spin_unlock is a full barrier,
6729 		 * - switch_to() for arm64 (weakly-ordered, spin_unlock
6730 		 *   is a RELEASE barrier),
6731 		 *
6732 		 * The barrier matches a full barrier in the proximity of
6733 		 * the membarrier system call entry.
6734 		 *
6735 		 * On RISC-V, this barrier pairing is also needed for the
6736 		 * SYNC_CORE command when switching between processes, cf.
6737 		 * the inline comments in membarrier_arch_switch_mm().
6738 		 */
6739 		++*switch_count;
6740 
6741 		migrate_disable_switch(rq, prev);
6742 		psi_account_irqtime(rq, prev, next);
6743 		psi_sched_switch(prev, next, !task_on_rq_queued(prev));
6744 
6745 		trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state);
6746 
6747 		/* Also unlocks the rq: */
6748 		rq = context_switch(rq, prev, next, &rf);
6749 	} else {
6750 		rq_unpin_lock(rq, &rf);
6751 		__balance_callbacks(rq);
6752 		raw_spin_rq_unlock_irq(rq);
6753 	}
6754 }
6755 
6756 void __noreturn do_task_dead(void)
6757 {
6758 	/* Causes final put_task_struct in finish_task_switch(): */
6759 	set_special_state(TASK_DEAD);
6760 
6761 	/* Tell freezer to ignore us: */
6762 	current->flags |= PF_NOFREEZE;
6763 
6764 	__schedule(SM_NONE);
6765 	BUG();
6766 
6767 	/* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
6768 	for (;;)
6769 		cpu_relax();
6770 }
6771 
6772 static inline void sched_submit_work(struct task_struct *tsk)
6773 {
6774 	static DEFINE_WAIT_OVERRIDE_MAP(sched_map, LD_WAIT_CONFIG);
6775 	unsigned int task_flags;
6776 
6777 	/*
6778 	 * Establish LD_WAIT_CONFIG context to ensure none of the code called
6779 	 * will use a blocking primitive -- which would lead to recursion.
6780 	 */
6781 	lock_map_acquire_try(&sched_map);
6782 
6783 	task_flags = tsk->flags;
6784 	/*
6785 	 * If a worker goes to sleep, notify and ask workqueue whether it
6786 	 * wants to wake up a task to maintain concurrency.
6787 	 */
6788 	if (task_flags & PF_WQ_WORKER)
6789 		wq_worker_sleeping(tsk);
6790 	else if (task_flags & PF_IO_WORKER)
6791 		io_wq_worker_sleeping(tsk);
6792 
6793 	/*
6794 	 * spinlock and rwlock must not flush block requests.  This will
6795 	 * deadlock if the callback attempts to acquire a lock which is
6796 	 * already acquired.
6797 	 */
6798 	SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT);
6799 
6800 	/*
6801 	 * If we are going to sleep and we have plugged IO queued,
6802 	 * make sure to submit it to avoid deadlocks.
6803 	 */
6804 	blk_flush_plug(tsk->plug, true);
6805 
6806 	lock_map_release(&sched_map);
6807 }
6808 
6809 static void sched_update_worker(struct task_struct *tsk)
6810 {
6811 	if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER | PF_BLOCK_TS)) {
6812 		if (tsk->flags & PF_BLOCK_TS)
6813 			blk_plug_invalidate_ts(tsk);
6814 		if (tsk->flags & PF_WQ_WORKER)
6815 			wq_worker_running(tsk);
6816 		else if (tsk->flags & PF_IO_WORKER)
6817 			io_wq_worker_running(tsk);
6818 	}
6819 }
6820 
6821 static __always_inline void __schedule_loop(unsigned int sched_mode)
6822 {
6823 	do {
6824 		preempt_disable();
6825 		__schedule(sched_mode);
6826 		sched_preempt_enable_no_resched();
6827 	} while (need_resched());
6828 }
6829 
6830 asmlinkage __visible void __sched schedule(void)
6831 {
6832 	struct task_struct *tsk = current;
6833 
6834 #ifdef CONFIG_RT_MUTEXES
6835 	lockdep_assert(!tsk->sched_rt_mutex);
6836 #endif
6837 
6838 	if (!task_is_running(tsk))
6839 		sched_submit_work(tsk);
6840 	__schedule_loop(SM_NONE);
6841 	sched_update_worker(tsk);
6842 }
6843 EXPORT_SYMBOL(schedule);
6844 
6845 /*
6846  * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
6847  * state (have scheduled out non-voluntarily) by making sure that all
6848  * tasks have either left the run queue or have gone into user space.
6849  * As idle tasks do not do either, they must not ever be preempted
6850  * (schedule out non-voluntarily).
6851  *
 * schedule_idle() is similar to schedule_preempt_disabled() except that it
6853  * never enables preemption because it does not call sched_submit_work().
6854  */
6855 void __sched schedule_idle(void)
6856 {
6857 	/*
	 * As this skips calling sched_submit_work(), which is fine for the
	 * idle task because that call is effectively a NOP for a task in the
	 * TASK_RUNNING state anyway, make sure this isn't used someplace where
	 * the current task can be in any other state. Note, idle is always in
	 * the TASK_RUNNING state.
6863 	 */
6864 	WARN_ON_ONCE(current->__state);
6865 	do {
6866 		__schedule(SM_NONE);
6867 	} while (need_resched());
6868 }
6869 
6870 #if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK)
6871 asmlinkage __visible void __sched schedule_user(void)
6872 {
6873 	/*
6874 	 * If we come here after a random call to set_need_resched(),
6875 	 * or we have been woken up remotely but the IPI has not yet arrived,
6876 	 * we haven't yet exited the RCU idle mode. Do it here manually until
6877 	 * we find a better solution.
6878 	 *
6879 	 * NB: There are buggy callers of this function.  Ideally we
	 * should warn if prev_state != CONTEXT_USER, but that would trigger
	 * too frequently to be useful yet.
6882 	 */
6883 	enum ctx_state prev_state = exception_enter();
6884 	schedule();
6885 	exception_exit(prev_state);
6886 }
6887 #endif
6888 
6889 /**
6890  * schedule_preempt_disabled - called with preemption disabled
6891  *
6892  * Returns with preemption disabled. Note: preempt_count must be 1
6893  */
6894 void __sched schedule_preempt_disabled(void)
6895 {
6896 	sched_preempt_enable_no_resched();
6897 	schedule();
6898 	preempt_disable();
6899 }
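
/*
 * An illustrative calling pattern (a sketch, not lifted from a particular
 * caller): code that must keep preemption disabled across its own
 * bookkeeping but wants to block in the middle, entering and leaving with
 * preempt_count() == 1:
 *
 *	preempt_disable();
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (my_condition())		// hypothetical predicate
 *			break;
 *		schedule_preempt_disabled();	// sleeps, returns preempt-disabled
 *	}
 *	__set_current_state(TASK_RUNNING);
 *	preempt_enable();
 */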
6900 
6901 #ifdef CONFIG_PREEMPT_RT
6902 void __sched notrace schedule_rtlock(void)
6903 {
6904 	__schedule_loop(SM_RTLOCK_WAIT);
6905 }
6906 NOKPROBE_SYMBOL(schedule_rtlock);
6907 #endif
6908 
6909 static void __sched notrace preempt_schedule_common(void)
6910 {
6911 	do {
6912 		/*
6913 		 * Because the function tracer can trace preempt_count_sub()
6914 		 * and it also uses preempt_enable/disable_notrace(), if
6915 		 * NEED_RESCHED is set, the preempt_enable_notrace() called
6916 		 * by the function tracer will call this function again and
6917 		 * cause infinite recursion.
6918 		 *
6919 		 * Preemption must be disabled here before the function
6920 		 * tracer can trace. Break up preempt_disable() into two
6921 		 * calls. One to disable preemption without fear of being
6922 		 * traced. The other to still record the preemption latency,
6923 		 * which can also be traced by the function tracer.
6924 		 */
6925 		preempt_disable_notrace();
6926 		preempt_latency_start(1);
6927 		__schedule(SM_PREEMPT);
6928 		preempt_latency_stop(1);
6929 		preempt_enable_no_resched_notrace();
6930 
6931 		/*
6932 		 * Check again in case we missed a preemption opportunity
6933 		 * between schedule and now.
6934 		 */
6935 	} while (need_resched());
6936 }
6937 
6938 #ifdef CONFIG_PREEMPTION
6939 /*
6940  * This is the entry point to schedule() from in-kernel preemption
6941  * off of preempt_enable.
6942  */
6943 asmlinkage __visible void __sched notrace preempt_schedule(void)
6944 {
6945 	/*
6946 	 * If there is a non-zero preempt_count or interrupts are disabled,
	 * we do not want to preempt the current task. Just return.
6948 	 */
6949 	if (likely(!preemptible()))
6950 		return;
6951 	preempt_schedule_common();
6952 }
6953 NOKPROBE_SYMBOL(preempt_schedule);
6954 EXPORT_SYMBOL(preempt_schedule);
6955 
6956 #ifdef CONFIG_PREEMPT_DYNAMIC
6957 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
6958 #ifndef preempt_schedule_dynamic_enabled
6959 #define preempt_schedule_dynamic_enabled	preempt_schedule
6960 #define preempt_schedule_dynamic_disabled	NULL
6961 #endif
6962 DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
6963 EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
6964 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
6965 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);
6966 void __sched notrace dynamic_preempt_schedule(void)
6967 {
6968 	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
6969 		return;
6970 	preempt_schedule();
6971 }
6972 NOKPROBE_SYMBOL(dynamic_preempt_schedule);
6973 EXPORT_SYMBOL(dynamic_preempt_schedule);
6974 #endif
6975 #endif
6976 
6977 /**
6978  * preempt_schedule_notrace - preempt_schedule called by tracing
6979  *
6980  * The tracing infrastructure uses preempt_enable_notrace to prevent
6981  * recursion and tracing preempt enabling caused by the tracing
6982  * infrastructure itself. But as tracing can happen in areas coming
6983  * from userspace or just about to enter userspace, a preempt enable
6984  * can occur before user_exit() is called. This will cause the scheduler
6985  * to be called when the system is still in usermode.
6986  *
6987  * To prevent this, the preempt_enable_notrace will use this function
6988  * instead of preempt_schedule() to exit user context if needed before
6989  * calling the scheduler.
6990  */
6991 asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
6992 {
6993 	enum ctx_state prev_ctx;
6994 
6995 	if (likely(!preemptible()))
6996 		return;
6997 
6998 	do {
6999 		/*
7000 		 * Because the function tracer can trace preempt_count_sub()
7001 		 * and it also uses preempt_enable/disable_notrace(), if
7002 		 * NEED_RESCHED is set, the preempt_enable_notrace() called
7003 		 * by the function tracer will call this function again and
7004 		 * cause infinite recursion.
7005 		 *
7006 		 * Preemption must be disabled here before the function
7007 		 * tracer can trace. Break up preempt_disable() into two
7008 		 * calls. One to disable preemption without fear of being
7009 		 * traced. The other to still record the preemption latency,
7010 		 * which can also be traced by the function tracer.
7011 		 */
7012 		preempt_disable_notrace();
7013 		preempt_latency_start(1);
7014 		/*
7015 		 * Needs preempt disabled in case user_exit() is traced
7016 		 * and the tracer calls preempt_enable_notrace() causing
7017 		 * an infinite recursion.
7018 		 */
7019 		prev_ctx = exception_enter();
7020 		__schedule(SM_PREEMPT);
7021 		exception_exit(prev_ctx);
7022 
7023 		preempt_latency_stop(1);
7024 		preempt_enable_no_resched_notrace();
7025 	} while (need_resched());
7026 }
7027 EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
7028 
7029 #ifdef CONFIG_PREEMPT_DYNAMIC
7030 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
7031 #ifndef preempt_schedule_notrace_dynamic_enabled
7032 #define preempt_schedule_notrace_dynamic_enabled	preempt_schedule_notrace
7033 #define preempt_schedule_notrace_dynamic_disabled	NULL
7034 #endif
7035 DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
7036 EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
7037 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7038 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace);
7039 void __sched notrace dynamic_preempt_schedule_notrace(void)
7040 {
7041 	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace))
7042 		return;
7043 	preempt_schedule_notrace();
7044 }
7045 NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace);
7046 EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
7047 #endif
7048 #endif
7049 
7050 #endif /* CONFIG_PREEMPTION */
7051 
7052 /*
7053  * This is the entry point to schedule() from kernel preemption
7054  * off of irq context.
 * Note that this is called and returns with IRQs disabled. This
 * protects us against recursive calls from IRQ context.
7057  */
7058 asmlinkage __visible void __sched preempt_schedule_irq(void)
7059 {
7060 	enum ctx_state prev_state;
7061 
7062 	/* Catch callers which need to be fixed */
7063 	BUG_ON(preempt_count() || !irqs_disabled());
7064 
7065 	prev_state = exception_enter();
7066 
7067 	do {
7068 		preempt_disable();
7069 		local_irq_enable();
7070 		__schedule(SM_PREEMPT);
7071 		local_irq_disable();
7072 		sched_preempt_enable_no_resched();
7073 	} while (need_resched());
7074 
7075 	exception_exit(prev_state);
7076 }
7077 
7078 int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
7079 			  void *key)
7080 {
7081 	WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~(WF_SYNC|WF_CURRENT_CPU));
7082 	return try_to_wake_up(curr->private, mode, wake_flags);
7083 }
7084 EXPORT_SYMBOL(default_wake_function);
7085 
7086 static void __setscheduler_prio(struct task_struct *p, int prio)
7087 {
7088 	if (dl_prio(prio))
7089 		p->sched_class = &dl_sched_class;
7090 	else if (rt_prio(prio))
7091 		p->sched_class = &rt_sched_class;
7092 	else
7093 		p->sched_class = &fair_sched_class;
7094 
7095 	p->prio = prio;
7096 }
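
/*
 * For reference, a summary of the effective priority ranges that drive the
 * class selection above (see include/linux/sched/prio.h for the
 * authoritative values):
 *
 *	deadline:	prio == -1		(dl_prio() true)
 *	realtime:	prio in [0, 99]		(rt_prio() true)
 *	fair:		prio in [100, 139]	(nice 0 maps to 120)
 */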
7097 
7098 #ifdef CONFIG_RT_MUTEXES
7099 
7100 /*
7101  * Would be more useful with typeof()/auto_type but they don't mix with
7102  * bit-fields. Since it's a local thing, use int. Keep the generic sounding
7103  * name such that if someone were to implement this function we get to compare
7104  * notes.
7105  */
7106 #define fetch_and_set(x, v) ({ int _x = (x); (x) = (v); _x; })
7107 
7108 void rt_mutex_pre_schedule(void)
7109 {
7110 	lockdep_assert(!fetch_and_set(current->sched_rt_mutex, 1));
7111 	sched_submit_work(current);
7112 }
7113 
7114 void rt_mutex_schedule(void)
7115 {
7116 	lockdep_assert(current->sched_rt_mutex);
7117 	__schedule_loop(SM_NONE);
7118 }
7119 
7120 void rt_mutex_post_schedule(void)
7121 {
7122 	sched_update_worker(current);
7123 	lockdep_assert(fetch_and_set(current->sched_rt_mutex, 0));
7124 }
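
/*
 * Expected calling sequence (an illustrative sketch, not a verbatim copy of
 * the rt_mutex slowpath): the submit/update work is done once outside the
 * actual wait loop so the schedule() calls inside it stay cheap:
 *
 *	rt_mutex_pre_schedule();
 *	... enqueue waiter, drop the wait lock ...
 *	while (!acquired)
 *		rt_mutex_schedule();
 *	... dequeue waiter ...
 *	rt_mutex_post_schedule();
 */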
7125 
7126 static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
7127 {
7128 	if (pi_task)
7129 		prio = min(prio, pi_task->prio);
7130 
7131 	return prio;
7132 }
7133 
7134 static inline int rt_effective_prio(struct task_struct *p, int prio)
7135 {
7136 	struct task_struct *pi_task = rt_mutex_get_top_task(p);
7137 
7138 	return __rt_effective_prio(pi_task, prio);
7139 }
7140 
7141 /*
7142  * rt_mutex_setprio - set the current priority of a task
7143  * @p: task to boost
7144  * @pi_task: donor task
7145  *
7146  * This function changes the 'effective' priority of a task. It does
7147  * not touch ->normal_prio like __setscheduler().
7148  *
7149  * Used by the rt_mutex code to implement priority inheritance
7150  * logic. Call site only calls if the priority of the task changed.
7151  */
7152 void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
7153 {
7154 	int prio, oldprio, queued, running, queue_flag =
7155 		DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
7156 	const struct sched_class *prev_class;
7157 	struct rq_flags rf;
7158 	struct rq *rq;
7159 
7160 	/* XXX used to be waiter->prio, not waiter->task->prio */
7161 	prio = __rt_effective_prio(pi_task, p->normal_prio);
7162 
7163 	/*
	 * If nothing changed, bail early.
7165 	 */
7166 	if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio))
7167 		return;
7168 
7169 	rq = __task_rq_lock(p, &rf);
7170 	update_rq_clock(rq);
7171 	/*
7172 	 * Set under pi_lock && rq->lock, such that the value can be used under
7173 	 * either lock.
7174 	 *
	 * Note that a lot of trickiness is needed to make this pointer cache work
7176 	 * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
7177 	 * ensure a task is de-boosted (pi_task is set to NULL) before the
7178 	 * task is allowed to run again (and can exit). This ensures the pointer
7179 	 * points to a blocked task -- which guarantees the task is present.
7180 	 */
7181 	p->pi_top_task = pi_task;
7182 
7183 	/*
	 * For FIFO/RR we only need to set prio; if that matches we're done.
7185 	 */
7186 	if (prio == p->prio && !dl_prio(prio))
7187 		goto out_unlock;
7188 
7189 	/*
	 * Idle task boosting is a no-no in general. There is one
	 * exception, when PREEMPT_RT and NOHZ are active:
7192 	 *
7193 	 * The idle task calls get_next_timer_interrupt() and holds
7194 	 * the timer wheel base->lock on the CPU and another CPU wants
7195 	 * to access the timer (probably to cancel it). We can safely
7196 	 * ignore the boosting request, as the idle CPU runs this code
7197 	 * with interrupts disabled and will complete the lock
7198 	 * protected section without being interrupted. So there is no
7199 	 * real need to boost.
7200 	 */
7201 	if (unlikely(p == rq->idle)) {
7202 		WARN_ON(p != rq->curr);
7203 		WARN_ON(p->pi_blocked_on);
7204 		goto out_unlock;
7205 	}
7206 
7207 	trace_sched_pi_setprio(p, pi_task);
7208 	oldprio = p->prio;
7209 
7210 	if (oldprio == prio)
7211 		queue_flag &= ~DEQUEUE_MOVE;
7212 
7213 	prev_class = p->sched_class;
7214 	queued = task_on_rq_queued(p);
7215 	running = task_current(rq, p);
7216 	if (queued)
7217 		dequeue_task(rq, p, queue_flag);
7218 	if (running)
7219 		put_prev_task(rq, p);
7220 
7221 	/*
	 * Boosting conditions are:
7223 	 * 1. -rt task is running and holds mutex A
7224 	 *      --> -dl task blocks on mutex A
7225 	 *
7226 	 * 2. -dl task is running and holds mutex A
7227 	 *      --> -dl task blocks on mutex A and could preempt the
7228 	 *          running task
7229 	 */
7230 	if (dl_prio(prio)) {
7231 		if (!dl_prio(p->normal_prio) ||
7232 		    (pi_task && dl_prio(pi_task->prio) &&
7233 		     dl_entity_preempt(&pi_task->dl, &p->dl))) {
7234 			p->dl.pi_se = pi_task->dl.pi_se;
7235 			queue_flag |= ENQUEUE_REPLENISH;
7236 		} else {
7237 			p->dl.pi_se = &p->dl;
7238 		}
7239 	} else if (rt_prio(prio)) {
7240 		if (dl_prio(oldprio))
7241 			p->dl.pi_se = &p->dl;
7242 		if (oldprio < prio)
7243 			queue_flag |= ENQUEUE_HEAD;
7244 	} else {
7245 		if (dl_prio(oldprio))
7246 			p->dl.pi_se = &p->dl;
7247 		if (rt_prio(oldprio))
7248 			p->rt.timeout = 0;
7249 	}
7250 
7251 	__setscheduler_prio(p, prio);
7252 
7253 	if (queued)
7254 		enqueue_task(rq, p, queue_flag);
7255 	if (running)
7256 		set_next_task(rq, p);
7257 
7258 	check_class_changed(rq, p, prev_class, oldprio);
7259 out_unlock:
7260 	/* Prevent the rq from going away on us: */
7261 	preempt_disable();
7262 
7263 	rq_unpin_lock(rq, &rf);
7264 	__balance_callbacks(rq);
7265 	raw_spin_rq_unlock(rq);
7266 
7267 	preempt_enable();
7268 }
7269 #else
7270 static inline int rt_effective_prio(struct task_struct *p, int prio)
7271 {
7272 	return prio;
7273 }
7274 #endif
7275 
7276 void set_user_nice(struct task_struct *p, long nice)
7277 {
7278 	bool queued, running;
7279 	struct rq *rq;
7280 	int old_prio;
7281 
7282 	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
7283 		return;
7284 	/*
7285 	 * We have to be careful, if called from sys_setpriority(),
7286 	 * the task might be in the middle of scheduling on another CPU.
7287 	 */
7288 	CLASS(task_rq_lock, rq_guard)(p);
7289 	rq = rq_guard.rq;
7290 
7291 	update_rq_clock(rq);
7292 
7293 	/*
7294 	 * The RT priorities are set via sched_setscheduler(), but we still
7295 	 * allow the 'normal' nice value to be set - but as expected
7296 	 * it won't have any effect on scheduling as long as the task has a
7297 	 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR policy:
7298 	 */
7299 	if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
7300 		p->static_prio = NICE_TO_PRIO(nice);
7301 		return;
7302 	}
7303 
7304 	queued = task_on_rq_queued(p);
7305 	running = task_current(rq, p);
7306 	if (queued)
7307 		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
7308 	if (running)
7309 		put_prev_task(rq, p);
7310 
7311 	p->static_prio = NICE_TO_PRIO(nice);
7312 	set_load_weight(p, true);
7313 	old_prio = p->prio;
7314 	p->prio = effective_prio(p);
7315 
7316 	if (queued)
7317 		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
7318 	if (running)
7319 		set_next_task(rq, p);
7320 
7321 	/*
7322 	 * If the task increased its priority or is running and
7323 	 * lowered its priority, then reschedule its CPU:
7324 	 */
7325 	p->sched_class->prio_changed(rq, p, old_prio);
7326 }
7327 EXPORT_SYMBOL(set_user_nice);
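
/*
 * Illustrative in-kernel use of set_user_nice() (not part of this file):
 * a background kthread that should not compete with regular work can
 * demote itself, e.g.
 *
 *	set_user_nice(current, MAX_NICE);
 *
 * Per the policy check above, this only affects scheduling while the task
 * is in a fair class; for RT/DL tasks only the static priority is recorded.
 */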
7328 
7329 /*
7330  * is_nice_reduction - check if nice value is an actual reduction
7331  *
7332  * Similar to can_nice() but does not perform a capability check.
7333  *
7334  * @p: task
7335  * @nice: nice value
7336  */
7337 static bool is_nice_reduction(const struct task_struct *p, const int nice)
7338 {
7339 	/* Convert nice value [19,-20] to rlimit style value [1,40]: */
7340 	int nice_rlim = nice_to_rlimit(nice);
7341 
7342 	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE));
7343 }
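
/*
 * Illustrative mapping (not part of this file): nice_to_rlimit() converts
 * the user-visible nice range [19, -20] into the RLIMIT_NICE style range
 * [1, 40], i.e. rlimit = 20 - nice:
 *
 *	nice  19 -> rlimit  1	(allowed whenever RLIMIT_NICE >= 1)
 *	nice   0 -> rlimit 20	(needs RLIMIT_NICE >= 20)
 *	nice -20 -> rlimit 40	(needs RLIMIT_NICE >= 40)
 *
 * So a task with task_rlimit(p, RLIMIT_NICE) == 25 may lower its nice
 * value down to -5, but no further, without CAP_SYS_NICE.
 */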
7344 
7345 /*
7346  * can_nice - check if a task can reduce its nice value
7347  * @p: task
7348  * @nice: nice value
7349  */
7350 int can_nice(const struct task_struct *p, const int nice)
7351 {
7352 	return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE);
7353 }
7354 
7355 #ifdef __ARCH_WANT_SYS_NICE
7356 
7357 /*
7358  * sys_nice - change the priority of the current process.
7359  * @increment: priority increment
7360  *
7361  * sys_setpriority is a more generic, but much slower function that
7362  * does similar things.
7363  */
7364 SYSCALL_DEFINE1(nice, int, increment)
7365 {
7366 	long nice, retval;
7367 
7368 	/*
7369 	 * Setpriority might change our priority at the same moment.
7370 	 * We don't have to worry. Conceptually one call occurs first
7371 	 * and we have a single winner.
7372 	 */
7373 	increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
7374 	nice = task_nice(current) + increment;
7375 
7376 	nice = clamp_val(nice, MIN_NICE, MAX_NICE);
7377 	if (increment < 0 && !can_nice(current, nice))
7378 		return -EPERM;
7379 
7380 	retval = security_task_setnice(current, nice);
7381 	if (retval)
7382 		return retval;
7383 
7384 	set_user_nice(current, nice);
7385 	return 0;
7386 }
7387 
7388 #endif
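
/*
 * Illustrative user-space counterpart (not part of this file): calling
 * nice(2) from an unprivileged process. The increment is clamped and the
 * permission check above applies for negative increments:
 *
 *	#include <unistd.h>
 *	#include <errno.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		errno = 0;
 *		int prio = nice(5);		// lower our priority by 5
 *
 *		if (prio == -1 && errno)
 *			perror("nice");
 *		else
 *			printf("new nice value: %d\n", prio);
 *		return 0;
 *	}
 */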
7389 
7390 /**
7391  * task_prio - return the priority value of a given task.
7392  * @p: the task in question.
7393  *
7394  * Return: The priority value as seen by users in /proc.
7395  *
7396  * sched policy         return value   kernel prio    user prio/nice
7397  *
7398  * normal, batch, idle     [0 ... 39]  [100 ... 139]          0/[-20 ... 19]
7399  * fifo, rr             [-2 ... -100]     [98 ... 0]  [1 ... 99]
7400  * deadline                     -101             -1           0
7401  */
7402 int task_prio(const struct task_struct *p)
7403 {
7404 	return p->prio - MAX_RT_PRIO;
7405 }
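
/*
 * Worked example (illustrative): a SCHED_NORMAL task at nice 0 has
 * p->prio == NICE_TO_PRIO(0) == 120, so task_prio() returns
 * 120 - MAX_RT_PRIO == 20, matching the first table row above. A
 * SCHED_FIFO task with rt_priority 50 has p->prio == 99 - 50 == 49 and
 * task_prio() returns -51, in the [-2 ... -100] range shown above.
 */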
7406 
7407 /**
7408  * idle_cpu - is a given CPU idle currently?
7409  * @cpu: the processor in question.
7410  *
7411  * Return: 1 if the CPU is currently idle. 0 otherwise.
7412  */
7413 int idle_cpu(int cpu)
7414 {
7415 	struct rq *rq = cpu_rq(cpu);
7416 
7417 	if (rq->curr != rq->idle)
7418 		return 0;
7419 
7420 	if (rq->nr_running)
7421 		return 0;
7422 
7423 #ifdef CONFIG_SMP
7424 	if (rq->ttwu_pending)
7425 		return 0;
7426 #endif
7427 
7428 	return 1;
7429 }
7430 
7431 /**
7432  * available_idle_cpu - is a given CPU idle for enqueuing work.
7433  * @cpu: the CPU in question.
7434  *
7435  * Return: 1 if the CPU is currently idle. 0 otherwise.
7436  */
7437 int available_idle_cpu(int cpu)
7438 {
7439 	if (!idle_cpu(cpu))
7440 		return 0;
7441 
7442 	if (vcpu_is_preempted(cpu))
7443 		return 0;
7444 
7445 	return 1;
7446 }
7447 
7448 /**
7449  * idle_task - return the idle task for a given CPU.
7450  * @cpu: the processor in question.
7451  *
7452  * Return: The idle task for the CPU @cpu.
7453  */
7454 struct task_struct *idle_task(int cpu)
7455 {
7456 	return cpu_rq(cpu)->idle;
7457 }
7458 
7459 #ifdef CONFIG_SCHED_CORE
7460 int sched_core_idle_cpu(int cpu)
7461 {
7462 	struct rq *rq = cpu_rq(cpu);
7463 
7464 	if (sched_core_enabled(rq) && rq->curr == rq->idle)
7465 		return 1;
7466 
7467 	return idle_cpu(cpu);
7468 }
7469 
7470 #endif
7471 
7472 #ifdef CONFIG_SMP
7473 /*
7474  * This function computes an effective utilization for the given CPU, to be
7475  * used for frequency selection given the linear relation: f = u * f_max.
7476  *
7477  * The scheduler tracks the following metrics:
7478  *
7479  *   cpu_util_{cfs,rt,dl,irq}()
7480  *   cpu_bw_dl()
7481  *
7482  * Where the cfs,rt and dl util numbers are tracked with the same metric and
7483  * synchronized windows and are thus directly comparable.
7484  *
7485  * The cfs,rt,dl utilization are the running times measured with rq->clock_task
7486  * which excludes things like IRQ and steal-time. These latter are then accrued
7487  * in the irq utilization.
7488  *
7489  * The DL bandwidth number otoh is not a measured metric but a value computed
7490  * based on the task model parameters and gives the minimal utilization
7491  * required to meet deadlines.
7492  */
7493 unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
7494 				 unsigned long *min,
7495 				 unsigned long *max)
7496 {
7497 	unsigned long util, irq, scale;
7498 	struct rq *rq = cpu_rq(cpu);
7499 
7500 	scale = arch_scale_cpu_capacity(cpu);
7501 
7502 	/*
7503 	 * Early check to see if IRQ/steal time saturates the CPU; this can
7504 	 * happen because of inaccuracies in how we track these -- see
7505 	 * update_irq_load_avg().
7506 	 */
7507 	irq = cpu_util_irq(rq);
7508 	if (unlikely(irq >= scale)) {
7509 		if (min)
7510 			*min = scale;
7511 		if (max)
7512 			*max = scale;
7513 		return scale;
7514 	}
7515 
7516 	if (min) {
7517 		/*
7518 		 * The minimum utilization returns the highest level between:
7519 		 * - the computed DL bandwidth needed with the IRQ pressure which
7520 		 *   steals time from the deadline task.
7521 		 * - the minimum performance requirement for CFS and/or RT.
7522 		 */
7523 		*min = max(irq + cpu_bw_dl(rq), uclamp_rq_get(rq, UCLAMP_MIN));
7524 
7525 		/*
7526 		 * When an RT task is runnable and uclamp is not used, we must
7527 		 * ensure that the task will run at maximum compute capacity.
7528 		 */
7529 		if (!uclamp_is_used() && rt_rq_is_runnable(&rq->rt))
7530 			*min = max(*min, scale);
7531 	}
7532 
7533 	/*
7534 	 * Because the time spent on RT/DL tasks is visible as 'lost' time to
7535 	 * CFS tasks and we use the same metric to track the effective
7536 	 * utilization (PELT windows are synchronized) we can directly add them
7537 	 * to obtain the CPU's actual utilization.
7538 	 */
7539 	util = util_cfs + cpu_util_rt(rq);
7540 	util += cpu_util_dl(rq);
7541 
7542 	/*
7543 	 * The maximum hint is a soft bandwidth requirement, which can be lower
7544 	 * than the actual utilization because of uclamp_max requirements.
7545 	 */
7546 	if (max)
7547 		*max = min(scale, uclamp_rq_get(rq, UCLAMP_MAX));
7548 
7549 	if (util >= scale)
7550 		return scale;
7551 
7552 	/*
7553 	 * There is still idle time; further improve the number by using the
7554 	 * irq metric. Because IRQ/steal time is hidden from the task clock we
7555 	 * need to scale the task numbers:
7556 	 *
7557 	 *              max - irq
7558 	 *   U' = irq + --------- * U
7559 	 *                 max
7560 	 */
7561 	util = scale_irq_capacity(util, irq, scale);
7562 	util += irq;
7563 
7564 	return min(scale, util);
7565 }
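
/*
 * Worked example for the IRQ scaling step above (illustrative numbers):
 * with scale == 1024, irq == 256 and util == 512 as measured on the task
 * clock, scale_irq_capacity() gives
 *
 *	U' = irq + ((scale - irq) / scale) * util
 *	   = 256 + (768 / 1024) * 512 = 256 + 384 = 640
 *
 * i.e. the task-clock utilization only covers 75% of wall time once 25%
 * of the CPU is consumed by IRQ/steal, so the effective utilization is
 * reported as 640 out of 1024.
 */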
7566 
7567 unsigned long sched_cpu_util(int cpu)
7568 {
7569 	return effective_cpu_util(cpu, cpu_util_cfs(cpu), NULL, NULL);
7570 }
7571 #endif /* CONFIG_SMP */
7572 
7573 /**
7574  * find_process_by_pid - find a process with a matching PID value.
7575  * @pid: the pid in question.
7576  *
7577  * Return: the task of @pid, if found. %NULL otherwise.
7578  */
7579 static struct task_struct *find_process_by_pid(pid_t pid)
7580 {
7581 	return pid ? find_task_by_vpid(pid) : current;
7582 }
7583 
7584 static struct task_struct *find_get_task(pid_t pid)
7585 {
7586 	struct task_struct *p;
7587 	guard(rcu)();
7588 
7589 	p = find_process_by_pid(pid);
7590 	if (likely(p))
7591 		get_task_struct(p);
7592 
7593 	return p;
7594 }
7595 
7596 DEFINE_CLASS(find_get_task, struct task_struct *, if (_T) put_task_struct(_T),
7597 	     find_get_task(pid), pid_t pid)
7598 
7599 /*
7600  * sched_setparam() passes in -1 for its policy, to let the functions
7601  * it calls know not to change it.
7602  */
7603 #define SETPARAM_POLICY	-1
7604 
7605 static void __setscheduler_params(struct task_struct *p,
7606 		const struct sched_attr *attr)
7607 {
7608 	int policy = attr->sched_policy;
7609 
7610 	if (policy == SETPARAM_POLICY)
7611 		policy = p->policy;
7612 
7613 	p->policy = policy;
7614 
7615 	if (dl_policy(policy))
7616 		__setparam_dl(p, attr);
7617 	else if (fair_policy(policy))
7618 		p->static_prio = NICE_TO_PRIO(attr->sched_nice);
7619 
7620 	/*
7621 	 * __sched_setscheduler() ensures attr->sched_priority == 0 when
7622 	 * !rt_policy. Always setting this ensures that things like
7623 	 * getparam()/getattr() don't report silly values for !rt tasks.
7624 	 */
7625 	p->rt_priority = attr->sched_priority;
7626 	p->normal_prio = normal_prio(p);
7627 	set_load_weight(p, true);
7628 }
7629 
7630 /*
7631  * Check that the target process has a UID that matches the current process's:
7632  */
7633 static bool check_same_owner(struct task_struct *p)
7634 {
7635 	const struct cred *cred = current_cred(), *pcred;
7636 	guard(rcu)();
7637 
7638 	pcred = __task_cred(p);
7639 	return (uid_eq(cred->euid, pcred->euid) ||
7640 		uid_eq(cred->euid, pcred->uid));
7641 }
7642 
7643 /*
7644  * Allow unprivileged RT tasks to decrease priority.
7645  * Only issue a capable test if needed and only once to avoid an audit
7646  * event on permitted non-privileged operations:
7647  */
7648 static int user_check_sched_setscheduler(struct task_struct *p,
7649 					 const struct sched_attr *attr,
7650 					 int policy, int reset_on_fork)
7651 {
7652 	if (fair_policy(policy)) {
7653 		if (attr->sched_nice < task_nice(p) &&
7654 		    !is_nice_reduction(p, attr->sched_nice))
7655 			goto req_priv;
7656 	}
7657 
7658 	if (rt_policy(policy)) {
7659 		unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
7660 
7661 		/* Can't set/change the rt policy: */
7662 		if (policy != p->policy && !rlim_rtprio)
7663 			goto req_priv;
7664 
7665 		/* Can't increase priority: */
7666 		if (attr->sched_priority > p->rt_priority &&
7667 		    attr->sched_priority > rlim_rtprio)
7668 			goto req_priv;
7669 	}
7670 
7671 	/*
7672 	 * Can't set/change SCHED_DEADLINE policy at all for now
7673 	 * (safest behavior); in the future we would like to allow
7674 	 * unprivileged DL tasks to increase their relative deadline
7675 	 * or reduce their runtime (both ways reducing utilization)
7676 	 */
7677 	if (dl_policy(policy))
7678 		goto req_priv;
7679 
7680 	/*
7681 	 * Treat SCHED_IDLE as nice 20. Only allow a switch to
7682 	 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
7683 	 */
7684 	if (task_has_idle_policy(p) && !idle_policy(policy)) {
7685 		if (!is_nice_reduction(p, task_nice(p)))
7686 			goto req_priv;
7687 	}
7688 
7689 	/* Can't change other user's priorities: */
7690 	if (!check_same_owner(p))
7691 		goto req_priv;
7692 
7693 	/* Normal users shall not reset the sched_reset_on_fork flag: */
7694 	if (p->sched_reset_on_fork && !reset_on_fork)
7695 		goto req_priv;
7696 
7697 	return 0;
7698 
7699 req_priv:
7700 	if (!capable(CAP_SYS_NICE))
7701 		return -EPERM;
7702 
7703 	return 0;
7704 }
7705 
7706 static int __sched_setscheduler(struct task_struct *p,
7707 				const struct sched_attr *attr,
7708 				bool user, bool pi)
7709 {
7710 	int oldpolicy = -1, policy = attr->sched_policy;
7711 	int retval, oldprio, newprio, queued, running;
7712 	const struct sched_class *prev_class;
7713 	struct balance_callback *head;
7714 	struct rq_flags rf;
7715 	int reset_on_fork;
7716 	int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
7717 	struct rq *rq;
7718 	bool cpuset_locked = false;
7719 
7720 	/* The pi code expects interrupts enabled */
7721 	BUG_ON(pi && in_interrupt());
7722 recheck:
7723 	/* Double check policy once rq lock held: */
7724 	if (policy < 0) {
7725 		reset_on_fork = p->sched_reset_on_fork;
7726 		policy = oldpolicy = p->policy;
7727 	} else {
7728 		reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);
7729 
7730 		if (!valid_policy(policy))
7731 			return -EINVAL;
7732 	}
7733 
7734 	if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV))
7735 		return -EINVAL;
7736 
7737 	/*
7738 	 * Valid priorities for SCHED_FIFO and SCHED_RR are
7739 	 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL,
7740 	 * SCHED_BATCH and SCHED_IDLE is 0.
7741 	 */
7742 	if (attr->sched_priority > MAX_RT_PRIO-1)
7743 		return -EINVAL;
7744 	if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
7745 	    (rt_policy(policy) != (attr->sched_priority != 0)))
7746 		return -EINVAL;
7747 
7748 	if (user) {
7749 		retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork);
7750 		if (retval)
7751 			return retval;
7752 
7753 		if (attr->sched_flags & SCHED_FLAG_SUGOV)
7754 			return -EINVAL;
7755 
7756 		retval = security_task_setscheduler(p);
7757 		if (retval)
7758 			return retval;
7759 	}
7760 
7761 	/* Update task specific "requested" clamps */
7762 	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) {
7763 		retval = uclamp_validate(p, attr);
7764 		if (retval)
7765 			return retval;
7766 	}
7767 
7768 	/*
7769 	 * SCHED_DEADLINE bandwidth accounting relies on stable cpusets
7770 	 * information.
7771 	 */
7772 	if (dl_policy(policy) || dl_policy(p->policy)) {
7773 		cpuset_locked = true;
7774 		cpuset_lock();
7775 	}
7776 
7777 	/*
7778 	 * Make sure no PI-waiters arrive (or leave) while we are
7779 	 * changing the priority of the task:
7780 	 *
7781 	 * To be able to change p->policy safely, the appropriate
7782 	 * runqueue lock must be held.
7783 	 */
7784 	rq = task_rq_lock(p, &rf);
7785 	update_rq_clock(rq);
7786 
7787 	/*
7788 	 * Changing the policy of the stop thread is a very bad idea:
7789 	 */
7790 	if (p == rq->stop) {
7791 		retval = -EINVAL;
7792 		goto unlock;
7793 	}
7794 
7795 	/*
7796 	 * If not changing anything there's no need to proceed further,
7797 	 * but store a possible modification of reset_on_fork.
7798 	 */
7799 	if (unlikely(policy == p->policy)) {
7800 		if (fair_policy(policy) && attr->sched_nice != task_nice(p))
7801 			goto change;
7802 		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
7803 			goto change;
7804 		if (dl_policy(policy) && dl_param_changed(p, attr))
7805 			goto change;
7806 		if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
7807 			goto change;
7808 
7809 		p->sched_reset_on_fork = reset_on_fork;
7810 		retval = 0;
7811 		goto unlock;
7812 	}
7813 change:
7814 
7815 	if (user) {
7816 #ifdef CONFIG_RT_GROUP_SCHED
7817 		/*
7818 		 * Do not allow realtime tasks into groups that have no runtime
7819 		 * assigned.
7820 		 */
7821 		if (rt_bandwidth_enabled() && rt_policy(policy) &&
7822 				task_group(p)->rt_bandwidth.rt_runtime == 0 &&
7823 				!task_group_is_autogroup(task_group(p))) {
7824 			retval = -EPERM;
7825 			goto unlock;
7826 		}
7827 #endif
7828 #ifdef CONFIG_SMP
7829 		if (dl_bandwidth_enabled() && dl_policy(policy) &&
7830 				!(attr->sched_flags & SCHED_FLAG_SUGOV)) {
7831 			cpumask_t *span = rq->rd->span;
7832 
7833 			/*
7834 			 * Don't allow tasks with an affinity mask smaller than
7835 			 * the entire root_domain to become SCHED_DEADLINE. We
7836 			 * will also fail if there's no bandwidth available.
7837 			 */
7838 			if (!cpumask_subset(span, p->cpus_ptr) ||
7839 			    rq->rd->dl_bw.bw == 0) {
7840 				retval = -EPERM;
7841 				goto unlock;
7842 			}
7843 		}
7844 #endif
7845 	}
7846 
7847 	/* Re-check policy now with rq lock held: */
7848 	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
7849 		policy = oldpolicy = -1;
7850 		task_rq_unlock(rq, p, &rf);
7851 		if (cpuset_locked)
7852 			cpuset_unlock();
7853 		goto recheck;
7854 	}
7855 
7856 	/*
7857 	 * If setscheduling to SCHED_DEADLINE (or changing the parameters
7858 	 * of a SCHED_DEADLINE task) we need to check if enough bandwidth
7859 	 * is available.
7860 	 */
7861 	if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) {
7862 		retval = -EBUSY;
7863 		goto unlock;
7864 	}
7865 
7866 	p->sched_reset_on_fork = reset_on_fork;
7867 	oldprio = p->prio;
7868 
7869 	newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice);
7870 	if (pi) {
7871 		/*
7872 		 * Take priority boosted tasks into account. If the new
7873 		 * effective priority is unchanged, we just store the new
7874 		 * normal parameters and do not touch the scheduler class and
7875 		 * the runqueue. This will be done when the task deboost
7876 		 * the runqueue. This will be done when the task deboosts
7877 		 * itself.
7878 		newprio = rt_effective_prio(p, newprio);
7879 		if (newprio == oldprio)
7880 			queue_flags &= ~DEQUEUE_MOVE;
7881 	}
7882 
7883 	queued = task_on_rq_queued(p);
7884 	running = task_current(rq, p);
7885 	if (queued)
7886 		dequeue_task(rq, p, queue_flags);
7887 	if (running)
7888 		put_prev_task(rq, p);
7889 
7890 	prev_class = p->sched_class;
7891 
7892 	if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
7893 		__setscheduler_params(p, attr);
7894 		__setscheduler_prio(p, newprio);
7895 	}
7896 	__setscheduler_uclamp(p, attr);
7897 
7898 	if (queued) {
7899 		/*
7900 		 * We enqueue to tail when the priority of a task is
7901 		 * increased (user space view).
7902 		 */
7903 		if (oldprio < p->prio)
7904 			queue_flags |= ENQUEUE_HEAD;
7905 
7906 		enqueue_task(rq, p, queue_flags);
7907 	}
7908 	if (running)
7909 		set_next_task(rq, p);
7910 
7911 	check_class_changed(rq, p, prev_class, oldprio);
7912 
7913 	/* Prevent the rq from going away on us: */
7914 	preempt_disable();
7915 	head = splice_balance_callbacks(rq);
7916 	task_rq_unlock(rq, p, &rf);
7917 
7918 	if (pi) {
7919 		if (cpuset_locked)
7920 			cpuset_unlock();
7921 		rt_mutex_adjust_pi(p);
7922 	}
7923 
7924 	/* Run balance callbacks after we've adjusted the PI chain: */
7925 	balance_callbacks(rq, head);
7926 	preempt_enable();
7927 
7928 	return 0;
7929 
7930 unlock:
7931 	task_rq_unlock(rq, p, &rf);
7932 	if (cpuset_locked)
7933 		cpuset_unlock();
7934 	return retval;
7935 }
7936 
7937 static int _sched_setscheduler(struct task_struct *p, int policy,
7938 			       const struct sched_param *param, bool check)
7939 {
7940 	struct sched_attr attr = {
7941 		.sched_policy   = policy,
7942 		.sched_priority = param->sched_priority,
7943 		.sched_nice	= PRIO_TO_NICE(p->static_prio),
7944 	};
7945 
7946 	/* Fixup the legacy SCHED_RESET_ON_FORK hack. */
7947 	if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
7948 		attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
7949 		policy &= ~SCHED_RESET_ON_FORK;
7950 		attr.sched_policy = policy;
7951 	}
7952 
7953 	return __sched_setscheduler(p, &attr, check, true);
7954 }
7955 /**
7956  * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
7957  * @p: the task in question.
7958  * @policy: new policy.
7959  * @param: structure containing the new RT priority.
7960  *
7961  * Use sched_set_fifo(), read its comment.
7962  *
7963  * Return: 0 on success. An error code otherwise.
7964  *
7965  * NOTE that the task may be already dead.
7966  */
7967 int sched_setscheduler(struct task_struct *p, int policy,
7968 		       const struct sched_param *param)
7969 {
7970 	return _sched_setscheduler(p, policy, param, true);
7971 }
7972 
7973 int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
7974 {
7975 	return __sched_setscheduler(p, attr, true, true);
7976 }
7977 
7978 int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
7979 {
7980 	return __sched_setscheduler(p, attr, false, true);
7981 }
7982 EXPORT_SYMBOL_GPL(sched_setattr_nocheck);
7983 
7984 /**
7985  * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
7986  * @p: the task in question.
7987  * @policy: new policy.
7988  * @param: structure containing the new RT priority.
7989  *
7990  * Just like sched_setscheduler, only don't bother checking if the
7991  * current context has permission.  For example, this is needed in
7992  * stop_machine(): we create temporary high priority worker threads,
7993  * but our caller might not have that capability.
7994  *
7995  * Return: 0 on success. An error code otherwise.
7996  */
7997 int sched_setscheduler_nocheck(struct task_struct *p, int policy,
7998 			       const struct sched_param *param)
7999 {
8000 	return _sched_setscheduler(p, policy, param, false);
8001 }
8002 
8003 /*
8004  * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
8005  * incapable of resource management, which is the one thing an OS really should
8006  * be doing.
8007  *
8008  * This is of course the reason it is limited to privileged users only.
8009  *
8010  * Worse still, it is fundamentally impossible to compose static priority
8011  * workloads. You cannot take two correctly working static prio workloads
8012  * and smash them together and still expect them to work.
8013  *
8014  * For this reason 'all' FIFO tasks the kernel creates are basically at:
8015  *
8016  *   MAX_RT_PRIO / 2
8017  *
8018  * The administrator _MUST_ configure the system, the kernel simply doesn't
8019  * have enough information to make a sensible choice.
8020  */
8021 void sched_set_fifo(struct task_struct *p)
8022 {
8023 	struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
8024 	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
8025 }
8026 EXPORT_SYMBOL_GPL(sched_set_fifo);
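
/*
 * Illustrative in-kernel use (not part of this file): a hypothetical
 * driver whose worker kthread must run above SCHED_NORMAL, but which has
 * no sensible way to pick a specific priority, defers that choice to the
 * administrator (e.g. via chrt(1)) and simply does:
 *
 *	#include <linux/err.h>
 *	#include <linux/kthread.h>
 *	#include <linux/sched.h>
 *
 *	static int my_worker_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			set_current_state(TASK_INTERRUPTIBLE);
 *			schedule();
 *		}
 *		return 0;
 *	}
 *
 *	static int my_driver_start(void)
 *	{
 *		struct task_struct *t = kthread_run(my_worker_fn, NULL, "my-fifo");
 *
 *		if (IS_ERR(t))
 *			return PTR_ERR(t);
 *		sched_set_fifo(t);	// MAX_RT_PRIO / 2, see above
 *		return 0;
 *	}
 */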
8027 
8028 /*
8029  * For when you don't much care about FIFO, but want to be above SCHED_NORMAL.
8030  */
8031 void sched_set_fifo_low(struct task_struct *p)
8032 {
8033 	struct sched_param sp = { .sched_priority = 1 };
8034 	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
8035 }
8036 EXPORT_SYMBOL_GPL(sched_set_fifo_low);
8037 
8038 void sched_set_normal(struct task_struct *p, int nice)
8039 {
8040 	struct sched_attr attr = {
8041 		.sched_policy = SCHED_NORMAL,
8042 		.sched_nice = nice,
8043 	};
8044 	WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0);
8045 }
8046 EXPORT_SYMBOL_GPL(sched_set_normal);
8047 
8048 static int
8049 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
8050 {
8051 	struct sched_param lparam;
8052 
8053 	if (!param || pid < 0)
8054 		return -EINVAL;
8055 	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
8056 		return -EFAULT;
8057 
8058 	CLASS(find_get_task, p)(pid);
8059 	if (!p)
8060 		return -ESRCH;
8061 
8062 	return sched_setscheduler(p, policy, &lparam);
8063 }
8064 
8065 /*
8066  * Mimics kernel/events/core.c perf_copy_attr().
8067  */
8068 static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
8069 {
8070 	u32 size;
8071 	int ret;
8072 
8073 	/* Zero the full structure, so that a short copy will be nice: */
8074 	memset(attr, 0, sizeof(*attr));
8075 
8076 	ret = get_user(size, &uattr->size);
8077 	if (ret)
8078 		return ret;
8079 
8080 	/* ABI compatibility quirk: */
8081 	if (!size)
8082 		size = SCHED_ATTR_SIZE_VER0;
8083 	if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
8084 		goto err_size;
8085 
8086 	ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
8087 	if (ret) {
8088 		if (ret == -E2BIG)
8089 			goto err_size;
8090 		return ret;
8091 	}
8092 
8093 	if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) &&
8094 	    size < SCHED_ATTR_SIZE_VER1)
8095 		return -EINVAL;
8096 
8097 	/*
8098 	 * XXX: Do we want to be lenient like existing syscalls; or do we want
8099 	 * to be strict and return an error on out-of-bounds values?
8100 	 */
8101 	attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);
8102 
8103 	return 0;
8104 
8105 err_size:
8106 	put_user(sizeof(*attr), &uattr->size);
8107 	return -E2BIG;
8108 }
8109 
8110 static void get_params(struct task_struct *p, struct sched_attr *attr)
8111 {
8112 	if (task_has_dl_policy(p))
8113 		__getparam_dl(p, attr);
8114 	else if (task_has_rt_policy(p))
8115 		attr->sched_priority = p->rt_priority;
8116 	else
8117 		attr->sched_nice = task_nice(p);
8118 }
8119 
8120 /**
8121  * sys_sched_setscheduler - set/change the scheduler policy and RT priority
8122  * @pid: the pid in question.
8123  * @policy: new policy.
8124  * @param: structure containing the new RT priority.
8125  *
8126  * Return: 0 on success. An error code otherwise.
8127  */
8128 SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
8129 {
8130 	if (policy < 0)
8131 		return -EINVAL;
8132 
8133 	return do_sched_setscheduler(pid, policy, param);
8134 }
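
/*
 * Illustrative user-space counterpart (not part of this file): switching a
 * thread to SCHED_FIFO via sched_setscheduler(2). Per the checks above this
 * needs CAP_SYS_NICE or a sufficient RLIMIT_RTPRIO:
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *	#include <sys/types.h>
 *
 *	int make_fifo(pid_t pid, int prio)
 *	{
 *		struct sched_param sp = { .sched_priority = prio };
 *
 *		if (sched_setscheduler(pid, SCHED_FIFO, &sp)) {
 *			perror("sched_setscheduler");
 *			return -1;
 *		}
 *		return 0;
 *	}
 */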
8135 
8136 /**
8137  * sys_sched_setparam - set/change the RT priority of a thread
8138  * @pid: the pid in question.
8139  * @param: structure containing the new RT priority.
8140  *
8141  * Return: 0 on success. An error code otherwise.
8142  */
8143 SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
8144 {
8145 	return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
8146 }
8147 
8148 /**
8149  * sys_sched_setattr - same as above, but with extended sched_attr
8150  * @pid: the pid in question.
8151  * @uattr: structure containing the extended parameters.
8152  * @flags: for future extension.
8153  */
8154 SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
8155 			       unsigned int, flags)
8156 {
8157 	struct sched_attr attr;
8158 	int retval;
8159 
8160 	if (!uattr || pid < 0 || flags)
8161 		return -EINVAL;
8162 
8163 	retval = sched_copy_attr(uattr, &attr);
8164 	if (retval)
8165 		return retval;
8166 
8167 	if ((int)attr.sched_policy < 0)
8168 		return -EINVAL;
8169 	if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY)
8170 		attr.sched_policy = SETPARAM_POLICY;
8171 
8172 	CLASS(find_get_task, p)(pid);
8173 	if (!p)
8174 		return -ESRCH;
8175 
8176 	if (attr.sched_flags & SCHED_FLAG_KEEP_PARAMS)
8177 		get_params(p, &attr);
8178 
8179 	return sched_setattr(p, &attr);
8180 }
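
/*
 * Illustrative user-space use of sched_setattr(2) (not part of this file).
 * There is no glibc wrapper, so the raw syscall is used; this assumes
 * __NR_sched_setattr and struct sched_attr are exported by the installed
 * kernel headers (older toolchains may have to define them by hand). The
 * 10ms/100ms SCHED_DEADLINE parameters are made-up example values and
 * setting them requires privilege, per the checks above:
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/sched.h>
 *	#include <linux/sched/types.h>
 *
 *	int main(void)
 *	{
 *		struct sched_attr attr = {
 *			.size		= sizeof(attr),
 *			.sched_policy	= SCHED_DEADLINE,
 *			.sched_runtime	= 10 * 1000 * 1000,	//  10ms
 *			.sched_deadline	= 100 * 1000 * 1000,	// 100ms
 *			.sched_period	= 100 * 1000 * 1000,	// 100ms
 *		};
 *
 *		if (syscall(__NR_sched_setattr, 0, &attr, 0)) {
 *			perror("sched_setattr");
 *			return 1;
 *		}
 *		pause();	// now running as a deadline task
 *		return 0;
 *	}
 */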
8181 
8182 /**
8183  * sys_sched_getscheduler - get the policy (scheduling class) of a thread
8184  * @pid: the pid in question.
8185  *
8186  * Return: On success, the policy of the thread. Otherwise, a negative error
8187  * code.
8188  */
8189 SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
8190 {
8191 	struct task_struct *p;
8192 	int retval;
8193 
8194 	if (pid < 0)
8195 		return -EINVAL;
8196 
8197 	guard(rcu)();
8198 	p = find_process_by_pid(pid);
8199 	if (!p)
8200 		return -ESRCH;
8201 
8202 	retval = security_task_getscheduler(p);
8203 	if (!retval) {
8204 		retval = p->policy;
8205 		if (p->sched_reset_on_fork)
8206 			retval |= SCHED_RESET_ON_FORK;
8207 	}
8208 	return retval;
8209 }
8210 
8211 /**
8212  * sys_sched_getparam - get the RT priority of a thread
8213  * @pid: the pid in question.
8214  * @param: structure containing the RT priority.
8215  *
8216  * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
8217  * code.
8218  */
8219 SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
8220 {
8221 	struct sched_param lp = { .sched_priority = 0 };
8222 	struct task_struct *p;
8223 	int retval;
8224 
8225 	if (!param || pid < 0)
8226 		return -EINVAL;
8227 
8228 	scoped_guard (rcu) {
8229 		p = find_process_by_pid(pid);
8230 		if (!p)
8231 			return -ESRCH;
8232 
8233 		retval = security_task_getscheduler(p);
8234 		if (retval)
8235 			return retval;
8236 
8237 		if (task_has_rt_policy(p))
8238 			lp.sched_priority = p->rt_priority;
8239 	}
8240 
8241 	/*
8242 	 * This one might sleep, we cannot do it with a spinlock held ...
8243 	 */
8244 	return copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
8245 }
8246 
8247 /*
8248  * Copy the kernel-sized attribute structure (which might be larger
8249  * than what user-space knows about) to user-space.
8250  *
8251  * Note that all cases are valid: user-space buffer can be larger or
8252  * smaller than the kernel-space buffer. The usual case is that both
8253  * have the same size.
8254  */
8255 static int
8256 sched_attr_copy_to_user(struct sched_attr __user *uattr,
8257 			struct sched_attr *kattr,
8258 			unsigned int usize)
8259 {
8260 	unsigned int ksize = sizeof(*kattr);
8261 
8262 	if (!access_ok(uattr, usize))
8263 		return -EFAULT;
8264 
8265 	/*
8266 	 * sched_getattr() ABI forwards and backwards compatibility:
8267 	 *
8268 	 * If usize == ksize then we just copy everything to user-space and all is good.
8269 	 *
8270 	 * If usize < ksize then we only copy as much as user-space has space for,
8271 	 * this keeps ABI compatibility as well. We skip the rest.
8272 	 *
8273 	 * If usize > ksize then user-space is using a newer version of the ABI,
8274 	 * parts of which the kernel doesn't know about. Just ignore it - tooling can
8275 	 * detect the kernel's knowledge of attributes from the attr->size value
8276 	 * which is set to ksize in this case.
8277 	 */
8278 	kattr->size = min(usize, ksize);
8279 
8280 	if (copy_to_user(uattr, kattr, kattr->size))
8281 		return -EFAULT;
8282 
8283 	return 0;
8284 }
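
/*
 * Worked example (illustrative sizes): if the running kernel's
 * sizeof(struct sched_attr) is 56 and user-space passes usize == 48 (an
 * older ABI version), only 48 bytes are copied and attr->size reads back
 * as 48. If user-space passes usize == 64 (a newer ABI this kernel does
 * not know about), 56 bytes are copied and attr->size reads back as 56,
 * which is how tooling detects how much the kernel filled in.
 */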
8285 
8286 /**
8287  * sys_sched_getattr - similar to sched_getparam, but with sched_attr
8288  * @pid: the pid in question.
8289  * @uattr: structure containing the extended parameters.
8290  * @usize: sizeof(attr) for fwd/bwd comp.
8291  * @flags: for future extension.
8292  */
8293 SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
8294 		unsigned int, usize, unsigned int, flags)
8295 {
8296 	struct sched_attr kattr = { };
8297 	struct task_struct *p;
8298 	int retval;
8299 
8300 	if (!uattr || pid < 0 || usize > PAGE_SIZE ||
8301 	    usize < SCHED_ATTR_SIZE_VER0 || flags)
8302 		return -EINVAL;
8303 
8304 	scoped_guard (rcu) {
8305 		p = find_process_by_pid(pid);
8306 		if (!p)
8307 			return -ESRCH;
8308 
8309 		retval = security_task_getscheduler(p);
8310 		if (retval)
8311 			return retval;
8312 
8313 		kattr.sched_policy = p->policy;
8314 		if (p->sched_reset_on_fork)
8315 			kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
8316 		get_params(p, &kattr);
8317 		kattr.sched_flags &= SCHED_FLAG_ALL;
8318 
8319 #ifdef CONFIG_UCLAMP_TASK
8320 		/*
8321 		 * This could race with another potential updater, but this is fine
8322 		 * because it'll correctly read the old or the new value. We don't need
8323 		 * to guarantee who wins the race as long as it doesn't return garbage.
8324 		 */
8325 		kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
8326 		kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
8327 #endif
8328 	}
8329 
8330 	return sched_attr_copy_to_user(uattr, &kattr, usize);
8331 }
8332 
8333 #ifdef CONFIG_SMP
8334 int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
8335 {
8336 	/*
8337 	 * If the task isn't a deadline task or admission control is
8338 	 * disabled then we don't care about affinity changes.
8339 	 */
8340 	if (!task_has_dl_policy(p) || !dl_bandwidth_enabled())
8341 		return 0;
8342 
8343 	/*
8344 	 * Since bandwidth control happens on root_domain basis,
8345 	 * Since bandwidth control happens on a root_domain basis,
8346 	 * tasks allowed to run on all the CPUs in the task's
8347 	 * root_domain.
8348 	 */
8349 	guard(rcu)();
8350 	if (!cpumask_subset(task_rq(p)->rd->span, mask))
8351 		return -EBUSY;
8352 
8353 	return 0;
8354 }
8355 #endif
8356 
8357 static int
8358 __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
8359 {
8360 	int retval;
8361 	cpumask_var_t cpus_allowed, new_mask;
8362 
8363 	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
8364 		return -ENOMEM;
8365 
8366 	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
8367 		retval = -ENOMEM;
8368 		goto out_free_cpus_allowed;
8369 	}
8370 
8371 	cpuset_cpus_allowed(p, cpus_allowed);
8372 	cpumask_and(new_mask, ctx->new_mask, cpus_allowed);
8373 
8374 	ctx->new_mask = new_mask;
8375 	ctx->flags |= SCA_CHECK;
8376 
8377 	retval = dl_task_check_affinity(p, new_mask);
8378 	if (retval)
8379 		goto out_free_new_mask;
8380 
8381 	retval = __set_cpus_allowed_ptr(p, ctx);
8382 	if (retval)
8383 		goto out_free_new_mask;
8384 
8385 	cpuset_cpus_allowed(p, cpus_allowed);
8386 	if (!cpumask_subset(new_mask, cpus_allowed)) {
8387 		/*
8388 		 * We must have raced with a concurrent cpuset update.
8389 		 * Just reset the cpumask to the cpuset's cpus_allowed.
8390 		 */
8391 		cpumask_copy(new_mask, cpus_allowed);
8392 
8393 		/*
8394 		 * If SCA_USER is set, a 2nd call to __set_cpus_allowed_ptr()
8395 		 * will restore the previous user_cpus_ptr value.
8396 		 *
8397 		 * In the unlikely event a previous user_cpus_ptr exists,
8398 		 * we need to further restrict the mask to what is allowed
8399 		 * by that old user_cpus_ptr.
8400 		 */
8401 		if (unlikely((ctx->flags & SCA_USER) && ctx->user_mask)) {
8402 			bool empty = !cpumask_and(new_mask, new_mask,
8403 						  ctx->user_mask);
8404 
8405 			if (WARN_ON_ONCE(empty))
8406 				cpumask_copy(new_mask, cpus_allowed);
8407 		}
8408 		__set_cpus_allowed_ptr(p, ctx);
8409 		retval = -EINVAL;
8410 	}
8411 
8412 out_free_new_mask:
8413 	free_cpumask_var(new_mask);
8414 out_free_cpus_allowed:
8415 	free_cpumask_var(cpus_allowed);
8416 	return retval;
8417 }
8418 
8419 long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
8420 {
8421 	struct affinity_context ac;
8422 	struct cpumask *user_mask;
8423 	int retval;
8424 
8425 	CLASS(find_get_task, p)(pid);
8426 	if (!p)
8427 		return -ESRCH;
8428 
8429 	if (p->flags & PF_NO_SETAFFINITY)
8430 		return -EINVAL;
8431 
8432 	if (!check_same_owner(p)) {
8433 		guard(rcu)();
8434 		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE))
8435 			return -EPERM;
8436 	}
8437 
8438 	retval = security_task_setscheduler(p);
8439 	if (retval)
8440 		return retval;
8441 
8442 	/*
8443 	 * With non-SMP configs, user_cpus_ptr/user_mask isn't used and
8444 	 * alloc_user_cpus_ptr() returns NULL.
8445 	 */
8446 	user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE);
8447 	if (user_mask) {
8448 		cpumask_copy(user_mask, in_mask);
8449 	} else if (IS_ENABLED(CONFIG_SMP)) {
8450 		return -ENOMEM;
8451 	}
8452 
8453 	ac = (struct affinity_context){
8454 		.new_mask  = in_mask,
8455 		.user_mask = user_mask,
8456 		.flags     = SCA_USER,
8457 	};
8458 
8459 	retval = __sched_setaffinity(p, &ac);
8460 	kfree(ac.user_mask);
8461 
8462 	return retval;
8463 }
8464 
8465 static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
8466 			     struct cpumask *new_mask)
8467 {
8468 	if (len < cpumask_size())
8469 		cpumask_clear(new_mask);
8470 	else if (len > cpumask_size())
8471 		len = cpumask_size();
8472 
8473 	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
8474 }
8475 
8476 /**
8477  * sys_sched_setaffinity - set the CPU affinity of a process
8478  * @pid: pid of the process
8479  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
8480  * @user_mask_ptr: user-space pointer to the new CPU mask
8481  *
8482  * Return: 0 on success. An error code otherwise.
8483  */
8484 SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
8485 		unsigned long __user *, user_mask_ptr)
8486 {
8487 	cpumask_var_t new_mask;
8488 	int retval;
8489 
8490 	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
8491 		return -ENOMEM;
8492 
8493 	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
8494 	if (retval == 0)
8495 		retval = sched_setaffinity(pid, new_mask);
8496 	free_cpumask_var(new_mask);
8497 	return retval;
8498 }
8499 
8500 long sched_getaffinity(pid_t pid, struct cpumask *mask)
8501 {
8502 	struct task_struct *p;
8503 	int retval;
8504 
8505 	guard(rcu)();
8506 	p = find_process_by_pid(pid);
8507 	if (!p)
8508 		return -ESRCH;
8509 
8510 	retval = security_task_getscheduler(p);
8511 	if (retval)
8512 		return retval;
8513 
8514 	guard(raw_spinlock_irqsave)(&p->pi_lock);
8515 	cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
8516 
8517 	return 0;
8518 }
8519 
8520 /**
8521  * sys_sched_getaffinity - get the CPU affinity of a process
8522  * @pid: pid of the process
8523  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
8524  * @user_mask_ptr: user-space pointer to hold the current CPU mask
8525  *
8526  * Return: size of CPU mask copied to user_mask_ptr on success. An
8527  * error code otherwise.
8528  */
8529 SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
8530 		unsigned long __user *, user_mask_ptr)
8531 {
8532 	int ret;
8533 	cpumask_var_t mask;
8534 
8535 	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
8536 		return -EINVAL;
8537 	if (len & (sizeof(unsigned long)-1))
8538 		return -EINVAL;
8539 
8540 	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
8541 		return -ENOMEM;
8542 
8543 	ret = sched_getaffinity(pid, mask);
8544 	if (ret == 0) {
8545 		unsigned int retlen = min(len, cpumask_size());
8546 
8547 		if (copy_to_user(user_mask_ptr, cpumask_bits(mask), retlen))
8548 			ret = -EFAULT;
8549 		else
8550 			ret = retlen;
8551 	}
8552 	free_cpumask_var(mask);
8553 
8554 	return ret;
8555 }
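
/*
 * Illustrative user-space counterpart (not part of this file). Note that
 * the raw syscall returns the number of mask bytes copied rather than 0;
 * the glibc wrapper hides this and returns 0 on success:
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		cpu_set_t set;
 *
 *		if (sched_getaffinity(0, sizeof(set), &set)) {
 *			perror("sched_getaffinity");
 *			return 1;
 *		}
 *		for (int cpu = 0; cpu < CPU_SETSIZE; cpu++)
 *			if (CPU_ISSET(cpu, &set))
 *				printf("allowed: CPU %d\n", cpu);
 *		return 0;
 *	}
 */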
8556 
8557 static void do_sched_yield(void)
8558 {
8559 	struct rq_flags rf;
8560 	struct rq *rq;
8561 
8562 	rq = this_rq_lock_irq(&rf);
8563 
8564 	schedstat_inc(rq->yld_count);
8565 	current->sched_class->yield_task(rq);
8566 
8567 	preempt_disable();
8568 	rq_unlock_irq(rq, &rf);
8569 	sched_preempt_enable_no_resched();
8570 
8571 	schedule();
8572 }
8573 
8574 /**
8575  * sys_sched_yield - yield the current processor to other threads.
8576  *
8577  * This function yields the current CPU to other tasks. If there are no
8578  * other threads running on this CPU then this function will return.
8579  *
8580  * Return: 0.
8581  */
8582 SYSCALL_DEFINE0(sched_yield)
8583 {
8584 	do_sched_yield();
8585 	return 0;
8586 }
8587 
8588 #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
8589 int __sched __cond_resched(void)
8590 {
8591 	if (should_resched(0)) {
8592 		preempt_schedule_common();
8593 		return 1;
8594 	}
8595 	/*
8596 	 * In preemptible kernels, ->rcu_read_lock_nesting tells the tick
8597 	 * whether the current CPU is in an RCU read-side critical section,
8598 	 * so the tick can report quiescent states even for CPUs looping
8599 	 * in kernel context.  In contrast, in non-preemptible kernels,
8600 	 * RCU readers leave no in-memory hints, which means that CPU-bound
8601 	 * processes executing in kernel context might never report an
8602 	 * RCU quiescent state.  Therefore, the following code causes
8603 	 * cond_resched() to report a quiescent state, but only when RCU
8604 	 * is in urgent need of one.
8605 	 */
8606 #ifndef CONFIG_PREEMPT_RCU
8607 	rcu_all_qs();
8608 #endif
8609 	return 0;
8610 }
8611 EXPORT_SYMBOL(__cond_resched);
8612 #endif
8613 
8614 #ifdef CONFIG_PREEMPT_DYNAMIC
8615 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
8616 #define cond_resched_dynamic_enabled	__cond_resched
8617 #define cond_resched_dynamic_disabled	((void *)&__static_call_return0)
8618 DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
8619 EXPORT_STATIC_CALL_TRAMP(cond_resched);
8620 
8621 #define might_resched_dynamic_enabled	__cond_resched
8622 #define might_resched_dynamic_disabled	((void *)&__static_call_return0)
8623 DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
8624 EXPORT_STATIC_CALL_TRAMP(might_resched);
8625 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
8626 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
8627 int __sched dynamic_cond_resched(void)
8628 {
8629 	klp_sched_try_switch();
8630 	if (!static_branch_unlikely(&sk_dynamic_cond_resched))
8631 		return 0;
8632 	return __cond_resched();
8633 }
8634 EXPORT_SYMBOL(dynamic_cond_resched);
8635 
8636 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched);
8637 int __sched dynamic_might_resched(void)
8638 {
8639 	if (!static_branch_unlikely(&sk_dynamic_might_resched))
8640 		return 0;
8641 	return __cond_resched();
8642 }
8643 EXPORT_SYMBOL(dynamic_might_resched);
8644 #endif
8645 #endif
8646 
8647 /*
8648  * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
8649  * call schedule, and on return reacquire the lock.
8650  *
8651  * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
8652  * operations here to prevent schedule() from being called twice (once via
8653  * spin_unlock(), once by hand).
8654  */
8655 int __cond_resched_lock(spinlock_t *lock)
8656 {
8657 	int resched = should_resched(PREEMPT_LOCK_OFFSET);
8658 	int ret = 0;
8659 
8660 	lockdep_assert_held(lock);
8661 
8662 	if (spin_needbreak(lock) || resched) {
8663 		spin_unlock(lock);
8664 		if (!_cond_resched())
8665 			cpu_relax();
8666 		ret = 1;
8667 		spin_lock(lock);
8668 	}
8669 	return ret;
8670 }
8671 EXPORT_SYMBOL(__cond_resched_lock);
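
/*
 * Illustrative usage pattern (not part of this file): a long scan under a
 * spinlock can use cond_resched_lock() to drop the lock, reschedule if
 * needed, and re-take the lock. The lock, array and helper below are
 * hypothetical:
 *
 *	spin_lock(&my_lock);
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(&items[i]);
 *		if (cond_resched_lock(&my_lock)) {
 *			// the lock was dropped and re-acquired; anything it
 *			// protects may have changed in the meantime
 *		}
 *	}
 *	spin_unlock(&my_lock);
 */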
8672 
8673 int __cond_resched_rwlock_read(rwlock_t *lock)
8674 {
8675 	int resched = should_resched(PREEMPT_LOCK_OFFSET);
8676 	int ret = 0;
8677 
8678 	lockdep_assert_held_read(lock);
8679 
8680 	if (rwlock_needbreak(lock) || resched) {
8681 		read_unlock(lock);
8682 		if (!_cond_resched())
8683 			cpu_relax();
8684 		ret = 1;
8685 		read_lock(lock);
8686 	}
8687 	return ret;
8688 }
8689 EXPORT_SYMBOL(__cond_resched_rwlock_read);
8690 
8691 int __cond_resched_rwlock_write(rwlock_t *lock)
8692 {
8693 	int resched = should_resched(PREEMPT_LOCK_OFFSET);
8694 	int ret = 0;
8695 
8696 	lockdep_assert_held_write(lock);
8697 
8698 	if (rwlock_needbreak(lock) || resched) {
8699 		write_unlock(lock);
8700 		if (!_cond_resched())
8701 			cpu_relax();
8702 		ret = 1;
8703 		write_lock(lock);
8704 	}
8705 	return ret;
8706 }
8707 EXPORT_SYMBOL(__cond_resched_rwlock_write);
8708 
8709 #ifdef CONFIG_PREEMPT_DYNAMIC
8710 
8711 #ifdef CONFIG_GENERIC_ENTRY
8712 #include <linux/entry-common.h>
8713 #endif
8714 
8715 /*
8716  * SC:cond_resched
8717  * SC:might_resched
8718  * SC:preempt_schedule
8719  * SC:preempt_schedule_notrace
8720  * SC:irqentry_exit_cond_resched
8721  *
8722  *
8723  * NONE:
8724  *   cond_resched               <- __cond_resched
8725  *   might_resched              <- RET0
8726  *   preempt_schedule           <- NOP
8727  *   preempt_schedule_notrace   <- NOP
8728  *   irqentry_exit_cond_resched <- NOP
8729  *
8730  * VOLUNTARY:
8731  *   cond_resched               <- __cond_resched
8732  *   might_resched              <- __cond_resched
8733  *   preempt_schedule           <- NOP
8734  *   preempt_schedule_notrace   <- NOP
8735  *   irqentry_exit_cond_resched <- NOP
8736  *
8737  * FULL:
8738  *   cond_resched               <- RET0
8739  *   might_resched              <- RET0
8740  *   preempt_schedule           <- preempt_schedule
8741  *   preempt_schedule_notrace   <- preempt_schedule_notrace
8742  *   irqentry_exit_cond_resched <- irqentry_exit_cond_resched
8743  */
8744 
8745 enum {
8746 	preempt_dynamic_undefined = -1,
8747 	preempt_dynamic_none,
8748 	preempt_dynamic_voluntary,
8749 	preempt_dynamic_full,
8750 };
8751 
8752 int preempt_dynamic_mode = preempt_dynamic_undefined;
8753 
8754 int sched_dynamic_mode(const char *str)
8755 {
8756 	if (!strcmp(str, "none"))
8757 		return preempt_dynamic_none;
8758 
8759 	if (!strcmp(str, "voluntary"))
8760 		return preempt_dynamic_voluntary;
8761 
8762 	if (!strcmp(str, "full"))
8763 		return preempt_dynamic_full;
8764 
8765 	return -EINVAL;
8766 }
8767 
8768 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
8769 #define preempt_dynamic_enable(f)	static_call_update(f, f##_dynamic_enabled)
8770 #define preempt_dynamic_disable(f)	static_call_update(f, f##_dynamic_disabled)
8771 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
8772 #define preempt_dynamic_enable(f)	static_key_enable(&sk_dynamic_##f.key)
8773 #define preempt_dynamic_disable(f)	static_key_disable(&sk_dynamic_##f.key)
8774 #else
8775 #error "Unsupported PREEMPT_DYNAMIC mechanism"
8776 #endif
8777 
8778 static DEFINE_MUTEX(sched_dynamic_mutex);
8779 static bool klp_override;
8780 
8781 static void __sched_dynamic_update(int mode)
8782 {
8783 	/*
8784 	 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
8785 	 * the ZERO state, which is invalid.
8786 	 */
8787 	if (!klp_override)
8788 		preempt_dynamic_enable(cond_resched);
8789 	preempt_dynamic_enable(might_resched);
8790 	preempt_dynamic_enable(preempt_schedule);
8791 	preempt_dynamic_enable(preempt_schedule_notrace);
8792 	preempt_dynamic_enable(irqentry_exit_cond_resched);
8793 
8794 	switch (mode) {
8795 	case preempt_dynamic_none:
8796 		if (!klp_override)
8797 			preempt_dynamic_enable(cond_resched);
8798 		preempt_dynamic_disable(might_resched);
8799 		preempt_dynamic_disable(preempt_schedule);
8800 		preempt_dynamic_disable(preempt_schedule_notrace);
8801 		preempt_dynamic_disable(irqentry_exit_cond_resched);
8802 		if (mode != preempt_dynamic_mode)
8803 			pr_info("Dynamic Preempt: none\n");
8804 		break;
8805 
8806 	case preempt_dynamic_voluntary:
8807 		if (!klp_override)
8808 			preempt_dynamic_enable(cond_resched);
8809 		preempt_dynamic_enable(might_resched);
8810 		preempt_dynamic_disable(preempt_schedule);
8811 		preempt_dynamic_disable(preempt_schedule_notrace);
8812 		preempt_dynamic_disable(irqentry_exit_cond_resched);
8813 		if (mode != preempt_dynamic_mode)
8814 			pr_info("Dynamic Preempt: voluntary\n");
8815 		break;
8816 
8817 	case preempt_dynamic_full:
8818 		if (!klp_override)
8819 			preempt_dynamic_disable(cond_resched);
8820 		preempt_dynamic_disable(might_resched);
8821 		preempt_dynamic_enable(preempt_schedule);
8822 		preempt_dynamic_enable(preempt_schedule_notrace);
8823 		preempt_dynamic_enable(irqentry_exit_cond_resched);
8824 		if (mode != preempt_dynamic_mode)
8825 			pr_info("Dynamic Preempt: full\n");
8826 		break;
8827 	}
8828 
8829 	preempt_dynamic_mode = mode;
8830 }
8831 
8832 void sched_dynamic_update(int mode)
8833 {
8834 	mutex_lock(&sched_dynamic_mutex);
8835 	__sched_dynamic_update(mode);
8836 	mutex_unlock(&sched_dynamic_mutex);
8837 }
8838 
8839 #ifdef CONFIG_HAVE_PREEMPT_DYNAMIC_CALL
8840 
8841 static int klp_cond_resched(void)
8842 {
8843 	__klp_sched_try_switch();
8844 	return __cond_resched();
8845 }
8846 
8847 void sched_dynamic_klp_enable(void)
8848 {
8849 	mutex_lock(&sched_dynamic_mutex);
8850 
8851 	klp_override = true;
8852 	static_call_update(cond_resched, klp_cond_resched);
8853 
8854 	mutex_unlock(&sched_dynamic_mutex);
8855 }
8856 
8857 void sched_dynamic_klp_disable(void)
8858 {
8859 	mutex_lock(&sched_dynamic_mutex);
8860 
8861 	klp_override = false;
8862 	__sched_dynamic_update(preempt_dynamic_mode);
8863 
8864 	mutex_unlock(&sched_dynamic_mutex);
8865 }
8866 
8867 #endif /* CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
8868 
8869 static int __init setup_preempt_mode(char *str)
8870 {
8871 	int mode = sched_dynamic_mode(str);
8872 	if (mode < 0) {
8873 		pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
8874 		return 0;
8875 	}
8876 
8877 	sched_dynamic_update(mode);
8878 	return 1;
8879 }
8880 __setup("preempt=", setup_preempt_mode);
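
/*
 * Illustrative use (not part of this file): with CONFIG_PREEMPT_DYNAMIC=y
 * the preemption model can be selected on the kernel command line, e.g.
 *
 *	preempt=none
 *	preempt=voluntary
 *	preempt=full
 *
 * matching the strings accepted by sched_dynamic_mode() above. An
 * unrecognized value logs "Dynamic Preempt: unsupported mode" and keeps
 * the compiled-in default.
 */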
8881 
8882 static void __init preempt_dynamic_init(void)
8883 {
8884 	if (preempt_dynamic_mode == preempt_dynamic_undefined) {
8885 		if (IS_ENABLED(CONFIG_PREEMPT_NONE)) {
8886 			sched_dynamic_update(preempt_dynamic_none);
8887 		} else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) {
8888 			sched_dynamic_update(preempt_dynamic_voluntary);
8889 		} else {
8890 			/* Default static call setting, nothing to do */
8891 			WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT));
8892 			preempt_dynamic_mode = preempt_dynamic_full;
8893 			pr_info("Dynamic Preempt: full\n");
8894 		}
8895 	}
8896 }
8897 
8898 #define PREEMPT_MODEL_ACCESSOR(mode) \
8899 	bool preempt_model_##mode(void)						 \
8900 	{									 \
8901 		WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \
8902 		return preempt_dynamic_mode == preempt_dynamic_##mode;		 \
8903 	}									 \
8904 	EXPORT_SYMBOL_GPL(preempt_model_##mode)
8905 
8906 PREEMPT_MODEL_ACCESSOR(none);
8907 PREEMPT_MODEL_ACCESSOR(voluntary);
8908 PREEMPT_MODEL_ACCESSOR(full);
8909 
8910 #else /* !CONFIG_PREEMPT_DYNAMIC */
8911 
8912 static inline void preempt_dynamic_init(void) { }
8913 
8914 #endif /* #ifdef CONFIG_PREEMPT_DYNAMIC */
8915 
8916 /**
8917  * yield - yield the current processor to other threads.
8918  *
8919  * Do not ever use this function, there's a 99% chance you're doing it wrong.
8920  *
8921  * The scheduler is at all times free to pick the calling task as the most
8922  * eligible task to run, if removing the yield() call from your code breaks
8923  * it, it's already broken.
8924  *
8925  * Typical broken usage is:
8926  *
8927  * while (!event)
8928  *	yield();
8929  *
8930  * where one assumes that yield() will let 'the other' process run that will
8931  * make event true. If the current task is a SCHED_FIFO task that will never
8932  * happen. Never use yield() as a progress guarantee!!
8933  *
8934  * If you want to use yield() to wait for something, use wait_event().
8935  * If you want to use yield() to be 'nice' for others, use cond_resched().
8936  * If you still want to use yield(), do not!
8937  */
8938 void __sched yield(void)
8939 {
8940 	set_current_state(TASK_RUNNING);
8941 	do_sched_yield();
8942 }
8943 EXPORT_SYMBOL(yield);
8944 
8945 /**
8946  * yield_to - yield the current processor to another thread in
8947  * your thread group, or accelerate that thread toward the
8948  * processor it's on.
8949  * @p: target task
8950  * @preempt: whether task preemption is allowed or not
8951  *
8952  * It's the caller's job to ensure that the target task struct
8953  * can't go away on us before we can do any checks.
8954  *
8955  * Return:
8956  *	true (>0) if we indeed boosted the target task.
8957  *	false (0) if we failed to boost the target.
8958  *	-ESRCH if there's no task to yield to.
8959  */
8960 int __sched yield_to(struct task_struct *p, bool preempt)
8961 {
8962 	struct task_struct *curr = current;
8963 	struct rq *rq, *p_rq;
8964 	int yielded = 0;
8965 
8966 	scoped_guard (irqsave) {
8967 		rq = this_rq();
8968 
8969 again:
8970 		p_rq = task_rq(p);
8971 		/*
8972 		 * If we're the only runnable task on the rq and target rq also
8973 		 * has only one task, there's absolutely no point in yielding.
8974 		 */
8975 		if (rq->nr_running == 1 && p_rq->nr_running == 1)
8976 			return -ESRCH;
8977 
8978 		guard(double_rq_lock)(rq, p_rq);
8979 		if (task_rq(p) != p_rq)
8980 			goto again;
8981 
8982 		if (!curr->sched_class->yield_to_task)
8983 			return 0;
8984 
8985 		if (curr->sched_class != p->sched_class)
8986 			return 0;
8987 
8988 		if (task_on_cpu(p_rq, p) || !task_is_running(p))
8989 			return 0;
8990 
8991 		yielded = curr->sched_class->yield_to_task(rq, p);
8992 		if (yielded) {
8993 			schedstat_inc(rq->yld_count);
8994 			/*
8995 			 * Make p's CPU reschedule; pick_next_entity
8996 			 * takes care of fairness.
8997 			 */
8998 			if (preempt && rq != p_rq)
8999 				resched_curr(p_rq);
9000 		}
9001 	}
9002 
9003 	if (yielded)
9004 		schedule();
9005 
9006 	return yielded;
9007 }
9008 EXPORT_SYMBOL_GPL(yield_to);
9009 
9010 int io_schedule_prepare(void)
9011 {
9012 	int old_iowait = current->in_iowait;
9013 
9014 	current->in_iowait = 1;
9015 	blk_flush_plug(current->plug, true);
9016 	return old_iowait;
9017 }
9018 
9019 void io_schedule_finish(int token)
9020 {
9021 	current->in_iowait = token;
9022 }
9023 
9024 /*
9025  * This task is about to go to sleep on IO. Increment rq->nr_iowait so
9026  * that process accounting knows that this is a task in IO wait state.
9027  */
9028 long __sched io_schedule_timeout(long timeout)
9029 {
9030 	int token;
9031 	long ret;
9032 
9033 	token = io_schedule_prepare();
9034 	ret = schedule_timeout(timeout);
9035 	io_schedule_finish(token);
9036 
9037 	return ret;
9038 }
9039 EXPORT_SYMBOL(io_schedule_timeout);
9040 
9041 void __sched io_schedule(void)
9042 {
9043 	int token;
9044 
9045 	token = io_schedule_prepare();
9046 	schedule();
9047 	io_schedule_finish(token);
9048 }
9049 EXPORT_SYMBOL(io_schedule);
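
/*
 * Illustrative usage pattern (not part of this file): code that sleeps on
 * I/O through its own wait primitive can bracket the sleep with
 * io_schedule_prepare()/io_schedule_finish() so the wait is accounted as
 * iowait, just as io_schedule() does above. my_wait_for_io() is a
 * hypothetical helper:
 *
 *	int token = io_schedule_prepare();
 *
 *	my_wait_for_io();	// e.g. wait_for_completion(&done)
 *	io_schedule_finish(token);
 */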
9050 
9051 /**
9052  * sys_sched_get_priority_max - return maximum RT priority.
9053  * @policy: scheduling class.
9054  *
9055  * Return: On success, this syscall returns the maximum
9056  * rt_priority that can be used by a given scheduling class.
9057  * On failure, a negative error code is returned.
9058  */
9059 SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
9060 {
9061 	int ret = -EINVAL;
9062 
9063 	switch (policy) {
9064 	case SCHED_FIFO:
9065 	case SCHED_RR:
9066 		ret = MAX_RT_PRIO-1;
9067 		break;
9068 	case SCHED_DEADLINE:
9069 	case SCHED_NORMAL:
9070 	case SCHED_BATCH:
9071 	case SCHED_IDLE:
9072 		ret = 0;
9073 		break;
9074 	}
9075 	return ret;
9076 }
9077 
9078 /**
9079  * sys_sched_get_priority_min - return minimum RT priority.
9080  * @policy: scheduling class.
9081  *
9082  * Return: On success, this syscall returns the minimum
9083  * rt_priority that can be used by a given scheduling class.
9084  * On failure, a negative error code is returned.
9085  */
9086 SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
9087 {
9088 	int ret = -EINVAL;
9089 
9090 	switch (policy) {
9091 	case SCHED_FIFO:
9092 	case SCHED_RR:
9093 		ret = 1;
9094 		break;
9095 	case SCHED_DEADLINE:
9096 	case SCHED_NORMAL:
9097 	case SCHED_BATCH:
9098 	case SCHED_IDLE:
9099 		ret = 0;
9100 	}
9101 	return ret;
9102 }
9103 
9104 static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
9105 {
9106 	unsigned int time_slice = 0;
9107 	int retval;
9108 
9109 	if (pid < 0)
9110 		return -EINVAL;
9111 
9112 	scoped_guard (rcu) {
9113 		struct task_struct *p = find_process_by_pid(pid);
9114 		if (!p)
9115 			return -ESRCH;
9116 
9117 		retval = security_task_getscheduler(p);
9118 		if (retval)
9119 			return retval;
9120 
9121 		scoped_guard (task_rq_lock, p) {
9122 			struct rq *rq = scope.rq;
9123 			if (p->sched_class->get_rr_interval)
9124 				time_slice = p->sched_class->get_rr_interval(rq, p);
9125 		}
9126 	}
9127 
9128 	jiffies_to_timespec64(time_slice, t);
9129 	return 0;
9130 }
9131 
9132 /**
9133  * sys_sched_rr_get_interval - return the default timeslice of a process.
9134  * @pid: pid of the process.
9135  * @interval: userspace pointer to the timeslice value.
9136  *
9137  * This syscall writes the default timeslice value of a given process
9138  * into the user-space timespec buffer. A value of '0' means infinity.
9139  *
9140  * Return: On success, 0 and the timeslice is in @interval. Otherwise,
9141  * an error code.
9142  */
9143 SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
9144 		struct __kernel_timespec __user *, interval)
9145 {
9146 	struct timespec64 t;
9147 	int retval = sched_rr_get_interval(pid, &t);
9148 
9149 	if (retval == 0)
9150 		retval = put_timespec64(&t, interval);
9151 
9152 	return retval;
9153 }
9154 
9155 #ifdef CONFIG_COMPAT_32BIT_TIME
9156 SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
9157 		struct old_timespec32 __user *, interval)
9158 {
9159 	struct timespec64 t;
9160 	int retval = sched_rr_get_interval(pid, &t);
9161 
9162 	if (retval == 0)
9163 		retval = put_old_timespec32(&t, interval);
9164 	return retval;
9165 }
9166 #endif
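/*
 * Example (illustrative userspace sketch): querying the timeslice of the
 * current task through the glibc wrapper; pid 0 means "the calling task"
 * and a zero result means there is no fixed timeslice:
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		struct timespec ts;
 *
 *		if (sched_rr_get_interval(0, &ts))
 *			return 1;
 *		printf("timeslice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
 *		return 0;
 *	}
 */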
9167 
9168 void sched_show_task(struct task_struct *p)
9169 {
9170 	unsigned long free = 0;
9171 	int ppid;
9172 
9173 	if (!try_get_task_stack(p))
9174 		return;
9175 
9176 	pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
9177 
9178 	if (task_is_running(p))
9179 		pr_cont("  running task    ");
9180 #ifdef CONFIG_DEBUG_STACK_USAGE
9181 	free = stack_not_used(p);
9182 #endif
9183 	ppid = 0;
9184 	rcu_read_lock();
9185 	if (pid_alive(p))
9186 		ppid = task_pid_nr(rcu_dereference(p->real_parent));
9187 	rcu_read_unlock();
9188 	pr_cont(" stack:%-5lu pid:%-5d tgid:%-5d ppid:%-6d flags:0x%08lx\n",
9189 		free, task_pid_nr(p), task_tgid_nr(p),
9190 		ppid, read_task_thread_flags(p));
9191 
9192 	print_worker_info(KERN_INFO, p);
9193 	print_stop_info(KERN_INFO, p);
9194 	show_stack(p, NULL, KERN_INFO);
9195 	put_task_stack(p);
9196 }
9197 EXPORT_SYMBOL_GPL(sched_show_task);
9198 
9199 static inline bool
9200 state_filter_match(unsigned long state_filter, struct task_struct *p)
9201 {
9202 	unsigned int state = READ_ONCE(p->__state);
9203 
9204 	/* no filter, everything matches */
9205 	if (!state_filter)
9206 		return true;
9207 
9208 	/* filter, but doesn't match */
9209 	if (!(state & state_filter))
9210 		return false;
9211 
9212 	/*
9213 	 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
9214 	 * TASK_KILLABLE).
9215 	 */
9216 	if (state_filter == TASK_UNINTERRUPTIBLE && (state & TASK_NOLOAD))
9217 		return false;
9218 
9219 	return true;
9220 }
9221 
9222 
9223 void show_state_filter(unsigned int state_filter)
9224 {
9225 	struct task_struct *g, *p;
9226 
9227 	rcu_read_lock();
9228 	for_each_process_thread(g, p) {
9229 		/*
9230 		 * Reset the NMI watchdog timeout: listing all tasks on a slow
9231 		 * console might take a lot of time.
9232 		 * Also, reset softlockup watchdogs on all CPUs, because
9233 		 * another CPU might be blocked waiting for us to process
9234 		 * an IPI.
9235 		 */
9236 		touch_nmi_watchdog();
9237 		touch_all_softlockup_watchdogs();
9238 		if (state_filter_match(state_filter, p))
9239 			sched_show_task(p);
9240 	}
9241 
9242 #ifdef CONFIG_SCHED_DEBUG
9243 	if (!state_filter)
9244 		sysrq_sched_debug_show();
9245 #endif
9246 	rcu_read_unlock();
9247 	/*
9248 	 * Only show locks if all tasks are dumped:
9249 	 */
9250 	if (!state_filter)
9251 		debug_show_all_locks();
9252 }
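/*
 * The usual entry points into the above are the magic SysRq handlers (see
 * drivers/tty/sysrq.c): SysRq-t dumps every task via show_state()
 * (state_filter == 0), while SysRq-w passes TASK_UNINTERRUPTIBLE to dump
 * only blocked tasks, which is what the TASK_NOLOAD special case in
 * state_filter_match() is for.
 */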
9253 
9254 /**
9255  * init_idle - set up an idle thread for a given CPU
9256  * @idle: task in question
9257  * @cpu: CPU the idle task belongs to
9258  *
9259  * NOTE: this function does not set the idle thread's NEED_RESCHED
9260  * flag, to make booting more robust.
9261  */
9262 void __init init_idle(struct task_struct *idle, int cpu)
9263 {
9264 #ifdef CONFIG_SMP
9265 	struct affinity_context ac = (struct affinity_context) {
9266 		.new_mask  = cpumask_of(cpu),
9267 		.flags     = 0,
9268 	};
9269 #endif
9270 	struct rq *rq = cpu_rq(cpu);
9271 	unsigned long flags;
9272 
9273 	__sched_fork(0, idle);
9274 
9275 	raw_spin_lock_irqsave(&idle->pi_lock, flags);
9276 	raw_spin_rq_lock(rq);
9277 
9278 	idle->__state = TASK_RUNNING;
9279 	idle->se.exec_start = sched_clock();
9280 	/*
9281 	 * PF_KTHREAD should already be set at this point; regardless, make it
9282 	 * look like a proper per-CPU kthread.
9283 	 */
9284 	idle->flags |= PF_KTHREAD | PF_NO_SETAFFINITY;
9285 	kthread_set_per_cpu(idle, cpu);
9286 
9287 #ifdef CONFIG_SMP
9288 	/*
9289 	 * It's possible that init_idle() gets called multiple times on a task,
9290 	 * in that case do_set_cpus_allowed() will not do the right thing.
9291 	 *
9292 	 * And since this is boot we can forgo the serialization.
9293 	 */
9294 	set_cpus_allowed_common(idle, &ac);
9295 #endif
9296 	/*
9297 	 * We have a chicken-and-egg problem here: even though we are
9298 	 * holding rq->lock, the CPU isn't yet set to this CPU so the
9299 	 * lockdep check in task_group() will fail.
9300 	 *
9301 	 * Similar to the sched_fork() case. Alternatively we could
9302 	 * use task_rq_lock() here and obtain the other rq->lock.
9303 	 *
9304 	 * Silence PROVE_RCU
9305 	 */
9306 	rcu_read_lock();
9307 	__set_task_cpu(idle, cpu);
9308 	rcu_read_unlock();
9309 
9310 	rq->idle = idle;
9311 	rcu_assign_pointer(rq->curr, idle);
9312 	idle->on_rq = TASK_ON_RQ_QUEUED;
9313 #ifdef CONFIG_SMP
9314 	idle->on_cpu = 1;
9315 #endif
9316 	raw_spin_rq_unlock(rq);
9317 	raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
9318 
9319 	/* Set the preempt count _outside_ the spinlocks! */
9320 	init_idle_preempt_count(idle, cpu);
9321 
9322 	/*
9323 	 * The idle tasks have their own, simple scheduling class:
9324 	 */
9325 	idle->sched_class = &idle_sched_class;
9326 	ftrace_graph_init_idle_task(idle, cpu);
9327 	vtime_init_idle(idle, cpu);
9328 #ifdef CONFIG_SMP
9329 	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
9330 #endif
9331 }
9332 
9333 #ifdef CONFIG_SMP
9334 
9335 int cpuset_cpumask_can_shrink(const struct cpumask *cur,
9336 			      const struct cpumask *trial)
9337 {
9338 	int ret = 1;
9339 
9340 	if (cpumask_empty(cur))
9341 		return ret;
9342 
9343 	ret = dl_cpuset_cpumask_can_shrink(cur, trial);
9344 
9345 	return ret;
9346 }
9347 
9348 int task_can_attach(struct task_struct *p)
9349 {
9350 	int ret = 0;
9351 
9352 	/*
9353 	 * Kthreads which disallow setaffinity shouldn't be moved
9354 	 * to a new cpuset; we don't want to change their CPU
9355 	 * affinity and isolating such threads by their set of
9356 	 * allowed nodes is unnecessary.  Thus, cpusets are not
9357 	 * applicable for such threads.  This prevents checking for
9358 	 * success of set_cpus_allowed_ptr() on all attached tasks
9359 	 * before cpus_mask may be changed.
9360 	 */
9361 	if (p->flags & PF_NO_SETAFFINITY)
9362 		ret = -EINVAL;
9363 
9364 	return ret;
9365 }
9366 
9367 bool sched_smp_initialized __read_mostly;
9368 
9369 #ifdef CONFIG_NUMA_BALANCING
9370 /* Migrate current task p to target_cpu */
9371 int migrate_task_to(struct task_struct *p, int target_cpu)
9372 {
9373 	struct migration_arg arg = { p, target_cpu };
9374 	int curr_cpu = task_cpu(p);
9375 
9376 	if (curr_cpu == target_cpu)
9377 		return 0;
9378 
9379 	if (!cpumask_test_cpu(target_cpu, p->cpus_ptr))
9380 		return -EINVAL;
9381 
9382 	/* TODO: This is not properly updating schedstats */
9383 
9384 	trace_sched_move_numa(p, curr_cpu, target_cpu);
9385 	return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
9386 }
9387 
9388 /*
9389  * Requeue a task on a given node and accurately track the number of NUMA
9390  * tasks on the runqueues
9391  */
9392 void sched_setnuma(struct task_struct *p, int nid)
9393 {
9394 	bool queued, running;
9395 	struct rq_flags rf;
9396 	struct rq *rq;
9397 
9398 	rq = task_rq_lock(p, &rf);
9399 	queued = task_on_rq_queued(p);
9400 	running = task_current(rq, p);
9401 
9402 	if (queued)
9403 		dequeue_task(rq, p, DEQUEUE_SAVE);
9404 	if (running)
9405 		put_prev_task(rq, p);
9406 
9407 	p->numa_preferred_nid = nid;
9408 
9409 	if (queued)
9410 		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
9411 	if (running)
9412 		set_next_task(rq, p);
9413 	task_rq_unlock(rq, p, &rf);
9414 }
9415 #endif /* CONFIG_NUMA_BALANCING */
9416 
9417 #ifdef CONFIG_HOTPLUG_CPU
9418 /*
9419  * Ensure that the idle task is using init_mm right before its CPU goes
9420  * offline.
9421  */
9422 void idle_task_exit(void)
9423 {
9424 	struct mm_struct *mm = current->active_mm;
9425 
9426 	BUG_ON(cpu_online(smp_processor_id()));
9427 	BUG_ON(current != this_rq()->idle);
9428 
9429 	if (mm != &init_mm) {
9430 		switch_mm(mm, &init_mm, current);
9431 		finish_arch_post_lock_switch();
9432 	}
9433 
9434 	/* finish_cpu(), as run on the BP, will clean up the active_mm state */
9435 }
9436 
9437 static int __balance_push_cpu_stop(void *arg)
9438 {
9439 	struct task_struct *p = arg;
9440 	struct rq *rq = this_rq();
9441 	struct rq_flags rf;
9442 	int cpu;
9443 
9444 	raw_spin_lock_irq(&p->pi_lock);
9445 	rq_lock(rq, &rf);
9446 
9447 	update_rq_clock(rq);
9448 
9449 	if (task_rq(p) == rq && task_on_rq_queued(p)) {
9450 		cpu = select_fallback_rq(rq->cpu, p);
9451 		rq = __migrate_task(rq, &rf, p, cpu);
9452 	}
9453 
9454 	rq_unlock(rq, &rf);
9455 	raw_spin_unlock_irq(&p->pi_lock);
9456 
9457 	put_task_struct(p);
9458 
9459 	return 0;
9460 }
9461 
9462 static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
9463 
9464 /*
9465  * Ensure we only run per-cpu kthreads once the CPU goes !active.
9466  *
9467  * This is enabled below SCHED_AP_ACTIVE, i.e. when !cpu_active(), but it is
9468  * only effective while the hotplug direction is down (the CPU going offline).
9469  */
9470 static void balance_push(struct rq *rq)
9471 {
9472 	struct task_struct *push_task = rq->curr;
9473 
9474 	lockdep_assert_rq_held(rq);
9475 
9476 	/*
9477 	 * Ensure the thing is persistent until balance_push_set(.on = false);
9478 	 */
9479 	rq->balance_callback = &balance_push_callback;
9480 
9481 	/*
9482 	 * Only active while going offline and when invoked on the outgoing
9483 	 * CPU.
9484 	 */
9485 	if (!cpu_dying(rq->cpu) || rq != this_rq())
9486 		return;
9487 
9488 	/*
9489 	 * Both the CPU hotplug control task and the stop task fall into this
9490 	 * case; both are required to complete the hotplug process.
9491 	 */
9492 	if (kthread_is_per_cpu(push_task) ||
9493 	    is_migration_disabled(push_task)) {
9494 
9495 		/*
9496 		 * If this is the idle task on the outgoing CPU try to wake
9497 		 * up the hotplug control thread which might wait for the
9498 		 * last task to vanish. The rcuwait_active() check is
9499 		 * accurate here because the waiter is pinned on this CPU
9500 		 * and can't obviously be running in parallel.
9501 		 *
9502 		 * On RT kernels this also has to check whether there are
9503 		 * pinned and scheduled out tasks on the runqueue. They
9504 		 * need to leave the migrate disabled section first.
9505 		 */
9506 		if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
9507 		    rcuwait_active(&rq->hotplug_wait)) {
9508 			raw_spin_rq_unlock(rq);
9509 			rcuwait_wake_up(&rq->hotplug_wait);
9510 			raw_spin_rq_lock(rq);
9511 		}
9512 		return;
9513 	}
9514 
9515 	get_task_struct(push_task);
9516 	/*
9517 	 * Temporarily drop rq->lock such that we can wake-up the stop task.
9518 	 * Both preemption and IRQs are still disabled.
9519 	 */
9520 	preempt_disable();
9521 	raw_spin_rq_unlock(rq);
9522 	stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
9523 			    this_cpu_ptr(&push_work));
9524 	preempt_enable();
9525 	/*
9526 	 * At this point need_resched() is true and we'll take the loop in
9527 	 * schedule(). The next pick is obviously going to be the stop task, which
9528 	 * is a per-CPU kthread (kthread_is_per_cpu()) and will push this task away.
9529 	 */
9530 	raw_spin_rq_lock(rq);
9531 }
9532 
9533 static void balance_push_set(int cpu, bool on)
9534 {
9535 	struct rq *rq = cpu_rq(cpu);
9536 	struct rq_flags rf;
9537 
9538 	rq_lock_irqsave(rq, &rf);
9539 	if (on) {
9540 		WARN_ON_ONCE(rq->balance_callback);
9541 		rq->balance_callback = &balance_push_callback;
9542 	} else if (rq->balance_callback == &balance_push_callback) {
9543 		rq->balance_callback = NULL;
9544 	}
9545 	rq_unlock_irqrestore(rq, &rf);
9546 }
9547 
9548 /*
9549  * Invoked from a CPUs hotplug control thread after the CPU has been marked
9550  * inactive. All tasks which are not per CPU kernel threads are either
9551  * pushed off this CPU now via balance_push() or placed on a different CPU
9552  * during wakeup. Wait until the CPU is quiescent.
9553  */
9554 static void balance_hotplug_wait(void)
9555 {
9556 	struct rq *rq = this_rq();
9557 
9558 	rcuwait_wait_event(&rq->hotplug_wait,
9559 			   rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
9560 			   TASK_UNINTERRUPTIBLE);
9561 }
9562 
9563 #else
9564 
9565 static inline void balance_push(struct rq *rq)
9566 {
9567 }
9568 
9569 static inline void balance_push_set(int cpu, bool on)
9570 {
9571 }
9572 
9573 static inline void balance_hotplug_wait(void)
9574 {
9575 }
9576 
9577 #endif /* CONFIG_HOTPLUG_CPU */
9578 
9579 void set_rq_online(struct rq *rq)
9580 {
9581 	if (!rq->online) {
9582 		const struct sched_class *class;
9583 
9584 		cpumask_set_cpu(rq->cpu, rq->rd->online);
9585 		rq->online = 1;
9586 
9587 		for_each_class(class) {
9588 			if (class->rq_online)
9589 				class->rq_online(rq);
9590 		}
9591 	}
9592 }
9593 
9594 void set_rq_offline(struct rq *rq)
9595 {
9596 	if (rq->online) {
9597 		const struct sched_class *class;
9598 
9599 		update_rq_clock(rq);
9600 		for_each_class(class) {
9601 			if (class->rq_offline)
9602 				class->rq_offline(rq);
9603 		}
9604 
9605 		cpumask_clear_cpu(rq->cpu, rq->rd->online);
9606 		rq->online = 0;
9607 	}
9608 }
9609 
9610 /*
9611  * used to mark begin/end of suspend/resume:
9612  */
9613 static int num_cpus_frozen;
9614 
9615 /*
9616  * Update cpusets according to cpu_active mask.  If cpusets are
9617  * disabled, cpuset_update_active_cpus() becomes a simple wrapper
9618  * around partition_sched_domains().
9619  *
9620  * If we come here as part of a suspend/resume, don't touch cpusets because we
9621  * want to restore them to their original state upon resume anyway.
9622  */
9623 static void cpuset_cpu_active(void)
9624 {
9625 	if (cpuhp_tasks_frozen) {
9626 		/*
9627 		 * num_cpus_frozen tracks how many CPUs are involved in the
9628 		 * suspend/resume sequence. As long as this is not the last online
9629 		 * operation in the resume sequence, just build a single sched
9630 		 * domain, ignoring cpusets.
9631 		 */
9632 		partition_sched_domains(1, NULL, NULL);
9633 		if (--num_cpus_frozen)
9634 			return;
9635 		/*
9636 		 * This is the last CPU online operation. So fall through and
9637 		 * restore the original sched domains by considering the
9638 		 * cpuset configurations.
9639 		 */
9640 		cpuset_force_rebuild();
9641 	}
9642 	cpuset_update_active_cpus();
9643 }
9644 
9645 static int cpuset_cpu_inactive(unsigned int cpu)
9646 {
9647 	if (!cpuhp_tasks_frozen) {
9648 		int ret = dl_bw_check_overflow(cpu);
9649 
9650 		if (ret)
9651 			return ret;
9652 		cpuset_update_active_cpus();
9653 	} else {
9654 		num_cpus_frozen++;
9655 		partition_sched_domains(1, NULL, NULL);
9656 	}
9657 	return 0;
9658 }
9659 
9660 int sched_cpu_activate(unsigned int cpu)
9661 {
9662 	struct rq *rq = cpu_rq(cpu);
9663 	struct rq_flags rf;
9664 
9665 	/*
9666 	 * Clear the balance_push callback and prepare to schedule
9667 	 * regular tasks.
9668 	 */
9669 	balance_push_set(cpu, false);
9670 
9671 #ifdef CONFIG_SCHED_SMT
9672 	/*
9673 	 * When going up, increment the number of cores with SMT present.
9674 	 */
9675 	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
9676 		static_branch_inc_cpuslocked(&sched_smt_present);
9677 #endif
9678 	set_cpu_active(cpu, true);
9679 
9680 	if (sched_smp_initialized) {
9681 		sched_update_numa(cpu, true);
9682 		sched_domains_numa_masks_set(cpu);
9683 		cpuset_cpu_active();
9684 	}
9685 
9686 	/*
9687 	 * Put the rq online, if not already. This happens:
9688 	 *
9689 	 * 1) In the early boot process, because we build the real domains
9690 	 *    after all CPUs have been brought up.
9691 	 *
9692 	 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
9693 	 *    domains.
9694 	 */
9695 	rq_lock_irqsave(rq, &rf);
9696 	if (rq->rd) {
9697 		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
9698 		set_rq_online(rq);
9699 	}
9700 	rq_unlock_irqrestore(rq, &rf);
9701 
9702 	return 0;
9703 }
9704 
9705 int sched_cpu_deactivate(unsigned int cpu)
9706 {
9707 	struct rq *rq = cpu_rq(cpu);
9708 	struct rq_flags rf;
9709 	int ret;
9710 
9711 	/*
9712 	 * Remove CPU from nohz.idle_cpus_mask to prevent participating in
9713 	 * load balancing when not active
9714 	 */
9715 	nohz_balance_exit_idle(rq);
9716 
9717 	set_cpu_active(cpu, false);
9718 
9719 	/*
9720 	 * From this point forward, this CPU will refuse to run any task that
9721 	 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
9722 	 * push those tasks away until this gets cleared, see
9723 	 * sched_cpu_dying().
9724 	 */
9725 	balance_push_set(cpu, true);
9726 
9727 	/*
9728 	 * We've cleared cpu_active_mask / set balance_push, wait for all
9729 	 * preempt-disabled and RCU users of this state to go away such that
9730 	 * all new such users will observe it.
9731 	 *
9732 	 * Specifically, we rely on ttwu to no longer target this CPU, see
9733 	 * ttwu_queue_cond() and is_cpu_allowed().
9734 	 *
9735 	 * Do sync before park smpboot threads to take care the rcu boost case.
9736 	 */
9737 	synchronize_rcu();
9738 
9739 	rq_lock_irqsave(rq, &rf);
9740 	if (rq->rd) {
9741 		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
9742 		set_rq_offline(rq);
9743 	}
9744 	rq_unlock_irqrestore(rq, &rf);
9745 
9746 #ifdef CONFIG_SCHED_SMT
9747 	/*
9748 	 * When going down, decrement the number of cores with SMT present.
9749 	 */
9750 	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
9751 		static_branch_dec_cpuslocked(&sched_smt_present);
9752 
9753 	sched_core_cpu_deactivate(cpu);
9754 #endif
9755 
9756 	if (!sched_smp_initialized)
9757 		return 0;
9758 
9759 	sched_update_numa(cpu, false);
9760 	ret = cpuset_cpu_inactive(cpu);
9761 	if (ret) {
9762 		balance_push_set(cpu, false);
9763 		set_cpu_active(cpu, true);
9764 		sched_update_numa(cpu, true);
9765 		return ret;
9766 	}
9767 	sched_domains_numa_masks_clear(cpu);
9768 	return 0;
9769 }
9770 
9771 static void sched_rq_cpu_starting(unsigned int cpu)
9772 {
9773 	struct rq *rq = cpu_rq(cpu);
9774 
9775 	rq->calc_load_update = calc_load_update;
9776 	update_max_interval();
9777 }
9778 
9779 int sched_cpu_starting(unsigned int cpu)
9780 {
9781 	sched_core_cpu_starting(cpu);
9782 	sched_rq_cpu_starting(cpu);
9783 	sched_tick_start(cpu);
9784 	return 0;
9785 }
9786 
9787 #ifdef CONFIG_HOTPLUG_CPU
9788 
9789 /*
9790  * Invoked immediately before the stopper thread is invoked to bring the
9791  * CPU down completely. At this point all per CPU kthreads except the
9792  * hotplug thread (current) and the stopper thread (inactive) have been
9793  * either parked or have been unbound from the outgoing CPU. Ensure that
9794  * any of those which might be on the way out are gone.
9795  *
9796  * If after this point a bound task is being woken on this CPU then the
9797  * responsible hotplug callback has failed to do it's job.
9798  * sched_cpu_dying() will catch it with the appropriate fireworks.
9799  */
9800 int sched_cpu_wait_empty(unsigned int cpu)
9801 {
9802 	balance_hotplug_wait();
9803 	return 0;
9804 }
9805 
9806 /*
9807  * Since this CPU is going 'away' for a while, fold any nr_active delta we
9808  * might have. Called from the CPU stopper task after ensuring that the
9809  * stopper is the last running task on the CPU, so nr_active count is
9810  * stable. We need to take the teardown thread which is calling this into
9811  * account, so we hand in adjust = 1 to the load calculation.
9812  *
9813  * Also see the comment "Global load-average calculations".
9814  */
9815 static void calc_load_migrate(struct rq *rq)
9816 {
9817 	long delta = calc_load_fold_active(rq, 1);
9818 
9819 	if (delta)
9820 		atomic_long_add(delta, &calc_load_tasks);
9821 }
9822 
9823 static void dump_rq_tasks(struct rq *rq, const char *loglvl)
9824 {
9825 	struct task_struct *g, *p;
9826 	int cpu = cpu_of(rq);
9827 
9828 	lockdep_assert_rq_held(rq);
9829 
9830 	printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
9831 	for_each_process_thread(g, p) {
9832 		if (task_cpu(p) != cpu)
9833 			continue;
9834 
9835 		if (!task_on_rq_queued(p))
9836 			continue;
9837 
9838 		printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
9839 	}
9840 }
9841 
9842 int sched_cpu_dying(unsigned int cpu)
9843 {
9844 	struct rq *rq = cpu_rq(cpu);
9845 	struct rq_flags rf;
9846 
9847 	/* Handle pending wakeups and then migrate everything off */
9848 	sched_tick_stop(cpu);
9849 
9850 	rq_lock_irqsave(rq, &rf);
9851 	if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
9852 		WARN(true, "Dying CPU not properly vacated!");
9853 		dump_rq_tasks(rq, KERN_WARNING);
9854 	}
9855 	rq_unlock_irqrestore(rq, &rf);
9856 
9857 	calc_load_migrate(rq);
9858 	update_max_interval();
9859 	hrtick_clear(rq);
9860 	sched_core_cpu_dying(cpu);
9861 	return 0;
9862 }
9863 #endif
9864 
9865 void __init sched_init_smp(void)
9866 {
9867 	sched_init_numa(NUMA_NO_NODE);
9868 
9869 	/*
9870 	 * There's no userspace yet to cause hotplug operations; hence all the
9871 	 * CPU masks are stable and all blatant races in the below code cannot
9872 	 * happen.
9873 	 */
9874 	mutex_lock(&sched_domains_mutex);
9875 	sched_init_domains(cpu_active_mask);
9876 	mutex_unlock(&sched_domains_mutex);
9877 
9878 	/* Move init over to a non-isolated CPU */
9879 	if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0)
9880 		BUG();
9881 	current->flags &= ~PF_NO_SETAFFINITY;
9882 	sched_init_granularity();
9883 
9884 	init_sched_rt_class();
9885 	init_sched_dl_class();
9886 
9887 	sched_smp_initialized = true;
9888 }
9889 
9890 static int __init migration_init(void)
9891 {
9892 	sched_cpu_starting(smp_processor_id());
9893 	return 0;
9894 }
9895 early_initcall(migration_init);
9896 
9897 #else
9898 void __init sched_init_smp(void)
9899 {
9900 	sched_init_granularity();
9901 }
9902 #endif /* CONFIG_SMP */
9903 
9904 int in_sched_functions(unsigned long addr)
9905 {
9906 	return in_lock_functions(addr) ||
9907 		(addr >= (unsigned long)__sched_text_start
9908 		&& addr < (unsigned long)__sched_text_end);
9909 }
9910 
9911 #ifdef CONFIG_CGROUP_SCHED
9912 /*
9913  * Default task group.
9914  * Every task in system belongs to this group at bootup.
9915  */
9916 struct task_group root_task_group;
9917 LIST_HEAD(task_groups);
9918 
9919 /* Cacheline aligned slab cache for task_group */
9920 static struct kmem_cache *task_group_cache __ro_after_init;
9921 #endif
9922 
9923 void __init sched_init(void)
9924 {
9925 	unsigned long ptr = 0;
9926 	int i;
9927 
9928 	/* Make sure the linker didn't screw up */
9929 	BUG_ON(&idle_sched_class != &fair_sched_class + 1 ||
9930 	       &fair_sched_class != &rt_sched_class + 1 ||
9931 	       &rt_sched_class   != &dl_sched_class + 1);
9932 #ifdef CONFIG_SMP
9933 	BUG_ON(&dl_sched_class != &stop_sched_class + 1);
9934 #endif
9935 
9936 	wait_bit_init();
9937 
9938 #ifdef CONFIG_FAIR_GROUP_SCHED
9939 	ptr += 2 * nr_cpu_ids * sizeof(void **);
9940 #endif
9941 #ifdef CONFIG_RT_GROUP_SCHED
9942 	ptr += 2 * nr_cpu_ids * sizeof(void **);
9943 #endif
9944 	if (ptr) {
9945 		ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT);
9946 
9947 #ifdef CONFIG_FAIR_GROUP_SCHED
9948 		root_task_group.se = (struct sched_entity **)ptr;
9949 		ptr += nr_cpu_ids * sizeof(void **);
9950 
9951 		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
9952 		ptr += nr_cpu_ids * sizeof(void **);
9953 
9954 		root_task_group.shares = ROOT_TASK_GROUP_LOAD;
9955 		init_cfs_bandwidth(&root_task_group.cfs_bandwidth, NULL);
9956 #endif /* CONFIG_FAIR_GROUP_SCHED */
9957 #ifdef CONFIG_RT_GROUP_SCHED
9958 		root_task_group.rt_se = (struct sched_rt_entity **)ptr;
9959 		ptr += nr_cpu_ids * sizeof(void **);
9960 
9961 		root_task_group.rt_rq = (struct rt_rq **)ptr;
9962 		ptr += nr_cpu_ids * sizeof(void **);
9963 
9964 #endif /* CONFIG_RT_GROUP_SCHED */
9965 	}
9966 
9967 	init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime());
9968 
9969 #ifdef CONFIG_SMP
9970 	init_defrootdomain();
9971 #endif
9972 
9973 #ifdef CONFIG_RT_GROUP_SCHED
9974 	init_rt_bandwidth(&root_task_group.rt_bandwidth,
9975 			global_rt_period(), global_rt_runtime());
9976 #endif /* CONFIG_RT_GROUP_SCHED */
9977 
9978 #ifdef CONFIG_CGROUP_SCHED
9979 	task_group_cache = KMEM_CACHE(task_group, 0);
9980 
9981 	list_add(&root_task_group.list, &task_groups);
9982 	INIT_LIST_HEAD(&root_task_group.children);
9983 	INIT_LIST_HEAD(&root_task_group.siblings);
9984 	autogroup_init(&init_task);
9985 #endif /* CONFIG_CGROUP_SCHED */
9986 
9987 	for_each_possible_cpu(i) {
9988 		struct rq *rq;
9989 
9990 		rq = cpu_rq(i);
9991 		raw_spin_lock_init(&rq->__lock);
9992 		rq->nr_running = 0;
9993 		rq->calc_load_active = 0;
9994 		rq->calc_load_update = jiffies + LOAD_FREQ;
9995 		init_cfs_rq(&rq->cfs);
9996 		init_rt_rq(&rq->rt);
9997 		init_dl_rq(&rq->dl);
9998 #ifdef CONFIG_FAIR_GROUP_SCHED
9999 		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
10000 		rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
10001 		/*
10002 		 * How much CPU bandwidth does root_task_group get?
10003 		 *
10004 		 * In the case of task groups formed through the cgroup filesystem,
10005 		 * it gets 100% of the CPU resources in the system. This overall
10006 		 * system CPU resource is divided among the tasks of
10007 		 * root_task_group and its child task-groups in a fair manner,
10008 		 * based on each entity's (task or task-group's) weight
10009 		 * (se->load.weight).
10010 		 *
10011 		 * In other words, if root_task_group has 10 tasks of weight
10012 		 * 1024 and two child groups A0 and A1 (of weight 1024 each),
10013 		 * then A0's share of the CPU resource is:
10014 		 *
10015 		 *	A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
10016 		 *
10017 		 * We achieve this by letting root_task_group's tasks sit
10018 		 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
10019 		 */
10020 		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
10021 #endif /* CONFIG_FAIR_GROUP_SCHED */
10022 
10023 		rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
10024 #ifdef CONFIG_RT_GROUP_SCHED
10025 		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
10026 #endif
10027 #ifdef CONFIG_SMP
10028 		rq->sd = NULL;
10029 		rq->rd = NULL;
10030 		rq->cpu_capacity = SCHED_CAPACITY_SCALE;
10031 		rq->balance_callback = &balance_push_callback;
10032 		rq->active_balance = 0;
10033 		rq->next_balance = jiffies;
10034 		rq->push_cpu = 0;
10035 		rq->cpu = i;
10036 		rq->online = 0;
10037 		rq->idle_stamp = 0;
10038 		rq->avg_idle = 2*sysctl_sched_migration_cost;
10039 		rq->max_idle_balance_cost = sysctl_sched_migration_cost;
10040 
10041 		INIT_LIST_HEAD(&rq->cfs_tasks);
10042 
10043 		rq_attach_root(rq, &def_root_domain);
10044 #ifdef CONFIG_NO_HZ_COMMON
10045 		rq->last_blocked_load_update_tick = jiffies;
10046 		atomic_set(&rq->nohz_flags, 0);
10047 
10048 		INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
10049 #endif
10050 #ifdef CONFIG_HOTPLUG_CPU
10051 		rcuwait_init(&rq->hotplug_wait);
10052 #endif
10053 #endif /* CONFIG_SMP */
10054 		hrtick_rq_init(rq);
10055 		atomic_set(&rq->nr_iowait, 0);
10056 
10057 #ifdef CONFIG_SCHED_CORE
10058 		rq->core = rq;
10059 		rq->core_pick = NULL;
10060 		rq->core_enabled = 0;
10061 		rq->core_tree = RB_ROOT;
10062 		rq->core_forceidle_count = 0;
10063 		rq->core_forceidle_occupation = 0;
10064 		rq->core_forceidle_start = 0;
10065 
10066 		rq->core_cookie = 0UL;
10067 #endif
10068 		zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i));
10069 	}
10070 
10071 	set_load_weight(&init_task, false);
10072 
10073 	/*
10074 	 * The boot idle thread does lazy MMU switching as well:
10075 	 */
10076 	mmgrab_lazy_tlb(&init_mm);
10077 	enter_lazy_tlb(&init_mm, current);
10078 
10079 	/*
10080 	 * The idle task doesn't need the kthread struct to function, but it
10081 	 * is dressed up as a per-CPU kthread and thus needs to play the part
10082 	 * if we want to avoid special-casing it in code that deals with per-CPU
10083 	 * kthreads.
10084 	 */
10085 	WARN_ON(!set_kthread_struct(current));
10086 
10087 	/*
10088 	 * Make us the idle thread. Technically, schedule() should not be
10089 	 * called from this thread; however, somewhere below it might be
10090 	 * called anyway. Because we are the idle thread, we just pick up
10091 	 * running again when this runqueue becomes "idle".
10092 	 */
10093 	init_idle(current, smp_processor_id());
10094 
10095 	calc_load_update = jiffies + LOAD_FREQ;
10096 
10097 #ifdef CONFIG_SMP
10098 	idle_thread_set_boot_cpu();
10099 	balance_push_set(smp_processor_id(), false);
10100 #endif
10101 	init_sched_fair_class();
10102 
10103 	psi_init();
10104 
10105 	init_uclamp();
10106 
10107 	preempt_dynamic_init();
10108 
10109 	scheduler_running = 1;
10110 }
10111 
10112 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
10113 
10114 void __might_sleep(const char *file, int line)
10115 {
10116 	unsigned int state = get_current_state();
10117 	/*
10118 	 * Blocking primitives will set (and therefore destroy) current->state.
10119 	 * Since we will exit with TASK_RUNNING, make sure we enter with it as
10120 	 * well, otherwise we would destroy the state that was set.
10121 	 */
10122 	WARN_ONCE(state != TASK_RUNNING && current->task_state_change,
10123 			"do not call blocking ops when !TASK_RUNNING; "
10124 			"state=%x set at [<%p>] %pS\n", state,
10125 			(void *)current->task_state_change,
10126 			(void *)current->task_state_change);
10127 
10128 	__might_resched(file, line, 0);
10129 }
10130 EXPORT_SYMBOL(__might_sleep);
10131 
10132 static void print_preempt_disable_ip(int preempt_offset, unsigned long ip)
10133 {
10134 	if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT))
10135 		return;
10136 
10137 	if (preempt_count() == preempt_offset)
10138 		return;
10139 
10140 	pr_err("Preemption disabled at:");
10141 	print_ip_sym(KERN_ERR, ip);
10142 }
10143 
10144 static inline bool resched_offsets_ok(unsigned int offsets)
10145 {
10146 	unsigned int nested = preempt_count();
10147 
10148 	nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT;
10149 
10150 	return nested == offsets;
10151 }
10152 
10153 void __might_resched(const char *file, int line, unsigned int offsets)
10154 {
10155 	/* Ratelimiting timestamp: */
10156 	static unsigned long prev_jiffy;
10157 
10158 	unsigned long preempt_disable_ip;
10159 
10160 	/* WARN_ON_ONCE() by default, no rate limit required: */
10161 	rcu_sleep_check();
10162 
10163 	if ((resched_offsets_ok(offsets) && !irqs_disabled() &&
10164 	     !is_idle_task(current) && !current->non_block_count) ||
10165 	    system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
10166 	    oops_in_progress)
10167 		return;
10168 
10169 	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
10170 		return;
10171 	prev_jiffy = jiffies;
10172 
10173 	/* Save this before calling printk(), since that will clobber it: */
10174 	preempt_disable_ip = get_preempt_disable_ip(current);
10175 
10176 	pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
10177 	       file, line);
10178 	pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
10179 	       in_atomic(), irqs_disabled(), current->non_block_count,
10180 	       current->pid, current->comm);
10181 	pr_err("preempt_count: %x, expected: %x\n", preempt_count(),
10182 	       offsets & MIGHT_RESCHED_PREEMPT_MASK);
10183 
10184 	if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
10185 		pr_err("RCU nest depth: %d, expected: %u\n",
10186 		       rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT);
10187 	}
10188 
10189 	if (task_stack_end_corrupted(current))
10190 		pr_emerg("Thread overran stack, or stack corrupted\n");
10191 
10192 	debug_show_held_locks(current);
10193 	if (irqs_disabled())
10194 		print_irqtrace_events(current);
10195 
10196 	print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK,
10197 				 preempt_disable_ip);
10198 
10199 	dump_stack();
10200 	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
10201 }
10202 EXPORT_SYMBOL(__might_resched);
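/*
 * Example (illustrative sketch with a hypothetical helper): functions that
 * may block typically annotate themselves so the check above fires even on
 * calls that happen not to sleep:
 *
 *	static void *my_alloc_buffer(size_t size)
 *	{
 *		might_sleep();			// GFP_KERNEL may block
 *		return kzalloc(size, GFP_KERNEL);
 *	}
 *
 * Calling such a helper under a spinlock, with IRQs disabled, or from an
 * RCU read-side critical section then produces the "sleeping function
 * called from invalid context" report printed by __might_resched().
 */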
10203 
10204 void __cant_sleep(const char *file, int line, int preempt_offset)
10205 {
10206 	static unsigned long prev_jiffy;
10207 
10208 	if (irqs_disabled())
10209 		return;
10210 
10211 	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
10212 		return;
10213 
10214 	if (preempt_count() > preempt_offset)
10215 		return;
10216 
10217 	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
10218 		return;
10219 	prev_jiffy = jiffies;
10220 
10221 	printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
10222 	printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
10223 			in_atomic(), irqs_disabled(),
10224 			current->pid, current->comm);
10225 
10226 	debug_show_held_locks(current);
10227 	dump_stack();
10228 	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
10229 }
10230 EXPORT_SYMBOL_GPL(__cant_sleep);
10231 
10232 #ifdef CONFIG_SMP
10233 void __cant_migrate(const char *file, int line)
10234 {
10235 	static unsigned long prev_jiffy;
10236 
10237 	if (irqs_disabled())
10238 		return;
10239 
10240 	if (is_migration_disabled(current))
10241 		return;
10242 
10243 	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
10244 		return;
10245 
10246 	if (preempt_count() > 0)
10247 		return;
10248 
10249 	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
10250 		return;
10251 	prev_jiffy = jiffies;
10252 
10253 	pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
10254 	pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
10255 	       in_atomic(), irqs_disabled(), is_migration_disabled(current),
10256 	       current->pid, current->comm);
10257 
10258 	debug_show_held_locks(current);
10259 	dump_stack();
10260 	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
10261 }
10262 EXPORT_SYMBOL_GPL(__cant_migrate);
10263 #endif
10264 #endif
10265 
10266 #ifdef CONFIG_MAGIC_SYSRQ
10267 void normalize_rt_tasks(void)
10268 {
10269 	struct task_struct *g, *p;
10270 	struct sched_attr attr = {
10271 		.sched_policy = SCHED_NORMAL,
10272 	};
10273 
10274 	read_lock(&tasklist_lock);
10275 	for_each_process_thread(g, p) {
10276 		/*
10277 		 * Only normalize user tasks:
10278 		 */
10279 		if (p->flags & PF_KTHREAD)
10280 			continue;
10281 
10282 		p->se.exec_start = 0;
10283 		schedstat_set(p->stats.wait_start,  0);
10284 		schedstat_set(p->stats.sleep_start, 0);
10285 		schedstat_set(p->stats.block_start, 0);
10286 
10287 		if (!dl_task(p) && !rt_task(p)) {
10288 			/*
10289 			 * Renice negative nice level userspace
10290 			 * tasks back to 0:
10291 			 */
10292 			if (task_nice(p) < 0)
10293 				set_user_nice(p, 0);
10294 			continue;
10295 		}
10296 
10297 		__sched_setscheduler(p, &attr, false, false);
10298 	}
10299 	read_unlock(&tasklist_lock);
10300 }
10301 
10302 #endif /* CONFIG_MAGIC_SYSRQ */
10303 
10304 #if defined(CONFIG_KGDB_KDB)
10305 /*
10306  * These functions are only useful for kdb.
10307  *
10308  * They can only be called when the whole system has been
10309  * stopped - every CPU needs to be quiescent, and no scheduling
10310  * activity can take place. Using them for anything else would
10311  * be a serious bug, and as a result, they aren't even visible
10312  * under any other configuration.
10313  */
10314 
10315 /**
10316  * curr_task - return the current task for a given CPU.
10317  * @cpu: the processor in question.
10318  *
10319  * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
10320  *
10321  * Return: The current task for @cpu.
10322  */
10323 struct task_struct *curr_task(int cpu)
10324 {
10325 	return cpu_curr(cpu);
10326 }
10327 
10328 #endif /* defined(CONFIG_KGDB_KDB) */
10329 
10330 #ifdef CONFIG_CGROUP_SCHED
10331 /* task_group_lock serializes the addition/removal of task groups */
10332 static DEFINE_SPINLOCK(task_group_lock);
10333 
10334 static inline void alloc_uclamp_sched_group(struct task_group *tg,
10335 					    struct task_group *parent)
10336 {
10337 #ifdef CONFIG_UCLAMP_TASK_GROUP
10338 	enum uclamp_id clamp_id;
10339 
10340 	for_each_clamp_id(clamp_id) {
10341 		uclamp_se_set(&tg->uclamp_req[clamp_id],
10342 			      uclamp_none(clamp_id), false);
10343 		tg->uclamp[clamp_id] = parent->uclamp[clamp_id];
10344 	}
10345 #endif
10346 }
10347 
10348 static void sched_free_group(struct task_group *tg)
10349 {
10350 	free_fair_sched_group(tg);
10351 	free_rt_sched_group(tg);
10352 	autogroup_free(tg);
10353 	kmem_cache_free(task_group_cache, tg);
10354 }
10355 
10356 static void sched_free_group_rcu(struct rcu_head *rcu)
10357 {
10358 	sched_free_group(container_of(rcu, struct task_group, rcu));
10359 }
10360 
10361 static void sched_unregister_group(struct task_group *tg)
10362 {
10363 	unregister_fair_sched_group(tg);
10364 	unregister_rt_sched_group(tg);
10365 	/*
10366 	 * We have to wait for yet another RCU grace period to expire, as
10367 	 * print_cfs_stats() might run concurrently.
10368 	 */
10369 	call_rcu(&tg->rcu, sched_free_group_rcu);
10370 }
10371 
10372 /* allocate runqueue etc for a new task group */
10373 struct task_group *sched_create_group(struct task_group *parent)
10374 {
10375 	struct task_group *tg;
10376 
10377 	tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
10378 	if (!tg)
10379 		return ERR_PTR(-ENOMEM);
10380 
10381 	if (!alloc_fair_sched_group(tg, parent))
10382 		goto err;
10383 
10384 	if (!alloc_rt_sched_group(tg, parent))
10385 		goto err;
10386 
10387 	alloc_uclamp_sched_group(tg, parent);
10388 
10389 	return tg;
10390 
10391 err:
10392 	sched_free_group(tg);
10393 	return ERR_PTR(-ENOMEM);
10394 }
10395 
10396 void sched_online_group(struct task_group *tg, struct task_group *parent)
10397 {
10398 	unsigned long flags;
10399 
10400 	spin_lock_irqsave(&task_group_lock, flags);
10401 	list_add_rcu(&tg->list, &task_groups);
10402 
10403 	/* Root should already exist: */
10404 	WARN_ON(!parent);
10405 
10406 	tg->parent = parent;
10407 	INIT_LIST_HEAD(&tg->children);
10408 	list_add_rcu(&tg->siblings, &parent->children);
10409 	spin_unlock_irqrestore(&task_group_lock, flags);
10410 
10411 	online_fair_sched_group(tg);
10412 }
10413 
10414 /* rcu callback to free various structures associated with a task group */
10415 static void sched_unregister_group_rcu(struct rcu_head *rhp)
10416 {
10417 	/* Now it should be safe to free those cfs_rqs: */
10418 	sched_unregister_group(container_of(rhp, struct task_group, rcu));
10419 }
10420 
10421 void sched_destroy_group(struct task_group *tg)
10422 {
10423 	/* Wait for possible concurrent references to cfs_rqs to complete: */
10424 	call_rcu(&tg->rcu, sched_unregister_group_rcu);
10425 }
10426 
10427 void sched_release_group(struct task_group *tg)
10428 {
10429 	unsigned long flags;
10430 
10431 	/*
10432 	 * Unlink first, to prevent walk_tg_tree_from() from finding us (via
10433 	 * sched_cfs_period_timer()).
10434 	 *
10435 	 * For this to be effective, we have to wait for all pending users of
10436 	 * this task group to leave their RCU critical section to ensure no new
10437 	 * user will see our dying task group any more. Specifically ensure
10438 	 * that tg_unthrottle_up() won't add decayed cfs_rq's to it.
10439 	 *
10440 	 * We therefore defer calling unregister_fair_sched_group() to
10441 	 * sched_unregister_group() which is guaranteed to get called only after the
10442 	 * current RCU grace period has expired.
10443 	 */
10444 	spin_lock_irqsave(&task_group_lock, flags);
10445 	list_del_rcu(&tg->list);
10446 	list_del_rcu(&tg->siblings);
10447 	spin_unlock_irqrestore(&task_group_lock, flags);
10448 }
10449 
10450 static struct task_group *sched_get_task_group(struct task_struct *tsk)
10451 {
10452 	struct task_group *tg;
10453 
10454 	/*
10455 	 * All callers are synchronized by task_rq_lock(); using RCU here as well
10456 	 * would be pointless. Thus, we pass "true" to task_css_check()
10457 	 * to prevent lockdep warnings.
10458 	 */
10459 	tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
10460 			  struct task_group, css);
10461 	tg = autogroup_task_group(tsk, tg);
10462 
10463 	return tg;
10464 }
10465 
10466 static void sched_change_group(struct task_struct *tsk, struct task_group *group)
10467 {
10468 	tsk->sched_task_group = group;
10469 
10470 #ifdef CONFIG_FAIR_GROUP_SCHED
10471 	if (tsk->sched_class->task_change_group)
10472 		tsk->sched_class->task_change_group(tsk);
10473 	else
10474 #endif
10475 		set_task_rq(tsk, task_cpu(tsk));
10476 }
10477 
10478 /*
10479  * Change task's runqueue when it moves between groups.
10480  *
10481  * The caller of this function should have put the task in its new group by
10482  * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
10483  * its new group.
10484  */
10485 void sched_move_task(struct task_struct *tsk)
10486 {
10487 	int queued, running, queue_flags =
10488 		DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
10489 	struct task_group *group;
10490 	struct rq *rq;
10491 
10492 	CLASS(task_rq_lock, rq_guard)(tsk);
10493 	rq = rq_guard.rq;
10494 
10495 	/*
10496 	 * Esp. with SCHED_AUTOGROUP enabled it is possible to get superfluous
10497 	 * group changes.
10498 	 */
10499 	group = sched_get_task_group(tsk);
10500 	if (group == tsk->sched_task_group)
10501 		return;
10502 
10503 	update_rq_clock(rq);
10504 
10505 	running = task_current(rq, tsk);
10506 	queued = task_on_rq_queued(tsk);
10507 
10508 	if (queued)
10509 		dequeue_task(rq, tsk, queue_flags);
10510 	if (running)
10511 		put_prev_task(rq, tsk);
10512 
10513 	sched_change_group(tsk, group);
10514 
10515 	if (queued)
10516 		enqueue_task(rq, tsk, queue_flags);
10517 	if (running) {
10518 		set_next_task(rq, tsk);
10519 		/*
10520 		 * After changing group, the running task may have joined a
10521 		 * throttled one but it's still the running task. Trigger a
10522 		 * resched to make sure that task can still run.
10523 		 */
10524 		resched_curr(rq);
10525 	}
10526 }
10527 
10528 static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
10529 {
10530 	return css ? container_of(css, struct task_group, css) : NULL;
10531 }
10532 
10533 static struct cgroup_subsys_state *
10534 cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
10535 {
10536 	struct task_group *parent = css_tg(parent_css);
10537 	struct task_group *tg;
10538 
10539 	if (!parent) {
10540 		/* This is early initialization for the top cgroup */
10541 		return &root_task_group.css;
10542 	}
10543 
10544 	tg = sched_create_group(parent);
10545 	if (IS_ERR(tg))
10546 		return ERR_PTR(-ENOMEM);
10547 
10548 	return &tg->css;
10549 }
10550 
10551 /* Expose task group only after completing cgroup initialization */
10552 static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
10553 {
10554 	struct task_group *tg = css_tg(css);
10555 	struct task_group *parent = css_tg(css->parent);
10556 
10557 	if (parent)
10558 		sched_online_group(tg, parent);
10559 
10560 #ifdef CONFIG_UCLAMP_TASK_GROUP
10561 	/* Propagate the effective uclamp value for the new group */
10562 	guard(mutex)(&uclamp_mutex);
10563 	guard(rcu)();
10564 	cpu_util_update_eff(css);
10565 #endif
10566 
10567 	return 0;
10568 }
10569 
10570 static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
10571 {
10572 	struct task_group *tg = css_tg(css);
10573 
10574 	sched_release_group(tg);
10575 }
10576 
10577 static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
10578 {
10579 	struct task_group *tg = css_tg(css);
10580 
10581 	/*
10582 	 * Relies on the RCU grace period between css_released() and this.
10583 	 */
10584 	sched_unregister_group(tg);
10585 }
10586 
10587 #ifdef CONFIG_RT_GROUP_SCHED
10588 static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
10589 {
10590 	struct task_struct *task;
10591 	struct cgroup_subsys_state *css;
10592 
10593 	cgroup_taskset_for_each(task, css, tset) {
10594 		if (!sched_rt_can_attach(css_tg(css), task))
10595 			return -EINVAL;
10596 	}
10597 	return 0;
10598 }
10599 #endif
10600 
10601 static void cpu_cgroup_attach(struct cgroup_taskset *tset)
10602 {
10603 	struct task_struct *task;
10604 	struct cgroup_subsys_state *css;
10605 
10606 	cgroup_taskset_for_each(task, css, tset)
10607 		sched_move_task(task);
10608 }
10609 
10610 #ifdef CONFIG_UCLAMP_TASK_GROUP
10611 static void cpu_util_update_eff(struct cgroup_subsys_state *css)
10612 {
10613 	struct cgroup_subsys_state *top_css = css;
10614 	struct uclamp_se *uc_parent = NULL;
10615 	struct uclamp_se *uc_se = NULL;
10616 	unsigned int eff[UCLAMP_CNT];
10617 	enum uclamp_id clamp_id;
10618 	unsigned int clamps;
10619 
10620 	lockdep_assert_held(&uclamp_mutex);
10621 	SCHED_WARN_ON(!rcu_read_lock_held());
10622 
10623 	css_for_each_descendant_pre(css, top_css) {
10624 		uc_parent = css_tg(css)->parent
10625 			? css_tg(css)->parent->uclamp : NULL;
10626 
10627 		for_each_clamp_id(clamp_id) {
10628 			/* Assume effective clamps matches requested clamps */
10629 			eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value;
10630 			/* Cap effective clamps with parent's effective clamps */
10631 			if (uc_parent &&
10632 			    eff[clamp_id] > uc_parent[clamp_id].value) {
10633 				eff[clamp_id] = uc_parent[clamp_id].value;
10634 			}
10635 		}
10636 		/* Ensure protection is always capped by limit */
10637 		eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]);
10638 
10639 		/* Propagate most restrictive effective clamps */
10640 		clamps = 0x0;
10641 		uc_se = css_tg(css)->uclamp;
10642 		for_each_clamp_id(clamp_id) {
10643 			if (eff[clamp_id] == uc_se[clamp_id].value)
10644 				continue;
10645 			uc_se[clamp_id].value = eff[clamp_id];
10646 			uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]);
10647 			clamps |= (0x1 << clamp_id);
10648 		}
10649 		if (!clamps) {
10650 			css = css_rightmost_descendant(css);
10651 			continue;
10652 		}
10653 
10654 		/* Immediately update descendants RUNNABLE tasks */
10655 		uclamp_update_active_tasks(css);
10656 	}
10657 }
10658 
10659 /*
10660  * Integer 10^N with a given N exponent by casting to integer the literal "1eN"
10661  * C expression. Since there is no way to convert a macro argument (N) into a
10662  * character constant, use two levels of macros.
10663  */
10664 #define _POW10(exp) ((unsigned int)1e##exp)
10665 #define POW10(exp) _POW10(exp)
10666 
10667 struct uclamp_request {
10668 #define UCLAMP_PERCENT_SHIFT	2
10669 #define UCLAMP_PERCENT_SCALE	(100 * POW10(UCLAMP_PERCENT_SHIFT))
10670 	s64 percent;
10671 	u64 util;
10672 	int ret;
10673 };
10674 
10675 static inline struct uclamp_request
10676 capacity_from_percent(char *buf)
10677 {
10678 	struct uclamp_request req = {
10679 		.percent = UCLAMP_PERCENT_SCALE,
10680 		.util = SCHED_CAPACITY_SCALE,
10681 		.ret = 0,
10682 	};
10683 
10684 	buf = strim(buf);
10685 	if (strcmp(buf, "max")) {
10686 		req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT,
10687 					     &req.percent);
10688 		if (req.ret)
10689 			return req;
10690 		if ((u64)req.percent > UCLAMP_PERCENT_SCALE) {
10691 			req.ret = -ERANGE;
10692 			return req;
10693 		}
10694 
10695 		req.util = req.percent << SCHED_CAPACITY_SHIFT;
10696 		req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE);
10697 	}
10698 
10699 	return req;
10700 }
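/*
 * Worked example: writing "50" to cpu.uclamp.min is parsed by
 * cgroup_parse_float() with UCLAMP_PERCENT_SHIFT (2) fractional digits,
 * so req.percent = 5000 on a scale of UCLAMP_PERCENT_SCALE (10000).
 * The utilization clamp then becomes:
 *
 *	req.util = DIV_ROUND_CLOSEST_ULL(5000 << SCHED_CAPACITY_SHIFT, 10000)
 *	         = DIV_ROUND_CLOSEST_ULL(5120000, 10000) = 512
 *
 * i.e. 50% of SCHED_CAPACITY_SCALE (1024), while "max" short-circuits to
 * SCHED_CAPACITY_SCALE directly.
 */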
10701 
10702 static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf,
10703 				size_t nbytes, loff_t off,
10704 				enum uclamp_id clamp_id)
10705 {
10706 	struct uclamp_request req;
10707 	struct task_group *tg;
10708 
10709 	req = capacity_from_percent(buf);
10710 	if (req.ret)
10711 		return req.ret;
10712 
10713 	static_branch_enable(&sched_uclamp_used);
10714 
10715 	guard(mutex)(&uclamp_mutex);
10716 	guard(rcu)();
10717 
10718 	tg = css_tg(of_css(of));
10719 	if (tg->uclamp_req[clamp_id].value != req.util)
10720 		uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false);
10721 
10722 	/*
10723 	 * Because the conversion rounding is not recoverable, we keep track of
10724 	 * the exact requested value.
10725 	 */
10726 	tg->uclamp_pct[clamp_id] = req.percent;
10727 
10728 	/* Update effective clamps to track the most restrictive value */
10729 	cpu_util_update_eff(of_css(of));
10730 
10731 	return nbytes;
10732 }
10733 
10734 static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of,
10735 				    char *buf, size_t nbytes,
10736 				    loff_t off)
10737 {
10738 	return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MIN);
10739 }
10740 
10741 static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of,
10742 				    char *buf, size_t nbytes,
10743 				    loff_t off)
10744 {
10745 	return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MAX);
10746 }
10747 
10748 static inline void cpu_uclamp_print(struct seq_file *sf,
10749 				    enum uclamp_id clamp_id)
10750 {
10751 	struct task_group *tg;
10752 	u64 util_clamp;
10753 	u64 percent;
10754 	u32 rem;
10755 
10756 	scoped_guard (rcu) {
10757 		tg = css_tg(seq_css(sf));
10758 		util_clamp = tg->uclamp_req[clamp_id].value;
10759 	}
10760 
10761 	if (util_clamp == SCHED_CAPACITY_SCALE) {
10762 		seq_puts(sf, "max\n");
10763 		return;
10764 	}
10765 
10766 	percent = tg->uclamp_pct[clamp_id];
10767 	percent = div_u64_rem(percent, POW10(UCLAMP_PERCENT_SHIFT), &rem);
10768 	seq_printf(sf, "%llu.%0*u\n", percent, UCLAMP_PERCENT_SHIFT, rem);
10769 }
10770 
10771 static int cpu_uclamp_min_show(struct seq_file *sf, void *v)
10772 {
10773 	cpu_uclamp_print(sf, UCLAMP_MIN);
10774 	return 0;
10775 }
10776 
10777 static int cpu_uclamp_max_show(struct seq_file *sf, void *v)
10778 {
10779 	cpu_uclamp_print(sf, UCLAMP_MAX);
10780 	return 0;
10781 }
10782 #endif /* CONFIG_UCLAMP_TASK_GROUP */
10783 
10784 #ifdef CONFIG_FAIR_GROUP_SCHED
10785 static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
10786 				struct cftype *cftype, u64 shareval)
10787 {
10788 	if (shareval > scale_load_down(ULONG_MAX))
10789 		shareval = MAX_SHARES;
10790 	return sched_group_set_shares(css_tg(css), scale_load(shareval));
10791 }
10792 
10793 static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
10794 			       struct cftype *cft)
10795 {
10796 	struct task_group *tg = css_tg(css);
10797 
10798 	return (u64) scale_load_down(tg->shares);
10799 }
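/*
 * Worked example: these handlers back the cgroup v1 "cpu.shares" file. A
 * group written with 2048 competing against a sibling left at the default
 * (1024) gets roughly a 2:1 share of contended CPU time; values larger
 * than scale_load_down(ULONG_MAX) are clamped to MAX_SHARES.
 */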
10800 
10801 #ifdef CONFIG_CFS_BANDWIDTH
10802 static DEFINE_MUTEX(cfs_constraints_mutex);
10803 
10804 const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
10805 static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
10806 /* More than 203 days if BW_SHIFT equals 20. */
10807 static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC;
10808 
10809 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
10810 
10811 static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota,
10812 				u64 burst)
10813 {
10814 	int i, ret = 0, runtime_enabled, runtime_was_enabled;
10815 	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
10816 
10817 	if (tg == &root_task_group)
10818 		return -EINVAL;
10819 
10820 	/*
10821 	 * Ensure we have at least some amount of bandwidth every period.  This is
10822 	 * to prevent reaching a state of large arrears when throttled via
10823 	 * entity_tick() resulting in prolonged exit starvation.
10824 	 */
10825 	if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
10826 		return -EINVAL;
10827 
10828 	/*
10829 	 * Likewise, bound things on the other side by preventing insane quota
10830 	 * periods.  This also allows us to normalize in computing quota
10831 	 * feasibility.
10832 	 */
10833 	if (period > max_cfs_quota_period)
10834 		return -EINVAL;
10835 
10836 	/*
10837 	 * Bound quota to defend quota against overflow during bandwidth shift.
10838 	 */
10839 	if (quota != RUNTIME_INF && quota > max_cfs_runtime)
10840 		return -EINVAL;
10841 
10842 	if (quota != RUNTIME_INF && (burst > quota ||
10843 				     burst + quota > max_cfs_runtime))
10844 		return -EINVAL;
10845 
10846 	/*
10847 	 * Prevent race between setting of cfs_rq->runtime_enabled and
10848 	 * unthrottle_offline_cfs_rqs().
10849 	 */
10850 	guard(cpus_read_lock)();
10851 	guard(mutex)(&cfs_constraints_mutex);
10852 
10853 	ret = __cfs_schedulable(tg, period, quota);
10854 	if (ret)
10855 		return ret;
10856 
10857 	runtime_enabled = quota != RUNTIME_INF;
10858 	runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
10859 	/*
10860 	 * If we need to toggle cfs_bandwidth_used, off->on must occur
10861 	 * before making related changes, and on->off must occur afterwards
10862 	 */
10863 	if (runtime_enabled && !runtime_was_enabled)
10864 		cfs_bandwidth_usage_inc();
10865 
10866 	scoped_guard (raw_spinlock_irq, &cfs_b->lock) {
10867 		cfs_b->period = ns_to_ktime(period);
10868 		cfs_b->quota = quota;
10869 		cfs_b->burst = burst;
10870 
10871 		__refill_cfs_bandwidth_runtime(cfs_b);
10872 
10873 		/*
10874 		 * Restart the period timer (if active) to handle new
10875 		 * period expiry:
10876 		 */
10877 		if (runtime_enabled)
10878 			start_cfs_bandwidth(cfs_b);
10879 	}
10880 
10881 	for_each_online_cpu(i) {
10882 		struct cfs_rq *cfs_rq = tg->cfs_rq[i];
10883 		struct rq *rq = cfs_rq->rq;
10884 
10885 		guard(rq_lock_irq)(rq);
10886 		cfs_rq->runtime_enabled = runtime_enabled;
10887 		cfs_rq->runtime_remaining = 0;
10888 
10889 		if (cfs_rq->throttled)
10890 			unthrottle_cfs_rq(cfs_rq);
10891 	}
10892 
10893 	if (runtime_was_enabled && !runtime_enabled)
10894 		cfs_bandwidth_usage_dec();
10895 
10896 	return 0;
10897 }
10898 
10899 static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
10900 {
10901 	u64 quota, period, burst;
10902 
10903 	period = ktime_to_ns(tg->cfs_bandwidth.period);
10904 	burst = tg->cfs_bandwidth.burst;
10905 	if (cfs_quota_us < 0)
10906 		quota = RUNTIME_INF;
10907 	else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC)
10908 		quota = (u64)cfs_quota_us * NSEC_PER_USEC;
10909 	else
10910 		return -EINVAL;
10911 
10912 	return tg_set_cfs_bandwidth(tg, period, quota, burst);
10913 }
10914 
10915 static long tg_get_cfs_quota(struct task_group *tg)
10916 {
10917 	u64 quota_us;
10918 
10919 	if (tg->cfs_bandwidth.quota == RUNTIME_INF)
10920 		return -1;
10921 
10922 	quota_us = tg->cfs_bandwidth.quota;
10923 	do_div(quota_us, NSEC_PER_USEC);
10924 
10925 	return quota_us;
10926 }
10927 
10928 static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
10929 {
10930 	u64 quota, period, burst;
10931 
10932 	if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC)
10933 		return -EINVAL;
10934 
10935 	period = (u64)cfs_period_us * NSEC_PER_USEC;
10936 	quota = tg->cfs_bandwidth.quota;
10937 	burst = tg->cfs_bandwidth.burst;
10938 
10939 	return tg_set_cfs_bandwidth(tg, period, quota, burst);
10940 }
10941 
10942 static long tg_get_cfs_period(struct task_group *tg)
10943 {
10944 	u64 cfs_period_us;
10945 
10946 	cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
10947 	do_div(cfs_period_us, NSEC_PER_USEC);
10948 
10949 	return cfs_period_us;
10950 }
10951 
10952 static int tg_set_cfs_burst(struct task_group *tg, long cfs_burst_us)
10953 {
10954 	u64 quota, period, burst;
10955 
10956 	if ((u64)cfs_burst_us > U64_MAX / NSEC_PER_USEC)
10957 		return -EINVAL;
10958 
10959 	burst = (u64)cfs_burst_us * NSEC_PER_USEC;
10960 	period = ktime_to_ns(tg->cfs_bandwidth.period);
10961 	quota = tg->cfs_bandwidth.quota;
10962 
10963 	return tg_set_cfs_bandwidth(tg, period, quota, burst);
10964 }
10965 
10966 static long tg_get_cfs_burst(struct task_group *tg)
10967 {
10968 	u64 burst_us;
10969 
10970 	burst_us = tg->cfs_bandwidth.burst;
10971 	do_div(burst_us, NSEC_PER_USEC);
10972 
10973 	return burst_us;
10974 }
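/*
 * Worked example: through the cgroup v1 "cpu.cfs_quota_us" and
 * "cpu.cfs_period_us" files these helpers translate, say, quota = 50000
 * and period = 100000 into a 50ms budget per 100ms window in
 * tg_set_cfs_bandwidth(), i.e. at most half a CPU's worth of runtime per
 * period for the whole group. Writing -1 as the quota maps to RUNTIME_INF
 * and removes the limit; "cpu.cfs_burst_us" lets unused quota accumulate
 * up to the configured burst and be spent in a later period.
 */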
10975 
10976 static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
10977 				  struct cftype *cft)
10978 {
10979 	return tg_get_cfs_quota(css_tg(css));
10980 }
10981 
10982 static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
10983 				   struct cftype *cftype, s64 cfs_quota_us)
10984 {
10985 	return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
10986 }
10987 
10988 static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
10989 				   struct cftype *cft)
10990 {
10991 	return tg_get_cfs_period(css_tg(css));
10992 }
10993 
10994 static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
10995 				    struct cftype *cftype, u64 cfs_period_us)
10996 {
10997 	return tg_set_cfs_period(css_tg(css), cfs_period_us);
10998 }
10999 
11000 static u64 cpu_cfs_burst_read_u64(struct cgroup_subsys_state *css,
11001 				  struct cftype *cft)
11002 {
11003 	return tg_get_cfs_burst(css_tg(css));
11004 }
11005 
11006 static int cpu_cfs_burst_write_u64(struct cgroup_subsys_state *css,
11007 				   struct cftype *cftype, u64 cfs_burst_us)
11008 {
11009 	return tg_set_cfs_burst(css_tg(css), cfs_burst_us);
11010 }
11011 
11012 struct cfs_schedulable_data {
11013 	struct task_group *tg;
11014 	u64 period, quota;
11015 };
11016 
11017 /*
11018  * normalize group quota/period to be quota/max_period
11019  * note: units are usecs
11020  */
11021 static u64 normalize_cfs_quota(struct task_group *tg,
11022 			       struct cfs_schedulable_data *d)
11023 {
11024 	u64 quota, period;
11025 
11026 	if (tg == d->tg) {
11027 		period = d->period;
11028 		quota = d->quota;
11029 	} else {
11030 		period = tg_get_cfs_period(tg);
11031 		quota = tg_get_cfs_quota(tg);
11032 	}
11033 
11034 	/* note: these should typically be equivalent */
11035 	if (quota == RUNTIME_INF || quota == -1)
11036 		return RUNTIME_INF;
11037 
11038 	return to_ratio(period, quota);
11039 }
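
/*
 * Editorial worked example for normalize_cfs_quota() above (illustrative
 * sketch, assuming to_ratio() scales runtime/period by 2^BW_SHIFT (= 20) as
 * elsewhere in the scheduler's bandwidth code):
 *
 *	period = 100000 us, quota = 50000 us
 *	to_ratio(100000, 50000) = (50000 << 20) / 100000 = 524288
 *
 * i.e. a group allowed 50% of its period normalizes to half of BW_UNIT, so
 * quotas configured with different periods can be compared directly in
 * tg_cfs_schedulable_down().
 */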
11040 
11041 static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
11042 {
11043 	struct cfs_schedulable_data *d = data;
11044 	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
11045 	s64 quota = 0, parent_quota = -1;
11046 
11047 	if (!tg->parent) {
11048 		quota = RUNTIME_INF;
11049 	} else {
11050 		struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
11051 
11052 		quota = normalize_cfs_quota(tg, d);
11053 		parent_quota = parent_b->hierarchical_quota;
11054 
11055 		/*
11056 		 * Ensure max(child_quota) <= parent_quota.  On cgroup2,
11057 		 * always take the non-RUNTIME_INF min.  On cgroup1, only
11058 		 * inherit when no limit is set. In both cases this is used
11059 		 * by the scheduler to determine if a given CFS task has a
11060 		 * bandwidth constraint at some higher level.
11061 		 */
11062 		if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) {
11063 			if (quota == RUNTIME_INF)
11064 				quota = parent_quota;
11065 			else if (parent_quota != RUNTIME_INF)
11066 				quota = min(quota, parent_quota);
11067 		} else {
11068 			if (quota == RUNTIME_INF)
11069 				quota = parent_quota;
11070 			else if (parent_quota != RUNTIME_INF && quota > parent_quota)
11071 				return -EINVAL;
11072 		}
11073 	}
11074 	cfs_b->hierarchical_quota = quota;
11075 
11076 	return 0;
11077 }
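
/*
 * Worked example for the inheritance rules in the comment above (editorial
 * sketch): consider a parent limited to 100 ms per 100 ms period and a
 * child left at "max" (RUNTIME_INF):
 *
 *	cgroup2: the child inherits the parent's hierarchical_quota, so the
 *		 scheduler still sees it as bandwidth-constrained.
 *	cgroup1: the child inherits as well, but a child whose quota/period
 *		 ratio exceeds the parent's (say 200 ms per 100 ms) is
 *		 rejected with -EINVAL rather than clamped.
 */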
11078 
11079 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
11080 {
11081 	struct cfs_schedulable_data data = {
11082 		.tg = tg,
11083 		.period = period,
11084 		.quota = quota,
11085 	};
11086 
11087 	if (quota != RUNTIME_INF) {
11088 		do_div(data.period, NSEC_PER_USEC);
11089 		do_div(data.quota, NSEC_PER_USEC);
11090 	}
11091 
11092 	guard(rcu)();
11093 	return walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
11094 }
11095 
11096 static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
11097 {
11098 	struct task_group *tg = css_tg(seq_css(sf));
11099 	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
11100 
11101 	seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
11102 	seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
11103 	seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
11104 
11105 	if (schedstat_enabled() && tg != &root_task_group) {
11106 		struct sched_statistics *stats;
11107 		u64 ws = 0;
11108 		int i;
11109 
11110 		for_each_possible_cpu(i) {
11111 			stats = __schedstats_from_se(tg->se[i]);
11112 			ws += schedstat_val(stats->wait_sum);
11113 		}
11114 
11115 		seq_printf(sf, "wait_sum %llu\n", ws);
11116 	}
11117 
11118 	seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst);
11119 	seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time);
11120 
11121 	return 0;
11122 }
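
/*
 * For reference, the seq_file output produced by cpu_cfs_stat_show() has
 * the shape below (editorial illustration; the values are arbitrary, and
 * wait_sum only appears when schedstats are enabled for a non-root group):
 *
 *	nr_periods 1204
 *	nr_throttled 17
 *	throttled_time 183000000
 *	wait_sum 420000000
 *	nr_bursts 3
 *	burst_time 1500000
 *
 * throttled_time, wait_sum and burst_time are reported in nanoseconds here,
 * unlike the *_usec fields emitted by cpu_extra_stat_show() for cgroup2.
 */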
11123 
11124 static u64 throttled_time_self(struct task_group *tg)
11125 {
11126 	int i;
11127 	u64 total = 0;
11128 
11129 	for_each_possible_cpu(i) {
11130 		total += READ_ONCE(tg->cfs_rq[i]->throttled_clock_self_time);
11131 	}
11132 
11133 	return total;
11134 }
11135 
11136 static int cpu_cfs_local_stat_show(struct seq_file *sf, void *v)
11137 {
11138 	struct task_group *tg = css_tg(seq_css(sf));
11139 
11140 	seq_printf(sf, "throttled_time %llu\n", throttled_time_self(tg));
11141 
11142 	return 0;
11143 }
11144 #endif /* CONFIG_CFS_BANDWIDTH */
11145 #endif /* CONFIG_FAIR_GROUP_SCHED */
11146 
11147 #ifdef CONFIG_RT_GROUP_SCHED
11148 static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
11149 				struct cftype *cft, s64 val)
11150 {
11151 	return sched_group_set_rt_runtime(css_tg(css), val);
11152 }
11153 
11154 static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
11155 			       struct cftype *cft)
11156 {
11157 	return sched_group_rt_runtime(css_tg(css));
11158 }
11159 
11160 static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
11161 				    struct cftype *cftype, u64 rt_period_us)
11162 {
11163 	return sched_group_set_rt_period(css_tg(css), rt_period_us);
11164 }
11165 
11166 static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
11167 				   struct cftype *cft)
11168 {
11169 	return sched_group_rt_period(css_tg(css));
11170 }
11171 #endif /* CONFIG_RT_GROUP_SCHED */
11172 
11173 #ifdef CONFIG_FAIR_GROUP_SCHED
11174 static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css,
11175 			       struct cftype *cft)
11176 {
11177 	return css_tg(css)->idle;
11178 }
11179 
11180 static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
11181 				struct cftype *cft, s64 idle)
11182 {
11183 	return sched_group_set_idle(css_tg(css), idle);
11184 }
11185 #endif
11186 
11187 static struct cftype cpu_legacy_files[] = {
11188 #ifdef CONFIG_FAIR_GROUP_SCHED
11189 	{
11190 		.name = "shares",
11191 		.read_u64 = cpu_shares_read_u64,
11192 		.write_u64 = cpu_shares_write_u64,
11193 	},
11194 	{
11195 		.name = "idle",
11196 		.read_s64 = cpu_idle_read_s64,
11197 		.write_s64 = cpu_idle_write_s64,
11198 	},
11199 #endif
11200 #ifdef CONFIG_CFS_BANDWIDTH
11201 	{
11202 		.name = "cfs_quota_us",
11203 		.read_s64 = cpu_cfs_quota_read_s64,
11204 		.write_s64 = cpu_cfs_quota_write_s64,
11205 	},
11206 	{
11207 		.name = "cfs_period_us",
11208 		.read_u64 = cpu_cfs_period_read_u64,
11209 		.write_u64 = cpu_cfs_period_write_u64,
11210 	},
11211 	{
11212 		.name = "cfs_burst_us",
11213 		.read_u64 = cpu_cfs_burst_read_u64,
11214 		.write_u64 = cpu_cfs_burst_write_u64,
11215 	},
11216 	{
11217 		.name = "stat",
11218 		.seq_show = cpu_cfs_stat_show,
11219 	},
11220 	{
11221 		.name = "stat.local",
11222 		.seq_show = cpu_cfs_local_stat_show,
11223 	},
11224 #endif
11225 #ifdef CONFIG_RT_GROUP_SCHED
11226 	{
11227 		.name = "rt_runtime_us",
11228 		.read_s64 = cpu_rt_runtime_read,
11229 		.write_s64 = cpu_rt_runtime_write,
11230 	},
11231 	{
11232 		.name = "rt_period_us",
11233 		.read_u64 = cpu_rt_period_read_uint,
11234 		.write_u64 = cpu_rt_period_write_uint,
11235 	},
11236 #endif
11237 #ifdef CONFIG_UCLAMP_TASK_GROUP
11238 	{
11239 		.name = "uclamp.min",
11240 		.flags = CFTYPE_NOT_ON_ROOT,
11241 		.seq_show = cpu_uclamp_min_show,
11242 		.write = cpu_uclamp_min_write,
11243 	},
11244 	{
11245 		.name = "uclamp.max",
11246 		.flags = CFTYPE_NOT_ON_ROOT,
11247 		.seq_show = cpu_uclamp_max_show,
11248 		.write = cpu_uclamp_max_write,
11249 	},
11250 #endif
11251 	{ }	/* Terminate */
11252 };
11253 
11254 static int cpu_extra_stat_show(struct seq_file *sf,
11255 			       struct cgroup_subsys_state *css)
11256 {
11257 #ifdef CONFIG_CFS_BANDWIDTH
11258 	{
11259 		struct task_group *tg = css_tg(css);
11260 		struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
11261 		u64 throttled_usec, burst_usec;
11262 
11263 		throttled_usec = cfs_b->throttled_time;
11264 		do_div(throttled_usec, NSEC_PER_USEC);
11265 		burst_usec = cfs_b->burst_time;
11266 		do_div(burst_usec, NSEC_PER_USEC);
11267 
11268 		seq_printf(sf, "nr_periods %d\n"
11269 			   "nr_throttled %d\n"
11270 			   "throttled_usec %llu\n"
11271 			   "nr_bursts %d\n"
11272 			   "burst_usec %llu\n",
11273 			   cfs_b->nr_periods, cfs_b->nr_throttled,
11274 			   throttled_usec, cfs_b->nr_burst, burst_usec);
11275 	}
11276 #endif
11277 	return 0;
11278 }
11279 
11280 static int cpu_local_stat_show(struct seq_file *sf,
11281 			       struct cgroup_subsys_state *css)
11282 {
11283 #ifdef CONFIG_CFS_BANDWIDTH
11284 	{
11285 		struct task_group *tg = css_tg(css);
11286 		u64 throttled_self_usec;
11287 
11288 		throttled_self_usec = throttled_time_self(tg);
11289 		do_div(throttled_self_usec, NSEC_PER_USEC);
11290 
11291 		seq_printf(sf, "throttled_usec %llu\n",
11292 			   throttled_self_usec);
11293 	}
11294 #endif
11295 	return 0;
11296 }
11297 
11298 #ifdef CONFIG_FAIR_GROUP_SCHED
11299 static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
11300 			       struct cftype *cft)
11301 {
11302 	struct task_group *tg = css_tg(css);
11303 	u64 weight = scale_load_down(tg->shares);
11304 
11305 	return DIV_ROUND_CLOSEST_ULL(weight * CGROUP_WEIGHT_DFL, 1024);
11306 }
11307 
11308 static int cpu_weight_write_u64(struct cgroup_subsys_state *css,
11309 				struct cftype *cft, u64 weight)
11310 {
11311 	/*
11312 	 * cgroup weight knobs should use the common MIN, DFL and MAX
11313 	 * values which are 1, 100 and 10000 respectively.  While it loses
11314 	 * a bit of range on both ends, it maps pretty well onto the shares
11315 	 * value used by the scheduler, and the round-trip conversions preserve
11316 	 * the original value over the entire range.
11317 	 */
11318 	if (weight < CGROUP_WEIGHT_MIN || weight > CGROUP_WEIGHT_MAX)
11319 		return -ERANGE;
11320 
11321 	weight = DIV_ROUND_CLOSEST_ULL(weight * 1024, CGROUP_WEIGHT_DFL);
11322 
11323 	return sched_group_set_shares(css_tg(css), scale_load(weight));
11324 }
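
/*
 * Worked example of the mapping described in cpu_weight_write_u64() above
 * (editorial sketch): cgroup weights map linearly onto shares, with
 * CGROUP_WEIGHT_DFL (100) corresponding to 1024 shares:
 *
 *	weight     1 -> DIV_ROUND_CLOSEST(1     * 1024, 100) =     10 shares
 *	weight   100 -> DIV_ROUND_CLOSEST(100   * 1024, 100) =   1024 shares
 *	weight 10000 -> DIV_ROUND_CLOSEST(10000 * 1024, 100) = 102400 shares
 *
 * Reading back through cpu_weight_read_u64() applies the inverse scaling,
 * e.g. 1024 shares -> DIV_ROUND_CLOSEST(1024 * 100, 1024) = 100, which is
 * why the round trip preserves the original value.
 */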
11325 
11326 static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css,
11327 				    struct cftype *cft)
11328 {
11329 	unsigned long weight = scale_load_down(css_tg(css)->shares);
11330 	int last_delta = INT_MAX;
11331 	int prio, delta;
11332 
11333 	/* find the closest nice value to the current weight */
11334 	for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) {
11335 		delta = abs(sched_prio_to_weight[prio] - weight);
11336 		if (delta >= last_delta)
11337 			break;
11338 		last_delta = delta;
11339 	}
11340 
11341 	return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO);
11342 }
11343 
11344 static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
11345 				     struct cftype *cft, s64 nice)
11346 {
11347 	unsigned long weight;
11348 	int idx;
11349 
11350 	if (nice < MIN_NICE || nice > MAX_NICE)
11351 		return -ERANGE;
11352 
11353 	idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO;
11354 	idx = array_index_nospec(idx, 40);
11355 	weight = sched_prio_to_weight[idx];
11356 
11357 	return sched_group_set_shares(css_tg(css), scale_load(weight));
11358 }
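
/*
 * Editorial example for the weight.nice interface above: writing nice 5
 * picks sched_prio_to_weight[NICE_TO_PRIO(5) - MAX_RT_PRIO] =
 * sched_prio_to_weight[25] = 335, and reading back scans the table for the
 * weight closest to 335, yielding nice 5 again.  Weights set via cpu.weight
 * that fall between two table entries read back as the nearest nice value.
 */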
11359 #endif
11360 
11361 static void __maybe_unused cpu_period_quota_print(struct seq_file *sf,
11362 						  long period, long quota)
11363 {
11364 	if (quota < 0)
11365 		seq_puts(sf, "max");
11366 	else
11367 		seq_printf(sf, "%ld", quota);
11368 
11369 	seq_printf(sf, " %ld\n", period);
11370 }
11371 
11372 /* caller should put the current value in *@periodp before calling */
11373 static int __maybe_unused cpu_period_quota_parse(char *buf,
11374 						 u64 *periodp, u64 *quotap)
11375 {
11376 	char tok[21];	/* U64_MAX */
11377 
11378 	if (sscanf(buf, "%20s %llu", tok, periodp) < 1)
11379 		return -EINVAL;
11380 
11381 	*periodp *= NSEC_PER_USEC;
11382 
11383 	if (sscanf(tok, "%llu", quotap))
11384 		*quotap *= NSEC_PER_USEC;
11385 	else if (!strcmp(tok, "max"))
11386 		*quotap = RUNTIME_INF;
11387 	else
11388 		return -EINVAL;
11389 
11390 	return 0;
11391 }
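
/*
 * The parser above handles the cgroup2 cpu.max syntax "$QUOTA [$PERIOD]",
 * with both fields given in microseconds and converted to nanoseconds for
 * tg_set_cfs_bandwidth().  A few editorial examples:
 *
 *	"max 100000"	-> unlimited quota (RUNTIME_INF), 100 ms period
 *	"50000 100000"	-> 50 ms quota, 100 ms period
 *	"50000"		-> 50 ms quota, period unchanged (the caller
 *			   pre-loads *periodp with the current period)
 */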
11392 
11393 #ifdef CONFIG_CFS_BANDWIDTH
11394 static int cpu_max_show(struct seq_file *sf, void *v)
11395 {
11396 	struct task_group *tg = css_tg(seq_css(sf));
11397 
11398 	cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg));
11399 	return 0;
11400 }
11401 
11402 static ssize_t cpu_max_write(struct kernfs_open_file *of,
11403 			     char *buf, size_t nbytes, loff_t off)
11404 {
11405 	struct task_group *tg = css_tg(of_css(of));
11406 	u64 period = tg_get_cfs_period(tg);
11407 	u64 burst = tg->cfs_bandwidth.burst;
11408 	u64 quota;
11409 	int ret;
11410 
11411 	ret = cpu_period_quota_parse(buf, &period, &quota);
11412 	if (!ret)
11413 		ret = tg_set_cfs_bandwidth(tg, period, quota, burst);
11414 	return ret ?: nbytes;
11415 }
11416 #endif
11417 
11418 static struct cftype cpu_files[] = {
11419 #ifdef CONFIG_FAIR_GROUP_SCHED
11420 	{
11421 		.name = "weight",
11422 		.flags = CFTYPE_NOT_ON_ROOT,
11423 		.read_u64 = cpu_weight_read_u64,
11424 		.write_u64 = cpu_weight_write_u64,
11425 	},
11426 	{
11427 		.name = "weight.nice",
11428 		.flags = CFTYPE_NOT_ON_ROOT,
11429 		.read_s64 = cpu_weight_nice_read_s64,
11430 		.write_s64 = cpu_weight_nice_write_s64,
11431 	},
11432 	{
11433 		.name = "idle",
11434 		.flags = CFTYPE_NOT_ON_ROOT,
11435 		.read_s64 = cpu_idle_read_s64,
11436 		.write_s64 = cpu_idle_write_s64,
11437 	},
11438 #endif
11439 #ifdef CONFIG_CFS_BANDWIDTH
11440 	{
11441 		.name = "max",
11442 		.flags = CFTYPE_NOT_ON_ROOT,
11443 		.seq_show = cpu_max_show,
11444 		.write = cpu_max_write,
11445 	},
11446 	{
11447 		.name = "max.burst",
11448 		.flags = CFTYPE_NOT_ON_ROOT,
11449 		.read_u64 = cpu_cfs_burst_read_u64,
11450 		.write_u64 = cpu_cfs_burst_write_u64,
11451 	},
11452 #endif
11453 #ifdef CONFIG_UCLAMP_TASK_GROUP
11454 	{
11455 		.name = "uclamp.min",
11456 		.flags = CFTYPE_NOT_ON_ROOT,
11457 		.seq_show = cpu_uclamp_min_show,
11458 		.write = cpu_uclamp_min_write,
11459 	},
11460 	{
11461 		.name = "uclamp.max",
11462 		.flags = CFTYPE_NOT_ON_ROOT,
11463 		.seq_show = cpu_uclamp_max_show,
11464 		.write = cpu_uclamp_max_write,
11465 	},
11466 #endif
11467 	{ }	/* terminate */
11468 };
11469 
11470 struct cgroup_subsys cpu_cgrp_subsys = {
11471 	.css_alloc	= cpu_cgroup_css_alloc,
11472 	.css_online	= cpu_cgroup_css_online,
11473 	.css_released	= cpu_cgroup_css_released,
11474 	.css_free	= cpu_cgroup_css_free,
11475 	.css_extra_stat_show = cpu_extra_stat_show,
11476 	.css_local_stat_show = cpu_local_stat_show,
11477 #ifdef CONFIG_RT_GROUP_SCHED
11478 	.can_attach	= cpu_cgroup_can_attach,
11479 #endif
11480 	.attach		= cpu_cgroup_attach,
11481 	.legacy_cftypes	= cpu_legacy_files,
11482 	.dfl_cftypes	= cpu_files,
11483 	.early_init	= true,
11484 	.threaded	= true,
11485 };
11486 
11487 #endif	/* CONFIG_CGROUP_SCHED */
11488 
11489 void dump_cpu_task(int cpu)
11490 {
11491 	if (cpu == smp_processor_id() && in_hardirq()) {
11492 		struct pt_regs *regs;
11493 
11494 		regs = get_irq_regs();
11495 		if (regs) {
11496 			show_regs(regs);
11497 			return;
11498 		}
11499 	}
11500 
11501 	if (trigger_single_cpu_backtrace(cpu))
11502 		return;
11503 
11504 	pr_info("Task dump for CPU %d:\n", cpu);
11505 	sched_show_task(cpu_curr(cpu));
11506 }
11507 
11508 /*
11509  * Nice levels are multiplicative, with a gentle 10% change for every
11510  * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
11511  * nice 1, it will get ~10% less CPU time than another CPU-bound task
11512  * that remained on nice 0.
11513  *
11514  * The "10% effect" is relative and cumulative: from _any_ nice level,
11515  * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
11516  * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
11517  * If a task goes up by ~10% and another task goes down by ~10% then
11518  * the relative distance between them is ~25%.)
11519  */
11520 const int sched_prio_to_weight[40] = {
11521  /* -20 */     88761,     71755,     56483,     46273,     36291,
11522  /* -15 */     29154,     23254,     18705,     14949,     11916,
11523  /* -10 */      9548,      7620,      6100,      4904,      3906,
11524  /*  -5 */      3121,      2501,      1991,      1586,      1277,
11525  /*   0 */      1024,       820,       655,       526,       423,
11526  /*   5 */       335,       272,       215,       172,       137,
11527  /*  10 */       110,        87,        70,        56,        45,
11528  /*  15 */        36,        29,        23,        18,        15,
11529 };
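
/*
 * Editorial worked example of the ~10% rule documented above: two CPU-bound
 * tasks at nice 0 and nice 1 have weights 1024 and 820, so they receive
 *
 *	1024 / (1024 + 820) ~= 55.5%   and   820 / (1024 + 820) ~= 44.5%
 *
 * of the CPU respectively -- roughly a 10% shift, and the 1024/820 ~= 1.25
 * ratio is the multiplier mentioned in the comment.
 */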
11530 
11531 /*
11532  * Inverse (2^32/x) values of the sched_prio_to_weight[] array, precalculated.
11533  *
11534  * In cases where the weight does not change often, we can use the
11535  * precalculated inverse to speed up arithmetics by turning divisions
11536  * into multiplications:
11537  */
11538 const u32 sched_prio_to_wmult[40] = {
11539  /* -20 */     48388,     59856,     76040,     92818,    118348,
11540  /* -15 */    147320,    184698,    229616,    287308,    360437,
11541  /* -10 */    449829,    563644,    704093,    875809,   1099582,
11542  /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
11543  /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
11544  /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
11545  /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
11546  /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
11547 };
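
/*
 * Editorial example of how the precalculated inverses are used: for nice 0,
 * weight = 1024 and wmult = 4194304 = 2^32 / 1024, so a division such as
 *
 *	delta / 1024
 *
 * can instead be computed as
 *
 *	(delta * 4194304) >> 32
 *
 * letting the scheduler replace a division by the weight with a multiply
 * and a shift, as the comment above describes.
 */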
11548 
11549 void call_trace_sched_update_nr_running(struct rq *rq, int count)
11550 {
11551 	trace_sched_update_nr_running_tp(rq, count);
11552 }
11553 
11554 #ifdef CONFIG_SCHED_MM_CID
11555 
11556 /*
11557  * @cid_lock: Guarantee forward-progress of cid allocation.
11558  *
11559  * Concurrency ID allocation within a bitmap is mostly lock-free. The cid_lock
11560  * is only used when contention is detected by the lock-free allocation so
11561  * forward progress can be guaranteed.
11562  */
11563 DEFINE_RAW_SPINLOCK(cid_lock);
11564 
11565 /*
11566  * @use_cid_lock: Select cid allocation behavior: lock-free vs spinlock.
11567  *
11568  * When @use_cid_lock is 0, the cid allocation is lock-free. When contention is
11569  * detected, it is set to 1 to ensure that all newly coming allocations are
11570  * serialized by @cid_lock until the allocation which detected contention
11571  * completes and sets @use_cid_lock back to 0. This guarantees forward progress
11572  * of a cid allocation.
11573  */
11574 int use_cid_lock;
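
/*
 * Editorial sketch of the fallback pattern the two variables above support
 * (illustrative only -- the real allocation path lives in
 * kernel/sched/sched.h and differs in detail; alloc_lockfree() is a
 * hypothetical placeholder for the lock-free bitmap search):
 *
 *	if (!READ_ONCE(use_cid_lock)) {
 *		cid = alloc_lockfree(mm);
 *		if (cid >= 0)
 *			return cid;		// common, uncontended case
 *	}
 *	raw_spin_lock(&cid_lock);		// contention detected
 *	WRITE_ONCE(use_cid_lock, 1);		// serialize new allocations
 *	cid = alloc_lockfree(mm);		// forward progress guaranteed
 *	WRITE_ONCE(use_cid_lock, 0);
 *	raw_spin_unlock(&cid_lock);
 *	return cid;
 */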
11575 
11576 /*
11577  * mm_cid remote-clear implements a lock-free algorithm to clear per-mm/cpu cid
11578  * concurrently with respect to the execution of the source runqueue context
11579  * switch.
11580  *
11581  * There is one basic property we want to guarantee here:
11582  *
11583  * (1) Remote-clear should _never_ mark a per-cpu cid UNSET when it is actively
11584  * used by a task. That would lead to concurrent allocation of the cid and
11585  * userspace corruption.
11586  *
11587  * Provide this guarantee by introducing a Dekker memory ordering to guarantee
11588  * that a pair of loads observe at least one of a pair of stores, which can be
11589  * shown as:
11590  *
11591  *      X = Y = 0
11592  *
11593  *      w[X]=1          w[Y]=1
11594  *      MB              MB
11595  *      r[Y]=y          r[X]=x
11596  *
11597  * Which guarantees that x==0 && y==0 is impossible. But rather than using
11598  * values 0 and 1, this algorithm cares about specific state transitions of the
11599  * runqueue current task (as updated by the scheduler context switch), and the
11600  * per-mm/cpu cid value.
11601  *
11602  * Let's introduce task (Y) which has task->mm == mm and task (N) which has
11603  * task->mm != mm for the rest of the discussion. There are two scheduler state
11604  * transitions on context switch we care about:
11605  *
11606  * (TSA) Store to rq->curr with transition from (N) to (Y)
11607  *
11608  * (TSB) Store to rq->curr with transition from (Y) to (N)
11609  *
11610  * On the remote-clear side, there is one transition we care about:
11611  *
11612  * (TMA) cmpxchg to *pcpu_cid to set the LAZY flag
11613  *
11614  * There is also a transition to UNSET state which can be performed from all
11615  * sides (scheduler, remote-clear). It is always performed with a cmpxchg which
11616  * guarantees that only a single thread will succeed:
11617  *
11618  * (TMB) cmpxchg to *pcpu_cid to mark UNSET
11619  *
11620  * Just to be clear, what we do _not_ want to happen is a transition to UNSET
11621  * when a thread is actively using the cid (property (1)).
11622  *
11623  * Let's look at the relevant combinations of TSA/TSB, and TMA transitions.
11624  *
11625  * Scenario A) (TSA)+(TMA) (from next task perspective)
11626  *
11627  * CPU0                                      CPU1
11628  *
11629  * Context switch CS-1                       Remote-clear
11630  *   - store to rq->curr: (N)->(Y) (TSA)     - cmpxchg to *pcpu_id to LAZY (TMA)
11631  *                                             (implied barrier after cmpxchg)
11632  *   - switch_mm_cid()
11633  *     - memory barrier (see switch_mm_cid()
11634  *       comment explaining how this barrier
11635  *       is combined with other scheduler
11636  *       barriers)
11637  *     - mm_cid_get (next)
11638  *       - READ_ONCE(*pcpu_cid)              - rcu_dereference(src_rq->curr)
11639  *
11640  * This Dekker ensures that either task (Y) is observed by the
11641  * rcu_dereference() or the LAZY flag is observed by READ_ONCE(), or both are
11642  * observed.
11643  *
11644  * If task (Y) store is observed by rcu_dereference(), it means that there is
11645  * still an active task on the cpu. Remote-clear will therefore not transition
11646  * to UNSET, which fulfills property (1).
11647  *
11648  * If task (Y) is not observed, but the lazy flag is observed by READ_ONCE(),
11649  * it will move its state to UNSET, which clears the percpu cid perhaps
11650  * uselessly (which is not an issue for correctness). Because task (Y) is not
11651  * observed, CPU1 can move ahead to set the state to UNSET. Because moving
11652  * state to UNSET is done with a cmpxchg expecting that the old state has the
11653  * LAZY flag set, only one thread will successfully UNSET.
11654  *
11655  * If both states (LAZY flag and task (Y)) are observed, the thread on CPU0
11656  * will observe the LAZY flag and transition to UNSET (perhaps uselessly), and
11657  * CPU1 will observe task (Y) and do nothing more, which is fine.
11658  *
11659  * What we are effectively preventing with this Dekker is a scenario where
11660  * neither LAZY flag nor store (Y) are observed, which would fail property (1)
11661  * because this would UNSET a cid which is actively used.
11662  */
11663 
11664 void sched_mm_cid_migrate_from(struct task_struct *t)
11665 {
11666 	t->migrate_from_cpu = task_cpu(t);
11667 }
11668 
11669 static
11670 int __sched_mm_cid_migrate_from_fetch_cid(struct rq *src_rq,
11671 					  struct task_struct *t,
11672 					  struct mm_cid *src_pcpu_cid)
11673 {
11674 	struct mm_struct *mm = t->mm;
11675 	struct task_struct *src_task;
11676 	int src_cid, last_mm_cid;
11677 
11678 	if (!mm)
11679 		return -1;
11680 
11681 	last_mm_cid = t->last_mm_cid;
11682 	/*
11683 	 * If the migrated task has no last cid, or if the current
11684 	 * task on src rq uses the cid, it means the source cid does not need
11685 	 * to be moved to the destination cpu.
11686 	 */
11687 	if (last_mm_cid == -1)
11688 		return -1;
11689 	src_cid = READ_ONCE(src_pcpu_cid->cid);
11690 	if (!mm_cid_is_valid(src_cid) || last_mm_cid != src_cid)
11691 		return -1;
11692 
11693 	/*
11694 	 * If we observe an active task using the mm on this rq, it means we
11695 	 * are not the last task to be migrated from this cpu for this mm, so
11696 	 * there is no need to move src_cid to the destination cpu.
11697 	 */
11698 	guard(rcu)();
11699 	src_task = rcu_dereference(src_rq->curr);
11700 	if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
11701 		t->last_mm_cid = -1;
11702 		return -1;
11703 	}
11704 
11705 	return src_cid;
11706 }
11707 
11708 static
11709 int __sched_mm_cid_migrate_from_try_steal_cid(struct rq *src_rq,
11710 					      struct task_struct *t,
11711 					      struct mm_cid *src_pcpu_cid,
11712 					      int src_cid)
11713 {
11714 	struct task_struct *src_task;
11715 	struct mm_struct *mm = t->mm;
11716 	int lazy_cid;
11717 
11718 	if (src_cid == -1)
11719 		return -1;
11720 
11721 	/*
11722 	 * Attempt to clear the source cpu cid to move it to the destination
11723 	 * cpu.
11724 	 */
11725 	lazy_cid = mm_cid_set_lazy_put(src_cid);
11726 	if (!try_cmpxchg(&src_pcpu_cid->cid, &src_cid, lazy_cid))
11727 		return -1;
11728 
11729 	/*
11730 	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
11731 	 * rq->curr->mm matches the scheduler barrier in context_switch()
11732 	 * between store to rq->curr and load of prev and next task's
11733 	 * per-mm/cpu cid.
11734 	 *
11735 	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
11736 	 * rq->curr->mm_cid_active matches the barrier in
11737 	 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
11738 	 * sched_mm_cid_after_execve() between store to t->mm_cid_active and
11739 	 * load of per-mm/cpu cid.
11740 	 */
11741 
11742 	/*
11743 	 * If we observe an active task using the mm on this rq after setting
11744 	 * the lazy-put flag, this task will be responsible for transitioning
11745 	 * from lazy-put flag set to MM_CID_UNSET.
11746 	 */
11747 	scoped_guard (rcu) {
11748 		src_task = rcu_dereference(src_rq->curr);
11749 		if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
11750 			/*
11751 			 * We observed an active task for this mm, there is therefore
11752 			 * no point in moving this cid to the destination cpu.
11753 			 */
11754 			t->last_mm_cid = -1;
11755 			return -1;
11756 		}
11757 	}
11758 
11759 	/*
11760 	 * The src_cid is unused, so it can be unset.
11761 	 */
11762 	if (!try_cmpxchg(&src_pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
11763 		return -1;
11764 	return src_cid;
11765 }
11766 
11767 /*
11768  * Migration to dst cpu. Called with dst_rq lock held.
11769  * Interrupts are disabled, which keeps small the window during which the
11770  * cid is owned without holding the source rq lock.
11771  */
11772 void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t)
11773 {
11774 	struct mm_cid *src_pcpu_cid, *dst_pcpu_cid;
11775 	struct mm_struct *mm = t->mm;
11776 	int src_cid, dst_cid, src_cpu;
11777 	struct rq *src_rq;
11778 
11779 	lockdep_assert_rq_held(dst_rq);
11780 
11781 	if (!mm)
11782 		return;
11783 	src_cpu = t->migrate_from_cpu;
11784 	if (src_cpu == -1) {
11785 		t->last_mm_cid = -1;
11786 		return;
11787 	}
11788 	/*
11789 	 * Move the src cid if the dst cid is unset. This keeps id
11790 	 * allocation closest to 0 in cases where few threads migrate around
11791 	 * many cpus.
11792 	 *
11793 	 * If destination cid is already set, we may have to just clear
11794 	 * the src cid to ensure compactness in frequent migrations
11795 	 * scenarios.
11796 	 *
11797 	 * It is not useful to clear the src cid when the number of threads is
11798 	 * greater or equal to the number of allowed cpus, because user-space
11799 	 * can expect that the number of allowed cids can reach the number of
11800 	 * allowed cpus.
11801 	 */
11802 	dst_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(dst_rq));
11803 	dst_cid = READ_ONCE(dst_pcpu_cid->cid);
11804 	if (!mm_cid_is_unset(dst_cid) &&
11805 	    atomic_read(&mm->mm_users) >= t->nr_cpus_allowed)
11806 		return;
11807 	src_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, src_cpu);
11808 	src_rq = cpu_rq(src_cpu);
11809 	src_cid = __sched_mm_cid_migrate_from_fetch_cid(src_rq, t, src_pcpu_cid);
11810 	if (src_cid == -1)
11811 		return;
11812 	src_cid = __sched_mm_cid_migrate_from_try_steal_cid(src_rq, t, src_pcpu_cid,
11813 							    src_cid);
11814 	if (src_cid == -1)
11815 		return;
11816 	if (!mm_cid_is_unset(dst_cid)) {
11817 		__mm_cid_put(mm, src_cid);
11818 		return;
11819 	}
11820 	/* Move src_cid to dst cpu. */
11821 	mm_cid_snapshot_time(dst_rq, mm);
11822 	WRITE_ONCE(dst_pcpu_cid->cid, src_cid);
11823 }
11824 
11825 static void sched_mm_cid_remote_clear(struct mm_struct *mm, struct mm_cid *pcpu_cid,
11826 				      int cpu)
11827 {
11828 	struct rq *rq = cpu_rq(cpu);
11829 	struct task_struct *t;
11830 	int cid, lazy_cid;
11831 
11832 	cid = READ_ONCE(pcpu_cid->cid);
11833 	if (!mm_cid_is_valid(cid))
11834 		return;
11835 
11836 	/*
11837 	 * Clear the cpu cid if it is set to keep cid allocation compact.  If
11838 	 * there happens to be other tasks left on the source cpu using this
11839 	 * mm, the next task using this mm will reallocate its cid on context
11840 	 * switch.
11841 	 */
11842 	lazy_cid = mm_cid_set_lazy_put(cid);
11843 	if (!try_cmpxchg(&pcpu_cid->cid, &cid, lazy_cid))
11844 		return;
11845 
11846 	/*
11847 	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
11848 	 * rq->curr->mm matches the scheduler barrier in context_switch()
11849 	 * between store to rq->curr and load of prev and next task's
11850 	 * per-mm/cpu cid.
11851 	 *
11852 	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
11853 	 * rq->curr->mm_cid_active matches the barrier in
11854 	 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
11855 	 * sched_mm_cid_after_execve() between store to t->mm_cid_active and
11856 	 * load of per-mm/cpu cid.
11857 	 */
11858 
11859 	/*
11860 	 * If we observe an active task using the mm on this rq after setting
11861 	 * the lazy-put flag, that task will be responsible for transitioning
11862 	 * from lazy-put flag set to MM_CID_UNSET.
11863 	 */
11864 	scoped_guard (rcu) {
11865 		t = rcu_dereference(rq->curr);
11866 		if (READ_ONCE(t->mm_cid_active) && t->mm == mm)
11867 			return;
11868 	}
11869 
11870 	/*
11871 	 * The cid is unused, so it can be unset.
11872 	 * Disable interrupts to keep the window of cid ownership without rq
11873 	 * lock small.
11874 	 */
11875 	scoped_guard (irqsave) {
11876 		if (try_cmpxchg(&pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
11877 			__mm_cid_put(mm, cid);
11878 	}
11879 }
11880 
11881 static void sched_mm_cid_remote_clear_old(struct mm_struct *mm, int cpu)
11882 {
11883 	struct rq *rq = cpu_rq(cpu);
11884 	struct mm_cid *pcpu_cid;
11885 	struct task_struct *curr;
11886 	u64 rq_clock;
11887 
11888 	/*
11889 	 * rq->clock load is racy on 32-bit but one spurious clear once in a
11890 	 * while is irrelevant.
11891 	 */
11892 	rq_clock = READ_ONCE(rq->clock);
11893 	pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
11894 
11895 	/*
11896 	 * In order to take care of infrequently scheduled tasks, bump the time
11897 	 * snapshot associated with this cid if an active task using the mm is
11898 	 * observed on this rq.
11899 	 */
11900 	scoped_guard (rcu) {
11901 		curr = rcu_dereference(rq->curr);
11902 		if (READ_ONCE(curr->mm_cid_active) && curr->mm == mm) {
11903 			WRITE_ONCE(pcpu_cid->time, rq_clock);
11904 			return;
11905 		}
11906 	}
11907 
11908 	if (rq_clock < pcpu_cid->time + SCHED_MM_CID_PERIOD_NS)
11909 		return;
11910 	sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
11911 }
11912 
11913 static void sched_mm_cid_remote_clear_weight(struct mm_struct *mm, int cpu,
11914 					     int weight)
11915 {
11916 	struct mm_cid *pcpu_cid;
11917 	int cid;
11918 
11919 	pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
11920 	cid = READ_ONCE(pcpu_cid->cid);
11921 	if (!mm_cid_is_valid(cid) || cid < weight)
11922 		return;
11923 	sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
11924 }
11925 
11926 static void task_mm_cid_work(struct callback_head *work)
11927 {
11928 	unsigned long now = jiffies, old_scan, next_scan;
11929 	struct task_struct *t = current;
11930 	struct cpumask *cidmask;
11931 	struct mm_struct *mm;
11932 	int weight, cpu;
11933 
11934 	SCHED_WARN_ON(t != container_of(work, struct task_struct, cid_work));
11935 
11936 	work->next = work;	/* Prevent double-add */
11937 	if (t->flags & PF_EXITING)
11938 		return;
11939 	mm = t->mm;
11940 	if (!mm)
11941 		return;
11942 	old_scan = READ_ONCE(mm->mm_cid_next_scan);
11943 	next_scan = now + msecs_to_jiffies(MM_CID_SCAN_DELAY);
11944 	if (!old_scan) {
11945 		unsigned long res;
11946 
11947 		res = cmpxchg(&mm->mm_cid_next_scan, old_scan, next_scan);
11948 		if (res != old_scan)
11949 			old_scan = res;
11950 		else
11951 			old_scan = next_scan;
11952 	}
11953 	if (time_before(now, old_scan))
11954 		return;
11955 	if (!try_cmpxchg(&mm->mm_cid_next_scan, &old_scan, next_scan))
11956 		return;
11957 	cidmask = mm_cidmask(mm);
11958 	/* Clear cids that were not recently used. */
11959 	for_each_possible_cpu(cpu)
11960 		sched_mm_cid_remote_clear_old(mm, cpu);
11961 	weight = cpumask_weight(cidmask);
11962 	/*
11963 	 * Clear cids that are greater or equal to the cidmask weight to
11964 	 * recompact it.
11965 	 */
11966 	for_each_possible_cpu(cpu)
11967 		sched_mm_cid_remote_clear_weight(mm, cpu, weight);
11968 }
11969 
11970 void init_sched_mm_cid(struct task_struct *t)
11971 {
11972 	struct mm_struct *mm = t->mm;
11973 	int mm_users = 0;
11974 
11975 	if (mm) {
11976 		mm_users = atomic_read(&mm->mm_users);
11977 		if (mm_users == 1)
11978 			mm->mm_cid_next_scan = jiffies + msecs_to_jiffies(MM_CID_SCAN_DELAY);
11979 	}
11980 	t->cid_work.next = &t->cid_work;	/* Protect against double add */
11981 	init_task_work(&t->cid_work, task_mm_cid_work);
11982 }
11983 
11984 void task_tick_mm_cid(struct rq *rq, struct task_struct *curr)
11985 {
11986 	struct callback_head *work = &curr->cid_work;
11987 	unsigned long now = jiffies;
11988 
11989 	if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) ||
11990 	    work->next != work)
11991 		return;
11992 	if (time_before(now, READ_ONCE(curr->mm->mm_cid_next_scan)))
11993 		return;
11994 	task_work_add(curr, work, TWA_RESUME);
11995 }
11996 
11997 void sched_mm_cid_exit_signals(struct task_struct *t)
11998 {
11999 	struct mm_struct *mm = t->mm;
12000 	struct rq *rq;
12001 
12002 	if (!mm)
12003 		return;
12004 
12005 	preempt_disable();
12006 	rq = this_rq();
12007 	guard(rq_lock_irqsave)(rq);
12008 	preempt_enable_no_resched();	/* holding spinlock */
12009 	WRITE_ONCE(t->mm_cid_active, 0);
12010 	/*
12011 	 * Store t->mm_cid_active before loading per-mm/cpu cid.
12012 	 * Matches barrier in sched_mm_cid_remote_clear_old().
12013 	 */
12014 	smp_mb();
12015 	mm_cid_put(mm);
12016 	t->last_mm_cid = t->mm_cid = -1;
12017 }
12018 
12019 void sched_mm_cid_before_execve(struct task_struct *t)
12020 {
12021 	struct mm_struct *mm = t->mm;
12022 	struct rq *rq;
12023 
12024 	if (!mm)
12025 		return;
12026 
12027 	preempt_disable();
12028 	rq = this_rq();
12029 	guard(rq_lock_irqsave)(rq);
12030 	preempt_enable_no_resched();	/* holding spinlock */
12031 	WRITE_ONCE(t->mm_cid_active, 0);
12032 	/*
12033 	 * Store t->mm_cid_active before loading per-mm/cpu cid.
12034 	 * Matches barrier in sched_mm_cid_remote_clear_old().
12035 	 */
12036 	smp_mb();
12037 	mm_cid_put(mm);
12038 	t->last_mm_cid = t->mm_cid = -1;
12039 }
12040 
12041 void sched_mm_cid_after_execve(struct task_struct *t)
12042 {
12043 	struct mm_struct *mm = t->mm;
12044 	struct rq *rq;
12045 
12046 	if (!mm)
12047 		return;
12048 
12049 	preempt_disable();
12050 	rq = this_rq();
12051 	scoped_guard (rq_lock_irqsave, rq) {
12052 		preempt_enable_no_resched();	/* holding spinlock */
12053 		WRITE_ONCE(t->mm_cid_active, 1);
12054 		/*
12055 		 * Store t->mm_cid_active before loading per-mm/cpu cid.
12056 		 * Matches barrier in sched_mm_cid_remote_clear_old().
12057 		 */
12058 		smp_mb();
12059 		t->last_mm_cid = t->mm_cid = mm_cid_get(rq, mm);
12060 	}
12061 	rseq_set_notify_resume(t);
12062 }
12063 
12064 void sched_mm_cid_fork(struct task_struct *t)
12065 {
12066 	WARN_ON_ONCE(!t->mm || t->mm_cid != -1);
12067 	t->mm_cid_active = 1;
12068 }
12069 #endif
12070