xref: /linux/kernel/sched/core.c (revision c245910049d04fbfa85bb2f5acd591c24e9907c7)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  kernel/sched/core.c
4  *
5  *  Core kernel CPU scheduler code
6  *
7  *  Copyright (C) 1991-2002  Linus Torvalds
8  *  Copyright (C) 1998-2024  Ingo Molnar, Red Hat
9  */
10 #include <linux/highmem.h>
11 #include <linux/hrtimer_api.h>
12 #include <linux/ktime_api.h>
13 #include <linux/sched/signal.h>
14 #include <linux/syscalls_api.h>
15 #include <linux/debug_locks.h>
16 #include <linux/prefetch.h>
17 #include <linux/capability.h>
18 #include <linux/pgtable_api.h>
19 #include <linux/wait_bit.h>
20 #include <linux/jiffies.h>
21 #include <linux/spinlock_api.h>
22 #include <linux/cpumask_api.h>
23 #include <linux/lockdep_api.h>
24 #include <linux/hardirq.h>
25 #include <linux/softirq.h>
26 #include <linux/refcount_api.h>
27 #include <linux/topology.h>
28 #include <linux/sched/clock.h>
29 #include <linux/sched/cond_resched.h>
30 #include <linux/sched/cputime.h>
31 #include <linux/sched/debug.h>
32 #include <linux/sched/hotplug.h>
33 #include <linux/sched/init.h>
34 #include <linux/sched/isolation.h>
35 #include <linux/sched/loadavg.h>
36 #include <linux/sched/mm.h>
37 #include <linux/sched/nohz.h>
38 #include <linux/sched/rseq_api.h>
39 #include <linux/sched/rt.h>
40 
41 #include <linux/blkdev.h>
42 #include <linux/context_tracking.h>
43 #include <linux/cpuset.h>
44 #include <linux/delayacct.h>
45 #include <linux/init_task.h>
46 #include <linux/interrupt.h>
47 #include <linux/ioprio.h>
48 #include <linux/kallsyms.h>
49 #include <linux/kcov.h>
50 #include <linux/kprobes.h>
51 #include <linux/llist_api.h>
52 #include <linux/mmu_context.h>
53 #include <linux/mmzone.h>
54 #include <linux/mutex_api.h>
55 #include <linux/nmi.h>
56 #include <linux/nospec.h>
57 #include <linux/perf_event_api.h>
58 #include <linux/profile.h>
59 #include <linux/psi.h>
60 #include <linux/rcuwait_api.h>
61 #include <linux/rseq.h>
62 #include <linux/sched/wake_q.h>
63 #include <linux/scs.h>
64 #include <linux/slab.h>
65 #include <linux/syscalls.h>
66 #include <linux/vtime.h>
67 #include <linux/wait_api.h>
68 #include <linux/workqueue_api.h>
69 
70 #ifdef CONFIG_PREEMPT_DYNAMIC
71 # ifdef CONFIG_GENERIC_ENTRY
72 #  include <linux/entry-common.h>
73 # endif
74 #endif
75 
76 #include <uapi/linux/sched/types.h>
77 
78 #include <asm/irq_regs.h>
79 #include <asm/switch_to.h>
80 #include <asm/tlb.h>
81 
82 #define CREATE_TRACE_POINTS
83 #include <linux/sched/rseq_api.h>
84 #include <trace/events/sched.h>
85 #include <trace/events/ipi.h>
86 #undef CREATE_TRACE_POINTS
87 
88 #include "sched.h"
89 #include "stats.h"
90 
91 #include "autogroup.h"
92 #include "pelt.h"
93 #include "smp.h"
94 #include "stats.h"
95 
96 #include "../workqueue_internal.h"
97 #include "../../io_uring/io-wq.h"
98 #include "../smpboot.h"
99 
100 EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
101 EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);
102 
103 /*
104  * Export tracepoints that act as a bare tracehook (ie: have no trace event
105  * associated with them) to allow external modules to probe them.
106  */
107 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp);
108 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
109 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
110 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
111 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
112 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_hw_tp);
113 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp);
114 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
115 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
116 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
117 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
118 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_compute_energy_tp);
119 
120 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
121 
122 #ifdef CONFIG_SCHED_DEBUG
123 /*
124  * Debugging: various feature bits
125  *
126  * If SCHED_DEBUG is disabled, each compilation unit has its own copy of
127  * sysctl_sched_features, defined in sched.h, to allow constants propagation
128  * at compile time and compiler optimization based on features default.
129  */
130 #define SCHED_FEAT(name, enabled)	\
131 	(1UL << __SCHED_FEAT_##name) * enabled |
132 const_debug unsigned int sysctl_sched_features =
133 #include "features.h"
134 	0;
135 #undef SCHED_FEAT
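
/*
 * For illustration, assuming features.h contains entries such as
 * SCHED_FEAT(PLACE_LAG, true), the #include above expands roughly to:
 *
 *	const_debug unsigned int sysctl_sched_features =
 *		(1UL << __SCHED_FEAT_PLACE_LAG) * true |
 *		...
 *		0;
 *
 * i.e. a compile-time OR of the default-enabled feature bits, terminated by
 * the trailing 0.
 */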
136 
137 /*
138  * Print a warning if need_resched is set for the given duration (if
139  * LATENCY_WARN is enabled).
140  *
141  * If sysctl_resched_latency_warn_once is set, only one warning will be shown
142  * per boot.
143  */
144 __read_mostly int sysctl_resched_latency_warn_ms = 100;
145 __read_mostly int sysctl_resched_latency_warn_once = 1;
146 #endif /* CONFIG_SCHED_DEBUG */
147 
148 /*
149  * Number of tasks to iterate in a single balance run.
150  * Limited because this is done with IRQs disabled.
151  */
152 const_debug unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;
153 
154 __read_mostly int scheduler_running;
155 
156 #ifdef CONFIG_SCHED_CORE
157 
158 DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);
159 
160 /* kernel prio, less is more */
161 static inline int __task_prio(const struct task_struct *p)
162 {
163 	if (p->sched_class == &stop_sched_class) /* trumps deadline */
164 		return -2;
165 
166 	if (rt_prio(p->prio)) /* includes deadline */
167 		return p->prio; /* [-1, 99] */
168 
169 	if (p->sched_class == &idle_sched_class)
170 		return MAX_RT_PRIO + NICE_WIDTH; /* 140 */
171 
172 	return MAX_RT_PRIO + MAX_NICE; /* 120, squash fair */
173 }
174 
175 /*
176  * l(a,b)
177  * le(a,b) := !l(b,a)
178  * g(a,b)  := l(b,a)
179  * ge(a,b) := !l(a,b)
180  */
181 
182 /* real prio, less is less */
183 static inline bool prio_less(const struct task_struct *a,
184 			     const struct task_struct *b, bool in_fi)
185 {
186 
187 	int pa = __task_prio(a), pb = __task_prio(b);
188 
189 	if (-pa < -pb)
190 		return true;
191 
192 	if (-pb < -pa)
193 		return false;
194 
195 	if (pa == -1) /* dl_prio() doesn't work because of stop_class above */
196 		return !dl_time_before(a->dl.deadline, b->dl.deadline);
197 
198 	if (pa == MAX_RT_PRIO + MAX_NICE)	/* fair */
199 		return cfs_prio_less(a, b, in_fi);
200 
201 	return false;
202 }
203 
204 static inline bool __sched_core_less(const struct task_struct *a,
205 				     const struct task_struct *b)
206 {
207 	if (a->core_cookie < b->core_cookie)
208 		return true;
209 
210 	if (a->core_cookie > b->core_cookie)
211 		return false;
212 
213 	/* flip prio, so high prio is leftmost */
214 	if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count))
215 		return true;
216 
217 	return false;
218 }
219 
220 #define __node_2_sc(node) rb_entry((node), struct task_struct, core_node)
221 
222 static inline bool rb_sched_core_less(struct rb_node *a, const struct rb_node *b)
223 {
224 	return __sched_core_less(__node_2_sc(a), __node_2_sc(b));
225 }
226 
227 static inline int rb_sched_core_cmp(const void *key, const struct rb_node *node)
228 {
229 	const struct task_struct *p = __node_2_sc(node);
230 	unsigned long cookie = (unsigned long)key;
231 
232 	if (cookie < p->core_cookie)
233 		return -1;
234 
235 	if (cookie > p->core_cookie)
236 		return 1;
237 
238 	return 0;
239 }
240 
241 void sched_core_enqueue(struct rq *rq, struct task_struct *p)
242 {
243 	rq->core->core_task_seq++;
244 
245 	if (!p->core_cookie)
246 		return;
247 
248 	rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);
249 }
250 
251 void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags)
252 {
253 	rq->core->core_task_seq++;
254 
255 	if (sched_core_enqueued(p)) {
256 		rb_erase(&p->core_node, &rq->core_tree);
257 		RB_CLEAR_NODE(&p->core_node);
258 	}
259 
260 	/*
261 	 * Migrating the last task off the cpu, with the cpu in forced idle
262 	 * state. Reschedule to create an accounting edge for forced idle,
263 	 * and re-examine whether the core is still in forced idle state.
264 	 */
265 	if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 &&
266 	    rq->core->core_forceidle_count && rq->curr == rq->idle)
267 		resched_curr(rq);
268 }
269 
270 static int sched_task_is_throttled(struct task_struct *p, int cpu)
271 {
272 	if (p->sched_class->task_is_throttled)
273 		return p->sched_class->task_is_throttled(p, cpu);
274 
275 	return 0;
276 }
277 
278 static struct task_struct *sched_core_next(struct task_struct *p, unsigned long cookie)
279 {
280 	struct rb_node *node = &p->core_node;
281 	int cpu = task_cpu(p);
282 
283 	do {
284 		node = rb_next(node);
285 		if (!node)
286 			return NULL;
287 
288 		p = __node_2_sc(node);
289 		if (p->core_cookie != cookie)
290 			return NULL;
291 
292 	} while (sched_task_is_throttled(p, cpu));
293 
294 	return p;
295 }
296 
297 /*
298  * Find left-most (aka, highest priority) and unthrottled task matching @cookie.
299  * If no suitable task is found, NULL will be returned.
300  */
301 static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie)
302 {
303 	struct task_struct *p;
304 	struct rb_node *node;
305 
306 	node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp);
307 	if (!node)
308 		return NULL;
309 
310 	p = __node_2_sc(node);
311 	if (!sched_task_is_throttled(p, rq->cpu))
312 		return p;
313 
314 	return sched_core_next(p, cookie);
315 }
316 
317 /*
318  * Magic required such that:
319  *
320  *	raw_spin_rq_lock(rq);
321  *	...
322  *	raw_spin_rq_unlock(rq);
323  *
324  * ends up locking and unlocking the _same_ lock, and all CPUs
325  * always agree on what rq has what lock.
326  *
327  * XXX entirely possible to selectively enable cores, don't bother for now.
328  */
329 
330 static DEFINE_MUTEX(sched_core_mutex);
331 static atomic_t sched_core_count;
332 static struct cpumask sched_core_mask;
333 
334 static void sched_core_lock(int cpu, unsigned long *flags)
335 {
336 	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
337 	int t, i = 0;
338 
339 	local_irq_save(*flags);
340 	for_each_cpu(t, smt_mask)
341 		raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);
342 }
343 
344 static void sched_core_unlock(int cpu, unsigned long *flags)
345 {
346 	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
347 	int t;
348 
349 	for_each_cpu(t, smt_mask)
350 		raw_spin_unlock(&cpu_rq(t)->__lock);
351 	local_irq_restore(*flags);
352 }
353 
354 static void __sched_core_flip(bool enabled)
355 {
356 	unsigned long flags;
357 	int cpu, t;
358 
359 	cpus_read_lock();
360 
361 	/*
362 	 * Toggle the online cores, one by one.
363 	 */
364 	cpumask_copy(&sched_core_mask, cpu_online_mask);
365 	for_each_cpu(cpu, &sched_core_mask) {
366 		const struct cpumask *smt_mask = cpu_smt_mask(cpu);
367 
368 		sched_core_lock(cpu, &flags);
369 
370 		for_each_cpu(t, smt_mask)
371 			cpu_rq(t)->core_enabled = enabled;
372 
373 		cpu_rq(cpu)->core->core_forceidle_start = 0;
374 
375 		sched_core_unlock(cpu, &flags);
376 
377 		cpumask_andnot(&sched_core_mask, &sched_core_mask, smt_mask);
378 	}
379 
380 	/*
381 	 * Toggle the offline CPUs.
382 	 */
383 	for_each_cpu_andnot(cpu, cpu_possible_mask, cpu_online_mask)
384 		cpu_rq(cpu)->core_enabled = enabled;
385 
386 	cpus_read_unlock();
387 }
388 
389 static void sched_core_assert_empty(void)
390 {
391 	int cpu;
392 
393 	for_each_possible_cpu(cpu)
394 		WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree));
395 }
396 
397 static void __sched_core_enable(void)
398 {
399 	static_branch_enable(&__sched_core_enabled);
400 	/*
401 	 * Ensure all previous instances of raw_spin_rq_*lock() have finished
402 	 * and future ones will observe !sched_core_disabled().
403 	 */
404 	synchronize_rcu();
405 	__sched_core_flip(true);
406 	sched_core_assert_empty();
407 }
408 
409 static void __sched_core_disable(void)
410 {
411 	sched_core_assert_empty();
412 	__sched_core_flip(false);
413 	static_branch_disable(&__sched_core_enabled);
414 }
415 
416 void sched_core_get(void)
417 {
418 	if (atomic_inc_not_zero(&sched_core_count))
419 		return;
420 
421 	mutex_lock(&sched_core_mutex);
422 	if (!atomic_read(&sched_core_count))
423 		__sched_core_enable();
424 
425 	smp_mb__before_atomic();
426 	atomic_inc(&sched_core_count);
427 	mutex_unlock(&sched_core_mutex);
428 }
429 
430 static void __sched_core_put(struct work_struct *work)
431 {
432 	if (atomic_dec_and_mutex_lock(&sched_core_count, &sched_core_mutex)) {
433 		__sched_core_disable();
434 		mutex_unlock(&sched_core_mutex);
435 	}
436 }
437 
438 void sched_core_put(void)
439 {
440 	static DECLARE_WORK(_work, __sched_core_put);
441 
442 	/*
443 	 * "There can be only one"
444 	 *
445 	 * Either this is the last one, or we don't actually need to do any
446 	 * 'work'. If it is the last *again*, we rely on
447 	 * WORK_STRUCT_PENDING_BIT.
448 	 */
449 	if (!atomic_add_unless(&sched_core_count, -1, 1))
450 		schedule_work(&_work);
451 }
452 
453 #else /* !CONFIG_SCHED_CORE */
454 
455 static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { }
456 static inline void
457 sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { }
458 
459 #endif /* CONFIG_SCHED_CORE */
460 
461 /*
462  * Serialization rules:
463  *
464  * Lock order:
465  *
466  *   p->pi_lock
467  *     rq->lock
468  *       hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
469  *
470  *  rq1->lock
471  *    rq2->lock  where: rq1 < rq2
472  *
473  * Regular state:
474  *
475  * Normal scheduling state is serialized by rq->lock. __schedule() takes the
476  * local CPU's rq->lock, it optionally removes the task from the runqueue and
477  * always looks at the local rq data structures to find the most eligible task
478  * to run next.
479  *
480  * Task enqueue is also under rq->lock, possibly taken from another CPU.
481  * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
482  * the local CPU to avoid bouncing the runqueue state around [ see
483  * ttwu_queue_wakelist() ]
484  *
485  * Task wakeup, specifically wakeups that involve migration, are horribly
486  * complicated to avoid having to take two rq->locks.
487  *
488  * Special state:
489  *
490  * System-calls and anything external will use task_rq_lock() which acquires
491  * both p->pi_lock and rq->lock. As a consequence the state they change is
492  * stable while holding either lock:
493  *
494  *  - sched_setaffinity()/
495  *    set_cpus_allowed_ptr():	p->cpus_ptr, p->nr_cpus_allowed
496  *  - set_user_nice():		p->se.load, p->*prio
497  *  - __sched_setscheduler():	p->sched_class, p->policy, p->*prio,
498  *				p->se.load, p->rt_priority,
499  *				p->dl.dl_{runtime, deadline, period, flags, bw, density}
500  *  - sched_setnuma():		p->numa_preferred_nid
501  *  - sched_move_task():	p->sched_task_group
502  *  - uclamp_update_active()	p->uclamp*
503  *
504  * p->state <- TASK_*:
505  *
506  *   is changed locklessly using set_current_state(), __set_current_state() or
507  *   set_special_state(), see their respective comments, or by
508  *   try_to_wake_up(). This latter uses p->pi_lock to serialize against
509  *   concurrent self.
510  *
511  * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
512  *
513  *   is set by activate_task() and cleared by deactivate_task(), under
514  *   rq->lock. Non-zero indicates the task is runnable, the special
515  *   ON_RQ_MIGRATING state is used for migration without holding both
516  *   rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
517  *
518  * p->on_cpu <- { 0, 1 }:
519  *
520  *   is set by prepare_task() and cleared by finish_task() such that it will be
521  *   set before p is scheduled-in and cleared after p is scheduled-out, both
522  *   under rq->lock. Non-zero indicates the task is running on its CPU.
523  *
524  *   [ The astute reader will observe that it is possible for two tasks on one
525  *     CPU to have ->on_cpu = 1 at the same time. ]
526  *
527  * task_cpu(p): is changed by set_task_cpu(), the rules are:
528  *
529  *  - Don't call set_task_cpu() on a blocked task:
530  *
531  *    We don't care what CPU we're not running on, this simplifies hotplug,
532  *    the CPU assignment of blocked tasks isn't required to be valid.
533  *
534  *  - for try_to_wake_up(), called under p->pi_lock:
535  *
536  *    This allows try_to_wake_up() to only take one rq->lock, see its comment.
537  *
538  *  - for migration called under rq->lock:
539  *    [ see task_on_rq_migrating() in task_rq_lock() ]
540  *
541  *    o move_queued_task()
542  *    o detach_task()
543  *
544  *  - for migration called under double_rq_lock():
545  *
546  *    o __migrate_swap_task()
547  *    o push_rt_task() / pull_rt_task()
548  *    o push_dl_task() / pull_dl_task()
549  *    o dl_task_offline_migration()
550  *
551  */
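
/*
 * A minimal sketch of the "special state" pattern described above, as used by
 * the system-call style paths (the local variable names are illustrative):
 *
 *	struct rq_flags rf;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &rf);	// takes p->pi_lock, then rq->lock
 *	update_rq_clock(rq);
 *	...				// p's scheduling state is stable here
 *	task_rq_unlock(rq, p, &rf);
 */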
552 
553 void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
554 {
555 	raw_spinlock_t *lock;
556 
557 	/* Matches synchronize_rcu() in __sched_core_enable() */
558 	preempt_disable();
559 	if (sched_core_disabled()) {
560 		raw_spin_lock_nested(&rq->__lock, subclass);
561 		/* preempt_count *MUST* be > 1 */
562 		preempt_enable_no_resched();
563 		return;
564 	}
565 
566 	for (;;) {
567 		lock = __rq_lockp(rq);
568 		raw_spin_lock_nested(lock, subclass);
569 		if (likely(lock == __rq_lockp(rq))) {
570 			/* preempt_count *MUST* be > 1 */
571 			preempt_enable_no_resched();
572 			return;
573 		}
574 		raw_spin_unlock(lock);
575 	}
576 }
577 
578 bool raw_spin_rq_trylock(struct rq *rq)
579 {
580 	raw_spinlock_t *lock;
581 	bool ret;
582 
583 	/* Matches synchronize_rcu() in __sched_core_enable() */
584 	preempt_disable();
585 	if (sched_core_disabled()) {
586 		ret = raw_spin_trylock(&rq->__lock);
587 		preempt_enable();
588 		return ret;
589 	}
590 
591 	for (;;) {
592 		lock = __rq_lockp(rq);
593 		ret = raw_spin_trylock(lock);
594 		if (!ret || (likely(lock == __rq_lockp(rq)))) {
595 			preempt_enable();
596 			return ret;
597 		}
598 		raw_spin_unlock(lock);
599 	}
600 }
601 
602 void raw_spin_rq_unlock(struct rq *rq)
603 {
604 	raw_spin_unlock(rq_lockp(rq));
605 }
606 
607 #ifdef CONFIG_SMP
608 /*
609  * double_rq_lock - safely lock two runqueues
610  */
611 void double_rq_lock(struct rq *rq1, struct rq *rq2)
612 {
613 	lockdep_assert_irqs_disabled();
614 
615 	if (rq_order_less(rq2, rq1))
616 		swap(rq1, rq2);
617 
618 	raw_spin_rq_lock(rq1);
619 	if (__rq_lockp(rq1) != __rq_lockp(rq2))
620 		raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);
621 
622 	double_rq_clock_clear_update(rq1, rq2);
623 }
624 #endif
625 
626 /*
627  * __task_rq_lock - lock the rq @p resides on.
628  */
629 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
630 	__acquires(rq->lock)
631 {
632 	struct rq *rq;
633 
634 	lockdep_assert_held(&p->pi_lock);
635 
636 	for (;;) {
637 		rq = task_rq(p);
638 		raw_spin_rq_lock(rq);
639 		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
640 			rq_pin_lock(rq, rf);
641 			return rq;
642 		}
643 		raw_spin_rq_unlock(rq);
644 
645 		while (unlikely(task_on_rq_migrating(p)))
646 			cpu_relax();
647 	}
648 }
649 
650 /*
651  * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
652  */
653 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
654 	__acquires(p->pi_lock)
655 	__acquires(rq->lock)
656 {
657 	struct rq *rq;
658 
659 	for (;;) {
660 		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
661 		rq = task_rq(p);
662 		raw_spin_rq_lock(rq);
663 		/*
664 		 *	move_queued_task()		task_rq_lock()
665 		 *
666 		 *	ACQUIRE (rq->lock)
667 		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
668 		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
669 		 *	[S] ->cpu = new_cpu		[L] task_rq()
670 		 *					[L] ->on_rq
671 		 *	RELEASE (rq->lock)
672 		 *
673 		 * If we observe the old CPU in task_rq_lock(), the acquire of
674 		 * the old rq->lock will fully serialize against the stores.
675 		 *
676 		 * If we observe the new CPU in task_rq_lock(), the address
677 		 * dependency headed by '[L] rq = task_rq()' and the acquire
678 		 * will pair with the WMB to ensure we then also see migrating.
679 		 */
680 		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
681 			rq_pin_lock(rq, rf);
682 			return rq;
683 		}
684 		raw_spin_rq_unlock(rq);
685 		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
686 
687 		while (unlikely(task_on_rq_migrating(p)))
688 			cpu_relax();
689 	}
690 }
691 
692 /*
693  * RQ-clock updating methods:
694  */
695 
696 static void update_rq_clock_task(struct rq *rq, s64 delta)
697 {
698 /*
699  * In theory, the compile should just see 0 here, and optimize out the call
700  * to sched_rt_avg_update. But I don't trust it...
701  */
702 	s64 __maybe_unused steal = 0, irq_delta = 0;
703 
704 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
705 	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
706 
707 	/*
708 	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
709 	 * this case when a previous update_rq_clock() happened inside a
710 	 * {soft,}IRQ region.
711 	 *
712 	 * When this happens, we stop ->clock_task and only update the
713 	 * prev_irq_time stamp to account for the part that fit, so that a next
714 	 * update will consume the rest. This ensures ->clock_task is
715 	 * monotonic.
716 	 *
717 	 * It does however cause some slight mis-attribution of {soft,}IRQ
718 	 * time; a more accurate solution would be to update the irq_time using
719 	 * the current rq->clock timestamp, except that would require using
720 	 * atomic ops.
721 	 */
722 	if (irq_delta > delta)
723 		irq_delta = delta;
724 
725 	rq->prev_irq_time += irq_delta;
726 	delta -= irq_delta;
727 	delayacct_irq(rq->curr, irq_delta);
728 #endif
729 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
730 	if (static_key_false((&paravirt_steal_rq_enabled))) {
731 		steal = paravirt_steal_clock(cpu_of(rq));
732 		steal -= rq->prev_steal_time_rq;
733 
734 		if (unlikely(steal > delta))
735 			steal = delta;
736 
737 		rq->prev_steal_time_rq += steal;
738 		delta -= steal;
739 	}
740 #endif
741 
742 	rq->clock_task += delta;
743 
744 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
745 	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
746 		update_irq_load_avg(rq, irq_delta + steal);
747 #endif
748 	update_rq_clock_pelt(rq, delta);
749 }
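
/*
 * Worked example (illustrative numbers): if the raw clock delta is 3ms and
 * 1ms of that was spent in {soft,}IRQ context, then irq_delta = 1ms is
 * subtracted and ->clock_task only advances by 2ms. If irq_delta were to
 * exceed delta (because a previous update ran inside an IRQ region), it is
 * clamped to delta so ->clock_task stays monotonic, and the remainder is
 * consumed by a later update.
 */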
750 
751 void update_rq_clock(struct rq *rq)
752 {
753 	s64 delta;
754 
755 	lockdep_assert_rq_held(rq);
756 
757 	if (rq->clock_update_flags & RQCF_ACT_SKIP)
758 		return;
759 
760 #ifdef CONFIG_SCHED_DEBUG
761 	if (sched_feat(WARN_DOUBLE_CLOCK))
762 		SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
763 	rq->clock_update_flags |= RQCF_UPDATED;
764 #endif
765 
766 	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
767 	if (delta < 0)
768 		return;
769 	rq->clock += delta;
770 	update_rq_clock_task(rq, delta);
771 }
772 
773 #ifdef CONFIG_SCHED_HRTICK
774 /*
775  * Use HR-timers to deliver accurate preemption points.
776  */
777 
778 static void hrtick_clear(struct rq *rq)
779 {
780 	if (hrtimer_active(&rq->hrtick_timer))
781 		hrtimer_cancel(&rq->hrtick_timer);
782 }
783 
784 /*
785  * High-resolution timer tick.
786  * Runs from hardirq context with interrupts disabled.
787  */
788 static enum hrtimer_restart hrtick(struct hrtimer *timer)
789 {
790 	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
791 	struct rq_flags rf;
792 
793 	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
794 
795 	rq_lock(rq, &rf);
796 	update_rq_clock(rq);
797 	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
798 	rq_unlock(rq, &rf);
799 
800 	return HRTIMER_NORESTART;
801 }
802 
803 #ifdef CONFIG_SMP
804 
805 static void __hrtick_restart(struct rq *rq)
806 {
807 	struct hrtimer *timer = &rq->hrtick_timer;
808 	ktime_t time = rq->hrtick_time;
809 
810 	hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
811 }
812 
813 /*
814  * called from hardirq (IPI) context
815  */
816 static void __hrtick_start(void *arg)
817 {
818 	struct rq *rq = arg;
819 	struct rq_flags rf;
820 
821 	rq_lock(rq, &rf);
822 	__hrtick_restart(rq);
823 	rq_unlock(rq, &rf);
824 }
825 
826 /*
827  * Called to set the hrtick timer state.
828  *
829  * called with rq->lock held and IRQs disabled
830  */
831 void hrtick_start(struct rq *rq, u64 delay)
832 {
833 	struct hrtimer *timer = &rq->hrtick_timer;
834 	s64 delta;
835 
836 	/*
837 	 * Don't schedule slices shorter than 10000ns, that just
838 	 * doesn't make sense and can cause timer DoS.
839 	 */
840 	delta = max_t(s64, delay, 10000LL);
841 	rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);
842 
843 	if (rq == this_rq())
844 		__hrtick_restart(rq);
845 	else
846 		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
847 }
848 
849 #else
850 /*
851  * Called to set the hrtick timer state.
852  *
853  * called with rq->lock held and IRQs disabled
854  */
855 void hrtick_start(struct rq *rq, u64 delay)
856 {
857 	/*
858 	 * Don't schedule slices shorter than 10000ns, that just
859 	 * doesn't make sense. Rely on vruntime for fairness.
860 	 */
861 	delay = max_t(u64, delay, 10000LL);
862 	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
863 		      HRTIMER_MODE_REL_PINNED_HARD);
864 }
865 
866 #endif /* CONFIG_SMP */
867 
868 static void hrtick_rq_init(struct rq *rq)
869 {
870 #ifdef CONFIG_SMP
871 	INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
872 #endif
873 	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
874 	rq->hrtick_timer.function = hrtick;
875 }
876 #else	/* CONFIG_SCHED_HRTICK */
877 static inline void hrtick_clear(struct rq *rq)
878 {
879 }
880 
881 static inline void hrtick_rq_init(struct rq *rq)
882 {
883 }
884 #endif	/* CONFIG_SCHED_HRTICK */
885 
886 /*
887  * try_cmpxchg based fetch_or() macro so it works for different integer types:
888  */
889 #define fetch_or(ptr, mask)						\
890 	({								\
891 		typeof(ptr) _ptr = (ptr);				\
892 		typeof(mask) _mask = (mask);				\
893 		typeof(*_ptr) _val = *_ptr;				\
894 									\
895 		do {							\
896 		} while (!try_cmpxchg(_ptr, &_val, _val | _mask));	\
897 	_val;								\
898 })
899 
900 #if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
901 /*
902  * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
903  * this avoids any races wrt polling state changes and thereby avoids
904  * spurious IPIs.
905  */
906 static inline bool set_nr_and_not_polling(struct task_struct *p)
907 {
908 	struct thread_info *ti = task_thread_info(p);
909 	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
910 }
911 
912 /*
913  * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
914  *
915  * If this returns true, then the idle task promises to call
916  * sched_ttwu_pending() and reschedule soon.
917  */
918 static bool set_nr_if_polling(struct task_struct *p)
919 {
920 	struct thread_info *ti = task_thread_info(p);
921 	typeof(ti->flags) val = READ_ONCE(ti->flags);
922 
923 	do {
924 		if (!(val & _TIF_POLLING_NRFLAG))
925 			return false;
926 		if (val & _TIF_NEED_RESCHED)
927 			return true;
928 	} while (!try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED));
929 
930 	return true;
931 }
932 
933 #else
934 static inline bool set_nr_and_not_polling(struct task_struct *p)
935 {
936 	set_tsk_need_resched(p);
937 	return true;
938 }
939 
940 #ifdef CONFIG_SMP
941 static inline bool set_nr_if_polling(struct task_struct *p)
942 {
943 	return false;
944 }
945 #endif
946 #endif
947 
948 static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
949 {
950 	struct wake_q_node *node = &task->wake_q;
951 
952 	/*
953 	 * Atomically grab the task, if ->wake_q is !nil already it means
954 	 * it's already queued (either by us or someone else) and will get the
955 	 * wakeup due to that.
956 	 *
957 	 * In order to ensure that a pending wakeup will observe our pending
958 	 * state, even in the failed case, an explicit smp_mb() must be used.
959 	 */
960 	smp_mb__before_atomic();
961 	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
962 		return false;
963 
964 	/*
965 	 * The head is context local, there can be no concurrency.
966 	 */
967 	*head->lastp = node;
968 	head->lastp = &node->next;
969 	return true;
970 }
971 
972 /**
973  * wake_q_add() - queue a wakeup for 'later' waking.
974  * @head: the wake_q_head to add @task to
975  * @task: the task to queue for 'later' wakeup
976  *
977  * Queue a task for later wakeup, most likely by the wake_up_q() call in the
978  * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
979  * instantly.
980  *
981  * This function must be used as-if it were wake_up_process(); IOW the task
982  * must be ready to be woken at this location.
983  */
984 void wake_q_add(struct wake_q_head *head, struct task_struct *task)
985 {
986 	if (__wake_q_add(head, task))
987 		get_task_struct(task);
988 }
989 
990 /**
991  * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
992  * @head: the wake_q_head to add @task to
993  * @task: the task to queue for 'later' wakeup
994  *
995  * Queue a task for later wakeup, most likely by the wake_up_q() call in the
996  * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
997  * instantly.
998  *
999  * This function must be used as-if it were wake_up_process(); IOW the task
1000  * must be ready to be woken at this location.
1001  *
1002  * This function is essentially a task-safe equivalent to wake_q_add(). Callers
1003  * that already hold reference to @task can call the 'safe' version and trust
1004  * wake_q to do the right thing depending on whether or not the @task is already
1005  * queued for wakeup.
1006  */
1007 void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
1008 {
1009 	if (!__wake_q_add(head, task))
1010 		put_task_struct(task);
1011 }
1012 
1013 void wake_up_q(struct wake_q_head *head)
1014 {
1015 	struct wake_q_node *node = head->first;
1016 
1017 	while (node != WAKE_Q_TAIL) {
1018 		struct task_struct *task;
1019 
1020 		task = container_of(node, struct task_struct, wake_q);
1021 		/* Task can safely be re-inserted now: */
1022 		node = node->next;
1023 		task->wake_q.next = NULL;
1024 
1025 		/*
1026 		 * wake_up_process() executes a full barrier, which pairs with
1027 		 * the queueing in wake_q_add() so as not to miss wakeups.
1028 		 */
1029 		wake_up_process(task);
1030 		put_task_struct(task);
1031 	}
1032 }
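
/*
 * Typical usage sketch (the lock and list below are illustrative, not part of
 * this file): batch wakeups while holding a spinlock, then issue them after
 * the lock is dropped so the woken tasks don't immediately contend on it:
 *
 *	DEFINE_WAKE_Q(wake_q);
 *
 *	raw_spin_lock(&foo->lock);
 *	list_for_each_entry(waiter, &foo->waiters, entry)
 *		wake_q_add(&wake_q, waiter->task);
 *	raw_spin_unlock(&foo->lock);
 *
 *	wake_up_q(&wake_q);
 */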
1033 
1034 /*
1035  * resched_curr - mark rq's current task 'to be rescheduled now'.
1036  *
1037  * On UP this means the setting of the need_resched flag, on SMP it
1038  * might also involve a cross-CPU call to trigger the scheduler on
1039  * the target CPU.
1040  */
1041 void resched_curr(struct rq *rq)
1042 {
1043 	struct task_struct *curr = rq->curr;
1044 	int cpu;
1045 
1046 	lockdep_assert_rq_held(rq);
1047 
1048 	if (test_tsk_need_resched(curr))
1049 		return;
1050 
1051 	cpu = cpu_of(rq);
1052 
1053 	if (cpu == smp_processor_id()) {
1054 		set_tsk_need_resched(curr);
1055 		set_preempt_need_resched();
1056 		return;
1057 	}
1058 
1059 	if (set_nr_and_not_polling(curr))
1060 		smp_send_reschedule(cpu);
1061 	else
1062 		trace_sched_wake_idle_without_ipi(cpu);
1063 }
1064 
1065 void resched_cpu(int cpu)
1066 {
1067 	struct rq *rq = cpu_rq(cpu);
1068 	unsigned long flags;
1069 
1070 	raw_spin_rq_lock_irqsave(rq, flags);
1071 	if (cpu_online(cpu) || cpu == smp_processor_id())
1072 		resched_curr(rq);
1073 	raw_spin_rq_unlock_irqrestore(rq, flags);
1074 }
1075 
1076 #ifdef CONFIG_SMP
1077 #ifdef CONFIG_NO_HZ_COMMON
1078 /*
1079  * In the semi idle case, use the nearest busy CPU for migrating timers
1080  * from an idle CPU.  This is good for power-savings.
1081  *
1082  * We don't do a similar optimization for a completely idle system, as
1083  * selecting an idle CPU will add more delays to the timers than intended
1084  * (as that CPU's timer base may not be up to date wrt jiffies etc).
1085  */
1086 int get_nohz_timer_target(void)
1087 {
1088 	int i, cpu = smp_processor_id(), default_cpu = -1;
1089 	struct sched_domain *sd;
1090 	const struct cpumask *hk_mask;
1091 
1092 	if (housekeeping_cpu(cpu, HK_TYPE_TIMER)) {
1093 		if (!idle_cpu(cpu))
1094 			return cpu;
1095 		default_cpu = cpu;
1096 	}
1097 
1098 	hk_mask = housekeeping_cpumask(HK_TYPE_TIMER);
1099 
1100 	guard(rcu)();
1101 
1102 	for_each_domain(cpu, sd) {
1103 		for_each_cpu_and(i, sched_domain_span(sd), hk_mask) {
1104 			if (cpu == i)
1105 				continue;
1106 
1107 			if (!idle_cpu(i))
1108 				return i;
1109 		}
1110 	}
1111 
1112 	if (default_cpu == -1)
1113 		default_cpu = housekeeping_any_cpu(HK_TYPE_TIMER);
1114 
1115 	return default_cpu;
1116 }
1117 
1118 /*
1119  * When add_timer_on() enqueues a timer into the timer wheel of an
1120  * idle CPU then this timer might expire before the next timer event
1121  * which is scheduled to wake up that CPU. In case of a completely
1122  * idle system the next event might even be infinite time into the
1123  * future. wake_up_idle_cpu() ensures that the CPU is woken up and
1124  * leaves the inner idle loop so the newly added timer is taken into
1125  * account when the CPU goes back to idle and evaluates the timer
1126  * wheel for the next timer event.
1127  */
1128 static void wake_up_idle_cpu(int cpu)
1129 {
1130 	struct rq *rq = cpu_rq(cpu);
1131 
1132 	if (cpu == smp_processor_id())
1133 		return;
1134 
1135 	/*
1136 	 * Set TIF_NEED_RESCHED and send an IPI if in the non-polling
1137 	 * part of the idle loop. This forces an exit from the idle loop
1138 	 * and a round trip to schedule(). Now this could be optimized
1139 	 * because a simple new idle loop iteration is enough to
1140 	 * re-evaluate the next tick. Provided some re-ordering of tick
1141 	 * nohz functions that would need to follow TIF_POLLING_NRFLAG
1142 	 * clearing:
1143 	 *
1144 	 * - On most architectures, a simple fetch_or on ti::flags with a
1145 	 *   "0" value would be enough to know if an IPI needs to be sent.
1146 	 *
1147 	 * - x86 needs to perform a last need_resched() check between
1148 	 *   monitor and mwait which doesn't take timers into account.
1149 	 *   There a dedicated TIF_TIMER flag would be required to
1150 	 *   fetch_or here and be checked along with TIF_NEED_RESCHED
1151 	 *   before mwait().
1152 	 *
1153 	 * However, remote timer enqueue is not such a frequent event
1154 	 * and testing of the above solutions didn't appear to show
1155 	 * much benefit.
1156 	 */
1157 	if (set_nr_and_not_polling(rq->idle))
1158 		smp_send_reschedule(cpu);
1159 	else
1160 		trace_sched_wake_idle_without_ipi(cpu);
1161 }
1162 
1163 static bool wake_up_full_nohz_cpu(int cpu)
1164 {
1165 	/*
1166 	 * We just need the target to call irq_exit() and re-evaluate
1167 	 * the next tick. The nohz full kick at least implies that.
1168 	 * If needed we can still optimize that later with an
1169 	 * empty IRQ.
1170 	 */
1171 	if (cpu_is_offline(cpu))
1172 		return true;  /* Don't try to wake offline CPUs. */
1173 	if (tick_nohz_full_cpu(cpu)) {
1174 		if (cpu != smp_processor_id() ||
1175 		    tick_nohz_tick_stopped())
1176 			tick_nohz_full_kick_cpu(cpu);
1177 		return true;
1178 	}
1179 
1180 	return false;
1181 }
1182 
1183 /*
1184  * Wake up the specified CPU.  If the CPU is going offline, it is the
1185  * caller's responsibility to deal with the lost wakeup, for example,
1186  * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
1187  */
1188 void wake_up_nohz_cpu(int cpu)
1189 {
1190 	if (!wake_up_full_nohz_cpu(cpu))
1191 		wake_up_idle_cpu(cpu);
1192 }
1193 
1194 static void nohz_csd_func(void *info)
1195 {
1196 	struct rq *rq = info;
1197 	int cpu = cpu_of(rq);
1198 	unsigned int flags;
1199 
1200 	/*
1201 	 * Release the rq::nohz_csd.
1202 	 */
1203 	flags = atomic_fetch_andnot(NOHZ_KICK_MASK | NOHZ_NEWILB_KICK, nohz_flags(cpu));
1204 	WARN_ON(!(flags & NOHZ_KICK_MASK));
1205 
1206 	rq->idle_balance = idle_cpu(cpu);
1207 	if (rq->idle_balance && !need_resched()) {
1208 		rq->nohz_idle_balance = flags;
1209 		raise_softirq_irqoff(SCHED_SOFTIRQ);
1210 	}
1211 }
1212 
1213 #endif /* CONFIG_NO_HZ_COMMON */
1214 
1215 #ifdef CONFIG_NO_HZ_FULL
1216 static inline bool __need_bw_check(struct rq *rq, struct task_struct *p)
1217 {
1218 	if (rq->nr_running != 1)
1219 		return false;
1220 
1221 	if (p->sched_class != &fair_sched_class)
1222 		return false;
1223 
1224 	if (!task_on_rq_queued(p))
1225 		return false;
1226 
1227 	return true;
1228 }
1229 
1230 bool sched_can_stop_tick(struct rq *rq)
1231 {
1232 	int fifo_nr_running;
1233 
1234 	/* Deadline tasks, even if single, need the tick */
1235 	if (rq->dl.dl_nr_running)
1236 		return false;
1237 
1238 	/*
1239 	 * If there is more than one RR task, we need the tick to affect the
1240 	 * actual RR behaviour.
1241 	 */
1242 	if (rq->rt.rr_nr_running) {
1243 		if (rq->rt.rr_nr_running == 1)
1244 			return true;
1245 		else
1246 			return false;
1247 	}
1248 
1249 	/*
1250 	 * If there are no RR tasks but there are FIFO tasks, we can skip the
1251 	 * tick: there is no forced preemption between FIFO tasks.
1252 	 */
1253 	fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
1254 	if (fifo_nr_running)
1255 		return true;
1256 
1257 	/*
1258 	 * If there are no DL, RR or FIFO tasks, there must only be CFS tasks left;
1259 	 * if there's more than one we need the tick for involuntary
1260 	 * preemption.
1261 	 */
1262 	if (rq->nr_running > 1)
1263 		return false;
1264 
1265 	/*
1266 	 * If there is one task and it has CFS runtime bandwidth constraints
1267 	 * and it's on the CPU now, we don't want to stop the tick.
1268 	 * This check prevents clearing the bit if a newly enqueued task here is
1269 	 * dequeued by migrating while the constrained task continues to run.
1270 	 * E.g. going from 2->1 without going through pick_next_task().
1271 	 */
1272 	if (__need_bw_check(rq, rq->curr)) {
1273 		if (cfs_task_bw_constrained(rq->curr))
1274 			return false;
1275 	}
1276 
1277 	return true;
1278 }
1279 #endif /* CONFIG_NO_HZ_FULL */
1280 #endif /* CONFIG_SMP */
1281 
1282 #if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
1283 			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
1284 /*
1285  * Iterate task_group tree rooted at *from, calling @down when first entering a
1286  * node and @up when leaving it for the final time.
1287  *
1288  * Caller must hold rcu_lock or sufficient equivalent.
1289  */
1290 int walk_tg_tree_from(struct task_group *from,
1291 			     tg_visitor down, tg_visitor up, void *data)
1292 {
1293 	struct task_group *parent, *child;
1294 	int ret;
1295 
1296 	parent = from;
1297 
1298 down:
1299 	ret = (*down)(parent, data);
1300 	if (ret)
1301 		goto out;
1302 	list_for_each_entry_rcu(child, &parent->children, siblings) {
1303 		parent = child;
1304 		goto down;
1305 
1306 up:
1307 		continue;
1308 	}
1309 	ret = (*up)(parent, data);
1310 	if (ret || parent == from)
1311 		goto out;
1312 
1313 	child = parent;
1314 	parent = parent->parent;
1315 	if (parent)
1316 		goto up;
1317 out:
1318 	return ret;
1319 }
1320 
1321 int tg_nop(struct task_group *tg, void *data)
1322 {
1323 	return 0;
1324 }
1325 #endif
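
/*
 * Usage sketch for the tree walk above (the visitor below is hypothetical):
 * the @down callback is applied to every group in pre-order, @up in
 * post-order, and tg_nop() can stand in for whichever direction is unused:
 *
 *	static int tg_count_one(struct task_group *tg, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;	// a non-zero return aborts the walk
 *	}
 *
 *	int nr = 0;
 *	rcu_read_lock();
 *	walk_tg_tree_from(&root_task_group, tg_count_one, tg_nop, &nr);
 *	rcu_read_unlock();
 */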
1326 
1327 void set_load_weight(struct task_struct *p, bool update_load)
1328 {
1329 	int prio = p->static_prio - MAX_RT_PRIO;
1330 	struct load_weight lw;
1331 
1332 	if (task_has_idle_policy(p)) {
1333 		lw.weight = scale_load(WEIGHT_IDLEPRIO);
1334 		lw.inv_weight = WMULT_IDLEPRIO;
1335 	} else {
1336 		lw.weight = scale_load(sched_prio_to_weight[prio]);
1337 		lw.inv_weight = sched_prio_to_wmult[prio];
1338 	}
1339 
1340 	/*
1341 	 * SCHED_OTHER tasks have to update their load when changing their
1342 	 * weight
1343 	 */
1344 	if (update_load && p->sched_class == &fair_sched_class)
1345 		reweight_task(p, &lw);
1346 	else
1347 		p->se.load = lw;
1348 }
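
/*
 * Worked example (values from sched_prio_to_weight[]): a nice-0 task has
 * weight 1024 and a nice-1 task has weight 820, so on a busy CPU they get
 * roughly 1024/(1024+820) ~= 55% and ~45% of the time respectively -- each
 * nice level is designed to shift the relative CPU share by about 10%.
 * SCHED_IDLE tasks instead get the fixed WEIGHT_IDLEPRIO weight (3).
 */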
1349 
1350 #ifdef CONFIG_UCLAMP_TASK
1351 /*
1352  * Serializes updates of utilization clamp values
1353  *
1354  * The (slow-path) user-space triggers utilization clamp value updates which
1355  * can require updates on (fast-path) scheduler's data structures used to
1356  * support enqueue/dequeue operations.
1357  * While the per-CPU rq lock protects fast-path update operations, user-space
1358  * requests are serialized using a mutex to reduce the risk of conflicting
1359  * updates or API abuses.
1360  */
1361 static DEFINE_MUTEX(uclamp_mutex);
1362 
1363 /* Max allowed minimum utilization */
1364 static unsigned int __maybe_unused sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;
1365 
1366 /* Max allowed maximum utilization */
1367 static unsigned int __maybe_unused sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;
1368 
1369 /*
1370  * By default RT tasks run at the maximum performance point/capacity of the
1371  * system. Uclamp enforces this by always setting UCLAMP_MIN of RT tasks to
1372  * SCHED_CAPACITY_SCALE.
1373  *
1374  * This knob allows admins to change the default behavior when uclamp is being
1375  * used. Particularly in battery-powered devices, running at the maximum
1376  * capacity and frequency will increase energy consumption and shorten the
1377  * battery life.
1378  *
1379  * This knob only affects RT tasks whose uclamp_se->user_defined == false.
1380  *
1381  * This knob will not override the system default sched_util_clamp_min defined
1382  * above.
1383  */
1384 unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE;
1385 
1386 /* All clamps are required to be less or equal than these values */
1387 static struct uclamp_se uclamp_default[UCLAMP_CNT];
1388 
1389 /*
1390  * This static key is used to reduce the uclamp overhead in the fast path. It
1391  * primarily disables the call to uclamp_rq_{inc, dec}() in
1392  * enqueue/dequeue_task().
1393  *
1394  * This allows users to continue to enable uclamp in their kernel config with
1395  * minimum uclamp overhead in the fast path.
1396  *
1397  * As soon as userspace modifies any of the uclamp knobs, the static key is
1398  * enabled, since we then have actual users that make use of uclamp
1399  * functionality.
1400  *
1401  * The knobs that would enable this static key are:
1402  *
1403  *   * A task modifying its uclamp value with sched_setattr().
1404  *   * An admin modifying the sysctl_sched_uclamp_{min, max} via procfs.
1405  *   * An admin modifying the cgroup cpu.uclamp.{min, max}
1406  */
1407 DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);
1408 
1409 static inline unsigned int
1410 uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
1411 		  unsigned int clamp_value)
1412 {
1413 	/*
1414 	 * Avoid blocked utilization pushing up the frequency when we go
1415 	 * idle (which drops the max-clamp) by retaining the last known
1416 	 * max-clamp.
1417 	 */
1418 	if (clamp_id == UCLAMP_MAX) {
1419 		rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
1420 		return clamp_value;
1421 	}
1422 
1423 	return uclamp_none(UCLAMP_MIN);
1424 }
1425 
1426 static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
1427 				     unsigned int clamp_value)
1428 {
1429 	/* Reset max-clamp retention only on idle exit */
1430 	if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
1431 		return;
1432 
1433 	uclamp_rq_set(rq, clamp_id, clamp_value);
1434 }
1435 
1436 static inline
1437 unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
1438 				   unsigned int clamp_value)
1439 {
1440 	struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
1441 	int bucket_id = UCLAMP_BUCKETS - 1;
1442 
1443 	/*
1444 	 * Since both min and max clamps are max aggregated, find the
1445 	 * topmost bucket with tasks in it.
1446 	 */
1447 	for ( ; bucket_id >= 0; bucket_id--) {
1448 		if (!bucket[bucket_id].tasks)
1449 			continue;
1450 		return bucket[bucket_id].value;
1451 	}
1452 
1453 	/* No tasks -- default clamp values */
1454 	return uclamp_idle_value(rq, clamp_id, clamp_value);
1455 }
1456 
1457 static void __uclamp_update_util_min_rt_default(struct task_struct *p)
1458 {
1459 	unsigned int default_util_min;
1460 	struct uclamp_se *uc_se;
1461 
1462 	lockdep_assert_held(&p->pi_lock);
1463 
1464 	uc_se = &p->uclamp_req[UCLAMP_MIN];
1465 
1466 	/* Only sync if user didn't override the default */
1467 	if (uc_se->user_defined)
1468 		return;
1469 
1470 	default_util_min = sysctl_sched_uclamp_util_min_rt_default;
1471 	uclamp_se_set(uc_se, default_util_min, false);
1472 }
1473 
1474 static void uclamp_update_util_min_rt_default(struct task_struct *p)
1475 {
1476 	if (!rt_task(p))
1477 		return;
1478 
1479 	/* Protect updates to p->uclamp_* */
1480 	guard(task_rq_lock)(p);
1481 	__uclamp_update_util_min_rt_default(p);
1482 }
1483 
1484 static inline struct uclamp_se
1485 uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
1486 {
1487 	/* Copy by value as we could modify it */
1488 	struct uclamp_se uc_req = p->uclamp_req[clamp_id];
1489 #ifdef CONFIG_UCLAMP_TASK_GROUP
1490 	unsigned int tg_min, tg_max, value;
1491 
1492 	/*
1493 	 * Tasks in autogroups or root task group will be
1494 	 * restricted by system defaults.
1495 	 */
1496 	if (task_group_is_autogroup(task_group(p)))
1497 		return uc_req;
1498 	if (task_group(p) == &root_task_group)
1499 		return uc_req;
1500 
1501 	tg_min = task_group(p)->uclamp[UCLAMP_MIN].value;
1502 	tg_max = task_group(p)->uclamp[UCLAMP_MAX].value;
1503 	value = uc_req.value;
1504 	value = clamp(value, tg_min, tg_max);
1505 	uclamp_se_set(&uc_req, value, false);
1506 #endif
1507 
1508 	return uc_req;
1509 }
1510 
1511 /*
1512  * The effective clamp bucket index of a task depends on, by increasing
1513  * priority:
1514  * - the task specific clamp value, when explicitly requested from userspace
1515  * - the task group effective clamp value, for tasks neither in the root
1516  *   group nor in an autogroup
1517  * - the system default clamp value, defined by the sysadmin
1518  */
1519 static inline struct uclamp_se
1520 uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
1521 {
1522 	struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id);
1523 	struct uclamp_se uc_max = uclamp_default[clamp_id];
1524 
1525 	/* System default restrictions always apply */
1526 	if (unlikely(uc_req.value > uc_max.value))
1527 		return uc_max;
1528 
1529 	return uc_req;
1530 }
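
/*
 * Worked example (illustrative values): a task requests UCLAMP_MIN = 800, but
 * its task group allows [tg_min = 0, tg_max = 512], so the request is first
 * clamped to 512 by uclamp_tg_restrict(); if the admin has lowered the system
 * default for UCLAMP_MIN to, say, 256, uclamp_eff_get() then returns that
 * system default, 256.
 */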
1531 
1532 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
1533 {
1534 	struct uclamp_se uc_eff;
1535 
1536 	/* Task currently refcounted: use back-annotated (effective) value */
1537 	if (p->uclamp[clamp_id].active)
1538 		return (unsigned long)p->uclamp[clamp_id].value;
1539 
1540 	uc_eff = uclamp_eff_get(p, clamp_id);
1541 
1542 	return (unsigned long)uc_eff.value;
1543 }
1544 
1545 /*
1546  * When a task is enqueued on a rq, the clamp bucket currently defined by the
1547  * task's uclamp::bucket_id is refcounted on that rq. This also immediately
1548  * updates the rq's clamp value if required.
1549  *
1550  * Tasks can have a task-specific value requested from user-space; track
1551  * within each bucket the maximum value for tasks refcounted in it.
1552  * This "local max aggregation" allows tracking the exact "requested" value
1553  * for each bucket when all its RUNNABLE tasks require the same clamp.
1554  */
1555 static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
1556 				    enum uclamp_id clamp_id)
1557 {
1558 	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1559 	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1560 	struct uclamp_bucket *bucket;
1561 
1562 	lockdep_assert_rq_held(rq);
1563 
1564 	/* Update task effective clamp */
1565 	p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);
1566 
1567 	bucket = &uc_rq->bucket[uc_se->bucket_id];
1568 	bucket->tasks++;
1569 	uc_se->active = true;
1570 
1571 	uclamp_idle_reset(rq, clamp_id, uc_se->value);
1572 
1573 	/*
1574 	 * Local max aggregation: rq buckets always track the max
1575 	 * "requested" clamp value of its RUNNABLE tasks.
1576 	 */
1577 	if (bucket->tasks == 1 || uc_se->value > bucket->value)
1578 		bucket->value = uc_se->value;
1579 
1580 	if (uc_se->value > uclamp_rq_get(rq, clamp_id))
1581 		uclamp_rq_set(rq, clamp_id, uc_se->value);
1582 }
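
/*
 * Worked example of the bucket refcounting above (assuming the default
 * CONFIG_UCLAMP_BUCKETS_COUNT=5, i.e. buckets of ~205 utilization units):
 * two RUNNABLE tasks requesting UCLAMP_MIN of 300 and 350 both land in
 * bucket 1; bucket->tasks becomes 2 and bucket->value tracks the local
 * max, 350. Once both are dequeued the bucket empties and the rq clamp
 * falls back to the highest remaining non-empty bucket, or the idle/default
 * value if there is none.
 */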
1583 
1584 /*
1585  * When a task is dequeued from a rq, the clamp bucket refcounted by the task
1586  * is released. If this is the last task reference counting the rq's max
1587  * active clamp value, then the rq's clamp value is updated.
1588  *
1589  * Both refcounted tasks and rq's cached clamp values are expected to be
1590  * always valid. If it's detected they are not, as defensive programming,
1591  * enforce the expected state and warn.
1592  */
1593 static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
1594 				    enum uclamp_id clamp_id)
1595 {
1596 	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
1597 	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
1598 	struct uclamp_bucket *bucket;
1599 	unsigned int bkt_clamp;
1600 	unsigned int rq_clamp;
1601 
1602 	lockdep_assert_rq_held(rq);
1603 
1604 	/*
1605 	 * If sched_uclamp_used was enabled after task @p was enqueued,
1606 	 * we could end up with an unbalanced call to uclamp_rq_dec_id().
1607 	 *
1608 	 * In this case the uc_se->active flag should be false since no uclamp
1609 	 * accounting was performed at enqueue time and we can just return
1610 	 * here.
1611 	 *
1612 	 * Need to be careful of the following enqueue/dequeue ordering
1613 	 * problem too
1614 	 *
1615 	 *	enqueue(taskA)
1616 	 *	// sched_uclamp_used gets enabled
1617 	 *	enqueue(taskB)
1618 	 *	dequeue(taskA)
1619 	 *	// Must not decrement bucket->tasks here
1620 	 *	dequeue(taskB)
1621 	 *
1622 	 * where we could end up with stale data in uc_se and
1623 	 * bucket[uc_se->bucket_id].
1624 	 *
1625 	 * The following check here eliminates the possibility of such race.
1626 	 */
1627 	if (unlikely(!uc_se->active))
1628 		return;
1629 
1630 	bucket = &uc_rq->bucket[uc_se->bucket_id];
1631 
1632 	SCHED_WARN_ON(!bucket->tasks);
1633 	if (likely(bucket->tasks))
1634 		bucket->tasks--;
1635 
1636 	uc_se->active = false;
1637 
1638 	/*
1639 	 * Keep "local max aggregation" simple and accept to (possibly)
1640 	 * overboost some RUNNABLE tasks in the same bucket.
1641 	 * The rq clamp bucket value is reset to its base value whenever
1642 	 * there are no more RUNNABLE tasks refcounting it.
1643 	 */
1644 	if (likely(bucket->tasks))
1645 		return;
1646 
1647 	rq_clamp = uclamp_rq_get(rq, clamp_id);
1648 	/*
1649 	 * Defensive programming: this should never happen. If it happens,
1650 	 * e.g. due to future modification, warn and fix up the expected value.
1651 	 */
1652 	SCHED_WARN_ON(bucket->value > rq_clamp);
1653 	if (bucket->value >= rq_clamp) {
1654 		bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
1655 		uclamp_rq_set(rq, clamp_id, bkt_clamp);
1656 	}
1657 }
1658 
1659 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
1660 {
1661 	enum uclamp_id clamp_id;
1662 
1663 	/*
1664 	 * Avoid any overhead until uclamp is actually used by userspace.
1665 	 *
1666 	 * The condition is constructed such that a NOP is generated when
1667 	 * sched_uclamp_used is disabled.
1668 	 */
1669 	if (!static_branch_unlikely(&sched_uclamp_used))
1670 		return;
1671 
1672 	if (unlikely(!p->sched_class->uclamp_enabled))
1673 		return;
1674 
1675 	for_each_clamp_id(clamp_id)
1676 		uclamp_rq_inc_id(rq, p, clamp_id);
1677 
1678 	/* Reset clamp idle holding when there is one RUNNABLE task */
1679 	if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
1680 		rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
1681 }
1682 
1683 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
1684 {
1685 	enum uclamp_id clamp_id;
1686 
1687 	/*
1688 	 * Avoid any overhead until uclamp is actually used by userspace.
1689 	 *
1690 	 * The condition is constructed such that a NOP is generated when
1691 	 * sched_uclamp_used is disabled.
1692 	 */
1693 	if (!static_branch_unlikely(&sched_uclamp_used))
1694 		return;
1695 
1696 	if (unlikely(!p->sched_class->uclamp_enabled))
1697 		return;
1698 
1699 	for_each_clamp_id(clamp_id)
1700 		uclamp_rq_dec_id(rq, p, clamp_id);
1701 }
1702 
1703 static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p,
1704 				      enum uclamp_id clamp_id)
1705 {
1706 	if (!p->uclamp[clamp_id].active)
1707 		return;
1708 
1709 	uclamp_rq_dec_id(rq, p, clamp_id);
1710 	uclamp_rq_inc_id(rq, p, clamp_id);
1711 
1712 	/*
1713 	 * Make sure to clear the idle flag if we've transiently reached 0
1714 	 * active tasks on rq.
1715 	 */
1716 	if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE))
1717 		rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
1718 }
1719 
1720 static inline void
1721 uclamp_update_active(struct task_struct *p)
1722 {
1723 	enum uclamp_id clamp_id;
1724 	struct rq_flags rf;
1725 	struct rq *rq;
1726 
1727 	/*
1728 	 * Lock the task and the rq where the task is (or was) queued.
1729 	 *
1730 	 * We might lock the (previous) rq of a !RUNNABLE task, but that's the
1731 	 * price to pay to safely serialize util_{min,max} updates with
1732 	 * enqueues, dequeues and migration operations.
1733 	 * This is the same locking schema used by __set_cpus_allowed_ptr().
1734 	 */
1735 	rq = task_rq_lock(p, &rf);
1736 
1737 	/*
1738 	 * Setting the clamp bucket is serialized by task_rq_lock().
1739 	 * If the task is not yet RUNNABLE and its task_struct is not
1740 	 * affecting a valid clamp bucket, the next time it's enqueued,
1741 	 * it will already see the updated clamp bucket value.
1742 	 */
1743 	for_each_clamp_id(clamp_id)
1744 		uclamp_rq_reinc_id(rq, p, clamp_id);
1745 
1746 	task_rq_unlock(rq, p, &rf);
1747 }
1748 
1749 #ifdef CONFIG_UCLAMP_TASK_GROUP
1750 static inline void
1751 uclamp_update_active_tasks(struct cgroup_subsys_state *css)
1752 {
1753 	struct css_task_iter it;
1754 	struct task_struct *p;
1755 
1756 	css_task_iter_start(css, 0, &it);
1757 	while ((p = css_task_iter_next(&it)))
1758 		uclamp_update_active(p);
1759 	css_task_iter_end(&it);
1760 }
1761 
1762 static void cpu_util_update_eff(struct cgroup_subsys_state *css);
1763 #endif
1764 
1765 #ifdef CONFIG_SYSCTL
1766 #ifdef CONFIG_UCLAMP_TASK_GROUP
1767 static void uclamp_update_root_tg(void)
1768 {
1769 	struct task_group *tg = &root_task_group;
1770 
1771 	uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN],
1772 		      sysctl_sched_uclamp_util_min, false);
1773 	uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
1774 		      sysctl_sched_uclamp_util_max, false);
1775 
1776 	guard(rcu)();
1777 	cpu_util_update_eff(&root_task_group.css);
1778 }
1779 #else
1780 static void uclamp_update_root_tg(void) { }
1781 #endif
1782 
1783 static void uclamp_sync_util_min_rt_default(void)
1784 {
1785 	struct task_struct *g, *p;
1786 
1787 	/*
1788 	 * copy_process()			sysctl_uclamp
1789 	 *					  uclamp_min_rt = X;
1790 	 *   write_lock(&tasklist_lock)		  read_lock(&tasklist_lock)
1791 	 *   // link thread			  smp_mb__after_spinlock()
1792 	 *   write_unlock(&tasklist_lock)	  read_unlock(&tasklist_lock);
1793 	 *   sched_post_fork()			  for_each_process_thread()
1794 	 *     __uclamp_sync_rt()		    __uclamp_sync_rt()
1795 	 *
1796 	 * Ensures that either sched_post_fork() will observe the new
1797 	 * uclamp_min_rt or for_each_process_thread() will observe the new
1798 	 * task.
1799 	 */
1800 	read_lock(&tasklist_lock);
1801 	smp_mb__after_spinlock();
1802 	read_unlock(&tasklist_lock);
1803 
1804 	guard(rcu)();
1805 	for_each_process_thread(g, p)
1806 		uclamp_update_util_min_rt_default(p);
1807 }
1808 
1809 static int sysctl_sched_uclamp_handler(const struct ctl_table *table, int write,
1810 				void *buffer, size_t *lenp, loff_t *ppos)
1811 {
1812 	bool update_root_tg = false;
1813 	int old_min, old_max, old_min_rt;
1814 	int result;
1815 
1816 	guard(mutex)(&uclamp_mutex);
1817 
1818 	old_min = sysctl_sched_uclamp_util_min;
1819 	old_max = sysctl_sched_uclamp_util_max;
1820 	old_min_rt = sysctl_sched_uclamp_util_min_rt_default;
1821 
1822 	result = proc_dointvec(table, write, buffer, lenp, ppos);
1823 	if (result)
1824 		goto undo;
1825 	if (!write)
1826 		return 0;
1827 
1828 	if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
1829 	    sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE	||
1830 	    sysctl_sched_uclamp_util_min_rt_default > SCHED_CAPACITY_SCALE) {
1831 
1832 		result = -EINVAL;
1833 		goto undo;
1834 	}
1835 
1836 	if (old_min != sysctl_sched_uclamp_util_min) {
1837 		uclamp_se_set(&uclamp_default[UCLAMP_MIN],
1838 			      sysctl_sched_uclamp_util_min, false);
1839 		update_root_tg = true;
1840 	}
1841 	if (old_max != sysctl_sched_uclamp_util_max) {
1842 		uclamp_se_set(&uclamp_default[UCLAMP_MAX],
1843 			      sysctl_sched_uclamp_util_max, false);
1844 		update_root_tg = true;
1845 	}
1846 
1847 	if (update_root_tg) {
1848 		static_branch_enable(&sched_uclamp_used);
1849 		uclamp_update_root_tg();
1850 	}
1851 
1852 	if (old_min_rt != sysctl_sched_uclamp_util_min_rt_default) {
1853 		static_branch_enable(&sched_uclamp_used);
1854 		uclamp_sync_util_min_rt_default();
1855 	}
1856 
1857 	/*
1858 	 * We update all RUNNABLE tasks only when task groups are in use.
1859 	 * Otherwise, keep it simple and do just a lazy update at each next
1860 	 * task enqueue time.
1861 	 */
1862 	return 0;
1863 
1864 undo:
1865 	sysctl_sched_uclamp_util_min = old_min;
1866 	sysctl_sched_uclamp_util_max = old_max;
1867 	sysctl_sched_uclamp_util_min_rt_default = old_min_rt;
1868 	return result;
1869 }
1870 #endif
1871 
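/*
 * Illustrative note, not part of the original source: the three sysctls
 * handled above are typically exposed under /proc/sys/kernel/ as
 * sched_util_clamp_min, sched_util_clamp_max and
 * sched_util_clamp_min_rt_default (the exact names depend on how the sysctl
 * table is registered).  For example, an administrator might cap the
 * system-wide maximum clamp with:
 *
 *	echo 512 > /proc/sys/kernel/sched_util_clamp_max
 *
 * Writes are validated against SCHED_CAPACITY_SCALE by the handler above and
 * rolled back on error.
 */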
1872 static void uclamp_fork(struct task_struct *p)
1873 {
1874 	enum uclamp_id clamp_id;
1875 
1876 	/*
1877 	 * We don't need to hold task_rq_lock() when updating p->uclamp_* here
1878 	 * as the task is still at its early fork stages.
1879 	 */
1880 	for_each_clamp_id(clamp_id)
1881 		p->uclamp[clamp_id].active = false;
1882 
1883 	if (likely(!p->sched_reset_on_fork))
1884 		return;
1885 
1886 	for_each_clamp_id(clamp_id) {
1887 		uclamp_se_set(&p->uclamp_req[clamp_id],
1888 			      uclamp_none(clamp_id), false);
1889 	}
1890 }
1891 
1892 static void uclamp_post_fork(struct task_struct *p)
1893 {
1894 	uclamp_update_util_min_rt_default(p);
1895 }
1896 
1897 static void __init init_uclamp_rq(struct rq *rq)
1898 {
1899 	enum uclamp_id clamp_id;
1900 	struct uclamp_rq *uc_rq = rq->uclamp;
1901 
1902 	for_each_clamp_id(clamp_id) {
1903 		uc_rq[clamp_id] = (struct uclamp_rq) {
1904 			.value = uclamp_none(clamp_id)
1905 		};
1906 	}
1907 
1908 	rq->uclamp_flags = UCLAMP_FLAG_IDLE;
1909 }
1910 
1911 static void __init init_uclamp(void)
1912 {
1913 	struct uclamp_se uc_max = {};
1914 	enum uclamp_id clamp_id;
1915 	int cpu;
1916 
1917 	for_each_possible_cpu(cpu)
1918 		init_uclamp_rq(cpu_rq(cpu));
1919 
1920 	for_each_clamp_id(clamp_id) {
1921 		uclamp_se_set(&init_task.uclamp_req[clamp_id],
1922 			      uclamp_none(clamp_id), false);
1923 	}
1924 
1925 	/* System defaults allow max clamp values for both indexes */
1926 	uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false);
1927 	for_each_clamp_id(clamp_id) {
1928 		uclamp_default[clamp_id] = uc_max;
1929 #ifdef CONFIG_UCLAMP_TASK_GROUP
1930 		root_task_group.uclamp_req[clamp_id] = uc_max;
1931 		root_task_group.uclamp[clamp_id] = uc_max;
1932 #endif
1933 	}
1934 }
1935 
1936 #else /* !CONFIG_UCLAMP_TASK */
1937 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { }
1938 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
1939 static inline void uclamp_fork(struct task_struct *p) { }
1940 static inline void uclamp_post_fork(struct task_struct *p) { }
1941 static inline void init_uclamp(void) { }
1942 #endif /* CONFIG_UCLAMP_TASK */
1943 
1944 bool sched_task_on_rq(struct task_struct *p)
1945 {
1946 	return task_on_rq_queued(p);
1947 }
1948 
1949 unsigned long get_wchan(struct task_struct *p)
1950 {
1951 	unsigned long ip = 0;
1952 	unsigned int state;
1953 
1954 	if (!p || p == current)
1955 		return 0;
1956 
1957 	/* Only get wchan if task is blocked and we can keep it that way. */
1958 	raw_spin_lock_irq(&p->pi_lock);
1959 	state = READ_ONCE(p->__state);
1960 	smp_rmb(); /* see try_to_wake_up() */
1961 	if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
1962 		ip = __get_wchan(p);
1963 	raw_spin_unlock_irq(&p->pi_lock);
1964 
1965 	return ip;
1966 }
1967 
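/*
 * Descriptive note, not part of the original source: the address returned by
 * get_wchan() is what userspace ultimately sees (symbolized) via
 * /proc/<pid>/wchan, i.e. "where in the kernel is this task sleeping".
 */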
1968 void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
1969 {
1970 	if (!(flags & ENQUEUE_NOCLOCK))
1971 		update_rq_clock(rq);
1972 
1973 	if (!(flags & ENQUEUE_RESTORE)) {
1974 		sched_info_enqueue(rq, p);
1975 		psi_enqueue(p, (flags & ENQUEUE_WAKEUP) && !(flags & ENQUEUE_MIGRATED));
1976 	}
1977 
1978 	uclamp_rq_inc(rq, p);
1979 	p->sched_class->enqueue_task(rq, p, flags);
1980 
1981 	if (sched_core_enabled(rq))
1982 		sched_core_enqueue(rq, p);
1983 }
1984 
1985 void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
1986 {
1987 	if (sched_core_enabled(rq))
1988 		sched_core_dequeue(rq, p, flags);
1989 
1990 	if (!(flags & DEQUEUE_NOCLOCK))
1991 		update_rq_clock(rq);
1992 
1993 	if (!(flags & DEQUEUE_SAVE)) {
1994 		sched_info_dequeue(rq, p);
1995 		psi_dequeue(p, flags & DEQUEUE_SLEEP);
1996 	}
1997 
1998 	uclamp_rq_dec(rq, p);
1999 	p->sched_class->dequeue_task(rq, p, flags);
2000 }
2001 
2002 void activate_task(struct rq *rq, struct task_struct *p, int flags)
2003 {
2004 	if (task_on_rq_migrating(p))
2005 		flags |= ENQUEUE_MIGRATED;
2006 	if (flags & ENQUEUE_MIGRATED)
2007 		sched_mm_cid_migrate_to(rq, p);
2008 
2009 	enqueue_task(rq, p, flags);
2010 
2011 	WRITE_ONCE(p->on_rq, TASK_ON_RQ_QUEUED);
2012 	ASSERT_EXCLUSIVE_WRITER(p->on_rq);
2013 }
2014 
2015 void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
2016 {
2017 	WRITE_ONCE(p->on_rq, (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING);
2018 	ASSERT_EXCLUSIVE_WRITER(p->on_rq);
2019 
2020 	dequeue_task(rq, p, flags);
2021 }
2022 
2023 /**
2024  * task_curr - is this task currently executing on a CPU?
2025  * @p: the task in question.
2026  *
2027  * Return: 1 if the task is currently executing. 0 otherwise.
2028  */
2029 inline int task_curr(const struct task_struct *p)
2030 {
2031 	return cpu_curr(task_cpu(p)) == p;
2032 }
2033 
2034 /*
2035  * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock;
2036  * use the balance_callback list if you want balancing.
2037  *
2038  * This means any call to check_class_changed() must be followed by a call to
2039  * balance_callback().
2040  */
2041 void check_class_changed(struct rq *rq, struct task_struct *p,
2042 			 const struct sched_class *prev_class,
2043 			 int oldprio)
2044 {
2045 	if (prev_class != p->sched_class) {
2046 		if (prev_class->switched_from)
2047 			prev_class->switched_from(rq, p);
2048 
2049 		p->sched_class->switched_to(rq, p);
2050 	} else if (oldprio != p->prio || dl_task(p))
2051 		p->sched_class->prio_changed(rq, p, oldprio);
2052 }
2053 
2054 void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags)
2055 {
2056 	if (p->sched_class == rq->curr->sched_class)
2057 		rq->curr->sched_class->wakeup_preempt(rq, p, flags);
2058 	else if (sched_class_above(p->sched_class, rq->curr->sched_class))
2059 		resched_curr(rq);
2060 
2061 	/*
2062 	 * A queue event has occurred, and we're going to schedule.  In
2063 	 * this case, we can save a useless back to back clock update.
2064 	 */
2065 	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
2066 		rq_clock_skip_update(rq);
2067 }
2068 
2069 static __always_inline
2070 int __task_state_match(struct task_struct *p, unsigned int state)
2071 {
2072 	if (READ_ONCE(p->__state) & state)
2073 		return 1;
2074 
2075 	if (READ_ONCE(p->saved_state) & state)
2076 		return -1;
2077 
2078 	return 0;
2079 }
2080 
2081 static __always_inline
2082 int task_state_match(struct task_struct *p, unsigned int state)
2083 {
2084 	/*
2085 	 * Serialize against current_save_and_set_rtlock_wait_state(),
2086 	 * current_restore_rtlock_saved_state(), and __refrigerator().
2087 	 */
2088 	guard(raw_spinlock_irq)(&p->pi_lock);
2089 	return __task_state_match(p, state);
2090 }
2091 
2092 /*
2093  * wait_task_inactive - wait for a thread to unschedule.
2094  *
2095  * Wait for the thread to block in any of the states set in @match_state.
2096  * If it changes, i.e. @p might have woken up, then return zero.  When we
2097  * succeed in waiting for @p to be off its CPU, we return a positive number
2098  * (its total switch count).  If a second call a short while later returns the
2099  * same number, the caller can be sure that @p has remained unscheduled the
2100  * whole time.
2101  *
2102  * The caller must ensure that the task *will* unschedule sometime soon,
2103  * else this function might spin for a *long* time. This function can't
2104  * be called with interrupts off, or it may introduce deadlock with
2105  * smp_call_function() if an IPI is sent by the same process we are
2106  * waiting to become inactive.
2107  */
2108 unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
2109 {
2110 	int running, queued, match;
2111 	struct rq_flags rf;
2112 	unsigned long ncsw;
2113 	struct rq *rq;
2114 
2115 	for (;;) {
2116 		/*
2117 		 * We do the initial early heuristics without holding
2118 		 * any task-queue locks at all. We'll only try to get
2119 		 * the runqueue lock when things look like they will
2120 		 * work out!
2121 		 */
2122 		rq = task_rq(p);
2123 
2124 		/*
2125 		 * If the task is actively running on another CPU
2126 		 * still, just relax and busy-wait without holding
2127 		 * any locks.
2128 		 *
2129 		 * NOTE! Since we don't hold any locks, it's not
2130 		 * even certain that "rq" stays as the right runqueue!
2131 		 * But we don't care, since "task_on_cpu()" will
2132 		 * return false if the runqueue has changed and p
2133 		 * is actually now running somewhere else!
2134 		 */
2135 		while (task_on_cpu(rq, p)) {
2136 			if (!task_state_match(p, match_state))
2137 				return 0;
2138 			cpu_relax();
2139 		}
2140 
2141 		/*
2142 		 * Ok, time to look more closely! We need the rq
2143 		 * lock now, to be *sure*. If we're wrong, we'll
2144 		 * just go back and repeat.
2145 		 */
2146 		rq = task_rq_lock(p, &rf);
2147 		trace_sched_wait_task(p);
2148 		running = task_on_cpu(rq, p);
2149 		queued = task_on_rq_queued(p);
2150 		ncsw = 0;
2151 		if ((match = __task_state_match(p, match_state))) {
2152 			/*
2153 			 * When matching on p->saved_state, consider this task
2154 			 * still queued so it will wait.
2155 			 */
2156 			if (match < 0)
2157 				queued = 1;
2158 			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
2159 		}
2160 		task_rq_unlock(rq, p, &rf);
2161 
2162 		/*
2163 		 * If it changed from the expected state, bail out now.
2164 		 */
2165 		if (unlikely(!ncsw))
2166 			break;
2167 
2168 		/*
2169 		 * Was it really running after all now that we
2170 		 * checked with the proper locks actually held?
2171 		 *
2172 		 * Oops. Go back and try again..
2173 		 */
2174 		if (unlikely(running)) {
2175 			cpu_relax();
2176 			continue;
2177 		}
2178 
2179 		/*
2180 		 * It's not enough that it's not actively running,
2181 		 * it must be off the runqueue _entirely_, and not
2182 		 * preempted!
2183 		 *
2184 		 * So if it was still runnable (but just not actively
2185 		 * running right now), it's preempted, and we should
2186 		 * yield - it could be a while.
2187 		 */
2188 		if (unlikely(queued)) {
2189 			ktime_t to = NSEC_PER_SEC / HZ;
2190 
2191 			set_current_state(TASK_UNINTERRUPTIBLE);
2192 			schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD);
2193 			continue;
2194 		}
2195 
2196 		/*
2197 		 * Ahh, all good. It wasn't running, and it wasn't
2198 		 * runnable, which means that it will never become
2199 		 * running in the future either. We're all done!
2200 		 */
2201 		break;
2202 	}
2203 
2204 	return ncsw;
2205 }
2206 
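/*
 * Illustrative sketch, not part of the original source: how a caller might
 * use the switch-count cookie described above to verify that a task stayed
 * off-CPU between two points.  The task pointer @t and the state mask are
 * hypothetical.
 *
 *	unsigned long ncsw = wait_task_inactive(t, TASK_UNINTERRUPTIBLE);
 *	if (!ncsw)
 *		return -EAGAIN;		// @t changed state / may have run
 *	...
 *	// Later: an unchanged cookie means @t never scheduled in between.
 *	if (wait_task_inactive(t, TASK_UNINTERRUPTIBLE) != ncsw)
 *		return -EAGAIN;
 */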
2207 #ifdef CONFIG_SMP
2208 
2209 static void
2210 __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx);
2211 
2212 static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
2213 {
2214 	struct affinity_context ac = {
2215 		.new_mask  = cpumask_of(rq->cpu),
2216 		.flags     = SCA_MIGRATE_DISABLE,
2217 	};
2218 
2219 	if (likely(!p->migration_disabled))
2220 		return;
2221 
2222 	if (p->cpus_ptr != &p->cpus_mask)
2223 		return;
2224 
2225 	/*
2226 	 * Violates locking rules! See comment in __do_set_cpus_allowed().
2227 	 */
2228 	__do_set_cpus_allowed(p, &ac);
2229 }
2230 
2231 void migrate_disable(void)
2232 {
2233 	struct task_struct *p = current;
2234 
2235 	if (p->migration_disabled) {
2236 #ifdef CONFIG_DEBUG_PREEMPT
2237 		/*
2238 		 * Warn about overflow half-way through the range.
2239 		 */
2240 		WARN_ON_ONCE((s16)p->migration_disabled < 0);
2241 #endif
2242 		p->migration_disabled++;
2243 		return;
2244 	}
2245 
2246 	guard(preempt)();
2247 	this_rq()->nr_pinned++;
2248 	p->migration_disabled = 1;
2249 }
2250 EXPORT_SYMBOL_GPL(migrate_disable);
2251 
2252 void migrate_enable(void)
2253 {
2254 	struct task_struct *p = current;
2255 	struct affinity_context ac = {
2256 		.new_mask  = &p->cpus_mask,
2257 		.flags     = SCA_MIGRATE_ENABLE,
2258 	};
2259 
2260 #ifdef CONFIG_DEBUG_PREEMPT
2261 	/*
2262 	 * Check both overflow from migrate_disable() and superfluous
2263 	 * migrate_enable().
2264 	 */
2265 	if (WARN_ON_ONCE((s16)p->migration_disabled <= 0))
2266 		return;
2267 #endif
2268 
2269 	if (p->migration_disabled > 1) {
2270 		p->migration_disabled--;
2271 		return;
2272 	}
2273 
2274 	/*
2275 	 * Ensure stop_task runs either before or after this, and that
2276 	 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
2277 	 */
2278 	guard(preempt)();
2279 	if (p->cpus_ptr != &p->cpus_mask)
2280 		__set_cpus_allowed_ptr(p, &ac);
2281 	/*
2282 	 * Mustn't clear migration_disabled() until cpus_ptr points back at the
2283 	 * regular cpus_mask, otherwise things that race (eg.
2284 	 * select_fallback_rq) get confused.
2285 	 */
2286 	barrier();
2287 	p->migration_disabled = 0;
2288 	this_rq()->nr_pinned--;
2289 }
2290 EXPORT_SYMBOL_GPL(migrate_enable);
2291 
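/*
 * Illustrative sketch, not part of the original source: the intended usage
 * of the pair above.  Inside the region the task may still be preempted,
 * but it cannot be moved to another CPU, so the CPU number stays stable;
 * do_something_on() is a hypothetical helper.
 *
 *	migrate_disable();
 *	do_something_on(smp_processor_id());	// CPU cannot change here
 *	migrate_enable();
 */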
2292 static inline bool rq_has_pinned_tasks(struct rq *rq)
2293 {
2294 	return rq->nr_pinned;
2295 }
2296 
2297 /*
2298  * Per-CPU kthreads are allowed to run on !active && online CPUs, see
2299  * __set_cpus_allowed_ptr() and select_fallback_rq().
2300  */
2301 static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
2302 {
2303 	/* When not in the task's cpumask, no point in looking further. */
2304 	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
2305 		return false;
2306 
2307 	/* migrate_disabled() must be allowed to finish. */
2308 	if (is_migration_disabled(p))
2309 		return cpu_online(cpu);
2310 
2311 	/* Non-kernel threads are not allowed during either online or offline. */
2312 	if (!(p->flags & PF_KTHREAD))
2313 		return cpu_active(cpu) && task_cpu_possible(cpu, p);
2314 
2315 	/* KTHREAD_IS_PER_CPU is always allowed. */
2316 	if (kthread_is_per_cpu(p))
2317 		return cpu_online(cpu);
2318 
2319 	/* Regular kernel threads don't get to stay during offline. */
2320 	if (cpu_dying(cpu))
2321 		return false;
2322 
2323 	/* But are allowed during online. */
2324 	return cpu_online(cpu);
2325 }
2326 
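/*
 * Summary of is_cpu_allowed(), descriptive only (not part of the original
 * source): a CPU is usable by @p when it is in p->cpus_ptr and, in addition:
 *  - migration-disabled tasks and KTHREAD_IS_PER_CPU kthreads only need the
 *    CPU to be online;
 *  - other kernel threads need it online and not dying;
 *  - regular (user) tasks need it active and possible for @p.
 */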
2327 /*
2328  * This is how migration works:
2329  *
2330  * 1) we invoke migration_cpu_stop() on the target CPU using
2331  *    stop_one_cpu().
2332  * 2) stopper starts to run (implicitly forcing the migrated thread
2333  *    off the CPU)
2334  * 3) it checks whether the migrated task is still in the wrong runqueue.
2335  * 4) if it's in the wrong runqueue then the migration thread removes
2336  *    it and puts it into the right queue.
2337  * 5) stopper completes and stop_one_cpu() returns and the migration
2338  *    is done.
2339  */
2340 
2341 /*
2342  * move_queued_task - move a queued task to new rq.
2343  *
2344  * Returns (locked) new rq. Old rq's lock is released.
2345  */
2346 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
2347 				   struct task_struct *p, int new_cpu)
2348 {
2349 	lockdep_assert_rq_held(rq);
2350 
2351 	deactivate_task(rq, p, DEQUEUE_NOCLOCK);
2352 	set_task_cpu(p, new_cpu);
2353 	rq_unlock(rq, rf);
2354 
2355 	rq = cpu_rq(new_cpu);
2356 
2357 	rq_lock(rq, rf);
2358 	WARN_ON_ONCE(task_cpu(p) != new_cpu);
2359 	activate_task(rq, p, 0);
2360 	wakeup_preempt(rq, p, 0);
2361 
2362 	return rq;
2363 }
2364 
2365 struct migration_arg {
2366 	struct task_struct		*task;
2367 	int				dest_cpu;
2368 	struct set_affinity_pending	*pending;
2369 };
2370 
2371 /*
2372  * @refs: number of wait_for_completion()
2373  * @stop_pending: is @stop_work in use
2374  */
2375 struct set_affinity_pending {
2376 	refcount_t		refs;
2377 	unsigned int		stop_pending;
2378 	struct completion	done;
2379 	struct cpu_stop_work	stop_work;
2380 	struct migration_arg	arg;
2381 };
2382 
2383 /*
2384  * Move (not current) task off this CPU, onto the destination CPU. We're doing
2385  * this because either it can't run here any more (set_cpus_allowed()
2386  * away from this CPU, or CPU going down), or because we're
2387  * attempting to rebalance this task on exec (sched_exec).
2388  *
2389  * So we race with normal scheduler movements, but that's OK, as long
2390  * as the task is no longer on this CPU.
2391  */
2392 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
2393 				 struct task_struct *p, int dest_cpu)
2394 {
2395 	/* Affinity changed (again). */
2396 	if (!is_cpu_allowed(p, dest_cpu))
2397 		return rq;
2398 
2399 	rq = move_queued_task(rq, rf, p, dest_cpu);
2400 
2401 	return rq;
2402 }
2403 
2404 /*
2405  * migration_cpu_stop - this will be executed by a high-prio stopper thread
2406  * and performs thread migration by bumping thread off CPU then
2407  * 'pushing' onto another runqueue.
2408  */
2409 static int migration_cpu_stop(void *data)
2410 {
2411 	struct migration_arg *arg = data;
2412 	struct set_affinity_pending *pending = arg->pending;
2413 	struct task_struct *p = arg->task;
2414 	struct rq *rq = this_rq();
2415 	bool complete = false;
2416 	struct rq_flags rf;
2417 
2418 	/*
2419 	 * The original target CPU might have gone down and we might
2420 	 * be on another CPU but it doesn't matter.
2421 	 */
2422 	local_irq_save(rf.flags);
2423 	/*
2424 	 * We need to explicitly wake pending tasks before running
2425 	 * __migrate_task() such that we will not miss enforcing cpus_ptr
2426 	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
2427 	 */
2428 	flush_smp_call_function_queue();
2429 
2430 	raw_spin_lock(&p->pi_lock);
2431 	rq_lock(rq, &rf);
2432 
2433 	/*
2434 	 * If we were passed a pending, then ->stop_pending was set, thus
2435 	 * p->migration_pending must have remained stable.
2436 	 */
2437 	WARN_ON_ONCE(pending && pending != p->migration_pending);
2438 
2439 	/*
2440 	 * If task_rq(p) != rq, it cannot be migrated here, because we're
2441 	 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
2442 	 * we're holding p->pi_lock.
2443 	 */
2444 	if (task_rq(p) == rq) {
2445 		if (is_migration_disabled(p))
2446 			goto out;
2447 
2448 		if (pending) {
2449 			p->migration_pending = NULL;
2450 			complete = true;
2451 
2452 			if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask))
2453 				goto out;
2454 		}
2455 
2456 		if (task_on_rq_queued(p)) {
2457 			update_rq_clock(rq);
2458 			rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
2459 		} else {
2460 			p->wake_cpu = arg->dest_cpu;
2461 		}
2462 
2463 		/*
2464 		 * XXX __migrate_task() can fail, at which point we might end
2465 		 * up running on a dodgy CPU, AFAICT this can only happen
2466 		 * during CPU hotplug, at which point we'll get pushed out
2467 		 * anyway, so it's probably not a big deal.
2468 		 */
2469 
2470 	} else if (pending) {
2471 		/*
2472 		 * This happens when we get migrated between migrate_enable()'s
2473 		 * preempt_enable() and scheduling the stopper task. At that
2474 		 * point we're a regular task again and not current anymore.
2475 		 *
2476 		 * A !PREEMPT kernel has a giant hole here, which makes it far
2477 		 * more likely.
2478 		 */
2479 
2480 		/*
2481 		 * The task moved before the stopper got to run. We're holding
2482 		 * ->pi_lock, so the allowed mask is stable - if it got
2483 		 * somewhere allowed, we're done.
2484 		 */
2485 		if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) {
2486 			p->migration_pending = NULL;
2487 			complete = true;
2488 			goto out;
2489 		}
2490 
2491 		/*
2492 		 * When migrate_enable() hits a rq mis-match we can't reliably
2493 		 * determine is_migration_disabled() and so have to chase after
2494 		 * it.
2495 		 */
2496 		WARN_ON_ONCE(!pending->stop_pending);
2497 		preempt_disable();
2498 		task_rq_unlock(rq, p, &rf);
2499 		stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
2500 				    &pending->arg, &pending->stop_work);
2501 		preempt_enable();
2502 		return 0;
2503 	}
2504 out:
2505 	if (pending)
2506 		pending->stop_pending = false;
2507 	task_rq_unlock(rq, p, &rf);
2508 
2509 	if (complete)
2510 		complete_all(&pending->done);
2511 
2512 	return 0;
2513 }
2514 
2515 int push_cpu_stop(void *arg)
2516 {
2517 	struct rq *lowest_rq = NULL, *rq = this_rq();
2518 	struct task_struct *p = arg;
2519 
2520 	raw_spin_lock_irq(&p->pi_lock);
2521 	raw_spin_rq_lock(rq);
2522 
2523 	if (task_rq(p) != rq)
2524 		goto out_unlock;
2525 
2526 	if (is_migration_disabled(p)) {
2527 		p->migration_flags |= MDF_PUSH;
2528 		goto out_unlock;
2529 	}
2530 
2531 	p->migration_flags &= ~MDF_PUSH;
2532 
2533 	if (p->sched_class->find_lock_rq)
2534 		lowest_rq = p->sched_class->find_lock_rq(p, rq);
2535 
2536 	if (!lowest_rq)
2537 		goto out_unlock;
2538 
2539 	// XXX validate p is still the highest prio task
2540 	if (task_rq(p) == rq) {
2541 		deactivate_task(rq, p, 0);
2542 		set_task_cpu(p, lowest_rq->cpu);
2543 		activate_task(lowest_rq, p, 0);
2544 		resched_curr(lowest_rq);
2545 	}
2546 
2547 	double_unlock_balance(rq, lowest_rq);
2548 
2549 out_unlock:
2550 	rq->push_busy = false;
2551 	raw_spin_rq_unlock(rq);
2552 	raw_spin_unlock_irq(&p->pi_lock);
2553 
2554 	put_task_struct(p);
2555 	return 0;
2556 }
2557 
2558 /*
2559  * sched_class::set_cpus_allowed must do the below, but is not required to
2560  * actually call this function.
2561  */
2562 void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx)
2563 {
2564 	if (ctx->flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) {
2565 		p->cpus_ptr = ctx->new_mask;
2566 		return;
2567 	}
2568 
2569 	cpumask_copy(&p->cpus_mask, ctx->new_mask);
2570 	p->nr_cpus_allowed = cpumask_weight(ctx->new_mask);
2571 
2572 	/*
2573 	 * Swap in a new user_cpus_ptr if SCA_USER flag set
2574 	 */
2575 	if (ctx->flags & SCA_USER)
2576 		swap(p->user_cpus_ptr, ctx->user_mask);
2577 }
2578 
2579 static void
2580 __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
2581 {
2582 	struct rq *rq = task_rq(p);
2583 	bool queued, running;
2584 
2585 	/*
2586 	 * This here violates the locking rules for affinity, since we're only
2587 	 * supposed to change these variables while holding both rq->lock and
2588 	 * p->pi_lock.
2589 	 *
2590 	 * HOWEVER, it magically works, because ttwu() is the only code that
2591 	 * accesses these variables under p->pi_lock and only does so after
2592 	 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
2593 	 * before finish_task().
2594 	 *
2595 	 * XXX do further audits, this smells like something putrid.
2596 	 */
2597 	if (ctx->flags & SCA_MIGRATE_DISABLE)
2598 		SCHED_WARN_ON(!p->on_cpu);
2599 	else
2600 		lockdep_assert_held(&p->pi_lock);
2601 
2602 	queued = task_on_rq_queued(p);
2603 	running = task_current(rq, p);
2604 
2605 	if (queued) {
2606 		/*
2607 		 * Because __kthread_bind() calls this on blocked tasks without
2608 		 * holding rq->lock.
2609 		 */
2610 		lockdep_assert_rq_held(rq);
2611 		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
2612 	}
2613 	if (running)
2614 		put_prev_task(rq, p);
2615 
2616 	p->sched_class->set_cpus_allowed(p, ctx);
2617 
2618 	if (queued)
2619 		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
2620 	if (running)
2621 		set_next_task(rq, p);
2622 }
2623 
2624 /*
2625  * Used for kthread_bind() and select_fallback_rq(), in both cases the user
2626  * affinity (if any) should be destroyed too.
2627  */
2628 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
2629 {
2630 	struct affinity_context ac = {
2631 		.new_mask  = new_mask,
2632 		.user_mask = NULL,
2633 		.flags     = SCA_USER,	/* clear the user requested mask */
2634 	};
2635 	union cpumask_rcuhead {
2636 		cpumask_t cpumask;
2637 		struct rcu_head rcu;
2638 	};
2639 
2640 	__do_set_cpus_allowed(p, &ac);
2641 
2642 	/*
2643 	 * Because this is called with p->pi_lock held, it is not possible
2644 	 * to use kfree() here (when PREEMPT_RT=y), therefore punt to using
2645 	 * kfree_rcu().
2646 	 */
2647 	kfree_rcu((union cpumask_rcuhead *)ac.user_mask, rcu);
2648 }
2649 
2650 int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
2651 		      int node)
2652 {
2653 	cpumask_t *user_mask;
2654 	unsigned long flags;
2655 
2656 	/*
2657 	 * Always clear dst->user_cpus_ptr first, as the source and destination
2658 	 * user_cpus_ptr may differ by now due to racing.
2659 	 */
2660 	dst->user_cpus_ptr = NULL;
2661 
2662 	/*
2663 	 * This check is racy and losing the race is a valid situation.
2664 	 * It is not worth the extra overhead of taking the pi_lock on
2665 	 * every fork/clone.
2666 	 */
2667 	if (data_race(!src->user_cpus_ptr))
2668 		return 0;
2669 
2670 	user_mask = alloc_user_cpus_ptr(node);
2671 	if (!user_mask)
2672 		return -ENOMEM;
2673 
2674 	/*
2675 	 * Use pi_lock to protect content of user_cpus_ptr
2676 	 *
2677 	 * Though unlikely, user_cpus_ptr can be reset to NULL by a concurrent
2678 	 * do_set_cpus_allowed().
2679 	 */
2680 	raw_spin_lock_irqsave(&src->pi_lock, flags);
2681 	if (src->user_cpus_ptr) {
2682 		swap(dst->user_cpus_ptr, user_mask);
2683 		cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
2684 	}
2685 	raw_spin_unlock_irqrestore(&src->pi_lock, flags);
2686 
2687 	if (unlikely(user_mask))
2688 		kfree(user_mask);
2689 
2690 	return 0;
2691 }
2692 
2693 static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p)
2694 {
2695 	struct cpumask *user_mask = NULL;
2696 
2697 	swap(p->user_cpus_ptr, user_mask);
2698 
2699 	return user_mask;
2700 }
2701 
2702 void release_user_cpus_ptr(struct task_struct *p)
2703 {
2704 	kfree(clear_user_cpus_ptr(p));
2705 }
2706 
2707 /*
2708  * This function is wildly self concurrent; here be dragons.
2709  *
2710  *
2711  * When given a valid mask, __set_cpus_allowed_ptr() must block until the
2712  * designated task is enqueued on an allowed CPU. If that task is currently
2713  * running, we have to kick it out using the CPU stopper.
2714  *
2715  * Migrate-Disable comes along and tramples all over our nice sandcastle.
2716  * Consider:
2717  *
2718  *     Initial conditions: P0->cpus_mask = [0, 1]
2719  *
2720  *     P0@CPU0                  P1
2721  *
2722  *     migrate_disable();
2723  *     <preempted>
2724  *                              set_cpus_allowed_ptr(P0, [1]);
2725  *
2726  * P1 *cannot* return from this set_cpus_allowed_ptr() call until P0 executes
2727  * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region).
2728  * This means we need the following scheme:
2729  *
2730  *     P0@CPU0                  P1
2731  *
2732  *     migrate_disable();
2733  *     <preempted>
2734  *                              set_cpus_allowed_ptr(P0, [1]);
2735  *                                <blocks>
2736  *     <resumes>
2737  *     migrate_enable();
2738  *       __set_cpus_allowed_ptr();
2739  *       <wakes local stopper>
2740  *                         `--> <woken on migration completion>
2741  *
2742  * Now the fun stuff: there may be several P1-like tasks, i.e. multiple
2743  * concurrent set_cpus_allowed_ptr(P0, [*]) calls. CPU affinity changes of any
2744  * task p are serialized by p->pi_lock, which we can leverage: the one that
2745  * should come into effect at the end of the Migrate-Disable region is the last
2746  * one. This means we only need to track a single cpumask (i.e. p->cpus_mask),
2747  * but we still need to properly signal those waiting tasks at the appropriate
2748  * moment.
2749  *
2750  * This is implemented using struct set_affinity_pending. The first
2751  * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will
2752  * set up an instance of that struct and install it on the targeted task_struct.
2753  * Any and all further callers will reuse that instance. Those then wait for
2754  * a completion signaled at the tail of the CPU stopper callback (1), triggered
2755  * on the end of the Migrate-Disable region (i.e. outermost migrate_enable()).
2756  *
2757  *
2758  * (1) In the cases covered above. There is one more where the completion is
2759  * signaled within affine_move_task() itself: when a subsequent affinity request
2760  * occurs after the stopper bailed out due to the targeted task still being
2761  * Migrate-Disable. Consider:
2762  *
2763  *     Initial conditions: P0->cpus_mask = [0, 1]
2764  *
2765  *     CPU0		  P1				P2
2766  *     <P0>
2767  *       migrate_disable();
2768  *       <preempted>
2769  *                        set_cpus_allowed_ptr(P0, [1]);
2770  *                          <blocks>
2771  *     <migration/0>
2772  *       migration_cpu_stop()
2773  *         is_migration_disabled()
2774  *           <bails>
2775  *                                                       set_cpus_allowed_ptr(P0, [0, 1]);
2776  *                                                         <signal completion>
2777  *                          <awakes>
2778  *
2779  * Note that the above is safe vs a concurrent migrate_enable(), as any
2780  * pending affinity completion is preceded by an uninstallation of
2781  * p->migration_pending done with p->pi_lock held.
2782  */
2783 static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf,
2784 			    int dest_cpu, unsigned int flags)
2785 	__releases(rq->lock)
2786 	__releases(p->pi_lock)
2787 {
2788 	struct set_affinity_pending my_pending = { }, *pending = NULL;
2789 	bool stop_pending, complete = false;
2790 
2791 	/* Can the task run on the task's current CPU? If so, we're done */
2792 	if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
2793 		struct task_struct *push_task = NULL;
2794 
2795 		if ((flags & SCA_MIGRATE_ENABLE) &&
2796 		    (p->migration_flags & MDF_PUSH) && !rq->push_busy) {
2797 			rq->push_busy = true;
2798 			push_task = get_task_struct(p);
2799 		}
2800 
2801 		/*
2802 		 * If there are pending waiters, but no pending stop_work,
2803 		 * then complete now.
2804 		 */
2805 		pending = p->migration_pending;
2806 		if (pending && !pending->stop_pending) {
2807 			p->migration_pending = NULL;
2808 			complete = true;
2809 		}
2810 
2811 		preempt_disable();
2812 		task_rq_unlock(rq, p, rf);
2813 		if (push_task) {
2814 			stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
2815 					    p, &rq->push_work);
2816 		}
2817 		preempt_enable();
2818 
2819 		if (complete)
2820 			complete_all(&pending->done);
2821 
2822 		return 0;
2823 	}
2824 
2825 	if (!(flags & SCA_MIGRATE_ENABLE)) {
2826 		/* serialized by p->pi_lock */
2827 		if (!p->migration_pending) {
2828 			/* Install the request */
2829 			refcount_set(&my_pending.refs, 1);
2830 			init_completion(&my_pending.done);
2831 			my_pending.arg = (struct migration_arg) {
2832 				.task = p,
2833 				.dest_cpu = dest_cpu,
2834 				.pending = &my_pending,
2835 			};
2836 
2837 			p->migration_pending = &my_pending;
2838 		} else {
2839 			pending = p->migration_pending;
2840 			refcount_inc(&pending->refs);
2841 			/*
2842 			 * Affinity has changed, but we've already installed a
2843 			 * pending. migration_cpu_stop() *must* see this, else
2844 			 * we risk a completion of the pending despite having a
2845 			 * task on a disallowed CPU.
2846 			 *
2847 			 * Serialized by p->pi_lock, so this is safe.
2848 			 */
2849 			pending->arg.dest_cpu = dest_cpu;
2850 		}
2851 	}
2852 	pending = p->migration_pending;
2853 	/*
2854 	 * - !MIGRATE_ENABLE:
2855 	 *   we'll have installed a pending if there wasn't one already.
2856 	 *
2857 	 * - MIGRATE_ENABLE:
2858 	 *   we're here because the current CPU isn't matching anymore,
2859 	 *   the only way that can happen is because of a concurrent
2860 	 *   set_cpus_allowed_ptr() call, which should then still be
2861 	 *   pending completion.
2862 	 *
2863 	 * Either way, we really should have a @pending here.
2864 	 */
2865 	if (WARN_ON_ONCE(!pending)) {
2866 		task_rq_unlock(rq, p, rf);
2867 		return -EINVAL;
2868 	}
2869 
2870 	if (task_on_cpu(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) {
2871 		/*
2872 		 * MIGRATE_ENABLE gets here because 'p == current', but for
2873 		 * anything else we cannot safely check is_migration_disabled(); punt
2874 		 * and have the stopper function handle it all race-free.
2875 		 */
2876 		stop_pending = pending->stop_pending;
2877 		if (!stop_pending)
2878 			pending->stop_pending = true;
2879 
2880 		if (flags & SCA_MIGRATE_ENABLE)
2881 			p->migration_flags &= ~MDF_PUSH;
2882 
2883 		preempt_disable();
2884 		task_rq_unlock(rq, p, rf);
2885 		if (!stop_pending) {
2886 			stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
2887 					    &pending->arg, &pending->stop_work);
2888 		}
2889 		preempt_enable();
2890 
2891 		if (flags & SCA_MIGRATE_ENABLE)
2892 			return 0;
2893 	} else {
2894 
2895 		if (!is_migration_disabled(p)) {
2896 			if (task_on_rq_queued(p))
2897 				rq = move_queued_task(rq, rf, p, dest_cpu);
2898 
2899 			if (!pending->stop_pending) {
2900 				p->migration_pending = NULL;
2901 				complete = true;
2902 			}
2903 		}
2904 		task_rq_unlock(rq, p, rf);
2905 
2906 		if (complete)
2907 			complete_all(&pending->done);
2908 	}
2909 
2910 	wait_for_completion(&pending->done);
2911 
2912 	if (refcount_dec_and_test(&pending->refs))
2913 		wake_up_var(&pending->refs); /* No UaF, just an address */
2914 
2915 	/*
2916 	 * Block the original owner of &pending until all subsequent callers
2917 	 * have seen the completion and decremented the refcount
2918 	 */
2919 	wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs));
2920 
2921 	/* ARGH */
2922 	WARN_ON_ONCE(my_pending.stop_pending);
2923 
2924 	return 0;
2925 }
2926 
2927 /*
2928  * Called with both p->pi_lock and rq->lock held; drops both before returning.
2929  */
2930 static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
2931 					 struct affinity_context *ctx,
2932 					 struct rq *rq,
2933 					 struct rq_flags *rf)
2934 	__releases(rq->lock)
2935 	__releases(p->pi_lock)
2936 {
2937 	const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
2938 	const struct cpumask *cpu_valid_mask = cpu_active_mask;
2939 	bool kthread = p->flags & PF_KTHREAD;
2940 	unsigned int dest_cpu;
2941 	int ret = 0;
2942 
2943 	update_rq_clock(rq);
2944 
2945 	if (kthread || is_migration_disabled(p)) {
2946 		/*
2947 		 * Kernel threads are allowed on online && !active CPUs,
2948 		 * however, during cpu-hot-unplug, even these might get pushed
2949 		 * away if not KTHREAD_IS_PER_CPU.
2950 		 *
2951 		 * Specifically, migration_disabled() tasks must not fail the
2952 		 * cpumask_any_and_distribute() pick below, esp. so on
2953 		 * SCA_MIGRATE_ENABLE, otherwise we'll not call
2954 		 * set_cpus_allowed_common() and actually reset p->cpus_ptr.
2955 		 */
2956 		cpu_valid_mask = cpu_online_mask;
2957 	}
2958 
2959 	if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) {
2960 		ret = -EINVAL;
2961 		goto out;
2962 	}
2963 
2964 	/*
2965 	 * Must re-check here, to close a race against __kthread_bind(),
2966 	 * sched_setaffinity() is not guaranteed to observe the flag.
2967 	 */
2968 	if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
2969 		ret = -EINVAL;
2970 		goto out;
2971 	}
2972 
2973 	if (!(ctx->flags & SCA_MIGRATE_ENABLE)) {
2974 		if (cpumask_equal(&p->cpus_mask, ctx->new_mask)) {
2975 			if (ctx->flags & SCA_USER)
2976 				swap(p->user_cpus_ptr, ctx->user_mask);
2977 			goto out;
2978 		}
2979 
2980 		if (WARN_ON_ONCE(p == current &&
2981 				 is_migration_disabled(p) &&
2982 				 !cpumask_test_cpu(task_cpu(p), ctx->new_mask))) {
2983 			ret = -EBUSY;
2984 			goto out;
2985 		}
2986 	}
2987 
2988 	/*
2989 	 * Picking a ~random cpu helps in cases where we are changing affinity
2990 	 * for groups of tasks (i.e. cpuset), so that load balancing is not
2991 	 * immediately required to distribute the tasks within their new mask.
2992 	 */
2993 	dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, ctx->new_mask);
2994 	if (dest_cpu >= nr_cpu_ids) {
2995 		ret = -EINVAL;
2996 		goto out;
2997 	}
2998 
2999 	__do_set_cpus_allowed(p, ctx);
3000 
3001 	return affine_move_task(rq, p, rf, dest_cpu, ctx->flags);
3002 
3003 out:
3004 	task_rq_unlock(rq, p, rf);
3005 
3006 	return ret;
3007 }
3008 
3009 /*
3010  * Change a given task's CPU affinity. Migrate the thread to a
3011  * proper CPU and schedule it away if the CPU it's executing on
3012  * is removed from the allowed bitmask.
3013  *
3014  * NOTE: the caller must have a valid reference to the task, the
3015  * task must not exit() & deallocate itself prematurely. The
3016  * call is not atomic; no spinlocks may be held.
3017  */
3018 int __set_cpus_allowed_ptr(struct task_struct *p, struct affinity_context *ctx)
3019 {
3020 	struct rq_flags rf;
3021 	struct rq *rq;
3022 
3023 	rq = task_rq_lock(p, &rf);
3024 	/*
3025 	 * Masking should be skipped if SCA_USER or any of the SCA_MIGRATE_*
3026 	 * flags are set.
3027 	 */
3028 	if (p->user_cpus_ptr &&
3029 	    !(ctx->flags & (SCA_USER | SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) &&
3030 	    cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr))
3031 		ctx->new_mask = rq->scratch_mask;
3032 
3033 	return __set_cpus_allowed_ptr_locked(p, ctx, rq, &rf);
3034 }
3035 
3036 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
3037 {
3038 	struct affinity_context ac = {
3039 		.new_mask  = new_mask,
3040 		.flags     = 0,
3041 	};
3042 
3043 	return __set_cpus_allowed_ptr(p, &ac);
3044 }
3045 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
3046 
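/*
 * Illustrative sketch, not part of the original source: a caller pinning a
 * kernel thread it owns to a single CPU.  "my_task" is hypothetical; per the
 * comment above __set_cpus_allowed_ptr(), the caller must hold a reference on
 * the task and must not hold any spinlocks.
 *
 *	int ret = set_cpus_allowed_ptr(my_task, cpumask_of(2));
 *	if (ret)
 *		pr_warn("failed to pin task: %d\n", ret);
 */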
3047 /*
3048  * Change a given task's CPU affinity to the intersection of its current
3049  * affinity mask and @subset_mask, writing the resulting mask to @new_mask.
3050  * If user_cpus_ptr is defined, use it as the basis for restricting CPU
3051  * affinity or use cpu_online_mask instead.
3052  *
3053  * If the resulting mask is empty, leave the affinity unchanged and return
3054  * -EINVAL.
3055  */
3056 static int restrict_cpus_allowed_ptr(struct task_struct *p,
3057 				     struct cpumask *new_mask,
3058 				     const struct cpumask *subset_mask)
3059 {
3060 	struct affinity_context ac = {
3061 		.new_mask  = new_mask,
3062 		.flags     = 0,
3063 	};
3064 	struct rq_flags rf;
3065 	struct rq *rq;
3066 	int err;
3067 
3068 	rq = task_rq_lock(p, &rf);
3069 
3070 	/*
3071 	 * Forcefully restricting the affinity of a deadline task is
3072 	 * likely to cause problems, so fail and noisily override the
3073 	 * mask entirely.
3074 	 */
3075 	if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
3076 		err = -EPERM;
3077 		goto err_unlock;
3078 	}
3079 
3080 	if (!cpumask_and(new_mask, task_user_cpus(p), subset_mask)) {
3081 		err = -EINVAL;
3082 		goto err_unlock;
3083 	}
3084 
3085 	return __set_cpus_allowed_ptr_locked(p, &ac, rq, &rf);
3086 
3087 err_unlock:
3088 	task_rq_unlock(rq, p, &rf);
3089 	return err;
3090 }
3091 
3092 /*
3093  * Restrict the CPU affinity of task @p so that it is a subset of
3094  * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
3095  * old affinity mask. If the resulting mask is empty, we warn and walk
3096  * up the cpuset hierarchy until we find a suitable mask.
3097  */
3098 void force_compatible_cpus_allowed_ptr(struct task_struct *p)
3099 {
3100 	cpumask_var_t new_mask;
3101 	const struct cpumask *override_mask = task_cpu_possible_mask(p);
3102 
3103 	alloc_cpumask_var(&new_mask, GFP_KERNEL);
3104 
3105 	/*
3106 	 * __migrate_task() can fail silently in the face of concurrent
3107 	 * offlining of the chosen destination CPU, so take the hotplug
3108 	 * lock to ensure that the migration succeeds.
3109 	 */
3110 	cpus_read_lock();
3111 	if (!cpumask_available(new_mask))
3112 		goto out_set_mask;
3113 
3114 	if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask))
3115 		goto out_free_mask;
3116 
3117 	/*
3118 	 * We failed to find a valid subset of the affinity mask for the
3119 	 * task, so override it based on its cpuset hierarchy.
3120 	 */
3121 	cpuset_cpus_allowed(p, new_mask);
3122 	override_mask = new_mask;
3123 
3124 out_set_mask:
3125 	if (printk_ratelimit()) {
3126 		printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
3127 				task_pid_nr(p), p->comm,
3128 				cpumask_pr_args(override_mask));
3129 	}
3130 
3131 	WARN_ON(set_cpus_allowed_ptr(p, override_mask));
3132 out_free_mask:
3133 	cpus_read_unlock();
3134 	free_cpumask_var(new_mask);
3135 }
3136 
3137 /*
3138  * Restore the affinity of a task @p which was previously restricted by a
3139  * call to force_compatible_cpus_allowed_ptr().
3140  *
3141  * It is the caller's responsibility to serialise this with any calls to
3142  * force_compatible_cpus_allowed_ptr(@p).
3143  */
3144 void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
3145 {
3146 	struct affinity_context ac = {
3147 		.new_mask  = task_user_cpus(p),
3148 		.flags     = 0,
3149 	};
3150 	int ret;
3151 
3152 	/*
3153 	 * Try to restore the old affinity mask with __sched_setaffinity().
3154 	 * Cpuset masking will be done there too.
3155 	 */
3156 	ret = __sched_setaffinity(p, &ac);
3157 	WARN_ON_ONCE(ret);
3158 }
3159 
3160 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
3161 {
3162 #ifdef CONFIG_SCHED_DEBUG
3163 	unsigned int state = READ_ONCE(p->__state);
3164 
3165 	/*
3166 	 * We should never call set_task_cpu() on a blocked task,
3167 	 * ttwu() will sort out the placement.
3168 	 */
3169 	WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq);
3170 
3171 	/*
3172 	 * A migrating fair-class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
3173 	 * because schedstat_wait_{start,end} rebase the migrating task's wait_start
3174 	 * time relying on p->on_rq.
3175 	 */
3176 	WARN_ON_ONCE(state == TASK_RUNNING &&
3177 		     p->sched_class == &fair_sched_class &&
3178 		     (p->on_rq && !task_on_rq_migrating(p)));
3179 
3180 #ifdef CONFIG_LOCKDEP
3181 	/*
3182 	 * The caller should hold either p->pi_lock or rq->lock, when changing
3183 	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
3184 	 *
3185 	 * sched_move_task() holds both and thus holding either pins the cgroup,
3186 	 * see task_group().
3187 	 *
3188 	 * Furthermore, all task_rq users should acquire both locks, see
3189 	 * task_rq_lock().
3190 	 */
3191 	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
3192 				      lockdep_is_held(__rq_lockp(task_rq(p)))));
3193 #endif
3194 	/*
3195 	 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
3196 	 */
3197 	WARN_ON_ONCE(!cpu_online(new_cpu));
3198 
3199 	WARN_ON_ONCE(is_migration_disabled(p));
3200 #endif
3201 
3202 	trace_sched_migrate_task(p, new_cpu);
3203 
3204 	if (task_cpu(p) != new_cpu) {
3205 		if (p->sched_class->migrate_task_rq)
3206 			p->sched_class->migrate_task_rq(p, new_cpu);
3207 		p->se.nr_migrations++;
3208 		rseq_migrate(p);
3209 		sched_mm_cid_migrate_from(p);
3210 		perf_event_task_migrate(p);
3211 	}
3212 
3213 	__set_task_cpu(p, new_cpu);
3214 }
3215 
3216 #ifdef CONFIG_NUMA_BALANCING
3217 static void __migrate_swap_task(struct task_struct *p, int cpu)
3218 {
3219 	if (task_on_rq_queued(p)) {
3220 		struct rq *src_rq, *dst_rq;
3221 		struct rq_flags srf, drf;
3222 
3223 		src_rq = task_rq(p);
3224 		dst_rq = cpu_rq(cpu);
3225 
3226 		rq_pin_lock(src_rq, &srf);
3227 		rq_pin_lock(dst_rq, &drf);
3228 
3229 		deactivate_task(src_rq, p, 0);
3230 		set_task_cpu(p, cpu);
3231 		activate_task(dst_rq, p, 0);
3232 		wakeup_preempt(dst_rq, p, 0);
3233 
3234 		rq_unpin_lock(dst_rq, &drf);
3235 		rq_unpin_lock(src_rq, &srf);
3236 
3237 	} else {
3238 		/*
3239 		 * Task isn't running anymore; make it appear like we migrated
3240 		 * it before it went to sleep. This means on wakeup we make the
3241 		 * previous CPU our target instead of where it really is.
3242 		 */
3243 		p->wake_cpu = cpu;
3244 	}
3245 }
3246 
3247 struct migration_swap_arg {
3248 	struct task_struct *src_task, *dst_task;
3249 	int src_cpu, dst_cpu;
3250 };
3251 
3252 static int migrate_swap_stop(void *data)
3253 {
3254 	struct migration_swap_arg *arg = data;
3255 	struct rq *src_rq, *dst_rq;
3256 
3257 	if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
3258 		return -EAGAIN;
3259 
3260 	src_rq = cpu_rq(arg->src_cpu);
3261 	dst_rq = cpu_rq(arg->dst_cpu);
3262 
3263 	guard(double_raw_spinlock)(&arg->src_task->pi_lock, &arg->dst_task->pi_lock);
3264 	guard(double_rq_lock)(src_rq, dst_rq);
3265 
3266 	if (task_cpu(arg->dst_task) != arg->dst_cpu)
3267 		return -EAGAIN;
3268 
3269 	if (task_cpu(arg->src_task) != arg->src_cpu)
3270 		return -EAGAIN;
3271 
3272 	if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr))
3273 		return -EAGAIN;
3274 
3275 	if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr))
3276 		return -EAGAIN;
3277 
3278 	__migrate_swap_task(arg->src_task, arg->dst_cpu);
3279 	__migrate_swap_task(arg->dst_task, arg->src_cpu);
3280 
3281 	return 0;
3282 }
3283 
3284 /*
3285  * Cross migrate two tasks
3286  */
3287 int migrate_swap(struct task_struct *cur, struct task_struct *p,
3288 		int target_cpu, int curr_cpu)
3289 {
3290 	struct migration_swap_arg arg;
3291 	int ret = -EINVAL;
3292 
3293 	arg = (struct migration_swap_arg){
3294 		.src_task = cur,
3295 		.src_cpu = curr_cpu,
3296 		.dst_task = p,
3297 		.dst_cpu = target_cpu,
3298 	};
3299 
3300 	if (arg.src_cpu == arg.dst_cpu)
3301 		goto out;
3302 
3303 	/*
3304 	 * These three tests are all lockless; this is OK since all of them
3305 	 * will be re-checked with proper locks held further down the line.
3306 	 */
3307 	if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
3308 		goto out;
3309 
3310 	if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr))
3311 		goto out;
3312 
3313 	if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr))
3314 		goto out;
3315 
3316 	trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
3317 	ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);
3318 
3319 out:
3320 	return ret;
3321 }
3322 #endif /* CONFIG_NUMA_BALANCING */
3323 
3324 /***
3325  * kick_process - kick a running thread to enter/exit the kernel
3326  * @p: the to-be-kicked thread
3327  *
3328  * Cause a process which is running on another CPU to enter
3329  * kernel-mode, without any delay. (to get signals handled.)
3330  *
3331  * NOTE: this function doesn't have to take the runqueue lock,
3332  * because all it wants to ensure is that the remote task enters
3333  * the kernel. If the IPI races and the task has been migrated
3334  * to another CPU then no harm is done and the purpose has been
3335  * achieved as well.
3336  */
3337 void kick_process(struct task_struct *p)
3338 {
3339 	guard(preempt)();
3340 	int cpu = task_cpu(p);
3341 
3342 	if ((cpu != smp_processor_id()) && task_curr(p))
3343 		smp_send_reschedule(cpu);
3344 }
3345 EXPORT_SYMBOL_GPL(kick_process);
3346 
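/*
 * Illustrative sketch, not part of the original source, modelled on the
 * signal-delivery pattern: set some per-task state and then kick the task so
 * that, if it is currently running on another CPU, it enters the kernel and
 * notices the new state promptly.
 *
 *	set_tsk_thread_flag(p, TIF_SIGPENDING);
 *	if (!wake_up_state(p, TASK_INTERRUPTIBLE))
 *		kick_process(p);
 */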
3347 /*
3348  * ->cpus_ptr is protected by both rq->lock and p->pi_lock
3349  *
3350  * A few notes on cpu_active vs cpu_online:
3351  *
3352  *  - cpu_active must be a subset of cpu_online
3353  *
3354  *  - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
3355  *    see __set_cpus_allowed_ptr(). At this point the newly online
3356  *    CPU isn't yet part of the sched domains, and balancing will not
3357  *    see it.
3358  *
3359  *  - on CPU-down we clear cpu_active() to mask the sched domains and
3360  *    prevent the load balancer from placing new tasks on the to-be-removed
3361  *    CPU. Existing tasks will remain running there and will be taken
3362  *    off.
3363  *
3364  * This means that fallback selection must not select !active CPUs, and
3365  * can assume that any active CPU must be online. Conversely,
3366  * select_task_rq() below may allow selection of !active CPUs in order
3367  * to satisfy the above rules.
3368  */
3369 static int select_fallback_rq(int cpu, struct task_struct *p)
3370 {
3371 	int nid = cpu_to_node(cpu);
3372 	const struct cpumask *nodemask = NULL;
3373 	enum { cpuset, possible, fail } state = cpuset;
3374 	int dest_cpu;
3375 
3376 	/*
3377 	 * If the node that the CPU is on has been offlined, cpu_to_node()
3378 	 * will return -1. There is no CPU on the node, and we should
3379 	 * select a CPU on another node.
3380 	 */
3381 	if (nid != -1) {
3382 		nodemask = cpumask_of_node(nid);
3383 
3384 		/* Look for allowed, online CPU in same node. */
3385 		for_each_cpu(dest_cpu, nodemask) {
3386 			if (is_cpu_allowed(p, dest_cpu))
3387 				return dest_cpu;
3388 		}
3389 	}
3390 
3391 	for (;;) {
3392 		/* Any allowed, online CPU? */
3393 		for_each_cpu(dest_cpu, p->cpus_ptr) {
3394 			if (!is_cpu_allowed(p, dest_cpu))
3395 				continue;
3396 
3397 			goto out;
3398 		}
3399 
3400 		/* No more Mr. Nice Guy. */
3401 		switch (state) {
3402 		case cpuset:
3403 			if (cpuset_cpus_allowed_fallback(p)) {
3404 				state = possible;
3405 				break;
3406 			}
3407 			fallthrough;
3408 		case possible:
3409 			/*
3410 			 * XXX When called from select_task_rq() we only
3411 			 * hold p->pi_lock and again violate locking order.
3412 			 *
3413 			 * More yuck to audit.
3414 			 */
3415 			do_set_cpus_allowed(p, task_cpu_possible_mask(p));
3416 			state = fail;
3417 			break;
3418 		case fail:
3419 			BUG();
3420 			break;
3421 		}
3422 	}
3423 
3424 out:
3425 	if (state != cpuset) {
3426 		/*
3427 		 * Don't tell them about moving exiting tasks or
3428 		 * kernel threads (both mm NULL), since they never
3429 		 * leave kernel.
3430 		 * leave the kernel.
3431 		if (p->mm && printk_ratelimit()) {
3432 			printk_deferred("process %d (%s) no longer affine to cpu%d\n",
3433 					task_pid_nr(p), p->comm, cpu);
3434 		}
3435 	}
3436 
3437 	return dest_cpu;
3438 }
3439 
3440 /*
3441  * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
3442  */
3443 static inline
3444 int select_task_rq(struct task_struct *p, int cpu, int wake_flags)
3445 {
3446 	lockdep_assert_held(&p->pi_lock);
3447 
3448 	if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p))
3449 		cpu = p->sched_class->select_task_rq(p, cpu, wake_flags);
3450 	else
3451 		cpu = cpumask_any(p->cpus_ptr);
3452 
3453 	/*
3454 	 * In order not to call set_task_cpu() on a blocking task we need
3455 	 * to rely on ttwu() to place the task on a valid ->cpus_ptr
3456 	 * CPU.
3457 	 *
3458 	 * Since this is common to all placement strategies, this lives here.
3459 	 *
3460 	 * [ this allows ->select_task() to simply return task_cpu(p) and
3461 	 *   not worry about this generic constraint ]
3462 	 */
3463 	if (unlikely(!is_cpu_allowed(p, cpu)))
3464 		cpu = select_fallback_rq(task_cpu(p), p);
3465 
3466 	return cpu;
3467 }
3468 
3469 void sched_set_stop_task(int cpu, struct task_struct *stop)
3470 {
3471 	static struct lock_class_key stop_pi_lock;
3472 	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
3473 	struct task_struct *old_stop = cpu_rq(cpu)->stop;
3474 
3475 	if (stop) {
3476 		/*
3477 		 * Make it appear like a SCHED_FIFO task, it's something
3478 		 * userspace knows about and won't get confused about.
3479 		 *
3480 		 * Also, it will make PI more or less work without too
3481 		 * much confusion -- but then, stop work should not
3482 		 * rely on PI working anyway.
3483 		 */
3484 		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
3485 
3486 		stop->sched_class = &stop_sched_class;
3487 
3488 		/*
3489 		 * The PI code calls rt_mutex_setprio() with ->pi_lock held to
3490 		 * adjust the effective priority of a task. As a result,
3491 		 * rt_mutex_setprio() can trigger (RT) balancing operations,
3492 		 * which can then trigger wakeups of the stop thread to push
3493 		 * around the current task.
3494 		 *
3495 		 * The stop task itself will never be part of the PI-chain, it
3496 		 * never blocks, therefore that ->pi_lock recursion is safe.
3497 		 * Tell lockdep about this by placing the stop->pi_lock in its
3498 		 * own class.
3499 		 */
3500 		lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
3501 	}
3502 
3503 	cpu_rq(cpu)->stop = stop;
3504 
3505 	if (old_stop) {
3506 		/*
3507 		 * Reset it back to a normal scheduling class so that
3508 		 * it can die in pieces.
3509 		 */
3510 		old_stop->sched_class = &rt_sched_class;
3511 	}
3512 }
3513 
3514 #else /* CONFIG_SMP */
3515 
3516 static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { }
3517 
3518 static inline bool rq_has_pinned_tasks(struct rq *rq)
3519 {
3520 	return false;
3521 }
3522 
3523 #endif /* !CONFIG_SMP */
3524 
3525 static void
3526 ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
3527 {
3528 	struct rq *rq;
3529 
3530 	if (!schedstat_enabled())
3531 		return;
3532 
3533 	rq = this_rq();
3534 
3535 #ifdef CONFIG_SMP
3536 	if (cpu == rq->cpu) {
3537 		__schedstat_inc(rq->ttwu_local);
3538 		__schedstat_inc(p->stats.nr_wakeups_local);
3539 	} else {
3540 		struct sched_domain *sd;
3541 
3542 		__schedstat_inc(p->stats.nr_wakeups_remote);
3543 
3544 		guard(rcu)();
3545 		for_each_domain(rq->cpu, sd) {
3546 			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
3547 				__schedstat_inc(sd->ttwu_wake_remote);
3548 				break;
3549 			}
3550 		}
3551 	}
3552 
3553 	if (wake_flags & WF_MIGRATED)
3554 		__schedstat_inc(p->stats.nr_wakeups_migrate);
3555 #endif /* CONFIG_SMP */
3556 
3557 	__schedstat_inc(rq->ttwu_count);
3558 	__schedstat_inc(p->stats.nr_wakeups);
3559 
3560 	if (wake_flags & WF_SYNC)
3561 		__schedstat_inc(p->stats.nr_wakeups_sync);
3562 }
3563 
3564 /*
3565  * Mark the task runnable.
3566  */
3567 static inline void ttwu_do_wakeup(struct task_struct *p)
3568 {
3569 	WRITE_ONCE(p->__state, TASK_RUNNING);
3570 	trace_sched_wakeup(p);
3571 }
3572 
3573 static void
3574 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
3575 		 struct rq_flags *rf)
3576 {
3577 	int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
3578 
3579 	lockdep_assert_rq_held(rq);
3580 
3581 	if (p->sched_contributes_to_load)
3582 		rq->nr_uninterruptible--;
3583 
3584 #ifdef CONFIG_SMP
3585 	if (wake_flags & WF_MIGRATED)
3586 		en_flags |= ENQUEUE_MIGRATED;
3587 	else
3588 #endif
3589 	if (p->in_iowait) {
3590 		delayacct_blkio_end(p);
3591 		atomic_dec(&task_rq(p)->nr_iowait);
3592 	}
3593 
3594 	activate_task(rq, p, en_flags);
3595 	wakeup_preempt(rq, p, wake_flags);
3596 
3597 	ttwu_do_wakeup(p);
3598 
3599 #ifdef CONFIG_SMP
3600 	if (p->sched_class->task_woken) {
3601 		/*
3602 		 * Our task @p is fully woken up and running; so it's safe to
3603 		 * drop the rq->lock, hereafter rq is only used for statistics.
3604 		 */
3605 		rq_unpin_lock(rq, rf);
3606 		p->sched_class->task_woken(rq, p);
3607 		rq_repin_lock(rq, rf);
3608 	}
3609 
3610 	if (rq->idle_stamp) {
3611 		u64 delta = rq_clock(rq) - rq->idle_stamp;
3612 		u64 max = 2*rq->max_idle_balance_cost;
3613 
3614 		update_avg(&rq->avg_idle, delta);
3615 
3616 		if (rq->avg_idle > max)
3617 			rq->avg_idle = max;
3618 
3619 		rq->idle_stamp = 0;
3620 	}
3621 #endif
3622 
3623 	p->dl_server = NULL;
3624 }
3625 
3626 /*
3627  * Consider @p being inside a wait loop:
3628  *
3629  *   for (;;) {
3630  *      set_current_state(TASK_UNINTERRUPTIBLE);
3631  *
3632  *      if (CONDITION)
3633  *         break;
3634  *
3635  *      schedule();
3636  *   }
3637  *   __set_current_state(TASK_RUNNING);
3638  *
3639  * between set_current_state() and schedule(). In this case @p is still
3640  * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
3641  * an atomic manner.
3642  *
3643  * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
3644  * then schedule() must still happen and p->state can be changed to
3645  * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
3646  * need to do a full wakeup with enqueue.
3647  *
3648  * Returns: %true when the wakeup is done,
3649  *          %false otherwise.
3650  */
3651 static int ttwu_runnable(struct task_struct *p, int wake_flags)
3652 {
3653 	struct rq_flags rf;
3654 	struct rq *rq;
3655 	int ret = 0;
3656 
3657 	rq = __task_rq_lock(p, &rf);
3658 	if (task_on_rq_queued(p)) {
3659 		if (!task_on_cpu(rq, p)) {
3660 			/*
3661 			 * When on_rq && !on_cpu the task is preempted, see if
3662 			 * it should preempt the task that is current now.
3663 			 */
3664 			update_rq_clock(rq);
3665 			wakeup_preempt(rq, p, wake_flags);
3666 		}
3667 		ttwu_do_wakeup(p);
3668 		ret = 1;
3669 	}
3670 	__task_rq_unlock(rq, &rf);
3671 
3672 	return ret;
3673 }
3674 
3675 #ifdef CONFIG_SMP
3676 void sched_ttwu_pending(void *arg)
3677 {
3678 	struct llist_node *llist = arg;
3679 	struct rq *rq = this_rq();
3680 	struct task_struct *p, *t;
3681 	struct rq_flags rf;
3682 
3683 	if (!llist)
3684 		return;
3685 
3686 	rq_lock_irqsave(rq, &rf);
3687 	update_rq_clock(rq);
3688 
3689 	llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
3690 		if (WARN_ON_ONCE(p->on_cpu))
3691 			smp_cond_load_acquire(&p->on_cpu, !VAL);
3692 
3693 		if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
3694 			set_task_cpu(p, cpu_of(rq));
3695 
3696 		ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf);
3697 	}
3698 
3699 	/*
3700 	 * Must be after enqueueing at least one task such that
3701 	 * idle_cpu() does not observe a false-negative -- if it does,
3702 	 * it is possible for select_idle_siblings() to stack a number
3703 	 * of tasks on this CPU during that window.
3704 	 *
3705 	 * It is OK to clear ttwu_pending when another task is pending.
3706 	 * We will receive an IPI after local IRQs are enabled and then enqueue it.
3707 	 * Since nr_running > 0 by then, idle_cpu() will always get the correct result.
3708 	 */
3709 	WRITE_ONCE(rq->ttwu_pending, 0);
3710 	rq_unlock_irqrestore(rq, &rf);
3711 }
3712 
3713 /*
3714  * Prepare the scene for sending an IPI for a remote smp_call
3715  *
3716  * Returns true if the caller can proceed with sending the IPI.
3717  * Returns false otherwise.
3718  */
3719 bool call_function_single_prep_ipi(int cpu)
3720 {
3721 	if (set_nr_if_polling(cpu_rq(cpu)->idle)) {
3722 		trace_sched_wake_idle_without_ipi(cpu);
3723 		return false;
3724 	}
3725 
3726 	return true;
3727 }
3728 
3729 /*
3730  * Queue a task on the target CPU's wake_list and wake the CPU via IPI if
3731  * necessary. The wakee CPU, on receipt of the IPI, will queue the task
3732  * via sched_ttwu_pending() for activation so the wakee incurs the cost
3733  * of the wakeup instead of the waker.
3734  */
3735 static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3736 {
3737 	struct rq *rq = cpu_rq(cpu);
3738 
3739 	p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
3740 
3741 	WRITE_ONCE(rq->ttwu_pending, 1);
3742 	__smp_call_single_queue(cpu, &p->wake_entry.llist);
3743 }
3744 
3745 void wake_up_if_idle(int cpu)
3746 {
3747 	struct rq *rq = cpu_rq(cpu);
3748 
3749 	guard(rcu)();
3750 	if (is_idle_task(rcu_dereference(rq->curr))) {
3751 		guard(rq_lock_irqsave)(rq);
3752 		if (is_idle_task(rq->curr))
3753 			resched_curr(rq);
3754 	}
3755 }
3756 
3757 bool cpus_equal_capacity(int this_cpu, int that_cpu)
3758 {
3759 	if (!sched_asym_cpucap_active())
3760 		return true;
3761 
3762 	if (this_cpu == that_cpu)
3763 		return true;
3764 
3765 	return arch_scale_cpu_capacity(this_cpu) == arch_scale_cpu_capacity(that_cpu);
3766 }
3767 
3768 bool cpus_share_cache(int this_cpu, int that_cpu)
3769 {
3770 	if (this_cpu == that_cpu)
3771 		return true;
3772 
3773 	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
3774 }
3775 
3776 /*
3777  * Whether the CPUs share cache resources, which means the LLC on non-cluster
3778  * machines and the LLC tag or L2 on machines with clusters.
3779  */
3780 bool cpus_share_resources(int this_cpu, int that_cpu)
3781 {
3782 	if (this_cpu == that_cpu)
3783 		return true;
3784 
3785 	return per_cpu(sd_share_id, this_cpu) == per_cpu(sd_share_id, that_cpu);
3786 }
3787 
3788 static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
3789 {
3790 	/*
3791 	 * Do not complicate things with the async wake_list while the CPU is
3792 	 * in hotplug state.
3793 	 */
3794 	if (!cpu_active(cpu))
3795 		return false;
3796 
3797 	/* Ensure the task will still be allowed to run on the CPU. */
3798 	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
3799 		return false;
3800 
3801 	/*
3802 	 * If the CPU does not share cache, then queue the task on the
3803 	 * remote rqs wakelist to avoid accessing remote data.
3804 	 */
3805 	if (!cpus_share_cache(smp_processor_id(), cpu))
3806 		return true;
3807 
3808 	if (cpu == smp_processor_id())
3809 		return false;
3810 
3811 	/*
3812 	 * If the wakee cpu is idle, or the task is descheduling and the
3813 	 * only running task on the CPU, then use the wakelist to offload
3814 	 * the task activation to the idle (or soon-to-be-idle) CPU as
3815 	 * the current CPU is likely busy. nr_running is checked to
3816 	 * avoid unnecessary task stacking.
3817 	 *
3818 	 * Note that we can only get here with (wakee) p->on_rq=0,
3819 	 * p->on_cpu can be whatever, we've done the dequeue, so
3820 	 * the wakee has been accounted out of ->nr_running.
3821 	 */
3822 	if (!cpu_rq(cpu)->nr_running)
3823 		return true;
3824 
3825 	return false;
3826 }
3827 
3828 static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3829 {
3830 	if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) {
3831 		sched_clock_cpu(cpu); /* Sync clocks across CPUs */
3832 		__ttwu_queue_wakelist(p, cpu, wake_flags);
3833 		return true;
3834 	}
3835 
3836 	return false;
3837 }
3838 
3839 #else /* !CONFIG_SMP */
3840 
3841 static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3842 {
3843 	return false;
3844 }
3845 
3846 #endif /* CONFIG_SMP */
3847 
3848 static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
3849 {
3850 	struct rq *rq = cpu_rq(cpu);
3851 	struct rq_flags rf;
3852 
3853 	if (ttwu_queue_wakelist(p, cpu, wake_flags))
3854 		return;
3855 
3856 	rq_lock(rq, &rf);
3857 	update_rq_clock(rq);
3858 	ttwu_do_activate(rq, p, wake_flags, &rf);
3859 	rq_unlock(rq, &rf);
3860 }
3861 
3862 /*
3863  * Invoked from try_to_wake_up() to check whether the task can be woken up.
3864  *
3865  * The caller holds p::pi_lock if p != current or has preemption
3866  * disabled when p == current.
3867  *
3868  * The rules of saved_state:
3869  *
3870  *   The related locking code always holds p::pi_lock when updating
3871  *   p::saved_state, which means the code is fully serialized in both cases.
3872  *
3873  *   For PREEMPT_RT, the lock wait and lock wakeups happen via TASK_RTLOCK_WAIT.
3874  *   No other bits set. This allows distinguishing all wakeup scenarios.
3875  *
3876  *   For FREEZER, the wakeup happens via TASK_FROZEN. No other bits set. This
3877  *   allows us to prevent early wakeup of tasks before they can be run on
3878  *   asymmetric ISA architectures (eg ARMv9).
3879  */
3880 static __always_inline
3881 bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
3882 {
3883 	int match;
3884 
3885 	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
3886 		WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) &&
3887 			     state != TASK_RTLOCK_WAIT);
3888 	}
3889 
3890 	*success = !!(match = __task_state_match(p, state));
3891 
3892 	/*
3893 	 * Saved state preserves the task state across blocking on
3894 	 * an RT lock or TASK_FREEZABLE tasks.  If the state matches,
3895 	 * set p::saved_state to TASK_RUNNING, but do not wake the task
3896 	 * because it waits for a lock wakeup or __thaw_task(). Also
3897 	 * indicate success because from the regular waker's point of
3898 	 * view this has succeeded.
3899 	 *
3900 	 * After acquiring the lock the task will restore p::__state
3901 	 * from p::saved_state which ensures that the regular
3902 	 * wakeup is not lost. The restore will also set
3903 	 * p::saved_state to TASK_RUNNING so any further tests will
3904 	 * not result in false positives vs. @success
3905 	 */
3906 	if (match < 0)
3907 		p->saved_state = TASK_RUNNING;
3908 
3909 	return match > 0;
3910 }
3911 
3912 /*
3913  * Notes on Program-Order guarantees on SMP systems.
3914  *
3915  *  MIGRATION
3916  *
3917  * The basic program-order guarantee on SMP systems is that when a task [t]
3918  * migrates, all its activity on its old CPU [c0] happens-before any subsequent
3919  * execution on its new CPU [c1].
3920  *
3921  * For migration (of runnable tasks) this is provided by the following means:
3922  *
3923  *  A) UNLOCK of the rq(c0)->lock scheduling out task t
3924  *  B) migration for t is required to synchronize *both* rq(c0)->lock and
3925  *     rq(c1)->lock (if not at the same time, then in that order).
3926  *  C) LOCK of the rq(c1)->lock scheduling in task
3927  *
3928  * Release/acquire chaining guarantees that B happens after A and C after B.
3929  * Note: the CPU doing B need not be c0 or c1
3930  *
3931  * Example:
3932  *
3933  *   CPU0            CPU1            CPU2
3934  *
3935  *   LOCK rq(0)->lock
3936  *   sched-out X
3937  *   sched-in Y
3938  *   UNLOCK rq(0)->lock
3939  *
3940  *                                   LOCK rq(0)->lock // orders against CPU0
3941  *                                   dequeue X
3942  *                                   UNLOCK rq(0)->lock
3943  *
3944  *                                   LOCK rq(1)->lock
3945  *                                   enqueue X
3946  *                                   UNLOCK rq(1)->lock
3947  *
3948  *                   LOCK rq(1)->lock // orders against CPU2
3949  *                   sched-out Z
3950  *                   sched-in X
3951  *                   UNLOCK rq(1)->lock
3952  *
3953  *
3954  *  BLOCKING -- aka. SLEEP + WAKEUP
3955  *
3956  * For blocking we (obviously) need to provide the same guarantee as for
3957  * migration. However the means are completely different as there is no lock
3958  * chain to provide order. Instead we do:
3959  *
3960  *   1) smp_store_release(X->on_cpu, 0)   -- finish_task()
3961  *   2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
3962  *
3963  * Example:
3964  *
3965  *   CPU0 (schedule)  CPU1 (try_to_wake_up) CPU2 (schedule)
3966  *
3967  *   LOCK rq(0)->lock LOCK X->pi_lock
3968  *   dequeue X
3969  *   sched-out X
3970  *   smp_store_release(X->on_cpu, 0);
3971  *
3972  *                    smp_cond_load_acquire(&X->on_cpu, !VAL);
3973  *                    X->state = WAKING
3974  *                    set_task_cpu(X,2)
3975  *
3976  *                    LOCK rq(2)->lock
3977  *                    enqueue X
3978  *                    X->state = RUNNING
3979  *                    UNLOCK rq(2)->lock
3980  *
3981  *                                          LOCK rq(2)->lock // orders against CPU1
3982  *                                          sched-out Z
3983  *                                          sched-in X
3984  *                                          UNLOCK rq(2)->lock
3985  *
3986  *                    UNLOCK X->pi_lock
3987  *   UNLOCK rq(0)->lock
3988  *
3989  *
3990  * However, for wakeups there is a second guarantee we must provide, namely we
3991  * must ensure that CONDITION=1 done by the caller can not be reordered with
3992  * accesses to the task state; see try_to_wake_up() and set_current_state().
3993  */
3994 
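/*
 * Illustrative sketch (not part of this file): the canonical sleeper/waker
 * pairing that the ordering rules above protect. 'done' and 'sleeper' are
 * hypothetical names.
 *
 *	// sleeper				// waker
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (!READ_ONCE(done))			WRITE_ONCE(done, 1);
 *		schedule();			wake_up_process(sleeper);
 *	__set_current_state(TASK_RUNNING);
 *
 * set_current_state() implies a full barrier (smp_store_mb()), so either the
 * waker observes the new task state and performs the wakeup, or the sleeper
 * observes done == 1 and never blocks; the wakeup cannot be lost.
 */
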
3995 /**
3996  * try_to_wake_up - wake up a thread
3997  * @p: the thread to be awakened
3998  * @state: the mask of task states that can be woken
3999  * @wake_flags: wake modifier flags (WF_*)
4000  *
4001  * Conceptually does:
4002  *
4003  *   If (@state & @p->state) @p->state = TASK_RUNNING.
4004  *
4005  * If the task was not queued/runnable, also place it back on a runqueue.
4006  *
4007  * This function is atomic against schedule() which would dequeue the task.
4008  *
4009  * It issues a full memory barrier before accessing @p->state, see the comment
4010  * with set_current_state().
4011  *
4012  * Uses p->pi_lock to serialize against concurrent wake-ups.
4013  *
4014  * Relies on p->pi_lock stabilizing:
4015  *  - p->sched_class
4016  *  - p->cpus_ptr
4017  *  - p->sched_task_group
4018  * in order to do migration, see its use of select_task_rq()/set_task_cpu().
4019  *
4020  * Tries really hard to only take one task_rq(p)->lock for performance.
4021  * Takes rq->lock in:
4022  *  - ttwu_runnable()    -- old rq, unavoidable, see comment there;
4023  *  - ttwu_queue()       -- new rq, for enqueue of the task;
4024  *  - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
4025  *
4026  * As a consequence we race really badly with just about everything. See the
4027  * many memory barriers and their comments for details.
4028  *
4029  * Return: %true if @p->state changes (an actual wakeup was done),
4030  *	   %false otherwise.
4031  */
4032 int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
4033 {
4034 	guard(preempt)();
4035 	int cpu, success = 0;
4036 
4037 	if (p == current) {
4038 		/*
4039 		 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
4040 		 * == smp_processor_id()'. Together this means we can special
4041 		 * case the whole 'p->on_rq && ttwu_runnable()' case below
4042 		 * without taking any locks.
4043 		 *
4044 		 * In particular:
4045 		 *  - we rely on Program-Order guarantees for all the ordering,
4046 		 *  - we're serialized against set_special_state() by virtue of
4047 		 *    it disabling IRQs (this allows not taking ->pi_lock).
4048 		 */
4049 		if (!ttwu_state_match(p, state, &success))
4050 			goto out;
4051 
4052 		trace_sched_waking(p);
4053 		ttwu_do_wakeup(p);
4054 		goto out;
4055 	}
4056 
4057 	/*
4058 	 * If we are going to wake up a thread waiting for CONDITION we
4059 	 * need to ensure that CONDITION=1 done by the caller can not be
4060 	 * reordered with p->state check below. This pairs with smp_store_mb()
4061 	 * in set_current_state() that the waiting thread does.
4062 	 */
4063 	scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
4064 		smp_mb__after_spinlock();
4065 		if (!ttwu_state_match(p, state, &success))
4066 			break;
4067 
4068 		trace_sched_waking(p);
4069 
4070 		/*
4071 		 * Ensure we load p->on_rq _after_ p->state, otherwise it would
4072 		 * be possible to, falsely, observe p->on_rq == 0 and get stuck
4073 		 * in smp_cond_load_acquire() below.
4074 		 *
4075 		 * sched_ttwu_pending()			try_to_wake_up()
4076 		 *   STORE p->on_rq = 1			  LOAD p->state
4077 		 *   UNLOCK rq->lock
4078 		 *
4079 		 * __schedule() (switch to task 'p')
4080 		 *   LOCK rq->lock			  smp_rmb();
4081 		 *   smp_mb__after_spinlock();
4082 		 *   UNLOCK rq->lock
4083 		 *
4084 		 * [task p]
4085 		 *   STORE p->state = UNINTERRUPTIBLE	  LOAD p->on_rq
4086 		 *
4087 		 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4088 		 * __schedule().  See the comment for smp_mb__after_spinlock().
4089 		 *
4090 		 * A similar smp_rmb() lives in __task_needs_rq_lock().
4091 		 */
4092 		smp_rmb();
4093 		if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
4094 			break;
4095 
4096 #ifdef CONFIG_SMP
4097 		/*
4098 		 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
4099 		 * possible to, falsely, observe p->on_cpu == 0.
4100 		 *
4101 		 * One must be running (->on_cpu == 1) in order to remove oneself
4102 		 * from the runqueue.
4103 		 *
4104 		 * __schedule() (switch to task 'p')	try_to_wake_up()
4105 		 *   STORE p->on_cpu = 1		  LOAD p->on_rq
4106 		 *   UNLOCK rq->lock
4107 		 *
4108 		 * __schedule() (put 'p' to sleep)
4109 		 *   LOCK rq->lock			  smp_rmb();
4110 		 *   smp_mb__after_spinlock();
4111 		 *   STORE p->on_rq = 0			  LOAD p->on_cpu
4112 		 *
4113 		 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4114 		 * __schedule().  See the comment for smp_mb__after_spinlock().
4115 		 *
4116 		 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
4117 		 * schedule()'s deactivate_task() has 'happened' and p will no longer
4118 		 * care about its own p->state. See the comment in __schedule().
4119 		 */
4120 		smp_acquire__after_ctrl_dep();
4121 
4122 		/*
4123 		 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
4124 		 * == 0), which means we need to do an enqueue, change p->state to
4125 		 * TASK_WAKING such that we can unlock p->pi_lock before doing the
4126 		 * enqueue, such as ttwu_queue_wakelist().
4127 		 */
4128 		WRITE_ONCE(p->__state, TASK_WAKING);
4129 
4130 		/*
4131 		 * If the owning (remote) CPU is still in the middle of schedule() with
4132 		 * this task as prev, consider queueing p on the remote CPU's wake_list
4133 		 * which potentially sends an IPI instead of spinning on p->on_cpu to
4134 		 * let the waker make forward progress. This is safe because IRQs are
4135 		 * disabled and the IPI will deliver after on_cpu is cleared.
4136 		 *
4137 		 * Ensure we load task_cpu(p) after p->on_cpu:
4138 		 *
4139 		 * set_task_cpu(p, cpu);
4140 		 *   STORE p->cpu = @cpu
4141 		 * __schedule() (switch to task 'p')
4142 		 *   LOCK rq->lock
4143 		 *   smp_mb__after_spin_lock()		smp_cond_load_acquire(&p->on_cpu)
4144 		 *   STORE p->on_cpu = 1		LOAD p->cpu
4145 		 *
4146 		 * to ensure we observe the correct CPU on which the task is currently
4147 		 * scheduling.
4148 		 */
4149 		if (smp_load_acquire(&p->on_cpu) &&
4150 		    ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
4151 			break;
4152 
4153 		/*
4154 		 * If the owning (remote) CPU is still in the middle of schedule() with
4155 		 * this task as prev, wait until it's done referencing the task.
4156 		 *
4157 		 * Pairs with the smp_store_release() in finish_task().
4158 		 *
4159 		 * This ensures that tasks getting woken will be fully ordered against
4160 		 * their previous state and preserve Program Order.
4161 		 */
4162 		smp_cond_load_acquire(&p->on_cpu, !VAL);
4163 
4164 		cpu = select_task_rq(p, p->wake_cpu, wake_flags | WF_TTWU);
4165 		if (task_cpu(p) != cpu) {
4166 			if (p->in_iowait) {
4167 				delayacct_blkio_end(p);
4168 				atomic_dec(&task_rq(p)->nr_iowait);
4169 			}
4170 
4171 			wake_flags |= WF_MIGRATED;
4172 			psi_ttwu_dequeue(p);
4173 			set_task_cpu(p, cpu);
4174 		}
4175 #else
4176 		cpu = task_cpu(p);
4177 #endif /* CONFIG_SMP */
4178 
4179 		ttwu_queue(p, cpu, wake_flags);
4180 	}
4181 out:
4182 	if (success)
4183 		ttwu_stat(p, task_cpu(p), wake_flags);
4184 
4185 	return success;
4186 }
4187 
4188 static bool __task_needs_rq_lock(struct task_struct *p)
4189 {
4190 	unsigned int state = READ_ONCE(p->__state);
4191 
4192 	/*
4193 	 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when
4194 	 * the task is blocked. Make sure to check @state since ttwu() can drop
4195 	 * locks at the end, see ttwu_queue_wakelist().
4196 	 */
4197 	if (state == TASK_RUNNING || state == TASK_WAKING)
4198 		return true;
4199 
4200 	/*
4201 	 * Ensure we load p->on_rq after p->__state, otherwise it would be
4202 	 * possible to, falsely, observe p->on_rq == 0.
4203 	 *
4204 	 * See try_to_wake_up() for a longer comment.
4205 	 */
4206 	smp_rmb();
4207 	if (p->on_rq)
4208 		return true;
4209 
4210 #ifdef CONFIG_SMP
4211 	/*
4212 	 * Ensure the task has finished __schedule() and will not be referenced
4213 	 * anymore. Again, see try_to_wake_up() for a longer comment.
4214 	 */
4215 	smp_rmb();
4216 	smp_cond_load_acquire(&p->on_cpu, !VAL);
4217 #endif
4218 
4219 	return false;
4220 }
4221 
4222 /**
4223  * task_call_func - Invoke a function on task in fixed state
4224  * @p: Process for which the function is to be invoked, can be @current.
4225  * @func: Function to invoke.
4226  * @arg: Argument to function.
4227  *
4228  * Fix the task in its current state by avoiding wakeups and/or rq operations
4229  * and call @func(@arg) on it.  This function can use ->on_rq and task_curr()
4230  * to work out what the state is, if required.  Given that @func can be invoked
4231  * with a runqueue lock held, it had better be quite lightweight.
4232  *
4233  * Returns:
4234  *   Whatever @func returns
4235  */
4236 int task_call_func(struct task_struct *p, task_call_f func, void *arg)
4237 {
4238 	struct rq *rq = NULL;
4239 	struct rq_flags rf;
4240 	int ret;
4241 
4242 	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
4243 
4244 	if (__task_needs_rq_lock(p))
4245 		rq = __task_rq_lock(p, &rf);
4246 
4247 	/*
4248 	 * At this point the task is pinned; either:
4249 	 *  - blocked and we're holding off wakeups	 (pi->lock)
4250 	 *  - woken, and we're holding off enqueue	 (rq->lock)
4251 	 *  - queued, and we're holding off schedule	 (rq->lock)
4252 	 *  - running, and we're holding off de-schedule (rq->lock)
4253 	 *
4254 	 * The called function (@func) can use: task_curr(), p->on_rq and
4255 	 * p->__state to differentiate between these states.
4256 	 */
4257 	ret = func(p, arg);
4258 
4259 	if (rq)
4260 		rq_unlock(rq, &rf);
4261 
4262 	raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
4263 	return ret;
4264 }
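
/*
 * Hypothetical usage sketch (not part of this file): a lightweight
 * task_call_f callback that only inspects the pinned task, as required by
 * the comment above. The helper name is illustrative.
 *
 *	static int get_task_state_cb(struct task_struct *p, void *arg)
 *	{
 *		*(unsigned int *)arg = READ_ONCE(p->__state);
 *		return task_on_rq_queued(p);
 *	}
 *
 *	unsigned int state;
 *	int queued = task_call_func(p, get_task_state_cb, &state);
 */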
4265 
4266 /**
4267  * cpu_curr_snapshot - Return a snapshot of the currently running task
4268  * @cpu: The CPU on which to snapshot the task.
4269  *
4270  * Returns the task_struct pointer of the task "currently" running on
4271  * the specified CPU.
4272  *
4273  * If the specified CPU was offline, the return value is whatever it
4274  * is, perhaps a pointer to the task_struct structure of that CPU's idle
4275  * task, but there is no guarantee.  Callers wishing a useful return
4276  * value must take some action to ensure that the specified CPU remains
4277  * online throughout.
4278  *
4279  * This function executes full memory barriers before and after fetching
4280  * the pointer, which permits the caller to confine this function's fetch
4281  * with respect to the caller's accesses to other shared variables.
4282  */
4283 struct task_struct *cpu_curr_snapshot(int cpu)
4284 {
4285 	struct rq *rq = cpu_rq(cpu);
4286 	struct task_struct *t;
4287 	struct rq_flags rf;
4288 
4289 	rq_lock_irqsave(rq, &rf);
4290 	smp_mb__after_spinlock(); /* Pairing determined by caller's synchronization design. */
4291 	t = rcu_dereference(cpu_curr(cpu));
4292 	rq_unlock_irqrestore(rq, &rf);
4293 	smp_mb(); /* Pairing determined by caller's synchronization design. */
4294 
4295 	return t;
4296 }
4297 
4298 /**
4299  * wake_up_process - Wake up a specific process
4300  * @p: The process to be woken up.
4301  *
4302  * Attempt to wake up the nominated process and move it to the set of runnable
4303  * processes.
4304  *
4305  * Return: 1 if the process was woken up, 0 if it was already running.
4306  *
4307  * This function executes a full memory barrier before accessing the task state.
4308  */
4309 int wake_up_process(struct task_struct *p)
4310 {
4311 	return try_to_wake_up(p, TASK_NORMAL, 0);
4312 }
4313 EXPORT_SYMBOL(wake_up_process);
4314 
4315 int wake_up_state(struct task_struct *p, unsigned int state)
4316 {
4317 	return try_to_wake_up(p, state, 0);
4318 }
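
/*
 * Illustrative difference (hypothetical caller): wake_up_process() wakes
 * TASK_NORMAL (interruptible and uninterruptible) sleepers, while
 * wake_up_state() can target a subset:
 *
 *	wake_up_state(p, TASK_INTERRUPTIBLE);	// only if p sleeps interruptibly
 *	wake_up_process(p);			// any normal sleep state
 */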
4319 
4320 /*
4321  * Perform scheduler related setup for a newly forked process p.
4322  * p is forked by current.
4323  *
4324  * __sched_fork() is basic setup used by init_idle() too:
4325  */
4326 static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
4327 {
4328 	p->on_rq			= 0;
4329 
4330 	p->se.on_rq			= 0;
4331 	p->se.exec_start		= 0;
4332 	p->se.sum_exec_runtime		= 0;
4333 	p->se.prev_sum_exec_runtime	= 0;
4334 	p->se.nr_migrations		= 0;
4335 	p->se.vruntime			= 0;
4336 	p->se.vlag			= 0;
4337 	p->se.slice			= sysctl_sched_base_slice;
4338 	INIT_LIST_HEAD(&p->se.group_node);
4339 
4340 #ifdef CONFIG_FAIR_GROUP_SCHED
4341 	p->se.cfs_rq			= NULL;
4342 #endif
4343 
4344 #ifdef CONFIG_SCHEDSTATS
4345 	/* Even if schedstat is disabled, there should not be garbage */
4346 	memset(&p->stats, 0, sizeof(p->stats));
4347 #endif
4348 
4349 	init_dl_entity(&p->dl);
4350 
4351 	INIT_LIST_HEAD(&p->rt.run_list);
4352 	p->rt.timeout		= 0;
4353 	p->rt.time_slice	= sched_rr_timeslice;
4354 	p->rt.on_rq		= 0;
4355 	p->rt.on_list		= 0;
4356 
4357 #ifdef CONFIG_PREEMPT_NOTIFIERS
4358 	INIT_HLIST_HEAD(&p->preempt_notifiers);
4359 #endif
4360 
4361 #ifdef CONFIG_COMPACTION
4362 	p->capture_control = NULL;
4363 #endif
4364 	init_numa_balancing(clone_flags, p);
4365 #ifdef CONFIG_SMP
4366 	p->wake_entry.u_flags = CSD_TYPE_TTWU;
4367 	p->migration_pending = NULL;
4368 #endif
4369 	init_sched_mm_cid(p);
4370 }
4371 
4372 DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);
4373 
4374 #ifdef CONFIG_NUMA_BALANCING
4375 
4376 int sysctl_numa_balancing_mode;
4377 
4378 static void __set_numabalancing_state(bool enabled)
4379 {
4380 	if (enabled)
4381 		static_branch_enable(&sched_numa_balancing);
4382 	else
4383 		static_branch_disable(&sched_numa_balancing);
4384 }
4385 
4386 void set_numabalancing_state(bool enabled)
4387 {
4388 	if (enabled)
4389 		sysctl_numa_balancing_mode = NUMA_BALANCING_NORMAL;
4390 	else
4391 		sysctl_numa_balancing_mode = NUMA_BALANCING_DISABLED;
4392 	__set_numabalancing_state(enabled);
4393 }
4394 
4395 #ifdef CONFIG_PROC_SYSCTL
4396 static void reset_memory_tiering(void)
4397 {
4398 	struct pglist_data *pgdat;
4399 
4400 	for_each_online_pgdat(pgdat) {
4401 		pgdat->nbp_threshold = 0;
4402 		pgdat->nbp_th_nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE);
4403 		pgdat->nbp_th_start = jiffies_to_msecs(jiffies);
4404 	}
4405 }
4406 
4407 static int sysctl_numa_balancing(const struct ctl_table *table, int write,
4408 			  void *buffer, size_t *lenp, loff_t *ppos)
4409 {
4410 	struct ctl_table t;
4411 	int err;
4412 	int state = sysctl_numa_balancing_mode;
4413 
4414 	if (write && !capable(CAP_SYS_ADMIN))
4415 		return -EPERM;
4416 
4417 	t = *table;
4418 	t.data = &state;
4419 	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
4420 	if (err < 0)
4421 		return err;
4422 	if (write) {
4423 		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) &&
4424 		    (state & NUMA_BALANCING_MEMORY_TIERING))
4425 			reset_memory_tiering();
4426 		sysctl_numa_balancing_mode = state;
4427 		__set_numabalancing_state(state);
4428 	}
4429 	return err;
4430 }
4431 #endif
4432 #endif
4433 
4434 #ifdef CONFIG_SCHEDSTATS
4435 
4436 DEFINE_STATIC_KEY_FALSE(sched_schedstats);
4437 
4438 static void set_schedstats(bool enabled)
4439 {
4440 	if (enabled)
4441 		static_branch_enable(&sched_schedstats);
4442 	else
4443 		static_branch_disable(&sched_schedstats);
4444 }
4445 
4446 void force_schedstat_enabled(void)
4447 {
4448 	if (!schedstat_enabled()) {
4449 		pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
4450 		static_branch_enable(&sched_schedstats);
4451 	}
4452 }
4453 
4454 static int __init setup_schedstats(char *str)
4455 {
4456 	int ret = 0;
4457 	if (!str)
4458 		goto out;
4459 
4460 	if (!strcmp(str, "enable")) {
4461 		set_schedstats(true);
4462 		ret = 1;
4463 	} else if (!strcmp(str, "disable")) {
4464 		set_schedstats(false);
4465 		ret = 1;
4466 	}
4467 out:
4468 	if (!ret)
4469 		pr_warn("Unable to parse schedstats=\n");
4470 
4471 	return ret;
4472 }
4473 __setup("schedstats=", setup_schedstats);
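
/*
 * Example: booting with "schedstats=enable" on the kernel command line has
 * effectively the same result as later writing 1 to
 * /proc/sys/kernel/sched_schedstats (registered below when CONFIG_SYSCTL and
 * CONFIG_PROC_SYSCTL are enabled).
 */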
4474 
4475 #ifdef CONFIG_PROC_SYSCTL
4476 static int sysctl_schedstats(const struct ctl_table *table, int write, void *buffer,
4477 		size_t *lenp, loff_t *ppos)
4478 {
4479 	struct ctl_table t;
4480 	int err;
4481 	int state = static_branch_likely(&sched_schedstats);
4482 
4483 	if (write && !capable(CAP_SYS_ADMIN))
4484 		return -EPERM;
4485 
4486 	t = *table;
4487 	t.data = &state;
4488 	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
4489 	if (err < 0)
4490 		return err;
4491 	if (write)
4492 		set_schedstats(state);
4493 	return err;
4494 }
4495 #endif /* CONFIG_PROC_SYSCTL */
4496 #endif /* CONFIG_SCHEDSTATS */
4497 
4498 #ifdef CONFIG_SYSCTL
4499 static struct ctl_table sched_core_sysctls[] = {
4500 #ifdef CONFIG_SCHEDSTATS
4501 	{
4502 		.procname       = "sched_schedstats",
4503 		.data           = NULL,
4504 		.maxlen         = sizeof(unsigned int),
4505 		.mode           = 0644,
4506 		.proc_handler   = sysctl_schedstats,
4507 		.extra1         = SYSCTL_ZERO,
4508 		.extra2         = SYSCTL_ONE,
4509 	},
4510 #endif /* CONFIG_SCHEDSTATS */
4511 #ifdef CONFIG_UCLAMP_TASK
4512 	{
4513 		.procname       = "sched_util_clamp_min",
4514 		.data           = &sysctl_sched_uclamp_util_min,
4515 		.maxlen         = sizeof(unsigned int),
4516 		.mode           = 0644,
4517 		.proc_handler   = sysctl_sched_uclamp_handler,
4518 	},
4519 	{
4520 		.procname       = "sched_util_clamp_max",
4521 		.data           = &sysctl_sched_uclamp_util_max,
4522 		.maxlen         = sizeof(unsigned int),
4523 		.mode           = 0644,
4524 		.proc_handler   = sysctl_sched_uclamp_handler,
4525 	},
4526 	{
4527 		.procname       = "sched_util_clamp_min_rt_default",
4528 		.data           = &sysctl_sched_uclamp_util_min_rt_default,
4529 		.maxlen         = sizeof(unsigned int),
4530 		.mode           = 0644,
4531 		.proc_handler   = sysctl_sched_uclamp_handler,
4532 	},
4533 #endif /* CONFIG_UCLAMP_TASK */
4534 #ifdef CONFIG_NUMA_BALANCING
4535 	{
4536 		.procname	= "numa_balancing",
4537 		.data		= NULL, /* filled in by handler */
4538 		.maxlen		= sizeof(unsigned int),
4539 		.mode		= 0644,
4540 		.proc_handler	= sysctl_numa_balancing,
4541 		.extra1		= SYSCTL_ZERO,
4542 		.extra2		= SYSCTL_FOUR,
4543 	},
4544 #endif /* CONFIG_NUMA_BALANCING */
4545 };
4546 static int __init sched_core_sysctl_init(void)
4547 {
4548 	register_sysctl_init("kernel", sched_core_sysctls);
4549 	return 0;
4550 }
4551 late_initcall(sched_core_sysctl_init);
4552 #endif /* CONFIG_SYSCTL */
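
/*
 * With CONFIG_SYSCTL enabled, the table above shows up under
 * /proc/sys/kernel/, e.g. (illustrative shell usage, not kernel code):
 *
 *	echo 1 > /proc/sys/kernel/sched_schedstats
 *	echo 2 > /proc/sys/kernel/numa_balancing	# NUMA_BALANCING_MEMORY_TIERING
 *
 * numa_balancing accepts values 0..4, bounded by SYSCTL_ZERO/SYSCTL_FOUR above.
 */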
4553 
4554 /*
4555  * fork()/clone()-time setup:
4556  */
4557 int sched_fork(unsigned long clone_flags, struct task_struct *p)
4558 {
4559 	__sched_fork(clone_flags, p);
4560 	/*
4561 	 * We mark the process as NEW here. This guarantees that
4562 	 * nobody will actually run it, and a signal or other external
4563 	 * event cannot wake it up and insert it on the runqueue either.
4564 	 */
4565 	p->__state = TASK_NEW;
4566 
4567 	/*
4568 	 * Make sure we do not leak PI boosting priority to the child.
4569 	 */
4570 	p->prio = current->normal_prio;
4571 
4572 	uclamp_fork(p);
4573 
4574 	/*
4575 	 * Revert to default priority/policy on fork if requested.
4576 	 */
4577 	if (unlikely(p->sched_reset_on_fork)) {
4578 		if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
4579 			p->policy = SCHED_NORMAL;
4580 			p->static_prio = NICE_TO_PRIO(0);
4581 			p->rt_priority = 0;
4582 		} else if (PRIO_TO_NICE(p->static_prio) < 0)
4583 			p->static_prio = NICE_TO_PRIO(0);
4584 
4585 		p->prio = p->normal_prio = p->static_prio;
4586 		set_load_weight(p, false);
4587 
4588 		/*
4589 		 * We don't need the reset flag anymore after the fork. It has
4590 		 * fulfilled its duty:
4591 		 */
4592 		p->sched_reset_on_fork = 0;
4593 	}
4594 
4595 	if (dl_prio(p->prio))
4596 		return -EAGAIN;
4597 	else if (rt_prio(p->prio))
4598 		p->sched_class = &rt_sched_class;
4599 	else
4600 		p->sched_class = &fair_sched_class;
4601 
4602 	init_entity_runnable_average(&p->se);
4603 
4604 
4605 #ifdef CONFIG_SCHED_INFO
4606 	if (likely(sched_info_on()))
4607 		memset(&p->sched_info, 0, sizeof(p->sched_info));
4608 #endif
4609 #if defined(CONFIG_SMP)
4610 	p->on_cpu = 0;
4611 #endif
4612 	init_task_preempt_count(p);
4613 #ifdef CONFIG_SMP
4614 	plist_node_init(&p->pushable_tasks, MAX_PRIO);
4615 	RB_CLEAR_NODE(&p->pushable_dl_tasks);
4616 #endif
4617 	return 0;
4618 }
4619 
4620 void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
4621 {
4622 	unsigned long flags;
4623 
4624 	/*
4625 	 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
4626 	 * required yet, but lockdep gets upset if rules are violated.
4627 	 */
4628 	raw_spin_lock_irqsave(&p->pi_lock, flags);
4629 #ifdef CONFIG_CGROUP_SCHED
4630 	if (1) {
4631 		struct task_group *tg;
4632 		tg = container_of(kargs->cset->subsys[cpu_cgrp_id],
4633 				  struct task_group, css);
4634 		tg = autogroup_task_group(p, tg);
4635 		p->sched_task_group = tg;
4636 	}
4637 #endif
4638 	rseq_migrate(p);
4639 	/*
4640 	 * We're setting the CPU for the first time, we don't migrate,
4641 	 * so use __set_task_cpu().
4642 	 */
4643 	__set_task_cpu(p, smp_processor_id());
4644 	if (p->sched_class->task_fork)
4645 		p->sched_class->task_fork(p);
4646 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4647 }
4648 
4649 void sched_post_fork(struct task_struct *p)
4650 {
4651 	uclamp_post_fork(p);
4652 }
4653 
4654 unsigned long to_ratio(u64 period, u64 runtime)
4655 {
4656 	if (runtime == RUNTIME_INF)
4657 		return BW_UNIT;
4658 
4659 	/*
4660 	 * Doing this here saves a lot of checks in all
4661 	 * the calling paths, and returning zero seems
4662 	 * safe for them anyway.
4663 	 */
4664 	if (period == 0)
4665 		return 0;
4666 
4667 	return div64_u64(runtime << BW_SHIFT, period);
4668 }
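
/*
 * Worked example: with BW_SHIFT = 20 (so BW_UNIT = 1 << 20), a runtime of
 * 5 ms out of a 10 ms period gives
 *
 *	to_ratio(10 * NSEC_PER_MSEC, 5 * NSEC_PER_MSEC)
 *		= (5000000 << 20) / 10000000
 *		= BW_UNIT / 2
 *
 * i.e. 50% utilization in fixed-point form.
 */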
4669 
4670 /*
4671  * wake_up_new_task - wake up a newly created task for the first time.
4672  *
4673  * This function will do some initial scheduler statistics housekeeping
4674  * that must be done for every newly created context, then puts the task
4675  * on the runqueue and wakes it.
4676  */
4677 void wake_up_new_task(struct task_struct *p)
4678 {
4679 	struct rq_flags rf;
4680 	struct rq *rq;
4681 
4682 	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
4683 	WRITE_ONCE(p->__state, TASK_RUNNING);
4684 #ifdef CONFIG_SMP
4685 	/*
4686 	 * Fork balancing, do it here and not earlier because:
4687 	 *  - cpus_ptr can change in the fork path
4688 	 *  - any previously selected CPU might disappear through hotplug
4689 	 *
4690 	 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
4691 	 * as we're not fully set-up yet.
4692 	 */
4693 	p->recent_used_cpu = task_cpu(p);
4694 	rseq_migrate(p);
4695 	__set_task_cpu(p, select_task_rq(p, task_cpu(p), WF_FORK));
4696 #endif
4697 	rq = __task_rq_lock(p, &rf);
4698 	update_rq_clock(rq);
4699 	post_init_entity_util_avg(p);
4700 
4701 	activate_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_INITIAL);
4702 	trace_sched_wakeup_new(p);
4703 	wakeup_preempt(rq, p, WF_FORK);
4704 #ifdef CONFIG_SMP
4705 	if (p->sched_class->task_woken) {
4706 		/*
4707 		 * Nothing relies on rq->lock after this, so it's fine to
4708 		 * drop it.
4709 		 */
4710 		rq_unpin_lock(rq, &rf);
4711 		p->sched_class->task_woken(rq, p);
4712 		rq_repin_lock(rq, &rf);
4713 	}
4714 #endif
4715 	task_rq_unlock(rq, p, &rf);
4716 }
4717 
4718 #ifdef CONFIG_PREEMPT_NOTIFIERS
4719 
4720 static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
4721 
4722 void preempt_notifier_inc(void)
4723 {
4724 	static_branch_inc(&preempt_notifier_key);
4725 }
4726 EXPORT_SYMBOL_GPL(preempt_notifier_inc);
4727 
4728 void preempt_notifier_dec(void)
4729 {
4730 	static_branch_dec(&preempt_notifier_key);
4731 }
4732 EXPORT_SYMBOL_GPL(preempt_notifier_dec);
4733 
4734 /**
4735  * preempt_notifier_register - tell me when current is being preempted & rescheduled
4736  * @notifier: notifier struct to register
4737  */
4738 void preempt_notifier_register(struct preempt_notifier *notifier)
4739 {
4740 	if (!static_branch_unlikely(&preempt_notifier_key))
4741 		WARN(1, "registering preempt_notifier while notifiers disabled\n");
4742 
4743 	hlist_add_head(&notifier->link, &current->preempt_notifiers);
4744 }
4745 EXPORT_SYMBOL_GPL(preempt_notifier_register);
4746 
4747 /**
4748  * preempt_notifier_unregister - no longer interested in preemption notifications
4749  * @notifier: notifier struct to unregister
4750  *
4751  * This is *not* safe to call from within a preemption notifier.
4752  */
4753 void preempt_notifier_unregister(struct preempt_notifier *notifier)
4754 {
4755 	hlist_del(&notifier->link);
4756 }
4757 EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
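
/*
 * Hypothetical usage sketch (KVM is the main in-tree user; the names below
 * are invented for illustration):
 *
 *	static void my_sched_in(struct preempt_notifier *pn, int cpu)
 *	{
 *		// restore per-task hardware state, now running on @cpu
 *	}
 *
 *	static void my_sched_out(struct preempt_notifier *pn,
 *				 struct task_struct *next)
 *	{
 *		// save state before @next takes over this CPU
 *	}
 *
 *	static struct preempt_ops my_ops = {
 *		.sched_in	= my_sched_in,
 *		.sched_out	= my_sched_out,
 *	};
 *
 *	preempt_notifier_inc();
 *	preempt_notifier_init(&notifier, &my_ops);
 *	preempt_notifier_register(&notifier);	// affects current only
 */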
4758 
4759 static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
4760 {
4761 	struct preempt_notifier *notifier;
4762 
4763 	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
4764 		notifier->ops->sched_in(notifier, raw_smp_processor_id());
4765 }
4766 
4767 static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
4768 {
4769 	if (static_branch_unlikely(&preempt_notifier_key))
4770 		__fire_sched_in_preempt_notifiers(curr);
4771 }
4772 
4773 static void
4774 __fire_sched_out_preempt_notifiers(struct task_struct *curr,
4775 				   struct task_struct *next)
4776 {
4777 	struct preempt_notifier *notifier;
4778 
4779 	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
4780 		notifier->ops->sched_out(notifier, next);
4781 }
4782 
4783 static __always_inline void
4784 fire_sched_out_preempt_notifiers(struct task_struct *curr,
4785 				 struct task_struct *next)
4786 {
4787 	if (static_branch_unlikely(&preempt_notifier_key))
4788 		__fire_sched_out_preempt_notifiers(curr, next);
4789 }
4790 
4791 #else /* !CONFIG_PREEMPT_NOTIFIERS */
4792 
4793 static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
4794 {
4795 }
4796 
4797 static inline void
4798 fire_sched_out_preempt_notifiers(struct task_struct *curr,
4799 				 struct task_struct *next)
4800 {
4801 }
4802 
4803 #endif /* CONFIG_PREEMPT_NOTIFIERS */
4804 
4805 static inline void prepare_task(struct task_struct *next)
4806 {
4807 #ifdef CONFIG_SMP
4808 	/*
4809 	 * Claim the task as running, we do this before switching to it
4810 	 * such that any running task will have this set.
4811 	 *
4812 	 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and
4813 	 * its ordering comment.
4814 	 */
4815 	WRITE_ONCE(next->on_cpu, 1);
4816 #endif
4817 }
4818 
4819 static inline void finish_task(struct task_struct *prev)
4820 {
4821 #ifdef CONFIG_SMP
4822 	/*
4823 	 * This must be the very last reference to @prev from this CPU. After
4824 	 * p->on_cpu is cleared, the task can be moved to a different CPU. We
4825 	 * must ensure this doesn't happen until the switch is completely
4826 	 * finished.
4827 	 *
4828 	 * In particular, the load of prev->state in finish_task_switch() must
4829 	 * happen before this.
4830 	 *
4831 	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
4832 	 */
4833 	smp_store_release(&prev->on_cpu, 0);
4834 #endif
4835 }
4836 
4837 #ifdef CONFIG_SMP
4838 
4839 static void do_balance_callbacks(struct rq *rq, struct balance_callback *head)
4840 {
4841 	void (*func)(struct rq *rq);
4842 	struct balance_callback *next;
4843 
4844 	lockdep_assert_rq_held(rq);
4845 
4846 	while (head) {
4847 		func = (void (*)(struct rq *))head->func;
4848 		next = head->next;
4849 		head->next = NULL;
4850 		head = next;
4851 
4852 		func(rq);
4853 	}
4854 }
4855 
4856 static void balance_push(struct rq *rq);
4857 
4858 /*
4859  * balance_push_callback is a right abuse of the callback interface and plays
4860  * by significantly different rules.
4861  *
4862  * Where the normal balance_callback's purpose is to be run in the same context
4863  * that queued it (only later, when it's safe to drop rq->lock again),
4864  * balance_push_callback is specifically targeted at __schedule().
4865  *
4866  * This abuse is tolerated because it places all the unlikely/odd cases behind
4867  * a single test, namely: rq->balance_callback == NULL.
4868  */
4869 struct balance_callback balance_push_callback = {
4870 	.next = NULL,
4871 	.func = balance_push,
4872 };
4873 
4874 static inline struct balance_callback *
4875 __splice_balance_callbacks(struct rq *rq, bool split)
4876 {
4877 	struct balance_callback *head = rq->balance_callback;
4878 
4879 	if (likely(!head))
4880 		return NULL;
4881 
4882 	lockdep_assert_rq_held(rq);
4883 	/*
4884 	 * Must not take balance_push_callback off the list when
4885 	 * splice_balance_callbacks() and balance_callbacks() are not
4886 	 * in the same rq->lock section.
4887 	 *
4888 	 * In that case it would be possible for __schedule() to interleave
4889 	 * and observe the list empty.
4890 	 */
4891 	if (split && head == &balance_push_callback)
4892 		head = NULL;
4893 	else
4894 		rq->balance_callback = NULL;
4895 
4896 	return head;
4897 }
4898 
4899 struct balance_callback *splice_balance_callbacks(struct rq *rq)
4900 {
4901 	return __splice_balance_callbacks(rq, true);
4902 }
4903 
4904 static void __balance_callbacks(struct rq *rq)
4905 {
4906 	do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
4907 }
4908 
4909 void balance_callbacks(struct rq *rq, struct balance_callback *head)
4910 {
4911 	unsigned long flags;
4912 
4913 	if (unlikely(head)) {
4914 		raw_spin_rq_lock_irqsave(rq, flags);
4915 		do_balance_callbacks(rq, head);
4916 		raw_spin_rq_unlock_irqrestore(rq, flags);
4917 	}
4918 }
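
/*
 * Sketch of how a sched class typically queues one of these (see the real
 * users in rt.c/deadline.c; the names below are illustrative, assuming the
 * queue_balance_callback() helper from kernel/sched/sched.h):
 *
 *	static DEFINE_PER_CPU(struct balance_callback, my_push_head);
 *
 *	// with rq->lock held:
 *	queue_balance_callback(rq, &per_cpu(my_push_head, cpu_of(rq)),
 *			       my_push_func);
 *
 * The callback then runs from __balance_callbacks() / balance_callbacks()
 * once it is safe to drop and retake rq->lock.
 */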
4919 
4920 #else
4921 
4922 static inline void __balance_callbacks(struct rq *rq)
4923 {
4924 }
4925 
4926 #endif
4927 
4928 static inline void
4929 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
4930 {
4931 	/*
4932 	 * The runqueue lock will be released by the next
4933 	 * task (which is an invalid locking op, but in the case
4934 	 * of the scheduler it's an obvious special case), so we
4935 	 * do an early lockdep release here:
4936 	 */
4937 	rq_unpin_lock(rq, rf);
4938 	spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_);
4939 #ifdef CONFIG_DEBUG_SPINLOCK
4940 	/* this is a valid case when another task releases the spinlock */
4941 	rq_lockp(rq)->owner = next;
4942 #endif
4943 }
4944 
4945 static inline void finish_lock_switch(struct rq *rq)
4946 {
4947 	/*
4948 	 * If we are tracking spinlock dependencies then we have to
4949 	 * fix up the runqueue lock - which gets 'carried over' from
4950 	 * prev into current:
4951 	 */
4952 	spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_);
4953 	__balance_callbacks(rq);
4954 	raw_spin_rq_unlock_irq(rq);
4955 }
4956 
4957 /*
4958  * NOP if the arch has not defined these:
4959  */
4960 
4961 #ifndef prepare_arch_switch
4962 # define prepare_arch_switch(next)	do { } while (0)
4963 #endif
4964 
4965 #ifndef finish_arch_post_lock_switch
4966 # define finish_arch_post_lock_switch()	do { } while (0)
4967 #endif
4968 
4969 static inline void kmap_local_sched_out(void)
4970 {
4971 #ifdef CONFIG_KMAP_LOCAL
4972 	if (unlikely(current->kmap_ctrl.idx))
4973 		__kmap_local_sched_out();
4974 #endif
4975 }
4976 
4977 static inline void kmap_local_sched_in(void)
4978 {
4979 #ifdef CONFIG_KMAP_LOCAL
4980 	if (unlikely(current->kmap_ctrl.idx))
4981 		__kmap_local_sched_in();
4982 #endif
4983 }
4984 
4985 /**
4986  * prepare_task_switch - prepare to switch tasks
4987  * @rq: the runqueue preparing to switch
4988  * @prev: the current task that is being switched out
4989  * @next: the task we are going to switch to.
4990  *
4991  * This is called with the rq lock held and interrupts off. It must
4992  * be paired with a subsequent finish_task_switch after the context
4993  * switch.
4994  *
4995  * prepare_task_switch sets up locking and calls architecture specific
4996  * hooks.
4997  */
4998 static inline void
4999 prepare_task_switch(struct rq *rq, struct task_struct *prev,
5000 		    struct task_struct *next)
5001 {
5002 	kcov_prepare_switch(prev);
5003 	sched_info_switch(rq, prev, next);
5004 	perf_event_task_sched_out(prev, next);
5005 	rseq_preempt(prev);
5006 	fire_sched_out_preempt_notifiers(prev, next);
5007 	kmap_local_sched_out();
5008 	prepare_task(next);
5009 	prepare_arch_switch(next);
5010 }
5011 
5012 /**
5013  * finish_task_switch - clean up after a task-switch
5014  * @prev: the thread we just switched away from.
5015  *
5016  * finish_task_switch must be called after the context switch, paired
5017  * with a prepare_task_switch call before the context switch.
5018  * finish_task_switch will reconcile locking set up by prepare_task_switch,
5019  * and do any other architecture-specific cleanup actions.
5020  *
5021  * Note that we may have delayed dropping an mm in context_switch(). If
5022  * so, we finish that here outside of the runqueue lock. (Doing it
5023  * with the lock held can cause deadlocks; see schedule() for
5024  * details.)
5025  *
5026  * The context switch has flipped the stack from under us and restored the
5027  * local variables which were saved when this task called schedule() in the
5028  * past. 'prev == current' is still correct but we need to recalculate this_rq
5029  * because prev may have moved to another CPU.
5030  */
5031 static struct rq *finish_task_switch(struct task_struct *prev)
5032 	__releases(rq->lock)
5033 {
5034 	struct rq *rq = this_rq();
5035 	struct mm_struct *mm = rq->prev_mm;
5036 	unsigned int prev_state;
5037 
5038 	/*
5039 	 * The previous task will have left us with a preempt_count of 2
5040 	 * because it left us after:
5041 	 *
5042 	 *	schedule()
5043 	 *	  preempt_disable();			// 1
5044 	 *	  __schedule()
5045 	 *	    raw_spin_lock_irq(&rq->lock)	// 2
5046 	 *
5047 	 * Also, see FORK_PREEMPT_COUNT.
5048 	 */
5049 	if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
5050 		      "corrupted preempt_count: %s/%d/0x%x\n",
5051 		      current->comm, current->pid, preempt_count()))
5052 		preempt_count_set(FORK_PREEMPT_COUNT);
5053 
5054 	rq->prev_mm = NULL;
5055 
5056 	/*
5057 	 * A task struct has one reference for the use as "current".
5058 	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
5059 	 * schedule one last time. The schedule call will never return, and
5060 	 * the scheduled task must drop that reference.
5061 	 *
5062 	 * We must observe prev->state before clearing prev->on_cpu (in
5063 	 * finish_task), otherwise a concurrent wakeup can get prev
5064 	 * running on another CPU and we could race with its RUNNING -> DEAD
5065 	 * transition, resulting in a double drop.
5066 	 */
5067 	prev_state = READ_ONCE(prev->__state);
5068 	vtime_task_switch(prev);
5069 	perf_event_task_sched_in(prev, current);
5070 	finish_task(prev);
5071 	tick_nohz_task_switch();
5072 	finish_lock_switch(rq);
5073 	finish_arch_post_lock_switch();
5074 	kcov_finish_switch(current);
5075 	/*
5076 	 * kmap_local_sched_out() is invoked with rq::lock held and
5077 	 * interrupts disabled. There is no requirement for that, but the
5078 	 * sched out code does not have an interrupt enabled section.
5079 	 * Restoring the maps on sched in does not require interrupts being
5080 	 * disabled either.
5081 	 */
5082 	kmap_local_sched_in();
5083 
5084 	fire_sched_in_preempt_notifiers(current);
5085 	/*
5086 	 * When switching through a kernel thread, the loop in
5087 	 * membarrier_{private,global}_expedited() may have observed that
5088 	 * kernel thread and not issued an IPI. It is therefore possible to
5089 	 * schedule between user->kernel->user threads without passing though
5090 	 * switch_mm(). Membarrier requires a barrier after storing to
5091 	 * rq->curr, before returning to userspace, so provide them here:
5092 	 *
5093 	 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
5094 	 *   provided by mmdrop_lazy_tlb(),
5095 	 * - a sync_core for SYNC_CORE.
5096 	 */
5097 	if (mm) {
5098 		membarrier_mm_sync_core_before_usermode(mm);
5099 		mmdrop_lazy_tlb_sched(mm);
5100 	}
5101 
5102 	if (unlikely(prev_state == TASK_DEAD)) {
5103 		if (prev->sched_class->task_dead)
5104 			prev->sched_class->task_dead(prev);
5105 
5106 		/* Task is done with its stack. */
5107 		put_task_stack(prev);
5108 
5109 		put_task_struct_rcu_user(prev);
5110 	}
5111 
5112 	return rq;
5113 }
5114 
5115 /**
5116  * schedule_tail - first thing a freshly forked thread must call.
5117  * @prev: the thread we just switched away from.
5118  */
5119 asmlinkage __visible void schedule_tail(struct task_struct *prev)
5120 	__releases(rq->lock)
5121 {
5122 	/*
5123 	 * New tasks start with FORK_PREEMPT_COUNT, see there and
5124 	 * finish_task_switch() for details.
5125 	 *
5126 	 * finish_task_switch() will drop rq->lock() and lower preempt_count
5127 	 * and the preempt_enable() will end up enabling preemption (on
5128 	 * PREEMPT_COUNT kernels).
5129 	 */
5130 
5131 	finish_task_switch(prev);
5132 	preempt_enable();
5133 
5134 	if (current->set_child_tid)
5135 		put_user(task_pid_vnr(current), current->set_child_tid);
5136 
5137 	calculate_sigpending();
5138 }
5139 
5140 /*
5141  * context_switch - switch to the new MM and the new thread's register state.
5142  */
5143 static __always_inline struct rq *
5144 context_switch(struct rq *rq, struct task_struct *prev,
5145 	       struct task_struct *next, struct rq_flags *rf)
5146 {
5147 	prepare_task_switch(rq, prev, next);
5148 
5149 	/*
5150 	 * For paravirt, this is coupled with an exit in switch_to to
5151 	 * combine the page table reload and the switch backend into
5152 	 * one hypercall.
5153 	 */
5154 	arch_start_context_switch(prev);
5155 
5156 	/*
5157 	 * kernel -> kernel   lazy + transfer active
5158 	 *   user -> kernel   lazy + mmgrab_lazy_tlb() active
5159 	 *
5160 	 * kernel ->   user   switch + mmdrop_lazy_tlb() active
5161 	 *   user ->   user   switch
5162 	 *
5163 	 * switch_mm_cid() needs to be updated if the barriers provided
5164 	 * by context_switch() are modified.
5165 	 */
5166 	if (!next->mm) {                                // to kernel
5167 		enter_lazy_tlb(prev->active_mm, next);
5168 
5169 		next->active_mm = prev->active_mm;
5170 		if (prev->mm)                           // from user
5171 			mmgrab_lazy_tlb(prev->active_mm);
5172 		else
5173 			prev->active_mm = NULL;
5174 	} else {                                        // to user
5175 		membarrier_switch_mm(rq, prev->active_mm, next->mm);
5176 		/*
5177 		 * sys_membarrier() requires an smp_mb() between setting
5178 		 * rq->curr / membarrier_switch_mm() and returning to userspace.
5179 		 *
5180 		 * The below provides this either through switch_mm(), or in
5181 		 * case 'prev->active_mm == next->mm' through
5182 		 * finish_task_switch()'s mmdrop().
5183 		 */
5184 		switch_mm_irqs_off(prev->active_mm, next->mm, next);
5185 		lru_gen_use_mm(next->mm);
5186 
5187 		if (!prev->mm) {                        // from kernel
5188 			/* will mmdrop_lazy_tlb() in finish_task_switch(). */
5189 			rq->prev_mm = prev->active_mm;
5190 			prev->active_mm = NULL;
5191 		}
5192 	}
5193 
5194 	/* switch_mm_cid() requires the memory barriers above. */
5195 	switch_mm_cid(rq, prev, next);
5196 
5197 	prepare_lock_switch(rq, next, rf);
5198 
5199 	/* Here we just switch the register state and the stack. */
5200 	switch_to(prev, next, prev);
5201 	barrier();
5202 
5203 	return finish_task_switch(prev);
5204 }
5205 
5206 /*
5207  * nr_running and nr_context_switches:
5208  *
5209  * externally visible scheduler statistics: current number of runnable
5210  * threads, total number of context switches performed since bootup.
5211  */
5212 unsigned int nr_running(void)
5213 {
5214 	unsigned int i, sum = 0;
5215 
5216 	for_each_online_cpu(i)
5217 		sum += cpu_rq(i)->nr_running;
5218 
5219 	return sum;
5220 }
5221 
5222 /*
5223  * Check if only the current task is running on the CPU.
5224  *
5225  * Caution: this function does not check that the caller has disabled
5226  * preemption, thus the result might have a time-of-check-to-time-of-use
5227  * race.  The caller is responsible to use it correctly, for example:
5228  *
5229  * - from a non-preemptible section (of course)
5230  *
5231  * - from a thread that is bound to a single CPU
5232  *
5233  * - in a loop with very short iterations (e.g. a polling loop)
5234  */
5235 bool single_task_running(void)
5236 {
5237 	return raw_rq()->nr_running == 1;
5238 }
5239 EXPORT_SYMBOL(single_task_running);
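
/*
 * Illustrative polling-loop usage (hypothetical), consistent with the
 * caveats above: re-evaluate on every short iteration rather than caching
 * the result.
 *
 *	while (single_task_running() && !time_after(jiffies, timeout))
 *		cpu_relax();
 */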
5240 
5241 unsigned long long nr_context_switches_cpu(int cpu)
5242 {
5243 	return cpu_rq(cpu)->nr_switches;
5244 }
5245 
5246 unsigned long long nr_context_switches(void)
5247 {
5248 	int i;
5249 	unsigned long long sum = 0;
5250 
5251 	for_each_possible_cpu(i)
5252 		sum += cpu_rq(i)->nr_switches;
5253 
5254 	return sum;
5255 }
5256 
5257 /*
5258  * Consumers of these two interfaces, like for example the cpuidle menu
5259  * governor, are using nonsensical data: they prefer shallow idle state
5260  * selection for a CPU that has IO-wait, even though that CPU might not even
5261  * end up running the task when it does become runnable.
5262  */
5263 
5264 unsigned int nr_iowait_cpu(int cpu)
5265 {
5266 	return atomic_read(&cpu_rq(cpu)->nr_iowait);
5267 }
5268 
5269 /*
5270  * IO-wait accounting, and how it's mostly bollocks (on SMP).
5271  *
5272  * The idea behind IO-wait account is to account the idle time that we could
5273  * The idea behind IO-wait accounting is to account the idle time that we could
5274  * have spent running if it were not for IO. That is, if we were to improve the
5275  *
5276  * This all works nicely on UP, where, when a task blocks on IO, we account
5277  * idle time as IO-wait, because if the storage were faster, it could've been
5278  * running and we'd not be idle.
5279  *
5280  * This has been extended to SMP, by doing the same for each CPU. This however
5281  * is broken.
5282  *
5283  * Imagine for instance the case where two tasks block on one CPU, only the one
5284  * CPU will have IO-wait accounted, while the other has regular idle. Even
5285  * though, if the storage were faster, both could've ran at the same time,
5286  * though, if the storage were faster, both could've run at the same time,
5287  *
5288  * This means, that when looking globally, the current IO-wait accounting on
5289  * This means that, when looking globally, the current IO-wait accounting on
5290  * SMP is a lower bound, due to under-accounting.
5291  * Worse, since the numbers are provided per CPU, they are sometimes
5292  * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
5293  * associated with any one particular CPU, it can wake to another CPU than it
5294  * blocked on. This means the per CPU IO-wait number is meaningless.
5295  *
5296  * Task CPU affinities can make all that even more 'interesting'.
5297  */
5298 
5299 unsigned int nr_iowait(void)
5300 {
5301 	unsigned int i, sum = 0;
5302 
5303 	for_each_possible_cpu(i)
5304 		sum += nr_iowait_cpu(i);
5305 
5306 	return sum;
5307 }
5308 
5309 #ifdef CONFIG_SMP
5310 
5311 /*
5312  * sched_exec - execve() is a valuable balancing opportunity, because at
5313  * this point the task has the smallest effective memory and cache footprint.
5314  */
5315 void sched_exec(void)
5316 {
5317 	struct task_struct *p = current;
5318 	struct migration_arg arg;
5319 	int dest_cpu;
5320 
5321 	scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
5322 		dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC);
5323 		if (dest_cpu == smp_processor_id())
5324 			return;
5325 
5326 		if (unlikely(!cpu_active(dest_cpu)))
5327 			return;
5328 
5329 		arg = (struct migration_arg){ p, dest_cpu };
5330 	}
5331 	stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
5332 }
5333 
5334 #endif
5335 
5336 DEFINE_PER_CPU(struct kernel_stat, kstat);
5337 DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
5338 
5339 EXPORT_PER_CPU_SYMBOL(kstat);
5340 EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
5341 
5342 /*
5343  * The function fair_sched_class.update_curr accesses the struct curr
5344  * and its field curr->exec_start; when called from task_sched_runtime(),
5345  * we observe a high rate of cache misses in practice.
5346  * Prefetching this data results in improved performance.
5347  */
5348 static inline void prefetch_curr_exec_start(struct task_struct *p)
5349 {
5350 #ifdef CONFIG_FAIR_GROUP_SCHED
5351 	struct sched_entity *curr = p->se.cfs_rq->curr;
5352 #else
5353 	struct sched_entity *curr = task_rq(p)->cfs.curr;
5354 #endif
5355 	prefetch(curr);
5356 	prefetch(&curr->exec_start);
5357 }
5358 
5359 /*
5360  * Return accounted runtime for the task.
5361  * In case the task is currently running, return the runtime plus current's
5362  * pending runtime that has not been accounted yet.
5363  */
5364 unsigned long long task_sched_runtime(struct task_struct *p)
5365 {
5366 	struct rq_flags rf;
5367 	struct rq *rq;
5368 	u64 ns;
5369 
5370 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
5371 	/*
5372 	 * 64-bit doesn't need locks to atomically read a 64-bit value.
5373 	 * So we have an optimization chance when the task's delta_exec is 0.
5374 	 * Reading ->on_cpu is racy, but this is OK.
5375 	 *
5376 	 * If we race with it leaving CPU, we'll take a lock. So we're correct.
5377 	 * If we race with it entering CPU, unaccounted time is 0. This is
5378 	 * indistinguishable from the read occurring a few cycles earlier.
5379 	 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
5380 	 * been accounted, so we're correct here as well.
5381 	 */
5382 	if (!p->on_cpu || !task_on_rq_queued(p))
5383 		return p->se.sum_exec_runtime;
5384 #endif
5385 
5386 	rq = task_rq_lock(p, &rf);
5387 	/*
5388 	 * Must be ->curr _and_ ->on_rq.  If dequeued, we would
5389 	 * project cycles that may never be accounted to this
5390 	 * thread, breaking clock_gettime().
5391 	 */
5392 	if (task_current(rq, p) && task_on_rq_queued(p)) {
5393 		prefetch_curr_exec_start(p);
5394 		update_rq_clock(rq);
5395 		p->sched_class->update_curr(rq);
5396 	}
5397 	ns = p->se.sum_exec_runtime;
5398 	task_rq_unlock(rq, p, &rf);
5399 
5400 	return ns;
5401 }
5402 
5403 #ifdef CONFIG_SCHED_DEBUG
5404 static u64 cpu_resched_latency(struct rq *rq)
5405 {
5406 	int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms);
5407 	u64 resched_latency, now = rq_clock(rq);
5408 	static bool warned_once;
5409 
5410 	if (sysctl_resched_latency_warn_once && warned_once)
5411 		return 0;
5412 
5413 	if (!need_resched() || !latency_warn_ms)
5414 		return 0;
5415 
5416 	if (system_state == SYSTEM_BOOTING)
5417 		return 0;
5418 
5419 	if (!rq->last_seen_need_resched_ns) {
5420 		rq->last_seen_need_resched_ns = now;
5421 		rq->ticks_without_resched = 0;
5422 		return 0;
5423 	}
5424 
5425 	rq->ticks_without_resched++;
5426 	resched_latency = now - rq->last_seen_need_resched_ns;
5427 	if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC)
5428 		return 0;
5429 
5430 	warned_once = true;
5431 
5432 	return resched_latency;
5433 }
5434 
5435 static int __init setup_resched_latency_warn_ms(char *str)
5436 {
5437 	long val;
5438 
5439 	if ((kstrtol(str, 0, &val))) {
5440 		pr_warn("Unable to set resched_latency_warn_ms\n");
5441 		return 1;
5442 	}
5443 
5444 	sysctl_resched_latency_warn_ms = val;
5445 	return 1;
5446 }
5447 __setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms);
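
/*
 * Illustrative note, not part of the kernel sources: the threshold handled
 * above can be set on the kernel command line, e.g.:
 *
 *	resched_latency_warn_ms=200
 *
 * and a value of 0 disables the latency check entirely (see the
 * !latency_warn_ms test in cpu_resched_latency()).
 */
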
5448 #else
5449 static inline u64 cpu_resched_latency(struct rq *rq) { return 0; }
5450 #endif /* CONFIG_SCHED_DEBUG */
5451 
5452 /*
5453  * This function gets called by the timer code, with HZ frequency.
5454  * We call it with interrupts disabled.
5455  */
5456 void sched_tick(void)
5457 {
5458 	int cpu = smp_processor_id();
5459 	struct rq *rq = cpu_rq(cpu);
5460 	struct task_struct *curr;
5461 	struct rq_flags rf;
5462 	unsigned long hw_pressure;
5463 	u64 resched_latency;
5464 
5465 	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
5466 		arch_scale_freq_tick();
5467 
5468 	sched_clock_tick();
5469 
5470 	rq_lock(rq, &rf);
5471 
5472 	curr = rq->curr;
5473 	psi_account_irqtime(rq, curr, NULL);
5474 
5475 	update_rq_clock(rq);
5476 	hw_pressure = arch_scale_hw_pressure(cpu_of(rq));
5477 	update_hw_load_avg(rq_clock_task(rq), rq, hw_pressure);
5478 	curr->sched_class->task_tick(rq, curr, 0);
5479 	if (sched_feat(LATENCY_WARN))
5480 		resched_latency = cpu_resched_latency(rq);
5481 	calc_global_load_tick(rq);
5482 	sched_core_tick(rq);
5483 	task_tick_mm_cid(rq, curr);
5484 
5485 	rq_unlock(rq, &rf);
5486 
5487 	if (sched_feat(LATENCY_WARN) && resched_latency)
5488 		resched_latency_warn(cpu, resched_latency);
5489 
5490 	perf_event_task_tick();
5491 
5492 	if (curr->flags & PF_WQ_WORKER)
5493 		wq_worker_tick(curr);
5494 
5495 #ifdef CONFIG_SMP
5496 	rq->idle_balance = idle_cpu(cpu);
5497 	sched_balance_trigger(rq);
5498 #endif
5499 }
5500 
5501 #ifdef CONFIG_NO_HZ_FULL
5502 
5503 struct tick_work {
5504 	int			cpu;
5505 	atomic_t		state;
5506 	struct delayed_work	work;
5507 };
5508 /* Values for ->state, see diagram below. */
5509 #define TICK_SCHED_REMOTE_OFFLINE	0
5510 #define TICK_SCHED_REMOTE_OFFLINING	1
5511 #define TICK_SCHED_REMOTE_RUNNING	2
5512 
5513 /*
5514  * State diagram for ->state:
5515  *
5516  *
5517  *          TICK_SCHED_REMOTE_OFFLINE
5518  *                    |   ^
5519  *                    |   |
5520  *                    |   | sched_tick_remote()
5521  *                    |   |
5522  *                    |   |
5523  *                    +--TICK_SCHED_REMOTE_OFFLINING
5524  *                    |   ^
5525  *                    |   |
5526  * sched_tick_start() |   | sched_tick_stop()
5527  *                    |   |
5528  *                    V   |
5529  *          TICK_SCHED_REMOTE_RUNNING
5530  *
5531  *
5532  * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
5533  * and sched_tick_start() are happy to leave the state in RUNNING.
5534  */
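
/*
 * For reference, the transitions in the diagram above map onto the atomic
 * operations used by the functions below:
 *
 *	sched_tick_start():  atomic_xchg(&state, TICK_SCHED_REMOTE_RUNNING)
 *	sched_tick_stop():   atomic_xchg(&state, TICK_SCHED_REMOTE_OFFLINING)
 *	sched_tick_remote(): atomic_fetch_add_unless(&state, -1, RUNNING),
 *			     which steps OFFLINING -> OFFLINE and leaves
 *			     RUNNING untouched.
 */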
5535 
5536 static struct tick_work __percpu *tick_work_cpu;
5537 
5538 static void sched_tick_remote(struct work_struct *work)
5539 {
5540 	struct delayed_work *dwork = to_delayed_work(work);
5541 	struct tick_work *twork = container_of(dwork, struct tick_work, work);
5542 	int cpu = twork->cpu;
5543 	struct rq *rq = cpu_rq(cpu);
5544 	int os;
5545 
5546 	/*
5547 	 * Handle the tick only if it appears the remote CPU is running in full
5548 	 * dynticks mode. The check is racy by nature, but missing a tick or
5549 	 * having one too much is no big deal because the scheduler tick updates
5550 	 * having one too many is no big deal because the scheduler tick updates
5551 	 * of when exactly it is running.
5552 	 */
5553 	if (tick_nohz_tick_stopped_cpu(cpu)) {
5554 		guard(rq_lock_irq)(rq);
5555 		struct task_struct *curr = rq->curr;
5556 
5557 		if (cpu_online(cpu)) {
5558 			update_rq_clock(rq);
5559 
5560 			if (!is_idle_task(curr)) {
5561 				/*
5562 				 * Make sure the next tick runs within a
5563 				 * reasonable amount of time.
5564 				 */
5565 				u64 delta = rq_clock_task(rq) - curr->se.exec_start;
5566 				WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
5567 			}
5568 			curr->sched_class->task_tick(rq, curr, 0);
5569 
5570 			calc_load_nohz_remote(rq);
5571 		}
5572 	}
5573 
5574 	/*
5575 	 * Run the remote tick once per second (1Hz). This arbitrary
5576 	 * frequency is low enough to avoid overload but high enough
5577 	 * to keep scheduler internal stats reasonably up to date.  But
5578 	 * first update state to reflect hotplug activity if required.
5579 	 */
5580 	os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
5581 	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
5582 	if (os == TICK_SCHED_REMOTE_RUNNING)
5583 		queue_delayed_work(system_unbound_wq, dwork, HZ);
5584 }
5585 
5586 static void sched_tick_start(int cpu)
5587 {
5588 	int os;
5589 	struct tick_work *twork;
5590 
5591 	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
5592 		return;
5593 
5594 	WARN_ON_ONCE(!tick_work_cpu);
5595 
5596 	twork = per_cpu_ptr(tick_work_cpu, cpu);
5597 	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
5598 	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
5599 	if (os == TICK_SCHED_REMOTE_OFFLINE) {
5600 		twork->cpu = cpu;
5601 		INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
5602 		queue_delayed_work(system_unbound_wq, &twork->work, HZ);
5603 	}
5604 }
5605 
5606 #ifdef CONFIG_HOTPLUG_CPU
5607 static void sched_tick_stop(int cpu)
5608 {
5609 	struct tick_work *twork;
5610 	int os;
5611 
5612 	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
5613 		return;
5614 
5615 	WARN_ON_ONCE(!tick_work_cpu);
5616 
5617 	twork = per_cpu_ptr(tick_work_cpu, cpu);
5618 	/* There cannot be competing actions, but don't rely on stop-machine. */
5619 	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
5620 	WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
5621 	/* Don't cancel, as this would mess up the state machine. */
5622 }
5623 #endif /* CONFIG_HOTPLUG_CPU */
5624 
5625 int __init sched_tick_offload_init(void)
5626 {
5627 	tick_work_cpu = alloc_percpu(struct tick_work);
5628 	BUG_ON(!tick_work_cpu);
5629 	return 0;
5630 }
5631 
5632 #else /* !CONFIG_NO_HZ_FULL */
5633 static inline void sched_tick_start(int cpu) { }
5634 static inline void sched_tick_stop(int cpu) { }
5635 #endif
5636 
5637 #if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
5638 				defined(CONFIG_TRACE_PREEMPT_TOGGLE))
5639 /*
5640  * If the value passed in is equal to the current preempt count
5641  * then we just disabled preemption. Start timing the latency.
5642  */
5643 static inline void preempt_latency_start(int val)
5644 {
5645 	if (preempt_count() == val) {
5646 		unsigned long ip = get_lock_parent_ip();
5647 #ifdef CONFIG_DEBUG_PREEMPT
5648 		current->preempt_disable_ip = ip;
5649 #endif
5650 		trace_preempt_off(CALLER_ADDR0, ip);
5651 	}
5652 }
5653 
5654 void preempt_count_add(int val)
5655 {
5656 #ifdef CONFIG_DEBUG_PREEMPT
5657 	/*
5658 	 * Underflow?
5659 	 */
5660 	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
5661 		return;
5662 #endif
5663 	__preempt_count_add(val);
5664 #ifdef CONFIG_DEBUG_PREEMPT
5665 	/*
5666 	 * Spinlock count overflowing soon?
5667 	 */
5668 	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
5669 				PREEMPT_MASK - 10);
5670 #endif
5671 	preempt_latency_start(val);
5672 }
5673 EXPORT_SYMBOL(preempt_count_add);
5674 NOKPROBE_SYMBOL(preempt_count_add);
5675 
5676 /*
5677  * If the value passed in is equal to the current preempt count
5678  * then we just enabled preemption. Stop timing the latency.
5679  */
5680 static inline void preempt_latency_stop(int val)
5681 {
5682 	if (preempt_count() == val)
5683 		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
5684 }
5685 
5686 void preempt_count_sub(int val)
5687 {
5688 #ifdef CONFIG_DEBUG_PREEMPT
5689 	/*
5690 	 * Underflow?
5691 	 */
5692 	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
5693 		return;
5694 	/*
5695 	 * Is the spinlock portion underflowing?
5696 	 */
5697 	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
5698 			!(preempt_count() & PREEMPT_MASK)))
5699 		return;
5700 #endif
5701 
5702 	preempt_latency_stop(val);
5703 	__preempt_count_sub(val);
5704 }
5705 EXPORT_SYMBOL(preempt_count_sub);
5706 NOKPROBE_SYMBOL(preempt_count_sub);
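
/*
 * Illustrative sketch, not part of the kernel sources: with one of the
 * config options guarding this block enabled, preempt_disable() and
 * preempt_enable() funnel through the instrumented counters above, roughly:
 *
 *	preempt_disable();	// preempt_count_add(1), latency timing starts
 *	... critical section ...
 *	preempt_enable();	// preempt_count_sub(1), latency timing stops,
 *				// followed by a reschedule check
 */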
5707 
5708 #else
5709 static inline void preempt_latency_start(int val) { }
5710 static inline void preempt_latency_stop(int val) { }
5711 #endif
5712 
5713 static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
5714 {
5715 #ifdef CONFIG_DEBUG_PREEMPT
5716 	return p->preempt_disable_ip;
5717 #else
5718 	return 0;
5719 #endif
5720 }
5721 
5722 /*
5723  * Print scheduling while atomic bug:
5724  */
5725 static noinline void __schedule_bug(struct task_struct *prev)
5726 {
5727 	/* Save this before calling printk(), since that will clobber it */
5728 	unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
5729 
5730 	if (oops_in_progress)
5731 		return;
5732 
5733 	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
5734 		prev->comm, prev->pid, preempt_count());
5735 
5736 	debug_show_held_locks(prev);
5737 	print_modules();
5738 	if (irqs_disabled())
5739 		print_irqtrace_events(prev);
5740 	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
5741 		pr_err("Preemption disabled at:");
5742 		print_ip_sym(KERN_ERR, preempt_disable_ip);
5743 	}
5744 	check_panic_on_warn("scheduling while atomic");
5745 
5746 	dump_stack();
5747 	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
5748 }
5749 
5750 /*
5751  * Various schedule()-time debugging checks and statistics:
5752  */
5753 static inline void schedule_debug(struct task_struct *prev, bool preempt)
5754 {
5755 #ifdef CONFIG_SCHED_STACK_END_CHECK
5756 	if (task_stack_end_corrupted(prev))
5757 		panic("corrupted stack end detected inside scheduler\n");
5758 
5759 	if (task_scs_end_corrupted(prev))
5760 		panic("corrupted shadow stack detected inside scheduler\n");
5761 #endif
5762 
5763 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
5764 	if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) {
5765 		printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
5766 			prev->comm, prev->pid, prev->non_block_count);
5767 		dump_stack();
5768 		add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
5769 	}
5770 #endif
5771 
5772 	if (unlikely(in_atomic_preempt_off())) {
5773 		__schedule_bug(prev);
5774 		preempt_count_set(PREEMPT_DISABLED);
5775 	}
5776 	rcu_sleep_check();
5777 	SCHED_WARN_ON(ct_state() == CONTEXT_USER);
5778 
5779 	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
5780 
5781 	schedstat_inc(this_rq()->sched_count);
5782 }
5783 
5784 static void put_prev_task_balance(struct rq *rq, struct task_struct *prev,
5785 				  struct rq_flags *rf)
5786 {
5787 #ifdef CONFIG_SMP
5788 	const struct sched_class *class;
5789 	/*
5790 	 * We must do the balancing pass before put_prev_task(), such
5791 	 * that when we release the rq->lock the task is in the same
5792 	 * state as before we took rq->lock.
5793 	 *
5794 	 * We can terminate the balance pass as soon as we know there is
5795 	 * a runnable task of @class priority or higher.
5796 	 */
5797 	for_class_range(class, prev->sched_class, &idle_sched_class) {
5798 		if (class->balance(rq, prev, rf))
5799 			break;
5800 	}
5801 #endif
5802 
5803 	put_prev_task(rq, prev);
5804 
5805 	/*
5806 	 * We've updated @prev and no longer need the server link; clear it.
5807 	 * Must be done before ->pick_next_task() because that can (re)set
5808 	 * ->dl_server.
5809 	 */
5810 	if (prev->dl_server)
5811 		prev->dl_server = NULL;
5812 }
5813 
5814 /*
5815  * Pick up the highest-prio task:
5816  */
5817 static inline struct task_struct *
5818 __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
5819 {
5820 	const struct sched_class *class;
5821 	struct task_struct *p;
5822 
5823 	/*
5824 	 * Optimization: we know that if all tasks are in the fair class we can
5825 	 * call that function directly, but only if the @prev task wasn't of a
5826  * higher scheduling class, because otherwise those classes lose the
5827 	 * opportunity to pull in more work from other CPUs.
5828 	 */
5829 	if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) &&
5830 		   rq->nr_running == rq->cfs.h_nr_running)) {
5831 
5832 		p = pick_next_task_fair(rq, prev, rf);
5833 		if (unlikely(p == RETRY_TASK))
5834 			goto restart;
5835 
5836 		/* Assume the next prioritized class is idle_sched_class */
5837 		if (!p) {
5838 			put_prev_task(rq, prev);
5839 			p = pick_next_task_idle(rq);
5840 		}
5841 
5842 		/*
5843 		 * This is the fast path; it cannot be a DL server pick;
5844 		 * therefore even if @p == @prev, ->dl_server must be NULL.
5845 		 */
5846 		if (p->dl_server)
5847 			p->dl_server = NULL;
5848 
5849 		return p;
5850 	}
5851 
5852 restart:
5853 	put_prev_task_balance(rq, prev, rf);
5854 
5855 	for_each_class(class) {
5856 		p = class->pick_next_task(rq);
5857 		if (p)
5858 			return p;
5859 	}
5860 
5861 	BUG(); /* The idle class should always have a runnable task. */
5862 }
5863 
5864 #ifdef CONFIG_SCHED_CORE
5865 static inline bool is_task_rq_idle(struct task_struct *t)
5866 {
5867 	return (task_rq(t)->idle == t);
5868 }
5869 
5870 static inline bool cookie_equals(struct task_struct *a, unsigned long cookie)
5871 {
5872 	return is_task_rq_idle(a) || (a->core_cookie == cookie);
5873 }
5874 
5875 static inline bool cookie_match(struct task_struct *a, struct task_struct *b)
5876 {
5877 	if (is_task_rq_idle(a) || is_task_rq_idle(b))
5878 		return true;
5879 
5880 	return a->core_cookie == b->core_cookie;
5881 }
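
/*
 * Illustrative example: two sibling picks may run concurrently on one core
 * only when their cookies match, with the idle task acting as a wildcard.
 * E.g., assuming tasks A and B carry cookie 0x1 and task C carries 0x2:
 *
 *	cookie_match(A, B)    -> true   (same cookie)
 *	cookie_match(A, C)    -> false  (different cookies)
 *	cookie_match(A, idle) -> true   (idle is always compatible)
 */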
5882 
5883 static inline struct task_struct *pick_task(struct rq *rq)
5884 {
5885 	const struct sched_class *class;
5886 	struct task_struct *p;
5887 
5888 	for_each_class(class) {
5889 		p = class->pick_task(rq);
5890 		if (p)
5891 			return p;
5892 	}
5893 
5894 	BUG(); /* The idle class should always have a runnable task. */
5895 }
5896 
5897 extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
5898 
5899 static void queue_core_balance(struct rq *rq);
5900 
5901 static struct task_struct *
5902 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
5903 {
5904 	struct task_struct *next, *p, *max = NULL;
5905 	const struct cpumask *smt_mask;
5906 	bool fi_before = false;
5907 	bool core_clock_updated = (rq == rq->core);
5908 	unsigned long cookie;
5909 	int i, cpu, occ = 0;
5910 	struct rq *rq_i;
5911 	bool need_sync;
5912 
5913 	if (!sched_core_enabled(rq))
5914 		return __pick_next_task(rq, prev, rf);
5915 
5916 	cpu = cpu_of(rq);
5917 
5918 	/* Stopper task is switching into idle, no need for core-wide selection. */
5919 	if (cpu_is_offline(cpu)) {
5920 		/*
5921 		 * Reset core_pick so that we don't enter the fastpath when
5922 		 * coming online. core_pick would already be migrated to
5923 		 * another cpu during offline.
5924 		 */
5925 		rq->core_pick = NULL;
5926 		return __pick_next_task(rq, prev, rf);
5927 	}
5928 
5929 	/*
5930 	 * If there were no {en,de}queues since we picked (IOW, the task
5931 	 * pointers are all still valid), and we haven't scheduled the last
5932 	 * pick yet, do so now.
5933 	 *
5934 	 * rq->core_pick can be NULL if no selection was made for a CPU because
5935 	 * it was either offline or went offline during a sibling's core-wide
5936 	 * selection. In this case, do a core-wide selection.
5937 	 */
5938 	if (rq->core->core_pick_seq == rq->core->core_task_seq &&
5939 	    rq->core->core_pick_seq != rq->core_sched_seq &&
5940 	    rq->core_pick) {
5941 		WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq);
5942 
5943 		next = rq->core_pick;
5944 		if (next != prev) {
5945 			put_prev_task(rq, prev);
5946 			set_next_task(rq, next);
5947 		}
5948 
5949 		rq->core_pick = NULL;
5950 		goto out;
5951 	}
5952 
5953 	put_prev_task_balance(rq, prev, rf);
5954 
5955 	smt_mask = cpu_smt_mask(cpu);
5956 	need_sync = !!rq->core->core_cookie;
5957 
5958 	/* reset state */
5959 	rq->core->core_cookie = 0UL;
5960 	if (rq->core->core_forceidle_count) {
5961 		if (!core_clock_updated) {
5962 			update_rq_clock(rq->core);
5963 			core_clock_updated = true;
5964 		}
5965 		sched_core_account_forceidle(rq);
5966 		/* reset after accounting force idle */
5967 		rq->core->core_forceidle_start = 0;
5968 		rq->core->core_forceidle_count = 0;
5969 		rq->core->core_forceidle_occupation = 0;
5970 		need_sync = true;
5971 		fi_before = true;
5972 	}
5973 
5974 	/*
5975 	 * core->core_task_seq, core->core_pick_seq, rq->core_sched_seq
5976 	 *
5977 	 * @task_seq guards the task state ({en,de}queues)
5978 	 * @pick_seq is the @task_seq we did a selection on
5979 	 * @sched_seq is the @pick_seq we scheduled
5980 	 *
5981 	 * However, preemptions can cause multiple picks on the same task set.
5982 	 * 'Fix' this by also increasing @task_seq for every pick.
5983 	 */
5984 	rq->core->core_task_seq++;
5985 
5986 	/*
5987 	 * Optimize for the common case where this CPU has no cookies
5988 	 * and there are no cookied tasks running on siblings.
5989 	 */
5990 	if (!need_sync) {
5991 		next = pick_task(rq);
5992 		if (!next->core_cookie) {
5993 			rq->core_pick = NULL;
5994 			/*
5995 			 * For robustness, update the min_vruntime_fi for
5996 			 * unconstrained picks as well.
5997 			 */
5998 			WARN_ON_ONCE(fi_before);
5999 			task_vruntime_update(rq, next, false);
6000 			goto out_set_next;
6001 		}
6002 	}
6003 
6004 	/*
6005 	 * For each thread: do the regular task pick and find the max prio task
6006 	 * amongst them.
6007 	 *
6008 	 * Tie-break prio towards the current CPU
6009 	 */
6010 	for_each_cpu_wrap(i, smt_mask, cpu) {
6011 		rq_i = cpu_rq(i);
6012 
6013 		/*
6014 		 * Current cpu always has its clock updated on entrance to
6015 		 * pick_next_task(). If the current cpu is not the core,
6016 		 * the core may also have been updated above.
6017 		 */
6018 		if (i != cpu && (rq_i != rq->core || !core_clock_updated))
6019 			update_rq_clock(rq_i);
6020 
6021 		p = rq_i->core_pick = pick_task(rq_i);
6022 		if (!max || prio_less(max, p, fi_before))
6023 			max = p;
6024 	}
6025 
6026 	cookie = rq->core->core_cookie = max->core_cookie;
6027 
6028 	/*
6029 	 * For each thread: try and find a runnable task that matches @max or
6030 	 * force idle.
6031 	 */
6032 	for_each_cpu(i, smt_mask) {
6033 		rq_i = cpu_rq(i);
6034 		p = rq_i->core_pick;
6035 
6036 		if (!cookie_equals(p, cookie)) {
6037 			p = NULL;
6038 			if (cookie)
6039 				p = sched_core_find(rq_i, cookie);
6040 			if (!p)
6041 				p = idle_sched_class.pick_task(rq_i);
6042 		}
6043 
6044 		rq_i->core_pick = p;
6045 
6046 		if (p == rq_i->idle) {
6047 			if (rq_i->nr_running) {
6048 				rq->core->core_forceidle_count++;
6049 				if (!fi_before)
6050 					rq->core->core_forceidle_seq++;
6051 			}
6052 		} else {
6053 			occ++;
6054 		}
6055 	}
6056 
6057 	if (schedstat_enabled() && rq->core->core_forceidle_count) {
6058 		rq->core->core_forceidle_start = rq_clock(rq->core);
6059 		rq->core->core_forceidle_occupation = occ;
6060 	}
6061 
6062 	rq->core->core_pick_seq = rq->core->core_task_seq;
6063 	next = rq->core_pick;
6064 	rq->core_sched_seq = rq->core->core_pick_seq;
6065 
6066 	/* Something should have been selected for current CPU */
6067 	WARN_ON_ONCE(!next);
6068 
6069 	/*
6070 	 * Reschedule siblings
6071 	 *
6072 	 * NOTE: L1TF -- at this point we're no longer running the old task and
6073 	 * sending an IPI (below) ensures the sibling will no longer be running
6074 	 * their task. This ensures there is no inter-sibling overlap between
6075 	 * non-matching user state.
6076 	 */
6077 	for_each_cpu(i, smt_mask) {
6078 		rq_i = cpu_rq(i);
6079 
6080 		/*
6081 		 * An online sibling might have gone offline before a task
6082 		 * could be picked for it, or it might be offline but later
6083 		 * happen to come online, but it's too late and nothing was
6084 		 * picked for it.  That's OK - it will pick tasks for itself,
6085 		 * so ignore it.
6086 		 */
6087 		if (!rq_i->core_pick)
6088 			continue;
6089 
6090 		/*
6091 		 * Update for new !FI->FI transitions, or if continuing to be in !FI:
6092 		 * fi_before     fi      update?
6093 		 *  0            0       1
6094 		 *  0            1       1
6095 		 *  1            0       1
6096 		 *  1            1       0
6097 		 */
6098 		if (!(fi_before && rq->core->core_forceidle_count))
6099 			task_vruntime_update(rq_i, rq_i->core_pick, !!rq->core->core_forceidle_count);
6100 
6101 		rq_i->core_pick->core_occupation = occ;
6102 
6103 		if (i == cpu) {
6104 			rq_i->core_pick = NULL;
6105 			continue;
6106 		}
6107 
6108 		/* Did we break L1TF mitigation requirements? */
6109 		WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick));
6110 
6111 		if (rq_i->curr == rq_i->core_pick) {
6112 			rq_i->core_pick = NULL;
6113 			continue;
6114 		}
6115 
6116 		resched_curr(rq_i);
6117 	}
6118 
6119 out_set_next:
6120 	set_next_task(rq, next);
6121 out:
6122 	if (rq->core->core_forceidle_count && next == rq->idle)
6123 		queue_core_balance(rq);
6124 
6125 	return next;
6126 }
6127 
6128 static bool try_steal_cookie(int this, int that)
6129 {
6130 	struct rq *dst = cpu_rq(this), *src = cpu_rq(that);
6131 	struct task_struct *p;
6132 	unsigned long cookie;
6133 	bool success = false;
6134 
6135 	guard(irq)();
6136 	guard(double_rq_lock)(dst, src);
6137 
6138 	cookie = dst->core->core_cookie;
6139 	if (!cookie)
6140 		return false;
6141 
6142 	if (dst->curr != dst->idle)
6143 		return false;
6144 
6145 	p = sched_core_find(src, cookie);
6146 	if (!p)
6147 		return false;
6148 
6149 	do {
6150 		if (p == src->core_pick || p == src->curr)
6151 			goto next;
6152 
6153 		if (!is_cpu_allowed(p, this))
6154 			goto next;
6155 
6156 		if (p->core_occupation > dst->idle->core_occupation)
6157 			goto next;
6158 		/*
6159 		 * sched_core_find() and sched_core_next() will ensure
6160 		 * that task @p is not throttled now, but we also need to
6161 		 * check whether the runqueue of the destination CPU is
6162 		 * being throttled.
6163 		 */
6164 		if (sched_task_is_throttled(p, this))
6165 			goto next;
6166 
6167 		deactivate_task(src, p, 0);
6168 		set_task_cpu(p, this);
6169 		activate_task(dst, p, 0);
6170 
6171 		resched_curr(dst);
6172 
6173 		success = true;
6174 		break;
6175 
6176 next:
6177 		p = sched_core_next(p, cookie);
6178 	} while (p);
6179 
6180 	return success;
6181 }
6182 
6183 static bool steal_cookie_task(int cpu, struct sched_domain *sd)
6184 {
6185 	int i;
6186 
6187 	for_each_cpu_wrap(i, sched_domain_span(sd), cpu + 1) {
6188 		if (i == cpu)
6189 			continue;
6190 
6191 		if (need_resched())
6192 			break;
6193 
6194 		if (try_steal_cookie(cpu, i))
6195 			return true;
6196 	}
6197 
6198 	return false;
6199 }
6200 
6201 static void sched_core_balance(struct rq *rq)
6202 {
6203 	struct sched_domain *sd;
6204 	int cpu = cpu_of(rq);
6205 
6206 	guard(preempt)();
6207 	guard(rcu)();
6208 
6209 	raw_spin_rq_unlock_irq(rq);
6210 	for_each_domain(cpu, sd) {
6211 		if (need_resched())
6212 			break;
6213 
6214 		if (steal_cookie_task(cpu, sd))
6215 			break;
6216 	}
6217 	raw_spin_rq_lock_irq(rq);
6218 }
6219 
6220 static DEFINE_PER_CPU(struct balance_callback, core_balance_head);
6221 
6222 static void queue_core_balance(struct rq *rq)
6223 {
6224 	if (!sched_core_enabled(rq))
6225 		return;
6226 
6227 	if (!rq->core->core_cookie)
6228 		return;
6229 
6230 	if (!rq->nr_running) /* not forced idle */
6231 		return;
6232 
6233 	queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance);
6234 }
6235 
6236 DEFINE_LOCK_GUARD_1(core_lock, int,
6237 		    sched_core_lock(*_T->lock, &_T->flags),
6238 		    sched_core_unlock(*_T->lock, &_T->flags),
6239 		    unsigned long flags)
6240 
6241 static void sched_core_cpu_starting(unsigned int cpu)
6242 {
6243 	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
6244 	struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
6245 	int t;
6246 
6247 	guard(core_lock)(&cpu);
6248 
6249 	WARN_ON_ONCE(rq->core != rq);
6250 
6251 	/* if we're the first, we'll be our own leader */
6252 	if (cpumask_weight(smt_mask) == 1)
6253 		return;
6254 
6255 	/* find the leader */
6256 	for_each_cpu(t, smt_mask) {
6257 		if (t == cpu)
6258 			continue;
6259 		rq = cpu_rq(t);
6260 		if (rq->core == rq) {
6261 			core_rq = rq;
6262 			break;
6263 		}
6264 	}
6265 
6266 	if (WARN_ON_ONCE(!core_rq)) /* whoopsie */
6267 		return;
6268 
6269 	/* install and validate core_rq */
6270 	for_each_cpu(t, smt_mask) {
6271 		rq = cpu_rq(t);
6272 
6273 		if (t == cpu)
6274 			rq->core = core_rq;
6275 
6276 		WARN_ON_ONCE(rq->core != core_rq);
6277 	}
6278 }
6279 
6280 static void sched_core_cpu_deactivate(unsigned int cpu)
6281 {
6282 	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
6283 	struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
6284 	int t;
6285 
6286 	guard(core_lock)(&cpu);
6287 
6288 	/* if we're the last man standing, nothing to do */
6289 	if (cpumask_weight(smt_mask) == 1) {
6290 		WARN_ON_ONCE(rq->core != rq);
6291 		return;
6292 	}
6293 
6294 	/* if we're not the leader, nothing to do */
6295 	if (rq->core != rq)
6296 		return;
6297 
6298 	/* find a new leader */
6299 	for_each_cpu(t, smt_mask) {
6300 		if (t == cpu)
6301 			continue;
6302 		core_rq = cpu_rq(t);
6303 		break;
6304 	}
6305 
6306 	if (WARN_ON_ONCE(!core_rq)) /* impossible */
6307 		return;
6308 
6309 	/* copy the shared state to the new leader */
6310 	core_rq->core_task_seq             = rq->core_task_seq;
6311 	core_rq->core_pick_seq             = rq->core_pick_seq;
6312 	core_rq->core_cookie               = rq->core_cookie;
6313 	core_rq->core_forceidle_count      = rq->core_forceidle_count;
6314 	core_rq->core_forceidle_seq        = rq->core_forceidle_seq;
6315 	core_rq->core_forceidle_occupation = rq->core_forceidle_occupation;
6316 
6317 	/*
6318 	 * Accounting edge for forced idle is handled in pick_next_task().
6319 	 * Don't need another one here, since the hotplug thread shouldn't
6320 	 * have a cookie.
6321 	 */
6322 	core_rq->core_forceidle_start = 0;
6323 
6324 	/* install new leader */
6325 	for_each_cpu(t, smt_mask) {
6326 		rq = cpu_rq(t);
6327 		rq->core = core_rq;
6328 	}
6329 }
6330 
6331 static inline void sched_core_cpu_dying(unsigned int cpu)
6332 {
6333 	struct rq *rq = cpu_rq(cpu);
6334 
6335 	if (rq->core != rq)
6336 		rq->core = rq;
6337 }
6338 
6339 #else /* !CONFIG_SCHED_CORE */
6340 
6341 static inline void sched_core_cpu_starting(unsigned int cpu) {}
6342 static inline void sched_core_cpu_deactivate(unsigned int cpu) {}
6343 static inline void sched_core_cpu_dying(unsigned int cpu) {}
6344 
6345 static struct task_struct *
6346 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6347 {
6348 	return __pick_next_task(rq, prev, rf);
6349 }
6350 
6351 #endif /* CONFIG_SCHED_CORE */
6352 
6353 /*
6354  * Constants for the sched_mode argument of __schedule().
6355  *
6356  * The mode argument allows RT enabled kernels to differentiate a
6357  * preemption from blocking on an 'sleeping' spin/rwlock. Note that
6358  * SM_MASK_PREEMPT for !RT has all bits set, which allows the compiler to
6359  * optimize the AND operation out and just check for zero.
6360  */
6361 #define SM_NONE			0x0
6362 #define SM_PREEMPT		0x1
6363 #define SM_RTLOCK_WAIT		0x2
6364 
6365 #ifndef CONFIG_PREEMPT_RT
6366 # define SM_MASK_PREEMPT	(~0U)
6367 #else
6368 # define SM_MASK_PREEMPT	SM_PREEMPT
6369 #endif
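
/*
 * The "check for zero" optimization mentioned above refers to the test in
 * __schedule() below; since SM_MASK_PREEMPT is ~0U for !RT,
 *
 *	if (!(sched_mode & SM_MASK_PREEMPT) && prev_state)
 *
 * reduces to a plain
 *
 *	if (!sched_mode && prev_state)
 */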
6370 
6371 /*
6372  * __schedule() is the main scheduler function.
6373  *
6374  * The main means of driving the scheduler and thus entering this function are:
6375  *
6376  *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
6377  *
6378  *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
6379  *      paths. For example, see arch/x86/entry_64.S.
6380  *
6381  *      To drive preemption between tasks, the scheduler sets the flag in timer
6382  *      interrupt handler sched_tick().
6383  *
6384  *   3. Wakeups don't really cause entry into schedule(). They add a
6385  *      task to the run-queue and that's it.
6386  *
6387  *      Now, if the new task added to the run-queue preempts the current
6388  *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
6389  *      called on the nearest possible occasion:
6390  *
6391  *       - If the kernel is preemptible (CONFIG_PREEMPTION=y):
6392  *
6393  *         - in syscall or exception context, at the next outermost
6394  *           preempt_enable(). (this might be as soon as the wake_up()'s
6395  *           spin_unlock()!)
6396  *
6397  *         - in IRQ context, return from interrupt-handler to
6398  *           preemptible context
6399  *
6400  *       - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
6401  *         then at the next:
6402  *
6403  *          - cond_resched() call
6404  *          - explicit schedule() call
6405  *          - return from syscall or exception to user-space
6406  *          - return from interrupt-handler to user-space
6407  *
6408  * WARNING: must be called with preemption disabled!
6409  */
6410 static void __sched notrace __schedule(unsigned int sched_mode)
6411 {
6412 	struct task_struct *prev, *next;
6413 	unsigned long *switch_count;
6414 	unsigned long prev_state;
6415 	struct rq_flags rf;
6416 	struct rq *rq;
6417 	int cpu;
6418 
6419 	cpu = smp_processor_id();
6420 	rq = cpu_rq(cpu);
6421 	prev = rq->curr;
6422 
6423 	schedule_debug(prev, !!sched_mode);
6424 
6425 	if (sched_feat(HRTICK) || sched_feat(HRTICK_DL))
6426 		hrtick_clear(rq);
6427 
6428 	local_irq_disable();
6429 	rcu_note_context_switch(!!sched_mode);
6430 
6431 	/*
6432 	 * Make sure that signal_pending_state()->signal_pending() below
6433 	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
6434 	 * done by the caller to avoid the race with signal_wake_up():
6435 	 *
6436 	 * __set_current_state(@state)		signal_wake_up()
6437 	 * schedule()				  set_tsk_thread_flag(p, TIF_SIGPENDING)
6438 	 *					  wake_up_state(p, state)
6439 	 *   LOCK rq->lock			    LOCK p->pi_state
6440 	 *   smp_mb__after_spinlock()		    smp_mb__after_spinlock()
6441 	 *     if (signal_pending_state())	    if (p->state & @state)
6442 	 *
6443 	 * Also, the membarrier system call requires a full memory barrier
6444 	 * after coming from user-space, before storing to rq->curr; this
6445 	 * barrier matches a full barrier in the proximity of the membarrier
6446 	 * system call exit.
6447 	 */
6448 	rq_lock(rq, &rf);
6449 	smp_mb__after_spinlock();
6450 
6451 	/* Promote REQ to ACT */
6452 	rq->clock_update_flags <<= 1;
6453 	update_rq_clock(rq);
6454 	rq->clock_update_flags = RQCF_UPDATED;
6455 
6456 	switch_count = &prev->nivcsw;
6457 
6458 	/*
6459 	 * We must load prev->state once (task_struct::state is volatile), such
6460 	 * that we form a control dependency vs deactivate_task() below.
6461 	 */
6462 	prev_state = READ_ONCE(prev->__state);
6463 	if (!(sched_mode & SM_MASK_PREEMPT) && prev_state) {
6464 		if (signal_pending_state(prev_state, prev)) {
6465 			WRITE_ONCE(prev->__state, TASK_RUNNING);
6466 		} else {
6467 			prev->sched_contributes_to_load =
6468 				(prev_state & TASK_UNINTERRUPTIBLE) &&
6469 				!(prev_state & TASK_NOLOAD) &&
6470 				!(prev_state & TASK_FROZEN);
6471 
6472 			if (prev->sched_contributes_to_load)
6473 				rq->nr_uninterruptible++;
6474 
6475 			/*
6476 			 * __schedule()			ttwu()
6477 			 *   prev_state = prev->state;    if (p->on_rq && ...)
6478 			 *   if (prev_state)		    goto out;
6479 			 *     p->on_rq = 0;		  smp_acquire__after_ctrl_dep();
6480 			 *				  p->state = TASK_WAKING
6481 			 *
6482 			 * Where __schedule() and ttwu() have matching control dependencies.
6483 			 *
6484 			 * After this, schedule() must not care about p->state any more.
6485 			 */
6486 			deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK);
6487 
6488 			if (prev->in_iowait) {
6489 				atomic_inc(&rq->nr_iowait);
6490 				delayacct_blkio_start();
6491 			}
6492 		}
6493 		switch_count = &prev->nvcsw;
6494 	}
6495 
6496 	next = pick_next_task(rq, prev, &rf);
6497 	clear_tsk_need_resched(prev);
6498 	clear_preempt_need_resched();
6499 #ifdef CONFIG_SCHED_DEBUG
6500 	rq->last_seen_need_resched_ns = 0;
6501 #endif
6502 
6503 	if (likely(prev != next)) {
6504 		rq->nr_switches++;
6505 		/*
6506 		 * RCU users of rcu_dereference(rq->curr) may not see
6507 		 * changes to task_struct made by pick_next_task().
6508 		 */
6509 		RCU_INIT_POINTER(rq->curr, next);
6510 		/*
6511 		 * The membarrier system call requires each architecture
6512 		 * to have a full memory barrier after updating
6513 		 * rq->curr, before returning to user-space.
6514 		 *
6515 		 * Here are the schemes providing that barrier on the
6516 		 * various architectures:
6517 		 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC,
6518 		 *   RISC-V.  switch_mm() relies on membarrier_arch_switch_mm()
6519 		 *   on PowerPC and on RISC-V.
6520 		 * - finish_lock_switch() for weakly-ordered
6521 		 *   architectures where spin_unlock is a full barrier,
6522 		 * - switch_to() for arm64 (weakly-ordered, spin_unlock
6523 		 *   is a RELEASE barrier),
6524 		 *
6525 		 * The barrier matches a full barrier in the proximity of
6526 		 * the membarrier system call entry.
6527 		 *
6528 		 * On RISC-V, this barrier pairing is also needed for the
6529 		 * SYNC_CORE command when switching between processes, cf.
6530 		 * the inline comments in membarrier_arch_switch_mm().
6531 		 */
6532 		++*switch_count;
6533 
6534 		migrate_disable_switch(rq, prev);
6535 		psi_account_irqtime(rq, prev, next);
6536 		psi_sched_switch(prev, next, !task_on_rq_queued(prev));
6537 
6538 		trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state);
6539 
6540 		/* Also unlocks the rq: */
6541 		rq = context_switch(rq, prev, next, &rf);
6542 	} else {
6543 		rq_unpin_lock(rq, &rf);
6544 		__balance_callbacks(rq);
6545 		raw_spin_rq_unlock_irq(rq);
6546 	}
6547 }
6548 
6549 void __noreturn do_task_dead(void)
6550 {
6551 	/* Causes final put_task_struct in finish_task_switch(): */
6552 	set_special_state(TASK_DEAD);
6553 
6554 	/* Tell freezer to ignore us: */
6555 	current->flags |= PF_NOFREEZE;
6556 
6557 	__schedule(SM_NONE);
6558 	BUG();
6559 
6560 	/* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
6561 	for (;;)
6562 		cpu_relax();
6563 }
6564 
6565 static inline void sched_submit_work(struct task_struct *tsk)
6566 {
6567 	static DEFINE_WAIT_OVERRIDE_MAP(sched_map, LD_WAIT_CONFIG);
6568 	unsigned int task_flags;
6569 
6570 	/*
6571 	 * Establish LD_WAIT_CONFIG context to ensure none of the code called
6572 	 * will use a blocking primitive -- which would lead to recursion.
6573 	 */
6574 	lock_map_acquire_try(&sched_map);
6575 
6576 	task_flags = tsk->flags;
6577 	/*
6578 	 * If a worker goes to sleep, notify and ask workqueue whether it
6579 	 * wants to wake up a task to maintain concurrency.
6580 	 */
6581 	if (task_flags & PF_WQ_WORKER)
6582 		wq_worker_sleeping(tsk);
6583 	else if (task_flags & PF_IO_WORKER)
6584 		io_wq_worker_sleeping(tsk);
6585 
6586 	/*
6587 	 * spinlock and rwlock must not flush block requests.  This will
6588 	 * deadlock if the callback attempts to acquire a lock which is
6589 	 * already acquired.
6590 	 */
6591 	SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT);
6592 
6593 	/*
6594 	 * If we are going to sleep and we have plugged IO queued,
6595 	 * make sure to submit it to avoid deadlocks.
6596 	 */
6597 	blk_flush_plug(tsk->plug, true);
6598 
6599 	lock_map_release(&sched_map);
6600 }
6601 
6602 static void sched_update_worker(struct task_struct *tsk)
6603 {
6604 	if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER | PF_BLOCK_TS)) {
6605 		if (tsk->flags & PF_BLOCK_TS)
6606 			blk_plug_invalidate_ts(tsk);
6607 		if (tsk->flags & PF_WQ_WORKER)
6608 			wq_worker_running(tsk);
6609 		else if (tsk->flags & PF_IO_WORKER)
6610 			io_wq_worker_running(tsk);
6611 	}
6612 }
6613 
6614 static __always_inline void __schedule_loop(unsigned int sched_mode)
6615 {
6616 	do {
6617 		preempt_disable();
6618 		__schedule(sched_mode);
6619 		sched_preempt_enable_no_resched();
6620 	} while (need_resched());
6621 }
6622 
6623 asmlinkage __visible void __sched schedule(void)
6624 {
6625 	struct task_struct *tsk = current;
6626 
6627 #ifdef CONFIG_RT_MUTEXES
6628 	lockdep_assert(!tsk->sched_rt_mutex);
6629 #endif
6630 
6631 	if (!task_is_running(tsk))
6632 		sched_submit_work(tsk);
6633 	__schedule_loop(SM_NONE);
6634 	sched_update_worker(tsk);
6635 }
6636 EXPORT_SYMBOL(schedule);
6637 
6638 /*
6639  * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
6640  * state (has scheduled out non-voluntarily) by making sure that all
6641  * tasks have either left the run queue or have gone into user space.
6642  * As idle tasks do not do either, they must not ever be preempted
6643  * (schedule out non-voluntarily).
6644  *
6645  * schedule_idle() is similar to schedule_preempt_disabled() except that it
6646  * never enables preemption because it does not call sched_submit_work().
6647  */
6648 void __sched schedule_idle(void)
6649 {
6650 	/*
6651 	 * This skips calling sched_submit_work(), which is safe for the idle
6652 	 * task because that function is a NOP when the task is in the
6653 	 * TASK_RUNNING state, and idle is always TASK_RUNNING. Make sure this
6654 	 * isn't used someplace where the current task can be in any other
6655 	 * state.
6656 	 */
6657 	WARN_ON_ONCE(current->__state);
6658 	do {
6659 		__schedule(SM_NONE);
6660 	} while (need_resched());
6661 }
6662 
6663 #if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK)
6664 asmlinkage __visible void __sched schedule_user(void)
6665 {
6666 	/*
6667 	 * If we come here after a random call to set_need_resched(),
6668 	 * or we have been woken up remotely but the IPI has not yet arrived,
6669 	 * we haven't yet exited the RCU idle mode. Do it here manually until
6670 	 * we find a better solution.
6671 	 *
6672 	 * NB: There are buggy callers of this function.  Ideally we
6673 	 * should warn if prev_state != CONTEXT_USER, but that will trigger
6674 	 * too frequently to make sense yet.
6675 	 */
6676 	enum ctx_state prev_state = exception_enter();
6677 	schedule();
6678 	exception_exit(prev_state);
6679 }
6680 #endif
6681 
6682 /**
6683  * schedule_preempt_disabled - called with preemption disabled
6684  *
6685  * Returns with preemption disabled. Note: preempt_count must be 1
6686  */
6687 void __sched schedule_preempt_disabled(void)
6688 {
6689 	sched_preempt_enable_no_resched();
6690 	schedule();
6691 	preempt_disable();
6692 }
6693 
6694 #ifdef CONFIG_PREEMPT_RT
6695 void __sched notrace schedule_rtlock(void)
6696 {
6697 	__schedule_loop(SM_RTLOCK_WAIT);
6698 }
6699 NOKPROBE_SYMBOL(schedule_rtlock);
6700 #endif
6701 
6702 static void __sched notrace preempt_schedule_common(void)
6703 {
6704 	do {
6705 		/*
6706 		 * Because the function tracer can trace preempt_count_sub()
6707 		 * and it also uses preempt_enable/disable_notrace(), if
6708 		 * NEED_RESCHED is set, the preempt_enable_notrace() called
6709 		 * by the function tracer will call this function again and
6710 		 * cause infinite recursion.
6711 		 *
6712 		 * Preemption must be disabled here before the function
6713 		 * tracer can trace. Break up preempt_disable() into two
6714 		 * calls. One to disable preemption without fear of being
6715 		 * traced. The other to still record the preemption latency,
6716 		 * which can also be traced by the function tracer.
6717 		 */
6718 		preempt_disable_notrace();
6719 		preempt_latency_start(1);
6720 		__schedule(SM_PREEMPT);
6721 		preempt_latency_stop(1);
6722 		preempt_enable_no_resched_notrace();
6723 
6724 		/*
6725 		 * Check again in case we missed a preemption opportunity
6726 		 * between schedule and now.
6727 		 */
6728 	} while (need_resched());
6729 }
6730 
6731 #ifdef CONFIG_PREEMPTION
6732 /*
6733  * This is the entry point to schedule() from in-kernel preemption
6734  * off of preempt_enable.
6735  */
6736 asmlinkage __visible void __sched notrace preempt_schedule(void)
6737 {
6738 	/*
6739 	 * If there is a non-zero preempt_count or interrupts are disabled,
6740 	 * we do not want to preempt the current task. Just return..
6741 	 * we do not want to preempt the current task. Just return.
6742 	if (likely(!preemptible()))
6743 		return;
6744 	preempt_schedule_common();
6745 }
6746 NOKPROBE_SYMBOL(preempt_schedule);
6747 EXPORT_SYMBOL(preempt_schedule);
6748 
6749 #ifdef CONFIG_PREEMPT_DYNAMIC
6750 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
6751 #ifndef preempt_schedule_dynamic_enabled
6752 #define preempt_schedule_dynamic_enabled	preempt_schedule
6753 #define preempt_schedule_dynamic_disabled	NULL
6754 #endif
6755 DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
6756 EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
6757 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
6758 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);
6759 void __sched notrace dynamic_preempt_schedule(void)
6760 {
6761 	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
6762 		return;
6763 	preempt_schedule();
6764 }
6765 NOKPROBE_SYMBOL(dynamic_preempt_schedule);
6766 EXPORT_SYMBOL(dynamic_preempt_schedule);
6767 #endif
6768 #endif
6769 
6770 /**
6771  * preempt_schedule_notrace - preempt_schedule called by tracing
6772  *
6773  * The tracing infrastructure uses preempt_enable_notrace to prevent
6774  * recursion and tracing preempt enabling caused by the tracing
6775  * infrastructure itself. But as tracing can happen in areas coming
6776  * from userspace or just about to enter userspace, a preempt enable
6777  * can occur before user_exit() is called. This will cause the scheduler
6778  * to be called when the system is still in usermode.
6779  *
6780  * To prevent this, the preempt_enable_notrace will use this function
6781  * instead of preempt_schedule() to exit user context if needed before
6782  * calling the scheduler.
6783  */
6784 asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
6785 {
6786 	enum ctx_state prev_ctx;
6787 
6788 	if (likely(!preemptible()))
6789 		return;
6790 
6791 	do {
6792 		/*
6793 		 * Because the function tracer can trace preempt_count_sub()
6794 		 * and it also uses preempt_enable/disable_notrace(), if
6795 		 * NEED_RESCHED is set, the preempt_enable_notrace() called
6796 		 * by the function tracer will call this function again and
6797 		 * cause infinite recursion.
6798 		 *
6799 		 * Preemption must be disabled here before the function
6800 		 * tracer can trace. Break up preempt_disable() into two
6801 		 * calls. One to disable preemption without fear of being
6802 		 * traced. The other to still record the preemption latency,
6803 		 * which can also be traced by the function tracer.
6804 		 */
6805 		preempt_disable_notrace();
6806 		preempt_latency_start(1);
6807 		/*
6808 		 * Needs preempt disabled in case user_exit() is traced
6809 		 * and the tracer calls preempt_enable_notrace() causing
6810 		 * an infinite recursion.
6811 		 */
6812 		prev_ctx = exception_enter();
6813 		__schedule(SM_PREEMPT);
6814 		exception_exit(prev_ctx);
6815 
6816 		preempt_latency_stop(1);
6817 		preempt_enable_no_resched_notrace();
6818 	} while (need_resched());
6819 }
6820 EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
6821 
6822 #ifdef CONFIG_PREEMPT_DYNAMIC
6823 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
6824 #ifndef preempt_schedule_notrace_dynamic_enabled
6825 #define preempt_schedule_notrace_dynamic_enabled	preempt_schedule_notrace
6826 #define preempt_schedule_notrace_dynamic_disabled	NULL
6827 #endif
6828 DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
6829 EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
6830 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
6831 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace);
6832 void __sched notrace dynamic_preempt_schedule_notrace(void)
6833 {
6834 	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace))
6835 		return;
6836 	preempt_schedule_notrace();
6837 }
6838 NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace);
6839 EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
6840 #endif
6841 #endif
6842 
6843 #endif /* CONFIG_PREEMPTION */
6844 
6845 /*
6846  * This is the entry point to schedule() from kernel preemption
6847  * off of IRQ context.
6848  * Note, that this is called and return with IRQs disabled. This will
6849  * protect us against recursive calling from IRQ contexts.
6850  */
6851 asmlinkage __visible void __sched preempt_schedule_irq(void)
6852 {
6853 	enum ctx_state prev_state;
6854 
6855 	/* Catch callers which need to be fixed */
6856 	BUG_ON(preempt_count() || !irqs_disabled());
6857 
6858 	prev_state = exception_enter();
6859 
6860 	do {
6861 		preempt_disable();
6862 		local_irq_enable();
6863 		__schedule(SM_PREEMPT);
6864 		local_irq_disable();
6865 		sched_preempt_enable_no_resched();
6866 	} while (need_resched());
6867 
6868 	exception_exit(prev_state);
6869 }
6870 
6871 int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
6872 			  void *key)
6873 {
6874 	WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~(WF_SYNC|WF_CURRENT_CPU));
6875 	return try_to_wake_up(curr->private, mode, wake_flags);
6876 }
6877 EXPORT_SYMBOL(default_wake_function);
6878 
6879 void __setscheduler_prio(struct task_struct *p, int prio)
6880 {
6881 	if (dl_prio(prio))
6882 		p->sched_class = &dl_sched_class;
6883 	else if (rt_prio(prio))
6884 		p->sched_class = &rt_sched_class;
6885 	else
6886 		p->sched_class = &fair_sched_class;
6887 
6888 	p->prio = prio;
6889 }
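
/*
 * Illustrative note: the mapping above follows the kernel's priority scale,
 * assuming the usual MAX_DL_PRIO == 0 / MAX_RT_PRIO == 100 layout:
 *
 *	prio < 0		-> dl_sched_class	(SCHED_DEADLINE)
 *	0 <= prio < 100		-> rt_sched_class	(SCHED_FIFO/SCHED_RR)
 *	prio >= 100		-> fair_sched_class	(SCHED_NORMAL and friends)
 */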
6890 
6891 #ifdef CONFIG_RT_MUTEXES
6892 
6893 /*
6894  * Would be more useful with typeof()/auto_type but they don't mix with
6895  * bit-fields. Since it's a local thing, use int. Keep the generic-sounding
6896  * name such that if someone were to implement this function we get to compare
6897  * notes.
6898  */
6899 #define fetch_and_set(x, v) ({ int _x = (x); (x) = (v); _x; })
6900 
6901 void rt_mutex_pre_schedule(void)
6902 {
6903 	lockdep_assert(!fetch_and_set(current->sched_rt_mutex, 1));
6904 	sched_submit_work(current);
6905 }
6906 
6907 void rt_mutex_schedule(void)
6908 {
6909 	lockdep_assert(current->sched_rt_mutex);
6910 	__schedule_loop(SM_NONE);
6911 }
6912 
6913 void rt_mutex_post_schedule(void)
6914 {
6915 	sched_update_worker(current);
6916 	lockdep_assert(fetch_and_set(current->sched_rt_mutex, 0));
6917 }
6918 
6919 /*
6920  * rt_mutex_setprio - set the current priority of a task
6921  * @p: task to boost
6922  * @pi_task: donor task
6923  *
6924  * This function changes the 'effective' priority of a task. It does
6925  * not touch ->normal_prio like __setscheduler().
6926  *
6927  * Used by the rt_mutex code to implement priority inheritance
6928  * logic. Call site only calls if the priority of the task changed.
6929  */
6930 void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
6931 {
6932 	int prio, oldprio, queued, running, queue_flag =
6933 		DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
6934 	const struct sched_class *prev_class;
6935 	struct rq_flags rf;
6936 	struct rq *rq;
6937 
6938 	/* XXX used to be waiter->prio, not waiter->task->prio */
6939 	prio = __rt_effective_prio(pi_task, p->normal_prio);
6940 
6941 	/*
6942 	 * If nothing changed; bail early.
6943 	 */
6944 	if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio))
6945 		return;
6946 
6947 	rq = __task_rq_lock(p, &rf);
6948 	update_rq_clock(rq);
6949 	/*
6950 	 * Set under pi_lock && rq->lock, such that the value can be used under
6951 	 * either lock.
6952 	 *
6953 	 * Note that a lot of trickiness is needed to make this pointer cache
6954 	 * work right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
6955 	 * ensure a task is de-boosted (pi_task is set to NULL) before the
6956 	 * task is allowed to run again (and can exit). This ensures the pointer
6957 	 * points to a blocked task -- which guarantees the task is present.
6958 	 */
6959 	p->pi_top_task = pi_task;
6960 
6961 	/*
6962 	 * For FIFO/RR we only need to set prio, if that matches we're done.
6963 	 */
6964 	if (prio == p->prio && !dl_prio(prio))
6965 		goto out_unlock;
6966 
6967 	/*
6968 	 * Idle task boosting is a no-no in general. There is one
6969 	 * exception, when PREEMPT_RT and NOHZ are active:
6970 	 *
6971 	 * The idle task calls get_next_timer_interrupt() and holds
6972 	 * the timer wheel base->lock on the CPU and another CPU wants
6973 	 * to access the timer (probably to cancel it). We can safely
6974 	 * ignore the boosting request, as the idle CPU runs this code
6975 	 * with interrupts disabled and will complete the lock
6976 	 * protected section without being interrupted. So there is no
6977 	 * real need to boost.
6978 	 */
6979 	if (unlikely(p == rq->idle)) {
6980 		WARN_ON(p != rq->curr);
6981 		WARN_ON(p->pi_blocked_on);
6982 		goto out_unlock;
6983 	}
6984 
6985 	trace_sched_pi_setprio(p, pi_task);
6986 	oldprio = p->prio;
6987 
6988 	if (oldprio == prio)
6989 		queue_flag &= ~DEQUEUE_MOVE;
6990 
6991 	prev_class = p->sched_class;
6992 	queued = task_on_rq_queued(p);
6993 	running = task_current(rq, p);
6994 	if (queued)
6995 		dequeue_task(rq, p, queue_flag);
6996 	if (running)
6997 		put_prev_task(rq, p);
6998 
6999 	/*
7000 	 * Boosting conditions are:
7001 	 * 1. -rt task is running and holds mutex A
7002 	 *      --> -dl task blocks on mutex A
7003 	 *
7004 	 * 2. -dl task is running and holds mutex A
7005 	 *      --> -dl task blocks on mutex A and could preempt the
7006 	 *          running task
7007 	 */
7008 	if (dl_prio(prio)) {
7009 		if (!dl_prio(p->normal_prio) ||
7010 		    (pi_task && dl_prio(pi_task->prio) &&
7011 		     dl_entity_preempt(&pi_task->dl, &p->dl))) {
7012 			p->dl.pi_se = pi_task->dl.pi_se;
7013 			queue_flag |= ENQUEUE_REPLENISH;
7014 		} else {
7015 			p->dl.pi_se = &p->dl;
7016 		}
7017 	} else if (rt_prio(prio)) {
7018 		if (dl_prio(oldprio))
7019 			p->dl.pi_se = &p->dl;
7020 		if (oldprio < prio)
7021 			queue_flag |= ENQUEUE_HEAD;
7022 	} else {
7023 		if (dl_prio(oldprio))
7024 			p->dl.pi_se = &p->dl;
7025 		if (rt_prio(oldprio))
7026 			p->rt.timeout = 0;
7027 	}
7028 
7029 	__setscheduler_prio(p, prio);
7030 
7031 	if (queued)
7032 		enqueue_task(rq, p, queue_flag);
7033 	if (running)
7034 		set_next_task(rq, p);
7035 
7036 	check_class_changed(rq, p, prev_class, oldprio);
7037 out_unlock:
7038 	/* Prevent rq from going away on us: */
7039 	preempt_disable();
7040 
7041 	rq_unpin_lock(rq, &rf);
7042 	__balance_callbacks(rq);
7043 	raw_spin_rq_unlock(rq);
7044 
7045 	preempt_enable();
7046 }
7047 #endif
7048 
7049 #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
7050 int __sched __cond_resched(void)
7051 {
7052 	if (should_resched(0)) {
7053 		preempt_schedule_common();
7054 		return 1;
7055 	}
7056 	/*
7057 	 * In preemptible kernels, ->rcu_read_lock_nesting tells the tick
7058 	 * whether the current CPU is in an RCU read-side critical section,
7059 	 * so the tick can report quiescent states even for CPUs looping
7060 	 * in kernel context.  In contrast, in non-preemptible kernels,
7061 	 * RCU readers leave no in-memory hints, which means that CPU-bound
7062 	 * processes executing in kernel context might never report an
7063 	 * RCU quiescent state.  Therefore, the following code causes
7064 	 * cond_resched() to report a quiescent state, but only when RCU
7065 	 * is in urgent need of one.
7066 	 */
7067 #ifndef CONFIG_PREEMPT_RCU
7068 	rcu_all_qs();
7069 #endif
7070 	return 0;
7071 }
7072 EXPORT_SYMBOL(__cond_resched);
7073 #endif
7074 
7075 #ifdef CONFIG_PREEMPT_DYNAMIC
7076 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
7077 #define cond_resched_dynamic_enabled	__cond_resched
7078 #define cond_resched_dynamic_disabled	((void *)&__static_call_return0)
7079 DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
7080 EXPORT_STATIC_CALL_TRAMP(cond_resched);
7081 
7082 #define might_resched_dynamic_enabled	__cond_resched
7083 #define might_resched_dynamic_disabled	((void *)&__static_call_return0)
7084 DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
7085 EXPORT_STATIC_CALL_TRAMP(might_resched);
7086 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7087 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
7088 int __sched dynamic_cond_resched(void)
7089 {
7090 	klp_sched_try_switch();
7091 	if (!static_branch_unlikely(&sk_dynamic_cond_resched))
7092 		return 0;
7093 	return __cond_resched();
7094 }
7095 EXPORT_SYMBOL(dynamic_cond_resched);
7096 
7097 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched);
7098 int __sched dynamic_might_resched(void)
7099 {
7100 	if (!static_branch_unlikely(&sk_dynamic_might_resched))
7101 		return 0;
7102 	return __cond_resched();
7103 }
7104 EXPORT_SYMBOL(dynamic_might_resched);
7105 #endif
7106 #endif
7107 
7108 /*
7109  * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
7110  * call schedule, and on return reacquire the lock.
7111  *
7112  * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
7113  * operations here to prevent schedule() from being called twice (once via
7114  * spin_unlock(), once by hand).
7115  */
7116 int __cond_resched_lock(spinlock_t *lock)
7117 {
7118 	int resched = should_resched(PREEMPT_LOCK_OFFSET);
7119 	int ret = 0;
7120 
7121 	lockdep_assert_held(lock);
7122 
7123 	if (spin_needbreak(lock) || resched) {
7124 		spin_unlock(lock);
7125 		if (!_cond_resched())
7126 			cpu_relax();
7127 		ret = 1;
7128 		spin_lock(lock);
7129 	}
7130 	return ret;
7131 }
7132 EXPORT_SYMBOL(__cond_resched_lock);
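
/*
 * Illustrative sketch, not part of the kernel sources: a typical caller
 * iterates over a long data structure under a spinlock and yields
 * periodically via the cond_resched_lock() wrapper; more_work() and
 * do_one_step() below are hypothetical placeholders.
 *
 *	spin_lock(&lock);
 *	while (more_work()) {
 *		do_one_step();
 *		cond_resched_lock(&lock);	// may drop, schedule, re-take
 *	}
 *	spin_unlock(&lock);
 */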
7133 
7134 int __cond_resched_rwlock_read(rwlock_t *lock)
7135 {
7136 	int resched = should_resched(PREEMPT_LOCK_OFFSET);
7137 	int ret = 0;
7138 
7139 	lockdep_assert_held_read(lock);
7140 
7141 	if (rwlock_needbreak(lock) || resched) {
7142 		read_unlock(lock);
7143 		if (!_cond_resched())
7144 			cpu_relax();
7145 		ret = 1;
7146 		read_lock(lock);
7147 	}
7148 	return ret;
7149 }
7150 EXPORT_SYMBOL(__cond_resched_rwlock_read);
7151 
7152 int __cond_resched_rwlock_write(rwlock_t *lock)
7153 {
7154 	int resched = should_resched(PREEMPT_LOCK_OFFSET);
7155 	int ret = 0;
7156 
7157 	lockdep_assert_held_write(lock);
7158 
7159 	if (rwlock_needbreak(lock) || resched) {
7160 		write_unlock(lock);
7161 		if (!_cond_resched())
7162 			cpu_relax();
7163 		ret = 1;
7164 		write_lock(lock);
7165 	}
7166 	return ret;
7167 }
7168 EXPORT_SYMBOL(__cond_resched_rwlock_write);
7169 
7170 #ifdef CONFIG_PREEMPT_DYNAMIC
7171 
7172 #ifdef CONFIG_GENERIC_ENTRY
7173 #include <linux/entry-common.h>
7174 #endif
7175 
7176 /*
7177  * SC:cond_resched
7178  * SC:might_resched
7179  * SC:preempt_schedule
7180  * SC:preempt_schedule_notrace
7181  * SC:irqentry_exit_cond_resched
7182  *
7183  *
7184  * NONE:
7185  *   cond_resched               <- __cond_resched
7186  *   might_resched              <- RET0
7187  *   preempt_schedule           <- NOP
7188  *   preempt_schedule_notrace   <- NOP
7189  *   irqentry_exit_cond_resched <- NOP
7190  *
7191  * VOLUNTARY:
7192  *   cond_resched               <- __cond_resched
7193  *   might_resched              <- __cond_resched
7194  *   preempt_schedule           <- NOP
7195  *   preempt_schedule_notrace   <- NOP
7196  *   irqentry_exit_cond_resched <- NOP
7197  *
7198  * FULL:
7199  *   cond_resched               <- RET0
7200  *   might_resched              <- RET0
7201  *   preempt_schedule           <- preempt_schedule
7202  *   preempt_schedule_notrace   <- preempt_schedule_notrace
7203  *   irqentry_exit_cond_resched <- irqentry_exit_cond_resched
7204  */
7205 
7206 enum {
7207 	preempt_dynamic_undefined = -1,
7208 	preempt_dynamic_none,
7209 	preempt_dynamic_voluntary,
7210 	preempt_dynamic_full,
7211 };
7212 
7213 int preempt_dynamic_mode = preempt_dynamic_undefined;
7214 
7215 int sched_dynamic_mode(const char *str)
7216 {
7217 	if (!strcmp(str, "none"))
7218 		return preempt_dynamic_none;
7219 
7220 	if (!strcmp(str, "voluntary"))
7221 		return preempt_dynamic_voluntary;
7222 
7223 	if (!strcmp(str, "full"))
7224 		return preempt_dynamic_full;
7225 
7226 	return -EINVAL;
7227 }
7228 
7229 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
7230 #define preempt_dynamic_enable(f)	static_call_update(f, f##_dynamic_enabled)
7231 #define preempt_dynamic_disable(f)	static_call_update(f, f##_dynamic_disabled)
7232 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7233 #define preempt_dynamic_enable(f)	static_key_enable(&sk_dynamic_##f.key)
7234 #define preempt_dynamic_disable(f)	static_key_disable(&sk_dynamic_##f.key)
7235 #else
7236 #error "Unsupported PREEMPT_DYNAMIC mechanism"
7237 #endif
7238 
7239 static DEFINE_MUTEX(sched_dynamic_mutex);
7240 static bool klp_override;
7241 
7242 static void __sched_dynamic_update(int mode)
7243 {
7244 	/*
7245 	 * Prevent {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
7246 	 * the ZERO state, which is invalid.
7247 	 */
7248 	if (!klp_override)
7249 		preempt_dynamic_enable(cond_resched);
7250 	preempt_dynamic_enable(might_resched);
7251 	preempt_dynamic_enable(preempt_schedule);
7252 	preempt_dynamic_enable(preempt_schedule_notrace);
7253 	preempt_dynamic_enable(irqentry_exit_cond_resched);
7254 
7255 	switch (mode) {
7256 	case preempt_dynamic_none:
7257 		if (!klp_override)
7258 			preempt_dynamic_enable(cond_resched);
7259 		preempt_dynamic_disable(might_resched);
7260 		preempt_dynamic_disable(preempt_schedule);
7261 		preempt_dynamic_disable(preempt_schedule_notrace);
7262 		preempt_dynamic_disable(irqentry_exit_cond_resched);
7263 		if (mode != preempt_dynamic_mode)
7264 			pr_info("Dynamic Preempt: none\n");
7265 		break;
7266 
7267 	case preempt_dynamic_voluntary:
7268 		if (!klp_override)
7269 			preempt_dynamic_enable(cond_resched);
7270 		preempt_dynamic_enable(might_resched);
7271 		preempt_dynamic_disable(preempt_schedule);
7272 		preempt_dynamic_disable(preempt_schedule_notrace);
7273 		preempt_dynamic_disable(irqentry_exit_cond_resched);
7274 		if (mode != preempt_dynamic_mode)
7275 			pr_info("Dynamic Preempt: voluntary\n");
7276 		break;
7277 
7278 	case preempt_dynamic_full:
7279 		if (!klp_override)
7280 			preempt_dynamic_disable(cond_resched);
7281 		preempt_dynamic_disable(might_resched);
7282 		preempt_dynamic_enable(preempt_schedule);
7283 		preempt_dynamic_enable(preempt_schedule_notrace);
7284 		preempt_dynamic_enable(irqentry_exit_cond_resched);
7285 		if (mode != preempt_dynamic_mode)
7286 			pr_info("Dynamic Preempt: full\n");
7287 		break;
7288 	}
7289 
7290 	preempt_dynamic_mode = mode;
7291 }
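
/*
 * Editorial note: the switch above simply realizes the NONE/VOLUNTARY/FULL
 * table from the big comment earlier. Below is a hedged user-space sketch of
 * that enable/disable matrix, using plain booleans instead of static calls
 * and static keys; all names here are illustrative only, not kernel API.
 */

#include <stdbool.h>
#include <stdio.h>

enum { MODE_NONE, MODE_VOLUNTARY, MODE_FULL };

struct dynamic_state {
	bool cond_resched, might_resched;
	bool preempt_schedule, preempt_schedule_notrace;
	bool irqentry_exit_cond_resched;
};

static void dynamic_update(struct dynamic_state *s, int mode)
{
	/* Mirrors __sched_dynamic_update(): enable everything first ... */
	*s = (struct dynamic_state){ true, true, true, true, true };

	switch (mode) {
	case MODE_NONE:			/* ... then disable per mode */
		s->might_resched = false;
		/* fall through */
	case MODE_VOLUNTARY:
		s->preempt_schedule = false;
		s->preempt_schedule_notrace = false;
		s->irqentry_exit_cond_resched = false;
		break;
	case MODE_FULL:
		s->cond_resched = false;
		s->might_resched = false;
		break;
	}
}

int main(void)
{
	struct dynamic_state s;

	dynamic_update(&s, MODE_FULL);
	printf("FULL: cond_resched=%d preempt_schedule=%d\n",
	       s.cond_resched, s.preempt_schedule);
	return 0;
}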
7292 
7293 void sched_dynamic_update(int mode)
7294 {
7295 	mutex_lock(&sched_dynamic_mutex);
7296 	__sched_dynamic_update(mode);
7297 	mutex_unlock(&sched_dynamic_mutex);
7298 }
7299 
7300 #ifdef CONFIG_HAVE_PREEMPT_DYNAMIC_CALL
7301 
7302 static int klp_cond_resched(void)
7303 {
7304 	__klp_sched_try_switch();
7305 	return __cond_resched();
7306 }
7307 
7308 void sched_dynamic_klp_enable(void)
7309 {
7310 	mutex_lock(&sched_dynamic_mutex);
7311 
7312 	klp_override = true;
7313 	static_call_update(cond_resched, klp_cond_resched);
7314 
7315 	mutex_unlock(&sched_dynamic_mutex);
7316 }
7317 
7318 void sched_dynamic_klp_disable(void)
7319 {
7320 	mutex_lock(&sched_dynamic_mutex);
7321 
7322 	klp_override = false;
7323 	__sched_dynamic_update(preempt_dynamic_mode);
7324 
7325 	mutex_unlock(&sched_dynamic_mutex);
7326 }
7327 
7328 #endif /* CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
7329 
7330 static int __init setup_preempt_mode(char *str)
7331 {
7332 	int mode = sched_dynamic_mode(str);
7333 	if (mode < 0) {
7334 		pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
7335 		return 0;
7336 	}
7337 
7338 	sched_dynamic_update(mode);
7339 	return 1;
7340 }
7341 __setup("preempt=", setup_preempt_mode);
7342 
7343 static void __init preempt_dynamic_init(void)
7344 {
7345 	if (preempt_dynamic_mode == preempt_dynamic_undefined) {
7346 		if (IS_ENABLED(CONFIG_PREEMPT_NONE)) {
7347 			sched_dynamic_update(preempt_dynamic_none);
7348 		} else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) {
7349 			sched_dynamic_update(preempt_dynamic_voluntary);
7350 		} else {
7351 			/* Default static call setting, nothing to do */
7352 			WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT));
7353 			preempt_dynamic_mode = preempt_dynamic_full;
7354 			pr_info("Dynamic Preempt: full\n");
7355 		}
7356 	}
7357 }
7358 
7359 #define PREEMPT_MODEL_ACCESSOR(mode) \
7360 	bool preempt_model_##mode(void)						 \
7361 	{									 \
7362 		WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \
7363 		return preempt_dynamic_mode == preempt_dynamic_##mode;		 \
7364 	}									 \
7365 	EXPORT_SYMBOL_GPL(preempt_model_##mode)
7366 
7367 PREEMPT_MODEL_ACCESSOR(none);
7368 PREEMPT_MODEL_ACCESSOR(voluntary);
7369 PREEMPT_MODEL_ACCESSOR(full);
7370 
7371 #else /* !CONFIG_PREEMPT_DYNAMIC: */
7372 
7373 static inline void preempt_dynamic_init(void) { }
7374 
7375 #endif /* CONFIG_PREEMPT_DYNAMIC */
7376 
7377 int io_schedule_prepare(void)
7378 {
7379 	int old_iowait = current->in_iowait;
7380 
7381 	current->in_iowait = 1;
7382 	blk_flush_plug(current->plug, true);
7383 	return old_iowait;
7384 }
7385 
7386 void io_schedule_finish(int token)
7387 {
7388 	current->in_iowait = token;
7389 }
7390 
7391 /*
7392  * This task is about to go to sleep on IO. Increment rq->nr_iowait so
7393  * that process accounting knows that this is a task in IO wait state.
7394  */
7395 long __sched io_schedule_timeout(long timeout)
7396 {
7397 	int token;
7398 	long ret;
7399 
7400 	token = io_schedule_prepare();
7401 	ret = schedule_timeout(timeout);
7402 	io_schedule_finish(token);
7403 
7404 	return ret;
7405 }
7406 EXPORT_SYMBOL(io_schedule_timeout);
7407 
7408 void __sched io_schedule(void)
7409 {
7410 	int token;
7411 
7412 	token = io_schedule_prepare();
7413 	schedule();
7414 	io_schedule_finish(token);
7415 }
7416 EXPORT_SYMBOL(io_schedule);
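
/*
 * Editorial note: io_schedule_prepare()/io_schedule_finish() form a
 * save/restore pair around current->in_iowait, so the wrappers nest
 * correctly when the caller was already in iowait. The standalone mock
 * below (the 'current_task' stand-in and helper names are made up for
 * illustration) demonstrates the token semantics.
 */

#include <assert.h>
#include <stdio.h>

struct task { int in_iowait; };
static struct task current_task;	/* stand-in for 'current' */

static int io_prepare(void)
{
	int old = current_task.in_iowait;

	current_task.in_iowait = 1;	/* the kernel also flushes the block plug here */
	return old;
}

static void io_finish(int token)
{
	current_task.in_iowait = token;	/* restore whatever the caller had */
}

int main(void)
{
	int outer = io_prepare();	/* 0 -> 1 */
	int inner = io_prepare();	/* already 1, the token remembers that */

	io_finish(inner);
	assert(current_task.in_iowait == 1);	/* still inside the outer section */
	io_finish(outer);
	assert(current_task.in_iowait == 0);	/* fully restored */
	printf("nested iowait sections restored correctly\n");
	return 0;
}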
7417 
7418 void sched_show_task(struct task_struct *p)
7419 {
7420 	unsigned long free = 0;
7421 	int ppid;
7422 
7423 	if (!try_get_task_stack(p))
7424 		return;
7425 
7426 	pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
7427 
7428 	if (task_is_running(p))
7429 		pr_cont("  running task    ");
7430 #ifdef CONFIG_DEBUG_STACK_USAGE
7431 	free = stack_not_used(p);
7432 #endif
7433 	ppid = 0;
7434 	rcu_read_lock();
7435 	if (pid_alive(p))
7436 		ppid = task_pid_nr(rcu_dereference(p->real_parent));
7437 	rcu_read_unlock();
7438 	pr_cont(" stack:%-5lu pid:%-5d tgid:%-5d ppid:%-6d flags:0x%08lx\n",
7439 		free, task_pid_nr(p), task_tgid_nr(p),
7440 		ppid, read_task_thread_flags(p));
7441 
7442 	print_worker_info(KERN_INFO, p);
7443 	print_stop_info(KERN_INFO, p);
7444 	show_stack(p, NULL, KERN_INFO);
7445 	put_task_stack(p);
7446 }
7447 EXPORT_SYMBOL_GPL(sched_show_task);
7448 
7449 static inline bool
7450 state_filter_match(unsigned long state_filter, struct task_struct *p)
7451 {
7452 	unsigned int state = READ_ONCE(p->__state);
7453 
7454 	/* no filter, everything matches */
7455 	if (!state_filter)
7456 		return true;
7457 
7458 	/* filter, but doesn't match */
7459 	if (!(state & state_filter))
7460 		return false;
7461 
7462 	/*
7463 	 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
7464 	 * TASK_KILLABLE).
7465 	 */
7466 	if (state_filter == TASK_UNINTERRUPTIBLE && (state & TASK_NOLOAD))
7467 		return false;
7468 
7469 	return true;
7470 }
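
/*
 * Editorial note: state_filter_match() is a plain bitmask test with one
 * special case: when the filter is exactly TASK_UNINTERRUPTIBLE, tasks that
 * also carry TASK_NOLOAD (i.e. TASK_IDLE) are skipped. The standalone sketch
 * below uses illustrative bit values; the real ones live in <linux/sched.h>.
 */

#include <stdbool.h>
#include <stdio.h>

#define T_UNINTERRUPTIBLE	0x0002	/* illustrative values, not the real ones */
#define T_NOLOAD		0x0400
#define T_IDLE			(T_UNINTERRUPTIBLE | T_NOLOAD)

static bool filter_match(unsigned long filter, unsigned int state)
{
	if (!filter)
		return true;			/* no filter: everything matches */
	if (!(state & filter))
		return false;			/* filter set but no overlap */
	/* Looking for D-state only: skip idle kthreads (TASK_IDLE). */
	if (filter == T_UNINTERRUPTIBLE && (state & T_NOLOAD))
		return false;
	return true;
}

int main(void)
{
	printf("D-state task: %d\n", filter_match(T_UNINTERRUPTIBLE, T_UNINTERRUPTIBLE)); /* 1 */
	printf("idle kthread: %d\n", filter_match(T_UNINTERRUPTIBLE, T_IDLE));            /* 0 */
	printf("no filter:    %d\n", filter_match(0, T_IDLE));                            /* 1 */
	return 0;
}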
7471 
7472 
7473 void show_state_filter(unsigned int state_filter)
7474 {
7475 	struct task_struct *g, *p;
7476 
7477 	rcu_read_lock();
7478 	for_each_process_thread(g, p) {
7479 		/*
7480 		 * Reset the NMI watchdog timeout; listing all tasks on a slow
7481 		 * console might take a lot of time.
7482 		 * Also, reset softlockup watchdogs on all CPUs, because
7483 		 * another CPU might be blocked waiting for us to process
7484 		 * an IPI.
7485 		 */
7486 		touch_nmi_watchdog();
7487 		touch_all_softlockup_watchdogs();
7488 		if (state_filter_match(state_filter, p))
7489 			sched_show_task(p);
7490 	}
7491 
7492 #ifdef CONFIG_SCHED_DEBUG
7493 	if (!state_filter)
7494 		sysrq_sched_debug_show();
7495 #endif
7496 	rcu_read_unlock();
7497 	/*
7498 	 * Only show locks if all tasks are dumped:
7499 	 */
7500 	if (!state_filter)
7501 		debug_show_all_locks();
7502 }
7503 
7504 /**
7505  * init_idle - set up an idle thread for a given CPU
7506  * @idle: task in question
7507  * @cpu: CPU the idle task belongs to
7508  *
7509  * NOTE: this function does not set the idle thread's NEED_RESCHED
7510  * flag, to make booting more robust.
7511  */
7512 void __init init_idle(struct task_struct *idle, int cpu)
7513 {
7514 #ifdef CONFIG_SMP
7515 	struct affinity_context ac = (struct affinity_context) {
7516 		.new_mask  = cpumask_of(cpu),
7517 		.flags     = 0,
7518 	};
7519 #endif
7520 	struct rq *rq = cpu_rq(cpu);
7521 	unsigned long flags;
7522 
7523 	__sched_fork(0, idle);
7524 
7525 	raw_spin_lock_irqsave(&idle->pi_lock, flags);
7526 	raw_spin_rq_lock(rq);
7527 
7528 	idle->__state = TASK_RUNNING;
7529 	idle->se.exec_start = sched_clock();
7530 	/*
7531 	 * PF_KTHREAD should already be set at this point; regardless, make it
7532 	 * look like a proper per-CPU kthread.
7533 	 */
7534 	idle->flags |= PF_KTHREAD | PF_NO_SETAFFINITY;
7535 	kthread_set_per_cpu(idle, cpu);
7536 
7537 #ifdef CONFIG_SMP
7538 	/*
7539 	 * It's possible that init_idle() gets called multiple times on a task;
7540 	 * in that case do_set_cpus_allowed() will not do the right thing.
7541 	 *
7542 	 * And since this is boot we can forgo the serialization.
7543 	 */
7544 	set_cpus_allowed_common(idle, &ac);
7545 #endif
7546 	/*
7547 	 * We have a chicken-and-egg problem: even though we are holding
7548 	 * rq->lock, the task's CPU isn't yet set to this CPU, so the
7549 	 * lockdep check in task_group() will fail.
7550 	 *
7551 	 * This is a similar case to sched_fork(); alternatively we could
7552 	 * use task_rq_lock() here and obtain the other rq->lock.
7553 	 *
7554 	 * Silence PROVE_RCU
7555 	 */
7556 	rcu_read_lock();
7557 	__set_task_cpu(idle, cpu);
7558 	rcu_read_unlock();
7559 
7560 	rq->idle = idle;
7561 	rcu_assign_pointer(rq->curr, idle);
7562 	idle->on_rq = TASK_ON_RQ_QUEUED;
7563 #ifdef CONFIG_SMP
7564 	idle->on_cpu = 1;
7565 #endif
7566 	raw_spin_rq_unlock(rq);
7567 	raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
7568 
7569 	/* Set the preempt count _outside_ the spinlocks! */
7570 	init_idle_preempt_count(idle, cpu);
7571 
7572 	/*
7573 	 * The idle tasks have their own, simple scheduling class:
7574 	 */
7575 	idle->sched_class = &idle_sched_class;
7576 	ftrace_graph_init_idle_task(idle, cpu);
7577 	vtime_init_idle(idle, cpu);
7578 #ifdef CONFIG_SMP
7579 	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
7580 #endif
7581 }
7582 
7583 #ifdef CONFIG_SMP
7584 
7585 int cpuset_cpumask_can_shrink(const struct cpumask *cur,
7586 			      const struct cpumask *trial)
7587 {
7588 	int ret = 1;
7589 
7590 	if (cpumask_empty(cur))
7591 		return ret;
7592 
7593 	ret = dl_cpuset_cpumask_can_shrink(cur, trial);
7594 
7595 	return ret;
7596 }
7597 
7598 int task_can_attach(struct task_struct *p)
7599 {
7600 	int ret = 0;
7601 
7602 	/*
7603 	 * Kthreads which disallow setaffinity shouldn't be moved
7604 	 * to a new cpuset; we don't want to change their CPU
7605 	 * affinity and isolating such threads by their set of
7606 	 * allowed nodes is unnecessary.  Thus, cpusets are not
7607 	 * applicable for such threads.  This prevents checking for
7608 	 * success of set_cpus_allowed_ptr() on all attached tasks
7609 	 * before cpus_mask may be changed.
7610 	 */
7611 	if (p->flags & PF_NO_SETAFFINITY)
7612 		ret = -EINVAL;
7613 
7614 	return ret;
7615 }
7616 
7617 bool sched_smp_initialized __read_mostly;
7618 
7619 #ifdef CONFIG_NUMA_BALANCING
7620 /* Migrate current task p to target_cpu */
7621 int migrate_task_to(struct task_struct *p, int target_cpu)
7622 {
7623 	struct migration_arg arg = { p, target_cpu };
7624 	int curr_cpu = task_cpu(p);
7625 
7626 	if (curr_cpu == target_cpu)
7627 		return 0;
7628 
7629 	if (!cpumask_test_cpu(target_cpu, p->cpus_ptr))
7630 		return -EINVAL;
7631 
7632 	/* TODO: This is not properly updating schedstats */
7633 
7634 	trace_sched_move_numa(p, curr_cpu, target_cpu);
7635 	return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
7636 }
7637 
7638 /*
7639  * Requeue a task on a given node and accurately track the number of NUMA
7640  * tasks on the runqueues
7641  */
7642 void sched_setnuma(struct task_struct *p, int nid)
7643 {
7644 	bool queued, running;
7645 	struct rq_flags rf;
7646 	struct rq *rq;
7647 
7648 	rq = task_rq_lock(p, &rf);
7649 	queued = task_on_rq_queued(p);
7650 	running = task_current(rq, p);
7651 
7652 	if (queued)
7653 		dequeue_task(rq, p, DEQUEUE_SAVE);
7654 	if (running)
7655 		put_prev_task(rq, p);
7656 
7657 	p->numa_preferred_nid = nid;
7658 
7659 	if (queued)
7660 		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
7661 	if (running)
7662 		set_next_task(rq, p);
7663 	task_rq_unlock(rq, p, &rf);
7664 }
7665 #endif /* CONFIG_NUMA_BALANCING */
7666 
7667 #ifdef CONFIG_HOTPLUG_CPU
7668 /*
7669  * Ensure that the idle task is using init_mm right before its CPU goes
7670  * offline.
7671  */
7672 void idle_task_exit(void)
7673 {
7674 	struct mm_struct *mm = current->active_mm;
7675 
7676 	BUG_ON(cpu_online(smp_processor_id()));
7677 	BUG_ON(current != this_rq()->idle);
7678 
7679 	if (mm != &init_mm) {
7680 		switch_mm(mm, &init_mm, current);
7681 		finish_arch_post_lock_switch();
7682 	}
7683 
7684 	/* finish_cpu(), as run on the BP, will clean up the active_mm state */
7685 }
7686 
7687 static int __balance_push_cpu_stop(void *arg)
7688 {
7689 	struct task_struct *p = arg;
7690 	struct rq *rq = this_rq();
7691 	struct rq_flags rf;
7692 	int cpu;
7693 
7694 	raw_spin_lock_irq(&p->pi_lock);
7695 	rq_lock(rq, &rf);
7696 
7697 	update_rq_clock(rq);
7698 
7699 	if (task_rq(p) == rq && task_on_rq_queued(p)) {
7700 		cpu = select_fallback_rq(rq->cpu, p);
7701 		rq = __migrate_task(rq, &rf, p, cpu);
7702 	}
7703 
7704 	rq_unlock(rq, &rf);
7705 	raw_spin_unlock_irq(&p->pi_lock);
7706 
7707 	put_task_struct(p);
7708 
7709 	return 0;
7710 }
7711 
7712 static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
7713 
7714 /*
7715  * Ensure we only run per-cpu kthreads once the CPU goes !active.
7716  *
7717  * This is enabled below SCHED_AP_ACTIVE, i.e. when !cpu_active(), but it is
7718  * only effective while the hotplug operation is going down.
7719  */
7720 static void balance_push(struct rq *rq)
7721 {
7722 	struct task_struct *push_task = rq->curr;
7723 
7724 	lockdep_assert_rq_held(rq);
7725 
7726 	/*
7727 	 * Ensure the callback stays installed until balance_push_set(.on = false);
7728 	 */
7729 	rq->balance_callback = &balance_push_callback;
7730 
7731 	/*
7732 	 * Only active while going offline and when invoked on the outgoing
7733 	 * CPU.
7734 	 */
7735 	if (!cpu_dying(rq->cpu) || rq != this_rq())
7736 		return;
7737 
7738 	/*
7739 	 * Both the CPU-hotplug thread and the stop task fall into this case and are
7740 	 * required to complete the hotplug process.
7741 	 */
7742 	if (kthread_is_per_cpu(push_task) ||
7743 	    is_migration_disabled(push_task)) {
7744 
7745 		/*
7746 		 * If this is the idle task on the outgoing CPU try to wake
7747 		 * up the hotplug control thread which might wait for the
7748 		 * last task to vanish. The rcuwait_active() check is
7749 		 * accurate here because the waiter is pinned on this CPU
7750 		 * and obviously can't be running in parallel.
7751 		 *
7752 		 * On RT kernels this also has to check whether there are
7753 		 * pinned and scheduled out tasks on the runqueue. They
7754 		 * need to leave the migrate disabled section first.
7755 		 */
7756 		if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
7757 		    rcuwait_active(&rq->hotplug_wait)) {
7758 			raw_spin_rq_unlock(rq);
7759 			rcuwait_wake_up(&rq->hotplug_wait);
7760 			raw_spin_rq_lock(rq);
7761 		}
7762 		return;
7763 	}
7764 
7765 	get_task_struct(push_task);
7766 	/*
7767 	 * Temporarily drop rq->lock such that we can wake up the stop task.
7768 	 * Both preemption and IRQs are still disabled.
7769 	 */
7770 	preempt_disable();
7771 	raw_spin_rq_unlock(rq);
7772 	stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
7773 			    this_cpu_ptr(&push_work));
7774 	preempt_enable();
7775 	/*
7776 	 * At this point need_resched() is true and we'll take the loop in
7777 	 * schedule(). The next pick is obviously going to be the stop task,
7778 	 * which is kthread_is_per_cpu(), and it will push this task away.
7779 	 */
7780 	raw_spin_rq_lock(rq);
7781 }
7782 
7783 static void balance_push_set(int cpu, bool on)
7784 {
7785 	struct rq *rq = cpu_rq(cpu);
7786 	struct rq_flags rf;
7787 
7788 	rq_lock_irqsave(rq, &rf);
7789 	if (on) {
7790 		WARN_ON_ONCE(rq->balance_callback);
7791 		rq->balance_callback = &balance_push_callback;
7792 	} else if (rq->balance_callback == &balance_push_callback) {
7793 		rq->balance_callback = NULL;
7794 	}
7795 	rq_unlock_irqrestore(rq, &rf);
7796 }
7797 
7798 /*
7799  * Invoked from a CPU's hotplug control thread after the CPU has been marked
7800  * inactive. All tasks which are not per CPU kernel threads are either
7801  * pushed off this CPU now via balance_push() or placed on a different CPU
7802  * during wakeup. Wait until the CPU is quiescent.
7803  */
7804 static void balance_hotplug_wait(void)
7805 {
7806 	struct rq *rq = this_rq();
7807 
7808 	rcuwait_wait_event(&rq->hotplug_wait,
7809 			   rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
7810 			   TASK_UNINTERRUPTIBLE);
7811 }
7812 
7813 #else
7814 
7815 static inline void balance_push(struct rq *rq)
7816 {
7817 }
7818 
7819 static inline void balance_push_set(int cpu, bool on)
7820 {
7821 }
7822 
7823 static inline void balance_hotplug_wait(void)
7824 {
7825 }
7826 
7827 #endif /* CONFIG_HOTPLUG_CPU */
7828 
7829 void set_rq_online(struct rq *rq)
7830 {
7831 	if (!rq->online) {
7832 		const struct sched_class *class;
7833 
7834 		cpumask_set_cpu(rq->cpu, rq->rd->online);
7835 		rq->online = 1;
7836 
7837 		for_each_class(class) {
7838 			if (class->rq_online)
7839 				class->rq_online(rq);
7840 		}
7841 	}
7842 }
7843 
7844 void set_rq_offline(struct rq *rq)
7845 {
7846 	if (rq->online) {
7847 		const struct sched_class *class;
7848 
7849 		update_rq_clock(rq);
7850 		for_each_class(class) {
7851 			if (class->rq_offline)
7852 				class->rq_offline(rq);
7853 		}
7854 
7855 		cpumask_clear_cpu(rq->cpu, rq->rd->online);
7856 		rq->online = 0;
7857 	}
7858 }
7859 
7860 static inline void sched_set_rq_online(struct rq *rq, int cpu)
7861 {
7862 	struct rq_flags rf;
7863 
7864 	rq_lock_irqsave(rq, &rf);
7865 	if (rq->rd) {
7866 		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
7867 		set_rq_online(rq);
7868 	}
7869 	rq_unlock_irqrestore(rq, &rf);
7870 }
7871 
7872 static inline void sched_set_rq_offline(struct rq *rq, int cpu)
7873 {
7874 	struct rq_flags rf;
7875 
7876 	rq_lock_irqsave(rq, &rf);
7877 	if (rq->rd) {
7878 		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
7879 		set_rq_offline(rq);
7880 	}
7881 	rq_unlock_irqrestore(rq, &rf);
7882 }
7883 
7884 /*
7885  * used to mark begin/end of suspend/resume:
7886  */
7887 static int num_cpus_frozen;
7888 
7889 /*
7890  * Update cpusets according to cpu_active mask.  If cpusets are
7891  * disabled, cpuset_update_active_cpus() becomes a simple wrapper
7892  * around partition_sched_domains().
7893  *
7894  * If we come here as part of a suspend/resume, don't touch cpusets because we
7895  * want to restore them to their original state upon resume anyway.
7896  */
7897 static void cpuset_cpu_active(void)
7898 {
7899 	if (cpuhp_tasks_frozen) {
7900 		/*
7901 		 * num_cpus_frozen tracks how many CPUs are involved in suspend
7902 		 * resume sequence. As long as this is not the last online
7903 		 * operation in the resume sequence, just build a single sched
7904 		 * domain, ignoring cpusets.
7905 		 */
7906 		partition_sched_domains(1, NULL, NULL);
7907 		if (--num_cpus_frozen)
7908 			return;
7909 		/*
7910 		 * This is the last CPU online operation. So fall through and
7911 		 * restore the original sched domains by considering the
7912 		 * cpuset configurations.
7913 		 */
7914 		cpuset_force_rebuild();
7915 	}
7916 	cpuset_update_active_cpus();
7917 }
7918 
7919 static int cpuset_cpu_inactive(unsigned int cpu)
7920 {
7921 	if (!cpuhp_tasks_frozen) {
7922 		int ret = dl_bw_check_overflow(cpu);
7923 
7924 		if (ret)
7925 			return ret;
7926 		cpuset_update_active_cpus();
7927 	} else {
7928 		num_cpus_frozen++;
7929 		partition_sched_domains(1, NULL, NULL);
7930 	}
7931 	return 0;
7932 }
7933 
7934 static inline void sched_smt_present_inc(int cpu)
7935 {
7936 #ifdef CONFIG_SCHED_SMT
7937 	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
7938 		static_branch_inc_cpuslocked(&sched_smt_present);
7939 #endif
7940 }
7941 
7942 static inline void sched_smt_present_dec(int cpu)
7943 {
7944 #ifdef CONFIG_SCHED_SMT
7945 	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
7946 		static_branch_dec_cpuslocked(&sched_smt_present);
7947 #endif
7948 }
7949 
7950 int sched_cpu_activate(unsigned int cpu)
7951 {
7952 	struct rq *rq = cpu_rq(cpu);
7953 
7954 	/*
7955 	 * Clear the balance_push callback and prepare to schedule
7956 	 * regular tasks.
7957 	 */
7958 	balance_push_set(cpu, false);
7959 
7960 	/*
7961 	 * When going up, increment the number of cores with SMT present.
7962 	 */
7963 	sched_smt_present_inc(cpu);
7964 	set_cpu_active(cpu, true);
7965 
7966 	if (sched_smp_initialized) {
7967 		sched_update_numa(cpu, true);
7968 		sched_domains_numa_masks_set(cpu);
7969 		cpuset_cpu_active();
7970 	}
7971 
7972 	/*
7973 	 * Put the rq online, if not already. This happens:
7974 	 *
7975 	 * 1) In the early boot process, because we build the real domains
7976 	 *    after all CPUs have been brought up.
7977 	 *
7978 	 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
7979 	 *    domains.
7980 	 */
7981 	sched_set_rq_online(rq, cpu);
7982 
7983 	return 0;
7984 }
7985 
7986 int sched_cpu_deactivate(unsigned int cpu)
7987 {
7988 	struct rq *rq = cpu_rq(cpu);
7989 	int ret;
7990 
7991 	/*
7992 	 * Remove the CPU from nohz.idle_cpus_mask to prevent it from
7993 	 * participating in load balancing while it is not active.
7994 	 */
7995 	nohz_balance_exit_idle(rq);
7996 
7997 	set_cpu_active(cpu, false);
7998 
7999 	/*
8000 	 * From this point forward, this CPU will refuse to run any task that
8001 	 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
8002 	 * push those tasks away until this gets cleared, see
8003 	 * sched_cpu_dying().
8004 	 */
8005 	balance_push_set(cpu, true);
8006 
8007 	/*
8008 	 * We've cleared cpu_active_mask / set balance_push, wait for all
8009 	 * preempt-disabled and RCU users of this state to go away such that
8010 	 * all new such users will observe it.
8011 	 *
8012 	 * Specifically, we rely on ttwu to no longer target this CPU, see
8013 	 * ttwu_queue_cond() and is_cpu_allowed().
8014 	 *
8015 	 * Do the sync before parking the smpboot threads to take care of the RCU boost case.
8016 	 */
8017 	synchronize_rcu();
8018 
8019 	sched_set_rq_offline(rq, cpu);
8020 
8021 	/*
8022 	 * When going down, decrement the number of cores with SMT present.
8023 	 */
8024 	sched_smt_present_dec(cpu);
8025 
8026 #ifdef CONFIG_SCHED_SMT
8027 	sched_core_cpu_deactivate(cpu);
8028 #endif
8029 
8030 	if (!sched_smp_initialized)
8031 		return 0;
8032 
8033 	sched_update_numa(cpu, false);
8034 	ret = cpuset_cpu_inactive(cpu);
8035 	if (ret) {
8036 		sched_smt_present_inc(cpu);
8037 		sched_set_rq_online(rq, cpu);
8038 		balance_push_set(cpu, false);
8039 		set_cpu_active(cpu, true);
8040 		sched_update_numa(cpu, true);
8041 		return ret;
8042 	}
8043 	sched_domains_numa_masks_clear(cpu);
8044 	return 0;
8045 }
8046 
8047 static void sched_rq_cpu_starting(unsigned int cpu)
8048 {
8049 	struct rq *rq = cpu_rq(cpu);
8050 
8051 	rq->calc_load_update = calc_load_update;
8052 	update_max_interval();
8053 }
8054 
8055 int sched_cpu_starting(unsigned int cpu)
8056 {
8057 	sched_core_cpu_starting(cpu);
8058 	sched_rq_cpu_starting(cpu);
8059 	sched_tick_start(cpu);
8060 	return 0;
8061 }
8062 
8063 #ifdef CONFIG_HOTPLUG_CPU
8064 
8065 /*
8066  * Invoked immediately before the stopper thread is invoked to bring the
8067  * CPU down completely. At this point all per CPU kthreads except the
8068  * hotplug thread (current) and the stopper thread (inactive) have been
8069  * either parked or have been unbound from the outgoing CPU. Ensure that
8070  * any of those which might be on the way out are gone.
8071  *
8072  * If after this point a bound task is being woken on this CPU then the
8073  * responsible hotplug callback has failed to do its job.
8074  * sched_cpu_dying() will catch it with the appropriate fireworks.
8075  */
8076 int sched_cpu_wait_empty(unsigned int cpu)
8077 {
8078 	balance_hotplug_wait();
8079 	return 0;
8080 }
8081 
8082 /*
8083  * Since this CPU is going 'away' for a while, fold any nr_active delta we
8084  * might have. Called from the CPU stopper task after ensuring that the
8085  * stopper is the last running task on the CPU, so nr_active count is
8086  * stable. We need to take the tear-down thread which is calling this into
8087  * account, so we hand in adjust = 1 to the load calculation.
8088  *
8089  * Also see the comment "Global load-average calculations".
8090  */
8091 static void calc_load_migrate(struct rq *rq)
8092 {
8093 	long delta = calc_load_fold_active(rq, 1);
8094 
8095 	if (delta)
8096 		atomic_long_add(delta, &calc_load_tasks);
8097 }
8098 
8099 static void dump_rq_tasks(struct rq *rq, const char *loglvl)
8100 {
8101 	struct task_struct *g, *p;
8102 	int cpu = cpu_of(rq);
8103 
8104 	lockdep_assert_rq_held(rq);
8105 
8106 	printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
8107 	for_each_process_thread(g, p) {
8108 		if (task_cpu(p) != cpu)
8109 			continue;
8110 
8111 		if (!task_on_rq_queued(p))
8112 			continue;
8113 
8114 		printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
8115 	}
8116 }
8117 
8118 int sched_cpu_dying(unsigned int cpu)
8119 {
8120 	struct rq *rq = cpu_rq(cpu);
8121 	struct rq_flags rf;
8122 
8123 	/* Handle pending wakeups and then migrate everything off */
8124 	sched_tick_stop(cpu);
8125 
8126 	rq_lock_irqsave(rq, &rf);
8127 	if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
8128 		WARN(true, "Dying CPU not properly vacated!");
8129 		dump_rq_tasks(rq, KERN_WARNING);
8130 	}
8131 	rq_unlock_irqrestore(rq, &rf);
8132 
8133 	calc_load_migrate(rq);
8134 	update_max_interval();
8135 	hrtick_clear(rq);
8136 	sched_core_cpu_dying(cpu);
8137 	return 0;
8138 }
8139 #endif
8140 
8141 void __init sched_init_smp(void)
8142 {
8143 	sched_init_numa(NUMA_NO_NODE);
8144 
8145 	/*
8146 	 * There's no userspace yet to cause hotplug operations; hence all the
8147 	 * CPU masks are stable and all blatant races in the below code cannot
8148 	 * happen.
8149 	 */
8150 	mutex_lock(&sched_domains_mutex);
8151 	sched_init_domains(cpu_active_mask);
8152 	mutex_unlock(&sched_domains_mutex);
8153 
8154 	/* Move init over to a non-isolated CPU */
8155 	if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0)
8156 		BUG();
8157 	current->flags &= ~PF_NO_SETAFFINITY;
8158 	sched_init_granularity();
8159 
8160 	init_sched_rt_class();
8161 	init_sched_dl_class();
8162 
8163 	sched_smp_initialized = true;
8164 }
8165 
8166 static int __init migration_init(void)
8167 {
8168 	sched_cpu_starting(smp_processor_id());
8169 	return 0;
8170 }
8171 early_initcall(migration_init);
8172 
8173 #else
8174 void __init sched_init_smp(void)
8175 {
8176 	sched_init_granularity();
8177 }
8178 #endif /* CONFIG_SMP */
8179 
8180 int in_sched_functions(unsigned long addr)
8181 {
8182 	return in_lock_functions(addr) ||
8183 		(addr >= (unsigned long)__sched_text_start
8184 		&& addr < (unsigned long)__sched_text_end);
8185 }
8186 
8187 #ifdef CONFIG_CGROUP_SCHED
8188 /*
8189  * Default task group.
8190  * Every task in the system belongs to this group at bootup.
8191  */
8192 struct task_group root_task_group;
8193 LIST_HEAD(task_groups);
8194 
8195 /* Cacheline aligned slab cache for task_group */
8196 static struct kmem_cache *task_group_cache __ro_after_init;
8197 #endif
8198 
8199 void __init sched_init(void)
8200 {
8201 	unsigned long ptr = 0;
8202 	int i;
8203 
8204 	/* Make sure the linker didn't screw up */
8205 	BUG_ON(&idle_sched_class != &fair_sched_class + 1 ||
8206 	       &fair_sched_class != &rt_sched_class + 1 ||
8207 	       &rt_sched_class   != &dl_sched_class + 1);
8208 #ifdef CONFIG_SMP
8209 	BUG_ON(&dl_sched_class != &stop_sched_class + 1);
8210 #endif
8211 
8212 	wait_bit_init();
8213 
8214 #ifdef CONFIG_FAIR_GROUP_SCHED
8215 	ptr += 2 * nr_cpu_ids * sizeof(void **);
8216 #endif
8217 #ifdef CONFIG_RT_GROUP_SCHED
8218 	ptr += 2 * nr_cpu_ids * sizeof(void **);
8219 #endif
8220 	if (ptr) {
8221 		ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT);
8222 
8223 #ifdef CONFIG_FAIR_GROUP_SCHED
8224 		root_task_group.se = (struct sched_entity **)ptr;
8225 		ptr += nr_cpu_ids * sizeof(void **);
8226 
8227 		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
8228 		ptr += nr_cpu_ids * sizeof(void **);
8229 
8230 		root_task_group.shares = ROOT_TASK_GROUP_LOAD;
8231 		init_cfs_bandwidth(&root_task_group.cfs_bandwidth, NULL);
8232 #endif /* CONFIG_FAIR_GROUP_SCHED */
8233 #ifdef CONFIG_RT_GROUP_SCHED
8234 		root_task_group.rt_se = (struct sched_rt_entity **)ptr;
8235 		ptr += nr_cpu_ids * sizeof(void **);
8236 
8237 		root_task_group.rt_rq = (struct rt_rq **)ptr;
8238 		ptr += nr_cpu_ids * sizeof(void **);
8239 
8240 #endif /* CONFIG_RT_GROUP_SCHED */
8241 	}
8242 
8243 	init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime());
8244 
8245 #ifdef CONFIG_SMP
8246 	init_defrootdomain();
8247 #endif
8248 
8249 #ifdef CONFIG_RT_GROUP_SCHED
8250 	init_rt_bandwidth(&root_task_group.rt_bandwidth,
8251 			global_rt_period(), global_rt_runtime());
8252 #endif /* CONFIG_RT_GROUP_SCHED */
8253 
8254 #ifdef CONFIG_CGROUP_SCHED
8255 	task_group_cache = KMEM_CACHE(task_group, 0);
8256 
8257 	list_add(&root_task_group.list, &task_groups);
8258 	INIT_LIST_HEAD(&root_task_group.children);
8259 	INIT_LIST_HEAD(&root_task_group.siblings);
8260 	autogroup_init(&init_task);
8261 #endif /* CONFIG_CGROUP_SCHED */
8262 
8263 	for_each_possible_cpu(i) {
8264 		struct rq *rq;
8265 
8266 		rq = cpu_rq(i);
8267 		raw_spin_lock_init(&rq->__lock);
8268 		rq->nr_running = 0;
8269 		rq->calc_load_active = 0;
8270 		rq->calc_load_update = jiffies + LOAD_FREQ;
8271 		init_cfs_rq(&rq->cfs);
8272 		init_rt_rq(&rq->rt);
8273 		init_dl_rq(&rq->dl);
8274 #ifdef CONFIG_FAIR_GROUP_SCHED
8275 		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
8276 		rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
8277 		/*
8278 		 * How much CPU bandwidth does root_task_group get?
8279 		 *
8280 		 * In case of task-groups formed through the cgroup filesystem, it
8281 		 * gets 100% of the CPU resources in the system. This overall
8282 		 * system CPU resource is divided among the tasks of
8283 		 * root_task_group and its child task-groups in a fair manner,
8284 		 * based on each entity's (task or task-group's) weight
8285 		 * (se->load.weight).
8286 		 *
8287 		 * In other words, if root_task_group has 10 tasks of weight
8288 		 * 1024 and two child groups A0 and A1 (of weight 1024 each),
8289 		 * then A0's share of the CPU resource is:
8290 		 *
8291 		 *	A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
8292 		 *
8293 		 * We achieve this by letting root_task_group's tasks sit
8294 		 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
8295 		 */
8296 		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
8297 #endif /* CONFIG_FAIR_GROUP_SCHED */
8298 
8299 		rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
8300 #ifdef CONFIG_RT_GROUP_SCHED
8301 		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
8302 #endif
8303 #ifdef CONFIG_SMP
8304 		rq->sd = NULL;
8305 		rq->rd = NULL;
8306 		rq->cpu_capacity = SCHED_CAPACITY_SCALE;
8307 		rq->balance_callback = &balance_push_callback;
8308 		rq->active_balance = 0;
8309 		rq->next_balance = jiffies;
8310 		rq->push_cpu = 0;
8311 		rq->cpu = i;
8312 		rq->online = 0;
8313 		rq->idle_stamp = 0;
8314 		rq->avg_idle = 2*sysctl_sched_migration_cost;
8315 		rq->max_idle_balance_cost = sysctl_sched_migration_cost;
8316 
8317 		INIT_LIST_HEAD(&rq->cfs_tasks);
8318 
8319 		rq_attach_root(rq, &def_root_domain);
8320 #ifdef CONFIG_NO_HZ_COMMON
8321 		rq->last_blocked_load_update_tick = jiffies;
8322 		atomic_set(&rq->nohz_flags, 0);
8323 
8324 		INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
8325 #endif
8326 #ifdef CONFIG_HOTPLUG_CPU
8327 		rcuwait_init(&rq->hotplug_wait);
8328 #endif
8329 #endif /* CONFIG_SMP */
8330 		hrtick_rq_init(rq);
8331 		atomic_set(&rq->nr_iowait, 0);
8332 
8333 #ifdef CONFIG_SCHED_CORE
8334 		rq->core = rq;
8335 		rq->core_pick = NULL;
8336 		rq->core_enabled = 0;
8337 		rq->core_tree = RB_ROOT;
8338 		rq->core_forceidle_count = 0;
8339 		rq->core_forceidle_occupation = 0;
8340 		rq->core_forceidle_start = 0;
8341 
8342 		rq->core_cookie = 0UL;
8343 #endif
8344 		zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i));
8345 	}
8346 
8347 	set_load_weight(&init_task, false);
8348 
8349 	/*
8350 	 * The boot idle thread does lazy MMU switching as well:
8351 	 */
8352 	mmgrab_lazy_tlb(&init_mm);
8353 	enter_lazy_tlb(&init_mm, current);
8354 
8355 	/*
8356 	 * The idle task doesn't need the kthread struct to function, but it
8357 	 * is dressed up as a per-CPU kthread and thus needs to play the part
8358 	 * if we want to avoid special-casing it in code that deals with per-CPU
8359 	 * kthreads.
8360 	 */
8361 	WARN_ON(!set_kthread_struct(current));
8362 
8363 	/*
8364 	 * Make us the idle thread. Technically, schedule() should not be
8365 	 * called from this thread, however somewhere below it might be,
8366 	 * but because we are the idle thread, we just pick up running again
8367 	 * when this runqueue becomes "idle".
8368 	 */
8369 	init_idle(current, smp_processor_id());
8370 
8371 	calc_load_update = jiffies + LOAD_FREQ;
8372 
8373 #ifdef CONFIG_SMP
8374 	idle_thread_set_boot_cpu();
8375 	balance_push_set(smp_processor_id(), false);
8376 #endif
8377 	init_sched_fair_class();
8378 
8379 	psi_init();
8380 
8381 	init_uclamp();
8382 
8383 	preempt_dynamic_init();
8384 
8385 	scheduler_running = 1;
8386 }
8387 
8388 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
8389 
8390 void __might_sleep(const char *file, int line)
8391 {
8392 	unsigned int state = get_current_state();
8393 	/*
8394 	 * Blocking primitives will set (and therefore destroy) current->state.
8395 	 * Since we will exit with TASK_RUNNING, make sure we enter with it;
8396 	 * otherwise we will destroy state.
8397 	 */
8398 	WARN_ONCE(state != TASK_RUNNING && current->task_state_change,
8399 			"do not call blocking ops when !TASK_RUNNING; "
8400 			"state=%x set at [<%p>] %pS\n", state,
8401 			(void *)current->task_state_change,
8402 			(void *)current->task_state_change);
8403 
8404 	__might_resched(file, line, 0);
8405 }
8406 EXPORT_SYMBOL(__might_sleep);
8407 
8408 static void print_preempt_disable_ip(int preempt_offset, unsigned long ip)
8409 {
8410 	if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT))
8411 		return;
8412 
8413 	if (preempt_count() == preempt_offset)
8414 		return;
8415 
8416 	pr_err("Preemption disabled at:");
8417 	print_ip_sym(KERN_ERR, ip);
8418 }
8419 
8420 static inline bool resched_offsets_ok(unsigned int offsets)
8421 {
8422 	unsigned int nested = preempt_count();
8423 
8424 	nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT;
8425 
8426 	return nested == offsets;
8427 }
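
/*
 * Editorial note: the 'offsets' argument packs the expected preempt count in
 * the low bits and the expected RCU nesting depth above
 * MIGHT_RESCHED_RCU_SHIFT, so a single compare suffices. The standalone
 * illustration below assumes a shift of 8; the real constant is defined in
 * the scheduler headers.
 */

#include <stdio.h>

#define RCU_SHIFT	8			/* assumed value of MIGHT_RESCHED_RCU_SHIFT */
#define PREEMPT_MASK	((1U << RCU_SHIFT) - 1)

int main(void)
{
	unsigned int preempt_cnt = 1;		/* e.g. one spin_lock held */
	unsigned int rcu_depth   = 2;		/* e.g. two nested rcu_read_lock() */
	unsigned int offsets     = preempt_cnt + (rcu_depth << RCU_SHIFT);

	printf("offsets          = 0x%x\n", offsets);
	printf("expected preempt = %u\n", offsets & PREEMPT_MASK);
	printf("expected rcu     = %u\n", offsets >> RCU_SHIFT);
	return 0;
}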
8428 
8429 void __might_resched(const char *file, int line, unsigned int offsets)
8430 {
8431 	/* Ratelimiting timestamp: */
8432 	static unsigned long prev_jiffy;
8433 
8434 	unsigned long preempt_disable_ip;
8435 
8436 	/* WARN_ON_ONCE() by default, no rate limit required: */
8437 	rcu_sleep_check();
8438 
8439 	if ((resched_offsets_ok(offsets) && !irqs_disabled() &&
8440 	     !is_idle_task(current) && !current->non_block_count) ||
8441 	    system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
8442 	    oops_in_progress)
8443 		return;
8444 
8445 	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8446 		return;
8447 	prev_jiffy = jiffies;
8448 
8449 	/* Save this before calling printk(), since that will clobber it: */
8450 	preempt_disable_ip = get_preempt_disable_ip(current);
8451 
8452 	pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
8453 	       file, line);
8454 	pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
8455 	       in_atomic(), irqs_disabled(), current->non_block_count,
8456 	       current->pid, current->comm);
8457 	pr_err("preempt_count: %x, expected: %x\n", preempt_count(),
8458 	       offsets & MIGHT_RESCHED_PREEMPT_MASK);
8459 
8460 	if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
8461 		pr_err("RCU nest depth: %d, expected: %u\n",
8462 		       rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT);
8463 	}
8464 
8465 	if (task_stack_end_corrupted(current))
8466 		pr_emerg("Thread overran stack, or stack corrupted\n");
8467 
8468 	debug_show_held_locks(current);
8469 	if (irqs_disabled())
8470 		print_irqtrace_events(current);
8471 
8472 	print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK,
8473 				 preempt_disable_ip);
8474 
8475 	dump_stack();
8476 	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
8477 }
8478 EXPORT_SYMBOL(__might_resched);
8479 
8480 void __cant_sleep(const char *file, int line, int preempt_offset)
8481 {
8482 	static unsigned long prev_jiffy;
8483 
8484 	if (irqs_disabled())
8485 		return;
8486 
8487 	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
8488 		return;
8489 
8490 	if (preempt_count() > preempt_offset)
8491 		return;
8492 
8493 	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8494 		return;
8495 	prev_jiffy = jiffies;
8496 
8497 	printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
8498 	printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
8499 			in_atomic(), irqs_disabled(),
8500 			current->pid, current->comm);
8501 
8502 	debug_show_held_locks(current);
8503 	dump_stack();
8504 	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
8505 }
8506 EXPORT_SYMBOL_GPL(__cant_sleep);
8507 
8508 #ifdef CONFIG_SMP
8509 void __cant_migrate(const char *file, int line)
8510 {
8511 	static unsigned long prev_jiffy;
8512 
8513 	if (irqs_disabled())
8514 		return;
8515 
8516 	if (is_migration_disabled(current))
8517 		return;
8518 
8519 	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
8520 		return;
8521 
8522 	if (preempt_count() > 0)
8523 		return;
8524 
8525 	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8526 		return;
8527 	prev_jiffy = jiffies;
8528 
8529 	pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
8530 	pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
8531 	       in_atomic(), irqs_disabled(), is_migration_disabled(current),
8532 	       current->pid, current->comm);
8533 
8534 	debug_show_held_locks(current);
8535 	dump_stack();
8536 	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
8537 }
8538 EXPORT_SYMBOL_GPL(__cant_migrate);
8539 #endif
8540 #endif
8541 
8542 #ifdef CONFIG_MAGIC_SYSRQ
8543 void normalize_rt_tasks(void)
8544 {
8545 	struct task_struct *g, *p;
8546 	struct sched_attr attr = {
8547 		.sched_policy = SCHED_NORMAL,
8548 	};
8549 
8550 	read_lock(&tasklist_lock);
8551 	for_each_process_thread(g, p) {
8552 		/*
8553 		 * Only normalize user tasks:
8554 		 */
8555 		if (p->flags & PF_KTHREAD)
8556 			continue;
8557 
8558 		p->se.exec_start = 0;
8559 		schedstat_set(p->stats.wait_start,  0);
8560 		schedstat_set(p->stats.sleep_start, 0);
8561 		schedstat_set(p->stats.block_start, 0);
8562 
8563 		if (!dl_task(p) && !rt_task(p)) {
8564 			/*
8565 			 * Renice negative nice level userspace
8566 			 * tasks back to 0:
8567 			 */
8568 			if (task_nice(p) < 0)
8569 				set_user_nice(p, 0);
8570 			continue;
8571 		}
8572 
8573 		__sched_setscheduler(p, &attr, false, false);
8574 	}
8575 	read_unlock(&tasklist_lock);
8576 }
8577 
8578 #endif /* CONFIG_MAGIC_SYSRQ */
8579 
8580 #if defined(CONFIG_KGDB_KDB)
8581 /*
8582  * These functions are only useful for KDB.
8583  *
8584  * They can only be called when the whole system has been
8585  * stopped - every CPU needs to be quiescent, and no scheduling
8586  * activity can take place. Using them for anything else would
8587  * be a serious bug, and as a result, they aren't even visible
8588  * under any other configuration.
8589  */
8590 
8591 /**
8592  * curr_task - return the current task for a given CPU.
8593  * @cpu: the processor in question.
8594  *
8595  * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
8596  *
8597  * Return: The current task for @cpu.
8598  */
8599 struct task_struct *curr_task(int cpu)
8600 {
8601 	return cpu_curr(cpu);
8602 }
8603 
8604 #endif /* defined(CONFIG_KGDB_KDB) */
8605 
8606 #ifdef CONFIG_CGROUP_SCHED
8607 /* task_group_lock serializes the addition/removal of task groups */
8608 static DEFINE_SPINLOCK(task_group_lock);
8609 
8610 static inline void alloc_uclamp_sched_group(struct task_group *tg,
8611 					    struct task_group *parent)
8612 {
8613 #ifdef CONFIG_UCLAMP_TASK_GROUP
8614 	enum uclamp_id clamp_id;
8615 
8616 	for_each_clamp_id(clamp_id) {
8617 		uclamp_se_set(&tg->uclamp_req[clamp_id],
8618 			      uclamp_none(clamp_id), false);
8619 		tg->uclamp[clamp_id] = parent->uclamp[clamp_id];
8620 	}
8621 #endif
8622 }
8623 
8624 static void sched_free_group(struct task_group *tg)
8625 {
8626 	free_fair_sched_group(tg);
8627 	free_rt_sched_group(tg);
8628 	autogroup_free(tg);
8629 	kmem_cache_free(task_group_cache, tg);
8630 }
8631 
8632 static void sched_free_group_rcu(struct rcu_head *rcu)
8633 {
8634 	sched_free_group(container_of(rcu, struct task_group, rcu));
8635 }
8636 
8637 static void sched_unregister_group(struct task_group *tg)
8638 {
8639 	unregister_fair_sched_group(tg);
8640 	unregister_rt_sched_group(tg);
8641 	/*
8642 	 * We have to wait for yet another RCU grace period to expire, as
8643 	 * print_cfs_stats() might run concurrently.
8644 	 */
8645 	call_rcu(&tg->rcu, sched_free_group_rcu);
8646 }
8647 
8648 /* allocate runqueue etc for a new task group */
8649 struct task_group *sched_create_group(struct task_group *parent)
8650 {
8651 	struct task_group *tg;
8652 
8653 	tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
8654 	if (!tg)
8655 		return ERR_PTR(-ENOMEM);
8656 
8657 	if (!alloc_fair_sched_group(tg, parent))
8658 		goto err;
8659 
8660 	if (!alloc_rt_sched_group(tg, parent))
8661 		goto err;
8662 
8663 	alloc_uclamp_sched_group(tg, parent);
8664 
8665 	return tg;
8666 
8667 err:
8668 	sched_free_group(tg);
8669 	return ERR_PTR(-ENOMEM);
8670 }
8671 
8672 void sched_online_group(struct task_group *tg, struct task_group *parent)
8673 {
8674 	unsigned long flags;
8675 
8676 	spin_lock_irqsave(&task_group_lock, flags);
8677 	list_add_rcu(&tg->list, &task_groups);
8678 
8679 	/* Root should already exist: */
8680 	WARN_ON(!parent);
8681 
8682 	tg->parent = parent;
8683 	INIT_LIST_HEAD(&tg->children);
8684 	list_add_rcu(&tg->siblings, &parent->children);
8685 	spin_unlock_irqrestore(&task_group_lock, flags);
8686 
8687 	online_fair_sched_group(tg);
8688 }
8689 
8690 /* RCU callback to free various structures associated with a task group */
8691 static void sched_unregister_group_rcu(struct rcu_head *rhp)
8692 {
8693 	/* Now it should be safe to free those cfs_rqs: */
8694 	sched_unregister_group(container_of(rhp, struct task_group, rcu));
8695 }
8696 
8697 void sched_destroy_group(struct task_group *tg)
8698 {
8699 	/* Wait for possible concurrent references to cfs_rqs to complete: */
8700 	call_rcu(&tg->rcu, sched_unregister_group_rcu);
8701 }
8702 
8703 void sched_release_group(struct task_group *tg)
8704 {
8705 	unsigned long flags;
8706 
8707 	/*
8708 	 * Unlink first, to prevent walk_tg_tree_from() from finding us (via
8709 	 * sched_cfs_period_timer()).
8710 	 *
8711 	 * For this to be effective, we have to wait for all pending users of
8712 	 * this task group to leave their RCU critical section to ensure no new
8713 	 * user will see our dying task group any more. Specifically ensure
8714 	 * that tg_unthrottle_up() won't add decayed cfs_rq's to it.
8715 	 *
8716 	 * We therefore defer calling unregister_fair_sched_group() to
8717 	 * sched_unregister_group(), which is guaranteed to get called only after the
8718 	 * current RCU grace period has expired.
8719 	 */
8720 	spin_lock_irqsave(&task_group_lock, flags);
8721 	list_del_rcu(&tg->list);
8722 	list_del_rcu(&tg->siblings);
8723 	spin_unlock_irqrestore(&task_group_lock, flags);
8724 }
8725 
8726 static struct task_group *sched_get_task_group(struct task_struct *tsk)
8727 {
8728 	struct task_group *tg;
8729 
8730 	/*
8731 	 * All callers are synchronized by task_rq_lock(); we do not use RCU,
8732 	 * which would be pointless here. Thus, we pass "true" to task_css_check()
8733 	 * to prevent lockdep warnings.
8734 	 */
8735 	tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
8736 			  struct task_group, css);
8737 	tg = autogroup_task_group(tsk, tg);
8738 
8739 	return tg;
8740 }
8741 
8742 static void sched_change_group(struct task_struct *tsk, struct task_group *group)
8743 {
8744 	tsk->sched_task_group = group;
8745 
8746 #ifdef CONFIG_FAIR_GROUP_SCHED
8747 	if (tsk->sched_class->task_change_group)
8748 		tsk->sched_class->task_change_group(tsk);
8749 	else
8750 #endif
8751 		set_task_rq(tsk, task_cpu(tsk));
8752 }
8753 
8754 /*
8755  * Change task's runqueue when it moves between groups.
8756  *
8757  * The caller of this function should have put the task in its new group by
8758  * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
8759  * its new group.
8760  */
8761 void sched_move_task(struct task_struct *tsk)
8762 {
8763 	int queued, running, queue_flags =
8764 		DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
8765 	struct task_group *group;
8766 	struct rq *rq;
8767 
8768 	CLASS(task_rq_lock, rq_guard)(tsk);
8769 	rq = rq_guard.rq;
8770 
8771 	/*
8772 	 * Esp. with SCHED_AUTOGROUP enabled it is possible to get superfluous
8773 	 * group changes.
8774 	 */
8775 	group = sched_get_task_group(tsk);
8776 	if (group == tsk->sched_task_group)
8777 		return;
8778 
8779 	update_rq_clock(rq);
8780 
8781 	running = task_current(rq, tsk);
8782 	queued = task_on_rq_queued(tsk);
8783 
8784 	if (queued)
8785 		dequeue_task(rq, tsk, queue_flags);
8786 	if (running)
8787 		put_prev_task(rq, tsk);
8788 
8789 	sched_change_group(tsk, group);
8790 
8791 	if (queued)
8792 		enqueue_task(rq, tsk, queue_flags);
8793 	if (running) {
8794 		set_next_task(rq, tsk);
8795 		/*
8796 		 * After changing group, the running task may have joined a
8797 		 * throttled one but it's still the running task. Trigger a
8798 		 * resched to make sure that task can still run.
8799 		 */
8800 		resched_curr(rq);
8801 	}
8802 }
8803 
8804 static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
8805 {
8806 	return css ? container_of(css, struct task_group, css) : NULL;
8807 }
8808 
8809 static struct cgroup_subsys_state *
8810 cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
8811 {
8812 	struct task_group *parent = css_tg(parent_css);
8813 	struct task_group *tg;
8814 
8815 	if (!parent) {
8816 		/* This is early initialization for the top cgroup */
8817 		return &root_task_group.css;
8818 	}
8819 
8820 	tg = sched_create_group(parent);
8821 	if (IS_ERR(tg))
8822 		return ERR_PTR(-ENOMEM);
8823 
8824 	return &tg->css;
8825 }
8826 
8827 /* Expose task group only after completing cgroup initialization */
8828 static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
8829 {
8830 	struct task_group *tg = css_tg(css);
8831 	struct task_group *parent = css_tg(css->parent);
8832 
8833 	if (parent)
8834 		sched_online_group(tg, parent);
8835 
8836 #ifdef CONFIG_UCLAMP_TASK_GROUP
8837 	/* Propagate the effective uclamp value for the new group */
8838 	guard(mutex)(&uclamp_mutex);
8839 	guard(rcu)();
8840 	cpu_util_update_eff(css);
8841 #endif
8842 
8843 	return 0;
8844 }
8845 
8846 static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
8847 {
8848 	struct task_group *tg = css_tg(css);
8849 
8850 	sched_release_group(tg);
8851 }
8852 
8853 static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
8854 {
8855 	struct task_group *tg = css_tg(css);
8856 
8857 	/*
8858 	 * Relies on the RCU grace period between css_released() and this.
8859 	 */
8860 	sched_unregister_group(tg);
8861 }
8862 
8863 #ifdef CONFIG_RT_GROUP_SCHED
8864 static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
8865 {
8866 	struct task_struct *task;
8867 	struct cgroup_subsys_state *css;
8868 
8869 	cgroup_taskset_for_each(task, css, tset) {
8870 		if (!sched_rt_can_attach(css_tg(css), task))
8871 			return -EINVAL;
8872 	}
8873 	return 0;
8874 }
8875 #endif
8876 
8877 static void cpu_cgroup_attach(struct cgroup_taskset *tset)
8878 {
8879 	struct task_struct *task;
8880 	struct cgroup_subsys_state *css;
8881 
8882 	cgroup_taskset_for_each(task, css, tset)
8883 		sched_move_task(task);
8884 }
8885 
8886 #ifdef CONFIG_UCLAMP_TASK_GROUP
8887 static void cpu_util_update_eff(struct cgroup_subsys_state *css)
8888 {
8889 	struct cgroup_subsys_state *top_css = css;
8890 	struct uclamp_se *uc_parent = NULL;
8891 	struct uclamp_se *uc_se = NULL;
8892 	unsigned int eff[UCLAMP_CNT];
8893 	enum uclamp_id clamp_id;
8894 	unsigned int clamps;
8895 
8896 	lockdep_assert_held(&uclamp_mutex);
8897 	SCHED_WARN_ON(!rcu_read_lock_held());
8898 
8899 	css_for_each_descendant_pre(css, top_css) {
8900 		uc_parent = css_tg(css)->parent
8901 			? css_tg(css)->parent->uclamp : NULL;
8902 
8903 		for_each_clamp_id(clamp_id) {
8904 			/* Assume effective clamps match requested clamps */
8905 			eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value;
8906 			/* Cap effective clamps with parent's effective clamps */
8907 			if (uc_parent &&
8908 			    eff[clamp_id] > uc_parent[clamp_id].value) {
8909 				eff[clamp_id] = uc_parent[clamp_id].value;
8910 			}
8911 		}
8912 		/* Ensure protection is always capped by limit */
8913 		eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]);
8914 
8915 		/* Propagate most restrictive effective clamps */
8916 		clamps = 0x0;
8917 		uc_se = css_tg(css)->uclamp;
8918 		for_each_clamp_id(clamp_id) {
8919 			if (eff[clamp_id] == uc_se[clamp_id].value)
8920 				continue;
8921 			uc_se[clamp_id].value = eff[clamp_id];
8922 			uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]);
8923 			clamps |= (0x1 << clamp_id);
8924 		}
8925 		if (!clamps) {
8926 			css = css_rightmost_descendant(css);
8927 			continue;
8928 		}
8929 
8930 		/* Immediately update descendants RUNNABLE tasks */
8931 		uclamp_update_active_tasks(css);
8932 	}
8933 }
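
/*
 * Editorial note: the propagation rule above is simple: a group's effective
 * clamp is its requested value capped by the parent's effective value, and
 * UCLAMP_MIN is additionally capped by UCLAMP_MAX. Below is a standalone
 * worked example for a single parent/child pair; all values are illustrative.
 */

#include <stdio.h>

#define UC_MIN 0
#define UC_MAX 1

static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }

/* eff = requested capped by the parent's effective clamps; MIN capped by MAX. */
static void effective(const unsigned int req[2], const unsigned int parent[2],
		      unsigned int eff[2])
{
	eff[UC_MIN] = min_u(req[UC_MIN], parent[UC_MIN]);
	eff[UC_MAX] = min_u(req[UC_MAX], parent[UC_MAX]);
	eff[UC_MIN] = min_u(eff[UC_MIN], eff[UC_MAX]);
}

int main(void)
{
	unsigned int parent[2] = { 400, 512 };	/* parent's effective clamps */
	unsigned int child[2]  = { 700, 1024 };	/* child's requested clamps */
	unsigned int eff[2];

	effective(child, parent, eff);
	printf("child effective: min=%u max=%u\n", eff[UC_MIN], eff[UC_MAX]); /* 400, 512 */
	return 0;
}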
8934 
8935 /*
8936  * Compute the integer 10^N for a given exponent N by casting the C literal
8937  * "1eN" to an integer. Since the macro argument (N) has to be expanded before
8938  * it is token-pasted, use two levels of macros.
8939  */
8940 #define _POW10(exp) ((unsigned int)1e##exp)
8941 #define POW10(exp) _POW10(exp)
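
/*
 * Editorial note: the second macro level makes the argument expand before
 * token pasting, so POW10(UCLAMP_PERCENT_SHIFT) becomes the literal 1e2
 * rather than the invalid token 1eUCLAMP_PERCENT_SHIFT. A tiny standalone
 * check of the trick:
 */

#include <stdio.h>

#define _POW10(exp) ((unsigned int)1e##exp)
#define POW10(exp) _POW10(exp)
#define SHIFT 2

int main(void)
{
	/* Without the extra level, this would try to build the token "1eSHIFT". */
	printf("POW10(SHIFT) = %u\n", POW10(SHIFT));	/* prints 100 */
	return 0;
}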
8942 
8943 struct uclamp_request {
8944 #define UCLAMP_PERCENT_SHIFT	2
8945 #define UCLAMP_PERCENT_SCALE	(100 * POW10(UCLAMP_PERCENT_SHIFT))
8946 	s64 percent;
8947 	u64 util;
8948 	int ret;
8949 };
8950 
8951 static inline struct uclamp_request
8952 capacity_from_percent(char *buf)
8953 {
8954 	struct uclamp_request req = {
8955 		.percent = UCLAMP_PERCENT_SCALE,
8956 		.util = SCHED_CAPACITY_SCALE,
8957 		.ret = 0,
8958 	};
8959 
8960 	buf = strim(buf);
8961 	if (strcmp(buf, "max")) {
8962 		req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT,
8963 					     &req.percent);
8964 		if (req.ret)
8965 			return req;
8966 		if ((u64)req.percent > UCLAMP_PERCENT_SCALE) {
8967 			req.ret = -ERANGE;
8968 			return req;
8969 		}
8970 
8971 		req.util = req.percent << SCHED_CAPACITY_SHIFT;
8972 		req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE);
8973 	}
8974 
8975 	return req;
8976 }
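
/*
 * Editorial note: the conversion takes a fixed-point percentage with
 * UCLAMP_PERCENT_SHIFT (two) decimal places and scales it onto the
 * 0..SCHED_CAPACITY_SCALE range. The standalone worked example below assumes
 * the usual SCHED_CAPACITY_SHIFT of 10.
 */

#include <inttypes.h>
#include <stdio.h>

#define PERCENT_SHIFT	2			/* two decimal places */
#define PERCENT_SCALE	(100 * 100)		/* 100.00% in fixed point */
#define CAPACITY_SHIFT	10			/* assumed SCHED_CAPACITY_SHIFT */

int main(void)
{
	int64_t percent = 5000;			/* "50.00" as parsed by cgroup_parse_float() */
	uint64_t util;

	util = (uint64_t)percent << CAPACITY_SHIFT;
	util = (util + PERCENT_SCALE / 2) / PERCENT_SCALE;	/* round to closest */

	printf("50.00%% -> util %" PRIu64 " of 1024\n", util);	/* 512 */
	return 0;
}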
8977 
8978 static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf,
8979 				size_t nbytes, loff_t off,
8980 				enum uclamp_id clamp_id)
8981 {
8982 	struct uclamp_request req;
8983 	struct task_group *tg;
8984 
8985 	req = capacity_from_percent(buf);
8986 	if (req.ret)
8987 		return req.ret;
8988 
8989 	static_branch_enable(&sched_uclamp_used);
8990 
8991 	guard(mutex)(&uclamp_mutex);
8992 	guard(rcu)();
8993 
8994 	tg = css_tg(of_css(of));
8995 	if (tg->uclamp_req[clamp_id].value != req.util)
8996 		uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false);
8997 
8998 	/*
8999 	 * Because the conversion rounding is not recoverable, we keep track of
9000 	 * the exact requested value.
9001 	 */
9002 	tg->uclamp_pct[clamp_id] = req.percent;
9003 
9004 	/* Update effective clamps to track the most restrictive value */
9005 	cpu_util_update_eff(of_css(of));
9006 
9007 	return nbytes;
9008 }
9009 
9010 static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of,
9011 				    char *buf, size_t nbytes,
9012 				    loff_t off)
9013 {
9014 	return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MIN);
9015 }
9016 
9017 static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of,
9018 				    char *buf, size_t nbytes,
9019 				    loff_t off)
9020 {
9021 	return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MAX);
9022 }
9023 
9024 static inline void cpu_uclamp_print(struct seq_file *sf,
9025 				    enum uclamp_id clamp_id)
9026 {
9027 	struct task_group *tg;
9028 	u64 util_clamp;
9029 	u64 percent;
9030 	u32 rem;
9031 
9032 	scoped_guard (rcu) {
9033 		tg = css_tg(seq_css(sf));
9034 		util_clamp = tg->uclamp_req[clamp_id].value;
9035 	}
9036 
9037 	if (util_clamp == SCHED_CAPACITY_SCALE) {
9038 		seq_puts(sf, "max\n");
9039 		return;
9040 	}
9041 
9042 	percent = tg->uclamp_pct[clamp_id];
9043 	percent = div_u64_rem(percent, POW10(UCLAMP_PERCENT_SHIFT), &rem);
9044 	seq_printf(sf, "%llu.%0*u\n", percent, UCLAMP_PERCENT_SHIFT, rem);
9045 }
9046 
9047 static int cpu_uclamp_min_show(struct seq_file *sf, void *v)
9048 {
9049 	cpu_uclamp_print(sf, UCLAMP_MIN);
9050 	return 0;
9051 }
9052 
9053 static int cpu_uclamp_max_show(struct seq_file *sf, void *v)
9054 {
9055 	cpu_uclamp_print(sf, UCLAMP_MAX);
9056 	return 0;
9057 }
9058 #endif /* CONFIG_UCLAMP_TASK_GROUP */
9059 
9060 #ifdef CONFIG_FAIR_GROUP_SCHED
9061 static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
9062 				struct cftype *cftype, u64 shareval)
9063 {
9064 	if (shareval > scale_load_down(ULONG_MAX))
9065 		shareval = MAX_SHARES;
9066 	return sched_group_set_shares(css_tg(css), scale_load(shareval));
9067 }
9068 
9069 static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
9070 			       struct cftype *cft)
9071 {
9072 	struct task_group *tg = css_tg(css);
9073 
9074 	return (u64) scale_load_down(tg->shares);
9075 }
9076 
9077 #ifdef CONFIG_CFS_BANDWIDTH
9078 static DEFINE_MUTEX(cfs_constraints_mutex);
9079 
9080 const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
9081 static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
9082 /* More than 203 days if BW_SHIFT equals 20. */
9083 static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC;
9084 
9085 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
9086 
9087 static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota,
9088 				u64 burst)
9089 {
9090 	int i, ret = 0, runtime_enabled, runtime_was_enabled;
9091 	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9092 
9093 	if (tg == &root_task_group)
9094 		return -EINVAL;
9095 
9096 	/*
9097 	 * Ensure we have at least some amount of bandwidth every period.  This is
9098 	 * to prevent reaching a state of large arrears when throttled via
9099 	 * entity_tick() resulting in prolonged exit starvation.
9100 	 */
9101 	if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
9102 		return -EINVAL;
9103 
9104 	/*
9105 	 * Likewise, bound things on the other side by preventing insane quota
9106 	 * periods.  This also allows us to normalize in computing quota
9107 	 * feasibility.
9108 	 */
9109 	if (period > max_cfs_quota_period)
9110 		return -EINVAL;
9111 
9112 	/*
9113 	 * Bound quota to defend quota against overflow during bandwidth shift.
9114 	 */
9115 	if (quota != RUNTIME_INF && quota > max_cfs_runtime)
9116 		return -EINVAL;
9117 
9118 	if (quota != RUNTIME_INF && (burst > quota ||
9119 				     burst + quota > max_cfs_runtime))
9120 		return -EINVAL;
9121 
9122 	/*
9123 	 * Prevent race between setting of cfs_rq->runtime_enabled and
9124 	 * unthrottle_offline_cfs_rqs().
9125 	 */
9126 	guard(cpus_read_lock)();
9127 	guard(mutex)(&cfs_constraints_mutex);
9128 
9129 	ret = __cfs_schedulable(tg, period, quota);
9130 	if (ret)
9131 		return ret;
9132 
9133 	runtime_enabled = quota != RUNTIME_INF;
9134 	runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
9135 	/*
9136 	 * If we need to toggle cfs_bandwidth_used, off->on must occur
9137 	 * before making related changes, and on->off must occur afterwards
9138 	 */
9139 	if (runtime_enabled && !runtime_was_enabled)
9140 		cfs_bandwidth_usage_inc();
9141 
9142 	scoped_guard (raw_spinlock_irq, &cfs_b->lock) {
9143 		cfs_b->period = ns_to_ktime(period);
9144 		cfs_b->quota = quota;
9145 		cfs_b->burst = burst;
9146 
9147 		__refill_cfs_bandwidth_runtime(cfs_b);
9148 
9149 		/*
9150 		 * Restart the period timer (if active) to handle new
9151 		 * period expiry:
9152 		 */
9153 		if (runtime_enabled)
9154 			start_cfs_bandwidth(cfs_b);
9155 	}
9156 
9157 	for_each_online_cpu(i) {
9158 		struct cfs_rq *cfs_rq = tg->cfs_rq[i];
9159 		struct rq *rq = cfs_rq->rq;
9160 
9161 		guard(rq_lock_irq)(rq);
9162 		cfs_rq->runtime_enabled = runtime_enabled;
9163 		cfs_rq->runtime_remaining = 0;
9164 
9165 		if (cfs_rq->throttled)
9166 			unthrottle_cfs_rq(cfs_rq);
9167 	}
9168 
9169 	if (runtime_was_enabled && !runtime_enabled)
9170 		cfs_bandwidth_usage_dec();
9171 
9172 	return 0;
9173 }
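
/*
 * Example of the constraints enforced above, with times given relative to
 * the default 100ms period:
 *
 *	period = 100ms	(must lie within [1ms, 1s])
 *	quota  =  50ms	(>= 1ms and <= max_cfs_runtime)
 *	burst  =  25ms	(<= quota, and burst + quota <= max_cfs_runtime)
 *
 * allows the group roughly half a CPU per period, whereas a quota of 500us
 * or a burst larger than the quota is rejected with -EINVAL.
 */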
9174 
9175 static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
9176 {
9177 	u64 quota, period, burst;
9178 
9179 	period = ktime_to_ns(tg->cfs_bandwidth.period);
9180 	burst = tg->cfs_bandwidth.burst;
9181 	if (cfs_quota_us < 0)
9182 		quota = RUNTIME_INF;
9183 	else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC)
9184 		quota = (u64)cfs_quota_us * NSEC_PER_USEC;
9185 	else
9186 		return -EINVAL;
9187 
9188 	return tg_set_cfs_bandwidth(tg, period, quota, burst);
9189 }
9190 
9191 static long tg_get_cfs_quota(struct task_group *tg)
9192 {
9193 	u64 quota_us;
9194 
9195 	if (tg->cfs_bandwidth.quota == RUNTIME_INF)
9196 		return -1;
9197 
9198 	quota_us = tg->cfs_bandwidth.quota;
9199 	do_div(quota_us, NSEC_PER_USEC);
9200 
9201 	return quota_us;
9202 }
9203 
9204 static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
9205 {
9206 	u64 quota, period, burst;
9207 
9208 	if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC)
9209 		return -EINVAL;
9210 
9211 	period = (u64)cfs_period_us * NSEC_PER_USEC;
9212 	quota = tg->cfs_bandwidth.quota;
9213 	burst = tg->cfs_bandwidth.burst;
9214 
9215 	return tg_set_cfs_bandwidth(tg, period, quota, burst);
9216 }
9217 
9218 static long tg_get_cfs_period(struct task_group *tg)
9219 {
9220 	u64 cfs_period_us;
9221 
9222 	cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
9223 	do_div(cfs_period_us, NSEC_PER_USEC);
9224 
9225 	return cfs_period_us;
9226 }
9227 
9228 static int tg_set_cfs_burst(struct task_group *tg, long cfs_burst_us)
9229 {
9230 	u64 quota, period, burst;
9231 
9232 	if ((u64)cfs_burst_us > U64_MAX / NSEC_PER_USEC)
9233 		return -EINVAL;
9234 
9235 	burst = (u64)cfs_burst_us * NSEC_PER_USEC;
9236 	period = ktime_to_ns(tg->cfs_bandwidth.period);
9237 	quota = tg->cfs_bandwidth.quota;
9238 
9239 	return tg_set_cfs_bandwidth(tg, period, quota, burst);
9240 }
9241 
9242 static long tg_get_cfs_burst(struct task_group *tg)
9243 {
9244 	u64 burst_us;
9245 
9246 	burst_us = tg->cfs_bandwidth.burst;
9247 	do_div(burst_us, NSEC_PER_USEC);
9248 
9249 	return burst_us;
9250 }
9251 
9252 static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
9253 				  struct cftype *cft)
9254 {
9255 	return tg_get_cfs_quota(css_tg(css));
9256 }
9257 
9258 static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
9259 				   struct cftype *cftype, s64 cfs_quota_us)
9260 {
9261 	return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
9262 }
9263 
9264 static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
9265 				   struct cftype *cft)
9266 {
9267 	return tg_get_cfs_period(css_tg(css));
9268 }
9269 
9270 static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
9271 				    struct cftype *cftype, u64 cfs_period_us)
9272 {
9273 	return tg_set_cfs_period(css_tg(css), cfs_period_us);
9274 }
9275 
9276 static u64 cpu_cfs_burst_read_u64(struct cgroup_subsys_state *css,
9277 				  struct cftype *cft)
9278 {
9279 	return tg_get_cfs_burst(css_tg(css));
9280 }
9281 
9282 static int cpu_cfs_burst_write_u64(struct cgroup_subsys_state *css,
9283 				   struct cftype *cftype, u64 cfs_burst_us)
9284 {
9285 	return tg_set_cfs_burst(css_tg(css), cfs_burst_us);
9286 }
9287 
9288 struct cfs_schedulable_data {
9289 	struct task_group *tg;
9290 	u64 period, quota;
9291 };
9292 
9293 /*
9294  * normalize group quota/period to be quota/max_period
9295  * note: units are usecs
9296  */
9297 static u64 normalize_cfs_quota(struct task_group *tg,
9298 			       struct cfs_schedulable_data *d)
9299 {
9300 	u64 quota, period;
9301 
9302 	if (tg == d->tg) {
9303 		period = d->period;
9304 		quota = d->quota;
9305 	} else {
9306 		period = tg_get_cfs_period(tg);
9307 		quota = tg_get_cfs_quota(tg);
9308 	}
9309 
9310 	/* note: these should typically be equivalent */
9311 	if (quota == RUNTIME_INF || quota == -1)
9312 		return RUNTIME_INF;
9313 
9314 	return to_ratio(period, quota);
9315 }
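
/*
 * For example, quota = 50000us over period = 100000us normalizes to
 * to_ratio(100000, 50000) == (50000 << BW_SHIFT) / 100000 == BW_UNIT / 2,
 * so groups with different periods can be compared on a common scale.
 */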
9316 
9317 static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
9318 {
9319 	struct cfs_schedulable_data *d = data;
9320 	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9321 	s64 quota = 0, parent_quota = -1;
9322 
9323 	if (!tg->parent) {
9324 		quota = RUNTIME_INF;
9325 	} else {
9326 		struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
9327 
9328 		quota = normalize_cfs_quota(tg, d);
9329 		parent_quota = parent_b->hierarchical_quota;
9330 
9331 		/*
9332 		 * Ensure max(child_quota) <= parent_quota.  On cgroup2,
9333 		 * always take the non-RUNTIME_INF min.  On cgroup1, only
9334 		 * inherit when no limit is set. In both cases this is used
9335 		 * by the scheduler to determine if a given CFS task has a
9336 		 * bandwidth constraint at some higher level.
9337 		 */
9338 		if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) {
9339 			if (quota == RUNTIME_INF)
9340 				quota = parent_quota;
9341 			else if (parent_quota != RUNTIME_INF)
9342 				quota = min(quota, parent_quota);
9343 		} else {
9344 			if (quota == RUNTIME_INF)
9345 				quota = parent_quota;
9346 			else if (parent_quota != RUNTIME_INF && quota > parent_quota)
9347 				return -EINVAL;
9348 		}
9349 	}
9350 	cfs_b->hierarchical_quota = quota;
9351 
9352 	return 0;
9353 }
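
/*
 * Example: with a parent that has a finite quota and a child set to "max",
 * the child's hierarchical_quota inherits the parent value on both
 * hierarchies.  If the child requests more than its parent, cgroup2 records
 * the parent value (the non-RUNTIME_INF min), while cgroup1 rejects the
 * write with -EINVAL.
 */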
9354 
9355 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
9356 {
9357 	struct cfs_schedulable_data data = {
9358 		.tg = tg,
9359 		.period = period,
9360 		.quota = quota,
9361 	};
9362 
9363 	if (quota != RUNTIME_INF) {
9364 		do_div(data.period, NSEC_PER_USEC);
9365 		do_div(data.quota, NSEC_PER_USEC);
9366 	}
9367 
9368 	guard(rcu)();
9369 	return walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
9370 }
9371 
9372 static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
9373 {
9374 	struct task_group *tg = css_tg(seq_css(sf));
9375 	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9376 
9377 	seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
9378 	seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
9379 	seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
9380 
9381 	if (schedstat_enabled() && tg != &root_task_group) {
9382 		struct sched_statistics *stats;
9383 		u64 ws = 0;
9384 		int i;
9385 
9386 		for_each_possible_cpu(i) {
9387 			stats = __schedstats_from_se(tg->se[i]);
9388 			ws += schedstat_val(stats->wait_sum);
9389 		}
9390 
9391 		seq_printf(sf, "wait_sum %llu\n", ws);
9392 	}
9393 
9394 	seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst);
9395 	seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time);
9396 
9397 	return 0;
9398 }
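
/*
 * The resulting cgroup1 cpu.stat output looks like (sample values only):
 *
 *	nr_periods 1523
 *	nr_throttled 12
 *	throttled_time 98130950
 *	wait_sum 1091289437
 *	nr_bursts 3
 *	burst_time 2017385
 *
 * with all times in nanoseconds; the cgroup2 counterpart in
 * cpu_extra_stat_show() reports microseconds instead.
 */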
9399 
9400 static u64 throttled_time_self(struct task_group *tg)
9401 {
9402 	int i;
9403 	u64 total = 0;
9404 
9405 	for_each_possible_cpu(i) {
9406 		total += READ_ONCE(tg->cfs_rq[i]->throttled_clock_self_time);
9407 	}
9408 
9409 	return total;
9410 }
9411 
9412 static int cpu_cfs_local_stat_show(struct seq_file *sf, void *v)
9413 {
9414 	struct task_group *tg = css_tg(seq_css(sf));
9415 
9416 	seq_printf(sf, "throttled_time %llu\n", throttled_time_self(tg));
9417 
9418 	return 0;
9419 }
9420 #endif /* CONFIG_CFS_BANDWIDTH */
9421 #endif /* CONFIG_FAIR_GROUP_SCHED */
9422 
9423 #ifdef CONFIG_RT_GROUP_SCHED
9424 static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
9425 				struct cftype *cft, s64 val)
9426 {
9427 	return sched_group_set_rt_runtime(css_tg(css), val);
9428 }
9429 
9430 static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
9431 			       struct cftype *cft)
9432 {
9433 	return sched_group_rt_runtime(css_tg(css));
9434 }
9435 
9436 static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
9437 				    struct cftype *cftype, u64 rt_period_us)
9438 {
9439 	return sched_group_set_rt_period(css_tg(css), rt_period_us);
9440 }
9441 
9442 static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
9443 				   struct cftype *cft)
9444 {
9445 	return sched_group_rt_period(css_tg(css));
9446 }
9447 #endif /* CONFIG_RT_GROUP_SCHED */
9448 
9449 #ifdef CONFIG_FAIR_GROUP_SCHED
9450 static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css,
9451 			       struct cftype *cft)
9452 {
9453 	return css_tg(css)->idle;
9454 }
9455 
9456 static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
9457 				struct cftype *cft, s64 idle)
9458 {
9459 	return sched_group_set_idle(css_tg(css), idle);
9460 }
9461 #endif /* CONFIG_FAIR_GROUP_SCHED */
9462 
9463 static struct cftype cpu_legacy_files[] = {
9464 #ifdef CONFIG_FAIR_GROUP_SCHED
9465 	{
9466 		.name = "shares",
9467 		.read_u64 = cpu_shares_read_u64,
9468 		.write_u64 = cpu_shares_write_u64,
9469 	},
9470 	{
9471 		.name = "idle",
9472 		.read_s64 = cpu_idle_read_s64,
9473 		.write_s64 = cpu_idle_write_s64,
9474 	},
9475 #endif
9476 #ifdef CONFIG_CFS_BANDWIDTH
9477 	{
9478 		.name = "cfs_quota_us",
9479 		.read_s64 = cpu_cfs_quota_read_s64,
9480 		.write_s64 = cpu_cfs_quota_write_s64,
9481 	},
9482 	{
9483 		.name = "cfs_period_us",
9484 		.read_u64 = cpu_cfs_period_read_u64,
9485 		.write_u64 = cpu_cfs_period_write_u64,
9486 	},
9487 	{
9488 		.name = "cfs_burst_us",
9489 		.read_u64 = cpu_cfs_burst_read_u64,
9490 		.write_u64 = cpu_cfs_burst_write_u64,
9491 	},
9492 	{
9493 		.name = "stat",
9494 		.seq_show = cpu_cfs_stat_show,
9495 	},
9496 	{
9497 		.name = "stat.local",
9498 		.seq_show = cpu_cfs_local_stat_show,
9499 	},
9500 #endif
9501 #ifdef CONFIG_RT_GROUP_SCHED
9502 	{
9503 		.name = "rt_runtime_us",
9504 		.read_s64 = cpu_rt_runtime_read,
9505 		.write_s64 = cpu_rt_runtime_write,
9506 	},
9507 	{
9508 		.name = "rt_period_us",
9509 		.read_u64 = cpu_rt_period_read_uint,
9510 		.write_u64 = cpu_rt_period_write_uint,
9511 	},
9512 #endif
9513 #ifdef CONFIG_UCLAMP_TASK_GROUP
9514 	{
9515 		.name = "uclamp.min",
9516 		.flags = CFTYPE_NOT_ON_ROOT,
9517 		.seq_show = cpu_uclamp_min_show,
9518 		.write = cpu_uclamp_min_write,
9519 	},
9520 	{
9521 		.name = "uclamp.max",
9522 		.flags = CFTYPE_NOT_ON_ROOT,
9523 		.seq_show = cpu_uclamp_max_show,
9524 		.write = cpu_uclamp_max_write,
9525 	},
9526 #endif
9527 	{ }	/* Terminate */
9528 };
9529 
9530 static int cpu_extra_stat_show(struct seq_file *sf,
9531 			       struct cgroup_subsys_state *css)
9532 {
9533 #ifdef CONFIG_CFS_BANDWIDTH
9534 	{
9535 		struct task_group *tg = css_tg(css);
9536 		struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9537 		u64 throttled_usec, burst_usec;
9538 
9539 		throttled_usec = cfs_b->throttled_time;
9540 		do_div(throttled_usec, NSEC_PER_USEC);
9541 		burst_usec = cfs_b->burst_time;
9542 		do_div(burst_usec, NSEC_PER_USEC);
9543 
9544 		seq_printf(sf, "nr_periods %d\n"
9545 			   "nr_throttled %d\n"
9546 			   "throttled_usec %llu\n"
9547 			   "nr_bursts %d\n"
9548 			   "burst_usec %llu\n",
9549 			   cfs_b->nr_periods, cfs_b->nr_throttled,
9550 			   throttled_usec, cfs_b->nr_burst, burst_usec);
9551 	}
9552 #endif
9553 	return 0;
9554 }
9555 
9556 static int cpu_local_stat_show(struct seq_file *sf,
9557 			       struct cgroup_subsys_state *css)
9558 {
9559 #ifdef CONFIG_CFS_BANDWIDTH
9560 	{
9561 		struct task_group *tg = css_tg(css);
9562 		u64 throttled_self_usec;
9563 
9564 		throttled_self_usec = throttled_time_self(tg);
9565 		do_div(throttled_self_usec, NSEC_PER_USEC);
9566 
9567 		seq_printf(sf, "throttled_usec %llu\n",
9568 			   throttled_self_usec);
9569 	}
9570 #endif
9571 	return 0;
9572 }
9573 
9574 #ifdef CONFIG_FAIR_GROUP_SCHED
9575 static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
9576 			       struct cftype *cft)
9577 {
9578 	struct task_group *tg = css_tg(css);
9579 	u64 weight = scale_load_down(tg->shares);
9580 
9581 	return DIV_ROUND_CLOSEST_ULL(weight * CGROUP_WEIGHT_DFL, 1024);
9582 }
9583 
9584 static int cpu_weight_write_u64(struct cgroup_subsys_state *css,
9585 				struct cftype *cft, u64 weight)
9586 {
9587 	/*
9588 	 * cgroup weight knobs should use the common MIN, DFL and MAX
9589 	 * values which are 1, 100 and 10000 respectively.  While it loses
9590 	 * a bit of range on both ends, it maps pretty well onto the shares
9591 	 * value used by scheduler and the round-trip conversions preserve
9592 	 * the original value over the entire range.
9593 	 */
9594 	if (weight < CGROUP_WEIGHT_MIN || weight > CGROUP_WEIGHT_MAX)
9595 		return -ERANGE;
9596 
9597 	weight = DIV_ROUND_CLOSEST_ULL(weight * 1024, CGROUP_WEIGHT_DFL);
9598 
9599 	return sched_group_set_shares(css_tg(css), scale_load(weight));
9600 }
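
/*
 * Round-trip examples of the weight <-> shares mapping used above:
 *
 *	cpu.weight   100 (DFL) -> shares   1024 -> reads back as   100
 *	cpu.weight     1 (MIN) -> shares     10 -> reads back as     1
 *	cpu.weight 10000 (MAX) -> shares 102400 -> reads back as 10000
 *
 * i.e. DIV_ROUND_CLOSEST_ULL(weight * 1024, 100) on write and
 * DIV_ROUND_CLOSEST_ULL(shares * 100, 1024) on read.
 */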
9601 
9602 static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css,
9603 				    struct cftype *cft)
9604 {
9605 	unsigned long weight = scale_load_down(css_tg(css)->shares);
9606 	int last_delta = INT_MAX;
9607 	int prio, delta;
9608 
9609 	/* find the closest nice value to the current weight */
9610 	for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) {
9611 		delta = abs(sched_prio_to_weight[prio] - weight);
9612 		if (delta >= last_delta)
9613 			break;
9614 		last_delta = delta;
9615 	}
9616 
9617 	return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO);
9618 }
9619 
9620 static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
9621 				     struct cftype *cft, s64 nice)
9622 {
9623 	unsigned long weight;
9624 	int idx;
9625 
9626 	if (nice < MIN_NICE || nice > MAX_NICE)
9627 		return -ERANGE;
9628 
9629 	idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO;
9630 	idx = array_index_nospec(idx, 40);
9631 	weight = sched_prio_to_weight[idx];
9632 
9633 	return sched_group_set_shares(css_tg(css), scale_load(weight));
9634 }
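
/*
 * For instance, writing nice 5 to cpu.weight.nice selects
 * sched_prio_to_weight[NICE_TO_PRIO(5) - MAX_RT_PRIO] == 335, and reading
 * the file back walks the table to report the nice level whose weight is
 * closest to the group's current shares.
 */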
9635 #endif /* CONFIG_FAIR_GROUP_SCHED */
9636 
9637 static void __maybe_unused cpu_period_quota_print(struct seq_file *sf,
9638 						  long period, long quota)
9639 {
9640 	if (quota < 0)
9641 		seq_puts(sf, "max");
9642 	else
9643 		seq_printf(sf, "%ld", quota);
9644 
9645 	seq_printf(sf, " %ld\n", period);
9646 }
9647 
9648 /* caller should put the current value in *@periodp before calling */
9649 static int __maybe_unused cpu_period_quota_parse(char *buf,
9650 						 u64 *periodp, u64 *quotap)
9651 {
9652 	char tok[21];	/* U64_MAX */
9653 
9654 	if (sscanf(buf, "%20s %llu", tok, periodp) < 1)
9655 		return -EINVAL;
9656 
9657 	*periodp *= NSEC_PER_USEC;
9658 
9659 	if (sscanf(tok, "%llu", quotap))
9660 		*quotap *= NSEC_PER_USEC;
9661 	else if (!strcmp(tok, "max"))
9662 		*quotap = RUNTIME_INF;
9663 	else
9664 		return -EINVAL;
9665 
9666 	return 0;
9667 }
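
/*
 * Accepted cpu.max formats, as parsed above (both fields in microseconds):
 *
 *	"max"		-> quota = RUNTIME_INF, period left at its current value
 *	"max 100000"	-> quota = RUNTIME_INF, period = 100ms
 *	"50000 100000"	-> quota = 50ms, period = 100ms
 */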
9668 
9669 #ifdef CONFIG_CFS_BANDWIDTH
9670 static int cpu_max_show(struct seq_file *sf, void *v)
9671 {
9672 	struct task_group *tg = css_tg(seq_css(sf));
9673 
9674 	cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg));
9675 	return 0;
9676 }
9677 
9678 static ssize_t cpu_max_write(struct kernfs_open_file *of,
9679 			     char *buf, size_t nbytes, loff_t off)
9680 {
9681 	struct task_group *tg = css_tg(of_css(of));
9682 	u64 period = tg_get_cfs_period(tg);
9683 	u64 burst = tg->cfs_bandwidth.burst;
9684 	u64 quota;
9685 	int ret;
9686 
9687 	ret = cpu_period_quota_parse(buf, &period, &quota);
9688 	if (!ret)
9689 		ret = tg_set_cfs_bandwidth(tg, period, quota, burst);
9690 	return ret ?: nbytes;
9691 }
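
/*
 * From userspace this corresponds to, for example:
 *
 *	# echo "50000 100000" > cpu.max
 *
 * which limits the group to 50ms of runtime every 100ms, while
 * "echo max > cpu.max" removes the quota again without touching the period.
 */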
9692 #endif /* CONFIG_CFS_BANDWIDTH */
9693 
9694 static struct cftype cpu_files[] = {
9695 #ifdef CONFIG_FAIR_GROUP_SCHED
9696 	{
9697 		.name = "weight",
9698 		.flags = CFTYPE_NOT_ON_ROOT,
9699 		.read_u64 = cpu_weight_read_u64,
9700 		.write_u64 = cpu_weight_write_u64,
9701 	},
9702 	{
9703 		.name = "weight.nice",
9704 		.flags = CFTYPE_NOT_ON_ROOT,
9705 		.read_s64 = cpu_weight_nice_read_s64,
9706 		.write_s64 = cpu_weight_nice_write_s64,
9707 	},
9708 	{
9709 		.name = "idle",
9710 		.flags = CFTYPE_NOT_ON_ROOT,
9711 		.read_s64 = cpu_idle_read_s64,
9712 		.write_s64 = cpu_idle_write_s64,
9713 	},
9714 #endif
9715 #ifdef CONFIG_CFS_BANDWIDTH
9716 	{
9717 		.name = "max",
9718 		.flags = CFTYPE_NOT_ON_ROOT,
9719 		.seq_show = cpu_max_show,
9720 		.write = cpu_max_write,
9721 	},
9722 	{
9723 		.name = "max.burst",
9724 		.flags = CFTYPE_NOT_ON_ROOT,
9725 		.read_u64 = cpu_cfs_burst_read_u64,
9726 		.write_u64 = cpu_cfs_burst_write_u64,
9727 	},
9728 #endif
9729 #ifdef CONFIG_UCLAMP_TASK_GROUP
9730 	{
9731 		.name = "uclamp.min",
9732 		.flags = CFTYPE_NOT_ON_ROOT,
9733 		.seq_show = cpu_uclamp_min_show,
9734 		.write = cpu_uclamp_min_write,
9735 	},
9736 	{
9737 		.name = "uclamp.max",
9738 		.flags = CFTYPE_NOT_ON_ROOT,
9739 		.seq_show = cpu_uclamp_max_show,
9740 		.write = cpu_uclamp_max_write,
9741 	},
9742 #endif
9743 	{ }	/* terminate */
9744 };
9745 
9746 struct cgroup_subsys cpu_cgrp_subsys = {
9747 	.css_alloc	= cpu_cgroup_css_alloc,
9748 	.css_online	= cpu_cgroup_css_online,
9749 	.css_released	= cpu_cgroup_css_released,
9750 	.css_free	= cpu_cgroup_css_free,
9751 	.css_extra_stat_show = cpu_extra_stat_show,
9752 	.css_local_stat_show = cpu_local_stat_show,
9753 #ifdef CONFIG_RT_GROUP_SCHED
9754 	.can_attach	= cpu_cgroup_can_attach,
9755 #endif
9756 	.attach		= cpu_cgroup_attach,
9757 	.legacy_cftypes	= cpu_legacy_files,
9758 	.dfl_cftypes	= cpu_files,
9759 	.early_init	= true,
9760 	.threaded	= true,
9761 };
9762 
9763 #endif	/* CONFIG_CGROUP_SCHED */
9764 
9765 void dump_cpu_task(int cpu)
9766 {
9767 	if (cpu == smp_processor_id() && in_hardirq()) {
9768 		struct pt_regs *regs;
9769 
9770 		regs = get_irq_regs();
9771 		if (regs) {
9772 			show_regs(regs);
9773 			return;
9774 		}
9775 	}
9776 
9777 	if (trigger_single_cpu_backtrace(cpu))
9778 		return;
9779 
9780 	pr_info("Task dump for CPU %d:\n", cpu);
9781 	sched_show_task(cpu_curr(cpu));
9782 }
9783 
9784 /*
9785  * Nice levels are multiplicative, with a gentle 10% change for every
9786  * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
9787  * nice 1, it will get ~10% less CPU time than another CPU-bound task
9788  * that remained on nice 0.
9789  *
9790  * The "10% effect" is relative and cumulative: from _any_ nice level,
9791  * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
9792  * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
9793  * If a task goes up by ~10% and another task goes down by ~10% then
9794  * the relative distance between them is ~25%.)
9795  */
9796 const int sched_prio_to_weight[40] = {
9797  /* -20 */     88761,     71755,     56483,     46273,     36291,
9798  /* -15 */     29154,     23254,     18705,     14949,     11916,
9799  /* -10 */      9548,      7620,      6100,      4904,      3906,
9800  /*  -5 */      3121,      2501,      1991,      1586,      1277,
9801  /*   0 */      1024,       820,       655,       526,       423,
9802  /*   5 */       335,       272,       215,       172,       137,
9803  /*  10 */       110,        87,        70,        56,        45,
9804  /*  15 */        36,        29,        23,        18,        15,
9805 };
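
/*
 * Worked example of the ~10% rule: a nice 0 task (weight 1024) running
 * against a nice 1 task (weight 820, ~1024/1.25) receives
 * 1024 / (1024 + 820) ~= 55.5% of the CPU while the nice 1 task gets
 * ~44.5%, i.e. roughly a 10 percentage point shift per nice level.
 */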
9806 
9807 /*
9808  * Inverse (2^32/x) values of the sched_prio_to_weight[] array, pre-calculated.
9809  *
9810  * In cases where the weight does not change often, we can use the
9811  * pre-calculated inverse to speed up arithmetic by turning divisions
9812  * into multiplications:
9813  */
9814 const u32 sched_prio_to_wmult[40] = {
9815  /* -20 */     48388,     59856,     76040,     92818,    118348,
9816  /* -15 */    147320,    184698,    229616,    287308,    360437,
9817  /* -10 */    449829,    563644,    704093,    875809,   1099582,
9818  /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
9819  /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
9820  /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
9821  /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
9822  /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
9823 };
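
/*
 * For example, the nice 0 entry is 2^32 / 1024 == 4194304, so a division
 * by the nice 0 weight can be replaced by:
 *
 *	x / 1024 == (x * 4194304) >> 32
 *
 * which costs only a multiplication and a shift.
 */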
9824 
9825 void call_trace_sched_update_nr_running(struct rq *rq, int count)
9826 {
9827 	trace_sched_update_nr_running_tp(rq, count);
9828 }
9829 
9830 #ifdef CONFIG_SCHED_MM_CID
9831 
9832 /*
9833  * @cid_lock: Guarantee forward-progress of cid allocation.
9834  *
9835  * Concurrency ID allocation within a bitmap is mostly lock-free. The cid_lock
9836  * is only used when contention is detected by the lock-free allocation so
9837  * forward progress can be guaranteed.
9838  */
9839 DEFINE_RAW_SPINLOCK(cid_lock);
9840 
9841 /*
9842  * @use_cid_lock: Select cid allocation behavior: lock-free vs spinlock.
9843  *
9844  * When @use_cid_lock is 0, the cid allocation is lock-free. When contention is
9845  * detected, it is set to 1 to ensure that all newly coming allocations are
9846  * serialized by @cid_lock until the allocation which detected contention
9847  * completes and sets @use_cid_lock back to 0. This guarantees forward progress
9848  * of a cid allocation.
9849  */
9850 int use_cid_lock;
9851 
9852 /*
9853  * mm_cid remote-clear implements a lock-free algorithm to clear per-mm/cpu cid
9854  * concurrently with respect to the execution of the source runqueue context
9855  * switch.
9856  *
9857  * There is one basic property we want to guarantee here:
9858  *
9859  * (1) Remote-clear should _never_ mark a per-cpu cid UNSET when it is actively
9860  * used by a task. That would lead to concurrent allocation of the cid and
9861  * userspace corruption.
9862  *
9863  * Provide this guarantee by introducing a Dekker memory ordering to guarantee
9864  * that a pair of loads observe at least one of a pair of stores, which can be
9865  * shown as:
9866  *
9867  *      X = Y = 0
9868  *
9869  *      w[X]=1          w[Y]=1
9870  *      MB              MB
9871  *      r[Y]=y          r[X]=x
9872  *
9873  * Which guarantees that x==0 && y==0 is impossible. But rather than using
9874  * values 0 and 1, this algorithm cares about specific state transitions of the
9875  * runqueue current task (as updated by the scheduler context switch), and the
9876  * per-mm/cpu cid value.
9877  *
9878  * Let's introduce task (Y) which has task->mm == mm and task (N) which has
9879  * task->mm != mm for the rest of the discussion. There are two scheduler state
9880  * transitions on context switch we care about:
9881  *
9882  * (TSA) Store to rq->curr with transition from (N) to (Y)
9883  *
9884  * (TSB) Store to rq->curr with transition from (Y) to (N)
9885  *
9886  * On the remote-clear side, there is one transition we care about:
9887  *
9888  * (TMA) cmpxchg to *pcpu_cid to set the LAZY flag
9889  *
9890  * There is also a transition to UNSET state which can be performed from all
9891  * sides (scheduler, remote-clear). It is always performed with a cmpxchg which
9892  * guarantees that only a single thread will succeed:
9893  *
9894  * (TMB) cmpxchg to *pcpu_cid to mark UNSET
9895  *
9896  * Just to be clear, what we do _not_ want to happen is a transition to UNSET
9897  * when a thread is actively using the cid (property (1)).
9898  *
9899  * Let's look at the relevant combinations of TSA/TSB and TMA transitions.
9900  *
9901  * Scenario A) (TSA)+(TMA) (from next task perspective)
9902  *
9903  * CPU0                                      CPU1
9904  *
9905  * Context switch CS-1                       Remote-clear
9906  *   - store to rq->curr: (N)->(Y) (TSA)     - cmpxchg to *pcpu_id to LAZY (TMA)
9907  *                                             (implied barrier after cmpxchg)
9908  *   - switch_mm_cid()
9909  *     - memory barrier (see switch_mm_cid()
9910  *       comment explaining how this barrier
9911  *       is combined with other scheduler
9912  *       barriers)
9913  *     - mm_cid_get (next)
9914  *       - READ_ONCE(*pcpu_cid)              - rcu_dereference(src_rq->curr)
9915  *
9916  * This Dekker ensures that either task (Y) is observed by the
9917  * rcu_dereference() or the LAZY flag is observed by READ_ONCE(), or both are
9918  * observed.
9919  *
9920  * If task (Y) store is observed by rcu_dereference(), it means that there is
9921  * still an active task on the cpu. Remote-clear will therefore not transition
9922  * to UNSET, which fulfills property (1).
9923  *
9924  * If task (Y) is not observed, but the lazy flag is observed by READ_ONCE(),
9925  * it will move its state to UNSET, which clears the percpu cid perhaps
9926  * uselessly (which is not an issue for correctness). Because task (Y) is not
9927  * observed, CPU1 can move ahead to set the state to UNSET. Because moving
9928  * state to UNSET is done with a cmpxchg expecting that the old state has the
9929  * LAZY flag set, only one thread will successfully UNSET.
9930  *
9931  * If both states (LAZY flag and task (Y)) are observed, the thread on CPU0
9932  * will observe the LAZY flag and transition to UNSET (perhaps uselessly), and
9933  * CPU1 will observe task (Y) and do nothing more, which is fine.
9934  *
9935  * What we are effectively preventing with this Dekker is a scenario where
9936  * neither LAZY flag nor store (Y) are observed, which would fail property (1)
9937  * because this would UNSET a cid which is actively used.
9938  */
9939 
9940 void sched_mm_cid_migrate_from(struct task_struct *t)
9941 {
9942 	t->migrate_from_cpu = task_cpu(t);
9943 }
9944 
9945 static
9946 int __sched_mm_cid_migrate_from_fetch_cid(struct rq *src_rq,
9947 					  struct task_struct *t,
9948 					  struct mm_cid *src_pcpu_cid)
9949 {
9950 	struct mm_struct *mm = t->mm;
9951 	struct task_struct *src_task;
9952 	int src_cid, last_mm_cid;
9953 
9954 	if (!mm)
9955 		return -1;
9956 
9957 	last_mm_cid = t->last_mm_cid;
9958 	/*
9959 	 * If the migrated task has no last cid, or if the current
9960 	 * task on src rq uses the cid, it means the source cid does not need
9961 	 * to be moved to the destination cpu.
9962 	 */
9963 	if (last_mm_cid == -1)
9964 		return -1;
9965 	src_cid = READ_ONCE(src_pcpu_cid->cid);
9966 	if (!mm_cid_is_valid(src_cid) || last_mm_cid != src_cid)
9967 		return -1;
9968 
9969 	/*
9970 	 * If we observe an active task using the mm on this rq, it means we
9971 	 * are not the last task to be migrated from this cpu for this mm, so
9972 	 * there is no need to move src_cid to the destination cpu.
9973 	 */
9974 	guard(rcu)();
9975 	src_task = rcu_dereference(src_rq->curr);
9976 	if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
9977 		t->last_mm_cid = -1;
9978 		return -1;
9979 	}
9980 
9981 	return src_cid;
9982 }
9983 
9984 static
9985 int __sched_mm_cid_migrate_from_try_steal_cid(struct rq *src_rq,
9986 					      struct task_struct *t,
9987 					      struct mm_cid *src_pcpu_cid,
9988 					      int src_cid)
9989 {
9990 	struct task_struct *src_task;
9991 	struct mm_struct *mm = t->mm;
9992 	int lazy_cid;
9993 
9994 	if (src_cid == -1)
9995 		return -1;
9996 
9997 	/*
9998 	 * Attempt to clear the source cpu cid to move it to the destination
9999 	 * cpu.
10000 	 */
10001 	lazy_cid = mm_cid_set_lazy_put(src_cid);
10002 	if (!try_cmpxchg(&src_pcpu_cid->cid, &src_cid, lazy_cid))
10003 		return -1;
10004 
10005 	/*
10006 	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
10007 	 * rq->curr->mm matches the scheduler barrier in context_switch()
10008 	 * between store to rq->curr and load of prev and next task's
10009 	 * per-mm/cpu cid.
10010 	 *
10011 	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
10012 	 * rq->curr->mm_cid_active matches the barrier in
10013 	 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
10014 	 * sched_mm_cid_after_execve() between store to t->mm_cid_active and
10015 	 * load of per-mm/cpu cid.
10016 	 */
10017 
10018 	/*
10019 	 * If we observe an active task using the mm on this rq after setting
10020 	 * the lazy-put flag, this task will be responsible for transitioning
10021 	 * from lazy-put flag set to MM_CID_UNSET.
10022 	 */
10023 	scoped_guard (rcu) {
10024 		src_task = rcu_dereference(src_rq->curr);
10025 		if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
10026 			/*
10027 			 * We observed an active task for this mm, there is therefore
10028 			 * no point in moving this cid to the destination cpu.
10029 			 */
10030 			t->last_mm_cid = -1;
10031 			return -1;
10032 		}
10033 	}
10034 
10035 	/*
10036 	 * The src_cid is unused, so it can be unset.
10037 	 */
10038 	if (!try_cmpxchg(&src_pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
10039 		return -1;
10040 	return src_cid;
10041 }
10042 
10043 /*
10044  * Migration to dst cpu. Called with dst_rq lock held.
10045  * Interrupts are disabled, which keeps the window of cid ownership without the
10046  * source rq lock held small.
10047  */
10048 void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t)
10049 {
10050 	struct mm_cid *src_pcpu_cid, *dst_pcpu_cid;
10051 	struct mm_struct *mm = t->mm;
10052 	int src_cid, dst_cid, src_cpu;
10053 	struct rq *src_rq;
10054 
10055 	lockdep_assert_rq_held(dst_rq);
10056 
10057 	if (!mm)
10058 		return;
10059 	src_cpu = t->migrate_from_cpu;
10060 	if (src_cpu == -1) {
10061 		t->last_mm_cid = -1;
10062 		return;
10063 	}
10064 	/*
10065 	 * Move the src cid if the dst cid is unset. This keeps id
10066 	 * allocation closest to 0 in cases where few threads migrate around
10067 	 * many CPUs.
10068 	 *
10069 	 * If destination cid is already set, we may have to just clear
10070 	 * the src cid to ensure compactness in frequent migrations
10071 	 * scenarios.
10072 	 *
10073 	 * It is not useful to clear the src cid when the number of threads is
10074 	 * greater than or equal to the number of allowed CPUs, because user-space
10075 	 * can expect that the number of allowed cids can reach the number of
10076 	 * allowed CPUs.
10077 	 */
10078 	dst_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(dst_rq));
10079 	dst_cid = READ_ONCE(dst_pcpu_cid->cid);
10080 	if (!mm_cid_is_unset(dst_cid) &&
10081 	    atomic_read(&mm->mm_users) >= t->nr_cpus_allowed)
10082 		return;
10083 	src_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, src_cpu);
10084 	src_rq = cpu_rq(src_cpu);
10085 	src_cid = __sched_mm_cid_migrate_from_fetch_cid(src_rq, t, src_pcpu_cid);
10086 	if (src_cid == -1)
10087 		return;
10088 	src_cid = __sched_mm_cid_migrate_from_try_steal_cid(src_rq, t, src_pcpu_cid,
10089 							    src_cid);
10090 	if (src_cid == -1)
10091 		return;
10092 	if (!mm_cid_is_unset(dst_cid)) {
10093 		__mm_cid_put(mm, src_cid);
10094 		return;
10095 	}
10096 	/* Move src_cid to dst cpu. */
10097 	mm_cid_snapshot_time(dst_rq, mm);
10098 	WRITE_ONCE(dst_pcpu_cid->cid, src_cid);
10099 }
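
/*
 * Example: a single-threaded task migrating from CPU0 to CPU1 typically
 * finds the destination slot unset, so its cid (usually 0) is stolen from
 * CPU0's per-mm slot and installed on CPU1 above, letting the thread keep
 * observing the same low cid instead of allocating a fresh one.
 */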
10100 
10101 static void sched_mm_cid_remote_clear(struct mm_struct *mm, struct mm_cid *pcpu_cid,
10102 				      int cpu)
10103 {
10104 	struct rq *rq = cpu_rq(cpu);
10105 	struct task_struct *t;
10106 	int cid, lazy_cid;
10107 
10108 	cid = READ_ONCE(pcpu_cid->cid);
10109 	if (!mm_cid_is_valid(cid))
10110 		return;
10111 
10112 	/*
10113 	 * Clear the cpu cid if it is set to keep cid allocation compact.  If
10114 	 * there happens to be other tasks left on the source cpu using this
10115 	 * mm, the next task using this mm will reallocate its cid on context
10116 	 * switch.
10117 	 */
10118 	lazy_cid = mm_cid_set_lazy_put(cid);
10119 	if (!try_cmpxchg(&pcpu_cid->cid, &cid, lazy_cid))
10120 		return;
10121 
10122 	/*
10123 	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
10124 	 * rq->curr->mm matches the scheduler barrier in context_switch()
10125 	 * between store to rq->curr and load of prev and next task's
10126 	 * per-mm/cpu cid.
10127 	 *
10128 	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
10129 	 * rq->curr->mm_cid_active matches the barrier in
10130 	 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
10131 	 * sched_mm_cid_after_execve() between store to t->mm_cid_active and
10132 	 * load of per-mm/cpu cid.
10133 	 */
10134 
10135 	/*
10136 	 * If we observe an active task using the mm on this rq after setting
10137 	 * the lazy-put flag, that task will be responsible for transitioning
10138 	 * from lazy-put flag set to MM_CID_UNSET.
10139 	 */
10140 	scoped_guard (rcu) {
10141 		t = rcu_dereference(rq->curr);
10142 		if (READ_ONCE(t->mm_cid_active) && t->mm == mm)
10143 			return;
10144 	}
10145 
10146 	/*
10147 	 * The cid is unused, so it can be unset.
10148 	 * Disable interrupts to keep the window of cid ownership without rq
10149 	 * lock small.
10150 	 */
10151 	scoped_guard (irqsave) {
10152 		if (try_cmpxchg(&pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
10153 			__mm_cid_put(mm, cid);
10154 	}
10155 }
10156 
10157 static void sched_mm_cid_remote_clear_old(struct mm_struct *mm, int cpu)
10158 {
10159 	struct rq *rq = cpu_rq(cpu);
10160 	struct mm_cid *pcpu_cid;
10161 	struct task_struct *curr;
10162 	u64 rq_clock;
10163 
10164 	/*
10165 	 * rq->clock load is racy on 32-bit but one spurious clear once in a
10166 	 * while is irrelevant.
10167 	 */
10168 	rq_clock = READ_ONCE(rq->clock);
10169 	pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
10170 
10171 	/*
10172 	 * In order to take care of infrequently scheduled tasks, bump the time
10173 	 * snapshot associated with this cid if an active task using the mm is
10174 	 * observed on this rq.
10175 	 */
10176 	scoped_guard (rcu) {
10177 		curr = rcu_dereference(rq->curr);
10178 		if (READ_ONCE(curr->mm_cid_active) && curr->mm == mm) {
10179 			WRITE_ONCE(pcpu_cid->time, rq_clock);
10180 			return;
10181 		}
10182 	}
10183 
10184 	if (rq_clock < pcpu_cid->time + SCHED_MM_CID_PERIOD_NS)
10185 		return;
10186 	sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
10187 }
10188 
10189 static void sched_mm_cid_remote_clear_weight(struct mm_struct *mm, int cpu,
10190 					     int weight)
10191 {
10192 	struct mm_cid *pcpu_cid;
10193 	int cid;
10194 
10195 	pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
10196 	cid = READ_ONCE(pcpu_cid->cid);
10197 	if (!mm_cid_is_valid(cid) || cid < weight)
10198 		return;
10199 	sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
10200 }
10201 
10202 static void task_mm_cid_work(struct callback_head *work)
10203 {
10204 	unsigned long now = jiffies, old_scan, next_scan;
10205 	struct task_struct *t = current;
10206 	struct cpumask *cidmask;
10207 	struct mm_struct *mm;
10208 	int weight, cpu;
10209 
10210 	SCHED_WARN_ON(t != container_of(work, struct task_struct, cid_work));
10211 
10212 	work->next = work;	/* Prevent double-add */
10213 	if (t->flags & PF_EXITING)
10214 		return;
10215 	mm = t->mm;
10216 	if (!mm)
10217 		return;
10218 	old_scan = READ_ONCE(mm->mm_cid_next_scan);
10219 	next_scan = now + msecs_to_jiffies(MM_CID_SCAN_DELAY);
10220 	if (!old_scan) {
10221 		unsigned long res;
10222 
10223 		res = cmpxchg(&mm->mm_cid_next_scan, old_scan, next_scan);
10224 		if (res != old_scan)
10225 			old_scan = res;
10226 		else
10227 			old_scan = next_scan;
10228 	}
10229 	if (time_before(now, old_scan))
10230 		return;
10231 	if (!try_cmpxchg(&mm->mm_cid_next_scan, &old_scan, next_scan))
10232 		return;
10233 	cidmask = mm_cidmask(mm);
10234 	/* Clear cids that were not recently used. */
10235 	for_each_possible_cpu(cpu)
10236 		sched_mm_cid_remote_clear_old(mm, cpu);
10237 	weight = cpumask_weight(cidmask);
10238 	/*
10239 	 * Clear cids that are greater than or equal to the cidmask weight to
10240 	 * recompact it.
10241 	 */
10242 	for_each_possible_cpu(cpu)
10243 		sched_mm_cid_remote_clear_weight(mm, cpu, weight);
10244 }
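
/*
 * Compaction example: if the cids currently allocated for an mm are
 * {0, 1, 5}, the cidmask weight is 3, so any per-cpu slot still caching
 * cid 5 is cleared above and the next allocation for this mm picks the
 * lowest free cid (2) instead.
 */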
10245 
10246 void init_sched_mm_cid(struct task_struct *t)
10247 {
10248 	struct mm_struct *mm = t->mm;
10249 	int mm_users = 0;
10250 
10251 	if (mm) {
10252 		mm_users = atomic_read(&mm->mm_users);
10253 		if (mm_users == 1)
10254 			mm->mm_cid_next_scan = jiffies + msecs_to_jiffies(MM_CID_SCAN_DELAY);
10255 	}
10256 	t->cid_work.next = &t->cid_work;	/* Protect against double add */
10257 	init_task_work(&t->cid_work, task_mm_cid_work);
10258 }
10259 
10260 void task_tick_mm_cid(struct rq *rq, struct task_struct *curr)
10261 {
10262 	struct callback_head *work = &curr->cid_work;
10263 	unsigned long now = jiffies;
10264 
10265 	if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) ||
10266 	    work->next != work)
10267 		return;
10268 	if (time_before(now, READ_ONCE(curr->mm->mm_cid_next_scan)))
10269 		return;
10270 	task_work_add(curr, work, TWA_RESUME);
10271 }
10272 
10273 void sched_mm_cid_exit_signals(struct task_struct *t)
10274 {
10275 	struct mm_struct *mm = t->mm;
10276 	struct rq *rq;
10277 
10278 	if (!mm)
10279 		return;
10280 
10281 	preempt_disable();
10282 	rq = this_rq();
10283 	guard(rq_lock_irqsave)(rq);
10284 	preempt_enable_no_resched();	/* holding spinlock */
10285 	WRITE_ONCE(t->mm_cid_active, 0);
10286 	/*
10287 	 * Store t->mm_cid_active before loading per-mm/cpu cid.
10288 	 * Matches barrier in sched_mm_cid_remote_clear_old().
10289 	 */
10290 	smp_mb();
10291 	mm_cid_put(mm);
10292 	t->last_mm_cid = t->mm_cid = -1;
10293 }
10294 
10295 void sched_mm_cid_before_execve(struct task_struct *t)
10296 {
10297 	struct mm_struct *mm = t->mm;
10298 	struct rq *rq;
10299 
10300 	if (!mm)
10301 		return;
10302 
10303 	preempt_disable();
10304 	rq = this_rq();
10305 	guard(rq_lock_irqsave)(rq);
10306 	preempt_enable_no_resched();	/* holding spinlock */
10307 	WRITE_ONCE(t->mm_cid_active, 0);
10308 	/*
10309 	 * Store t->mm_cid_active before loading per-mm/cpu cid.
10310 	 * Matches barrier in sched_mm_cid_remote_clear_old().
10311 	 */
10312 	smp_mb();
10313 	mm_cid_put(mm);
10314 	t->last_mm_cid = t->mm_cid = -1;
10315 }
10316 
10317 void sched_mm_cid_after_execve(struct task_struct *t)
10318 {
10319 	struct mm_struct *mm = t->mm;
10320 	struct rq *rq;
10321 
10322 	if (!mm)
10323 		return;
10324 
10325 	preempt_disable();
10326 	rq = this_rq();
10327 	scoped_guard (rq_lock_irqsave, rq) {
10328 		preempt_enable_no_resched();	/* holding spinlock */
10329 		WRITE_ONCE(t->mm_cid_active, 1);
10330 		/*
10331 		 * Store t->mm_cid_active before loading per-mm/cpu cid.
10332 		 * Matches barrier in sched_mm_cid_remote_clear_old().
10333 		 */
10334 		smp_mb();
10335 		t->last_mm_cid = t->mm_cid = mm_cid_get(rq, mm);
10336 	}
10337 	rseq_set_notify_resume(t);
10338 }
10339 
10340 void sched_mm_cid_fork(struct task_struct *t)
10341 {
10342 	WARN_ON_ONCE(!t->mm || t->mm_cid != -1);
10343 	t->mm_cid_active = 1;
10344 }
10345 #endif /* CONFIG_SCHED_MM_CID */
10346