/* xref: /linux/kernel/sched/core.c (revision d203484f2556f47a435cda36ceb9dd83adc9056e) */
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  kernel/sched/core.c
 *
 *  Core kernel CPU scheduler code
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *  Copyright (C) 1998-2024  Ingo Molnar, Red Hat
 */
#include <linux/highmem.h>
#include <linux/hrtimer_api.h>
#include <linux/ktime_api.h>
#include <linux/sched/signal.h>
#include <linux/syscalls_api.h>
#include <linux/debug_locks.h>
#include <linux/prefetch.h>
#include <linux/capability.h>
#include <linux/pgtable_api.h>
#include <linux/wait_bit.h>
#include <linux/jiffies.h>
#include <linux/spinlock_api.h>
#include <linux/cpumask_api.h>
#include <linux/lockdep_api.h>
#include <linux/hardirq.h>
#include <linux/softirq.h>
#include <linux/refcount_api.h>
#include <linux/topology.h>
#include <linux/sched/clock.h>
#include <linux/sched/cond_resched.h>
#include <linux/sched/cputime.h>
#include <linux/sched/debug.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/init.h>
#include <linux/sched/isolation.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/mm.h>
#include <linux/sched/nohz.h>
#include <linux/sched/rseq_api.h>
#include <linux/sched/rt.h>

#include <linux/blkdev.h>
#include <linux/context_tracking.h>
#include <linux/cpuset.h>
#include <linux/delayacct.h>
#include <linux/init_task.h>
#include <linux/interrupt.h>
#include <linux/ioprio.h>
#include <linux/kallsyms.h>
#include <linux/kcov.h>
#include <linux/kprobes.h>
#include <linux/llist_api.h>
#include <linux/mmu_context.h>
#include <linux/mmzone.h>
#include <linux/mutex_api.h>
#include <linux/nmi.h>
#include <linux/nospec.h>
#include <linux/perf_event_api.h>
#include <linux/profile.h>
#include <linux/psi.h>
#include <linux/rcuwait_api.h>
#include <linux/rseq.h>
#include <linux/sched/wake_q.h>
#include <linux/scs.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/vtime.h>
#include <linux/wait_api.h>
#include <linux/workqueue_api.h>

#ifdef CONFIG_PREEMPT_DYNAMIC
# ifdef CONFIG_GENERIC_ENTRY
#  include <linux/entry-common.h>
# endif
#endif

#include <uapi/linux/sched/types.h>

#include <asm/irq_regs.h>
#include <asm/switch_to.h>
#include <asm/tlb.h>

#define CREATE_TRACE_POINTS
#include <linux/sched/rseq_api.h>
#include <trace/events/sched.h>
#include <trace/events/ipi.h>
#undef CREATE_TRACE_POINTS

#include "sched.h"
#include "stats.h"

#include "autogroup.h"
#include "pelt.h"
#include "smp.h"

#include "../workqueue_internal.h"
#include "../../io_uring/io-wq.h"
#include "../smpboot.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);

/*
 * Export tracepoints that act as a bare tracehook (i.e. have no trace event
 * associated with them) to allow external modules to probe them.
 */
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_hw_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_compute_energy_tp);

DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#ifdef CONFIG_SCHED_DEBUG
/*
 * Debugging: various feature bits
 *
 * If SCHED_DEBUG is disabled, each compilation unit has its own copy of
 * sysctl_sched_features, defined in sched.h, to allow constant propagation
 * at compile time and compiler optimization based on the features' defaults.
 */
#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |
const_debug unsigned int sysctl_sched_features =
#include "features.h"
	0;
#undef SCHED_FEAT
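
/*
 * Editor's illustration (not part of the kernel source): with a features.h
 * containing, say,
 *
 *	SCHED_FEAT(PLACE_LAG, true)
 *	SCHED_FEAT(RUN_TO_PARITY, true)
 *
 * the x-macro include above expands to:
 *
 *	const_debug unsigned int sysctl_sched_features =
 *		(1UL << __SCHED_FEAT_PLACE_LAG) * true |
 *		(1UL << __SCHED_FEAT_RUN_TO_PARITY) * true |
 *		0;
 *
 * i.e. a bitmask with one bit per feature, set iff that feature defaults
 * to enabled.
 */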

/*
 * Print a warning if need_resched is set for the given duration (if
 * LATENCY_WARN is enabled).
 *
 * If sysctl_resched_latency_warn_once is set, only one warning will be shown
 * per boot.
 */
__read_mostly int sysctl_resched_latency_warn_ms = 100;
__read_mostly int sysctl_resched_latency_warn_once = 1;
#endif /* CONFIG_SCHED_DEBUG */

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;

__read_mostly int scheduler_running;

#ifdef CONFIG_SCHED_CORE

DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);

/* kernel prio, less is more */
static inline int __task_prio(const struct task_struct *p)
{
	if (p->sched_class == &stop_sched_class) /* trumps deadline */
		return -2;

	if (p->dl_server)
		return -1; /* deadline */

	if (rt_or_dl_prio(p->prio))
		return p->prio; /* [-1, 99] */

	if (p->sched_class == &idle_sched_class)
		return MAX_RT_PRIO + NICE_WIDTH; /* 140 */

	if (task_on_scx(p))
		return MAX_RT_PRIO + MAX_NICE + 1; /* 120, squash ext */

	return MAX_RT_PRIO + MAX_NICE; /* 119, squash fair */
}
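
/*
 * Editor's summary of the mapping above (less is more important):
 *
 *	 -2		stop_sched_class (trumps deadline)
 *	 -1		tasks served by a DL server
 *	 -1..99		RT/DL tasks, p->prio taken directly
 *	119		fair, squashed to a single level
 *	120		ext, squashed to a single level
 *	140		idle
 */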

/*
 * l(a,b)
 * le(a,b) := !l(b,a)
 * g(a,b)  := l(b,a)
 * ge(a,b) := !l(a,b)
 */

/* real prio, less is less */
static inline bool prio_less(const struct task_struct *a,
			     const struct task_struct *b, bool in_fi)
{

	int pa = __task_prio(a), pb = __task_prio(b);

	if (-pa < -pb)
		return true;

	if (-pb < -pa)
		return false;

	if (pa == -1) { /* dl_prio() doesn't work because of stop_class above */
		const struct sched_dl_entity *a_dl, *b_dl;

		a_dl = &a->dl;
		/*
		 * Since 'a' and 'b' can be CFS tasks served by a DL server,
		 * __task_prio() can return -1 (for DL) even for those. In that
		 * case, get to the dl_server's DL entity.
		 */
		if (a->dl_server)
			a_dl = a->dl_server;

		b_dl = &b->dl;
		if (b->dl_server)
			b_dl = b->dl_server;

		return !dl_time_before(a_dl->deadline, b_dl->deadline);
	}

	if (pa == MAX_RT_PRIO + MAX_NICE)	/* fair */
		return cfs_prio_less(a, b, in_fi);

#ifdef CONFIG_SCHED_CLASS_EXT
	if (pa == MAX_RT_PRIO + MAX_NICE + 1)	/* ext */
		return scx_prio_less(a, b, in_fi);
#endif

	return false;
}

static inline bool __sched_core_less(const struct task_struct *a,
				     const struct task_struct *b)
{
	if (a->core_cookie < b->core_cookie)
		return true;

	if (a->core_cookie > b->core_cookie)
		return false;

	/* flip prio, so high prio is leftmost */
	if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count))
		return true;

	return false;
}

#define __node_2_sc(node) rb_entry((node), struct task_struct, core_node)

static inline bool rb_sched_core_less(struct rb_node *a, const struct rb_node *b)
{
	return __sched_core_less(__node_2_sc(a), __node_2_sc(b));
}

static inline int rb_sched_core_cmp(const void *key, const struct rb_node *node)
{
	const struct task_struct *p = __node_2_sc(node);
	unsigned long cookie = (unsigned long)key;

	if (cookie < p->core_cookie)
		return -1;

	if (cookie > p->core_cookie)
		return 1;

	return 0;
}

void sched_core_enqueue(struct rq *rq, struct task_struct *p)
{
	if (p->se.sched_delayed)
		return;

	rq->core->core_task_seq++;

	if (!p->core_cookie)
		return;

	rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);
}

void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags)
{
	if (p->se.sched_delayed)
		return;

	rq->core->core_task_seq++;

	if (sched_core_enqueued(p)) {
		rb_erase(&p->core_node, &rq->core_tree);
		RB_CLEAR_NODE(&p->core_node);
	}

	/*
	 * Migrating the last task off the cpu, with the cpu in forced idle
	 * state. Reschedule to create an accounting edge for forced idle,
	 * and re-examine whether the core is still in forced idle state.
	 */
	if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 &&
	    rq->core->core_forceidle_count && rq->curr == rq->idle)
		resched_curr(rq);
}

static int sched_task_is_throttled(struct task_struct *p, int cpu)
{
	if (p->sched_class->task_is_throttled)
		return p->sched_class->task_is_throttled(p, cpu);

	return 0;
}

static struct task_struct *sched_core_next(struct task_struct *p, unsigned long cookie)
{
	struct rb_node *node = &p->core_node;
	int cpu = task_cpu(p);

	do {
		node = rb_next(node);
		if (!node)
			return NULL;

		p = __node_2_sc(node);
		if (p->core_cookie != cookie)
			return NULL;

	} while (sched_task_is_throttled(p, cpu));

	return p;
}

/*
 * Find left-most (aka, highest priority) and unthrottled task matching @cookie.
 * If no suitable task is found, NULL will be returned.
 */
static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie)
{
	struct task_struct *p;
	struct rb_node *node;

	node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp);
	if (!node)
		return NULL;

	p = __node_2_sc(node);
	if (!sched_task_is_throttled(p, rq->cpu))
		return p;

	return sched_core_next(p, cookie);
}

/*
 * Magic required such that:
 *
 *	raw_spin_rq_lock(rq);
 *	...
 *	raw_spin_rq_unlock(rq);
 *
 * ends up locking and unlocking the _same_ lock, and all CPUs
 * always agree on what rq has what lock.
 *
 * XXX entirely possible to selectively enable cores, don't bother for now.
 */

static DEFINE_MUTEX(sched_core_mutex);
static atomic_t sched_core_count;
static struct cpumask sched_core_mask;

static void sched_core_lock(int cpu, unsigned long *flags)
{
	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
	int t, i = 0;

	local_irq_save(*flags);
	for_each_cpu(t, smt_mask)
		raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);
}

static void sched_core_unlock(int cpu, unsigned long *flags)
{
	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
	int t;

	for_each_cpu(t, smt_mask)
		raw_spin_unlock(&cpu_rq(t)->__lock);
	local_irq_restore(*flags);
}

static void __sched_core_flip(bool enabled)
{
	unsigned long flags;
	int cpu, t;

	cpus_read_lock();

	/*
	 * Toggle the online cores, one by one.
	 */
	cpumask_copy(&sched_core_mask, cpu_online_mask);
	for_each_cpu(cpu, &sched_core_mask) {
		const struct cpumask *smt_mask = cpu_smt_mask(cpu);

		sched_core_lock(cpu, &flags);

		for_each_cpu(t, smt_mask)
			cpu_rq(t)->core_enabled = enabled;

		cpu_rq(cpu)->core->core_forceidle_start = 0;

		sched_core_unlock(cpu, &flags);

		cpumask_andnot(&sched_core_mask, &sched_core_mask, smt_mask);
	}

	/*
	 * Toggle the offline CPUs.
	 */
	for_each_cpu_andnot(cpu, cpu_possible_mask, cpu_online_mask)
		cpu_rq(cpu)->core_enabled = enabled;

	cpus_read_unlock();
}

static void sched_core_assert_empty(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree));
}

static void __sched_core_enable(void)
{
	static_branch_enable(&__sched_core_enabled);
	/*
	 * Ensure all previous instances of raw_spin_rq_*lock() have finished
	 * and future ones will observe !sched_core_disabled().
	 */
	synchronize_rcu();
	__sched_core_flip(true);
	sched_core_assert_empty();
}

static void __sched_core_disable(void)
{
	sched_core_assert_empty();
	__sched_core_flip(false);
	static_branch_disable(&__sched_core_enabled);
}

void sched_core_get(void)
{
	if (atomic_inc_not_zero(&sched_core_count))
		return;

	mutex_lock(&sched_core_mutex);
	if (!atomic_read(&sched_core_count))
		__sched_core_enable();

	smp_mb__before_atomic();
	atomic_inc(&sched_core_count);
	mutex_unlock(&sched_core_mutex);
}

static void __sched_core_put(struct work_struct *work)
{
	if (atomic_dec_and_mutex_lock(&sched_core_count, &sched_core_mutex)) {
		__sched_core_disable();
		mutex_unlock(&sched_core_mutex);
	}
}

void sched_core_put(void)
{
	static DECLARE_WORK(_work, __sched_core_put);

	/*
	 * "There can be only one"
	 *
	 * Either this is the last one, or we don't actually need to do any
	 * 'work'. If it is the last *again*, we rely on
	 * WORK_STRUCT_PENDING_BIT.
	 */
	if (!atomic_add_unless(&sched_core_count, -1, 1))
		schedule_work(&_work);
}
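
/*
 * Usage sketch (editor's illustration, not kernel code): users of core
 * scheduling, e.g. the code that assigns core cookies to tasks, bracket
 * the feature's lifetime with the refcount above:
 *
 *	sched_core_get();	// first user enables the static key
 *	...			// cookied tasks exist, core picking active
 *	sched_core_put();	// last user disables it, via a work item
 */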

#else /* !CONFIG_SCHED_CORE */

static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { }
static inline void
sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { }

#endif /* CONFIG_SCHED_CORE */

/*
 * Serialization rules:
 *
 * Lock order:
 *
 *   p->pi_lock
 *     rq->lock
 *       hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
 *
 *  rq1->lock
 *    rq2->lock  where: rq1 < rq2
 *
 * Regular state:
 *
 * Normal scheduling state is serialized by rq->lock. __schedule() takes the
 * local CPU's rq->lock, it optionally removes the task from the runqueue and
 * always looks at the local rq data structures to find the most eligible task
 * to run next.
 *
 * Task enqueue is also under rq->lock, possibly taken from another CPU.
 * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
 * the local CPU to avoid bouncing the runqueue state around [ see
 * ttwu_queue_wakelist() ].
 *
 * Task wakeups, specifically those that involve migration, are horribly
 * complicated to avoid having to take two rq->locks.
 *
 * Special state:
 *
 * System-calls and anything external will use task_rq_lock() which acquires
 * both p->pi_lock and rq->lock. As a consequence the state they change is
 * stable while holding either lock:
 *
 *  - sched_setaffinity()/
 *    set_cpus_allowed_ptr():	p->cpus_ptr, p->nr_cpus_allowed
 *  - set_user_nice():		p->se.load, p->*prio
 *  - __sched_setscheduler():	p->sched_class, p->policy, p->*prio,
 *				p->se.load, p->rt_priority,
 *				p->dl.dl_{runtime, deadline, period, flags, bw, density}
 *  - sched_setnuma():		p->numa_preferred_nid
 *  - sched_move_task():	p->sched_task_group
 *  - uclamp_update_active():	p->uclamp*
 *
 * p->state <- TASK_*:
 *
 *   is changed locklessly using set_current_state(), __set_current_state() or
 *   set_special_state(), see their respective comments, or by
 *   try_to_wake_up(). This latter uses p->pi_lock to serialize against
 *   concurrent self.
 *
 * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
 *
 *   is set by activate_task() and cleared by deactivate_task(), under
 *   rq->lock. Non-zero indicates the task is runnable, the special
 *   ON_RQ_MIGRATING state is used for migration without holding both
 *   rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
 *
 *   Additionally it is possible to be ->on_rq but still be considered not
 *   runnable when p->se.sched_delayed is true. These tasks are on the runqueue
 *   but will be dequeued as soon as they get picked again. See the
 *   task_is_runnable() helper.
 *
 * p->on_cpu <- { 0, 1 }:
 *
 *   is set by prepare_task() and cleared by finish_task() such that it will be
 *   set before p is scheduled-in and cleared after p is scheduled-out, both
 *   under rq->lock. Non-zero indicates the task is running on its CPU.
 *
 *   [ The astute reader will observe that it is possible for two tasks on one
 *     CPU to have ->on_cpu = 1 at the same time. ]
 *
 * task_cpu(p): is changed by set_task_cpu(), the rules are:
 *
 *  - Don't call set_task_cpu() on a blocked task:
 *
 *    We don't care what CPU we're not running on, this simplifies hotplug,
 *    the CPU assignment of blocked tasks isn't required to be valid.
 *
 *  - for try_to_wake_up(), called under p->pi_lock:
 *
 *    This allows try_to_wake_up() to only take one rq->lock, see its comment.
 *
 *  - for migration called under rq->lock:
 *    [ see task_on_rq_migrating() in task_rq_lock() ]
 *
 *    o move_queued_task()
 *    o detach_task()
 *
 *  - for migration called under double_rq_lock():
 *
 *    o __migrate_swap_task()
 *    o push_rt_task() / pull_rt_task()
 *    o push_dl_task() / pull_dl_task()
 *    o dl_task_offline_migration()
 *
 */

void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
{
	raw_spinlock_t *lock;

	/* Matches synchronize_rcu() in __sched_core_enable() */
	preempt_disable();
	if (sched_core_disabled()) {
		raw_spin_lock_nested(&rq->__lock, subclass);
		/* preempt_count *MUST* be > 1 */
		preempt_enable_no_resched();
		return;
	}

	for (;;) {
		lock = __rq_lockp(rq);
		raw_spin_lock_nested(lock, subclass);
		if (likely(lock == __rq_lockp(rq))) {
			/* preempt_count *MUST* be > 1 */
			preempt_enable_no_resched();
			return;
		}
		raw_spin_unlock(lock);
	}
}

bool raw_spin_rq_trylock(struct rq *rq)
{
	raw_spinlock_t *lock;
	bool ret;

	/* Matches synchronize_rcu() in __sched_core_enable() */
	preempt_disable();
	if (sched_core_disabled()) {
		ret = raw_spin_trylock(&rq->__lock);
		preempt_enable();
		return ret;
	}

	for (;;) {
		lock = __rq_lockp(rq);
		ret = raw_spin_trylock(lock);
		if (!ret || (likely(lock == __rq_lockp(rq)))) {
			preempt_enable();
			return ret;
		}
		raw_spin_unlock(lock);
	}
}

void raw_spin_rq_unlock(struct rq *rq)
{
	raw_spin_unlock(rq_lockp(rq));
}

#ifdef CONFIG_SMP
/*
 * double_rq_lock - safely lock two runqueues
 */
void double_rq_lock(struct rq *rq1, struct rq *rq2)
{
	lockdep_assert_irqs_disabled();

	if (rq_order_less(rq2, rq1))
		swap(rq1, rq2);

	raw_spin_rq_lock(rq1);
	if (__rq_lockp(rq1) != __rq_lockp(rq2))
		raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);

	double_rq_clock_clear_update(rq1, rq2);
}
#endif

/*
 * __task_rq_lock - lock the rq @p resides on.
 */
struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(rq->lock)
{
	struct rq *rq;

	lockdep_assert_held(&p->pi_lock);

	for (;;) {
		rq = task_rq(p);
		raw_spin_rq_lock(rq);
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rq_pin_lock(rq, rf);
			return rq;
		}
		raw_spin_rq_unlock(rq);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}

/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 */
struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(p->pi_lock)
	__acquires(rq->lock)
{
	struct rq *rq;

	for (;;) {
		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
		rq = task_rq(p);
		raw_spin_rq_lock(rq);
		/*
		 *	move_queued_task()		task_rq_lock()
		 *
		 *	ACQUIRE (rq->lock)
		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
		 *	[S] ->cpu = new_cpu		[L] task_rq()
		 *					[L] ->on_rq
		 *	RELEASE (rq->lock)
		 *
		 * If we observe the old CPU in task_rq_lock(), the acquire of
		 * the old rq->lock will fully serialize against the stores.
		 *
		 * If we observe the new CPU in task_rq_lock(), the address
		 * dependency headed by '[L] rq = task_rq()' and the acquire
		 * will pair with the WMB to ensure we then also see migrating.
		 */
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rq_pin_lock(rq, rf);
			return rq;
		}
		raw_spin_rq_unlock(rq);
		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}
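
/*
 * Typical usage sketch (editor's illustration; uclamp_update_active()
 * further down is a real instance of this pattern):
 *
 *	struct rq_flags rf;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &rf);	// p->pi_lock and rq->lock held
 *	...				// p's scheduling state is stable
 *	task_rq_unlock(rq, p, &rf);
 */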

/*
 * RQ-clock updating methods:
 */

static void update_rq_clock_task(struct rq *rq, s64 delta)
{
/*
 * In theory, the compiler should just see 0 here, and optimize out the call
 * to sched_rt_avg_update. But I don't trust it...
 */
	s64 __maybe_unused steal = 0, irq_delta = 0;

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	if (irqtime_enabled()) {
		irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

		/*
		 * Since irq_time is only updated on {soft,}irq_exit, we might run into
		 * this case when a previous update_rq_clock() happened inside a
		 * {soft,}IRQ region.
		 *
		 * When this happens, we stop ->clock_task and only update the
		 * prev_irq_time stamp to account for the part that fit, so that a next
		 * update will consume the rest. This ensures ->clock_task is
		 * monotonic.
		 *
		 * It does however cause some slight mis-attribution of {soft,}IRQ
		 * time; a more accurate solution would be to update the irq_time using
		 * the current rq->clock timestamp, except that would require using
		 * atomic ops.
		 */
		if (irq_delta > delta)
			irq_delta = delta;

		rq->prev_irq_time += irq_delta;
		delta -= irq_delta;
		delayacct_irq(rq->curr, irq_delta);
	}
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	if (static_key_false((&paravirt_steal_rq_enabled))) {
		u64 prev_steal;

		steal = prev_steal = paravirt_steal_clock(cpu_of(rq));
		steal -= rq->prev_steal_time_rq;

		if (unlikely(steal > delta))
			steal = delta;

		rq->prev_steal_time_rq = prev_steal;
		delta -= steal;
	}
#endif

	rq->clock_task += delta;

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
		update_irq_load_avg(rq, irq_delta + steal);
#endif
	update_rq_clock_pelt(rq, delta);
}

void update_rq_clock(struct rq *rq)
{
	s64 delta;
	u64 clock;

	lockdep_assert_rq_held(rq);

	if (rq->clock_update_flags & RQCF_ACT_SKIP)
		return;

#ifdef CONFIG_SCHED_DEBUG
	if (sched_feat(WARN_DOUBLE_CLOCK))
		SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
	rq->clock_update_flags |= RQCF_UPDATED;
#endif
	clock = sched_clock_cpu(cpu_of(rq));
	scx_rq_clock_update(rq, clock);

	delta = clock - rq->clock;
	if (delta < 0)
		return;
	rq->clock += delta;

	update_rq_clock_task(rq, delta);
}
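
/*
 * Editor's note on the clock hierarchy maintained above:
 *
 *	rq->clock	raw sched_clock_cpu() time, monotonic per rq
 *	rq->clock_task	rq->clock minus IRQ and steal time; the time
 *			tasks are actually charged with
 *	PELT clock	derived from the task clock via
 *			update_rq_clock_pelt()
 */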

#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 */

static void hrtick_clear(struct rq *rq)
{
	if (hrtimer_active(&rq->hrtick_timer))
		hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
	struct rq_flags rf;

	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

	rq_lock(rq, &rf);
	update_rq_clock(rq);
	rq->donor->sched_class->task_tick(rq, rq->curr, 1);
	rq_unlock(rq, &rf);

	return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP

static void __hrtick_restart(struct rq *rq)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time = rq->hrtick_time;

	hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
}

/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
	struct rq *rq = arg;
	struct rq_flags rf;

	rq_lock(rq, &rf);
	__hrtick_restart(rq);
	rq_unlock(rq, &rf);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and IRQs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	s64 delta;

	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense and can cause timer DoS.
	 */
	delta = max_t(s64, delay, 10000LL);
	rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);

	if (rq == this_rq())
		__hrtick_restart(rq);
	else
		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
}

#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and IRQs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense. Rely on vruntime for fairness.
	 */
	delay = max_t(u64, delay, 10000LL);
	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
		      HRTIMER_MODE_REL_PINNED_HARD);
}

#endif /* CONFIG_SMP */

static void hrtick_rq_init(struct rq *rq)
{
#ifdef CONFIG_SMP
	INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
#endif
	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	rq->hrtick_timer.function = hrtick;
}
#else	/* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void hrtick_rq_init(struct rq *rq)
{
}
#endif	/* CONFIG_SCHED_HRTICK */

/*
 * try_cmpxchg based fetch_or() macro so it works for different integer types:
 */
#define fetch_or(ptr, mask)						\
	({								\
		typeof(ptr) _ptr = (ptr);				\
		typeof(mask) _mask = (mask);				\
		typeof(*_ptr) _val = *_ptr;				\
									\
		do {							\
		} while (!try_cmpxchg(_ptr, &_val, _val | _mask));	\
	_val;								\
})
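
/*
 * Semantics sketch (editor's illustration): fetch_or() atomically ORs
 * @mask into *@ptr and returns the *old* value, so callers can tell
 * whether a bit was already set before the OR:
 *
 *	unsigned long flags = 0;
 *	fetch_or(&flags, 0x1);	// returns 0x0, flags is now 0x1
 *	fetch_or(&flags, 0x2);	// returns 0x1, flags is now 0x3
 */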

#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
/*
 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
 * this avoids any races wrt polling state changes and thereby avoids
 * spurious IPIs.
 */
static inline bool set_nr_and_not_polling(struct thread_info *ti, int tif)
{
	return !(fetch_or(&ti->flags, 1 << tif) & _TIF_POLLING_NRFLAG);
}

/*
 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
 *
 * If this returns true, then the idle task promises to call
 * sched_ttwu_pending() and reschedule soon.
 */
static bool set_nr_if_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	typeof(ti->flags) val = READ_ONCE(ti->flags);

	do {
		if (!(val & _TIF_POLLING_NRFLAG))
			return false;
		if (val & _TIF_NEED_RESCHED)
			return true;
	} while (!try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED));

	return true;
}

#else
static inline bool set_nr_and_not_polling(struct thread_info *ti, int tif)
{
	set_ti_thread_flag(ti, tif);
	return true;
}

#ifdef CONFIG_SMP
static inline bool set_nr_if_polling(struct task_struct *p)
{
	return false;
}
#endif
#endif

static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	struct wake_q_node *node = &task->wake_q;

	/*
	 * Atomically grab the task, if ->wake_q is !nil already it means
	 * it's already queued (either by us or someone else) and will get the
	 * wakeup due to that.
	 *
	 * In order to ensure that a pending wakeup will observe our pending
	 * state, even in the failed case, an explicit smp_mb() must be used.
	 */
	smp_mb__before_atomic();
	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
		return false;

	/*
	 * The head is context local, there can be no concurrency.
	 */
	*head->lastp = node;
	head->lastp = &node->next;
	return true;
}

/**
 * wake_q_add() - queue a wakeup for 'later' waking.
 * @head: the wake_q_head to add @task to
 * @task: the task to queue for 'later' wakeup
 *
 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 * instantly.
 *
 * This function must be used as-if it were wake_up_process(); IOW the task
 * must be ready to be woken at this location.
 */
void wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	if (__wake_q_add(head, task))
		get_task_struct(task);
}

/**
 * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
 * @head: the wake_q_head to add @task to
 * @task: the task to queue for 'later' wakeup
 *
 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 * instantly.
 *
 * This function must be used as-if it were wake_up_process(); IOW the task
 * must be ready to be woken at this location.
 *
 * This function is essentially a task-safe equivalent to wake_q_add(). Callers
 * that already hold a reference to @task can call the 'safe' version and trust
 * wake_q to do the right thing depending on whether or not the @task is
 * already queued for wakeup.
 */
void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
{
	if (!__wake_q_add(head, task))
		put_task_struct(task);
}

void wake_up_q(struct wake_q_head *head)
{
	struct wake_q_node *node = head->first;

	while (node != WAKE_Q_TAIL) {
		struct task_struct *task;

		task = container_of(node, struct task_struct, wake_q);
		node = node->next;
		/* pairs with cmpxchg_relaxed() in __wake_q_add() */
		WRITE_ONCE(task->wake_q.next, NULL);
		/* Task can safely be re-inserted now. */

		/*
		 * wake_up_process() executes a full barrier, which pairs with
		 * the queueing in wake_q_add() so as not to miss wakeups.
		 */
		wake_up_process(task);
		put_task_struct(task);
	}
}
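
/*
 * Usage sketch (editor's illustration) of the wake_q batching API:
 *
 *	DEFINE_WAKE_Q(wakeq);
 *
 *	raw_spin_lock(&some_lock);
 *	wake_q_add(&wakeq, p);		// takes a reference on p
 *	raw_spin_unlock(&some_lock);
 *	wake_up_q(&wakeq);		// wakeups happen outside the lock
 *
 * (some_lock stands in for whatever lock the caller holds.)
 */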

/*
 * resched_curr - mark rq's current task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
static void __resched_curr(struct rq *rq, int tif)
{
	struct task_struct *curr = rq->curr;
	struct thread_info *cti = task_thread_info(curr);
	int cpu;

	lockdep_assert_rq_held(rq);

	/*
	 * Always immediately preempt the idle task; no point in delaying doing
	 * actual work.
	 */
	if (is_idle_task(curr) && tif == TIF_NEED_RESCHED_LAZY)
		tif = TIF_NEED_RESCHED;

	if (cti->flags & ((1 << tif) | _TIF_NEED_RESCHED))
		return;

	cpu = cpu_of(rq);

	if (cpu == smp_processor_id()) {
		set_ti_thread_flag(cti, tif);
		if (tif == TIF_NEED_RESCHED)
			set_preempt_need_resched();
		return;
	}

	if (set_nr_and_not_polling(cti, tif)) {
		if (tif == TIF_NEED_RESCHED)
			smp_send_reschedule(cpu);
	} else {
		trace_sched_wake_idle_without_ipi(cpu);
	}
}

void resched_curr(struct rq *rq)
{
	__resched_curr(rq, TIF_NEED_RESCHED);
}

#ifdef CONFIG_PREEMPT_DYNAMIC
static DEFINE_STATIC_KEY_FALSE(sk_dynamic_preempt_lazy);
static __always_inline bool dynamic_preempt_lazy(void)
{
	return static_branch_unlikely(&sk_dynamic_preempt_lazy);
}
#else
static __always_inline bool dynamic_preempt_lazy(void)
{
	return IS_ENABLED(CONFIG_PREEMPT_LAZY);
}
#endif

static __always_inline int get_lazy_tif_bit(void)
{
	if (dynamic_preempt_lazy())
		return TIF_NEED_RESCHED_LAZY;

	return TIF_NEED_RESCHED;
}

void resched_curr_lazy(struct rq *rq)
{
	__resched_curr(rq, get_lazy_tif_bit());
}

void resched_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	raw_spin_rq_lock_irqsave(rq, flags);
	if (cpu_online(cpu) || cpu == smp_processor_id())
		resched_curr(rq);
	raw_spin_rq_unlock_irqrestore(rq, flags);
}

#ifdef CONFIG_SMP
#ifdef CONFIG_NO_HZ_COMMON
/*
 * In the semi-idle case, use the nearest busy CPU for migrating timers
 * from an idle CPU.  This is good for power-savings.
 *
 * We don't do a similar optimization for a completely idle system, as
 * selecting an idle CPU will add more delays to the timers than intended
 * (as that CPU's timer base may not be up to date wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
	int i, cpu = smp_processor_id(), default_cpu = -1;
	struct sched_domain *sd;
	const struct cpumask *hk_mask;

	if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE)) {
		if (!idle_cpu(cpu))
			return cpu;
		default_cpu = cpu;
	}

	hk_mask = housekeeping_cpumask(HK_TYPE_KERNEL_NOISE);

	guard(rcu)();

	for_each_domain(cpu, sd) {
		for_each_cpu_and(i, sched_domain_span(sd), hk_mask) {
			if (cpu == i)
				continue;

			if (!idle_cpu(i))
				return i;
		}
	}

	if (default_cpu == -1)
		default_cpu = housekeeping_any_cpu(HK_TYPE_KERNEL_NOISE);

	return default_cpu;
}

/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
static void wake_up_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (cpu == smp_processor_id())
		return;

	/*
	 * Set TIF_NEED_RESCHED and send an IPI if in the non-polling
	 * part of the idle loop. This forces an exit from the idle loop
	 * and a round trip to schedule(). Now this could be optimized
	 * because a simple new idle loop iteration is enough to
	 * re-evaluate the next tick. Provided some re-ordering of tick
	 * nohz functions that would need to follow TIF_POLLING_NRFLAG
	 * clearing:
	 *
	 * - On most architectures, a simple fetch_or on ti::flags with a
	 *   "0" value would be enough to know if an IPI needs to be sent.
	 *
	 * - x86 needs to perform a last need_resched() check between
	 *   monitor and mwait which doesn't take timers into account.
	 *   There a dedicated TIF_TIMER flag would be required to
	 *   fetch_or here and be checked along with TIF_NEED_RESCHED
	 *   before mwait().
	 *
	 * However, remote timer enqueue is not such a frequent event
	 * and testing of the above solutions didn't appear to report
	 * much benefit.
	 */
	if (set_nr_and_not_polling(task_thread_info(rq->idle), TIF_NEED_RESCHED))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

static bool wake_up_full_nohz_cpu(int cpu)
{
	/*
	 * We just need the target to call irq_exit() and re-evaluate
	 * the next tick. The nohz full kick at least implies that.
	 * If needed we can still optimize that later with an
	 * empty IRQ.
	 */
	if (cpu_is_offline(cpu))
		return true;  /* Don't try to wake offline CPUs. */
	if (tick_nohz_full_cpu(cpu)) {
		if (cpu != smp_processor_id() ||
		    tick_nohz_tick_stopped())
			tick_nohz_full_kick_cpu(cpu);
		return true;
	}

	return false;
}

/*
 * Wake up the specified CPU.  If the CPU is going offline, it is the
 * caller's responsibility to deal with the lost wakeup, for example,
 * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
 */
void wake_up_nohz_cpu(int cpu)
{
	if (!wake_up_full_nohz_cpu(cpu))
		wake_up_idle_cpu(cpu);
}

static void nohz_csd_func(void *info)
{
	struct rq *rq = info;
	int cpu = cpu_of(rq);
	unsigned int flags;

	/*
	 * Release the rq::nohz_csd.
	 */
	flags = atomic_fetch_andnot(NOHZ_KICK_MASK | NOHZ_NEWILB_KICK, nohz_flags(cpu));
	WARN_ON(!(flags & NOHZ_KICK_MASK));

	rq->idle_balance = idle_cpu(cpu);
	if (rq->idle_balance) {
		rq->nohz_idle_balance = flags;
		__raise_softirq_irqoff(SCHED_SOFTIRQ);
	}
}

#endif /* CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_NO_HZ_FULL
static inline bool __need_bw_check(struct rq *rq, struct task_struct *p)
{
	if (rq->nr_running != 1)
		return false;

	if (p->sched_class != &fair_sched_class)
		return false;

	if (!task_on_rq_queued(p))
		return false;

	return true;
}

bool sched_can_stop_tick(struct rq *rq)
{
	int fifo_nr_running;

	/* Deadline tasks, even if single, need the tick */
	if (rq->dl.dl_nr_running)
		return false;

	/*
	 * If there is more than one RR task, we need the tick to affect the
	 * actual RR behaviour.
	 */
	if (rq->rt.rr_nr_running) {
		if (rq->rt.rr_nr_running == 1)
			return true;
		else
			return false;
	}

	/*
	 * If there are no RR tasks, but there are FIFO tasks, we can skip the
	 * tick: there is no forced preemption between FIFO tasks.
	 */
	fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
	if (fifo_nr_running)
		return true;

	/*
	 * If there are no DL, RR or FIFO tasks, there must only be CFS or SCX
	 * tasks left. For CFS, if there's more than one we need the tick for
	 * involuntary preemption. For SCX, ask.
	 */
	if (scx_enabled() && !scx_can_stop_tick(rq))
		return false;

	if (rq->cfs.h_nr_queued > 1)
		return false;

	/*
	 * If there is one task and it has CFS runtime bandwidth constraints
	 * and it's on the CPU now, we don't want to stop the tick.
	 * This check prevents clearing the bit if a newly enqueued task here is
	 * dequeued by migrating while the constrained task continues to run.
	 * E.g. going from 2->1 without going through pick_next_task().
	 */
	if (__need_bw_check(rq, rq->curr)) {
		if (cfs_task_bw_constrained(rq->curr))
			return false;
	}

	return true;
}
#endif /* CONFIG_NO_HZ_FULL */
#endif /* CONFIG_SMP */

#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
/*
 * Iterate task_group tree rooted at *from, calling @down when first entering a
 * node and @up when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data)
{
	struct task_group *parent, *child;
	int ret;

	parent = from;

down:
	ret = (*down)(parent, data);
	if (ret)
		goto out;
	list_for_each_entry_rcu(child, &parent->children, siblings) {
		parent = child;
		goto down;

up:
		continue;
	}
	ret = (*up)(parent, data);
	if (ret || parent == from)
		goto out;

	child = parent;
	parent = parent->parent;
	if (parent)
		goto up;
out:
	return ret;
}

int tg_nop(struct task_group *tg, void *data)
{
	return 0;
}
#endif
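
/*
 * Usage sketch (editor's illustration): visit every task_group below
 * @from, doing work on the way down only; tg_nop() serves as the @up
 * callback when no post-order work is needed:
 *
 *	rcu_read_lock();
 *	walk_tg_tree_from(from, my_down_visitor, tg_nop, data);
 *	rcu_read_unlock();
 *
 * (my_down_visitor is a hypothetical tg_visitor that returns 0 to keep
 * the walk going and non-zero to abort it.)
 */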

void set_load_weight(struct task_struct *p, bool update_load)
{
	int prio = p->static_prio - MAX_RT_PRIO;
	struct load_weight lw;

	if (task_has_idle_policy(p)) {
		lw.weight = scale_load(WEIGHT_IDLEPRIO);
		lw.inv_weight = WMULT_IDLEPRIO;
	} else {
		lw.weight = scale_load(sched_prio_to_weight[prio]);
		lw.inv_weight = sched_prio_to_wmult[prio];
	}

	/*
	 * SCHED_OTHER tasks have to update their load when changing their
	 * weight
	 */
	if (update_load && p->sched_class->reweight_task)
		p->sched_class->reweight_task(task_rq(p), p, &lw);
	else
		p->se.load = lw;
}
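
/*
 * Editor's note: sched_prio_to_weight[] maps nice levels to weights such
 * that each nice step changes the CPU share by roughly 10%; nice 0
 * corresponds to a weight of 1024 (NICE_0_LOAD). For example:
 *
 *	nice -10 -> 9548
 *	nice   0 -> 1024
 *	nice  10 ->  110
 */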

#ifdef CONFIG_UCLAMP_TASK
/*
 * Serializes updates of utilization clamp values
 *
 * The (slow-path) user-space triggers utilization clamp value updates which
 * can require updates on (fast-path) scheduler's data structures used to
 * support enqueue/dequeue operations.
 * While the per-CPU rq lock protects fast-path update operations, user-space
 * requests are serialized using a mutex to reduce the risk of conflicting
 * updates or API abuses.
 */
static __maybe_unused DEFINE_MUTEX(uclamp_mutex);

/* Max allowed minimum utilization */
static unsigned int __maybe_unused sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;

/* Max allowed maximum utilization */
static unsigned int __maybe_unused sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;

/*
 * By default RT tasks run at the maximum performance point/capacity of the
 * system. Uclamp enforces this by always setting UCLAMP_MIN of RT tasks to
 * SCHED_CAPACITY_SCALE.
 *
 * This knob allows admins to change the default behavior when uclamp is being
 * used. In battery powered devices, particularly, running at the maximum
 * capacity and frequency will increase energy consumption and shorten the
 * battery life.
 *
 * This knob only affects RT tasks whose uclamp_se->user_defined == false.
 *
 * This knob will not override the system default sched_util_clamp_min defined
 * above.
 */
unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE;

/* All clamps are required to be less than or equal to these values */
static struct uclamp_se uclamp_default[UCLAMP_CNT];

/*
 * This static key is used to reduce the uclamp overhead in the fast path. It
 * primarily disables the call to uclamp_rq_{inc, dec}() in
 * enqueue/dequeue_task().
 *
 * This allows users to continue to enable uclamp in their kernel config with
 * minimum uclamp overhead in the fast path.
 *
 * As soon as userspace modifies any of the uclamp knobs, the static key is
 * enabled, since we have actual users that make use of uclamp
 * functionality.
 *
 * The knobs that would enable this static key are:
 *
 *   * A task modifying its uclamp value with sched_setattr().
 *   * An admin modifying the sysctl_sched_uclamp_{min, max} via procfs.
 *   * An admin modifying the cgroup cpu.uclamp.{min, max}
 */
DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);

static inline unsigned int
uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
		  unsigned int clamp_value)
{
	/*
	 * Avoid blocked utilization pushing up the frequency when we go
	 * idle (which drops the max-clamp) by retaining the last known
	 * max-clamp.
	 */
	if (clamp_id == UCLAMP_MAX) {
		rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
		return clamp_value;
	}

	return uclamp_none(UCLAMP_MIN);
}

static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
				     unsigned int clamp_value)
{
	/* Reset max-clamp retention only on idle exit */
	if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
		return;

	uclamp_rq_set(rq, clamp_id, clamp_value);
}

static inline
unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
				   unsigned int clamp_value)
{
	struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
	int bucket_id = UCLAMP_BUCKETS - 1;

	/*
	 * Since both min and max clamps are max aggregated, find the
	 * topmost bucket with tasks in it.
	 */
	for ( ; bucket_id >= 0; bucket_id--) {
		if (!bucket[bucket_id].tasks)
			continue;
		return bucket[bucket_id].value;
	}

	/* No tasks -- default clamp values */
	return uclamp_idle_value(rq, clamp_id, clamp_value);
}

static void __uclamp_update_util_min_rt_default(struct task_struct *p)
{
	unsigned int default_util_min;
	struct uclamp_se *uc_se;

	lockdep_assert_held(&p->pi_lock);

	uc_se = &p->uclamp_req[UCLAMP_MIN];

	/* Only sync if user didn't override the default */
	if (uc_se->user_defined)
		return;

	default_util_min = sysctl_sched_uclamp_util_min_rt_default;
	uclamp_se_set(uc_se, default_util_min, false);
}

static void uclamp_update_util_min_rt_default(struct task_struct *p)
{
	if (!rt_task(p))
		return;

	/* Protect updates to p->uclamp_* */
	guard(task_rq_lock)(p);
	__uclamp_update_util_min_rt_default(p);
}

static inline struct uclamp_se
uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
{
	/* Copy by value as we could modify it */
	struct uclamp_se uc_req = p->uclamp_req[clamp_id];
#ifdef CONFIG_UCLAMP_TASK_GROUP
	unsigned int tg_min, tg_max, value;

	/*
	 * Tasks in autogroups or root task group will be
	 * restricted by system defaults.
	 */
	if (task_group_is_autogroup(task_group(p)))
		return uc_req;
	if (task_group(p) == &root_task_group)
		return uc_req;

	tg_min = task_group(p)->uclamp[UCLAMP_MIN].value;
	tg_max = task_group(p)->uclamp[UCLAMP_MAX].value;
	value = uc_req.value;
	value = clamp(value, tg_min, tg_max);
	uclamp_se_set(&uc_req, value, false);
#endif

	return uc_req;
}

/*
 * The effective clamp bucket index of a task depends on, by increasing
 * priority:
 * - the task specific clamp value, when explicitly requested from userspace
 * - the task group effective clamp value, for tasks neither in the root
 *   group nor in an autogroup
 * - the system default clamp value, defined by the sysadmin
 */
static inline struct uclamp_se
uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
{
	struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id);
	struct uclamp_se uc_max = uclamp_default[clamp_id];

	/* System default restrictions always apply */
	if (unlikely(uc_req.value > uc_max.value))
		return uc_max;

	return uc_req;
}

unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
{
	struct uclamp_se uc_eff;

	/* Task currently refcounted: use back-annotated (effective) value */
	if (p->uclamp[clamp_id].active)
		return (unsigned long)p->uclamp[clamp_id].value;

	uc_eff = uclamp_eff_get(p, clamp_id);

	return (unsigned long)uc_eff.value;
}
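
/*
 * Composition sketch (editor's illustration): the effective clamp is the
 * task's request, restricted first by its task group and then by the
 * system default. E.g. for UCLAMP_MIN:
 *
 *	task request:	512
 *	tg range:	[0, 400]	-> restricted to 400
 *	system max:	300		-> effective value 300
 */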

/*
 * When a task is enqueued on a rq, the clamp bucket currently defined by the
 * task's uclamp::bucket_id is refcounted on that rq. This also immediately
 * updates the rq's clamp value if required.
 *
 * Tasks can have a task-specific value requested from user-space; within
 * each bucket we track the maximum value of the tasks refcounted in it.
 * This "local max aggregation" allows tracking the exact "requested" value
 * for each bucket when all its RUNNABLE tasks require the same clamp.
 */
static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
				    enum uclamp_id clamp_id)
{
	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
	struct uclamp_bucket *bucket;

	lockdep_assert_rq_held(rq);

	/* Update task effective clamp */
	p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);

	bucket = &uc_rq->bucket[uc_se->bucket_id];
	bucket->tasks++;
	uc_se->active = true;

	uclamp_idle_reset(rq, clamp_id, uc_se->value);

	/*
	 * Local max aggregation: rq buckets always track the max
	 * "requested" clamp value of its RUNNABLE tasks.
	 */
	if (bucket->tasks == 1 || uc_se->value > bucket->value)
		bucket->value = uc_se->value;

	if (uc_se->value > uclamp_rq_get(rq, clamp_id))
		uclamp_rq_set(rq, clamp_id, uc_se->value);
}

/*
 * When a task is dequeued from a rq, the clamp bucket refcounted by the task
 * is released. If this is the last task reference counting the rq's max
 * active clamp value, then the rq's clamp value is updated.
 *
 * Both refcounted tasks and rq's cached clamp values are expected to be
 * always valid. If it's detected they are not, as defensive programming,
 * enforce the expected state and warn.
 */
static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
				    enum uclamp_id clamp_id)
{
	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
	struct uclamp_bucket *bucket;
	unsigned int bkt_clamp;
	unsigned int rq_clamp;

	lockdep_assert_rq_held(rq);

	/*
	 * If sched_uclamp_used was enabled after task @p was enqueued,
	 * we could end up with an unbalanced call to uclamp_rq_dec_id().
	 *
	 * In this case the uc_se->active flag should be false since no uclamp
	 * accounting was performed at enqueue time and we can just return
	 * here.
	 *
	 * Need to be careful of the following enqueue/dequeue ordering
	 * problem too
	 *
	 *	enqueue(taskA)
	 *	// sched_uclamp_used gets enabled
	 *	enqueue(taskB)
	 *	dequeue(taskA)
	 *	// Must not decrement bucket->tasks here
	 *	dequeue(taskB)
	 *
	 * where we could end up with stale data in uc_se and
	 * bucket[uc_se->bucket_id].
	 *
	 * The following check here eliminates the possibility of such race.
	 */
	if (unlikely(!uc_se->active))
		return;

	bucket = &uc_rq->bucket[uc_se->bucket_id];

	SCHED_WARN_ON(!bucket->tasks);
	if (likely(bucket->tasks))
		bucket->tasks--;

	uc_se->active = false;

	/*
	 * Keep "local max aggregation" simple and accept to (possibly)
	 * overboost some RUNNABLE tasks in the same bucket.
	 * The rq clamp bucket value is reset to its base value whenever
	 * there are no more RUNNABLE tasks refcounting it.
	 */
	if (likely(bucket->tasks))
		return;

	rq_clamp = uclamp_rq_get(rq, clamp_id);
	/*
	 * Defensive programming: this should never happen. If it happens,
	 * e.g. due to future modification, warn and fix up the expected value.
	 */
	SCHED_WARN_ON(bucket->value > rq_clamp);
	if (bucket->value >= rq_clamp) {
		bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
		uclamp_rq_set(rq, clamp_id, bkt_clamp);
	}
}
1749 
uclamp_rq_inc(struct rq * rq,struct task_struct * p)1750 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
1751 {
1752 	enum uclamp_id clamp_id;
1753 
1754 	/*
1755 	 * Avoid any overhead until uclamp is actually used by userspace.
1756 	 *
1757 	 * The condition is constructed such that a NOP is generated when
1758 	 * sched_uclamp_used is disabled.
1759 	 */
1760 	if (!static_branch_unlikely(&sched_uclamp_used))
1761 		return;
1762 
1763 	if (unlikely(!p->sched_class->uclamp_enabled))
1764 		return;
1765 
1766 	if (p->se.sched_delayed)
1767 		return;
1768 
1769 	for_each_clamp_id(clamp_id)
1770 		uclamp_rq_inc_id(rq, p, clamp_id);
1771 
1772 	/* Reset clamp idle holding when there is one RUNNABLE task */
1773 	if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
1774 		rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
1775 }
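/*
 * The gate above is the common static key pattern; in sketch form (the
 * key itself is defined elsewhere in this file):
 *
 *	DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);
 *
 *	if (!static_branch_unlikely(&sched_uclamp_used))
 *		return;
 *
 * While the key is disabled the test compiles to a single patched NOP
 * followed by the early return, with no memory load;
 * static_branch_enable() (see sysctl_sched_uclamp_handler() below)
 * rewrites it to a jump at runtime.
 */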
1776 
1777 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
1778 {
1779 	enum uclamp_id clamp_id;
1780 
1781 	/*
1782 	 * Avoid any overhead until uclamp is actually used by userspace.
1783 	 *
1784 	 * The condition is constructed such that a NOP is generated when
1785 	 * sched_uclamp_used is disabled.
1786 	 */
1787 	if (!static_branch_unlikely(&sched_uclamp_used))
1788 		return;
1789 
1790 	if (unlikely(!p->sched_class->uclamp_enabled))
1791 		return;
1792 
1793 	if (p->se.sched_delayed)
1794 		return;
1795 
1796 	for_each_clamp_id(clamp_id)
1797 		uclamp_rq_dec_id(rq, p, clamp_id);
1798 }
1799 
1800 static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p,
1801 				      enum uclamp_id clamp_id)
1802 {
1803 	if (!p->uclamp[clamp_id].active)
1804 		return;
1805 
1806 	uclamp_rq_dec_id(rq, p, clamp_id);
1807 	uclamp_rq_inc_id(rq, p, clamp_id);
1808 
1809 	/*
1810 	 * Make sure to clear the idle flag if we've transiently reached 0
1811 	 * active tasks on rq.
1812 	 */
1813 	if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE))
1814 		rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
1815 }
1816 
1817 static inline void
1818 uclamp_update_active(struct task_struct *p)
1819 {
1820 	enum uclamp_id clamp_id;
1821 	struct rq_flags rf;
1822 	struct rq *rq;
1823 
1824 	/*
1825 	 * Lock the task and the rq where the task is (or was) queued.
1826 	 *
1827 	 * We might lock the (previous) rq of a !RUNNABLE task, but that's the
1828 	 * price to pay to safely serialize util_{min,max} updates with
1829 	 * enqueues, dequeues and migration operations.
1830 	 * This is the same locking scheme used by __set_cpus_allowed_ptr().
1831 	 */
1832 	rq = task_rq_lock(p, &rf);
1833 
1834 	/*
1835 	 * Setting the clamp bucket is serialized by task_rq_lock().
1836 	 * If the task is not yet RUNNABLE and its task_struct is not
1837 	 * affecting a valid clamp bucket, the next time it's enqueued,
1838 	 * it will already see the updated clamp bucket value.
1839 	 */
1840 	for_each_clamp_id(clamp_id)
1841 		uclamp_rq_reinc_id(rq, p, clamp_id);
1842 
1843 	task_rq_unlock(rq, p, &rf);
1844 }
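/*
 * The lock-both idiom used above, as a standalone sketch:
 *
 *	struct rq_flags rf;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &rf);	(acquires p->pi_lock and rq->lock)
 *	... update state that must not race with enqueue/dequeue/migration ...
 *	task_rq_unlock(rq, p, &rf);
 */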
1845 
1846 #ifdef CONFIG_UCLAMP_TASK_GROUP
1847 static inline void
1848 uclamp_update_active_tasks(struct cgroup_subsys_state *css)
1849 {
1850 	struct css_task_iter it;
1851 	struct task_struct *p;
1852 
1853 	css_task_iter_start(css, 0, &it);
1854 	while ((p = css_task_iter_next(&it)))
1855 		uclamp_update_active(p);
1856 	css_task_iter_end(&it);
1857 }
1858 
1859 static void cpu_util_update_eff(struct cgroup_subsys_state *css);
1860 #endif
1861 
1862 #ifdef CONFIG_SYSCTL
1863 #ifdef CONFIG_UCLAMP_TASK_GROUP
1864 static void uclamp_update_root_tg(void)
1865 {
1866 	struct task_group *tg = &root_task_group;
1867 
1868 	uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN],
1869 		      sysctl_sched_uclamp_util_min, false);
1870 	uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
1871 		      sysctl_sched_uclamp_util_max, false);
1872 
1873 	guard(rcu)();
1874 	cpu_util_update_eff(&root_task_group.css);
1875 }
1876 #else
1877 static void uclamp_update_root_tg(void) { }
1878 #endif
1879 
1880 static void uclamp_sync_util_min_rt_default(void)
1881 {
1882 	struct task_struct *g, *p;
1883 
1884 	/*
1885 	 * copy_process()			sysctl_uclamp
1886 	 *					  uclamp_min_rt = X;
1887 	 *   write_lock(&tasklist_lock)		  read_lock(&tasklist_lock)
1888 	 *   // link thread			  smp_mb__after_spinlock()
1889 	 *   write_unlock(&tasklist_lock)	  read_unlock(&tasklist_lock);
1890 	 *   sched_post_fork()			  for_each_process_thread()
1891 	 *     __uclamp_sync_rt()		    __uclamp_sync_rt()
1892 	 *
1893 	 * Ensures that either sched_post_fork() will observe the new
1894 	 * uclamp_min_rt or for_each_process_thread() will observe the new
1895 	 * task.
1896 	 */
1897 	read_lock(&tasklist_lock);
1898 	smp_mb__after_spinlock();
1899 	read_unlock(&tasklist_lock);
1900 
1901 	guard(rcu)();
1902 	for_each_process_thread(g, p)
1903 		uclamp_update_util_min_rt_default(p);
1904 }
1905 
1906 static int sysctl_sched_uclamp_handler(const struct ctl_table *table, int write,
1907 				void *buffer, size_t *lenp, loff_t *ppos)
1908 {
1909 	bool update_root_tg = false;
1910 	int old_min, old_max, old_min_rt;
1911 	int result;
1912 
1913 	guard(mutex)(&uclamp_mutex);
1914 
1915 	old_min = sysctl_sched_uclamp_util_min;
1916 	old_max = sysctl_sched_uclamp_util_max;
1917 	old_min_rt = sysctl_sched_uclamp_util_min_rt_default;
1918 
1919 	result = proc_dointvec(table, write, buffer, lenp, ppos);
1920 	if (result)
1921 		goto undo;
1922 	if (!write)
1923 		return 0;
1924 
1925 	if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
1926 	    sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE	||
1927 	    sysctl_sched_uclamp_util_min_rt_default > SCHED_CAPACITY_SCALE) {
1928 
1929 		result = -EINVAL;
1930 		goto undo;
1931 	}
1932 
1933 	if (old_min != sysctl_sched_uclamp_util_min) {
1934 		uclamp_se_set(&uclamp_default[UCLAMP_MIN],
1935 			      sysctl_sched_uclamp_util_min, false);
1936 		update_root_tg = true;
1937 	}
1938 	if (old_max != sysctl_sched_uclamp_util_max) {
1939 		uclamp_se_set(&uclamp_default[UCLAMP_MAX],
1940 			      sysctl_sched_uclamp_util_max, false);
1941 		update_root_tg = true;
1942 	}
1943 
1944 	if (update_root_tg) {
1945 		static_branch_enable(&sched_uclamp_used);
1946 		uclamp_update_root_tg();
1947 	}
1948 
1949 	if (old_min_rt != sysctl_sched_uclamp_util_min_rt_default) {
1950 		static_branch_enable(&sched_uclamp_used);
1951 		uclamp_sync_util_min_rt_default();
1952 	}
1953 
1954 	/*
1955 	 * We update all RUNNABLE tasks only when task groups are in use.
1956 	 * Otherwise, keep it simple and just do a lazy update at the next
1957 	 * task enqueue time.
1958 	 */
1959 	return 0;
1960 
1961 undo:
1962 	sysctl_sched_uclamp_util_min = old_min;
1963 	sysctl_sched_uclamp_util_max = old_max;
1964 	sysctl_sched_uclamp_util_min_rt_default = old_min_rt;
1965 	return result;
1966 }
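/*
 * Userspace drives this handler through procfs; e.g. (illustrative
 * values):
 *
 *	# echo 128 > /proc/sys/kernel/sched_util_clamp_min
 *	# echo 512 > /proc/sys/kernel/sched_util_clamp_max
 *
 * Values are on the SCHED_CAPACITY_SCALE (0..1024) range and min must not
 * exceed max; an invalid write fails with -EINVAL and the old values are
 * restored by the undo path above.
 */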
1967 #endif
1968 
1969 static void uclamp_fork(struct task_struct *p)
1970 {
1971 	enum uclamp_id clamp_id;
1972 
1973 	/*
1974 	 * We don't need to hold task_rq_lock() when updating p->uclamp_* here
1975 	 * as the task is still at its early fork stages.
1976 	 */
1977 	for_each_clamp_id(clamp_id)
1978 		p->uclamp[clamp_id].active = false;
1979 
1980 	if (likely(!p->sched_reset_on_fork))
1981 		return;
1982 
1983 	for_each_clamp_id(clamp_id) {
1984 		uclamp_se_set(&p->uclamp_req[clamp_id],
1985 			      uclamp_none(clamp_id), false);
1986 	}
1987 }
1988 
1989 static void uclamp_post_fork(struct task_struct *p)
1990 {
1991 	uclamp_update_util_min_rt_default(p);
1992 }
1993 
1994 static void __init init_uclamp_rq(struct rq *rq)
1995 {
1996 	enum uclamp_id clamp_id;
1997 	struct uclamp_rq *uc_rq = rq->uclamp;
1998 
1999 	for_each_clamp_id(clamp_id) {
2000 		uc_rq[clamp_id] = (struct uclamp_rq) {
2001 			.value = uclamp_none(clamp_id)
2002 		};
2003 	}
2004 
2005 	rq->uclamp_flags = UCLAMP_FLAG_IDLE;
2006 }
2007 
2008 static void __init init_uclamp(void)
2009 {
2010 	struct uclamp_se uc_max = {};
2011 	enum uclamp_id clamp_id;
2012 	int cpu;
2013 
2014 	for_each_possible_cpu(cpu)
2015 		init_uclamp_rq(cpu_rq(cpu));
2016 
2017 	for_each_clamp_id(clamp_id) {
2018 		uclamp_se_set(&init_task.uclamp_req[clamp_id],
2019 			      uclamp_none(clamp_id), false);
2020 	}
2021 
2022 	/* System defaults allow max clamp values for both indexes */
2023 	uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false);
2024 	for_each_clamp_id(clamp_id) {
2025 		uclamp_default[clamp_id] = uc_max;
2026 #ifdef CONFIG_UCLAMP_TASK_GROUP
2027 		root_task_group.uclamp_req[clamp_id] = uc_max;
2028 		root_task_group.uclamp[clamp_id] = uc_max;
2029 #endif
2030 	}
2031 }
2032 
2033 #else /* !CONFIG_UCLAMP_TASK */
2034 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { }
2035 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
2036 static inline void uclamp_fork(struct task_struct *p) { }
2037 static inline void uclamp_post_fork(struct task_struct *p) { }
2038 static inline void init_uclamp(void) { }
2039 #endif /* CONFIG_UCLAMP_TASK */
2040 
2041 bool sched_task_on_rq(struct task_struct *p)
2042 {
2043 	return task_on_rq_queued(p);
2044 }
2045 
2046 unsigned long get_wchan(struct task_struct *p)
2047 {
2048 	unsigned long ip = 0;
2049 	unsigned int state;
2050 
2051 	if (!p || p == current)
2052 		return 0;
2053 
2054 	/* Only get wchan if task is blocked and we can keep it that way. */
2055 	raw_spin_lock_irq(&p->pi_lock);
2056 	state = READ_ONCE(p->__state);
2057 	smp_rmb(); /* see try_to_wake_up() */
2058 	if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
2059 		ip = __get_wchan(p);
2060 	raw_spin_unlock_irq(&p->pi_lock);
2061 
2062 	return ip;
2063 }
2064 
2065 void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
2066 {
2067 	if (!(flags & ENQUEUE_NOCLOCK))
2068 		update_rq_clock(rq);
2069 
2070 	p->sched_class->enqueue_task(rq, p, flags);
2071 	/*
2072 	 * Must be after ->enqueue_task() because ENQUEUE_DELAYED can clear
2073 	 * ->sched_delayed.
2074 	 */
2075 	uclamp_rq_inc(rq, p);
2076 
2077 	psi_enqueue(p, flags);
2078 
2079 	if (!(flags & ENQUEUE_RESTORE))
2080 		sched_info_enqueue(rq, p);
2081 
2082 	if (sched_core_enabled(rq))
2083 		sched_core_enqueue(rq, p);
2084 }
2085 
2086 /*
2087  * Must only return false when DEQUEUE_SLEEP.
2088  */
2089 inline bool dequeue_task(struct rq *rq, struct task_struct *p, int flags)
2090 {
2091 	if (sched_core_enabled(rq))
2092 		sched_core_dequeue(rq, p, flags);
2093 
2094 	if (!(flags & DEQUEUE_NOCLOCK))
2095 		update_rq_clock(rq);
2096 
2097 	if (!(flags & DEQUEUE_SAVE))
2098 		sched_info_dequeue(rq, p);
2099 
2100 	psi_dequeue(p, flags);
2101 
2102 	/*
2103 	 * Must be before ->dequeue_task() because ->dequeue_task() can 'fail'
2104 	 * and mark the task ->sched_delayed.
2105 	 */
2106 	uclamp_rq_dec(rq, p);
2107 	return p->sched_class->dequeue_task(rq, p, flags);
2108 }
2109 
2110 void activate_task(struct rq *rq, struct task_struct *p, int flags)
2111 {
2112 	if (task_on_rq_migrating(p))
2113 		flags |= ENQUEUE_MIGRATED;
2114 	if (flags & ENQUEUE_MIGRATED)
2115 		sched_mm_cid_migrate_to(rq, p);
2116 
2117 	enqueue_task(rq, p, flags);
2118 
2119 	WRITE_ONCE(p->on_rq, TASK_ON_RQ_QUEUED);
2120 	ASSERT_EXCLUSIVE_WRITER(p->on_rq);
2121 }
2122 
2123 void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
2124 {
2125 	SCHED_WARN_ON(flags & DEQUEUE_SLEEP);
2126 
2127 	WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
2128 	ASSERT_EXCLUSIVE_WRITER(p->on_rq);
2129 
2130 	/*
2131 	 * Code explicitly relies on TASK_ON_RQ_MIGRATING being set *before*
2132 	 * dequeue_task() and cleared *after* enqueue_task().
2133 	 */
2134 
2135 	dequeue_task(rq, p, flags);
2136 }
2137 
2138 static void block_task(struct rq *rq, struct task_struct *p, int flags)
2139 {
2140 	if (dequeue_task(rq, p, DEQUEUE_SLEEP | flags))
2141 		__block_task(rq, p);
2142 }
2143 
2144 /**
2145  * task_curr - is this task currently executing on a CPU?
2146  * @p: the task in question.
2147  *
2148  * Return: 1 if the task is currently executing. 0 otherwise.
2149  */
2150 inline int task_curr(const struct task_struct *p)
2151 {
2152 	return cpu_curr(task_cpu(p)) == p;
2153 }
2154 
2155 /*
2156  * ->switching_to() is called with the pi_lock and rq_lock held and must not
2157  * mess with locking.
2158  */
2159 void check_class_changing(struct rq *rq, struct task_struct *p,
2160 			  const struct sched_class *prev_class)
2161 {
2162 	if (prev_class != p->sched_class && p->sched_class->switching_to)
2163 		p->sched_class->switching_to(rq, p);
2164 }
2165 
2166 /*
2167  * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
2168  * use the balance_callback list if you want balancing.
2169  *
2170  * this means any call to check_class_changed() must be followed by a call to
2171  * balance_callback().
2172  */
2173 void check_class_changed(struct rq *rq, struct task_struct *p,
2174 			 const struct sched_class *prev_class,
2175 			 int oldprio)
2176 {
2177 	if (prev_class != p->sched_class) {
2178 		if (prev_class->switched_from)
2179 			prev_class->switched_from(rq, p);
2180 
2181 		p->sched_class->switched_to(rq, p);
2182 	} else if (oldprio != p->prio || dl_task(p))
2183 		p->sched_class->prio_changed(rq, p, oldprio);
2184 }
2185 
2186 void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags)
2187 {
2188 	struct task_struct *donor = rq->donor;
2189 
2190 	if (p->sched_class == donor->sched_class)
2191 		donor->sched_class->wakeup_preempt(rq, p, flags);
2192 	else if (sched_class_above(p->sched_class, donor->sched_class))
2193 		resched_curr(rq);
2194 
2195 	/*
2196 	 * A queue event has occurred, and we're going to schedule.  In
2197 	 * this case, we can save a useless back to back clock update.
2198 	 * this case, we can save a useless back-to-back clock update.
2199 	if (task_on_rq_queued(donor) && test_tsk_need_resched(rq->curr))
2200 		rq_clock_skip_update(rq);
2201 }
2202 
2203 static __always_inline
2204 int __task_state_match(struct task_struct *p, unsigned int state)
2205 {
2206 	if (READ_ONCE(p->__state) & state)
2207 		return 1;
2208 
2209 	if (READ_ONCE(p->saved_state) & state)
2210 		return -1;
2211 
2212 	return 0;
2213 }
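/*
 * Callers such as wait_task_inactive() interpret the tristate as:
 *
 *	 1 - p->__state matched @state
 *	-1 - only p->saved_state matched (task blocked on an rtlock or
 *	     frozen); such a task is still treated as queued
 *	 0 - neither matched
 */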
2214 
2215 static __always_inline
2216 int task_state_match(struct task_struct *p, unsigned int state)
2217 {
2218 	/*
2219 	 * Serialize against current_save_and_set_rtlock_wait_state(),
2220 	 * current_restore_rtlock_saved_state(), and __refrigerator().
2221 	 */
2222 	guard(raw_spinlock_irq)(&p->pi_lock);
2223 	return __task_state_match(p, state);
2224 }
2225 
2226 /*
2227  * wait_task_inactive - wait for a thread to unschedule.
2228  *
2229  * Wait for the thread to block in any of the states set in @match_state.
2230  * If it changes, i.e. @p might have woken up, then return zero.  When we
2231  * succeed in waiting for @p to be off its CPU, we return a positive number
2232  * (its total switch count).  If a second call a short while later returns the
2233  * same number, the caller can be sure that @p has remained unscheduled the
2234  * whole time.
2235  *
2236  * The caller must ensure that the task *will* unschedule sometime soon,
2237  * else this function might spin for a *long* time. This function can't
2238  * be called with interrupts off, or it may introduce deadlock with
2239  * smp_call_function() if an IPI is sent by the same process we are
2240  * waiting to become inactive.
2241  */
2242 unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
2243 {
2244 	int running, queued, match;
2245 	struct rq_flags rf;
2246 	unsigned long ncsw;
2247 	struct rq *rq;
2248 
2249 	for (;;) {
2250 		/*
2251 		 * We do the initial early heuristics without holding
2252 		 * any task-queue locks at all. We'll only try to get
2253 		 * the runqueue lock when things look like they will
2254 		 * work out!
2255 		 */
2256 		rq = task_rq(p);
2257 
2258 		/*
2259 		 * If the task is actively running on another CPU
2260 		 * still, just relax and busy-wait without holding
2261 		 * any locks.
2262 		 *
2263 		 * NOTE! Since we don't hold any locks, we can't
2264 		 * even be sure that "rq" stays the right runqueue!
2265 		 * But we don't care, since "task_on_cpu()" will
2266 		 * return false if the runqueue has changed and p
2267 		 * is actually now running somewhere else!
2268 		 */
2269 		while (task_on_cpu(rq, p)) {
2270 			if (!task_state_match(p, match_state))
2271 				return 0;
2272 			cpu_relax();
2273 		}
2274 
2275 		/*
2276 		 * Ok, time to look more closely! We need the rq
2277 		 * lock now, to be *sure*. If we're wrong, we'll
2278 		 * just go back and repeat.
2279 		 */
2280 		rq = task_rq_lock(p, &rf);
2281 		trace_sched_wait_task(p);
2282 		running = task_on_cpu(rq, p);
2283 		queued = task_on_rq_queued(p);
2284 		ncsw = 0;
2285 		if ((match = __task_state_match(p, match_state))) {
2286 			/*
2287 			 * When matching on p->saved_state, consider this task
2288 			 * still queued so it will wait.
2289 			 */
2290 			if (match < 0)
2291 				queued = 1;
2292 			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
2293 		}
2294 		task_rq_unlock(rq, p, &rf);
2295 
2296 		/*
2297 		 * If it changed from the expected state, bail out now.
2298 		 */
2299 		if (unlikely(!ncsw))
2300 			break;
2301 
2302 		/*
2303 		 * Was it really running after all now that we
2304 		 * checked with the proper locks actually held?
2305 		 *
2306 		 * Oops. Go back and try again..
2307 		 */
2308 		if (unlikely(running)) {
2309 			cpu_relax();
2310 			continue;
2311 		}
2312 
2313 		/*
2314 		 * It's not enough that it's not actively running,
2315 		 * it must be off the runqueue _entirely_, and not
2316 		 * preempted!
2317 		 *
2318 		 * So if it was still runnable (but just not actively
2319 		 * running right now), it's preempted, and we should
2320 		 * yield - it could be a while.
2321 		 */
2322 		if (unlikely(queued)) {
2323 			ktime_t to = NSEC_PER_SEC / HZ;
2324 
2325 			set_current_state(TASK_UNINTERRUPTIBLE);
2326 			schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD);
2327 			continue;
2328 		}
2329 
2330 		/*
2331 		 * Ahh, all good. It wasn't running, and it wasn't
2332 		 * runnable, which means that it will never become
2333 		 * running in the future either. We're all done!
2334 		 */
2335 		break;
2336 	}
2337 
2338 	return ncsw;
2339 }
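/*
 * Typical use of the returned cookie (sketch with a hypothetical caller):
 *
 *	unsigned long ncsw = wait_task_inactive(p, TASK_TRACED);
 *
 *	... inspect @p while it is off-CPU ...
 *
 *	if (!ncsw || wait_task_inactive(p, TASK_TRACED) != ncsw)
 *		goto retry;	(@p ran in between, state may be stale)
 */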
2340 
2341 #ifdef CONFIG_SMP
2342 
2343 static void
2344 __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx);
2345 
2346 static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
2347 {
2348 	struct affinity_context ac = {
2349 		.new_mask  = cpumask_of(rq->cpu),
2350 		.flags     = SCA_MIGRATE_DISABLE,
2351 	};
2352 
2353 	if (likely(!p->migration_disabled))
2354 		return;
2355 
2356 	if (p->cpus_ptr != &p->cpus_mask)
2357 		return;
2358 
2359 	/*
2360 	 * Violates locking rules! See comment in __do_set_cpus_allowed().
2361 	 */
2362 	__do_set_cpus_allowed(p, &ac);
2363 }
2364 
2365 void migrate_disable(void)
2366 {
2367 	struct task_struct *p = current;
2368 
2369 	if (p->migration_disabled) {
2370 #ifdef CONFIG_DEBUG_PREEMPT
2371 		/*
2372 		 * Warn about overflow half-way through the range.
2373 		 */
2374 		WARN_ON_ONCE((s16)p->migration_disabled < 0);
2375 #endif
2376 		p->migration_disabled++;
2377 		return;
2378 	}
2379 
2380 	guard(preempt)();
2381 	this_rq()->nr_pinned++;
2382 	p->migration_disabled = 1;
2383 }
2384 EXPORT_SYMBOL_GPL(migrate_disable);
2385 
2386 void migrate_enable(void)
2387 {
2388 	struct task_struct *p = current;
2389 	struct affinity_context ac = {
2390 		.new_mask  = &p->cpus_mask,
2391 		.flags     = SCA_MIGRATE_ENABLE,
2392 	};
2393 
2394 #ifdef CONFIG_DEBUG_PREEMPT
2395 	/*
2396 	 * Check both overflow from migrate_disable() and superfluous
2397 	 * migrate_enable().
2398 	 */
2399 	if (WARN_ON_ONCE((s16)p->migration_disabled <= 0))
2400 		return;
2401 #endif
2402 
2403 	if (p->migration_disabled > 1) {
2404 		p->migration_disabled--;
2405 		return;
2406 	}
2407 
2408 	/*
2409 	 * Ensure stop_task runs either before or after this, and that
2410 	 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
2411 	 */
2412 	guard(preempt)();
2413 	if (p->cpus_ptr != &p->cpus_mask)
2414 		__set_cpus_allowed_ptr(p, &ac);
2415 	/*
2416 	 * Mustn't clear migration_disabled() until cpus_ptr points back at the
2417 	 * regular cpus_mask, otherwise things that race (eg.
2418 	 * select_fallback_rq) get confused.
2419 	 */
2420 	barrier();
2421 	p->migration_disabled = 0;
2422 	this_rq()->nr_pinned--;
2423 }
2424 EXPORT_SYMBOL_GPL(migrate_enable);
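/*
 * Usage sketch: keep the current task on its CPU across a per-CPU access
 * (my_counter is a made-up per-CPU variable):
 *
 *	migrate_disable();
 *	this_cpu_inc(my_counter);
 *	migrate_enable();
 *
 * Unlike preempt_disable(), the section may be preempted and, on
 * PREEMPT_RT, may even sleep; the task just won't change CPUs. Calls
 * nest, and only the outermost migrate_enable() re-enables migration.
 */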
2425 
2426 static inline bool rq_has_pinned_tasks(struct rq *rq)
2427 {
2428 	return rq->nr_pinned;
2429 }
2430 
2431 /*
2432  * Per-CPU kthreads are allowed to run on !active && online CPUs, see
2433  * __set_cpus_allowed_ptr() and select_fallback_rq().
2434  */
2435 static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
2436 {
2437 	/* When not in the task's cpumask, no point in looking further. */
2438 	if (!task_allowed_on_cpu(p, cpu))
2439 		return false;
2440 
2441 	/* migrate_disabled() must be allowed to finish. */
2442 	if (is_migration_disabled(p))
2443 		return cpu_online(cpu);
2444 
2445 	/* Non-kernel threads are only allowed on fully active CPUs. */
2446 	if (!(p->flags & PF_KTHREAD))
2447 		return cpu_active(cpu);
2448 
2449 	/* KTHREAD_IS_PER_CPU is always allowed. */
2450 	if (kthread_is_per_cpu(p))
2451 		return cpu_online(cpu);
2452 
2453 	/* Regular kernel threads don't get to stay during offline. */
2454 	if (cpu_dying(cpu))
2455 		return false;
2456 
2457 	/* But are allowed during online. */
2458 	return cpu_online(cpu);
2459 }
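/*
 * Summary of the rules above, assuming the CPU is in p->cpus_mask:
 *
 *	task type		online && !active	active
 *	regular task		no			yes
 *	migration disabled	yes			yes
 *	per-CPU kthread		yes			yes
 *	other kthread		yes, unless dying	yes
 */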
2460 
2461 /*
2462  * This is how migration works:
2463  *
2464  * 1) we invoke migration_cpu_stop() on the target CPU using
2465  *    stop_one_cpu().
2466  * 2) stopper starts to run (implicitly forcing the migrated thread
2467  *    off the CPU)
2468  * 3) it checks whether the migrated task is still in the wrong runqueue.
2469  * 4) if it's in the wrong runqueue then the migration thread removes
2470  *    it and puts it into the right queue.
2471  * 5) stopper completes and stop_one_cpu() returns and the migration
2472  *    is done.
2473  */
2474 
2475 /*
2476  * move_queued_task - move a queued task to new rq.
2477  *
2478  * Returns (locked) new rq. Old rq's lock is released.
2479  */
2480 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
2481 				   struct task_struct *p, int new_cpu)
2482 {
2483 	lockdep_assert_rq_held(rq);
2484 
2485 	deactivate_task(rq, p, DEQUEUE_NOCLOCK);
2486 	set_task_cpu(p, new_cpu);
2487 	rq_unlock(rq, rf);
2488 
2489 	rq = cpu_rq(new_cpu);
2490 
2491 	rq_lock(rq, rf);
2492 	WARN_ON_ONCE(task_cpu(p) != new_cpu);
2493 	activate_task(rq, p, 0);
2494 	wakeup_preempt(rq, p, 0);
2495 
2496 	return rq;
2497 }
2498 
2499 struct migration_arg {
2500 	struct task_struct		*task;
2501 	int				dest_cpu;
2502 	struct set_affinity_pending	*pending;
2503 };
2504 
2505 /*
2506  * @refs: number of wait_for_completion()
2507  * @stop_pending: is @stop_work in use
2508  */
2509 struct set_affinity_pending {
2510 	refcount_t		refs;
2511 	unsigned int		stop_pending;
2512 	struct completion	done;
2513 	struct cpu_stop_work	stop_work;
2514 	struct migration_arg	arg;
2515 };
2516 
2517 /*
2518  * Move (not current) task off this CPU, onto the destination CPU. We're doing
2519  * this because either it can't run here any more (set_cpus_allowed()
2520  * away from this CPU, or CPU going down), or because we're
2521  * attempting to rebalance this task on exec (sched_exec).
2522  *
2523  * So we race with normal scheduler movements, but that's OK, as long
2524  * as the task is no longer on this CPU.
2525  */
2526 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
2527 				 struct task_struct *p, int dest_cpu)
2528 {
2529 	/* Affinity changed (again). */
2530 	if (!is_cpu_allowed(p, dest_cpu))
2531 		return rq;
2532 
2533 	rq = move_queued_task(rq, rf, p, dest_cpu);
2534 
2535 	return rq;
2536 }
2537 
2538 /*
2539  * migration_cpu_stop - this will be executed by a high-prio stopper thread
2540  * and performs thread migration by bumping thread off CPU then
2541  * 'pushing' onto another runqueue.
2542  */
2543 static int migration_cpu_stop(void *data)
2544 {
2545 	struct migration_arg *arg = data;
2546 	struct set_affinity_pending *pending = arg->pending;
2547 	struct task_struct *p = arg->task;
2548 	struct rq *rq = this_rq();
2549 	bool complete = false;
2550 	struct rq_flags rf;
2551 
2552 	/*
2553 	 * The original target CPU might have gone down and we might
2554 	 * be on another CPU but it doesn't matter.
2555 	 */
2556 	local_irq_save(rf.flags);
2557 	/*
2558 	 * We need to explicitly wake pending tasks before running
2559 	 * __migrate_task() such that we will not miss enforcing cpus_ptr
2560 	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
2561 	 */
2562 	flush_smp_call_function_queue();
2563 
2564 	raw_spin_lock(&p->pi_lock);
2565 	rq_lock(rq, &rf);
2566 
2567 	/*
2568 	 * If we were passed a pending, then ->stop_pending was set, thus
2569 	 * p->migration_pending must have remained stable.
2570 	 */
2571 	WARN_ON_ONCE(pending && pending != p->migration_pending);
2572 
2573 	/*
2574 	 * If task_rq(p) != rq, it cannot be migrated here, because we're
2575 	 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
2576 	 * we're holding p->pi_lock.
2577 	 */
2578 	if (task_rq(p) == rq) {
2579 		if (is_migration_disabled(p))
2580 			goto out;
2581 
2582 		if (pending) {
2583 			p->migration_pending = NULL;
2584 			complete = true;
2585 
2586 			if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask))
2587 				goto out;
2588 		}
2589 
2590 		if (task_on_rq_queued(p)) {
2591 			update_rq_clock(rq);
2592 			rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
2593 		} else {
2594 			p->wake_cpu = arg->dest_cpu;
2595 		}
2596 
2597 		/*
2598 		 * XXX __migrate_task() can fail, at which point we might end
2599 		 * up running on a dodgy CPU; AFAICT this can only happen
2600 		 * during CPU hotplug, at which point we'll get pushed out
2601 		 * anyway, so it's probably not a big deal.
2602 		 */
2603 
2604 	} else if (pending) {
2605 		/*
2606 		 * This happens when we get migrated between migrate_enable()'s
2607 		 * preempt_enable() and scheduling the stopper task. At that
2608 		 * point we're a regular task again and not current anymore.
2609 		 *
2610 		 * A !PREEMPT kernel has a giant hole here, which makes it far
2611 		 * more likely.
2612 		 */
2613 
2614 		/*
2615 		 * The task moved before the stopper got to run. We're holding
2616 		 * ->pi_lock, so the allowed mask is stable - if it got
2617 		 * somewhere allowed, we're done.
2618 		 */
2619 		if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) {
2620 			p->migration_pending = NULL;
2621 			complete = true;
2622 			goto out;
2623 		}
2624 
2625 		/*
2626 		 * When migrate_enable() hits a rq mis-match we can't reliably
2627 		 * determine is_migration_disabled() and so have to chase after
2628 		 * it.
2629 		 */
2630 		WARN_ON_ONCE(!pending->stop_pending);
2631 		preempt_disable();
2632 		task_rq_unlock(rq, p, &rf);
2633 		stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
2634 				    &pending->arg, &pending->stop_work);
2635 		preempt_enable();
2636 		return 0;
2637 	}
2638 out:
2639 	if (pending)
2640 		pending->stop_pending = false;
2641 	task_rq_unlock(rq, p, &rf);
2642 
2643 	if (complete)
2644 		complete_all(&pending->done);
2645 
2646 	return 0;
2647 }
2648 
2649 int push_cpu_stop(void *arg)
2650 {
2651 	struct rq *lowest_rq = NULL, *rq = this_rq();
2652 	struct task_struct *p = arg;
2653 
2654 	raw_spin_lock_irq(&p->pi_lock);
2655 	raw_spin_rq_lock(rq);
2656 
2657 	if (task_rq(p) != rq)
2658 		goto out_unlock;
2659 
2660 	if (is_migration_disabled(p)) {
2661 		p->migration_flags |= MDF_PUSH;
2662 		goto out_unlock;
2663 	}
2664 
2665 	p->migration_flags &= ~MDF_PUSH;
2666 
2667 	if (p->sched_class->find_lock_rq)
2668 		lowest_rq = p->sched_class->find_lock_rq(p, rq);
2669 
2670 	if (!lowest_rq)
2671 		goto out_unlock;
2672 
2673 	// XXX validate p is still the highest prio task
2674 	if (task_rq(p) == rq) {
2675 		move_queued_task_locked(rq, lowest_rq, p);
2676 		resched_curr(lowest_rq);
2677 	}
2678 
2679 	double_unlock_balance(rq, lowest_rq);
2680 
2681 out_unlock:
2682 	rq->push_busy = false;
2683 	raw_spin_rq_unlock(rq);
2684 	raw_spin_unlock_irq(&p->pi_lock);
2685 
2686 	put_task_struct(p);
2687 	return 0;
2688 }
2689 
2690 /*
2691  * sched_class::set_cpus_allowed must do the below, but is not required to
2692  * actually call this function.
2693  */
2694 void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx)
2695 {
2696 	if (ctx->flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) {
2697 		p->cpus_ptr = ctx->new_mask;
2698 		return;
2699 	}
2700 
2701 	cpumask_copy(&p->cpus_mask, ctx->new_mask);
2702 	p->nr_cpus_allowed = cpumask_weight(ctx->new_mask);
2703 
2704 	/*
2705 	 * Swap in a new user_cpus_ptr if SCA_USER flag set
2706 	 */
2707 	if (ctx->flags & SCA_USER)
2708 		swap(p->user_cpus_ptr, ctx->user_mask);
2709 }
2710 
2711 static void
2712 __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
2713 {
2714 	struct rq *rq = task_rq(p);
2715 	bool queued, running;
2716 
2717 	/*
2718 	 * This here violates the locking rules for affinity, since we're only
2719 	 * supposed to change these variables while holding both rq->lock and
2720 	 * p->pi_lock.
2721 	 *
2722 	 * HOWEVER, it magically works, because ttwu() is the only code that
2723 	 * accesses these variables under p->pi_lock and only does so after
2724 	 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
2725 	 * before finish_task().
2726 	 *
2727 	 * XXX do further audits, this smells like something putrid.
2728 	 */
2729 	if (ctx->flags & SCA_MIGRATE_DISABLE)
2730 		SCHED_WARN_ON(!p->on_cpu);
2731 	else
2732 		lockdep_assert_held(&p->pi_lock);
2733 
2734 	queued = task_on_rq_queued(p);
2735 	running = task_current_donor(rq, p);
2736 
2737 	if (queued) {
2738 		/*
2739 		 * Because __kthread_bind() calls this on blocked tasks without
2740 		 * holding rq->lock.
2741 		 */
2742 		lockdep_assert_rq_held(rq);
2743 		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
2744 	}
2745 	if (running)
2746 		put_prev_task(rq, p);
2747 
2748 	p->sched_class->set_cpus_allowed(p, ctx);
2749 	mm_set_cpus_allowed(p->mm, ctx->new_mask);
2750 
2751 	if (queued)
2752 		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
2753 	if (running)
2754 		set_next_task(rq, p);
2755 }
2756 
2757 /*
2758  * Used for kthread_bind() and select_fallback_rq(); in both cases the user
2759  * affinity (if any) should be destroyed too.
2760  */
2761 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
2762 {
2763 	struct affinity_context ac = {
2764 		.new_mask  = new_mask,
2765 		.user_mask = NULL,
2766 		.flags     = SCA_USER,	/* clear the user requested mask */
2767 	};
2768 	union cpumask_rcuhead {
2769 		cpumask_t cpumask;
2770 		struct rcu_head rcu;
2771 	};
2772 
2773 	__do_set_cpus_allowed(p, &ac);
2774 
2775 	/*
2776 	 * Because this is called with p->pi_lock held, it is not possible
2777 	 * to use kfree() here (when PREEMPT_RT=y), therefore punt to using
2778 	 * kfree_rcu().
2779 	 */
2780 	kfree_rcu((union cpumask_rcuhead *)ac.user_mask, rcu);
2781 }
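/*
 * Note the trick above: the rcu_head is overlaid on the cpumask storage
 * itself, so the just-detached user mask can be handed to kfree_rcu()
 * without allocating a separate head while holding p->pi_lock. This is
 * safe because nothing dereferences the mask once it has been swapped
 * out.
 */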
2782 
2783 int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
2784 		      int node)
2785 {
2786 	cpumask_t *user_mask;
2787 	unsigned long flags;
2788 
2789 	/*
2790 	 * Always clear dst->user_cpus_ptr first, as src's and dst's
2791 	 * user_cpus_ptr may already differ due to racing.
2792 	 */
2793 	dst->user_cpus_ptr = NULL;
2794 
2795 	/*
2796 	 * This check is racy and losing the race is a valid situation.
2797 	 * It is not worth the extra overhead of taking the pi_lock on
2798 	 * every fork/clone.
2799 	 */
2800 	if (data_race(!src->user_cpus_ptr))
2801 		return 0;
2802 
2803 	user_mask = alloc_user_cpus_ptr(node);
2804 	if (!user_mask)
2805 		return -ENOMEM;
2806 
2807 	/*
2808 	 * Use pi_lock to protect content of user_cpus_ptr
2809 	 *
2810 	 * Though unlikely, user_cpus_ptr can be reset to NULL by a concurrent
2811 	 * do_set_cpus_allowed().
2812 	 */
2813 	raw_spin_lock_irqsave(&src->pi_lock, flags);
2814 	if (src->user_cpus_ptr) {
2815 		swap(dst->user_cpus_ptr, user_mask);
2816 		cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
2817 	}
2818 	raw_spin_unlock_irqrestore(&src->pi_lock, flags);
2819 
2820 	if (unlikely(user_mask))
2821 		kfree(user_mask);
2822 
2823 	return 0;
2824 }
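/*
 * The shape of the above is the usual optimistic-allocation pattern: a
 * lockless data_race() check skips the allocation in the common (NULL)
 * case, the pi_lock section re-checks and swaps, and a now-superfluous
 * allocation is freed after dropping the lock.
 */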
2825 
2826 static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p)
2827 {
2828 	struct cpumask *user_mask = NULL;
2829 
2830 	swap(p->user_cpus_ptr, user_mask);
2831 
2832 	return user_mask;
2833 }
2834 
2835 void release_user_cpus_ptr(struct task_struct *p)
2836 {
2837 	kfree(clear_user_cpus_ptr(p));
2838 }
2839 
2840 /*
2841  * This function is wildly self-concurrent; here be dragons.
2842  *
2843  *
2844  * When given a valid mask, __set_cpus_allowed_ptr() must block until the
2845  * designated task is enqueued on an allowed CPU. If that task is currently
2846  * running, we have to kick it out using the CPU stopper.
2847  *
2848  * Migrate-Disable comes along and tramples all over our nice sandcastle.
2849  * Consider:
2850  *
2851  *     Initial conditions: P0->cpus_mask = [0, 1]
2852  *
2853  *     P0@CPU0                  P1
2854  *
2855  *     migrate_disable();
2856  *     <preempted>
2857  *                              set_cpus_allowed_ptr(P0, [1]);
2858  *
2859  * P1 *cannot* return from this set_cpus_allowed_ptr() call until P0 executes
2860  * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region).
2861  * This means we need the following scheme:
2862  *
2863  *     P0@CPU0                  P1
2864  *
2865  *     migrate_disable();
2866  *     <preempted>
2867  *                              set_cpus_allowed_ptr(P0, [1]);
2868  *                                <blocks>
2869  *     <resumes>
2870  *     migrate_enable();
2871  *       __set_cpus_allowed_ptr();
2872  *       <wakes local stopper>
2873  *                         `--> <woken on migration completion>
2874  *
2875  * Now the fun stuff: there may be several P1-like tasks, i.e. multiple
2876  * concurrent set_cpus_allowed_ptr(P0, [*]) calls. CPU affinity changes of any
2877  * task p are serialized by p->pi_lock, which we can leverage: the one that
2878  * should come into effect at the end of the Migrate-Disable region is the last
2879  * one. This means we only need to track a single cpumask (i.e. p->cpus_mask),
2880  * but we still need to properly signal those waiting tasks at the appropriate
2881  * moment.
2882  *
2883  * This is implemented using struct set_affinity_pending. The first
2884  * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will
2885  * setup an instance of that struct and install it on the targeted task_struct.
2886  * Any and all further callers will reuse that instance. Those then wait for
2887  * a completion signaled at the tail of the CPU stopper callback (1), triggered
2888  * at the end of the Migrate-Disable region (i.e. outermost migrate_enable()).
2889  *
2890  *
2891  * (1) In the cases covered above. There is one more where the completion is
2892  * signaled within affine_move_task() itself: when a subsequent affinity request
2893  * occurs after the stopper bailed out due to the targeted task still being
2894  * Migrate-Disable. Consider:
2895  *
2896  *     Initial conditions: P0->cpus_mask = [0, 1]
2897  *
2898  *     CPU0		  P1				P2
2899  *     <P0>
2900  *       migrate_disable();
2901  *       <preempted>
2902  *                        set_cpus_allowed_ptr(P0, [1]);
2903  *                          <blocks>
2904  *     <migration/0>
2905  *       migration_cpu_stop()
2906  *         is_migration_disabled()
2907  *           <bails>
2908  *                                                       set_cpus_allowed_ptr(P0, [0, 1]);
2909  *                                                         <signal completion>
2910  *                          <awakes>
2911  *
2912  * Note that the above is safe vs a concurrent migrate_enable(), as any
2913  * pending affinity completion is preceded by an uninstallation of
2914  * p->migration_pending done with p->pi_lock held.
2915  */
2916 static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf,
2917 			    int dest_cpu, unsigned int flags)
2918 	__releases(rq->lock)
2919 	__releases(p->pi_lock)
2920 {
2921 	struct set_affinity_pending my_pending = { }, *pending = NULL;
2922 	bool stop_pending, complete = false;
2923 
2924 	/* Can the task run on the task's current CPU? If so, we're done */
2925 	if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
2926 		struct task_struct *push_task = NULL;
2927 
2928 		if ((flags & SCA_MIGRATE_ENABLE) &&
2929 		    (p->migration_flags & MDF_PUSH) && !rq->push_busy) {
2930 			rq->push_busy = true;
2931 			push_task = get_task_struct(p);
2932 		}
2933 
2934 		/*
2935 		 * If there are pending waiters, but no pending stop_work,
2936 		 * then complete now.
2937 		 */
2938 		pending = p->migration_pending;
2939 		if (pending && !pending->stop_pending) {
2940 			p->migration_pending = NULL;
2941 			complete = true;
2942 		}
2943 
2944 		preempt_disable();
2945 		task_rq_unlock(rq, p, rf);
2946 		if (push_task) {
2947 			stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
2948 					    p, &rq->push_work);
2949 		}
2950 		preempt_enable();
2951 
2952 		if (complete)
2953 			complete_all(&pending->done);
2954 
2955 		return 0;
2956 	}
2957 
2958 	if (!(flags & SCA_MIGRATE_ENABLE)) {
2959 		/* serialized by p->pi_lock */
2960 		if (!p->migration_pending) {
2961 			/* Install the request */
2962 			refcount_set(&my_pending.refs, 1);
2963 			init_completion(&my_pending.done);
2964 			my_pending.arg = (struct migration_arg) {
2965 				.task = p,
2966 				.dest_cpu = dest_cpu,
2967 				.pending = &my_pending,
2968 			};
2969 
2970 			p->migration_pending = &my_pending;
2971 		} else {
2972 			pending = p->migration_pending;
2973 			refcount_inc(&pending->refs);
2974 			/*
2975 			 * Affinity has changed, but we've already installed a
2976 			 * pending. migration_cpu_stop() *must* see this, else
2977 			 * we risk a completion of the pending despite having a
2978 			 * task on a disallowed CPU.
2979 			 *
2980 			 * Serialized by p->pi_lock, so this is safe.
2981 			 */
2982 			pending->arg.dest_cpu = dest_cpu;
2983 		}
2984 	}
2985 	pending = p->migration_pending;
2986 	/*
2987 	 * - !MIGRATE_ENABLE:
2988 	 *   we'll have installed a pending if there wasn't one already.
2989 	 *
2990 	 * - MIGRATE_ENABLE:
2991 	 *   we're here because the current CPU isn't matching anymore,
2992 	 *   the only way that can happen is because of a concurrent
2993 	 *   set_cpus_allowed_ptr() call, which should then still be
2994 	 *   pending completion.
2995 	 *
2996 	 * Either way, we really should have a @pending here.
2997 	 */
2998 	if (WARN_ON_ONCE(!pending)) {
2999 		task_rq_unlock(rq, p, rf);
3000 		return -EINVAL;
3001 	}
3002 
3003 	if (task_on_cpu(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) {
3004 		/*
3005 		 * MIGRATE_ENABLE gets here because 'p == current', but for
3006 		 * anything else we cannot reliably evaluate is_migration_disabled();
3007 		 * punt and have the stopper function handle it all race-free.
3008 		 */
3009 		stop_pending = pending->stop_pending;
3010 		if (!stop_pending)
3011 			pending->stop_pending = true;
3012 
3013 		if (flags & SCA_MIGRATE_ENABLE)
3014 			p->migration_flags &= ~MDF_PUSH;
3015 
3016 		preempt_disable();
3017 		task_rq_unlock(rq, p, rf);
3018 		if (!stop_pending) {
3019 			stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
3020 					    &pending->arg, &pending->stop_work);
3021 		}
3022 		preempt_enable();
3023 
3024 		if (flags & SCA_MIGRATE_ENABLE)
3025 			return 0;
3026 	} else {
3027 
3028 		if (!is_migration_disabled(p)) {
3029 			if (task_on_rq_queued(p))
3030 				rq = move_queued_task(rq, rf, p, dest_cpu);
3031 
3032 			if (!pending->stop_pending) {
3033 				p->migration_pending = NULL;
3034 				complete = true;
3035 			}
3036 		}
3037 		task_rq_unlock(rq, p, rf);
3038 
3039 		if (complete)
3040 			complete_all(&pending->done);
3041 	}
3042 
3043 	wait_for_completion(&pending->done);
3044 
3045 	if (refcount_dec_and_test(&pending->refs))
3046 		wake_up_var(&pending->refs); /* No UaF, just an address */
3047 
3048 	/*
3049 	 * Block the original owner of &pending until all subsequent callers
3050 	 * have seen the completion and decremented the refcount
3051 	 */
3052 	wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs));
3053 
3054 	/* ARGH */
3055 	WARN_ON_ONCE(my_pending.stop_pending);
3056 
3057 	return 0;
3058 }
3059 
3060 /*
3061  * Called with both p->pi_lock and rq->lock held; drops both before returning.
3062  */
3063 static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
3064 					 struct affinity_context *ctx,
3065 					 struct rq *rq,
3066 					 struct rq_flags *rf)
3067 	__releases(rq->lock)
3068 	__releases(p->pi_lock)
3069 {
3070 	const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
3071 	const struct cpumask *cpu_valid_mask = cpu_active_mask;
3072 	bool kthread = p->flags & PF_KTHREAD;
3073 	unsigned int dest_cpu;
3074 	int ret = 0;
3075 
3076 	update_rq_clock(rq);
3077 
3078 	if (kthread || is_migration_disabled(p)) {
3079 		/*
3080 		 * Kernel threads are allowed on online && !active CPUs,
3081 		 * however, during cpu-hot-unplug, even these might get pushed
3082 		 * away if not KTHREAD_IS_PER_CPU.
3083 		 *
3084 		 * Specifically, migration_disabled() tasks must not fail the
3085 		 * cpumask_any_and_distribute() pick below, esp. so on
3086 		 * SCA_MIGRATE_ENABLE, otherwise we'll not call
3087 		 * set_cpus_allowed_common() and actually reset p->cpus_ptr.
3088 		 */
3089 		cpu_valid_mask = cpu_online_mask;
3090 	}
3091 
3092 	if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) {
3093 		ret = -EINVAL;
3094 		goto out;
3095 	}
3096 
3097 	/*
3098 	 * Must re-check here, to close a race against __kthread_bind(),
3099 	 * sched_setaffinity() is not guaranteed to observe the flag.
3100 	 */
3101 	if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
3102 		ret = -EINVAL;
3103 		goto out;
3104 	}
3105 
3106 	if (!(ctx->flags & SCA_MIGRATE_ENABLE)) {
3107 		if (cpumask_equal(&p->cpus_mask, ctx->new_mask)) {
3108 			if (ctx->flags & SCA_USER)
3109 				swap(p->user_cpus_ptr, ctx->user_mask);
3110 			goto out;
3111 		}
3112 
3113 		if (WARN_ON_ONCE(p == current &&
3114 				 is_migration_disabled(p) &&
3115 				 !cpumask_test_cpu(task_cpu(p), ctx->new_mask))) {
3116 			ret = -EBUSY;
3117 			goto out;
3118 		}
3119 	}
3120 
3121 	/*
3122 	 * Picking a ~random cpu helps in cases where we are changing affinity
3123 	 * for groups of tasks (i.e. cpuset), so that load balancing is not
3124 	 * immediately required to distribute the tasks within their new mask.
3125 	 */
3126 	dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, ctx->new_mask);
3127 	if (dest_cpu >= nr_cpu_ids) {
3128 		ret = -EINVAL;
3129 		goto out;
3130 	}
3131 
3132 	__do_set_cpus_allowed(p, ctx);
3133 
3134 	return affine_move_task(rq, p, rf, dest_cpu, ctx->flags);
3135 
3136 out:
3137 	task_rq_unlock(rq, p, rf);
3138 
3139 	return ret;
3140 }
3141 
3142 /*
3143  * Change a given task's CPU affinity. Migrate the thread to a
3144  * proper CPU and schedule it away if the CPU it's executing on
3145  * is removed from the allowed bitmask.
3146  *
3147  * NOTE: the caller must have a valid reference to the task, the
3148  * task must not exit() & deallocate itself prematurely. The
3149  * call is not atomic; no spinlocks may be held.
3150  */
3151 int __set_cpus_allowed_ptr(struct task_struct *p, struct affinity_context *ctx)
3152 {
3153 	struct rq_flags rf;
3154 	struct rq *rq;
3155 
3156 	rq = task_rq_lock(p, &rf);
3157 	/*
3158 	 * Masking should be skipped if SCA_USER or any of the SCA_MIGRATE_*
3159 	 * flags are set.
3160 	 */
3161 	if (p->user_cpus_ptr &&
3162 	    !(ctx->flags & (SCA_USER | SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) &&
3163 	    cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr))
3164 		ctx->new_mask = rq->scratch_mask;
3165 
3166 	return __set_cpus_allowed_ptr_locked(p, ctx, rq, &rf);
3167 }
3168 
3169 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
3170 {
3171 	struct affinity_context ac = {
3172 		.new_mask  = new_mask,
3173 		.flags     = 0,
3174 	};
3175 
3176 	return __set_cpus_allowed_ptr(p, &ac);
3177 }
3178 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
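/*
 * Module-facing usage sketch (my_thread_fn is a made-up kthread
 * function):
 *
 *	struct task_struct *tsk = kthread_create(my_thread_fn, NULL, "my_kthread");
 *
 *	if (!IS_ERR(tsk)) {
 *		set_cpus_allowed_ptr(tsk, cpumask_of(2));
 *		wake_up_process(tsk);
 *	}
 *
 * For a hard per-CPU binding, kthread_bind() is preferable since it also
 * sets PF_NO_SETAFFINITY and thereby rejects later sched_setaffinity()
 * calls.
 */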
3179 
3180 /*
3181  * Change a given task's CPU affinity to the intersection of its current
3182  * affinity mask and @subset_mask, writing the resulting mask to @new_mask.
3183  * If user_cpus_ptr is defined, use it as the basis for restricting CPU
3184  * affinity or use cpu_online_mask instead.
3185  *
3186  * If the resulting mask is empty, leave the affinity unchanged and return
3187  * -EINVAL.
3188  */
3189 static int restrict_cpus_allowed_ptr(struct task_struct *p,
3190 				     struct cpumask *new_mask,
3191 				     const struct cpumask *subset_mask)
3192 {
3193 	struct affinity_context ac = {
3194 		.new_mask  = new_mask,
3195 		.flags     = 0,
3196 	};
3197 	struct rq_flags rf;
3198 	struct rq *rq;
3199 	int err;
3200 
3201 	rq = task_rq_lock(p, &rf);
3202 
3203 	/*
3204 	 * Forcefully restricting the affinity of a deadline task is
3205 	 * likely to cause problems, so fail and noisily override the
3206 	 * mask entirely.
3207 	 */
3208 	if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
3209 		err = -EPERM;
3210 		goto err_unlock;
3211 	}
3212 
3213 	if (!cpumask_and(new_mask, task_user_cpus(p), subset_mask)) {
3214 		err = -EINVAL;
3215 		goto err_unlock;
3216 	}
3217 
3218 	return __set_cpus_allowed_ptr_locked(p, &ac, rq, &rf);
3219 
3220 err_unlock:
3221 	task_rq_unlock(rq, p, &rf);
3222 	return err;
3223 }
3224 
3225 /*
3226  * Restrict the CPU affinity of task @p so that it is a subset of
3227  * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
3228  * old affinity mask. If the resulting mask is empty, we warn and walk
3229  * up the cpuset hierarchy until we find a suitable mask.
3230  */
3231 void force_compatible_cpus_allowed_ptr(struct task_struct *p)
3232 {
3233 	cpumask_var_t new_mask;
3234 	const struct cpumask *override_mask = task_cpu_possible_mask(p);
3235 
3236 	alloc_cpumask_var(&new_mask, GFP_KERNEL);
3237 
3238 	/*
3239 	 * __migrate_task() can fail silently in the face of concurrent
3240 	 * offlining of the chosen destination CPU, so take the hotplug
3241 	 * lock to ensure that the migration succeeds.
3242 	 */
3243 	cpus_read_lock();
3244 	if (!cpumask_available(new_mask))
3245 		goto out_set_mask;
3246 
3247 	if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask))
3248 		goto out_free_mask;
3249 
3250 	/*
3251 	 * We failed to find a valid subset of the affinity mask for the
3252 	 * task, so override it based on its cpuset hierarchy.
3253 	 */
3254 	cpuset_cpus_allowed(p, new_mask);
3255 	override_mask = new_mask;
3256 
3257 out_set_mask:
3258 	if (printk_ratelimit()) {
3259 		printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
3260 				task_pid_nr(p), p->comm,
3261 				cpumask_pr_args(override_mask));
3262 	}
3263 
3264 	WARN_ON(set_cpus_allowed_ptr(p, override_mask));
3265 out_free_mask:
3266 	cpus_read_unlock();
3267 	free_cpumask_var(new_mask);
3268 }
3269 
3270 /*
3271  * Restore the affinity of a task @p which was previously restricted by a
3272  * call to force_compatible_cpus_allowed_ptr().
3273  *
3274  * It is the caller's responsibility to serialise this with any calls to
3275  * force_compatible_cpus_allowed_ptr(@p).
3276  */
3277 void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
3278 {
3279 	struct affinity_context ac = {
3280 		.new_mask  = task_user_cpus(p),
3281 		.flags     = 0,
3282 	};
3283 	int ret;
3284 
3285 	/*
3286 	 * Try to restore the old affinity mask with __sched_setaffinity().
3287 	 * Cpuset masking will be done there too.
3288 	 */
3289 	ret = __sched_setaffinity(p, &ac);
3290 	WARN_ON_ONCE(ret);
3291 }
3292 
3293 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
3294 {
3295 #ifdef CONFIG_SCHED_DEBUG
3296 	unsigned int state = READ_ONCE(p->__state);
3297 
3298 	/*
3299 	 * We should never call set_task_cpu() on a blocked task,
3300 	 * ttwu() will sort out the placement.
3301 	 */
3302 	WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq);
3303 
3304 	/*
3305 	 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
3306 	 * because schedstat_wait_{start,end} rebase migrating task's wait_start
3307 	 * time relying on p->on_rq.
3308 	 */
3309 	WARN_ON_ONCE(state == TASK_RUNNING &&
3310 		     p->sched_class == &fair_sched_class &&
3311 		     (p->on_rq && !task_on_rq_migrating(p)));
3312 
3313 #ifdef CONFIG_LOCKDEP
3314 	/*
3315 	 * The caller should hold either p->pi_lock or rq->lock, when changing
3316 	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
3317 	 *
3318 	 * sched_move_task() holds both and thus holding either pins the cgroup,
3319 	 * see task_group().
3320 	 *
3321 	 * Furthermore, all task_rq users should acquire both locks, see
3322 	 * task_rq_lock().
3323 	 */
3324 	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
3325 				      lockdep_is_held(__rq_lockp(task_rq(p)))));
3326 #endif
3327 	/*
3328 	 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
3329 	 */
3330 	WARN_ON_ONCE(!cpu_online(new_cpu));
3331 
3332 	WARN_ON_ONCE(is_migration_disabled(p));
3333 #endif
3334 
3335 	trace_sched_migrate_task(p, new_cpu);
3336 
3337 	if (task_cpu(p) != new_cpu) {
3338 		if (p->sched_class->migrate_task_rq)
3339 			p->sched_class->migrate_task_rq(p, new_cpu);
3340 		p->se.nr_migrations++;
3341 		rseq_migrate(p);
3342 		sched_mm_cid_migrate_from(p);
3343 		perf_event_task_migrate(p);
3344 	}
3345 
3346 	__set_task_cpu(p, new_cpu);
3347 }
3348 
3349 #ifdef CONFIG_NUMA_BALANCING
3350 static void __migrate_swap_task(struct task_struct *p, int cpu)
3351 {
3352 	if (task_on_rq_queued(p)) {
3353 		struct rq *src_rq, *dst_rq;
3354 		struct rq_flags srf, drf;
3355 
3356 		src_rq = task_rq(p);
3357 		dst_rq = cpu_rq(cpu);
3358 
3359 		rq_pin_lock(src_rq, &srf);
3360 		rq_pin_lock(dst_rq, &drf);
3361 
3362 		move_queued_task_locked(src_rq, dst_rq, p);
3363 		wakeup_preempt(dst_rq, p, 0);
3364 
3365 		rq_unpin_lock(dst_rq, &drf);
3366 		rq_unpin_lock(src_rq, &srf);
3367 
3368 	} else {
3369 		/*
3370 		 * Task isn't running anymore; make it appear like we migrated
3371 		 * it before it went to sleep. This means on wakeup we make the
3372 		 * previous CPU our target instead of where it really is.
3373 		 */
3374 		p->wake_cpu = cpu;
3375 	}
3376 }
3377 
3378 struct migration_swap_arg {
3379 	struct task_struct *src_task, *dst_task;
3380 	int src_cpu, dst_cpu;
3381 };
3382 
3383 static int migrate_swap_stop(void *data)
3384 {
3385 	struct migration_swap_arg *arg = data;
3386 	struct rq *src_rq, *dst_rq;
3387 
3388 	if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
3389 		return -EAGAIN;
3390 
3391 	src_rq = cpu_rq(arg->src_cpu);
3392 	dst_rq = cpu_rq(arg->dst_cpu);
3393 
3394 	guard(double_raw_spinlock)(&arg->src_task->pi_lock, &arg->dst_task->pi_lock);
3395 	guard(double_rq_lock)(src_rq, dst_rq);
3396 
3397 	if (task_cpu(arg->dst_task) != arg->dst_cpu)
3398 		return -EAGAIN;
3399 
3400 	if (task_cpu(arg->src_task) != arg->src_cpu)
3401 		return -EAGAIN;
3402 
3403 	if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr))
3404 		return -EAGAIN;
3405 
3406 	if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr))
3407 		return -EAGAIN;
3408 
3409 	__migrate_swap_task(arg->src_task, arg->dst_cpu);
3410 	__migrate_swap_task(arg->dst_task, arg->src_cpu);
3411 
3412 	return 0;
3413 }
3414 
3415 /*
3416  * Cross migrate two tasks
3417  */
3418 int migrate_swap(struct task_struct *cur, struct task_struct *p,
3419 		int target_cpu, int curr_cpu)
3420 {
3421 	struct migration_swap_arg arg;
3422 	int ret = -EINVAL;
3423 
3424 	arg = (struct migration_swap_arg){
3425 		.src_task = cur,
3426 		.src_cpu = curr_cpu,
3427 		.dst_task = p,
3428 		.dst_cpu = target_cpu,
3429 	};
3430 
3431 	if (arg.src_cpu == arg.dst_cpu)
3432 		goto out;
3433 
3434 	/*
3435 	 * These three tests are all lockless; this is OK since all of them
3436 	 * will be re-checked with proper locks held further down the line.
3437 	 */
3438 	if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
3439 		goto out;
3440 
3441 	if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr))
3442 		goto out;
3443 
3444 	if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr))
3445 		goto out;
3446 
3447 	trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
3448 	ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);
3449 
3450 out:
3451 	return ret;
3452 }
3453 #endif /* CONFIG_NUMA_BALANCING */
3454 
3455 /***
3456  * kick_process - kick a running thread to enter/exit the kernel
3457  * @p: the to-be-kicked thread
3458  *
3459  * Cause a process which is running on another CPU to enter
3460  * kernel-mode, without any delay. (to get signals handled.)
3461  *
3462  * NOTE: this function doesn't have to take the runqueue lock,
3463  * because all it wants to ensure is that the remote task enters
3464  * the kernel. If the IPI races and the task has been migrated
3465  * to another CPU then no harm is done and the purpose has been
3466  * achieved as well.
3467  */
3468 void kick_process(struct task_struct *p)
3469 {
3470 	guard(preempt)();
3471 	int cpu = task_cpu(p);
3472 
3473 	if ((cpu != smp_processor_id()) && task_curr(p))
3474 		smp_send_reschedule(cpu);
3475 }
3476 EXPORT_SYMBOL_GPL(kick_process);
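/*
 * Illustrative sketch (not a verbatim caller): signal delivery does
 * roughly this, first setting per-task work and then kicking the task
 * so the remote CPU notices it on its next kernel entry; 'p' here is
 * hypothetical:
 *
 *	set_tsk_thread_flag(p, TIF_SIGPENDING);
 *	kick_process(p);
 */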
3477 
3478 /*
3479  * ->cpus_ptr is protected by both rq->lock and p->pi_lock
3480  *
3481  * A few notes on cpu_active vs cpu_online:
3482  *
3483  *  - cpu_active must be a subset of cpu_online
3484  *
3485  *  - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
3486  *    see __set_cpus_allowed_ptr(). At this point the newly online
3487  *    CPU isn't yet part of the sched domains, and balancing will not
3488  *    see it.
3489  *
3490  *  - on CPU-down we clear cpu_active() to mask the sched domains and
3491  *    prevent the load balancer from placing new tasks on the
3492  *    to-be-removed CPU. Existing tasks will remain running there and
3493  *    will be taken off.
3494  *
3495  * This means that fallback selection must not select !active CPUs,
3496  * and may assume that any active CPU must be online. Conversely,
3497  * select_task_rq() below may allow selection of !active CPUs in order
3498  * to satisfy the above rules.
3499  */
3500 static int select_fallback_rq(int cpu, struct task_struct *p)
3501 {
3502 	int nid = cpu_to_node(cpu);
3503 	const struct cpumask *nodemask = NULL;
3504 	enum { cpuset, possible, fail } state = cpuset;
3505 	int dest_cpu;
3506 
3507 	/*
3508 	 * If the node that the CPU is on has been offlined, cpu_to_node()
3509 	 * will return -1. There is no CPU on the node, and we should
3510 	 * select a CPU on another node.
3511 	 */
3512 	if (nid != -1) {
3513 		nodemask = cpumask_of_node(nid);
3514 
3515 		/* Look for allowed, online CPU in same node. */
3516 		for_each_cpu(dest_cpu, nodemask) {
3517 			if (is_cpu_allowed(p, dest_cpu))
3518 				return dest_cpu;
3519 		}
3520 	}
3521 
3522 	for (;;) {
3523 		/* Any allowed, online CPU? */
3524 		for_each_cpu(dest_cpu, p->cpus_ptr) {
3525 			if (!is_cpu_allowed(p, dest_cpu))
3526 				continue;
3527 
3528 			goto out;
3529 		}
3530 
3531 		/* No more Mr. Nice Guy. */
3532 		switch (state) {
3533 		case cpuset:
3534 			if (cpuset_cpus_allowed_fallback(p)) {
3535 				state = possible;
3536 				break;
3537 			}
3538 			fallthrough;
3539 		case possible:
3540 			/*
3541 			 * XXX When called from select_task_rq() we only
3542 			 * hold p->pi_lock and again violate locking order.
3543 			 *
3544 			 * More yuck to audit.
3545 			 */
3546 			do_set_cpus_allowed(p, task_cpu_fallback_mask(p));
3547 			state = fail;
3548 			break;
3549 		case fail:
3550 			BUG();
3551 			break;
3552 		}
3553 	}
3554 
3555 out:
3556 	if (state != cpuset) {
3557 		/*
3558 		 * Don't tell them about moving exiting tasks or
3559 		 * kernel threads (both mm NULL), since they never
3560 		 * leave kernel.
3561 		 * leave the kernel.
3562 		if (p->mm && printk_ratelimit()) {
3563 			printk_deferred("process %d (%s) no longer affine to cpu%d\n",
3564 					task_pid_nr(p), p->comm, cpu);
3565 		}
3566 	}
3567 
3568 	return dest_cpu;
3569 }
3570 
3571 /*
3572  * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
3573  */
3574 static inline
3575 int select_task_rq(struct task_struct *p, int cpu, int *wake_flags)
3576 {
3577 	lockdep_assert_held(&p->pi_lock);
3578 
3579 	if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p)) {
3580 		cpu = p->sched_class->select_task_rq(p, cpu, *wake_flags);
3581 		*wake_flags |= WF_RQ_SELECTED;
3582 	} else {
3583 		cpu = cpumask_any(p->cpus_ptr);
3584 	}
3585 
3586 	/*
3587 	 * In order not to call set_task_cpu() on a blocking task we need
3588 	 * to rely on ttwu() to place the task on a valid ->cpus_ptr
3589 	 * CPU.
3590 	 *
3591 	 * Since this is common to all placement strategies, this lives here.
3592 	 *
3593 	 * [ this allows ->select_task() to simply return task_cpu(p) and
3594 	 *   not worry about this generic constraint ]
3595 	 */
3596 	if (unlikely(!is_cpu_allowed(p, cpu)))
3597 		cpu = select_fallback_rq(task_cpu(p), p);
3598 
3599 	return cpu;
3600 }
3601 
3602 void sched_set_stop_task(int cpu, struct task_struct *stop)
3603 {
3604 	static struct lock_class_key stop_pi_lock;
3605 	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
3606 	struct task_struct *old_stop = cpu_rq(cpu)->stop;
3607 
3608 	if (stop) {
3609 		/*
3610 		 * Make it appear like a SCHED_FIFO task; it's something
3611 		 * userspace knows about and won't get confused about.
3612 		 *
3613 		 * Also, it will make PI more or less work without too
3614 		 * much confusion -- but then, stop work should not
3615 		 * rely on PI working anyway.
3616 		 */
3617 		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
3618 
3619 		stop->sched_class = &stop_sched_class;
3620 
3621 		/*
3622 		 * The PI code calls rt_mutex_setprio() with ->pi_lock held to
3623 		 * adjust the effective priority of a task. As a result,
3624 		 * rt_mutex_setprio() can trigger (RT) balancing operations,
3625 		 * which can then trigger wakeups of the stop thread to push
3626 		 * around the current task.
3627 		 *
3628 		 * The stop task itself will never be part of the PI-chain, it
3629 		 * never blocks, therefore that ->pi_lock recursion is safe.
3630 		 * Tell lockdep about this by placing the stop->pi_lock in its
3631 		 * own class.
3632 		 */
3633 		lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
3634 	}
3635 
3636 	cpu_rq(cpu)->stop = stop;
3637 
3638 	if (old_stop) {
3639 		/*
3640 		 * Reset it back to a normal scheduling class so that
3641 		 * it can die in pieces.
3642 		 */
3643 		old_stop->sched_class = &rt_sched_class;
3644 	}
3645 }
3646 
3647 #else /* !CONFIG_SMP */
3648 
3649 static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { }
3650 
3651 static inline bool rq_has_pinned_tasks(struct rq *rq)
3652 {
3653 	return false;
3654 }
3655 
3656 #endif /* CONFIG_SMP */
3657 
3658 static void
3659 ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
3660 {
3661 	struct rq *rq;
3662 
3663 	if (!schedstat_enabled())
3664 		return;
3665 
3666 	rq = this_rq();
3667 
3668 #ifdef CONFIG_SMP
3669 	if (cpu == rq->cpu) {
3670 		__schedstat_inc(rq->ttwu_local);
3671 		__schedstat_inc(p->stats.nr_wakeups_local);
3672 	} else {
3673 		struct sched_domain *sd;
3674 
3675 		__schedstat_inc(p->stats.nr_wakeups_remote);
3676 
3677 		guard(rcu)();
3678 		for_each_domain(rq->cpu, sd) {
3679 			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
3680 				__schedstat_inc(sd->ttwu_wake_remote);
3681 				break;
3682 			}
3683 		}
3684 	}
3685 
3686 	if (wake_flags & WF_MIGRATED)
3687 		__schedstat_inc(p->stats.nr_wakeups_migrate);
3688 #endif /* CONFIG_SMP */
3689 
3690 	__schedstat_inc(rq->ttwu_count);
3691 	__schedstat_inc(p->stats.nr_wakeups);
3692 
3693 	if (wake_flags & WF_SYNC)
3694 		__schedstat_inc(p->stats.nr_wakeups_sync);
3695 }
3696 
3697 /*
3698  * Mark the task runnable.
3699  */
3700 static inline void ttwu_do_wakeup(struct task_struct *p)
3701 {
3702 	WRITE_ONCE(p->__state, TASK_RUNNING);
3703 	trace_sched_wakeup(p);
3704 }
3705 
3706 static void
3707 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
3708 		 struct rq_flags *rf)
3709 {
3710 	int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
3711 
3712 	lockdep_assert_rq_held(rq);
3713 
3714 	if (p->sched_contributes_to_load)
3715 		rq->nr_uninterruptible--;
3716 
3717 #ifdef CONFIG_SMP
3718 	if (wake_flags & WF_RQ_SELECTED)
3719 		en_flags |= ENQUEUE_RQ_SELECTED;
3720 	if (wake_flags & WF_MIGRATED)
3721 		en_flags |= ENQUEUE_MIGRATED;
3722 	else
3723 #endif
3724 	if (p->in_iowait) {
3725 		delayacct_blkio_end(p);
3726 		atomic_dec(&task_rq(p)->nr_iowait);
3727 	}
3728 
3729 	activate_task(rq, p, en_flags);
3730 	wakeup_preempt(rq, p, wake_flags);
3731 
3732 	ttwu_do_wakeup(p);
3733 
3734 #ifdef CONFIG_SMP
3735 	if (p->sched_class->task_woken) {
3736 		/*
3737 		 * Our task @p is fully woken up and running; so it's safe to
3738 		 * drop the rq->lock, hereafter rq is only used for statistics.
3739 		 */
3740 		rq_unpin_lock(rq, rf);
3741 		p->sched_class->task_woken(rq, p);
3742 		rq_repin_lock(rq, rf);
3743 	}
3744 
3745 	if (rq->idle_stamp) {
3746 		u64 delta = rq_clock(rq) - rq->idle_stamp;
3747 		u64 max = 2*rq->max_idle_balance_cost;
3748 
3749 		update_avg(&rq->avg_idle, delta);
3750 
3751 		if (rq->avg_idle > max)
3752 			rq->avg_idle = max;
3753 
3754 		rq->idle_stamp = 0;
3755 	}
3756 #endif
3757 }
3758 
3759 /*
3760  * Consider @p being inside a wait loop:
3761  *
3762  *   for (;;) {
3763  *      set_current_state(TASK_UNINTERRUPTIBLE);
3764  *
3765  *      if (CONDITION)
3766  *         break;
3767  *
3768  *      schedule();
3769  *   }
3770  *   __set_current_state(TASK_RUNNING);
3771  *
3772  * between set_current_state() and schedule(). In this case @p is still
3773  * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
3774  * an atomic manner.
3775  *
3776  * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
3777  * then schedule() must still happen and p->state can be changed to
3778  * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
3779  * need to do a full wakeup with enqueue.
3780  *
3781  * Returns: %true when the wakeup is done,
3782  *          %false otherwise.
3783  */
3784 static int ttwu_runnable(struct task_struct *p, int wake_flags)
3785 {
3786 	struct rq_flags rf;
3787 	struct rq *rq;
3788 	int ret = 0;
3789 
3790 	rq = __task_rq_lock(p, &rf);
3791 	if (task_on_rq_queued(p)) {
3792 		update_rq_clock(rq);
3793 		if (p->se.sched_delayed)
3794 			enqueue_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_DELAYED);
3795 		if (!task_on_cpu(rq, p)) {
3796 			/*
3797 			 * When on_rq && !on_cpu the task is preempted, see if
3798 			 * it should preempt the task that is current now.
3799 			 */
3800 			wakeup_preempt(rq, p, wake_flags);
3801 		}
3802 		ttwu_do_wakeup(p);
3803 		ret = 1;
3804 	}
3805 	__task_rq_unlock(rq, &rf);
3806 
3807 	return ret;
3808 }
3809 
3810 #ifdef CONFIG_SMP
3811 void sched_ttwu_pending(void *arg)
3812 {
3813 	struct llist_node *llist = arg;
3814 	struct rq *rq = this_rq();
3815 	struct task_struct *p, *t;
3816 	struct rq_flags rf;
3817 
3818 	if (!llist)
3819 		return;
3820 
3821 	rq_lock_irqsave(rq, &rf);
3822 	update_rq_clock(rq);
3823 
3824 	llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
3825 		if (WARN_ON_ONCE(p->on_cpu))
3826 			smp_cond_load_acquire(&p->on_cpu, !VAL);
3827 
3828 		if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
3829 			set_task_cpu(p, cpu_of(rq));
3830 
3831 		ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf);
3832 	}
3833 
3834 	/*
3835 	 * Must be after enqueueing at least one task such that
3836 	 * idle_cpu() does not observe a false-negative -- if it does,
3837 	 * it is possible for select_idle_siblings() to stack a number
3838 	 * of tasks on this CPU during that window.
3839 	 *
3840 	 * It is OK to clear ttwu_pending when another task is pending; we will
3841 	 * receive an IPI once local IRQs are enabled and then enqueue it. Since
3842 	 * nr_running > 0 by then, idle_cpu() will always return a correct result.
3843 	 */
3844 	WRITE_ONCE(rq->ttwu_pending, 0);
3845 	rq_unlock_irqrestore(rq, &rf);
3846 }
3847 
3848 /*
3849  * Prepare the scene for sending an IPI for a remote smp_call
3850  *
3851  * Returns true if the caller can proceed with sending the IPI.
3852  * Returns false otherwise.
3853  */
3854 bool call_function_single_prep_ipi(int cpu)
3855 {
3856 	if (set_nr_if_polling(cpu_rq(cpu)->idle)) {
3857 		trace_sched_wake_idle_without_ipi(cpu);
3858 		return false;
3859 	}
3860 
3861 	return true;
3862 }
3863 
3864 /*
3865  * Queue a task on the target CPU's wake_list and wake the CPU via IPI if
3866  * necessary. The wakee CPU, on receipt of the IPI, will queue the task
3867  * via sched_ttwu_pending() for activation so the wakee incurs the cost
3868  * of the wakeup instead of the waker.
3869  */
3870 static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3871 {
3872 	struct rq *rq = cpu_rq(cpu);
3873 
3874 	p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
3875 
3876 	WRITE_ONCE(rq->ttwu_pending, 1);
3877 	__smp_call_single_queue(cpu, &p->wake_entry.llist);
3878 }
3879 
3880 void wake_up_if_idle(int cpu)
3881 {
3882 	struct rq *rq = cpu_rq(cpu);
3883 
3884 	guard(rcu)();
3885 	if (is_idle_task(rcu_dereference(rq->curr))) {
3886 		guard(rq_lock_irqsave)(rq);
3887 		if (is_idle_task(rq->curr))
3888 			resched_curr(rq);
3889 	}
3890 }
3891 
3892 bool cpus_equal_capacity(int this_cpu, int that_cpu)
3893 {
3894 	if (!sched_asym_cpucap_active())
3895 		return true;
3896 
3897 	if (this_cpu == that_cpu)
3898 		return true;
3899 
3900 	return arch_scale_cpu_capacity(this_cpu) == arch_scale_cpu_capacity(that_cpu);
3901 }
3902 
3903 bool cpus_share_cache(int this_cpu, int that_cpu)
3904 {
3905 	if (this_cpu == that_cpu)
3906 		return true;
3907 
3908 	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
3909 }
3910 
3911 /*
3912  * Whether CPUs share cache resources, which means LLC on non-cluster
3913  * machines and LLC tag or L2 on machines with clusters.
3914  */
3915 bool cpus_share_resources(int this_cpu, int that_cpu)
3916 {
3917 	if (this_cpu == that_cpu)
3918 		return true;
3919 
3920 	return per_cpu(sd_share_id, this_cpu) == per_cpu(sd_share_id, that_cpu);
3921 }
3922 
3923 static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
3924 {
3925 	/*
3926 	 * The BPF scheduler may depend on select_task_rq() being invoked during
3927 	 * wakeups. In addition, @p may end up executing on a different CPU
3928 	 * regardless of what happens in the wakeup path making the ttwu_queue
3929 	 * optimization less meaningful. Skip if on SCX.
3930 	 */
3931 	if (task_on_scx(p))
3932 		return false;
3933 
3934 	/*
3935 	 * Do not complicate things with the async wake_list while the CPU is
3936 	 * in hotplug state.
3937 	 */
3938 	if (!cpu_active(cpu))
3939 		return false;
3940 
3941 	/* Ensure the task will still be allowed to run on the CPU. */
3942 	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
3943 		return false;
3944 
3945 	/*
3946 	 * If the CPU does not share cache, then queue the task on the
3947 	 * remote rqs wakelist to avoid accessing remote data.
3948 	 * remote rq's wakelist to avoid accessing remote data.
3949 	if (!cpus_share_cache(smp_processor_id(), cpu))
3950 		return true;
3951 
3952 	if (cpu == smp_processor_id())
3953 		return false;
3954 
3955 	/*
3956 	 * If the wakee CPU is idle, or the task is descheduling and the
3957 	 * only running task on the CPU, then use the wakelist to offload
3958 	 * the task activation to the idle (or soon-to-be-idle) CPU as
3959 	 * the current CPU is likely busy. nr_running is checked to
3960 	 * avoid unnecessary task stacking.
3961 	 *
3962 	 * Note that we can only get here with (wakee) p->on_rq=0,
3963 	 * p->on_cpu can be whatever, we've done the dequeue, so
3964 	 * the wakee has been accounted out of ->nr_running.
3965 	 */
3966 	if (!cpu_rq(cpu)->nr_running)
3967 		return true;
3968 
3969 	return false;
3970 }
3971 
3972 static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3973 {
3974 	if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) {
3975 		sched_clock_cpu(cpu); /* Sync clocks across CPUs */
3976 		__ttwu_queue_wakelist(p, cpu, wake_flags);
3977 		return true;
3978 	}
3979 
3980 	return false;
3981 }
3982 
3983 #else /* !CONFIG_SMP */
3984 
3985 static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
3986 {
3987 	return false;
3988 }
3989 
3990 #endif /* CONFIG_SMP */
3991 
3992 static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
3993 {
3994 	struct rq *rq = cpu_rq(cpu);
3995 	struct rq_flags rf;
3996 
3997 	if (ttwu_queue_wakelist(p, cpu, wake_flags))
3998 		return;
3999 
4000 	rq_lock(rq, &rf);
4001 	update_rq_clock(rq);
4002 	ttwu_do_activate(rq, p, wake_flags, &rf);
4003 	rq_unlock(rq, &rf);
4004 }
4005 
4006 /*
4007  * Invoked from try_to_wake_up() to check whether the task can be woken up.
4008  *
4009  * The caller holds p::pi_lock if p != current or has preemption
4010  * disabled when p == current.
4011  *
4012  * The rules of saved_state:
4013  *
4014  *   The related locking code always holds p::pi_lock when updating
4015  *   p::saved_state, which means the code is fully serialized in both cases.
4016  *
4017  *   For PREEMPT_RT, the lock wait and lock wakeups happen via TASK_RTLOCK_WAIT.
4018  *   No other bits set. This allows to distinguish all wakeup scenarios.
4019  *
4020  *   For FREEZER, the wakeup happens via TASK_FROZEN. No other bits set. This
4021  *   allows us to prevent early wakeup of tasks before they can be run on
4022  *   asymmetric ISA architectures (eg ARMv9).
4023  */
4024 static __always_inline
4025 bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
4026 {
4027 	int match;
4028 
4029 	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
4030 		WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) &&
4031 			     state != TASK_RTLOCK_WAIT);
4032 	}
4033 
4034 	*success = !!(match = __task_state_match(p, state));
4035 
4036 	/*
4037 	 * Saved state preserves the task state across blocking on an
4038 	 * RT lock or the freezing of a TASK_FREEZABLE task.  If the state matches,
4039 	 * set p::saved_state to TASK_RUNNING, but do not wake the task
4040 	 * because it waits for a lock wakeup or __thaw_task(). Also
4041 	 * indicate success because from the regular waker's point of
4042 	 * view this has succeeded.
4043 	 *
4044 	 * After acquiring the lock the task will restore p::__state
4045 	 * from p::saved_state which ensures that the regular
4046 	 * wakeup is not lost. The restore will also set
4047 	 * p::saved_state to TASK_RUNNING so any further tests will
4048 	 * not result in false positives vs. @success
4049 	 * not result in false positives vs. @success.
4050 	if (match < 0)
4051 		p->saved_state = TASK_RUNNING;
4052 
4053 	return match > 0;
4054 }
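/*
 * Illustration of the match < 0 path above: a TASK_FREEZABLE sleeper
 * frozen to TASK_FROZEN that receives a regular TASK_NORMAL wakeup
 * matches only via p->saved_state; saved_state is flipped to
 * TASK_RUNNING and success is reported, while the actual wakeup is
 * left to __thaw_task().
 */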
4055 
4056 /*
4057  * Notes on Program-Order guarantees on SMP systems.
4058  *
4059  *  MIGRATION
4060  *
4061  * The basic program-order guarantee on SMP systems is that when a task [t]
4062  * migrates, all its activity on its old CPU [c0] happens-before any subsequent
4063  * execution on its new CPU [c1].
4064  *
4065  * For migration (of runnable tasks) this is provided by the following means:
4066  *
4067  *  A) UNLOCK of the rq(c0)->lock scheduling out task t
4068  *  B) migration for t is required to synchronize *both* rq(c0)->lock and
4069  *     rq(c1)->lock (if not at the same time, then in that order).
4070  *  C) LOCK of the rq(c1)->lock scheduling in task
4071  *
4072  * Release/acquire chaining guarantees that B happens after A and C after B.
4073  * Note: the CPU doing B need not be c0 or c1
4074  *
4075  * Example:
4076  *
4077  *   CPU0            CPU1            CPU2
4078  *
4079  *   LOCK rq(0)->lock
4080  *   sched-out X
4081  *   sched-in Y
4082  *   UNLOCK rq(0)->lock
4083  *
4084  *                                   LOCK rq(0)->lock // orders against CPU0
4085  *                                   dequeue X
4086  *                                   UNLOCK rq(0)->lock
4087  *
4088  *                                   LOCK rq(1)->lock
4089  *                                   enqueue X
4090  *                                   UNLOCK rq(1)->lock
4091  *
4092  *                   LOCK rq(1)->lock // orders against CPU2
4093  *                   sched-out Z
4094  *                   sched-in X
4095  *                   UNLOCK rq(1)->lock
4096  *
4097  *
4098  *  BLOCKING -- aka. SLEEP + WAKEUP
4099  *
4100  * For blocking we (obviously) need to provide the same guarantee as for
4101  * migration. However the means are completely different as there is no lock
4102  * chain to provide order. Instead we do:
4103  *
4104  *   1) smp_store_release(X->on_cpu, 0)   -- finish_task()
4105  *   2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
4106  *
4107  * Example:
4108  *
4109  *   CPU0 (schedule)  CPU1 (try_to_wake_up) CPU2 (schedule)
4110  *
4111  *   LOCK rq(0)->lock LOCK X->pi_lock
4112  *   dequeue X
4113  *   sched-out X
4114  *   smp_store_release(X->on_cpu, 0);
4115  *
4116  *                    smp_cond_load_acquire(&X->on_cpu, !VAL);
4117  *                    X->state = WAKING
4118  *                    set_task_cpu(X,2)
4119  *
4120  *                    LOCK rq(2)->lock
4121  *                    enqueue X
4122  *                    X->state = RUNNING
4123  *                    UNLOCK rq(2)->lock
4124  *
4125  *                                          LOCK rq(2)->lock // orders against CPU1
4126  *                                          sched-out Z
4127  *                                          sched-in X
4128  *                                          UNLOCK rq(2)->lock
4129  *
4130  *                    UNLOCK X->pi_lock
4131  *   UNLOCK rq(0)->lock
4132  *
4133  *
4134  * However, for wakeups there is a second guarantee we must provide, namely we
4135  * must ensure that CONDITION=1 done by the caller cannot be reordered with
4136  * accesses to the task state; see try_to_wake_up() and set_current_state().
4137  */
4138 
4139 /**
4140  * try_to_wake_up - wake up a thread
4141  * @p: the thread to be awakened
4142  * @state: the mask of task states that can be woken
4143  * @wake_flags: wake modifier flags (WF_*)
4144  *
4145  * Conceptually does:
4146  *
4147  *   If (@state & @p->state) @p->state = TASK_RUNNING.
4148  *
4149  * If the task was not queued/runnable, also place it back on a runqueue.
4150  *
4151  * This function is atomic against schedule() which would dequeue the task.
4152  *
4153  * It issues a full memory barrier before accessing @p->state, see the comment
4154  * with set_current_state().
4155  *
4156  * Uses p->pi_lock to serialize against concurrent wake-ups.
4157  *
4158  * Relies on p->pi_lock stabilizing:
4159  *  - p->sched_class
4160  *  - p->cpus_ptr
4161  *  - p->sched_task_group
4162  * in order to do migration, see its use of select_task_rq()/set_task_cpu().
4163  *
4164  * Tries really hard to only take one task_rq(p)->lock for performance.
4165  * Takes rq->lock in:
4166  *  - ttwu_runnable()    -- old rq, unavoidable, see comment there;
4167  *  - ttwu_queue()       -- new rq, for enqueue of the task;
4168  *  - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
4169  *
4170  * As a consequence we race really badly with just about everything. See the
4171  * many memory barriers and their comments for details.
4172  *
4173  * Return: %true if @p->state changes (an actual wakeup was done),
4174  *	   %false otherwise.
4175  */
4176 int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
4177 {
4178 	guard(preempt)();
4179 	int cpu, success = 0;
4180 
4181 	wake_flags |= WF_TTWU;
4182 
4183 	if (p == current) {
4184 		/*
4185 		 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
4186 		 * == smp_processor_id()'. Together this means we can special
4187 		 * case the whole 'p->on_rq && ttwu_runnable()' case below
4188 		 * without taking any locks.
4189 		 *
4190 		 * Specifically, given current runs ttwu() we must be before
4191 		 * schedule()'s block_task(), as such this must not observe
4192 		 * sched_delayed.
4193 		 *
4194 		 * In particular:
4195 		 *  - we rely on Program-Order guarantees for all the ordering,
4196 		 *  - we're serialized against set_special_state() by virtue of
4197 		 *    it disabling IRQs (this allows not taking ->pi_lock).
4198 		 */
4199 		SCHED_WARN_ON(p->se.sched_delayed);
4200 		if (!ttwu_state_match(p, state, &success))
4201 			goto out;
4202 
4203 		trace_sched_waking(p);
4204 		ttwu_do_wakeup(p);
4205 		goto out;
4206 	}
4207 
4208 	/*
4209 	 * If we are going to wake up a thread waiting for CONDITION we
4210 	 * need to ensure that CONDITION=1 done by the caller cannot be
4211 	 * reordered with p->state check below. This pairs with smp_store_mb()
4212 	 * in set_current_state() that the waiting thread does.
4213 	 */
4214 	scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
4215 		smp_mb__after_spinlock();
4216 		if (!ttwu_state_match(p, state, &success))
4217 			break;
4218 
4219 		trace_sched_waking(p);
4220 
4221 		/*
4222 		 * Ensure we load p->on_rq _after_ p->state, otherwise it would
4223 		 * be possible to, falsely, observe p->on_rq == 0 and get stuck
4224 		 * in smp_cond_load_acquire() below.
4225 		 *
4226 		 * sched_ttwu_pending()			try_to_wake_up()
4227 		 *   STORE p->on_rq = 1			  LOAD p->state
4228 		 *   UNLOCK rq->lock
4229 		 *
4230 		 * __schedule() (switch to task 'p')
4231 		 *   LOCK rq->lock			  smp_rmb();
4232 		 *   smp_mb__after_spinlock();
4233 		 *   UNLOCK rq->lock
4234 		 *
4235 		 * [task p]
4236 		 *   STORE p->state = UNINTERRUPTIBLE	  LOAD p->on_rq
4237 		 *
4238 		 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4239 		 * __schedule().  See the comment for smp_mb__after_spinlock().
4240 		 *
4241 		 * A similar smp_rmb() lives in __task_needs_rq_lock().
4242 		 */
4243 		smp_rmb();
4244 		if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
4245 			break;
4246 
4247 #ifdef CONFIG_SMP
4248 		/*
4249 		 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
4250 		 * possible to, falsely, observe p->on_cpu == 0.
4251 		 *
4252 		 * One must be running (->on_cpu == 1) in order to remove oneself
4253 		 * from the runqueue.
4254 		 *
4255 		 * __schedule() (switch to task 'p')	try_to_wake_up()
4256 		 *   STORE p->on_cpu = 1		  LOAD p->on_rq
4257 		 *   UNLOCK rq->lock
4258 		 *
4259 		 * __schedule() (put 'p' to sleep)
4260 		 *   LOCK rq->lock			  smp_rmb();
4261 		 *   smp_mb__after_spinlock();
4262 		 *   STORE p->on_rq = 0			  LOAD p->on_cpu
4263 		 *
4264 		 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4265 		 * __schedule().  See the comment for smp_mb__after_spinlock().
4266 		 *
4267 		 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
4268 		 * schedule()'s deactivate_task() has 'happened' and p will no longer
4269 		 * care about it's own p->state. See the comment in __schedule().
4270 		 * care about its own p->state. See the comment in __schedule().
4271 		smp_acquire__after_ctrl_dep();
4272 
4273 		/*
4274 		 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
4275 		 * == 0), which means we need to do an enqueue, change p->state to
4276 		 * TASK_WAKING such that we can unlock p->pi_lock before doing the
4277 		 * enqueue, such as ttwu_queue_wakelist().
4278 		 */
4279 		WRITE_ONCE(p->__state, TASK_WAKING);
4280 
4281 		/*
4282 		 * If the owning (remote) CPU is still in the middle of schedule() with
4283 		 * this task as prev, considering queueing p on the remote CPUs wake_list
4284 		 * this task as prev, consider queueing p on the remote CPU's wake_list,
4285 		 * let the waker make forward progress. This is safe because IRQs are
4286 		 * disabled and the IPI will deliver after on_cpu is cleared.
4287 		 *
4288 		 * Ensure we load task_cpu(p) after p->on_cpu:
4289 		 *
4290 		 * set_task_cpu(p, cpu);
4291 		 *   STORE p->cpu = @cpu
4292 		 * __schedule() (switch to task 'p')
4293 		 *   LOCK rq->lock
4294 		 *   smp_mb__after_spin_lock()		smp_cond_load_acquire(&p->on_cpu)
4295 		 *   STORE p->on_cpu = 1		LOAD p->cpu
4296 		 *
4297 		 * to ensure we observe the correct CPU on which the task is currently
4298 		 * scheduling.
4299 		 */
4300 		if (smp_load_acquire(&p->on_cpu) &&
4301 		    ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
4302 			break;
4303 
4304 		/*
4305 		 * If the owning (remote) CPU is still in the middle of schedule() with
4306 		 * this task as prev, wait until it's done referencing the task.
4307 		 *
4308 		 * Pairs with the smp_store_release() in finish_task().
4309 		 *
4310 		 * This ensures that tasks getting woken will be fully ordered against
4311 		 * their previous state and preserve Program Order.
4312 		 */
4313 		smp_cond_load_acquire(&p->on_cpu, !VAL);
4314 
4315 		cpu = select_task_rq(p, p->wake_cpu, &wake_flags);
4316 		if (task_cpu(p) != cpu) {
4317 			if (p->in_iowait) {
4318 				delayacct_blkio_end(p);
4319 				atomic_dec(&task_rq(p)->nr_iowait);
4320 			}
4321 
4322 			wake_flags |= WF_MIGRATED;
4323 			psi_ttwu_dequeue(p);
4324 			set_task_cpu(p, cpu);
4325 		}
4326 #else
4327 		cpu = task_cpu(p);
4328 #endif /* CONFIG_SMP */
4329 
4330 		ttwu_queue(p, cpu, wake_flags);
4331 	}
4332 out:
4333 	if (success)
4334 		ttwu_stat(p, task_cpu(p), wake_flags);
4335 
4336 	return success;
4337 }
4338 
4339 static bool __task_needs_rq_lock(struct task_struct *p)
4340 {
4341 	unsigned int state = READ_ONCE(p->__state);
4342 
4343 	/*
4344 	 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when
4345 	 * the task is blocked. Make sure to check @state since ttwu() can drop
4346 	 * locks at the end, see ttwu_queue_wakelist().
4347 	 */
4348 	if (state == TASK_RUNNING || state == TASK_WAKING)
4349 		return true;
4350 
4351 	/*
4352 	 * Ensure we load p->on_rq after p->__state, otherwise it would be
4353 	 * possible to, falsely, observe p->on_rq == 0.
4354 	 *
4355 	 * See try_to_wake_up() for a longer comment.
4356 	 */
4357 	smp_rmb();
4358 	if (p->on_rq)
4359 		return true;
4360 
4361 #ifdef CONFIG_SMP
4362 	/*
4363 	 * Ensure the task has finished __schedule() and will not be referenced
4364 	 * anymore. Again, see try_to_wake_up() for a longer comment.
4365 	 */
4366 	smp_rmb();
4367 	smp_cond_load_acquire(&p->on_cpu, !VAL);
4368 #endif
4369 
4370 	return false;
4371 }
4372 
4373 /**
4374  * task_call_func - Invoke a function on task in fixed state
4375  * @p: Process for which the function is to be invoked, can be @current.
4376  * @func: Function to invoke.
4377  * @arg: Argument to function.
4378  *
4379  * Fix the task in its current state by avoiding wakeups and/or rq operations
4380  * and call @func(@arg) on it.  This function can use task_is_runnable() and
4381  * task_curr() to work out what the state is, if required.  Given that @func
4382  * can be invoked with a runqueue lock held, it had better be quite
4383  * lightweight.
4384  *
4385  * Returns:
4386  *   Whatever @func returns
4387  */
4388 int task_call_func(struct task_struct *p, task_call_f func, void *arg)
4389 {
4390 	struct rq *rq = NULL;
4391 	struct rq_flags rf;
4392 	int ret;
4393 
4394 	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
4395 
4396 	if (__task_needs_rq_lock(p))
4397 		rq = __task_rq_lock(p, &rf);
4398 
4399 	/*
4400 	 * At this point the task is pinned; either:
4401 	 *  - blocked and we're holding off wakeups	 (pi->lock)
4402 	 *  - woken, and we're holding off enqueue	 (rq->lock)
4403 	 *  - queued, and we're holding off schedule	 (rq->lock)
4404 	 *  - running, and we're holding off de-schedule (rq->lock)
4405 	 *
4406 	 * The called function (@func) can use: task_curr(), p->on_rq and
4407 	 * p->__state to differentiate between these states.
4408 	 */
4409 	ret = func(p, arg);
4410 
4411 	if (rq)
4412 		rq_unlock(rq, &rf);
4413 
4414 	raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
4415 	return ret;
4416 }
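/*
 * A minimal sketch of a task_call_f callback (hypothetical, for
 * illustration only); it reads just the fields that the pinning
 * described above stabilizes:
 *
 *	static int task_state_peek(struct task_struct *p, void *arg)
 *	{
 *		unsigned int *state = arg;
 *
 *		*state = READ_ONCE(p->__state);
 *		return p->on_rq != 0;
 *	}
 *
 *	ret = task_call_func(p, task_state_peek, &state);
 */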
4417 
4418 /**
4419  * cpu_curr_snapshot - Return a snapshot of the currently running task
4420  * @cpu: The CPU on which to snapshot the task.
4421  *
4422  * Returns the task_struct pointer of the task "currently" running on
4423  * the specified CPU.
4424  *
4425  * If the specified CPU was offline, the return value is whatever it
4426  * is, perhaps a pointer to the task_struct structure of that CPU's idle
4427  * task, but there is no guarantee.  Callers wishing a useful return
4428  * value must take some action to ensure that the specified CPU remains
4429  * online throughout.
4430  *
4431  * This function executes full memory barriers before and after fetching
4432  * the pointer, which permits the caller to confine this function's fetch
4433  * with respect to the caller's accesses to other shared variables.
4434  */
4435 struct task_struct *cpu_curr_snapshot(int cpu)
4436 {
4437 	struct rq *rq = cpu_rq(cpu);
4438 	struct task_struct *t;
4439 	struct rq_flags rf;
4440 
4441 	rq_lock_irqsave(rq, &rf);
4442 	smp_mb__after_spinlock(); /* Pairing determined by caller's synchronization design. */
4443 	t = rcu_dereference(cpu_curr(cpu));
4444 	rq_unlock_irqrestore(rq, &rf);
4445 	smp_mb(); /* Pairing determined by caller's synchronization design. */
4446 
4447 	return t;
4448 }
4449 
4450 /**
4451  * wake_up_process - Wake up a specific process
4452  * @p: The process to be woken up.
4453  *
4454  * Attempt to wake up the nominated process and move it to the set of runnable
4455  * processes.
4456  *
4457  * Return: 1 if the process was woken up, 0 if it was already running.
4458  *
4459  * This function executes a full memory barrier before accessing the task state.
4460  */
4461 int wake_up_process(struct task_struct *p)
4462 {
4463 	return try_to_wake_up(p, TASK_NORMAL, 0);
4464 }
4465 EXPORT_SYMBOL(wake_up_process);
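/*
 * A minimal waker-side sketch (CONDITION and wakee are hypothetical),
 * pairing with the wait loop shown above ttwu_runnable(); no extra
 * barrier is needed after the CONDITION store because try_to_wake_up()
 * issues a full memory barrier before reading the task state:
 *
 *	CONDITION = 1;
 *	wake_up_process(wakee);
 */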
4466 
4467 int wake_up_state(struct task_struct *p, unsigned int state)
4468 {
4469 	return try_to_wake_up(p, state, 0);
4470 }
4471 
4472 /*
4473  * Perform scheduler related setup for a newly forked process p.
4474  * p is forked by current.
4475  *
4476  * __sched_fork() is basic setup which is also used by sched_init() to
4477  * initialize the boot CPU's idle task.
4478  */
4479 static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
4480 {
4481 	p->on_rq			= 0;
4482 
4483 	p->se.on_rq			= 0;
4484 	p->se.exec_start		= 0;
4485 	p->se.sum_exec_runtime		= 0;
4486 	p->se.prev_sum_exec_runtime	= 0;
4487 	p->se.nr_migrations		= 0;
4488 	p->se.vruntime			= 0;
4489 	p->se.vlag			= 0;
4490 	INIT_LIST_HEAD(&p->se.group_node);
4491 
4492 	/* A delayed task cannot be in clone(). */
4493 	SCHED_WARN_ON(p->se.sched_delayed);
4494 
4495 #ifdef CONFIG_FAIR_GROUP_SCHED
4496 	p->se.cfs_rq			= NULL;
4497 #endif
4498 
4499 #ifdef CONFIG_SCHEDSTATS
4500 	/* Even if schedstat is disabled, there should not be garbage */
4501 	memset(&p->stats, 0, sizeof(p->stats));
4502 #endif
4503 
4504 	init_dl_entity(&p->dl);
4505 
4506 	INIT_LIST_HEAD(&p->rt.run_list);
4507 	p->rt.timeout		= 0;
4508 	p->rt.time_slice	= sched_rr_timeslice;
4509 	p->rt.on_rq		= 0;
4510 	p->rt.on_list		= 0;
4511 
4512 #ifdef CONFIG_SCHED_CLASS_EXT
4513 	init_scx_entity(&p->scx);
4514 #endif
4515 
4516 #ifdef CONFIG_PREEMPT_NOTIFIERS
4517 	INIT_HLIST_HEAD(&p->preempt_notifiers);
4518 #endif
4519 
4520 #ifdef CONFIG_COMPACTION
4521 	p->capture_control = NULL;
4522 #endif
4523 	init_numa_balancing(clone_flags, p);
4524 #ifdef CONFIG_SMP
4525 	p->wake_entry.u_flags = CSD_TYPE_TTWU;
4526 	p->migration_pending = NULL;
4527 #endif
4528 	init_sched_mm_cid(p);
4529 }
4530 
4531 DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);
4532 
4533 #ifdef CONFIG_NUMA_BALANCING
4534 
4535 int sysctl_numa_balancing_mode;
4536 
4537 static void __set_numabalancing_state(bool enabled)
4538 {
4539 	if (enabled)
4540 		static_branch_enable(&sched_numa_balancing);
4541 	else
4542 		static_branch_disable(&sched_numa_balancing);
4543 }
4544 
4545 void set_numabalancing_state(bool enabled)
4546 {
4547 	if (enabled)
4548 		sysctl_numa_balancing_mode = NUMA_BALANCING_NORMAL;
4549 	else
4550 		sysctl_numa_balancing_mode = NUMA_BALANCING_DISABLED;
4551 	__set_numabalancing_state(enabled);
4552 }
4553 
4554 #ifdef CONFIG_PROC_SYSCTL
4555 static void reset_memory_tiering(void)
4556 {
4557 	struct pglist_data *pgdat;
4558 
4559 	for_each_online_pgdat(pgdat) {
4560 		pgdat->nbp_threshold = 0;
4561 		pgdat->nbp_th_nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE);
4562 		pgdat->nbp_th_start = jiffies_to_msecs(jiffies);
4563 	}
4564 }
4565 
4566 static int sysctl_numa_balancing(const struct ctl_table *table, int write,
4567 			  void *buffer, size_t *lenp, loff_t *ppos)
4568 {
4569 	struct ctl_table t;
4570 	int err;
4571 	int state = sysctl_numa_balancing_mode;
4572 
4573 	if (write && !capable(CAP_SYS_ADMIN))
4574 		return -EPERM;
4575 
4576 	t = *table;
4577 	t.data = &state;
4578 	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
4579 	if (err < 0)
4580 		return err;
4581 	if (write) {
4582 		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) &&
4583 		    (state & NUMA_BALANCING_MEMORY_TIERING))
4584 			reset_memory_tiering();
4585 		sysctl_numa_balancing_mode = state;
4586 		__set_numabalancing_state(state);
4587 	}
4588 	return err;
4589 }
4590 #endif
4591 #endif
4592 
4593 #ifdef CONFIG_SCHEDSTATS
4594 
4595 DEFINE_STATIC_KEY_FALSE(sched_schedstats);
4596 
4597 static void set_schedstats(bool enabled)
4598 {
4599 	if (enabled)
4600 		static_branch_enable(&sched_schedstats);
4601 	else
4602 		static_branch_disable(&sched_schedstats);
4603 }
4604 
4605 void force_schedstat_enabled(void)
4606 {
4607 	if (!schedstat_enabled()) {
4608 		pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
4609 		static_branch_enable(&sched_schedstats);
4610 	}
4611 }
4612 
4613 static int __init setup_schedstats(char *str)
4614 {
4615 	int ret = 0;
4616 	if (!str)
4617 		goto out;
4618 
4619 	if (!strcmp(str, "enable")) {
4620 		set_schedstats(true);
4621 		ret = 1;
4622 	} else if (!strcmp(str, "disable")) {
4623 		set_schedstats(false);
4624 		ret = 1;
4625 	}
4626 out:
4627 	if (!ret)
4628 		pr_warn("Unable to parse schedstats=\n");
4629 
4630 	return ret;
4631 }
4632 __setup("schedstats=", setup_schedstats);
4633 
4634 #ifdef CONFIG_PROC_SYSCTL
4635 static int sysctl_schedstats(const struct ctl_table *table, int write, void *buffer,
4636 		size_t *lenp, loff_t *ppos)
4637 {
4638 	struct ctl_table t;
4639 	int err;
4640 	int state = static_branch_likely(&sched_schedstats);
4641 
4642 	if (write && !capable(CAP_SYS_ADMIN))
4643 		return -EPERM;
4644 
4645 	t = *table;
4646 	t.data = &state;
4647 	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
4648 	if (err < 0)
4649 		return err;
4650 	if (write)
4651 		set_schedstats(state);
4652 	return err;
4653 }
4654 #endif /* CONFIG_PROC_SYSCTL */
4655 #endif /* CONFIG_SCHEDSTATS */
4656 
4657 #ifdef CONFIG_SYSCTL
4658 static const struct ctl_table sched_core_sysctls[] = {
4659 #ifdef CONFIG_SCHEDSTATS
4660 	{
4661 		.procname       = "sched_schedstats",
4662 		.data           = NULL,
4663 		.maxlen         = sizeof(unsigned int),
4664 		.mode           = 0644,
4665 		.proc_handler   = sysctl_schedstats,
4666 		.extra1         = SYSCTL_ZERO,
4667 		.extra2         = SYSCTL_ONE,
4668 	},
4669 #endif /* CONFIG_SCHEDSTATS */
4670 #ifdef CONFIG_UCLAMP_TASK
4671 	{
4672 		.procname       = "sched_util_clamp_min",
4673 		.data           = &sysctl_sched_uclamp_util_min,
4674 		.maxlen         = sizeof(unsigned int),
4675 		.mode           = 0644,
4676 		.proc_handler   = sysctl_sched_uclamp_handler,
4677 	},
4678 	{
4679 		.procname       = "sched_util_clamp_max",
4680 		.data           = &sysctl_sched_uclamp_util_max,
4681 		.maxlen         = sizeof(unsigned int),
4682 		.mode           = 0644,
4683 		.proc_handler   = sysctl_sched_uclamp_handler,
4684 	},
4685 	{
4686 		.procname       = "sched_util_clamp_min_rt_default",
4687 		.data           = &sysctl_sched_uclamp_util_min_rt_default,
4688 		.maxlen         = sizeof(unsigned int),
4689 		.mode           = 0644,
4690 		.proc_handler   = sysctl_sched_uclamp_handler,
4691 	},
4692 #endif /* CONFIG_UCLAMP_TASK */
4693 #ifdef CONFIG_NUMA_BALANCING
4694 	{
4695 		.procname	= "numa_balancing",
4696 		.data		= NULL, /* filled in by handler */
4697 		.maxlen		= sizeof(unsigned int),
4698 		.mode		= 0644,
4699 		.proc_handler	= sysctl_numa_balancing,
4700 		.extra1		= SYSCTL_ZERO,
4701 		.extra2		= SYSCTL_FOUR,
4702 	},
4703 #endif /* CONFIG_NUMA_BALANCING */
4704 };
4705 static int __init sched_core_sysctl_init(void)
4706 {
4707 	register_sysctl_init("kernel", sched_core_sysctls);
4708 	return 0;
4709 }
4710 late_initcall(sched_core_sysctl_init);
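/*
 * These knobs land under /proc/sys/kernel/, e.g. (usage sketch):
 *
 *	sysctl kernel.sched_schedstats=1
 *	sysctl kernel.numa_balancing=1		# NUMA_BALANCING_NORMAL
 */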
4711 #endif /* CONFIG_SYSCTL */
4712 
4713 /*
4714  * fork()/clone()-time setup:
4715  */
4716 int sched_fork(unsigned long clone_flags, struct task_struct *p)
4717 {
4718 	__sched_fork(clone_flags, p);
4719 	/*
4720 	 * We mark the process as NEW here. This guarantees that
4721 	 * nobody will actually run it, and a signal or other external
4722 	 * event cannot wake it up and insert it on the runqueue either.
4723 	 */
4724 	p->__state = TASK_NEW;
4725 
4726 	/*
4727 	 * Make sure we do not leak PI boosting priority to the child.
4728 	 */
4729 	p->prio = current->normal_prio;
4730 
4731 	uclamp_fork(p);
4732 
4733 	/*
4734 	 * Revert to default priority/policy on fork if requested.
4735 	 */
4736 	if (unlikely(p->sched_reset_on_fork)) {
4737 		if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
4738 			p->policy = SCHED_NORMAL;
4739 			p->static_prio = NICE_TO_PRIO(0);
4740 			p->rt_priority = 0;
4741 		} else if (PRIO_TO_NICE(p->static_prio) < 0)
4742 			p->static_prio = NICE_TO_PRIO(0);
4743 
4744 		p->prio = p->normal_prio = p->static_prio;
4745 		set_load_weight(p, false);
4746 		p->se.custom_slice = 0;
4747 		p->se.slice = sysctl_sched_base_slice;
4748 
4749 		/*
4750 		 * We don't need the reset flag anymore after the fork. It has
4751 		 * fulfilled its duty:
4752 		 */
4753 		p->sched_reset_on_fork = 0;
4754 	}
4755 
4756 	if (dl_prio(p->prio))
4757 		return -EAGAIN;
4758 
4759 	scx_pre_fork(p);
4760 
4761 	if (rt_prio(p->prio)) {
4762 		p->sched_class = &rt_sched_class;
4763 #ifdef CONFIG_SCHED_CLASS_EXT
4764 	} else if (task_should_scx(p->policy)) {
4765 		p->sched_class = &ext_sched_class;
4766 #endif
4767 	} else {
4768 		p->sched_class = &fair_sched_class;
4769 	}
4770 
4771 	init_entity_runnable_average(&p->se);
4772 
4773 
4774 #ifdef CONFIG_SCHED_INFO
4775 	if (likely(sched_info_on()))
4776 		memset(&p->sched_info, 0, sizeof(p->sched_info));
4777 #endif
4778 #if defined(CONFIG_SMP)
4779 	p->on_cpu = 0;
4780 #endif
4781 	init_task_preempt_count(p);
4782 #ifdef CONFIG_SMP
4783 	plist_node_init(&p->pushable_tasks, MAX_PRIO);
4784 	RB_CLEAR_NODE(&p->pushable_dl_tasks);
4785 #endif
4786 	return 0;
4787 }
4788 
4789 int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
4790 {
4791 	unsigned long flags;
4792 
4793 	/*
4794 	 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
4795 	 * required yet, but lockdep gets upset if rules are violated.
4796 	 */
4797 	raw_spin_lock_irqsave(&p->pi_lock, flags);
4798 #ifdef CONFIG_CGROUP_SCHED
4799 	if (1) {
4800 		struct task_group *tg;
4801 		tg = container_of(kargs->cset->subsys[cpu_cgrp_id],
4802 				  struct task_group, css);
4803 		tg = autogroup_task_group(p, tg);
4804 		p->sched_task_group = tg;
4805 	}
4806 #endif
4807 	rseq_migrate(p);
4808 	/*
4809 	 * We're setting the CPU for the first time and we don't migrate,
4810 	 * so use __set_task_cpu().
4811 	 */
4812 	__set_task_cpu(p, smp_processor_id());
4813 	if (p->sched_class->task_fork)
4814 		p->sched_class->task_fork(p);
4815 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4816 
4817 	return scx_fork(p);
4818 }
4819 
4820 void sched_cancel_fork(struct task_struct *p)
4821 {
4822 	scx_cancel_fork(p);
4823 }
4824 
4825 void sched_post_fork(struct task_struct *p)
4826 {
4827 	uclamp_post_fork(p);
4828 	scx_post_fork(p);
4829 }
4830 
4831 unsigned long to_ratio(u64 period, u64 runtime)
4832 {
4833 	if (runtime == RUNTIME_INF)
4834 		return BW_UNIT;
4835 
4836 	/*
4837 	 * Doing this here saves a lot of checks in all
4838 	 * the calling paths, and returning zero seems
4839 	 * safe for them anyway.
4840 	 */
4841 	if (period == 0)
4842 		return 0;
4843 
4844 	return div64_u64(runtime << BW_SHIFT, period);
4845 }
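/*
 * Worked example, assuming BW_SHIFT == 20 (so BW_UNIT == 1 << 20):
 * to_ratio(1000000, 500000) == (500000 << 20) / 1000000 == 524288,
 * i.e. BW_UNIT / 2, representing 50% bandwidth.
 */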
4846 
4847 /*
4848  * wake_up_new_task - wake up a newly created task for the first time.
4849  *
4850  * This function will do some initial scheduler statistics housekeeping
4851  * that must be done for every newly created context, then puts the task
4852  * on the runqueue and wakes it.
4853  */
4854 void wake_up_new_task(struct task_struct *p)
4855 {
4856 	struct rq_flags rf;
4857 	struct rq *rq;
4858 	int wake_flags = WF_FORK;
4859 
4860 	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
4861 	WRITE_ONCE(p->__state, TASK_RUNNING);
4862 #ifdef CONFIG_SMP
4863 	/*
4864 	 * Fork balancing, do it here and not earlier because:
4865 	 *  - cpus_ptr can change in the fork path
4866 	 *  - any previously selected CPU might disappear through hotplug
4867 	 *
4868 	 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
4869 	 * as we're not fully set-up yet.
4870 	 */
4871 	p->recent_used_cpu = task_cpu(p);
4872 	rseq_migrate(p);
4873 	__set_task_cpu(p, select_task_rq(p, task_cpu(p), &wake_flags));
4874 #endif
4875 	rq = __task_rq_lock(p, &rf);
4876 	update_rq_clock(rq);
4877 	post_init_entity_util_avg(p);
4878 
4879 	activate_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_INITIAL);
4880 	trace_sched_wakeup_new(p);
4881 	wakeup_preempt(rq, p, wake_flags);
4882 #ifdef CONFIG_SMP
4883 	if (p->sched_class->task_woken) {
4884 		/*
4885 		 * Nothing relies on rq->lock after this, so it's fine to
4886 		 * drop it.
4887 		 */
4888 		rq_unpin_lock(rq, &rf);
4889 		p->sched_class->task_woken(rq, p);
4890 		rq_repin_lock(rq, &rf);
4891 	}
4892 #endif
4893 	task_rq_unlock(rq, p, &rf);
4894 }
4895 
4896 #ifdef CONFIG_PREEMPT_NOTIFIERS
4897 
4898 static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
4899 
4900 void preempt_notifier_inc(void)
4901 {
4902 	static_branch_inc(&preempt_notifier_key);
4903 }
4904 EXPORT_SYMBOL_GPL(preempt_notifier_inc);
4905 
4906 void preempt_notifier_dec(void)
4907 {
4908 	static_branch_dec(&preempt_notifier_key);
4909 }
4910 EXPORT_SYMBOL_GPL(preempt_notifier_dec);
4911 
4912 /**
4913  * preempt_notifier_register - tell me when current is being preempted & rescheduled
4914  * @notifier: notifier struct to register
4915  */
4916 void preempt_notifier_register(struct preempt_notifier *notifier)
4917 {
4918 	if (!static_branch_unlikely(&preempt_notifier_key))
4919 		WARN(1, "registering preempt_notifier while notifiers disabled\n");
4920 
4921 	hlist_add_head(&notifier->link, &current->preempt_notifiers);
4922 }
4923 EXPORT_SYMBOL_GPL(preempt_notifier_register);
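/*
 * A minimal registration sketch (my_ops, my_sched_in, my_sched_out and
 * my_notifier are hypothetical; sched_in fires when current is scheduled
 * in, sched_out when it is scheduled out). The static key must be bumped
 * via preempt_notifier_inc() before registering:
 *
 *	static struct preempt_notifier_ops my_ops = {
 *		.sched_in  = my_sched_in,
 *		.sched_out = my_sched_out,
 *	};
 *
 *	preempt_notifier_inc();
 *	preempt_notifier_init(&my_notifier, &my_ops);
 *	preempt_notifier_register(&my_notifier);
 */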
4924 
4925 /**
4926  * preempt_notifier_unregister - no longer interested in preemption notifications
4927  * @notifier: notifier struct to unregister
4928  *
4929  * This is *not* safe to call from within a preemption notifier.
4930  */
4931 void preempt_notifier_unregister(struct preempt_notifier *notifier)
4932 {
4933 	hlist_del(&notifier->link);
4934 }
4935 EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
4936 
4937 static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
4938 {
4939 	struct preempt_notifier *notifier;
4940 
4941 	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
4942 		notifier->ops->sched_in(notifier, raw_smp_processor_id());
4943 }
4944 
4945 static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
4946 {
4947 	if (static_branch_unlikely(&preempt_notifier_key))
4948 		__fire_sched_in_preempt_notifiers(curr);
4949 }
4950 
4951 static void
4952 __fire_sched_out_preempt_notifiers(struct task_struct *curr,
4953 				   struct task_struct *next)
4954 {
4955 	struct preempt_notifier *notifier;
4956 
4957 	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
4958 		notifier->ops->sched_out(notifier, next);
4959 }
4960 
4961 static __always_inline void
4962 fire_sched_out_preempt_notifiers(struct task_struct *curr,
4963 				 struct task_struct *next)
4964 {
4965 	if (static_branch_unlikely(&preempt_notifier_key))
4966 		__fire_sched_out_preempt_notifiers(curr, next);
4967 }
4968 
4969 #else /* !CONFIG_PREEMPT_NOTIFIERS */
4970 
4971 static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
4972 {
4973 }
4974 
4975 static inline void
4976 fire_sched_out_preempt_notifiers(struct task_struct *curr,
4977 				 struct task_struct *next)
4978 {
4979 }
4980 
4981 #endif /* CONFIG_PREEMPT_NOTIFIERS */
4982 
4983 static inline void prepare_task(struct task_struct *next)
4984 {
4985 #ifdef CONFIG_SMP
4986 	/*
4987 	 * Claim the task as running, we do this before switching to it
4988 	 * such that any running task will have this set.
4989 	 *
4990 	 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and
4991 	 * its ordering comment.
4992 	 */
4993 	WRITE_ONCE(next->on_cpu, 1);
4994 #endif
4995 }
4996 
4997 static inline void finish_task(struct task_struct *prev)
4998 {
4999 #ifdef CONFIG_SMP
5000 	/*
5001 	 * This must be the very last reference to @prev from this CPU. After
5002 	 * p->on_cpu is cleared, the task can be moved to a different CPU. We
5003 	 * must ensure this doesn't happen until the switch is completely
5004 	 * finished.
5005 	 *
5006 	 * In particular, the load of prev->state in finish_task_switch() must
5007 	 * happen before this.
5008 	 *
5009 	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
5010 	 */
5011 	smp_store_release(&prev->on_cpu, 0);
5012 #endif
5013 }
5014 
5015 #ifdef CONFIG_SMP
5016 
5017 static void do_balance_callbacks(struct rq *rq, struct balance_callback *head)
5018 {
5019 	void (*func)(struct rq *rq);
5020 	struct balance_callback *next;
5021 
5022 	lockdep_assert_rq_held(rq);
5023 
5024 	while (head) {
5025 		func = (void (*)(struct rq *))head->func;
5026 		next = head->next;
5027 		head->next = NULL;
5028 		head = next;
5029 
5030 		func(rq);
5031 	}
5032 }
5033 
5034 static void balance_push(struct rq *rq);
5035 
5036 /*
5037  * balance_push_callback is a right abuse of the callback interface and plays
5038  * by significantly different rules.
5039  *
5040  * Where the normal balance_callback's purpose is to be run in the same context
5041  * that queued it (only later, when it's safe to drop rq->lock again),
5042  * balance_push_callback is specifically targeted at __schedule().
5043  *
5044  * This abuse is tolerated because it places all the unlikely/odd cases behind
5045  * a single test, namely: rq->balance_callback == NULL.
5046  */
5047 struct balance_callback balance_push_callback = {
5048 	.next = NULL,
5049 	.func = balance_push,
5050 };
5051 
5052 static inline struct balance_callback *
5053 __splice_balance_callbacks(struct rq *rq, bool split)
5054 {
5055 	struct balance_callback *head = rq->balance_callback;
5056 
5057 	if (likely(!head))
5058 		return NULL;
5059 
5060 	lockdep_assert_rq_held(rq);
5061 	/*
5062 	 * Must not take balance_push_callback off the list when
5063 	 * splice_balance_callbacks() and balance_callbacks() are not
5064 	 * in the same rq->lock section.
5065 	 *
5066 	 * In that case it would be possible for __schedule() to interleave
5067 	 * and observe the list empty.
5068 	 */
5069 	if (split && head == &balance_push_callback)
5070 		head = NULL;
5071 	else
5072 		rq->balance_callback = NULL;
5073 
5074 	return head;
5075 }
5076 
5077 struct balance_callback *splice_balance_callbacks(struct rq *rq)
5078 {
5079 	return __splice_balance_callbacks(rq, true);
5080 }
5081 
5082 static void __balance_callbacks(struct rq *rq)
5083 {
5084 	do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
5085 }
5086 
5087 void balance_callbacks(struct rq *rq, struct balance_callback *head)
5088 {
5089 	unsigned long flags;
5090 
5091 	if (unlikely(head)) {
5092 		raw_spin_rq_lock_irqsave(rq, flags);
5093 		do_balance_callbacks(rq, head);
5094 		raw_spin_rq_unlock_irqrestore(rq, flags);
5095 	}
5096 }
5097 
5098 #else
5099 
5100 static inline void __balance_callbacks(struct rq *rq)
5101 {
5102 }
5103 
5104 #endif
5105 
5106 static inline void
5107 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
5108 {
5109 	/*
5110 	 * The runqueue lock will be released by the next
5111 	 * task (which is an invalid locking op but in the case
5112 	 * of the scheduler it's an obvious special-case), so we
5113 	 * do an early lockdep release here:
5114 	 */
5115 	rq_unpin_lock(rq, rf);
5116 	spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_);
5117 #ifdef CONFIG_DEBUG_SPINLOCK
5118 	/* this is a valid case when another task releases the spinlock */
5119 	rq_lockp(rq)->owner = next;
5120 #endif
5121 }
5122 
5123 static inline void finish_lock_switch(struct rq *rq)
5124 {
5125 	/*
5126 	 * If we are tracking spinlock dependencies then we have to
5127 	 * fix up the runqueue lock - which gets 'carried over' from
5128 	 * prev into current:
5129 	 */
5130 	spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_);
5131 	__balance_callbacks(rq);
5132 	raw_spin_rq_unlock_irq(rq);
5133 }
5134 
5135 /*
5136  * NOP if the arch has not defined these:
5137  */
5138 
5139 #ifndef prepare_arch_switch
5140 # define prepare_arch_switch(next)	do { } while (0)
5141 #endif
5142 
5143 #ifndef finish_arch_post_lock_switch
5144 # define finish_arch_post_lock_switch()	do { } while (0)
5145 #endif
5146 
5147 static inline void kmap_local_sched_out(void)
5148 {
5149 #ifdef CONFIG_KMAP_LOCAL
5150 	if (unlikely(current->kmap_ctrl.idx))
5151 		__kmap_local_sched_out();
5152 #endif
5153 }
5154 
5155 static inline void kmap_local_sched_in(void)
5156 {
5157 #ifdef CONFIG_KMAP_LOCAL
5158 	if (unlikely(current->kmap_ctrl.idx))
5159 		__kmap_local_sched_in();
5160 #endif
5161 }
5162 
5163 /**
5164  * prepare_task_switch - prepare to switch tasks
5165  * @rq: the runqueue preparing to switch
5166  * @prev: the current task that is being switched out
5167  * @next: the task we are going to switch to.
5168  *
5169  * This is called with the rq lock held and interrupts off. It must
5170  * be paired with a subsequent finish_task_switch after the context
5171  * switch.
5172  *
5173  * prepare_task_switch sets up locking and calls architecture specific
5174  * hooks.
5175  */
5176 static inline void
5177 prepare_task_switch(struct rq *rq, struct task_struct *prev,
5178 		    struct task_struct *next)
5179 {
5180 	kcov_prepare_switch(prev);
5181 	sched_info_switch(rq, prev, next);
5182 	perf_event_task_sched_out(prev, next);
5183 	rseq_preempt(prev);
5184 	fire_sched_out_preempt_notifiers(prev, next);
5185 	kmap_local_sched_out();
5186 	prepare_task(next);
5187 	prepare_arch_switch(next);
5188 }
5189 
5190 /**
5191  * finish_task_switch - clean up after a task-switch
5192  * @prev: the thread we just switched away from.
5193  *
5194  * finish_task_switch must be called after the context switch, paired
5195  * with a prepare_task_switch call before the context switch.
5196  * finish_task_switch will reconcile locking set up by prepare_task_switch,
5197  * and do any other architecture-specific cleanup actions.
5198  *
5199  * Note that we may have delayed dropping an mm in context_switch(). If
5200  * so, we finish that here outside of the runqueue lock. (Doing it
5201  * with the lock held can cause deadlocks; see schedule() for
5202  * details.)
5203  *
5204  * The context switch has flipped the stack from under us and restored the
5205  * local variables which were saved when this task called schedule() in the
5206  * past. 'prev == current' is still correct but we need to recalculate this_rq
5207  * because prev may have moved to another CPU.
5208  */
5209 static struct rq *finish_task_switch(struct task_struct *prev)
5210 	__releases(rq->lock)
5211 {
5212 	struct rq *rq = this_rq();
5213 	struct mm_struct *mm = rq->prev_mm;
5214 	unsigned int prev_state;
5215 
5216 	/*
5217 	 * The previous task will have left us with a preempt_count of 2
5218 	 * because it left us after:
5219 	 *
5220 	 *	schedule()
5221 	 *	  preempt_disable();			// 1
5222 	 *	  __schedule()
5223 	 *	    raw_spin_lock_irq(&rq->lock)	// 2
5224 	 *
5225 	 * Also, see FORK_PREEMPT_COUNT.
5226 	 */
5227 	if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
5228 		      "corrupted preempt_count: %s/%d/0x%x\n",
5229 		      current->comm, current->pid, preempt_count()))
5230 		preempt_count_set(FORK_PREEMPT_COUNT);
5231 
5232 	rq->prev_mm = NULL;
5233 
5234 	/*
5235 	 * A task struct has one reference for the use as "current".
5236 	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
5237 	 * schedule one last time. The schedule call will never return, and
5238 	 * the scheduled task must drop that reference.
5239 	 *
5240 	 * We must observe prev->state before clearing prev->on_cpu (in
5241 	 * finish_task), otherwise a concurrent wakeup can get prev
5242 	 * running on another CPU and we could race with its RUNNING -> DEAD
5243 	 * transition, resulting in a double drop.
5244 	 */
5245 	prev_state = READ_ONCE(prev->__state);
5246 	vtime_task_switch(prev);
5247 	perf_event_task_sched_in(prev, current);
5248 	finish_task(prev);
5249 	tick_nohz_task_switch();
5250 	finish_lock_switch(rq);
5251 	finish_arch_post_lock_switch();
5252 	kcov_finish_switch(current);
5253 	/*
5254 	 * kmap_local_sched_out() is invoked with rq::lock held and
5255 	 * interrupts disabled. There is no requirement for that, but the
5256 	 * sched out code does not have an interrupt enabled section.
5257 	 * Restoring the maps on sched in does not require interrupts being
5258 	 * disabled either.
5259 	 */
5260 	kmap_local_sched_in();
5261 
5262 	fire_sched_in_preempt_notifiers(current);
5263 	/*
5264 	 * When switching through a kernel thread, the loop in
5265 	 * membarrier_{private,global}_expedited() may have observed that
5266 	 * kernel thread and not issued an IPI. It is therefore possible to
5267 	 * schedule between user->kernel->user threads without passing through
5268 	 * switch_mm(). Membarrier requires a barrier after storing to
5269 	 * rq->curr, before returning to userspace, so provide them here:
5270 	 *
5271 	 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
5272 	 *   provided by mmdrop_lazy_tlb(),
5273 	 * - a sync_core for SYNC_CORE.
5274 	 */
5275 	if (mm) {
5276 		membarrier_mm_sync_core_before_usermode(mm);
5277 		mmdrop_lazy_tlb_sched(mm);
5278 	}
5279 
5280 	if (unlikely(prev_state == TASK_DEAD)) {
5281 		if (prev->sched_class->task_dead)
5282 			prev->sched_class->task_dead(prev);
5283 
5284 		/* Task is done with its stack. */
5285 		put_task_stack(prev);
5286 
5287 		put_task_struct_rcu_user(prev);
5288 	}
5289 
5290 	return rq;
5291 }
5292 
5293 /**
5294  * schedule_tail - first thing a freshly forked thread must call.
5295  * @prev: the thread we just switched away from.
5296  */
5297 asmlinkage __visible void schedule_tail(struct task_struct *prev)
5298 	__releases(rq->lock)
5299 {
5300 	/*
5301 	 * New tasks start with FORK_PREEMPT_COUNT, see there and
5302 	 * finish_task_switch() for details.
5303 	 *
5304 	 * finish_task_switch() will drop rq->lock and lower preempt_count
5305 	 * and the preempt_enable() will end up enabling preemption (on
5306 	 * PREEMPT_COUNT kernels).
5307 	 */
5308 
5309 	finish_task_switch(prev);
5310 	preempt_enable();
5311 
5312 	if (current->set_child_tid)
5313 		put_user(task_pid_vnr(current), current->set_child_tid);
5314 
5315 	calculate_sigpending();
5316 }
5317 
5318 /*
5319  * context_switch - switch to the new MM and the new thread's register state.
5320  */
5321 static __always_inline struct rq *
5322 context_switch(struct rq *rq, struct task_struct *prev,
5323 	       struct task_struct *next, struct rq_flags *rf)
5324 {
5325 	prepare_task_switch(rq, prev, next);
5326 
5327 	/*
5328 	 * For paravirt, this is coupled with an exit in switch_to to
5329 	 * combine the page table reload and the switch backend into
5330 	 * one hypercall.
5331 	 */
5332 	arch_start_context_switch(prev);
5333 
5334 	/*
5335 	 * kernel -> kernel   lazy + transfer active
5336 	 *   user -> kernel   lazy + mmgrab_lazy_tlb() active
5337 	 *
5338 	 * kernel ->   user   switch + mmdrop_lazy_tlb() active
5339 	 *   user ->   user   switch
5340 	 *
5341 	 * switch_mm_cid() needs to be updated if the barriers provided
5342 	 * by context_switch() are modified.
5343 	 */
5344 	if (!next->mm) {                                // to kernel
5345 		enter_lazy_tlb(prev->active_mm, next);
5346 
5347 		next->active_mm = prev->active_mm;
5348 		if (prev->mm)                           // from user
5349 			mmgrab_lazy_tlb(prev->active_mm);
5350 		else
5351 			prev->active_mm = NULL;
5352 	} else {                                        // to user
5353 		membarrier_switch_mm(rq, prev->active_mm, next->mm);
5354 		/*
5355 		 * sys_membarrier() requires an smp_mb() between setting
5356 		 * rq->curr / membarrier_switch_mm() and returning to userspace.
5357 		 *
5358 		 * The below provides this either through switch_mm(), or in
5359 		 * case 'prev->active_mm == next->mm' through
5360 		 * finish_task_switch()'s mmdrop().
5361 		 */
5362 		switch_mm_irqs_off(prev->active_mm, next->mm, next);
5363 		lru_gen_use_mm(next->mm);
5364 
5365 		if (!prev->mm) {                        // from kernel
5366 			/* will mmdrop_lazy_tlb() in finish_task_switch(). */
5367 			rq->prev_mm = prev->active_mm;
5368 			prev->active_mm = NULL;
5369 		}
5370 	}
5371 
5372 	/* switch_mm_cid() requires the memory barriers above. */
5373 	switch_mm_cid(rq, prev, next);
5374 
5375 	prepare_lock_switch(rq, next, rf);
5376 
5377 	/* Here we just switch the register state and the stack. */
5378 	switch_to(prev, next, prev);
5379 	barrier();
5380 
5381 	return finish_task_switch(prev);
5382 }
5383 
5384 /*
5385  * nr_running and nr_context_switches:
5386  *
5387  * externally visible scheduler statistics: current number of runnable
5388  * threads, total number of context switches performed since bootup.
5389  */
5390 unsigned int nr_running(void)
5391 {
5392 	unsigned int i, sum = 0;
5393 
5394 	for_each_online_cpu(i)
5395 		sum += cpu_rq(i)->nr_running;
5396 
5397 	return sum;
5398 }
5399 
5400 /*
5401  * Check if only the current task is running on the CPU.
5402  *
5403  * Caution: this function does not check that the caller has disabled
5404  * preemption, thus the result might have a time-of-check-to-time-of-use
5405  * race.  The caller is responsible to use it correctly, for example:
5406  * race.  The caller is responsible for using it correctly, for example:
5407  * - from a non-preemptible section (of course)
5408  *
5409  * - from a thread that is bound to a single CPU
5410  *
5411  * - in a loop with very short iterations (e.g. a polling loop)
5412  */
5413 bool single_task_running(void)
5414 {
5415 	return raw_rq()->nr_running == 1;
5416 }
5417 EXPORT_SYMBOL(single_task_running);
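/*
 * Editorial sketch, not part of the source: the short-iteration polling
 * loop that the comment above allows, in the style of haltpoll-like idle
 * polling. poll_until_contended() and the deadline are hypothetical.
 */
static void poll_until_contended(u64 deadline_ns)
{
	while (ktime_get_ns() < deadline_ns) {
		if (!single_task_running())
			break;		/* another task became runnable */
		cpu_relax();
	}
}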
5418 
5419 unsigned long long nr_context_switches_cpu(int cpu)
5420 {
5421 	return cpu_rq(cpu)->nr_switches;
5422 }
5423 
5424 unsigned long long nr_context_switches(void)
5425 {
5426 	int i;
5427 	unsigned long long sum = 0;
5428 
5429 	for_each_possible_cpu(i)
5430 		sum += cpu_rq(i)->nr_switches;
5431 
5432 	return sum;
5433 }
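/*
 * Editorial note, not part of the source: nr_context_switches() backs the
 * "ctxt" line of /proc/stat (see fs/proc/stat.c), e.g.
 *
 *	$ grep ctxt /proc/stat
 *	ctxt 1593124247		(illustrative value)
 */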
5434 
5435 /*
5436  * Consumers of these two interfaces, such as the cpuidle menu governor,
5437  * are using nonsensical data: they prefer shallow idle-state selection
5438  * for a CPU with IO-wait pending, even though the waiting task might not
5439  * end up running on that CPU when it does become runnable.
5440  */
5441 
5442 unsigned int nr_iowait_cpu(int cpu)
5443 {
5444 	return atomic_read(&cpu_rq(cpu)->nr_iowait);
5445 }
5446 
5447 /*
5448  * IO-wait accounting, and how it's mostly bollocks (on SMP).
5449  *
5450  * The idea behind IO-wait accounting is to account the idle time that we could
5451  * have spent running if it were not for IO. That is, if we were to improve the
5452  * storage performance, we'd have a proportional reduction in IO-wait time.
5453  *
5454  * This all works nicely on UP, where, when a task blocks on IO, we account
5455  * idle time as IO-wait, because if the storage were faster, it could've been
5456  * running and we'd not be idle.
5457  *
5458  * This has been extended to SMP, by doing the same for each CPU. This however
5459  * is broken.
5460  *
5461  * Imagine for instance the case where two tasks block on one CPU; only that
5462  * CPU will have IO-wait accounted, while the other has regular idle. Yet,
5463  * if the storage were faster, both could've run at the same time,
5464  * utilising both CPUs.
5465  *
5466  * This means, that when looking globally, the current IO-wait accounting on
5467  * SMP is a lower bound, due to under-accounting.
5468  *
5469  * Worse, since the numbers are provided per CPU, they are sometimes
5470  * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
5471  * associated with any one particular CPU; it can wake up on a different CPU
5472  * than the one it blocked on. This means the per-CPU IO-wait number is
5473  * meaningless.
5473  *
5474  * Task CPU affinities can make all that even more 'interesting'.
5475  */
5476 
5477 unsigned int nr_iowait(void)
5478 {
5479 	unsigned int i, sum = 0;
5480 
5481 	for_each_possible_cpu(i)
5482 		sum += nr_iowait_cpu(i);
5483 
5484 	return sum;
5485 }
5486 
5487 #ifdef CONFIG_SMP
5488 
5489 /*
5490  * sched_exec - execve() is a valuable balancing opportunity, because at
5491  * this point the task has the smallest effective memory and cache footprint.
5492  */
5493 void sched_exec(void)
5494 {
5495 	struct task_struct *p = current;
5496 	struct migration_arg arg;
5497 	int dest_cpu;
5498 
5499 	scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
5500 		dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC);
5501 		if (dest_cpu == smp_processor_id())
5502 			return;
5503 
5504 		if (unlikely(!cpu_active(dest_cpu)))
5505 			return;
5506 
5507 		arg = (struct migration_arg){ p, dest_cpu };
5508 	}
5509 	stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
5510 }
5511 
5512 #endif
5513 
5514 DEFINE_PER_CPU(struct kernel_stat, kstat);
5515 DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
5516 
5517 EXPORT_PER_CPU_SYMBOL(kstat);
5518 EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
5519 
5520 /*
5521  * The function fair_sched_class.update_curr accesses the struct curr
5522  * and its field curr->exec_start; when called from task_sched_runtime(),
5523  * we observe a high rate of cache misses in practice.
5524  * Prefetching this data results in improved performance.
5525  */
5526 static inline void prefetch_curr_exec_start(struct task_struct *p)
5527 {
5528 #ifdef CONFIG_FAIR_GROUP_SCHED
5529 	struct sched_entity *curr = p->se.cfs_rq->curr;
5530 #else
5531 	struct sched_entity *curr = task_rq(p)->cfs.curr;
5532 #endif
5533 	prefetch(curr);
5534 	prefetch(&curr->exec_start);
5535 }
5536 
5537 /*
5538  * Return accounted runtime for the task.
5539  * In case the task is currently running, return the runtime plus current's
5540  * pending runtime that has not been accounted yet.
5541  */
5542 unsigned long long task_sched_runtime(struct task_struct *p)
5543 {
5544 	struct rq_flags rf;
5545 	struct rq *rq;
5546 	u64 ns;
5547 
5548 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
5549 	/*
5550 	 * 64-bit doesn't need locks to atomically read a 64-bit value.
5551 	 * So we have an optimization chance when the task's delta_exec is 0.
5552 	 * Reading ->on_cpu is racy, but this is OK.
5553 	 *
5554 	 * If we race with it leaving CPU, we'll take a lock. So we're correct.
5555 	 * If we race with it entering CPU, unaccounted time is 0. This is
5556 	 * indistinguishable from the read occurring a few cycles earlier.
5557 	 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
5558 	 * been accounted, so we're correct here as well.
5559 	 */
5560 	if (!p->on_cpu || !task_on_rq_queued(p))
5561 		return p->se.sum_exec_runtime;
5562 #endif
5563 
5564 	rq = task_rq_lock(p, &rf);
5565 	/*
5566 	 * Must be ->curr _and_ ->on_rq.  If dequeued, we would
5567 	 * project cycles that may never be accounted to this
5568 	 * thread, breaking clock_gettime().
5569 	 */
5570 	if (task_current_donor(rq, p) && task_on_rq_queued(p)) {
5571 		prefetch_curr_exec_start(p);
5572 		update_rq_clock(rq);
5573 		p->sched_class->update_curr(rq);
5574 	}
5575 	ns = p->se.sum_exec_runtime;
5576 	task_rq_unlock(rq, p, &rf);
5577 
5578 	return ns;
5579 }
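/*
 * Editorial sketch, not part of the source: the userspace consumer this
 * function ultimately serves. CLOCK_THREAD_CPUTIME_ID maps to
 * CPUCLOCK_SCHED, whose posix-cpu-timers sample path ends in
 * task_sched_runtime() -- hence the care above not to project cycles that
 * may never be accounted.
 */
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
	printf("thread runtime: %lld.%09ld s\n",
	       (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}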
5580 
5581 #ifdef CONFIG_SCHED_DEBUG
5582 static u64 cpu_resched_latency(struct rq *rq)
5583 {
5584 	int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms);
5585 	u64 resched_latency, now = rq_clock(rq);
5586 	static bool warned_once;
5587 
5588 	if (sysctl_resched_latency_warn_once && warned_once)
5589 		return 0;
5590 
5591 	if (!need_resched() || !latency_warn_ms)
5592 		return 0;
5593 
5594 	if (system_state == SYSTEM_BOOTING)
5595 		return 0;
5596 
5597 	if (!rq->last_seen_need_resched_ns) {
5598 		rq->last_seen_need_resched_ns = now;
5599 		rq->ticks_without_resched = 0;
5600 		return 0;
5601 	}
5602 
5603 	rq->ticks_without_resched++;
5604 	resched_latency = now - rq->last_seen_need_resched_ns;
5605 	if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC)
5606 		return 0;
5607 
5608 	warned_once = true;
5609 
5610 	return resched_latency;
5611 }
5612 
5613 static int __init setup_resched_latency_warn_ms(char *str)
5614 {
5615 	long val;
5616 
5617 	if ((kstrtol(str, 0, &val))) {
5618 		pr_warn("Unable to set resched_latency_warn_ms\n");
5619 		return 1;
5620 	}
5621 
5622 	sysctl_resched_latency_warn_ms = val;
5623 	return 1;
5624 }
5625 __setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms);
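/*
 * Editorial note, not part of the source: the parameter above is set on
 * the kernel command line, e.g.
 *
 *	resched_latency_warn_ms=100
 *
 * and takes effect when the LATENCY_WARN scheduler feature is enabled
 * (see the sched_feat(LATENCY_WARN) checks in sched_tick() below).
 */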
5626 #else
5627 static inline u64 cpu_resched_latency(struct rq *rq) { return 0; }
5628 #endif /* CONFIG_SCHED_DEBUG */
5629 
5630 /*
5631  * This function gets called by the timer code, with HZ frequency.
5632  * We call it with interrupts disabled.
5633  */
5634 void sched_tick(void)
5635 {
5636 	int cpu = smp_processor_id();
5637 	struct rq *rq = cpu_rq(cpu);
5638 	/* accounting goes to the donor task */
5639 	struct task_struct *donor;
5640 	struct rq_flags rf;
5641 	unsigned long hw_pressure;
5642 	u64 resched_latency;
5643 
5644 	if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE))
5645 		arch_scale_freq_tick();
5646 
5647 	sched_clock_tick();
5648 
5649 	rq_lock(rq, &rf);
5650 	donor = rq->donor;
5651 
5652 	psi_account_irqtime(rq, donor, NULL);
5653 
5654 	update_rq_clock(rq);
5655 	hw_pressure = arch_scale_hw_pressure(cpu_of(rq));
5656 	update_hw_load_avg(rq_clock_task(rq), rq, hw_pressure);
5657 
5658 	if (dynamic_preempt_lazy() && tif_test_bit(TIF_NEED_RESCHED_LAZY))
5659 		resched_curr(rq);
5660 
5661 	donor->sched_class->task_tick(rq, donor, 0);
5662 	if (sched_feat(LATENCY_WARN))
5663 		resched_latency = cpu_resched_latency(rq);
5664 	calc_global_load_tick(rq);
5665 	sched_core_tick(rq);
5666 	task_tick_mm_cid(rq, donor);
5667 	scx_tick(rq);
5668 
5669 	rq_unlock(rq, &rf);
5670 
5671 	if (sched_feat(LATENCY_WARN) && resched_latency)
5672 		resched_latency_warn(cpu, resched_latency);
5673 
5674 	perf_event_task_tick();
5675 
5676 	if (donor->flags & PF_WQ_WORKER)
5677 		wq_worker_tick(donor);
5678 
5679 #ifdef CONFIG_SMP
5680 	if (!scx_switched_all()) {
5681 		rq->idle_balance = idle_cpu(cpu);
5682 		sched_balance_trigger(rq);
5683 	}
5684 #endif
5685 }
5686 
5687 #ifdef CONFIG_NO_HZ_FULL
5688 
5689 struct tick_work {
5690 	int			cpu;
5691 	atomic_t		state;
5692 	struct delayed_work	work;
5693 };
5694 /* Values for ->state, see diagram below. */
5695 #define TICK_SCHED_REMOTE_OFFLINE	0
5696 #define TICK_SCHED_REMOTE_OFFLINING	1
5697 #define TICK_SCHED_REMOTE_RUNNING	2
5698 
5699 /*
5700  * State diagram for ->state:
5701  *
5702  *
5703  *          TICK_SCHED_REMOTE_OFFLINE
5704  *                    |   ^
5705  *                    |   |
5706  *                    |   | sched_tick_remote()
5707  *                    |   |
5708  *                    |   |
5709  *                    +--TICK_SCHED_REMOTE_OFFLINING
5710  *                    |   ^
5711  *                    |   |
5712  * sched_tick_start() |   | sched_tick_stop()
5713  *                    |   |
5714  *                    V   |
5715  *          TICK_SCHED_REMOTE_RUNNING
5716  *
5717  *
5718  * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
5719  * and sched_tick_start() are happy to leave the state in RUNNING.
5720  */
5721 
5722 static struct tick_work __percpu *tick_work_cpu;
5723 
5724 static void sched_tick_remote(struct work_struct *work)
5725 {
5726 	struct delayed_work *dwork = to_delayed_work(work);
5727 	struct tick_work *twork = container_of(dwork, struct tick_work, work);
5728 	int cpu = twork->cpu;
5729 	struct rq *rq = cpu_rq(cpu);
5730 	int os;
5731 
5732 	/*
5733 	 * Handle the tick only if it appears the remote CPU is running in full
5734 	 * dynticks mode. The check is racy by nature, but missing a tick or
5735 	 * having one too much is no big deal because the scheduler tick updates
5736 	 * having one too many is no big deal because the scheduler tick updates
5737 	 * of when exactly it is running.
5738 	 */
5739 	if (tick_nohz_tick_stopped_cpu(cpu)) {
5740 		guard(rq_lock_irq)(rq);
5741 		struct task_struct *curr = rq->curr;
5742 
5743 		if (cpu_online(cpu)) {
5744 			/*
5745 			 * Since this is a remote tick for full dynticks mode,
5746 			 * we are always sure that there is no proxy (only a
5747 			 * single task is running).
5748 			 */
5749 			SCHED_WARN_ON(rq->curr != rq->donor);
5750 			update_rq_clock(rq);
5751 
5752 			if (!is_idle_task(curr)) {
5753 				/*
5754 				 * Make sure the next tick runs within a
5755 				 * reasonable amount of time.
5756 				 */
5757 				u64 delta = rq_clock_task(rq) - curr->se.exec_start;
5758 				WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
5759 			}
5760 			curr->sched_class->task_tick(rq, curr, 0);
5761 
5762 			calc_load_nohz_remote(rq);
5763 		}
5764 	}
5765 
5766 	/*
5767 	 * Run the remote tick once per second (1Hz). This arbitrary
5768 	 * interval is long enough to avoid overload but short enough
5769 	 * to keep scheduler internal stats reasonably up to date.  But
5770 	 * first update state to reflect hotplug activity if required.
5771 	 */
5772 	os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
5773 	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
5774 	if (os == TICK_SCHED_REMOTE_RUNNING)
5775 		queue_delayed_work(system_unbound_wq, dwork, HZ);
5776 }
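/*
 * Editorial note, not part of the source: how the atomic above walks the
 * state machine. atomic_fetch_add_unless(&state, -1, RUNNING) decrements
 * unless the state is RUNNING:
 *
 *	RUNNING   (2) -> unchanged; the work re-queues itself above
 *	OFFLINING (1) -> OFFLINE (0); the tick stops, as requested by
 *			 sched_tick_stop()
 *	OFFLINE   (0) -> cannot legitimately happen; the WARN_ON_ONCE()
 *			 above catches it
 */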
5777 
5778 static void sched_tick_start(int cpu)
5779 {
5780 	int os;
5781 	struct tick_work *twork;
5782 
5783 	if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE))
5784 		return;
5785 
5786 	WARN_ON_ONCE(!tick_work_cpu);
5787 
5788 	twork = per_cpu_ptr(tick_work_cpu, cpu);
5789 	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
5790 	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
5791 	if (os == TICK_SCHED_REMOTE_OFFLINE) {
5792 		twork->cpu = cpu;
5793 		INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
5794 		queue_delayed_work(system_unbound_wq, &twork->work, HZ);
5795 	}
5796 }
5797 
5798 #ifdef CONFIG_HOTPLUG_CPU
5799 static void sched_tick_stop(int cpu)
5800 {
5801 	struct tick_work *twork;
5802 	int os;
5803 
5804 	if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE))
5805 		return;
5806 
5807 	WARN_ON_ONCE(!tick_work_cpu);
5808 
5809 	twork = per_cpu_ptr(tick_work_cpu, cpu);
5810 	/* There cannot be competing actions, but don't rely on stop-machine. */
5811 	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
5812 	WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
5813 	/* Don't cancel, as this would mess up the state machine. */
5814 }
5815 #endif /* CONFIG_HOTPLUG_CPU */
5816 
5817 int __init sched_tick_offload_init(void)
5818 {
5819 	tick_work_cpu = alloc_percpu(struct tick_work);
5820 	BUG_ON(!tick_work_cpu);
5821 	return 0;
5822 }
5823 
5824 #else /* !CONFIG_NO_HZ_FULL */
5825 static inline void sched_tick_start(int cpu) { }
5826 static inline void sched_tick_stop(int cpu) { }
5827 #endif
5828 
5829 #if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
5830 				defined(CONFIG_TRACE_PREEMPT_TOGGLE))
5831 /*
5832  * If the value passed in is equal to the current preempt count
5833  * then we just disabled preemption. Start timing the latency.
5834  */
5835 static inline void preempt_latency_start(int val)
5836 {
5837 	if (preempt_count() == val) {
5838 		unsigned long ip = get_lock_parent_ip();
5839 #ifdef CONFIG_DEBUG_PREEMPT
5840 		current->preempt_disable_ip = ip;
5841 #endif
5842 		trace_preempt_off(CALLER_ADDR0, ip);
5843 	}
5844 }
5845 
5846 void preempt_count_add(int val)
5847 {
5848 #ifdef CONFIG_DEBUG_PREEMPT
5849 	/*
5850 	 * Underflow?
5851 	 */
5852 	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
5853 		return;
5854 #endif
5855 	__preempt_count_add(val);
5856 #ifdef CONFIG_DEBUG_PREEMPT
5857 	/*
5858 	 * Spinlock count overflowing soon?
5859 	 */
5860 	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
5861 				PREEMPT_MASK - 10);
5862 #endif
5863 	preempt_latency_start(val);
5864 }
5865 EXPORT_SYMBOL(preempt_count_add);
5866 NOKPROBE_SYMBOL(preempt_count_add);
5867 
5868 /*
5869  * If the value passed in equals the current preempt count
5870  * then we just enabled preemption. Stop timing the latency.
5871  */
5872 static inline void preempt_latency_stop(int val)
5873 {
5874 	if (preempt_count() == val)
5875 		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
5876 }
5877 
5878 void preempt_count_sub(int val)
5879 {
5880 #ifdef CONFIG_DEBUG_PREEMPT
5881 	/*
5882 	 * Underflow?
5883 	 */
5884 	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
5885 		return;
5886 	/*
5887 	 * Is the spinlock portion underflowing?
5888 	 */
5889 	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
5890 			!(preempt_count() & PREEMPT_MASK)))
5891 		return;
5892 #endif
5893 
5894 	preempt_latency_stop(val);
5895 	__preempt_count_sub(val);
5896 }
5897 EXPORT_SYMBOL(preempt_count_sub);
5898 NOKPROBE_SYMBOL(preempt_count_sub);
5899 
5900 #else
5901 static inline void preempt_latency_start(int val) { }
5902 static inline void preempt_latency_stop(int val) { }
5903 #endif
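/*
 * Editorial sketch, not part of the source: the nesting behaviour the
 * hooks above implement. Only the outermost disable/enable pair starts
 * and stops the latency measurement, because only there does
 * preempt_count() equal the @val being added or subtracted.
 */
static void preempt_latency_nesting_sketch(void)
{
	preempt_disable();	/* 0 -> 1: trace_preempt_off()	*/
	preempt_disable();	/* 1 -> 2: no trace		*/
	preempt_enable();	/* 2 -> 1: no trace		*/
	preempt_enable();	/* 1 -> 0: trace_preempt_on()	*/
}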
5904 
5905 static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
5906 {
5907 #ifdef CONFIG_DEBUG_PREEMPT
5908 	return p->preempt_disable_ip;
5909 #else
5910 	return 0;
5911 #endif
5912 }
5913 
5914 /*
5915  * Print scheduling while atomic bug:
5916  */
5917 static noinline void __schedule_bug(struct task_struct *prev)
5918 {
5919 	/* Save this before calling printk(), since that will clobber it */
5920 	unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
5921 
5922 	if (oops_in_progress)
5923 		return;
5924 
5925 	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
5926 		prev->comm, prev->pid, preempt_count());
5927 
5928 	debug_show_held_locks(prev);
5929 	print_modules();
5930 	if (irqs_disabled())
5931 		print_irqtrace_events(prev);
5932 	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
5933 		pr_err("Preemption disabled at:");
5934 		print_ip_sym(KERN_ERR, preempt_disable_ip);
5935 	}
5936 	check_panic_on_warn("scheduling while atomic");
5937 
5938 	dump_stack();
5939 	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
5940 }
5941 
5942 /*
5943  * Various schedule()-time debugging checks and statistics:
5944  */
5945 static inline void schedule_debug(struct task_struct *prev, bool preempt)
5946 {
5947 #ifdef CONFIG_SCHED_STACK_END_CHECK
5948 	if (task_stack_end_corrupted(prev))
5949 		panic("corrupted stack end detected inside scheduler\n");
5950 
5951 	if (task_scs_end_corrupted(prev))
5952 		panic("corrupted shadow stack detected inside scheduler\n");
5953 #endif
5954 
5955 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
5956 	if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) {
5957 		printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
5958 			prev->comm, prev->pid, prev->non_block_count);
5959 		dump_stack();
5960 		add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
5961 	}
5962 #endif
5963 
5964 	if (unlikely(in_atomic_preempt_off())) {
5965 		__schedule_bug(prev);
5966 		preempt_count_set(PREEMPT_DISABLED);
5967 	}
5968 	rcu_sleep_check();
5969 	SCHED_WARN_ON(ct_state() == CT_STATE_USER);
5970 
5971 	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
5972 
5973 	schedstat_inc(this_rq()->sched_count);
5974 }
5975 
5976 static void prev_balance(struct rq *rq, struct task_struct *prev,
5977 			 struct rq_flags *rf)
5978 {
5979 	const struct sched_class *start_class = prev->sched_class;
5980 	const struct sched_class *class;
5981 
5982 #ifdef CONFIG_SCHED_CLASS_EXT
5983 	/*
5984 	 * SCX requires a balance() call before every pick_task() including when
5985 	 * waking up from SCHED_IDLE. If @start_class is below SCX, start from
5986 	 * SCX instead. Also, set a flag to detect missing balance() call.
5987 	 */
5988 	if (scx_enabled()) {
5989 		rq->scx.flags |= SCX_RQ_BAL_PENDING;
5990 		if (sched_class_above(&ext_sched_class, start_class))
5991 			start_class = &ext_sched_class;
5992 	}
5993 #endif
5994 
5995 	/*
5996 	 * We must do the balancing pass before put_prev_task(), such
5997 	 * that when we release the rq->lock the task is in the same
5998 	 * state as before we took rq->lock.
5999 	 *
6000 	 * We can terminate the balance pass as soon as we know there is
6001 	 * a runnable task of @class priority or higher.
6002 	 */
6003 	for_active_class_range(class, start_class, &idle_sched_class) {
6004 		if (class->balance && class->balance(rq, prev, rf))
6005 			break;
6006 	}
6007 }
6008 
6009 /*
6010  * Pick up the highest-prio task:
6011  */
6012 static inline struct task_struct *
6013 __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6014 {
6015 	const struct sched_class *class;
6016 	struct task_struct *p;
6017 
6018 	rq->dl_server = NULL;
6019 
6020 	if (scx_enabled())
6021 		goto restart;
6022 
6023 	/*
6024 	 * Optimization: we know that if all tasks are in the fair class we can
6025 	 * call that function directly, but only if the @prev task wasn't of a
6026 	 * higher scheduling class, because otherwise those lose the
6027 	 * higher scheduling class, because otherwise those classes lose the
6028 	 */
6029 	if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) &&
6030 		   rq->nr_running == rq->cfs.h_nr_queued)) {
6031 
6032 		p = pick_next_task_fair(rq, prev, rf);
6033 		if (unlikely(p == RETRY_TASK))
6034 			goto restart;
6035 
6036 		/* Assume the next prioritized class is idle_sched_class */
6037 		if (!p) {
6038 			p = pick_task_idle(rq);
6039 			put_prev_set_next_task(rq, prev, p);
6040 		}
6041 
6042 		return p;
6043 	}
6044 
6045 restart:
6046 	prev_balance(rq, prev, rf);
6047 
6048 	for_each_active_class(class) {
6049 		if (class->pick_next_task) {
6050 			p = class->pick_next_task(rq, prev);
6051 			if (p)
6052 				return p;
6053 		} else {
6054 			p = class->pick_task(rq);
6055 			if (p) {
6056 				put_prev_set_next_task(rq, prev, p);
6057 				return p;
6058 			}
6059 		}
6060 	}
6061 
6062 	BUG(); /* The idle class should always have a runnable task. */
6063 }
6064 
6065 #ifdef CONFIG_SCHED_CORE
6066 static inline bool is_task_rq_idle(struct task_struct *t)
6067 {
6068 	return (task_rq(t)->idle == t);
6069 }
6070 
6071 static inline bool cookie_equals(struct task_struct *a, unsigned long cookie)
6072 {
6073 	return is_task_rq_idle(a) || (a->core_cookie == cookie);
6074 }
6075 
6076 static inline bool cookie_match(struct task_struct *a, struct task_struct *b)
6077 {
6078 	if (is_task_rq_idle(a) || is_task_rq_idle(b))
6079 		return true;
6080 
6081 	return a->core_cookie == b->core_cookie;
6082 }
6083 
6084 static inline struct task_struct *pick_task(struct rq *rq)
6085 {
6086 	const struct sched_class *class;
6087 	struct task_struct *p;
6088 
6089 	rq->dl_server = NULL;
6090 
6091 	for_each_active_class(class) {
6092 		p = class->pick_task(rq);
6093 		if (p)
6094 			return p;
6095 	}
6096 
6097 	BUG(); /* The idle class should always have a runnable task. */
6098 }
6099 
6100 extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
6101 
6102 static void queue_core_balance(struct rq *rq);
6103 
6104 static struct task_struct *
6105 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6106 {
6107 	struct task_struct *next, *p, *max = NULL;
6108 	const struct cpumask *smt_mask;
6109 	bool fi_before = false;
6110 	bool core_clock_updated = (rq == rq->core);
6111 	unsigned long cookie;
6112 	int i, cpu, occ = 0;
6113 	struct rq *rq_i;
6114 	bool need_sync;
6115 
6116 	if (!sched_core_enabled(rq))
6117 		return __pick_next_task(rq, prev, rf);
6118 
6119 	cpu = cpu_of(rq);
6120 
6121 	/* Stopper task is switching into idle, no need for core-wide selection. */
6122 	if (cpu_is_offline(cpu)) {
6123 		/*
6124 		 * Reset core_pick so that we don't enter the fastpath when
6125 		 * coming online. core_pick would already be migrated to
6126 		 * another cpu during offline.
6127 		 */
6128 		rq->core_pick = NULL;
6129 		rq->core_dl_server = NULL;
6130 		return __pick_next_task(rq, prev, rf);
6131 	}
6132 
6133 	/*
6134 	 * If there were no {en,de}queues since we picked (IOW, the task
6135 	 * pointers are all still valid), and we haven't scheduled the last
6136 	 * pick yet, do so now.
6137 	 *
6138 	 * rq->core_pick can be NULL if no selection was made for a CPU because
6139 	 * it was either offline or went offline during a sibling's core-wide
6140 	 * selection. In this case, do a core-wide selection.
6141 	 */
6142 	if (rq->core->core_pick_seq == rq->core->core_task_seq &&
6143 	    rq->core->core_pick_seq != rq->core_sched_seq &&
6144 	    rq->core_pick) {
6145 		WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq);
6146 
6147 		next = rq->core_pick;
6148 		rq->dl_server = rq->core_dl_server;
6149 		rq->core_pick = NULL;
6150 		rq->core_dl_server = NULL;
6151 		goto out_set_next;
6152 	}
6153 
6154 	prev_balance(rq, prev, rf);
6155 
6156 	smt_mask = cpu_smt_mask(cpu);
6157 	need_sync = !!rq->core->core_cookie;
6158 
6159 	/* reset state */
6160 	rq->core->core_cookie = 0UL;
6161 	if (rq->core->core_forceidle_count) {
6162 		if (!core_clock_updated) {
6163 			update_rq_clock(rq->core);
6164 			core_clock_updated = true;
6165 		}
6166 		sched_core_account_forceidle(rq);
6167 		/* reset after accounting force idle */
6168 		rq->core->core_forceidle_start = 0;
6169 		rq->core->core_forceidle_count = 0;
6170 		rq->core->core_forceidle_occupation = 0;
6171 		need_sync = true;
6172 		fi_before = true;
6173 	}
6174 
6175 	/*
6176 	 * core->core_task_seq, core->core_pick_seq, rq->core_sched_seq
6177 	 *
6178 	 * @task_seq guards the task state ({en,de}queues)
6179 	 * @pick_seq is the @task_seq we did a selection on
6180 	 * @sched_seq is the @pick_seq we scheduled
6181 	 *
6182 	 * However, preemptions can cause multiple picks on the same task set.
6183 	 * 'Fix' this by also increasing @task_seq for every pick.
6184 	 */
6185 	rq->core->core_task_seq++;
6186 
6187 	/*
6188 	 * Optimize for common case where this CPU has no cookies
6189 	 * and there are no cookied tasks running on siblings.
6190 	 */
6191 	if (!need_sync) {
6192 		next = pick_task(rq);
6193 		if (!next->core_cookie) {
6194 			rq->core_pick = NULL;
6195 			rq->core_dl_server = NULL;
6196 			/*
6197 			 * For robustness, update the min_vruntime_fi for
6198 			 * unconstrained picks as well.
6199 			 */
6200 			WARN_ON_ONCE(fi_before);
6201 			task_vruntime_update(rq, next, false);
6202 			goto out_set_next;
6203 		}
6204 	}
6205 
6206 	/*
6207 	 * For each thread: do the regular task pick and find the max prio task
6208 	 * amongst them.
6209 	 *
6210 	 * Tie-break prio towards the current CPU
6211 	 */
6212 	for_each_cpu_wrap(i, smt_mask, cpu) {
6213 		rq_i = cpu_rq(i);
6214 
6215 		/*
6216 		 * Current cpu always has its clock updated on entrance to
6217 		 * pick_next_task(). If the current cpu is not the core,
6218 		 * the core may also have been updated above.
6219 		 */
6220 		if (i != cpu && (rq_i != rq->core || !core_clock_updated))
6221 			update_rq_clock(rq_i);
6222 
6223 		rq_i->core_pick = p = pick_task(rq_i);
6224 		rq_i->core_dl_server = rq_i->dl_server;
6225 
6226 		if (!max || prio_less(max, p, fi_before))
6227 			max = p;
6228 	}
6229 
6230 	cookie = rq->core->core_cookie = max->core_cookie;
6231 
6232 	/*
6233 	 * For each thread: try and find a runnable task that matches @max or
6234 	 * force idle.
6235 	 */
6236 	for_each_cpu(i, smt_mask) {
6237 		rq_i = cpu_rq(i);
6238 		p = rq_i->core_pick;
6239 
6240 		if (!cookie_equals(p, cookie)) {
6241 			p = NULL;
6242 			if (cookie)
6243 				p = sched_core_find(rq_i, cookie);
6244 			if (!p)
6245 				p = idle_sched_class.pick_task(rq_i);
6246 		}
6247 
6248 		rq_i->core_pick = p;
6249 		rq_i->core_dl_server = NULL;
6250 
6251 		if (p == rq_i->idle) {
6252 			if (rq_i->nr_running) {
6253 				rq->core->core_forceidle_count++;
6254 				if (!fi_before)
6255 					rq->core->core_forceidle_seq++;
6256 			}
6257 		} else {
6258 			occ++;
6259 		}
6260 	}
6261 
6262 	if (schedstat_enabled() && rq->core->core_forceidle_count) {
6263 		rq->core->core_forceidle_start = rq_clock(rq->core);
6264 		rq->core->core_forceidle_occupation = occ;
6265 	}
6266 
6267 	rq->core->core_pick_seq = rq->core->core_task_seq;
6268 	next = rq->core_pick;
6269 	rq->core_sched_seq = rq->core->core_pick_seq;
6270 
6271 	/* Something should have been selected for current CPU */
6272 	WARN_ON_ONCE(!next);
6273 
6274 	/*
6275 	 * Reschedule siblings
6276 	 *
6277 	 * NOTE: L1TF -- at this point we're no longer running the old task and
6278 	 * sending an IPI (below) ensures the sibling will no longer be running
6279 	 * their task. This ensures there is no inter-sibling overlap between
6280 	 * non-matching user state.
6281 	 */
6282 	for_each_cpu(i, smt_mask) {
6283 		rq_i = cpu_rq(i);
6284 
6285 		/*
6286 		 * An online sibling might have gone offline before a task
6287 		 * could be picked for it, or it might be offline but later
6288 		 * happen to come online, but it's too late and nothing was
6289 		 * picked for it.  That's Ok - it will pick tasks for itself,
6290 		 * so ignore it.
6291 		 */
6292 		if (!rq_i->core_pick)
6293 			continue;
6294 
6295 		/*
6296 		 * Update for new !FI->FI transitions, or if continuing to be in !FI:
6297 		 * fi_before     fi      update?
6298 		 *  0            0       1
6299 		 *  0            1       1
6300 		 *  1            0       1
6301 		 *  1            1       0
6302 		 */
6303 		if (!(fi_before && rq->core->core_forceidle_count))
6304 			task_vruntime_update(rq_i, rq_i->core_pick, !!rq->core->core_forceidle_count);
6305 
6306 		rq_i->core_pick->core_occupation = occ;
6307 
6308 		if (i == cpu) {
6309 			rq_i->core_pick = NULL;
6310 			rq_i->core_dl_server = NULL;
6311 			continue;
6312 		}
6313 
6314 		/* Did we break L1TF mitigation requirements? */
6315 		WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick));
6316 
6317 		if (rq_i->curr == rq_i->core_pick) {
6318 			rq_i->core_pick = NULL;
6319 			rq_i->core_dl_server = NULL;
6320 			continue;
6321 		}
6322 
6323 		resched_curr(rq_i);
6324 	}
6325 
6326 out_set_next:
6327 	put_prev_set_next_task(rq, prev, next);
6328 	if (rq->core->core_forceidle_count && next == rq->idle)
6329 		queue_core_balance(rq);
6330 
6331 	return next;
6332 }
6333 
6334 static bool try_steal_cookie(int this, int that)
6335 {
6336 	struct rq *dst = cpu_rq(this), *src = cpu_rq(that);
6337 	struct task_struct *p;
6338 	unsigned long cookie;
6339 	bool success = false;
6340 
6341 	guard(irq)();
6342 	guard(double_rq_lock)(dst, src);
6343 
6344 	cookie = dst->core->core_cookie;
6345 	if (!cookie)
6346 		return false;
6347 
6348 	if (dst->curr != dst->idle)
6349 		return false;
6350 
6351 	p = sched_core_find(src, cookie);
6352 	if (!p)
6353 		return false;
6354 
6355 	do {
6356 		if (p == src->core_pick || p == src->curr)
6357 			goto next;
6358 
6359 		if (!is_cpu_allowed(p, this))
6360 			goto next;
6361 
6362 		if (p->core_occupation > dst->idle->core_occupation)
6363 			goto next;
6364 		/*
6365 		 * sched_core_find() and sched_core_next() will ensure
6366 		 * that task @p is not throttled now, we also need to
6367 		 * check whether the runqueue of the destination CPU is
6368 		 * being throttled.
6369 		 */
6370 		if (sched_task_is_throttled(p, this))
6371 			goto next;
6372 
6373 		move_queued_task_locked(src, dst, p);
6374 		resched_curr(dst);
6375 
6376 		success = true;
6377 		break;
6378 
6379 next:
6380 		p = sched_core_next(p, cookie);
6381 	} while (p);
6382 
6383 	return success;
6384 }
6385 
6386 static bool steal_cookie_task(int cpu, struct sched_domain *sd)
6387 {
6388 	int i;
6389 
6390 	for_each_cpu_wrap(i, sched_domain_span(sd), cpu + 1) {
6391 		if (i == cpu)
6392 			continue;
6393 
6394 		if (need_resched())
6395 			break;
6396 
6397 		if (try_steal_cookie(cpu, i))
6398 			return true;
6399 	}
6400 
6401 	return false;
6402 }
6403 
6404 static void sched_core_balance(struct rq *rq)
6405 {
6406 	struct sched_domain *sd;
6407 	int cpu = cpu_of(rq);
6408 
6409 	guard(preempt)();
6410 	guard(rcu)();
6411 
6412 	raw_spin_rq_unlock_irq(rq);
6413 	for_each_domain(cpu, sd) {
6414 		if (need_resched())
6415 			break;
6416 
6417 		if (steal_cookie_task(cpu, sd))
6418 			break;
6419 	}
6420 	raw_spin_rq_lock_irq(rq);
6421 }
6422 
6423 static DEFINE_PER_CPU(struct balance_callback, core_balance_head);
6424 
6425 static void queue_core_balance(struct rq *rq)
6426 {
6427 	if (!sched_core_enabled(rq))
6428 		return;
6429 
6430 	if (!rq->core->core_cookie)
6431 		return;
6432 
6433 	if (!rq->nr_running) /* not forced idle */
6434 		return;
6435 
6436 	queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance);
6437 }
6438 
6439 DEFINE_LOCK_GUARD_1(core_lock, int,
6440 		    sched_core_lock(*_T->lock, &_T->flags),
6441 		    sched_core_unlock(*_T->lock, &_T->flags),
6442 		    unsigned long flags)
6443 
6444 static void sched_core_cpu_starting(unsigned int cpu)
6445 {
6446 	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
6447 	struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
6448 	int t;
6449 
6450 	guard(core_lock)(&cpu);
6451 
6452 	WARN_ON_ONCE(rq->core != rq);
6453 
6454 	/* if we're the first, we'll be our own leader */
6455 	if (cpumask_weight(smt_mask) == 1)
6456 		return;
6457 
6458 	/* find the leader */
6459 	for_each_cpu(t, smt_mask) {
6460 		if (t == cpu)
6461 			continue;
6462 		rq = cpu_rq(t);
6463 		if (rq->core == rq) {
6464 			core_rq = rq;
6465 			break;
6466 		}
6467 	}
6468 
6469 	if (WARN_ON_ONCE(!core_rq)) /* whoopsie */
6470 		return;
6471 
6472 	/* install and validate core_rq */
6473 	for_each_cpu(t, smt_mask) {
6474 		rq = cpu_rq(t);
6475 
6476 		if (t == cpu)
6477 			rq->core = core_rq;
6478 
6479 		WARN_ON_ONCE(rq->core != core_rq);
6480 	}
6481 }
6482 
6483 static void sched_core_cpu_deactivate(unsigned int cpu)
6484 {
6485 	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
6486 	struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
6487 	int t;
6488 
6489 	guard(core_lock)(&cpu);
6490 
6491 	/* if we're the last man standing, nothing to do */
6492 	if (cpumask_weight(smt_mask) == 1) {
6493 		WARN_ON_ONCE(rq->core != rq);
6494 		return;
6495 	}
6496 
6497 	/* if we're not the leader, nothing to do */
6498 	if (rq->core != rq)
6499 		return;
6500 
6501 	/* find a new leader */
6502 	for_each_cpu(t, smt_mask) {
6503 		if (t == cpu)
6504 			continue;
6505 		core_rq = cpu_rq(t);
6506 		break;
6507 	}
6508 
6509 	if (WARN_ON_ONCE(!core_rq)) /* impossible */
6510 		return;
6511 
6512 	/* copy the shared state to the new leader */
6513 	core_rq->core_task_seq             = rq->core_task_seq;
6514 	core_rq->core_pick_seq             = rq->core_pick_seq;
6515 	core_rq->core_cookie               = rq->core_cookie;
6516 	core_rq->core_forceidle_count      = rq->core_forceidle_count;
6517 	core_rq->core_forceidle_seq        = rq->core_forceidle_seq;
6518 	core_rq->core_forceidle_occupation = rq->core_forceidle_occupation;
6519 
6520 	/*
6521 	 * Accounting edge for forced idle is handled in pick_next_task().
6522 	 * Don't need another one here, since the hotplug thread shouldn't
6523 	 * have a cookie.
6524 	 */
6525 	core_rq->core_forceidle_start = 0;
6526 
6527 	/* install new leader */
6528 	for_each_cpu(t, smt_mask) {
6529 		rq = cpu_rq(t);
6530 		rq->core = core_rq;
6531 	}
6532 }
6533 
6534 static inline void sched_core_cpu_dying(unsigned int cpu)
6535 {
6536 	struct rq *rq = cpu_rq(cpu);
6537 
6538 	if (rq->core != rq)
6539 		rq->core = rq;
6540 }
6541 
6542 #else /* !CONFIG_SCHED_CORE */
6543 
6544 static inline void sched_core_cpu_starting(unsigned int cpu) {}
6545 static inline void sched_core_cpu_deactivate(unsigned int cpu) {}
6546 static inline void sched_core_cpu_dying(unsigned int cpu) {}
6547 
6548 static struct task_struct *
6549 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6550 {
6551 	return __pick_next_task(rq, prev, rf);
6552 }
6553 
6554 #endif /* CONFIG_SCHED_CORE */
6555 
6556 /*
6557  * Constants for the sched_mode argument of __schedule().
6558  *
6559  * The mode argument allows RT enabled kernels to differentiate a
6560  * preemption from blocking on an 'sleeping' spin/rwlock.
6561  */
6562 #define SM_IDLE			(-1)
6563 #define SM_NONE			0
6564 #define SM_PREEMPT		1
6565 #define SM_RTLOCK_WAIT		2
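/*
 * Editorial note, not part of the source: who passes which mode in this
 * file -- schedule() and do_task_dead() use SM_NONE, schedule_idle()
 * uses SM_IDLE, preempt_schedule_common() uses SM_PREEMPT, and
 * schedule_rtlock() uses SM_RTLOCK_WAIT.
 */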
6566 
6567 /*
6568  * Helper function for __schedule()
6569  *
6570  * If a task does not have signals pending, deactivate it
6571  * If a task does not have signals pending, deactivate it.
6572  * Otherwise, mark the task's __state as RUNNING.
6573 static bool try_to_block_task(struct rq *rq, struct task_struct *p,
6574 			      unsigned long task_state)
6575 {
6576 	int flags = DEQUEUE_NOCLOCK;
6577 
6578 	if (signal_pending_state(task_state, p)) {
6579 		WRITE_ONCE(p->__state, TASK_RUNNING);
6580 		return false;
6581 	}
6582 
6583 	p->sched_contributes_to_load =
6584 		(task_state & TASK_UNINTERRUPTIBLE) &&
6585 		!(task_state & TASK_NOLOAD) &&
6586 		!(task_state & TASK_FROZEN);
6587 
6588 	if (unlikely(is_special_task_state(task_state)))
6589 		flags |= DEQUEUE_SPECIAL;
6590 
6591 	/*
6592 	 * __schedule()			ttwu()
6593 	 *   prev_state = prev->state;    if (p->on_rq && ...)
6594 	 *   if (prev_state)		    goto out;
6595 	 *     p->on_rq = 0;		  smp_acquire__after_ctrl_dep();
6596 	 *				  p->state = TASK_WAKING
6597 	 *
6598 	 * Where __schedule() and ttwu() have matching control dependencies.
6599 	 *
6600 	 * After this, schedule() must not care about p->state any more.
6601 	 */
6602 	block_task(rq, p, flags);
6603 	return true;
6604 }
6605 
6606 /*
6607  * __schedule() is the main scheduler function.
6608  *
6609  * The main means of driving the scheduler and thus entering this function are:
6610  *
6611  *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
6612  *
6613  *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
6614  *      paths. For example, see arch/x86/entry_64.S.
6615  *
6616  *      To drive preemption between tasks, the scheduler sets the flag in timer
6617  *      interrupt handler sched_tick().
6618  *
6619  *   3. Wakeups don't really cause entry into schedule(). They add a
6620  *      task to the run-queue and that's it.
6621  *
6622  *      Now, if the new task added to the run-queue preempts the current
6623  *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
6624  *      called on the nearest possible occasion:
6625  *
6626  *       - If the kernel is preemptible (CONFIG_PREEMPTION=y):
6627  *
6628  *         - in syscall or exception context, at the next outermost
6629  *           preempt_enable(). (this might be as soon as the wake_up()'s
6630  *           spin_unlock()!)
6631  *
6632  *         - in IRQ context, return from interrupt-handler to
6633  *           preemptible context
6634  *
6635  *       - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
6636  *         then at the next:
6637  *
6638  *          - cond_resched() call
6639  *          - explicit schedule() call
6640  *          - return from syscall or exception to user-space
6641  *          - return from interrupt-handler to user-space
6642  *
6643  * WARNING: must be called with preemption disabled!
6644  */
6645 static void __sched notrace __schedule(int sched_mode)
6646 {
6647 	struct task_struct *prev, *next;
6648 	/*
6649 	 * On PREEMPT_RT kernels, SM_RTLOCK_WAIT is noted
6650 	 * as a preemption by schedule_debug() and RCU.
6651 	 */
6652 	bool preempt = sched_mode > SM_NONE;
6653 	unsigned long *switch_count;
6654 	unsigned long prev_state;
6655 	struct rq_flags rf;
6656 	struct rq *rq;
6657 	int cpu;
6658 
6659 	cpu = smp_processor_id();
6660 	rq = cpu_rq(cpu);
6661 	prev = rq->curr;
6662 
6663 	schedule_debug(prev, preempt);
6664 
6665 	if (sched_feat(HRTICK) || sched_feat(HRTICK_DL))
6666 		hrtick_clear(rq);
6667 
6668 	local_irq_disable();
6669 	rcu_note_context_switch(preempt);
6670 
6671 	/*
6672 	 * Make sure that signal_pending_state()->signal_pending() below
6673 	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
6674 	 * done by the caller to avoid the race with signal_wake_up():
6675 	 *
6676 	 * __set_current_state(@state)		signal_wake_up()
6677 	 * schedule()				  set_tsk_thread_flag(p, TIF_SIGPENDING)
6678 	 *					  wake_up_state(p, state)
6679 	 *   LOCK rq->lock			    LOCK p->pi_state
6680 	 *   smp_mb__after_spinlock()		    smp_mb__after_spinlock()
6681 	 *     if (signal_pending_state())	    if (p->state & @state)
6682 	 *
6683 	 * Also, the membarrier system call requires a full memory barrier
6684 	 * after coming from user-space, before storing to rq->curr; this
6685 	 * barrier matches a full barrier in the proximity of the membarrier
6686 	 * system call exit.
6687 	 */
6688 	rq_lock(rq, &rf);
6689 	smp_mb__after_spinlock();
6690 
6691 	/* Promote REQ to ACT */
6692 	rq->clock_update_flags <<= 1;
6693 	update_rq_clock(rq);
6694 	rq->clock_update_flags = RQCF_UPDATED;
6695 
6696 	switch_count = &prev->nivcsw;
6697 
6698 	/* For task state changes, only SM_PREEMPT counts as a preemption */
6699 	preempt = sched_mode == SM_PREEMPT;
6700 
6701 	/*
6702 	 * We must load prev->state once (task_struct::state is volatile), such
6703 	 * that we form a control dependency vs deactivate_task() below.
6704 	 */
6705 	prev_state = READ_ONCE(prev->__state);
6706 	if (sched_mode == SM_IDLE) {
6707 		/* SCX must consult the BPF scheduler to tell if rq is empty */
6708 		if (!rq->nr_running && !scx_enabled()) {
6709 			next = prev;
6710 			goto picked;
6711 		}
6712 	} else if (!preempt && prev_state) {
6713 		try_to_block_task(rq, prev, prev_state);
6714 		switch_count = &prev->nvcsw;
6715 	}
6716 
6717 	next = pick_next_task(rq, prev, &rf);
6718 	rq_set_donor(rq, next);
6719 picked:
6720 	clear_tsk_need_resched(prev);
6721 	clear_preempt_need_resched();
6722 #ifdef CONFIG_SCHED_DEBUG
6723 	rq->last_seen_need_resched_ns = 0;
6724 #endif
6725 
6726 	if (likely(prev != next)) {
6727 		rq->nr_switches++;
6728 		/*
6729 		 * RCU users of rcu_dereference(rq->curr) may not see
6730 		 * changes to task_struct made by pick_next_task().
6731 		 */
6732 		RCU_INIT_POINTER(rq->curr, next);
6733 		/*
6734 		 * The membarrier system call requires each architecture
6735 		 * to have a full memory barrier after updating
6736 		 * rq->curr, before returning to user-space.
6737 		 *
6738 		 * Here are the schemes providing that barrier on the
6739 		 * various architectures:
6740 		 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC,
6741 		 *   RISC-V.  switch_mm() relies on membarrier_arch_switch_mm()
6742 		 *   on PowerPC and on RISC-V.
6743 		 * - finish_lock_switch() for weakly-ordered
6744 		 *   architectures where spin_unlock is a full barrier,
6745 		 * - switch_to() for arm64 (weakly-ordered, spin_unlock
6746 		 *   is a RELEASE barrier),
6747 		 *
6748 		 * The barrier matches a full barrier in the proximity of
6749 		 * the membarrier system call entry.
6750 		 *
6751 		 * On RISC-V, this barrier pairing is also needed for the
6752 		 * SYNC_CORE command when switching between processes, cf.
6753 		 * the inline comments in membarrier_arch_switch_mm().
6754 		 */
6755 		++*switch_count;
6756 
6757 		migrate_disable_switch(rq, prev);
6758 		psi_account_irqtime(rq, prev, next);
6759 		psi_sched_switch(prev, next, !task_on_rq_queued(prev) ||
6760 					     prev->se.sched_delayed);
6761 
6762 		trace_sched_switch(preempt, prev, next, prev_state);
6763 
6764 		/* Also unlocks the rq: */
6765 		rq = context_switch(rq, prev, next, &rf);
6766 	} else {
6767 		rq_unpin_lock(rq, &rf);
6768 		__balance_callbacks(rq);
6769 		raw_spin_rq_unlock_irq(rq);
6770 	}
6771 }
6772 
6773 void __noreturn do_task_dead(void)
6774 {
6775 	/* Causes final put_task_struct in finish_task_switch(): */
6776 	set_special_state(TASK_DEAD);
6777 
6778 	/* Tell freezer to ignore us: */
6779 	current->flags |= PF_NOFREEZE;
6780 
6781 	__schedule(SM_NONE);
6782 	BUG();
6783 
6784 	/* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
6785 	for (;;)
6786 		cpu_relax();
6787 }
6788 
6789 static inline void sched_submit_work(struct task_struct *tsk)
6790 {
6791 	static DEFINE_WAIT_OVERRIDE_MAP(sched_map, LD_WAIT_CONFIG);
6792 	unsigned int task_flags;
6793 
6794 	/*
6795 	 * Establish LD_WAIT_CONFIG context to ensure none of the code called
6796 	 * will use a blocking primitive -- which would lead to recursion.
6797 	 */
6798 	lock_map_acquire_try(&sched_map);
6799 
6800 	task_flags = tsk->flags;
6801 	/*
6802 	 * If a worker goes to sleep, notify and ask workqueue whether it
6803 	 * wants to wake up a task to maintain concurrency.
6804 	 */
6805 	if (task_flags & PF_WQ_WORKER)
6806 		wq_worker_sleeping(tsk);
6807 	else if (task_flags & PF_IO_WORKER)
6808 		io_wq_worker_sleeping(tsk);
6809 
6810 	/*
6811 	 * spinlock and rwlock must not flush block requests.  This will
6812 	 * deadlock if the callback attempts to acquire a lock which is
6813 	 * already acquired.
6814 	 */
6815 	SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT);
6816 
6817 	/*
6818 	 * If we are going to sleep and we have plugged IO queued,
6819 	 * make sure to submit it to avoid deadlocks.
6820 	 */
6821 	blk_flush_plug(tsk->plug, true);
6822 
6823 	lock_map_release(&sched_map);
6824 }
6825 
6826 static void sched_update_worker(struct task_struct *tsk)
6827 {
6828 	if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER | PF_BLOCK_TS)) {
6829 		if (tsk->flags & PF_BLOCK_TS)
6830 			blk_plug_invalidate_ts(tsk);
6831 		if (tsk->flags & PF_WQ_WORKER)
6832 			wq_worker_running(tsk);
6833 		else if (tsk->flags & PF_IO_WORKER)
6834 			io_wq_worker_running(tsk);
6835 	}
6836 }
6837 
6838 static __always_inline void __schedule_loop(int sched_mode)
6839 {
6840 	do {
6841 		preempt_disable();
6842 		__schedule(sched_mode);
6843 		sched_preempt_enable_no_resched();
6844 	} while (need_resched());
6845 }
6846 
6847 asmlinkage __visible void __sched schedule(void)
6848 {
6849 	struct task_struct *tsk = current;
6850 
6851 #ifdef CONFIG_RT_MUTEXES
6852 	lockdep_assert(!tsk->sched_rt_mutex);
6853 #endif
6854 
6855 	if (!task_is_running(tsk))
6856 		sched_submit_work(tsk);
6857 	__schedule_loop(SM_NONE);
6858 	sched_update_worker(tsk);
6859 }
6860 EXPORT_SYMBOL(schedule);
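/*
 * Editorial sketch, not part of the source: the canonical wait pattern
 * whose races the signal_pending_state() ordering inside __schedule()
 * closes. my_wq and my_cond are hypothetical.
 */
static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static bool my_cond;

static int wait_for_cond_sketch(void)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	for (;;) {
		prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
		if (READ_ONCE(my_cond))
			break;
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		schedule();	/* blocks until wake_up(&my_wq) or a signal */
	}
	finish_wait(&my_wq, &wait);
	return ret;
}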
6861 
6862 /*
6863  * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
6864  * state (have scheduled out non-voluntarily) by making sure that all
6865  * tasks have either left the run queue or have gone into user space.
6866  * As idle tasks do not do either, they must not ever be preempted
6867  * (schedule out non-voluntarily).
6868  *
6869  * schedule_idle() is similar to schedule_preempt_disable() except that it
6870  * never enables preemption because it does not call sched_submit_work().
6871  */
6872 void __sched schedule_idle(void)
6873 {
6874 	/*
6875 	 * As this skips calling sched_submit_work(), which the idle task does
6876 	 * regardless because that function is a NOP when the task is in a
6877 	 * TASK_RUNNING state, make sure this isn't used anywhere the
6878 	 * current task can be in any other state. Note, idle is always in the
6879 	 * TASK_RUNNING state.
6880 	 */
6881 	WARN_ON_ONCE(current->__state);
6882 	do {
6883 		__schedule(SM_IDLE);
6884 	} while (need_resched());
6885 }
6886 
6887 #if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK)
6888 asmlinkage __visible void __sched schedule_user(void)
6889 {
6890 	/*
6891 	 * If we come here after a random call to set_need_resched(),
6892 	 * or we have been woken up remotely but the IPI has not yet arrived,
6893 	 * we haven't yet exited the RCU idle mode. Do it here manually until
6894 	 * we find a better solution.
6895 	 *
6896 	 * NB: There are buggy callers of this function.  Ideally we
6897 	 * should warn if prev_state != CT_STATE_USER, but that will trigger
6898 	 * too frequently to make sense yet.
6899 	 */
6900 	enum ctx_state prev_state = exception_enter();
6901 	schedule();
6902 	exception_exit(prev_state);
6903 }
6904 #endif
6905 
6906 /**
6907  * schedule_preempt_disabled - called with preemption disabled
6908  *
6909  * Returns with preemption disabled. Note: preempt_count must be 1
6910  */
6911 void __sched schedule_preempt_disabled(void)
6912 {
6913 	sched_preempt_enable_no_resched();
6914 	schedule();
6915 	preempt_disable();
6916 }
6917 
6918 #ifdef CONFIG_PREEMPT_RT
6919 void __sched notrace schedule_rtlock(void)
6920 {
6921 	__schedule_loop(SM_RTLOCK_WAIT);
6922 }
6923 NOKPROBE_SYMBOL(schedule_rtlock);
6924 #endif
6925 
6926 static void __sched notrace preempt_schedule_common(void)
6927 {
6928 	do {
6929 		/*
6930 		 * Because the function tracer can trace preempt_count_sub()
6931 		 * and it also uses preempt_enable/disable_notrace(), if
6932 		 * NEED_RESCHED is set, the preempt_enable_notrace() called
6933 		 * by the function tracer will call this function again and
6934 		 * cause infinite recursion.
6935 		 *
6936 		 * Preemption must be disabled here before the function
6937 		 * tracer can trace. Break up preempt_disable() into two
6938 		 * calls. One to disable preemption without fear of being
6939 		 * traced. The other to still record the preemption latency,
6940 		 * which can also be traced by the function tracer.
6941 		 */
6942 		preempt_disable_notrace();
6943 		preempt_latency_start(1);
6944 		__schedule(SM_PREEMPT);
6945 		preempt_latency_stop(1);
6946 		preempt_enable_no_resched_notrace();
6947 
6948 		/*
6949 		 * Check again in case we missed a preemption opportunity
6950 		 * between schedule and now.
6951 		 */
6952 	} while (need_resched());
6953 }
6954 
6955 #ifdef CONFIG_PREEMPTION
6956 /*
6957  * This is the entry point to schedule() from in-kernel preemption
6958  * off of preempt_enable.
6959  */
6960 asmlinkage __visible void __sched notrace preempt_schedule(void)
6961 {
6962 	/*
6963 	 * If there is a non-zero preempt_count or interrupts are disabled,
6964 	 * we do not want to preempt the current task. Just return..
6965 	 */
6966 	if (likely(!preemptible()))
6967 		return;
6968 	preempt_schedule_common();
6969 }
6970 NOKPROBE_SYMBOL(preempt_schedule);
6971 EXPORT_SYMBOL(preempt_schedule);
6972 
6973 #ifdef CONFIG_PREEMPT_DYNAMIC
6974 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
6975 #ifndef preempt_schedule_dynamic_enabled
6976 #define preempt_schedule_dynamic_enabled	preempt_schedule
6977 #define preempt_schedule_dynamic_disabled	NULL
6978 #endif
6979 DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
6980 EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
6981 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
6982 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);
6983 void __sched notrace dynamic_preempt_schedule(void)
6984 {
6985 	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
6986 		return;
6987 	preempt_schedule();
6988 }
6989 NOKPROBE_SYMBOL(dynamic_preempt_schedule);
6990 EXPORT_SYMBOL(dynamic_preempt_schedule);
6991 #endif
6992 #endif
6993 
6994 /**
6995  * preempt_schedule_notrace - preempt_schedule called by tracing
6996  *
6997  * The tracing infrastructure uses preempt_enable_notrace to prevent
6998  * recursion and tracing preempt enabling caused by the tracing
6999  * infrastructure itself. But as tracing can happen in areas coming
7000  * from userspace or just about to enter userspace, a preempt enable
7001  * can occur before user_exit() is called. This will cause the scheduler
7002  * to be called when the system is still in usermode.
7003  *
7004  * To prevent this, the preempt_enable_notrace will use this function
7005  * instead of preempt_schedule() to exit user context if needed before
7006  * calling the scheduler.
7007  */
7008 asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
7009 {
7010 	enum ctx_state prev_ctx;
7011 
7012 	if (likely(!preemptible()))
7013 		return;
7014 
7015 	do {
7016 		/*
7017 		 * Because the function tracer can trace preempt_count_sub()
7018 		 * and it also uses preempt_enable/disable_notrace(), if
7019 		 * NEED_RESCHED is set, the preempt_enable_notrace() called
7020 		 * by the function tracer will call this function again and
7021 		 * cause infinite recursion.
7022 		 *
7023 		 * Preemption must be disabled here before the function
7024 		 * tracer can trace. Break up preempt_disable() into two
7025 		 * calls. One to disable preemption without fear of being
7026 		 * traced. The other to still record the preemption latency,
7027 		 * which can also be traced by the function tracer.
7028 		 */
7029 		preempt_disable_notrace();
7030 		preempt_latency_start(1);
7031 		/*
7032 		 * Needs preempt disabled in case user_exit() is traced
7033 		 * and the tracer calls preempt_enable_notrace() causing
7034 		 * an infinite recursion.
7035 		 */
7036 		prev_ctx = exception_enter();
7037 		__schedule(SM_PREEMPT);
7038 		exception_exit(prev_ctx);
7039 
7040 		preempt_latency_stop(1);
7041 		preempt_enable_no_resched_notrace();
7042 	} while (need_resched());
7043 }
7044 EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
7045 
7046 #ifdef CONFIG_PREEMPT_DYNAMIC
7047 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
7048 #ifndef preempt_schedule_notrace_dynamic_enabled
7049 #define preempt_schedule_notrace_dynamic_enabled	preempt_schedule_notrace
7050 #define preempt_schedule_notrace_dynamic_disabled	NULL
7051 #endif
7052 DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
7053 EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
7054 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7055 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace);
7056 void __sched notrace dynamic_preempt_schedule_notrace(void)
7057 {
7058 	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace))
7059 		return;
7060 	preempt_schedule_notrace();
7061 }
7062 NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace);
7063 EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
7064 #endif
7065 #endif
7066 
7067 #endif /* CONFIG_PREEMPTION */
7068 
7069 /*
7070  * This is the entry point to schedule() from kernel preemption
7071  * off of IRQ context.
7072  * Note that this is called and returns with IRQs disabled. This will
7073  * protect us against recursive calling from IRQ contexts.
7074  */
7075 asmlinkage __visible void __sched preempt_schedule_irq(void)
7076 {
7077 	enum ctx_state prev_state;
7078 
7079 	/* Catch callers which need to be fixed */
7080 	BUG_ON(preempt_count() || !irqs_disabled());
7081 
7082 	prev_state = exception_enter();
7083 
7084 	do {
7085 		preempt_disable();
7086 		local_irq_enable();
7087 		__schedule(SM_PREEMPT);
7088 		local_irq_disable();
7089 		sched_preempt_enable_no_resched();
7090 	} while (need_resched());
7091 
7092 	exception_exit(prev_state);
7093 }
7094 
7095 int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
7096 			  void *key)
7097 {
7098 	WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~(WF_SYNC|WF_CURRENT_CPU));
7099 	return try_to_wake_up(curr->private, mode, wake_flags);
7100 }
7101 EXPORT_SYMBOL(default_wake_function);
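/*
 * Editor's note -- a minimal sketch of how default_wake_function() is
 * typically reached ('my_wq' and 'done' are hypothetical):
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static bool done;
 *
 *	wait_event(my_wq, done);	// sleeper
 *
 *	done = true;			// waker
 *	wake_up(&my_wq);		// -> default_wake_function() -> try_to_wake_up()
 *
 * wait_event() installs autoremove_wake_function(), which itself calls
 * default_wake_function(); DECLARE_WAITQUEUE() uses it directly.
 */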
7102 
7103 const struct sched_class *__setscheduler_class(int policy, int prio)
7104 {
7105 	if (dl_prio(prio))
7106 		return &dl_sched_class;
7107 
7108 	if (rt_prio(prio))
7109 		return &rt_sched_class;
7110 
7111 #ifdef CONFIG_SCHED_CLASS_EXT
7112 	if (task_should_scx(policy))
7113 		return &ext_sched_class;
7114 #endif
7115 
7116 	return &fair_sched_class;
7117 }
7118 
7119 #ifdef CONFIG_RT_MUTEXES
7120 
7121 /*
7122  * Would be more useful with typeof()/auto_type but they don't mix with
7123  * bit-fields. Since it's a local thing, use int. Keep the generic sounding
7124  * name such that if someone were to implement this function we get to compare
7125  * notes.
7126  */
7127 #define fetch_and_set(x, v) ({ int _x = (x); (x) = (v); _x; })
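/*
 * Editor's note -- semantics of the helper above (illustrative):
 *
 *	int x = 1;
 *	int old = fetch_and_set(x, 0);	// old == 1, x == 0
 *
 * Unlike xchg() this is not atomic; it is only suitable for fields such as
 * current->sched_rt_mutex that only the current task ever touches.
 */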
7128 
7129 void rt_mutex_pre_schedule(void)
7130 {
7131 	lockdep_assert(!fetch_and_set(current->sched_rt_mutex, 1));
7132 	sched_submit_work(current);
7133 }
7134 
7135 void rt_mutex_schedule(void)
7136 {
7137 	lockdep_assert(current->sched_rt_mutex);
7138 	__schedule_loop(SM_NONE);
7139 }
7140 
7141 void rt_mutex_post_schedule(void)
7142 {
7143 	sched_update_worker(current);
7144 	lockdep_assert(fetch_and_set(current->sched_rt_mutex, 0));
7145 }
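/*
 * Editor's note -- the expected pairing of the three helpers above, as used
 * by the rt_mutex blocking paths (a sketch, not verbatim rtmutex.c code):
 *
 *	rt_mutex_pre_schedule();
 *	...
 *	rt_mutex_schedule();		// if we actually need to block
 *	...
 *	rt_mutex_post_schedule();
 *
 * Splitting sched_submit_work()/sched_update_worker() out of schedule()
 * lets the work flush happen before any rtmutex wait state is set up.
 */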
7146 
7147 /*
7148  * rt_mutex_setprio - set the current priority of a task
7149  * @p: task to boost
7150  * @pi_task: donor task
7151  *
7152  * This function changes the 'effective' priority of a task. It does
7153  * not touch ->normal_prio like __setscheduler().
7154  *
7155  * Used by the rt_mutex code to implement priority inheritance
7156  * logic. Call site only calls if the priority of the task changed.
7157  */
7158 void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
7159 {
7160 	int prio, oldprio, queued, running, queue_flag =
7161 		DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
7162 	const struct sched_class *prev_class, *next_class;
7163 	struct rq_flags rf;
7164 	struct rq *rq;
7165 
7166 	/* XXX used to be waiter->prio, not waiter->task->prio */
7167 	prio = __rt_effective_prio(pi_task, p->normal_prio);
7168 
7169 	/*
7170 	 * If nothing changed, bail early.
7171 	 */
7172 	if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio))
7173 		return;
7174 
7175 	rq = __task_rq_lock(p, &rf);
7176 	update_rq_clock(rq);
7177 	/*
7178 	 * Set under pi_lock && rq->lock, such that the value can be used under
7179 	 * either lock.
7180 	 *
7181 	 * Note that there is a load of trickery in making this pointer cache work
7182 	 * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
7183 	 * ensure a task is de-boosted (pi_task is set to NULL) before the
7184 	 * task is allowed to run again (and can exit). This ensures the pointer
7185 	 * points to a blocked task -- which guarantees the task is present.
7186 	 */
7187 	p->pi_top_task = pi_task;
7188 
7189 	/*
7190 	 * For FIFO/RR we only need to set prio, if that matches we're done.
7191 	 */
7192 	if (prio == p->prio && !dl_prio(prio))
7193 		goto out_unlock;
7194 
7195 	/*
7196 	 * Idle task boosting is a no-no in general. There is one
7197 	 * exception, when PREEMPT_RT and NOHZ is active:
7198 	 *
7199 	 * The idle task calls get_next_timer_interrupt() and holds
7200 	 * the timer wheel base->lock on the CPU and another CPU wants
7201 	 * to access the timer (probably to cancel it). We can safely
7202 	 * ignore the boosting request, as the idle CPU runs this code
7203 	 * with interrupts disabled and will complete the lock
7204 	 * protected section without being interrupted. So there is no
7205 	 * real need to boost.
7206 	 */
7207 	if (unlikely(p == rq->idle)) {
7208 		WARN_ON(p != rq->curr);
7209 		WARN_ON(p->pi_blocked_on);
7210 		goto out_unlock;
7211 	}
7212 
7213 	trace_sched_pi_setprio(p, pi_task);
7214 	oldprio = p->prio;
7215 
7216 	if (oldprio == prio)
7217 		queue_flag &= ~DEQUEUE_MOVE;
7218 
7219 	prev_class = p->sched_class;
7220 	next_class = __setscheduler_class(p->policy, prio);
7221 
7222 	if (prev_class != next_class && p->se.sched_delayed)
7223 		dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED | DEQUEUE_NOCLOCK);
7224 
7225 	queued = task_on_rq_queued(p);
7226 	running = task_current_donor(rq, p);
7227 	if (queued)
7228 		dequeue_task(rq, p, queue_flag);
7229 	if (running)
7230 		put_prev_task(rq, p);
7231 
7232 	/*
7233 	 * Boosting conditions are:
7234 	 * 1. -rt task is running and holds mutex A
7235 	 *      --> -dl task blocks on mutex A
7236 	 *
7237 	 * 2. -dl task is running and holds mutex A
7238 	 *      --> -dl task blocks on mutex A and could preempt the
7239 	 *          running task
7240 	 */
7241 	if (dl_prio(prio)) {
7242 		if (!dl_prio(p->normal_prio) ||
7243 		    (pi_task && dl_prio(pi_task->prio) &&
7244 		     dl_entity_preempt(&pi_task->dl, &p->dl))) {
7245 			p->dl.pi_se = pi_task->dl.pi_se;
7246 			queue_flag |= ENQUEUE_REPLENISH;
7247 		} else {
7248 			p->dl.pi_se = &p->dl;
7249 		}
7250 	} else if (rt_prio(prio)) {
7251 		if (dl_prio(oldprio))
7252 			p->dl.pi_se = &p->dl;
7253 		if (oldprio < prio)
7254 			queue_flag |= ENQUEUE_HEAD;
7255 	} else {
7256 		if (dl_prio(oldprio))
7257 			p->dl.pi_se = &p->dl;
7258 		if (rt_prio(oldprio))
7259 			p->rt.timeout = 0;
7260 	}
7261 
7262 	p->sched_class = next_class;
7263 	p->prio = prio;
7264 
7265 	check_class_changing(rq, p, prev_class);
7266 
7267 	if (queued)
7268 		enqueue_task(rq, p, queue_flag);
7269 	if (running)
7270 		set_next_task(rq, p);
7271 
7272 	check_class_changed(rq, p, prev_class, oldprio);
7273 out_unlock:
7274 	/* Avoid rq from going away on us: */
7275 	preempt_disable();
7276 
7277 	rq_unpin_lock(rq, &rf);
7278 	__balance_callbacks(rq);
7279 	raw_spin_rq_unlock(rq);
7280 
7281 	preempt_enable();
7282 }
7283 #endif
7284 
7285 #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
7286 int __sched __cond_resched(void)
7287 {
7288 	if (should_resched(0) && !irqs_disabled()) {
7289 		preempt_schedule_common();
7290 		return 1;
7291 	}
7292 	/*
7293 	 * In preemptible kernels, ->rcu_read_lock_nesting tells the tick
7294 	 * whether the current CPU is in an RCU read-side critical section,
7295 	 * so the tick can report quiescent states even for CPUs looping
7296 	 * in kernel context.  In contrast, in non-preemptible kernels,
7297 	 * RCU readers leave no in-memory hints, which means that CPU-bound
7298 	 * processes executing in kernel context might never report an
7299 	 * RCU quiescent state.  Therefore, the following code causes
7300 	 * cond_resched() to report a quiescent state, but only when RCU
7301 	 * is in urgent need of one.
7302 	 */
7303 #ifndef CONFIG_PREEMPT_RCU
7304 	rcu_all_qs();
7305 #endif
7306 	return 0;
7307 }
7308 EXPORT_SYMBOL(__cond_resched);
7309 #endif
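/*
 * Editor's note -- typical cond_resched() usage in a long-running loop
 * ('nr_items' and 'process_item' are hypothetical):
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(i);
 *		cond_resched();		// may end up in __cond_resched() above
 *	}
 *
 * Under PREEMPT_DYNAMIC (see the table below), cond_resched() resolves to
 * either __cond_resched() or a RET0 stub, depending on the chosen model.
 */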
7310 
7311 #ifdef CONFIG_PREEMPT_DYNAMIC
7312 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
7313 #define cond_resched_dynamic_enabled	__cond_resched
7314 #define cond_resched_dynamic_disabled	((void *)&__static_call_return0)
7315 DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
7316 EXPORT_STATIC_CALL_TRAMP(cond_resched);
7317 
7318 #define might_resched_dynamic_enabled	__cond_resched
7319 #define might_resched_dynamic_disabled	((void *)&__static_call_return0)
7320 DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
7321 EXPORT_STATIC_CALL_TRAMP(might_resched);
7322 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7323 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
7324 int __sched dynamic_cond_resched(void)
7325 {
7326 	klp_sched_try_switch();
7327 	if (!static_branch_unlikely(&sk_dynamic_cond_resched))
7328 		return 0;
7329 	return __cond_resched();
7330 }
7331 EXPORT_SYMBOL(dynamic_cond_resched);
7332 
7333 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched);
7334 int __sched dynamic_might_resched(void)
7335 {
7336 	if (!static_branch_unlikely(&sk_dynamic_might_resched))
7337 		return 0;
7338 	return __cond_resched();
7339 }
7340 EXPORT_SYMBOL(dynamic_might_resched);
7341 #endif
7342 #endif
7343 
7344 /*
7345  * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
7346  * call schedule, and on return reacquire the lock.
7347  *
7348  * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
7349  * operations here to prevent schedule() from being called twice (once via
7350  * spin_unlock(), once by hand).
7351  */
7352 int __cond_resched_lock(spinlock_t *lock)
7353 {
7354 	int resched = should_resched(PREEMPT_LOCK_OFFSET);
7355 	int ret = 0;
7356 
7357 	lockdep_assert_held(lock);
7358 
7359 	if (spin_needbreak(lock) || resched) {
7360 		spin_unlock(lock);
7361 		if (!_cond_resched())
7362 			cpu_relax();
7363 		ret = 1;
7364 		spin_lock(lock);
7365 	}
7366 	return ret;
7367 }
7368 EXPORT_SYMBOL(__cond_resched_lock);
7369 
7370 int __cond_resched_rwlock_read(rwlock_t *lock)
7371 {
7372 	int resched = should_resched(PREEMPT_LOCK_OFFSET);
7373 	int ret = 0;
7374 
7375 	lockdep_assert_held_read(lock);
7376 
7377 	if (rwlock_needbreak(lock) || resched) {
7378 		read_unlock(lock);
7379 		if (!_cond_resched())
7380 			cpu_relax();
7381 		ret = 1;
7382 		read_lock(lock);
7383 	}
7384 	return ret;
7385 }
7386 EXPORT_SYMBOL(__cond_resched_rwlock_read);
7387 
7388 int __cond_resched_rwlock_write(rwlock_t *lock)
7389 {
7390 	int resched = should_resched(PREEMPT_LOCK_OFFSET);
7391 	int ret = 0;
7392 
7393 	lockdep_assert_held_write(lock);
7394 
7395 	if (rwlock_needbreak(lock) || resched) {
7396 		write_unlock(lock);
7397 		if (!_cond_resched())
7398 			cpu_relax();
7399 		ret = 1;
7400 		write_lock(lock);
7401 	}
7402 	return ret;
7403 }
7404 EXPORT_SYMBOL(__cond_resched_rwlock_write);
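/*
 * Editor's note -- a sketch of lock breaking with cond_resched_lock();
 * 'my_lock', 'more_work' and 'do_chunk' are hypothetical:
 *
 *	spin_lock(&my_lock);
 *	while (more_work()) {
 *		do_chunk();
 *		cond_resched_lock(&my_lock);	// may drop and re-take my_lock
 *	}
 *	spin_unlock(&my_lock);
 *
 * The rwlock variants above do the same for read_lock()/write_lock()
 * critical sections.
 */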
7405 
7406 #ifdef CONFIG_PREEMPT_DYNAMIC
7407 
7408 #ifdef CONFIG_GENERIC_ENTRY
7409 #include <linux/entry-common.h>
7410 #endif
7411 
7412 /*
7413  * SC:cond_resched
7414  * SC:might_resched
7415  * SC:preempt_schedule
7416  * SC:preempt_schedule_notrace
7417  * SC:irqentry_exit_cond_resched
7418  *
7419  *
7420  * NONE:
7421  *   cond_resched               <- __cond_resched
7422  *   might_resched              <- RET0
7423  *   preempt_schedule           <- NOP
7424  *   preempt_schedule_notrace   <- NOP
7425  *   irqentry_exit_cond_resched <- NOP
7426  *   dynamic_preempt_lazy       <- false
7427  *
7428  * VOLUNTARY:
7429  *   cond_resched               <- __cond_resched
7430  *   might_resched              <- __cond_resched
7431  *   preempt_schedule           <- NOP
7432  *   preempt_schedule_notrace   <- NOP
7433  *   irqentry_exit_cond_resched <- NOP
7434  *   dynamic_preempt_lazy       <- false
7435  *
7436  * FULL:
7437  *   cond_resched               <- RET0
7438  *   might_resched              <- RET0
7439  *   preempt_schedule           <- preempt_schedule
7440  *   preempt_schedule_notrace   <- preempt_schedule_notrace
7441  *   irqentry_exit_cond_resched <- irqentry_exit_cond_resched
7442  *   dynamic_preempt_lazy       <- false
7443  *
7444  * LAZY:
7445  *   cond_resched               <- RET0
7446  *   might_resched              <- RET0
7447  *   preempt_schedule           <- preempt_schedule
7448  *   preempt_schedule_notrace   <- preempt_schedule_notrace
7449  *   irqentry_exit_cond_resched <- irqentry_exit_cond_resched
7450  *   dynamic_preempt_lazy       <- true
7451  */
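/*
 * Editor's note: the model is chosen at boot via the "preempt=" parameter
 * (see setup_preempt_mode() below) and -- assuming the usual debugfs layout
 * wired up in kernel/sched/debug.c -- can be switched at runtime through
 * /sys/kernel/debug/sched/preempt, which ends up in sched_dynamic_update().
 */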
7452 
7453 enum {
7454 	preempt_dynamic_undefined = -1,
7455 	preempt_dynamic_none,
7456 	preempt_dynamic_voluntary,
7457 	preempt_dynamic_full,
7458 	preempt_dynamic_lazy,
7459 };
7460 
7461 int preempt_dynamic_mode = preempt_dynamic_undefined;
7462 
7463 int sched_dynamic_mode(const char *str)
7464 {
7465 #ifndef CONFIG_PREEMPT_RT
7466 	if (!strcmp(str, "none"))
7467 		return preempt_dynamic_none;
7468 
7469 	if (!strcmp(str, "voluntary"))
7470 		return preempt_dynamic_voluntary;
7471 #endif
7472 
7473 	if (!strcmp(str, "full"))
7474 		return preempt_dynamic_full;
7475 
7476 #ifdef CONFIG_ARCH_HAS_PREEMPT_LAZY
7477 	if (!strcmp(str, "lazy"))
7478 		return preempt_dynamic_lazy;
7479 #endif
7480 
7481 	return -EINVAL;
7482 }
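/*
 * Editor's note -- illustrative return values:
 *
 *	sched_dynamic_mode("full");	// -> preempt_dynamic_full
 *	sched_dynamic_mode("bogus");	// -> -EINVAL
 *
 * Per the #ifdefs above, "none"/"voluntary" are rejected on PREEMPT_RT and
 * "lazy" requires CONFIG_ARCH_HAS_PREEMPT_LAZY.
 */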
7483 
7484 #define preempt_dynamic_key_enable(f)	static_key_enable(&sk_dynamic_##f.key)
7485 #define preempt_dynamic_key_disable(f)	static_key_disable(&sk_dynamic_##f.key)
7486 
7487 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
7488 #define preempt_dynamic_enable(f)	static_call_update(f, f##_dynamic_enabled)
7489 #define preempt_dynamic_disable(f)	static_call_update(f, f##_dynamic_disabled)
7490 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
7491 #define preempt_dynamic_enable(f)	preempt_dynamic_key_enable(f)
7492 #define preempt_dynamic_disable(f)	preempt_dynamic_key_disable(f)
7493 #else
7494 #error "Unsupported PREEMPT_DYNAMIC mechanism"
7495 #endif
7496 
7497 static DEFINE_MUTEX(sched_dynamic_mutex);
7498 static bool klp_override;
7499 
7500 static void __sched_dynamic_update(int mode)
7501 {
7502 	/*
7503 	 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
7504 	 * the ZERO state, which is invalid.
7505 	 */
7506 	if (!klp_override)
7507 		preempt_dynamic_enable(cond_resched);
7508 	preempt_dynamic_enable(might_resched);
7509 	preempt_dynamic_enable(preempt_schedule);
7510 	preempt_dynamic_enable(preempt_schedule_notrace);
7511 	preempt_dynamic_enable(irqentry_exit_cond_resched);
7512 	preempt_dynamic_key_disable(preempt_lazy);
7513 
7514 	switch (mode) {
7515 	case preempt_dynamic_none:
7516 		if (!klp_override)
7517 			preempt_dynamic_enable(cond_resched);
7518 		preempt_dynamic_disable(might_resched);
7519 		preempt_dynamic_disable(preempt_schedule);
7520 		preempt_dynamic_disable(preempt_schedule_notrace);
7521 		preempt_dynamic_disable(irqentry_exit_cond_resched);
7522 		preempt_dynamic_key_disable(preempt_lazy);
7523 		if (mode != preempt_dynamic_mode)
7524 			pr_info("Dynamic Preempt: none\n");
7525 		break;
7526 
7527 	case preempt_dynamic_voluntary:
7528 		if (!klp_override)
7529 			preempt_dynamic_enable(cond_resched);
7530 		preempt_dynamic_enable(might_resched);
7531 		preempt_dynamic_disable(preempt_schedule);
7532 		preempt_dynamic_disable(preempt_schedule_notrace);
7533 		preempt_dynamic_disable(irqentry_exit_cond_resched);
7534 		preempt_dynamic_key_disable(preempt_lazy);
7535 		if (mode != preempt_dynamic_mode)
7536 			pr_info("Dynamic Preempt: voluntary\n");
7537 		break;
7538 
7539 	case preempt_dynamic_full:
7540 		if (!klp_override)
7541 			preempt_dynamic_disable(cond_resched);
7542 		preempt_dynamic_disable(might_resched);
7543 		preempt_dynamic_enable(preempt_schedule);
7544 		preempt_dynamic_enable(preempt_schedule_notrace);
7545 		preempt_dynamic_enable(irqentry_exit_cond_resched);
7546 		preempt_dynamic_key_disable(preempt_lazy);
7547 		if (mode != preempt_dynamic_mode)
7548 			pr_info("Dynamic Preempt: full\n");
7549 		break;
7550 
7551 	case preempt_dynamic_lazy:
7552 		if (!klp_override)
7553 			preempt_dynamic_disable(cond_resched);
7554 		preempt_dynamic_disable(might_resched);
7555 		preempt_dynamic_enable(preempt_schedule);
7556 		preempt_dynamic_enable(preempt_schedule_notrace);
7557 		preempt_dynamic_enable(irqentry_exit_cond_resched);
7558 		preempt_dynamic_key_enable(preempt_lazy);
7559 		if (mode != preempt_dynamic_mode)
7560 			pr_info("Dynamic Preempt: lazy\n");
7561 		break;
7562 	}
7563 
7564 	preempt_dynamic_mode = mode;
7565 }
7566 
7567 void sched_dynamic_update(int mode)
7568 {
7569 	mutex_lock(&sched_dynamic_mutex);
7570 	__sched_dynamic_update(mode);
7571 	mutex_unlock(&sched_dynamic_mutex);
7572 }
7573 
7574 #ifdef CONFIG_HAVE_PREEMPT_DYNAMIC_CALL
7575 
7576 static int klp_cond_resched(void)
7577 {
7578 	__klp_sched_try_switch();
7579 	return __cond_resched();
7580 }
7581 
7582 void sched_dynamic_klp_enable(void)
7583 {
7584 	mutex_lock(&sched_dynamic_mutex);
7585 
7586 	klp_override = true;
7587 	static_call_update(cond_resched, klp_cond_resched);
7588 
7589 	mutex_unlock(&sched_dynamic_mutex);
7590 }
7591 
7592 void sched_dynamic_klp_disable(void)
7593 {
7594 	mutex_lock(&sched_dynamic_mutex);
7595 
7596 	klp_override = false;
7597 	__sched_dynamic_update(preempt_dynamic_mode);
7598 
7599 	mutex_unlock(&sched_dynamic_mutex);
7600 }
7601 
7602 #endif /* CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
7603 
7604 static int __init setup_preempt_mode(char *str)
7605 {
7606 	int mode = sched_dynamic_mode(str);
7607 	if (mode < 0) {
7608 		pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
7609 		return 0;
7610 	}
7611 
7612 	sched_dynamic_update(mode);
7613 	return 1;
7614 }
7615 __setup("preempt=", setup_preempt_mode);
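/*
 * Editor's note -- example kernel command line (illustrative):
 *
 *	... preempt=voluntary ...
 *
 * An unsupported value merely warns and keeps the build-time default that
 * preempt_dynamic_init() below selects.
 */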
7616 
7617 static void __init preempt_dynamic_init(void)
7618 {
7619 	if (preempt_dynamic_mode == preempt_dynamic_undefined) {
7620 		if (IS_ENABLED(CONFIG_PREEMPT_NONE)) {
7621 			sched_dynamic_update(preempt_dynamic_none);
7622 		} else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) {
7623 			sched_dynamic_update(preempt_dynamic_voluntary);
7624 		} else if (IS_ENABLED(CONFIG_PREEMPT_LAZY)) {
7625 			sched_dynamic_update(preempt_dynamic_lazy);
7626 		} else {
7627 			/* Default static call setting, nothing to do */
7628 			WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT));
7629 			preempt_dynamic_mode = preempt_dynamic_full;
7630 			pr_info("Dynamic Preempt: full\n");
7631 		}
7632 	}
7633 }
7634 
7635 #define PREEMPT_MODEL_ACCESSOR(mode) \
7636 	bool preempt_model_##mode(void)						 \
7637 	{									 \
7638 		WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \
7639 		return preempt_dynamic_mode == preempt_dynamic_##mode;		 \
7640 	}									 \
7641 	EXPORT_SYMBOL_GPL(preempt_model_##mode)
7642 
7643 PREEMPT_MODEL_ACCESSOR(none);
7644 PREEMPT_MODEL_ACCESSOR(voluntary);
7645 PREEMPT_MODEL_ACCESSOR(full);
7646 PREEMPT_MODEL_ACCESSOR(lazy);
7647 
7648 #else /* !CONFIG_PREEMPT_DYNAMIC: */
7649 
7650 static inline void preempt_dynamic_init(void) { }
7651 
7652 #endif /* CONFIG_PREEMPT_DYNAMIC */
7653 
7654 int io_schedule_prepare(void)
7655 {
7656 	int old_iowait = current->in_iowait;
7657 
7658 	current->in_iowait = 1;
7659 	blk_flush_plug(current->plug, true);
7660 	return old_iowait;
7661 }
7662 
7663 void io_schedule_finish(int token)
7664 {
7665 	current->in_iowait = token;
7666 }
7667 
7668 /*
7669  * This task is about to go to sleep on IO. Increment rq->nr_iowait so
7670  * that process accounting knows that this is a task in IO wait state.
7671  */
7672 long __sched io_schedule_timeout(long timeout)
7673 {
7674 	int token;
7675 	long ret;
7676 
7677 	token = io_schedule_prepare();
7678 	ret = schedule_timeout(timeout);
7679 	io_schedule_finish(token);
7680 
7681 	return ret;
7682 }
7683 EXPORT_SYMBOL(io_schedule_timeout);
7684 
7685 void __sched io_schedule(void)
7686 {
7687 	int token;
7688 
7689 	token = io_schedule_prepare();
7690 	schedule();
7691 	io_schedule_finish(token);
7692 }
7693 EXPORT_SYMBOL(io_schedule);
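/*
 * Editor's note -- io_schedule_prepare()/io_schedule_finish() exist so that
 * callers with their own sleeping primitive are still accounted as iowait;
 * a sketch, where the completion in the middle is hypothetical:
 *
 *	int tok = io_schedule_prepare();	// set in_iowait, flush plug
 *	wait_for_completion(&done);		// any blocking primitive
 *	io_schedule_finish(tok);		// restore previous in_iowait
 */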
7694 
7695 void sched_show_task(struct task_struct *p)
7696 {
7697 	unsigned long free;
7698 	int ppid;
7699 
7700 	if (!try_get_task_stack(p))
7701 		return;
7702 
7703 	pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
7704 
7705 	if (task_is_running(p))
7706 		pr_cont("  running task    ");
7707 	free = stack_not_used(p);
7708 	ppid = 0;
7709 	rcu_read_lock();
7710 	if (pid_alive(p))
7711 		ppid = task_pid_nr(rcu_dereference(p->real_parent));
7712 	rcu_read_unlock();
7713 	pr_cont(" stack:%-5lu pid:%-5d tgid:%-5d ppid:%-6d task_flags:0x%04x flags:0x%08lx\n",
7714 		free, task_pid_nr(p), task_tgid_nr(p),
7715 		ppid, p->flags, read_task_thread_flags(p));
7716 
7717 	print_worker_info(KERN_INFO, p);
7718 	print_stop_info(KERN_INFO, p);
7719 	print_scx_info(KERN_INFO, p);
7720 	show_stack(p, NULL, KERN_INFO);
7721 	put_task_stack(p);
7722 }
7723 EXPORT_SYMBOL_GPL(sched_show_task);
7724 
7725 static inline bool
7726 state_filter_match(unsigned long state_filter, struct task_struct *p)
7727 {
7728 	unsigned int state = READ_ONCE(p->__state);
7729 
7730 	/* no filter, everything matches */
7731 	if (!state_filter)
7732 		return true;
7733 
7734 	/* filter, but doesn't match */
7735 	if (!(state & state_filter))
7736 		return false;
7737 
7738 	/*
7739 	 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
7740 	 * TASK_KILLABLE).
7741 	 */
7742 	if (state_filter == TASK_UNINTERRUPTIBLE && (state & TASK_NOLOAD))
7743 		return false;
7744 
7745 	return true;
7746 }
7747 
7748 
7749 void show_state_filter(unsigned int state_filter)
7750 {
7751 	struct task_struct *g, *p;
7752 
7753 	rcu_read_lock();
7754 	for_each_process_thread(g, p) {
7755 		/*
7756 		 * Reset the NMI-timeout; listing all tasks on a slow
7757 		 * console might take a lot of time.
7758 		 * Also, reset softlockup watchdogs on all CPUs, because
7759 		 * another CPU might be blocked waiting for us to process
7760 		 * an IPI.
7761 		 */
7762 		touch_nmi_watchdog();
7763 		touch_all_softlockup_watchdogs();
7764 		if (state_filter_match(state_filter, p))
7765 			sched_show_task(p);
7766 	}
7767 
7768 #ifdef CONFIG_SCHED_DEBUG
7769 	if (!state_filter)
7770 		sysrq_sched_debug_show();
7771 #endif
7772 	rcu_read_unlock();
7773 	/*
7774 	 * Only show locks if all tasks are dumped:
7775 	 */
7776 	if (!state_filter)
7777 		debug_show_all_locks();
7778 }
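/*
 * Editor's note: the usual trigger for this dump is SysRq (wired up in
 * drivers/tty/sysrq.c): 't' dumps every task (state_filter == 0), while
 * 'w' passes TASK_UNINTERRUPTIBLE to show only blocked tasks.
 */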
7779 
7780 /**
7781  * init_idle - set up an idle thread for a given CPU
7782  * @idle: task in question
7783  * @cpu: CPU the idle task belongs to
7784  *
7785  * NOTE: this function does not set the idle thread's NEED_RESCHED
7786  * flag, to make booting more robust.
7787  */
7788 void __init init_idle(struct task_struct *idle, int cpu)
7789 {
7790 #ifdef CONFIG_SMP
7791 	struct affinity_context ac = (struct affinity_context) {
7792 		.new_mask  = cpumask_of(cpu),
7793 		.flags     = 0,
7794 	};
7795 #endif
7796 	struct rq *rq = cpu_rq(cpu);
7797 	unsigned long flags;
7798 
7799 	raw_spin_lock_irqsave(&idle->pi_lock, flags);
7800 	raw_spin_rq_lock(rq);
7801 
7802 	idle->__state = TASK_RUNNING;
7803 	idle->se.exec_start = sched_clock();
7804 	/*
7805 	 * PF_KTHREAD should already be set at this point; regardless, make it
7806 	 * look like a proper per-CPU kthread.
7807 	 */
7808 	idle->flags |= PF_KTHREAD | PF_NO_SETAFFINITY;
7809 	kthread_set_per_cpu(idle, cpu);
7810 
7811 #ifdef CONFIG_SMP
7812 	/*
7813 	 * No validation and serialization required at boot time and for
7814 	 * setting up the idle tasks of not yet online CPUs.
7815 	 */
7816 	set_cpus_allowed_common(idle, &ac);
7817 #endif
7818 	/*
7819 	 * We're having a chicken and egg problem, even though we are
7820 	 * holding rq->lock, the CPU isn't yet set to this CPU so the
7821 	 * lockdep check in task_group() will fail.
7822 	 *
7823 	 * Similar case to sched_fork(). / Alternatively we could
7824 	 * use task_rq_lock() here and obtain the other rq->lock.
7825 	 *
7826 	 * Silence PROVE_RCU
7827 	 */
7828 	rcu_read_lock();
7829 	__set_task_cpu(idle, cpu);
7830 	rcu_read_unlock();
7831 
7832 	rq->idle = idle;
7833 	rq_set_donor(rq, idle);
7834 	rcu_assign_pointer(rq->curr, idle);
7835 	idle->on_rq = TASK_ON_RQ_QUEUED;
7836 #ifdef CONFIG_SMP
7837 	idle->on_cpu = 1;
7838 #endif
7839 	raw_spin_rq_unlock(rq);
7840 	raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
7841 
7842 	/* Set the preempt count _outside_ the spinlocks! */
7843 	init_idle_preempt_count(idle, cpu);
7844 
7845 	/*
7846 	 * The idle tasks have their own, simple scheduling class:
7847 	 */
7848 	idle->sched_class = &idle_sched_class;
7849 	ftrace_graph_init_idle_task(idle, cpu);
7850 	vtime_init_idle(idle, cpu);
7851 #ifdef CONFIG_SMP
7852 	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
7853 #endif
7854 }
7855 
7856 #ifdef CONFIG_SMP
7857 
7858 int cpuset_cpumask_can_shrink(const struct cpumask *cur,
7859 			      const struct cpumask *trial)
7860 {
7861 	int ret = 1;
7862 
7863 	if (cpumask_empty(cur))
7864 		return ret;
7865 
7866 	ret = dl_cpuset_cpumask_can_shrink(cur, trial);
7867 
7868 	return ret;
7869 }
7870 
7871 int task_can_attach(struct task_struct *p)
7872 {
7873 	int ret = 0;
7874 
7875 	/*
7876 	 * Kthreads which disallow setaffinity shouldn't be moved
7877 	 * to a new cpuset; we don't want to change their CPU
7878 	 * affinity and isolating such threads by their set of
7879 	 * allowed nodes is unnecessary.  Thus, cpusets are not
7880 	 * applicable for such threads.  This prevents checking for
7881 	 * success of set_cpus_allowed_ptr() on all attached tasks
7882 	 * before cpus_mask may be changed.
7883 	 */
7884 	if (p->flags & PF_NO_SETAFFINITY)
7885 		ret = -EINVAL;
7886 
7887 	return ret;
7888 }
7889 
7890 bool sched_smp_initialized __read_mostly;
7891 
7892 #ifdef CONFIG_NUMA_BALANCING
7893 /* Migrate current task p to target_cpu */
7894 int migrate_task_to(struct task_struct *p, int target_cpu)
7895 {
7896 	struct migration_arg arg = { p, target_cpu };
7897 	int curr_cpu = task_cpu(p);
7898 
7899 	if (curr_cpu == target_cpu)
7900 		return 0;
7901 
7902 	if (!cpumask_test_cpu(target_cpu, p->cpus_ptr))
7903 		return -EINVAL;
7904 
7905 	/* TODO: This is not properly updating schedstats */
7906 
7907 	trace_sched_move_numa(p, curr_cpu, target_cpu);
7908 	return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
7909 }
7910 
7911 /*
7912  * Requeue a task on a given node and accurately track the number of NUMA
7913  * tasks on the runqueues
7914  */
7915 void sched_setnuma(struct task_struct *p, int nid)
7916 {
7917 	bool queued, running;
7918 	struct rq_flags rf;
7919 	struct rq *rq;
7920 
7921 	rq = task_rq_lock(p, &rf);
7922 	queued = task_on_rq_queued(p);
7923 	running = task_current_donor(rq, p);
7924 
7925 	if (queued)
7926 		dequeue_task(rq, p, DEQUEUE_SAVE);
7927 	if (running)
7928 		put_prev_task(rq, p);
7929 
7930 	p->numa_preferred_nid = nid;
7931 
7932 	if (queued)
7933 		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
7934 	if (running)
7935 		set_next_task(rq, p);
7936 	task_rq_unlock(rq, p, &rf);
7937 }
7938 #endif /* CONFIG_NUMA_BALANCING */
7939 
7940 #ifdef CONFIG_HOTPLUG_CPU
7941 /*
7942  * Invoked on the outgoing CPU in context of the CPU hotplug thread
7943  * after ensuring that there are no user space tasks left on the CPU.
7944  *
7945  * If there is a lazy mm in use on the hotplug thread, drop it and
7946  * switch to init_mm.
7947  *
7948  * The reference count on init_mm is dropped in finish_cpu().
7949  */
7950 static void sched_force_init_mm(void)
7951 {
7952 	struct mm_struct *mm = current->active_mm;
7953 
7954 	if (mm != &init_mm) {
7955 		mmgrab_lazy_tlb(&init_mm);
7956 		local_irq_disable();
7957 		current->active_mm = &init_mm;
7958 		switch_mm_irqs_off(mm, &init_mm, current);
7959 		local_irq_enable();
7960 		finish_arch_post_lock_switch();
7961 		mmdrop_lazy_tlb(mm);
7962 	}
7963 
7964 	/* finish_cpu(), as ran on the BP, will clean up the active_mm state */
7965 }
7966 
7967 static int __balance_push_cpu_stop(void *arg)
7968 {
7969 	struct task_struct *p = arg;
7970 	struct rq *rq = this_rq();
7971 	struct rq_flags rf;
7972 	int cpu;
7973 
7974 	raw_spin_lock_irq(&p->pi_lock);
7975 	rq_lock(rq, &rf);
7976 
7977 	update_rq_clock(rq);
7978 
7979 	if (task_rq(p) == rq && task_on_rq_queued(p)) {
7980 		cpu = select_fallback_rq(rq->cpu, p);
7981 		rq = __migrate_task(rq, &rf, p, cpu);
7982 	}
7983 
7984 	rq_unlock(rq, &rf);
7985 	raw_spin_unlock_irq(&p->pi_lock);
7986 
7987 	put_task_struct(p);
7988 
7989 	return 0;
7990 }
7991 
7992 static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
7993 
7994 /*
7995  * Ensure we only run per-cpu kthreads once the CPU goes !active.
7996  *
7997  * This is enabled below SCHED_AP_ACTIVE; when !cpu_active(), but only
7998  * effective when the hotplug motion is down.
7999  */
8000 static void balance_push(struct rq *rq)
8001 {
8002 	struct task_struct *push_task = rq->curr;
8003 
8004 	lockdep_assert_rq_held(rq);
8005 
8006 	/*
8007 	 * Ensure the thing is persistent until balance_push_set(.on = false);
8008 	 */
8009 	rq->balance_callback = &balance_push_callback;
8010 
8011 	/*
8012 	 * Only active while going offline and when invoked on the outgoing
8013 	 * CPU.
8014 	 */
8015 	if (!cpu_dying(rq->cpu) || rq != this_rq())
8016 		return;
8017 
8018 	/*
8019 	 * Both the cpu-hotplug and stop task are in this case and are
8020 	 * required to complete the hotplug process.
8021 	 */
8022 	if (kthread_is_per_cpu(push_task) ||
8023 	    is_migration_disabled(push_task)) {
8024 
8025 		/*
8026 		 * If this is the idle task on the outgoing CPU try to wake
8027 		 * up the hotplug control thread which might wait for the
8028 		 * last task to vanish. The rcuwait_active() check is
8029 		 * accurate here because the waiter is pinned on this CPU
8030 		 * and can't obviously be running in parallel.
8031 		 *
8032 		 * On RT kernels this also has to check whether there are
8033 		 * pinned and scheduled out tasks on the runqueue. They
8034 		 * need to leave the migrate disabled section first.
8035 		 */
8036 		if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
8037 		    rcuwait_active(&rq->hotplug_wait)) {
8038 			raw_spin_rq_unlock(rq);
8039 			rcuwait_wake_up(&rq->hotplug_wait);
8040 			raw_spin_rq_lock(rq);
8041 		}
8042 		return;
8043 	}
8044 
8045 	get_task_struct(push_task);
8046 	/*
8047 	 * Temporarily drop rq->lock such that we can wake-up the stop task.
8048 	 * Both preemption and IRQs are still disabled.
8049 	 */
8050 	preempt_disable();
8051 	raw_spin_rq_unlock(rq);
8052 	stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
8053 			    this_cpu_ptr(&push_work));
8054 	preempt_enable();
8055 	/*
8056 	 * At this point need_resched() is true and we'll take the loop in
8057 	 * schedule(). The next pick is obviously going to be the stop task,
8058 	 * which is kthread_is_per_cpu() and will push this task away.
8059 	 */
8060 	raw_spin_rq_lock(rq);
8061 }
8062 
8063 static void balance_push_set(int cpu, bool on)
8064 {
8065 	struct rq *rq = cpu_rq(cpu);
8066 	struct rq_flags rf;
8067 
8068 	rq_lock_irqsave(rq, &rf);
8069 	if (on) {
8070 		WARN_ON_ONCE(rq->balance_callback);
8071 		rq->balance_callback = &balance_push_callback;
8072 	} else if (rq->balance_callback == &balance_push_callback) {
8073 		rq->balance_callback = NULL;
8074 	}
8075 	rq_unlock_irqrestore(rq, &rf);
8076 }
8077 
8078 /*
8079  * Invoked from a CPUs hotplug control thread after the CPU has been marked
8080  * inactive. All tasks which are not per CPU kernel threads are either
8081  * pushed off this CPU now via balance_push() or placed on a different CPU
8082  * during wakeup. Wait until the CPU is quiescent.
8083  */
8084 static void balance_hotplug_wait(void)
8085 {
8086 	struct rq *rq = this_rq();
8087 
8088 	rcuwait_wait_event(&rq->hotplug_wait,
8089 			   rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
8090 			   TASK_UNINTERRUPTIBLE);
8091 }
8092 
8093 #else
8094 
8095 static inline void balance_push(struct rq *rq)
8096 {
8097 }
8098 
8099 static inline void balance_push_set(int cpu, bool on)
8100 {
8101 }
8102 
8103 static inline void balance_hotplug_wait(void)
8104 {
8105 }
8106 
8107 #endif /* CONFIG_HOTPLUG_CPU */
8108 
8109 void set_rq_online(struct rq *rq)
8110 {
8111 	if (!rq->online) {
8112 		const struct sched_class *class;
8113 
8114 		cpumask_set_cpu(rq->cpu, rq->rd->online);
8115 		rq->online = 1;
8116 
8117 		for_each_class(class) {
8118 			if (class->rq_online)
8119 				class->rq_online(rq);
8120 		}
8121 	}
8122 }
8123 
8124 void set_rq_offline(struct rq *rq)
8125 {
8126 	if (rq->online) {
8127 		const struct sched_class *class;
8128 
8129 		update_rq_clock(rq);
8130 		for_each_class(class) {
8131 			if (class->rq_offline)
8132 				class->rq_offline(rq);
8133 		}
8134 
8135 		cpumask_clear_cpu(rq->cpu, rq->rd->online);
8136 		rq->online = 0;
8137 	}
8138 }
8139 
8140 static inline void sched_set_rq_online(struct rq *rq, int cpu)
8141 {
8142 	struct rq_flags rf;
8143 
8144 	rq_lock_irqsave(rq, &rf);
8145 	if (rq->rd) {
8146 		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
8147 		set_rq_online(rq);
8148 	}
8149 	rq_unlock_irqrestore(rq, &rf);
8150 }
8151 
8152 static inline void sched_set_rq_offline(struct rq *rq, int cpu)
8153 {
8154 	struct rq_flags rf;
8155 
8156 	rq_lock_irqsave(rq, &rf);
8157 	if (rq->rd) {
8158 		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
8159 		set_rq_offline(rq);
8160 	}
8161 	rq_unlock_irqrestore(rq, &rf);
8162 }
8163 
8164 /*
8165  * used to mark begin/end of suspend/resume:
8166  */
8167 static int num_cpus_frozen;
8168 
8169 /*
8170  * Update cpusets according to cpu_active mask.  If cpusets are
8171  * disabled, cpuset_update_active_cpus() becomes a simple wrapper
8172  * around partition_sched_domains().
8173  *
8174  * If we come here as part of a suspend/resume, don't touch cpusets because we
8175  * want to restore it back to its original state upon resume anyway.
8176  */
8177 static void cpuset_cpu_active(void)
8178 {
8179 	if (cpuhp_tasks_frozen) {
8180 		/*
8181 		 * num_cpus_frozen tracks how many CPUs are involved in suspend
8182 		 * resume sequence. As long as this is not the last online
8183 		 * operation in the resume sequence, just build a single sched
8184 		 * domain, ignoring cpusets.
8185 		 */
8186 		partition_sched_domains(1, NULL, NULL);
8187 		if (--num_cpus_frozen)
8188 			return;
8189 		/*
8190 		 * This is the last CPU online operation. So fall through and
8191 		 * restore the original sched domains by considering the
8192 		 * cpuset configurations.
8193 		 */
8194 		cpuset_force_rebuild();
8195 	}
8196 	cpuset_update_active_cpus();
8197 }
8198 
8199 static void cpuset_cpu_inactive(unsigned int cpu)
8200 {
8201 	if (!cpuhp_tasks_frozen) {
8202 		cpuset_update_active_cpus();
8203 	} else {
8204 		num_cpus_frozen++;
8205 		partition_sched_domains(1, NULL, NULL);
8206 	}
8207 }
8208 
8209 static inline void sched_smt_present_inc(int cpu)
8210 {
8211 #ifdef CONFIG_SCHED_SMT
8212 	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
8213 		static_branch_inc_cpuslocked(&sched_smt_present);
8214 #endif
8215 }
8216 
8217 static inline void sched_smt_present_dec(int cpu)
8218 {
8219 #ifdef CONFIG_SCHED_SMT
8220 	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
8221 		static_branch_dec_cpuslocked(&sched_smt_present);
8222 #endif
8223 }
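/*
 * Editor's note: the cpumask_weight() == 2 check in both helpers above
 * makes the sched_smt_present static branch count *cores*, not threads:
 * it is bumped exactly once, when the second sibling of a core comes
 * online, and dropped again when a core is reduced to a single sibling.
 */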
8224 
8225 int sched_cpu_activate(unsigned int cpu)
8226 {
8227 	struct rq *rq = cpu_rq(cpu);
8228 
8229 	/*
8230 	 * Clear the balance_push callback and prepare to schedule
8231 	 * regular tasks.
8232 	 */
8233 	balance_push_set(cpu, false);
8234 
8235 	/*
8236 	 * When going up, increment the number of cores with SMT present.
8237 	 */
8238 	sched_smt_present_inc(cpu);
8239 	set_cpu_active(cpu, true);
8240 
8241 	if (sched_smp_initialized) {
8242 		sched_update_numa(cpu, true);
8243 		sched_domains_numa_masks_set(cpu);
8244 		cpuset_cpu_active();
8245 	}
8246 
8247 	scx_rq_activate(rq);
8248 
8249 	/*
8250 	 * Put the rq online, if not already. This happens:
8251 	 *
8252 	 * 1) In the early boot process, because we build the real domains
8253 	 *    after all CPUs have been brought up.
8254 	 *
8255 	 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
8256 	 *    domains.
8257 	 */
8258 	sched_set_rq_online(rq, cpu);
8259 
8260 	return 0;
8261 }
8262 
8263 int sched_cpu_deactivate(unsigned int cpu)
8264 {
8265 	struct rq *rq = cpu_rq(cpu);
8266 	int ret;
8267 
8268 	ret = dl_bw_deactivate(cpu);
8269 
8270 	if (ret)
8271 		return ret;
8272 
8273 	/*
8274 	 * Remove CPU from nohz.idle_cpus_mask to prevent participating in
8275 	 * load balancing when not active
8276 	 */
8277 	nohz_balance_exit_idle(rq);
8278 
8279 	set_cpu_active(cpu, false);
8280 
8281 	/*
8282 	 * From this point forward, this CPU will refuse to run any task that
8283 	 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
8284 	 * push those tasks away until this gets cleared, see
8285 	 * sched_cpu_dying().
8286 	 */
8287 	balance_push_set(cpu, true);
8288 
8289 	/*
8290 	 * We've cleared cpu_active_mask / set balance_push, wait for all
8291 	 * preempt-disabled and RCU users of this state to go away such that
8292 	 * all new such users will observe it.
8293 	 *
8294 	 * Specifically, we rely on ttwu to no longer target this CPU, see
8295 	 * ttwu_queue_cond() and is_cpu_allowed().
8296 	 *
8297 	 * Synchronize before parking the smpboot threads to take care of the RCU boost case.
8298 	 */
8299 	synchronize_rcu();
8300 
8301 	sched_set_rq_offline(rq, cpu);
8302 
8303 	scx_rq_deactivate(rq);
8304 
8305 	/*
8306 	 * When going down, decrement the number of cores with SMT present.
8307 	 */
8308 	sched_smt_present_dec(cpu);
8309 
8310 #ifdef CONFIG_SCHED_SMT
8311 	sched_core_cpu_deactivate(cpu);
8312 #endif
8313 
8314 	if (!sched_smp_initialized)
8315 		return 0;
8316 
8317 	sched_update_numa(cpu, false);
8318 	cpuset_cpu_inactive(cpu);
8319 	sched_domains_numa_masks_clear(cpu);
8320 	return 0;
8321 }
8322 
8323 static void sched_rq_cpu_starting(unsigned int cpu)
8324 {
8325 	struct rq *rq = cpu_rq(cpu);
8326 
8327 	rq->calc_load_update = calc_load_update;
8328 	update_max_interval();
8329 }
8330 
8331 int sched_cpu_starting(unsigned int cpu)
8332 {
8333 	sched_core_cpu_starting(cpu);
8334 	sched_rq_cpu_starting(cpu);
8335 	sched_tick_start(cpu);
8336 	return 0;
8337 }
8338 
8339 #ifdef CONFIG_HOTPLUG_CPU
8340 
8341 /*
8342  * Invoked immediately before the stopper thread is invoked to bring the
8343  * CPU down completely. At this point all per CPU kthreads except the
8344  * hotplug thread (current) and the stopper thread (inactive) have been
8345  * either parked or have been unbound from the outgoing CPU. Ensure that
8346  * any of those which might be on the way out are gone.
8347  *
8348  * If after this point a bound task is being woken on this CPU then the
8349  * responsible hotplug callback has failed to do its job.
8350  * sched_cpu_dying() will catch it with the appropriate fireworks.
8351  */
8352 int sched_cpu_wait_empty(unsigned int cpu)
8353 {
8354 	balance_hotplug_wait();
8355 	sched_force_init_mm();
8356 	return 0;
8357 }
8358 
8359 /*
8360  * Since this CPU is going 'away' for a while, fold any nr_active delta we
8361  * might have. Called from the CPU stopper task after ensuring that the
8362  * stopper is the last running task on the CPU, so nr_active count is
8363  * stable. We need to take the tear-down thread which is calling this into
8364  * account, so we hand in adjust = 1 to the load calculation.
8365  *
8366  * Also see the comment "Global load-average calculations".
8367  */
8368 static void calc_load_migrate(struct rq *rq)
8369 {
8370 	long delta = calc_load_fold_active(rq, 1);
8371 
8372 	if (delta)
8373 		atomic_long_add(delta, &calc_load_tasks);
8374 }
8375 
8376 static void dump_rq_tasks(struct rq *rq, const char *loglvl)
8377 {
8378 	struct task_struct *g, *p;
8379 	int cpu = cpu_of(rq);
8380 
8381 	lockdep_assert_rq_held(rq);
8382 
8383 	printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
8384 	for_each_process_thread(g, p) {
8385 		if (task_cpu(p) != cpu)
8386 			continue;
8387 
8388 		if (!task_on_rq_queued(p))
8389 			continue;
8390 
8391 		printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
8392 	}
8393 }
8394 
8395 int sched_cpu_dying(unsigned int cpu)
8396 {
8397 	struct rq *rq = cpu_rq(cpu);
8398 	struct rq_flags rf;
8399 
8400 	/* Handle pending wakeups and then migrate everything off */
8401 	sched_tick_stop(cpu);
8402 
8403 	rq_lock_irqsave(rq, &rf);
8404 	if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
8405 		WARN(true, "Dying CPU not properly vacated!");
8406 		dump_rq_tasks(rq, KERN_WARNING);
8407 	}
8408 	rq_unlock_irqrestore(rq, &rf);
8409 
8410 	calc_load_migrate(rq);
8411 	update_max_interval();
8412 	hrtick_clear(rq);
8413 	sched_core_cpu_dying(cpu);
8414 	return 0;
8415 }
8416 #endif
8417 
8418 void __init sched_init_smp(void)
8419 {
8420 	sched_init_numa(NUMA_NO_NODE);
8421 
8422 	/*
8423 	 * There's no userspace yet to cause hotplug operations; hence all the
8424 	 * CPU masks are stable and all blatant races in the below code cannot
8425 	 * happen.
8426 	 */
8427 	mutex_lock(&sched_domains_mutex);
8428 	sched_init_domains(cpu_active_mask);
8429 	mutex_unlock(&sched_domains_mutex);
8430 
8431 	/* Move init over to a non-isolated CPU */
8432 	if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0)
8433 		BUG();
8434 	current->flags &= ~PF_NO_SETAFFINITY;
8435 	sched_init_granularity();
8436 
8437 	init_sched_rt_class();
8438 	init_sched_dl_class();
8439 
8440 	sched_smp_initialized = true;
8441 }
8442 
8443 static int __init migration_init(void)
8444 {
8445 	sched_cpu_starting(smp_processor_id());
8446 	return 0;
8447 }
8448 early_initcall(migration_init);
8449 
8450 #else
8451 void __init sched_init_smp(void)
8452 {
8453 	sched_init_granularity();
8454 }
8455 #endif /* CONFIG_SMP */
8456 
8457 int in_sched_functions(unsigned long addr)
8458 {
8459 	return in_lock_functions(addr) ||
8460 		(addr >= (unsigned long)__sched_text_start
8461 		&& addr < (unsigned long)__sched_text_end);
8462 }
8463 
8464 #ifdef CONFIG_CGROUP_SCHED
8465 /*
8466  * Default task group.
8467  * Every task in system belongs to this group at bootup.
8468  */
8469 struct task_group root_task_group;
8470 LIST_HEAD(task_groups);
8471 
8472 /* Cacheline aligned slab cache for task_group */
8473 static struct kmem_cache *task_group_cache __ro_after_init;
8474 #endif
8475 
8476 void __init sched_init(void)
8477 {
8478 	unsigned long ptr = 0;
8479 	int i;
8480 
8481 	/* Make sure the linker didn't screw up */
8482 #ifdef CONFIG_SMP
8483 	BUG_ON(!sched_class_above(&stop_sched_class, &dl_sched_class));
8484 #endif
8485 	BUG_ON(!sched_class_above(&dl_sched_class, &rt_sched_class));
8486 	BUG_ON(!sched_class_above(&rt_sched_class, &fair_sched_class));
8487 	BUG_ON(!sched_class_above(&fair_sched_class, &idle_sched_class));
8488 #ifdef CONFIG_SCHED_CLASS_EXT
8489 	BUG_ON(!sched_class_above(&fair_sched_class, &ext_sched_class));
8490 	BUG_ON(!sched_class_above(&ext_sched_class, &idle_sched_class));
8491 #endif
8492 
8493 	wait_bit_init();
8494 
8495 #ifdef CONFIG_FAIR_GROUP_SCHED
8496 	ptr += 2 * nr_cpu_ids * sizeof(void **);
8497 #endif
8498 #ifdef CONFIG_RT_GROUP_SCHED
8499 	ptr += 2 * nr_cpu_ids * sizeof(void **);
8500 #endif
8501 	if (ptr) {
8502 		ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT);
8503 
8504 #ifdef CONFIG_FAIR_GROUP_SCHED
8505 		root_task_group.se = (struct sched_entity **)ptr;
8506 		ptr += nr_cpu_ids * sizeof(void **);
8507 
8508 		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
8509 		ptr += nr_cpu_ids * sizeof(void **);
8510 
8511 		root_task_group.shares = ROOT_TASK_GROUP_LOAD;
8512 		init_cfs_bandwidth(&root_task_group.cfs_bandwidth, NULL);
8513 #endif /* CONFIG_FAIR_GROUP_SCHED */
8514 #ifdef CONFIG_EXT_GROUP_SCHED
8515 		root_task_group.scx_weight = CGROUP_WEIGHT_DFL;
8516 #endif /* CONFIG_EXT_GROUP_SCHED */
8517 #ifdef CONFIG_RT_GROUP_SCHED
8518 		root_task_group.rt_se = (struct sched_rt_entity **)ptr;
8519 		ptr += nr_cpu_ids * sizeof(void **);
8520 
8521 		root_task_group.rt_rq = (struct rt_rq **)ptr;
8522 		ptr += nr_cpu_ids * sizeof(void **);
8523 
8524 #endif /* CONFIG_RT_GROUP_SCHED */
8525 	}
8526 
8527 #ifdef CONFIG_SMP
8528 	init_defrootdomain();
8529 #endif
8530 
8531 #ifdef CONFIG_RT_GROUP_SCHED
8532 	init_rt_bandwidth(&root_task_group.rt_bandwidth,
8533 			global_rt_period(), global_rt_runtime());
8534 #endif /* CONFIG_RT_GROUP_SCHED */
8535 
8536 #ifdef CONFIG_CGROUP_SCHED
8537 	task_group_cache = KMEM_CACHE(task_group, 0);
8538 
8539 	list_add(&root_task_group.list, &task_groups);
8540 	INIT_LIST_HEAD(&root_task_group.children);
8541 	INIT_LIST_HEAD(&root_task_group.siblings);
8542 	autogroup_init(&init_task);
8543 #endif /* CONFIG_CGROUP_SCHED */
8544 
8545 	for_each_possible_cpu(i) {
8546 		struct rq *rq;
8547 
8548 		rq = cpu_rq(i);
8549 		raw_spin_lock_init(&rq->__lock);
8550 		rq->nr_running = 0;
8551 		rq->calc_load_active = 0;
8552 		rq->calc_load_update = jiffies + LOAD_FREQ;
8553 		init_cfs_rq(&rq->cfs);
8554 		init_rt_rq(&rq->rt);
8555 		init_dl_rq(&rq->dl);
8556 #ifdef CONFIG_FAIR_GROUP_SCHED
8557 		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
8558 		rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
8559 		/*
8560 		 * How much CPU bandwidth does root_task_group get?
8561 		 *
8562 		 * In case of task-groups formed through the cgroup filesystem, it
8563 		 * gets 100% of the CPU resources in the system. This overall
8564 		 * system CPU resource is divided among the tasks of
8565 		 * root_task_group and its child task-groups in a fair manner,
8566 		 * based on each entity's (task or task-group's) weight
8567 		 * (se->load.weight).
8568 		 *
8569 		 * In other words, if root_task_group has 10 tasks of weight
8570 		 * 1024 and two child groups A0 and A1 (of weight 1024 each),
8571 		 * then A0's share of the CPU resource is:
8572 		 *
8573 		 *	A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
8574 		 *
8575 		 * We achieve this by letting root_task_group's tasks sit
8576 		 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
8577 		 */
8578 		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
8579 #endif /* CONFIG_FAIR_GROUP_SCHED */
8580 
8581 #ifdef CONFIG_RT_GROUP_SCHED
8582 		/*
8583 		 * This is required for init cpu because rt.c:__enable_runtime()
8584 		 * starts working after scheduler_running, which is not the case
8585 		 * yet.
8586 		 */
8587 		rq->rt.rt_runtime = global_rt_runtime();
8588 		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
8589 #endif
8590 #ifdef CONFIG_SMP
8591 		rq->sd = NULL;
8592 		rq->rd = NULL;
8593 		rq->cpu_capacity = SCHED_CAPACITY_SCALE;
8594 		rq->balance_callback = &balance_push_callback;
8595 		rq->active_balance = 0;
8596 		rq->next_balance = jiffies;
8597 		rq->push_cpu = 0;
8598 		rq->cpu = i;
8599 		rq->online = 0;
8600 		rq->idle_stamp = 0;
8601 		rq->avg_idle = 2*sysctl_sched_migration_cost;
8602 		rq->max_idle_balance_cost = sysctl_sched_migration_cost;
8603 
8604 		INIT_LIST_HEAD(&rq->cfs_tasks);
8605 
8606 		rq_attach_root(rq, &def_root_domain);
8607 #ifdef CONFIG_NO_HZ_COMMON
8608 		rq->last_blocked_load_update_tick = jiffies;
8609 		atomic_set(&rq->nohz_flags, 0);
8610 
8611 		INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
8612 #endif
8613 #ifdef CONFIG_HOTPLUG_CPU
8614 		rcuwait_init(&rq->hotplug_wait);
8615 #endif
8616 #endif /* CONFIG_SMP */
8617 		hrtick_rq_init(rq);
8618 		atomic_set(&rq->nr_iowait, 0);
8619 		fair_server_init(rq);
8620 
8621 #ifdef CONFIG_SCHED_CORE
8622 		rq->core = rq;
8623 		rq->core_pick = NULL;
8624 		rq->core_dl_server = NULL;
8625 		rq->core_enabled = 0;
8626 		rq->core_tree = RB_ROOT;
8627 		rq->core_forceidle_count = 0;
8628 		rq->core_forceidle_occupation = 0;
8629 		rq->core_forceidle_start = 0;
8630 
8631 		rq->core_cookie = 0UL;
8632 #endif
8633 		zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i));
8634 	}
8635 
8636 	set_load_weight(&init_task, false);
8637 	init_task.se.slice = sysctl_sched_base_slice;
8638 
8639 	/*
8640 	 * The boot idle thread does lazy MMU switching as well:
8641 	 */
8642 	mmgrab_lazy_tlb(&init_mm);
8643 	enter_lazy_tlb(&init_mm, current);
8644 
8645 	/*
8646 	 * The idle task doesn't need the kthread struct to function, but it
8647 	 * is dressed up as a per-CPU kthread and thus needs to play the part
8648 	 * if we want to avoid special-casing it in code that deals with per-CPU
8649 	 * kthreads.
8650 	 */
8651 	WARN_ON(!set_kthread_struct(current));
8652 
8653 	/*
8654 	 * Make us the idle thread. Technically, schedule() should not be
8655 	 * called from this thread; however, somewhere below it might be.
8656 	 * Because we are the idle thread, we simply pick up running again
8657 	 * when this runqueue becomes "idle".
8658 	 */
8659 	__sched_fork(0, current);
8660 	init_idle(current, smp_processor_id());
8661 
8662 	calc_load_update = jiffies + LOAD_FREQ;
8663 
8664 #ifdef CONFIG_SMP
8665 	idle_thread_set_boot_cpu();
8666 	balance_push_set(smp_processor_id(), false);
8667 #endif
8668 	init_sched_fair_class();
8669 	init_sched_ext_class();
8670 
8671 	psi_init();
8672 
8673 	init_uclamp();
8674 
8675 	preempt_dynamic_init();
8676 
8677 	scheduler_running = 1;
8678 }
8679 
8680 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
8681 
8682 void __might_sleep(const char *file, int line)
8683 {
8684 	unsigned int state = get_current_state();
8685 	/*
8686 	 * Blocking primitives will set (and therefore destroy) current->state.
8687 	 * Since we will exit with TASK_RUNNING, make sure we enter with it;
8688 	 * otherwise we would destroy the caller's state.
8689 	 */
8690 	WARN_ONCE(state != TASK_RUNNING && current->task_state_change,
8691 			"do not call blocking ops when !TASK_RUNNING; "
8692 			"state=%x set at [<%p>] %pS\n", state,
8693 			(void *)current->task_state_change,
8694 			(void *)current->task_state_change);
8695 
8696 	__might_resched(file, line, 0);
8697 }
8698 EXPORT_SYMBOL(__might_sleep);
8699 
8700 static void print_preempt_disable_ip(int preempt_offset, unsigned long ip)
8701 {
8702 	if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT))
8703 		return;
8704 
8705 	if (preempt_count() == preempt_offset)
8706 		return;
8707 
8708 	pr_err("Preemption disabled at:");
8709 	print_ip_sym(KERN_ERR, ip);
8710 }
8711 
8712 static inline bool resched_offsets_ok(unsigned int offsets)
8713 {
8714 	unsigned int nested = preempt_count();
8715 
8716 	nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT;
8717 
8718 	return nested == offsets;
8719 }
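
/*
 * Worked example of the encoding checked above (a sketch; assumes
 * MIGHT_RESCHED_RCU_SHIFT == 8 as defined in <linux/kernel.h>): a caller
 * that legitimately holds one RCU read-side critical section with
 * preemption disabled once passes offsets == 1 | (1 << 8) == 0x101, and
 * resched_offsets_ok() then accepts exactly preempt_count() == 1
 * combined with rcu_preempt_depth() == 1; any other nesting trips the
 * __might_resched() splat below.
 */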
8720 
8721 void __might_resched(const char *file, int line, unsigned int offsets)
8722 {
8723 	/* Ratelimiting timestamp: */
8724 	static unsigned long prev_jiffy;
8725 
8726 	unsigned long preempt_disable_ip;
8727 
8728 	/* WARN_ON_ONCE() by default, no rate limit required: */
8729 	rcu_sleep_check();
8730 
8731 	if ((resched_offsets_ok(offsets) && !irqs_disabled() &&
8732 	     !is_idle_task(current) && !current->non_block_count) ||
8733 	    system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
8734 	    oops_in_progress)
8735 		return;
8736 
8737 	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8738 		return;
8739 	prev_jiffy = jiffies;
8740 
8741 	/* Save this before calling printk(), since that will clobber it: */
8742 	preempt_disable_ip = get_preempt_disable_ip(current);
8743 
8744 	pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
8745 	       file, line);
8746 	pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
8747 	       in_atomic(), irqs_disabled(), current->non_block_count,
8748 	       current->pid, current->comm);
8749 	pr_err("preempt_count: %x, expected: %x\n", preempt_count(),
8750 	       offsets & MIGHT_RESCHED_PREEMPT_MASK);
8751 
8752 	if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
8753 		pr_err("RCU nest depth: %d, expected: %u\n",
8754 		       rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT);
8755 	}
8756 
8757 	if (task_stack_end_corrupted(current))
8758 		pr_emerg("Thread overran stack, or stack corrupted\n");
8759 
8760 	debug_show_held_locks(current);
8761 	if (irqs_disabled())
8762 		print_irqtrace_events(current);
8763 
8764 	print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK,
8765 				 preempt_disable_ip);
8766 
8767 	dump_stack();
8768 	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
8769 }
8770 EXPORT_SYMBOL(__might_resched);
8771 
8772 void __cant_sleep(const char *file, int line, int preempt_offset)
8773 {
8774 	static unsigned long prev_jiffy;
8775 
8776 	if (irqs_disabled())
8777 		return;
8778 
8779 	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
8780 		return;
8781 
8782 	if (preempt_count() > preempt_offset)
8783 		return;
8784 
8785 	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8786 		return;
8787 	prev_jiffy = jiffies;
8788 
8789 	printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
8790 	printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
8791 			in_atomic(), irqs_disabled(),
8792 			current->pid, current->comm);
8793 
8794 	debug_show_held_locks(current);
8795 	dump_stack();
8796 	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
8797 }
8798 EXPORT_SYMBOL_GPL(__cant_sleep);
8799 
8800 #ifdef CONFIG_SMP
8801 void __cant_migrate(const char *file, int line)
8802 {
8803 	static unsigned long prev_jiffy;
8804 
8805 	if (irqs_disabled())
8806 		return;
8807 
8808 	if (is_migration_disabled(current))
8809 		return;
8810 
8811 	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
8812 		return;
8813 
8814 	if (preempt_count() > 0)
8815 		return;
8816 
8817 	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8818 		return;
8819 	prev_jiffy = jiffies;
8820 
8821 	pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
8822 	pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
8823 	       in_atomic(), irqs_disabled(), is_migration_disabled(current),
8824 	       current->pid, current->comm);
8825 
8826 	debug_show_held_locks(current);
8827 	dump_stack();
8828 	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
8829 }
8830 EXPORT_SYMBOL_GPL(__cant_migrate);
8831 #endif
8832 #endif
8833 
8834 #ifdef CONFIG_MAGIC_SYSRQ
8835 void normalize_rt_tasks(void)
8836 {
8837 	struct task_struct *g, *p;
8838 	struct sched_attr attr = {
8839 		.sched_policy = SCHED_NORMAL,
8840 	};
8841 
8842 	read_lock(&tasklist_lock);
8843 	for_each_process_thread(g, p) {
8844 		/*
8845 		 * Only normalize user tasks:
8846 		 */
8847 		if (p->flags & PF_KTHREAD)
8848 			continue;
8849 
8850 		p->se.exec_start = 0;
8851 		schedstat_set(p->stats.wait_start,  0);
8852 		schedstat_set(p->stats.sleep_start, 0);
8853 		schedstat_set(p->stats.block_start, 0);
8854 
8855 		if (!rt_or_dl_task(p)) {
8856 			/*
8857 			 * Renice negative nice level userspace
8858 			 * tasks back to 0:
8859 			 */
8860 			if (task_nice(p) < 0)
8861 				set_user_nice(p, 0);
8862 			continue;
8863 		}
8864 
8865 		__sched_setscheduler(p, &attr, false, false);
8866 	}
8867 	read_unlock(&tasklist_lock);
8868 }
8869 
8870 #endif /* CONFIG_MAGIC_SYSRQ */
8871 
8872 #if defined(CONFIG_KGDB_KDB)
8873 /*
8874  * These functions are only useful for KDB.
8875  *
8876  * They can only be called when the whole system has been
8877  * stopped - every CPU needs to be quiescent, and no scheduling
8878  * activity can take place. Using them for anything else would
8879  * be a serious bug, and as a result, they aren't even visible
8880  * under any other configuration.
8881  */
8882 
8883 /**
8884  * curr_task - return the current task for a given CPU.
8885  * @cpu: the processor in question.
8886  *
8887  * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
8888  *
8889  * Return: The current task for @cpu.
8890  */
8891 struct task_struct *curr_task(int cpu)
8892 {
8893 	return cpu_curr(cpu);
8894 }
8895 
8896 #endif /* defined(CONFIG_KGDB_KDB) */
8897 
8898 #ifdef CONFIG_CGROUP_SCHED
8899 /* task_group_lock serializes the addition/removal of task groups */
8900 static DEFINE_SPINLOCK(task_group_lock);
8901 
8902 static inline void alloc_uclamp_sched_group(struct task_group *tg,
8903 					    struct task_group *parent)
8904 {
8905 #ifdef CONFIG_UCLAMP_TASK_GROUP
8906 	enum uclamp_id clamp_id;
8907 
8908 	for_each_clamp_id(clamp_id) {
8909 		uclamp_se_set(&tg->uclamp_req[clamp_id],
8910 			      uclamp_none(clamp_id), false);
8911 		tg->uclamp[clamp_id] = parent->uclamp[clamp_id];
8912 	}
8913 #endif
8914 }
8915 
8916 static void sched_free_group(struct task_group *tg)
8917 {
8918 	free_fair_sched_group(tg);
8919 	free_rt_sched_group(tg);
8920 	autogroup_free(tg);
8921 	kmem_cache_free(task_group_cache, tg);
8922 }
8923 
8924 static void sched_free_group_rcu(struct rcu_head *rcu)
8925 {
8926 	sched_free_group(container_of(rcu, struct task_group, rcu));
8927 }
8928 
8929 static void sched_unregister_group(struct task_group *tg)
8930 {
8931 	unregister_fair_sched_group(tg);
8932 	unregister_rt_sched_group(tg);
8933 	/*
8934 	 * We have to wait for yet another RCU grace period to expire, as
8935 	 * print_cfs_stats() might run concurrently.
8936 	 */
8937 	call_rcu(&tg->rcu, sched_free_group_rcu);
8938 }
8939 
8940 /* allocate runqueue etc for a new task group */
8941 struct task_group *sched_create_group(struct task_group *parent)
8942 {
8943 	struct task_group *tg;
8944 
8945 	tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
8946 	if (!tg)
8947 		return ERR_PTR(-ENOMEM);
8948 
8949 	if (!alloc_fair_sched_group(tg, parent))
8950 		goto err;
8951 
8952 	if (!alloc_rt_sched_group(tg, parent))
8953 		goto err;
8954 
8955 	scx_group_set_weight(tg, CGROUP_WEIGHT_DFL);
8956 	alloc_uclamp_sched_group(tg, parent);
8957 
8958 	return tg;
8959 
8960 err:
8961 	sched_free_group(tg);
8962 	return ERR_PTR(-ENOMEM);
8963 }
8964 
8965 void sched_online_group(struct task_group *tg, struct task_group *parent)
8966 {
8967 	unsigned long flags;
8968 
8969 	spin_lock_irqsave(&task_group_lock, flags);
8970 	list_add_rcu(&tg->list, &task_groups);
8971 
8972 	/* Root should already exist: */
8973 	WARN_ON(!parent);
8974 
8975 	tg->parent = parent;
8976 	INIT_LIST_HEAD(&tg->children);
8977 	list_add_rcu(&tg->siblings, &parent->children);
8978 	spin_unlock_irqrestore(&task_group_lock, flags);
8979 
8980 	online_fair_sched_group(tg);
8981 }
8982 
8983 /* RCU callback to free various structures associated with a task group */
8984 static void sched_unregister_group_rcu(struct rcu_head *rhp)
8985 {
8986 	/* Now it should be safe to free those cfs_rqs: */
8987 	sched_unregister_group(container_of(rhp, struct task_group, rcu));
8988 }
8989 
8990 void sched_destroy_group(struct task_group *tg)
8991 {
8992 	/* Wait for possible concurrent references to cfs_rqs to complete: */
8993 	call_rcu(&tg->rcu, sched_unregister_group_rcu);
8994 }
8995 
8996 void sched_release_group(struct task_group *tg)
8997 {
8998 	unsigned long flags;
8999 
9000 	/*
9001 	 * Unlink first, to prevent walk_tg_tree_from() from finding us (via
9002 	 * sched_cfs_period_timer()).
9003 	 *
9004 	 * For this to be effective, we have to wait for all pending users of
9005 	 * this task group to leave their RCU critical section to ensure no new
9006 	 * user will see our dying task group any more. Specifically ensure
9007 	 * that tg_unthrottle_up() won't add decayed cfs_rq's to it.
9008 	 *
9009 	 * We therefore defer calling unregister_fair_sched_group() to
9010 	 * sched_unregister_group() which is guaranteed to get called only after the
9011 	 * current RCU grace period has expired.
9012 	 */
9013 	spin_lock_irqsave(&task_group_lock, flags);
9014 	list_del_rcu(&tg->list);
9015 	list_del_rcu(&tg->siblings);
9016 	spin_unlock_irqrestore(&task_group_lock, flags);
9017 }
9018 
9019 static struct task_group *sched_get_task_group(struct task_struct *tsk)
9020 {
9021 	struct task_group *tg;
9022 
9023 	/*
9024 	 * All callers are synchronized by task_rq_lock(); we do not use RCU,
9025 	 * which would be pointless here. Thus, we pass "true" to task_css_check()
9026 	 * to prevent lockdep warnings.
9027 	 */
9028 	tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
9029 			  struct task_group, css);
9030 	tg = autogroup_task_group(tsk, tg);
9031 
9032 	return tg;
9033 }
9034 
9035 static void sched_change_group(struct task_struct *tsk, struct task_group *group)
9036 {
9037 	tsk->sched_task_group = group;
9038 
9039 #ifdef CONFIG_FAIR_GROUP_SCHED
9040 	if (tsk->sched_class->task_change_group)
9041 		tsk->sched_class->task_change_group(tsk);
9042 	else
9043 #endif
9044 		set_task_rq(tsk, task_cpu(tsk));
9045 }
9046 
9047 /*
9048  * Change task's runqueue when it moves between groups.
9049  *
9050  * The caller of this function should have put the task in its new group by
9051  * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
9052  * its new group.
9053  */
9054 void sched_move_task(struct task_struct *tsk, bool for_autogroup)
9055 {
9056 	int queued, running, queue_flags =
9057 		DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
9058 	struct task_group *group;
9059 	struct rq *rq;
9060 
9061 	CLASS(task_rq_lock, rq_guard)(tsk);
9062 	rq = rq_guard.rq;
9063 
9064 	/*
9065 	 * Especially with SCHED_AUTOGROUP enabled, it is possible to get superfluous
9066 	 * group changes.
9067 	 */
9068 	group = sched_get_task_group(tsk);
9069 	if (group == tsk->sched_task_group)
9070 		return;
9071 
9072 	update_rq_clock(rq);
9073 
9074 	running = task_current_donor(rq, tsk);
9075 	queued = task_on_rq_queued(tsk);
9076 
9077 	if (queued)
9078 		dequeue_task(rq, tsk, queue_flags);
9079 	if (running)
9080 		put_prev_task(rq, tsk);
9081 
9082 	sched_change_group(tsk, group);
9083 	if (!for_autogroup)
9084 		scx_cgroup_move_task(tsk);
9085 
9086 	if (queued)
9087 		enqueue_task(rq, tsk, queue_flags);
9088 	if (running) {
9089 		set_next_task(rq, tsk);
9090 		/*
9091 		 * After changing group, the running task may have joined a
9092 		 * throttled one but it's still the running task. Trigger a
9093 		 * resched to make sure that task can still run.
9094 		 */
9095 		resched_curr(rq);
9096 	}
9097 }
9098 
9099 static struct cgroup_subsys_state *
9100 cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
9101 {
9102 	struct task_group *parent = css_tg(parent_css);
9103 	struct task_group *tg;
9104 
9105 	if (!parent) {
9106 		/* This is early initialization for the top cgroup */
9107 		return &root_task_group.css;
9108 	}
9109 
9110 	tg = sched_create_group(parent);
9111 	if (IS_ERR(tg))
9112 		return ERR_PTR(-ENOMEM);
9113 
9114 	return &tg->css;
9115 }
9116 
9117 /* Expose task group only after completing cgroup initialization */
9118 static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
9119 {
9120 	struct task_group *tg = css_tg(css);
9121 	struct task_group *parent = css_tg(css->parent);
9122 	int ret;
9123 
9124 	ret = scx_tg_online(tg);
9125 	if (ret)
9126 		return ret;
9127 
9128 	if (parent)
9129 		sched_online_group(tg, parent);
9130 
9131 #ifdef CONFIG_UCLAMP_TASK_GROUP
9132 	/* Propagate the effective uclamp value for the new group */
9133 	guard(mutex)(&uclamp_mutex);
9134 	guard(rcu)();
9135 	cpu_util_update_eff(css);
9136 #endif
9137 
9138 	return 0;
9139 }
9140 
9141 static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
9142 {
9143 	struct task_group *tg = css_tg(css);
9144 
9145 	scx_tg_offline(tg);
9146 }
9147 
9148 static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
9149 {
9150 	struct task_group *tg = css_tg(css);
9151 
9152 	sched_release_group(tg);
9153 }
9154 
9155 static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
9156 {
9157 	struct task_group *tg = css_tg(css);
9158 
9159 	/*
9160 	 * Relies on the RCU grace period between css_released() and this.
9161 	 */
9162 	sched_unregister_group(tg);
9163 }
9164 
9165 static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
9166 {
9167 #ifdef CONFIG_RT_GROUP_SCHED
9168 	struct task_struct *task;
9169 	struct cgroup_subsys_state *css;
9170 
9171 	cgroup_taskset_for_each(task, css, tset) {
9172 		if (!sched_rt_can_attach(css_tg(css), task))
9173 			return -EINVAL;
9174 	}
9175 #endif
9176 	return scx_cgroup_can_attach(tset);
9177 }
9178 
9179 static void cpu_cgroup_attach(struct cgroup_taskset *tset)
9180 {
9181 	struct task_struct *task;
9182 	struct cgroup_subsys_state *css;
9183 
9184 	cgroup_taskset_for_each(task, css, tset)
9185 		sched_move_task(task, false);
9186 
9187 	scx_cgroup_finish_attach();
9188 }
9189 
9190 static void cpu_cgroup_cancel_attach(struct cgroup_taskset *tset)
9191 {
9192 	scx_cgroup_cancel_attach(tset);
9193 }
9194 
9195 #ifdef CONFIG_UCLAMP_TASK_GROUP
9196 static void cpu_util_update_eff(struct cgroup_subsys_state *css)
9197 {
9198 	struct cgroup_subsys_state *top_css = css;
9199 	struct uclamp_se *uc_parent = NULL;
9200 	struct uclamp_se *uc_se = NULL;
9201 	unsigned int eff[UCLAMP_CNT];
9202 	enum uclamp_id clamp_id;
9203 	unsigned int clamps;
9204 
9205 	lockdep_assert_held(&uclamp_mutex);
9206 	SCHED_WARN_ON(!rcu_read_lock_held());
9207 
9208 	css_for_each_descendant_pre(css, top_css) {
9209 		uc_parent = css_tg(css)->parent
9210 			? css_tg(css)->parent->uclamp : NULL;
9211 
9212 		for_each_clamp_id(clamp_id) {
9213 			/* Assume effective clamps match requested clamps */
9214 			eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value;
9215 			/* Cap effective clamps with parent's effective clamps */
9216 			if (uc_parent &&
9217 			    eff[clamp_id] > uc_parent[clamp_id].value) {
9218 				eff[clamp_id] = uc_parent[clamp_id].value;
9219 			}
9220 		}
9221 		/* Ensure protection is always capped by limit */
9222 		eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]);
9223 
9224 		/* Propagate most restrictive effective clamps */
9225 		clamps = 0x0;
9226 		uc_se = css_tg(css)->uclamp;
9227 		for_each_clamp_id(clamp_id) {
9228 			if (eff[clamp_id] == uc_se[clamp_id].value)
9229 				continue;
9230 			uc_se[clamp_id].value = eff[clamp_id];
9231 			uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]);
9232 			clamps |= (0x1 << clamp_id);
9233 		}
9234 		if (!clamps) {
9235 			css = css_rightmost_descendant(css);
9236 			continue;
9237 		}
9238 
9239 		/* Immediately update descendants RUNNABLE tasks */
9240 		uclamp_update_active_tasks(css);
9241 	}
9242 }
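
/*
 * Illustration of the propagation above: if a child group requests
 * uclamp.min == 614 (~60%) while its parent's effective uclamp.min is
 * 512, the child's effective min is capped at 512; if the child's
 * effective uclamp.max is only 410, the min is further reduced to 410
 * so the protection never exceeds the limit. Subtrees whose effective
 * values did not change (clamps == 0) are skipped wholesale via
 * css_rightmost_descendant().
 */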
9243 
9244 /*
9245  * Compute the integer 10^N for a given exponent N by casting the literal
9246  * "1eN" C expression to an integer. Since there is no way to convert a
9247  * macro argument (N) into a character constant, use two levels of macros.
9248  */
9249 #define _POW10(exp) ((unsigned int)1e##exp)
9250 #define POW10(exp) _POW10(exp)
9251 
9252 struct uclamp_request {
9253 #define UCLAMP_PERCENT_SHIFT	2
9254 #define UCLAMP_PERCENT_SCALE	(100 * POW10(UCLAMP_PERCENT_SHIFT))
9255 	s64 percent;
9256 	u64 util;
9257 	int ret;
9258 };
9259 
9260 static inline struct uclamp_request
9261 capacity_from_percent(char *buf)
9262 {
9263 	struct uclamp_request req = {
9264 		.percent = UCLAMP_PERCENT_SCALE,
9265 		.util = SCHED_CAPACITY_SCALE,
9266 		.ret = 0,
9267 	};
9268 
9269 	buf = strim(buf);
9270 	if (strcmp(buf, "max")) {
9271 		req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT,
9272 					     &req.percent);
9273 		if (req.ret)
9274 			return req;
9275 		if ((u64)req.percent > UCLAMP_PERCENT_SCALE) {
9276 			req.ret = -ERANGE;
9277 			return req;
9278 		}
9279 
9280 		req.util = req.percent << SCHED_CAPACITY_SHIFT;
9281 		req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE);
9282 	}
9283 
9284 	return req;
9285 }
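
/*
 * Worked conversion (a sketch; assumes SCHED_CAPACITY_SHIFT == 10, i.e.
 * SCHED_CAPACITY_SCALE == 1024): writing "20" parses to
 * req.percent == 2000 (two fractional digits, UCLAMP_PERCENT_SHIFT == 2),
 * and then:
 *
 *	req.util = DIV_ROUND_CLOSEST_ULL(2000 << 10, 10000)
 *	         = DIV_ROUND_CLOSEST_ULL(2048000, 10000) = 205
 *
 * so a 20% request maps to a utilization clamp of 205 out of 1024.
 */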
9286 
9287 static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf,
9288 				size_t nbytes, loff_t off,
9289 				enum uclamp_id clamp_id)
9290 {
9291 	struct uclamp_request req;
9292 	struct task_group *tg;
9293 
9294 	req = capacity_from_percent(buf);
9295 	if (req.ret)
9296 		return req.ret;
9297 
9298 	static_branch_enable(&sched_uclamp_used);
9299 
9300 	guard(mutex)(&uclamp_mutex);
9301 	guard(rcu)();
9302 
9303 	tg = css_tg(of_css(of));
9304 	if (tg->uclamp_req[clamp_id].value != req.util)
9305 		uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false);
9306 
9307 	/*
9308 	 * Because the conversion rounding is not recoverable, keep track of
9309 	 * the exact requested value.
9310 	 */
9311 	tg->uclamp_pct[clamp_id] = req.percent;
9312 
9313 	/* Update effective clamps to track the most restrictive value */
9314 	cpu_util_update_eff(of_css(of));
9315 
9316 	return nbytes;
9317 }
9318 
9319 static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of,
9320 				    char *buf, size_t nbytes,
9321 				    loff_t off)
9322 {
9323 	return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MIN);
9324 }
9325 
9326 static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of,
9327 				    char *buf, size_t nbytes,
9328 				    loff_t off)
9329 {
9330 	return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MAX);
9331 }
9332 
9333 static inline void cpu_uclamp_print(struct seq_file *sf,
9334 				    enum uclamp_id clamp_id)
9335 {
9336 	struct task_group *tg;
9337 	u64 util_clamp;
9338 	u64 percent;
9339 	u32 rem;
9340 
9341 	scoped_guard (rcu) {
9342 		tg = css_tg(seq_css(sf));
9343 		util_clamp = tg->uclamp_req[clamp_id].value;
9344 	}
9345 
9346 	if (util_clamp == SCHED_CAPACITY_SCALE) {
9347 		seq_puts(sf, "max\n");
9348 		return;
9349 	}
9350 
9351 	percent = tg->uclamp_pct[clamp_id];
9352 	percent = div_u64_rem(percent, POW10(UCLAMP_PERCENT_SHIFT), &rem);
9353 	seq_printf(sf, "%llu.%0*u\n", percent, UCLAMP_PERCENT_SHIFT, rem);
9354 }
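
/*
 * Formatting example for the above: with tg->uclamp_pct == 2000 and
 * POW10(UCLAMP_PERCENT_SHIFT) == 100, div_u64_rem() yields 20 with
 * remainder 0, and "%llu.%0*u" prints "20.00"; a stored value of 1234
 * would print "12.34".
 */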
9355 
9356 static int cpu_uclamp_min_show(struct seq_file *sf, void *v)
9357 {
9358 	cpu_uclamp_print(sf, UCLAMP_MIN);
9359 	return 0;
9360 }
9361 
9362 static int cpu_uclamp_max_show(struct seq_file *sf, void *v)
9363 {
9364 	cpu_uclamp_print(sf, UCLAMP_MAX);
9365 	return 0;
9366 }
9367 #endif /* CONFIG_UCLAMP_TASK_GROUP */
9368 
9369 #ifdef CONFIG_GROUP_SCHED_WEIGHT
9370 static unsigned long tg_weight(struct task_group *tg)
9371 {
9372 #ifdef CONFIG_FAIR_GROUP_SCHED
9373 	return scale_load_down(tg->shares);
9374 #else
9375 	return sched_weight_from_cgroup(tg->scx_weight);
9376 #endif
9377 }
9378 
9379 static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
9380 				struct cftype *cftype, u64 shareval)
9381 {
9382 	int ret;
9383 
9384 	if (shareval > scale_load_down(ULONG_MAX))
9385 		shareval = MAX_SHARES;
9386 	ret = sched_group_set_shares(css_tg(css), scale_load(shareval));
9387 	if (!ret)
9388 		scx_group_set_weight(css_tg(css),
9389 				     sched_weight_to_cgroup(shareval));
9390 	return ret;
9391 }
9392 
9393 static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
9394 			       struct cftype *cft)
9395 {
9396 	return tg_weight(css_tg(css));
9397 }
9398 #endif /* CONFIG_GROUP_SCHED_WEIGHT */
9399 
9400 #ifdef CONFIG_CFS_BANDWIDTH
9401 static DEFINE_MUTEX(cfs_constraints_mutex);
9402 
9403 const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
9404 static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
9405 /* More than 203 days if BW_SHIFT equals 20. */
9406 static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC;
9407 
9408 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
9409 
9410 static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota,
9411 				u64 burst)
9412 {
9413 	int i, ret = 0, runtime_enabled, runtime_was_enabled;
9414 	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9415 
9416 	if (tg == &root_task_group)
9417 		return -EINVAL;
9418 
9419 	/*
9420 	 * Ensure we have at least some amount of bandwidth every period.  This is
9421 	 * to prevent reaching a state of large arrears when throttled via
9422 	 * entity_tick() resulting in prolonged exit starvation.
9423 	 */
9424 	if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
9425 		return -EINVAL;
9426 
9427 	/*
9428 	 * Likewise, bound things on the other side by preventing insane quota
9429 	 * periods.  This also allows us to normalize in computing quota
9430 	 * feasibility.
9431 	 */
9432 	if (period > max_cfs_quota_period)
9433 		return -EINVAL;
9434 
9435 	/*
9436 	 * Bound quota to defend quota against overflow during bandwidth shift.
9437 	 */
9438 	if (quota != RUNTIME_INF && quota > max_cfs_runtime)
9439 		return -EINVAL;
9440 
9441 	if (quota != RUNTIME_INF && (burst > quota ||
9442 				     burst + quota > max_cfs_runtime))
9443 		return -EINVAL;
9444 
9445 	/*
9446 	 * Prevent race between setting of cfs_rq->runtime_enabled and
9447 	 * unthrottle_offline_cfs_rqs().
9448 	 */
9449 	guard(cpus_read_lock)();
9450 	guard(mutex)(&cfs_constraints_mutex);
9451 
9452 	ret = __cfs_schedulable(tg, period, quota);
9453 	if (ret)
9454 		return ret;
9455 
9456 	runtime_enabled = quota != RUNTIME_INF;
9457 	runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
9458 	/*
9459 	 * If we need to toggle cfs_bandwidth_used, off->on must occur
9460 	 * before making related changes, and on->off must occur afterwards
9461 	 */
9462 	if (runtime_enabled && !runtime_was_enabled)
9463 		cfs_bandwidth_usage_inc();
9464 
9465 	scoped_guard (raw_spinlock_irq, &cfs_b->lock) {
9466 		cfs_b->period = ns_to_ktime(period);
9467 		cfs_b->quota = quota;
9468 		cfs_b->burst = burst;
9469 
9470 		__refill_cfs_bandwidth_runtime(cfs_b);
9471 
9472 		/*
9473 		 * Restart the period timer (if active) to handle new
9474 		 * period expiry:
9475 		 */
9476 		if (runtime_enabled)
9477 			start_cfs_bandwidth(cfs_b);
9478 	}
9479 
9480 	for_each_online_cpu(i) {
9481 		struct cfs_rq *cfs_rq = tg->cfs_rq[i];
9482 		struct rq *rq = cfs_rq->rq;
9483 
9484 		guard(rq_lock_irq)(rq);
9485 		cfs_rq->runtime_enabled = runtime_enabled;
9486 		cfs_rq->runtime_remaining = 0;
9487 
9488 		if (cfs_rq->throttled)
9489 			unthrottle_cfs_rq(cfs_rq);
9490 	}
9491 
9492 	if (runtime_was_enabled && !runtime_enabled)
9493 		cfs_bandwidth_usage_dec();
9494 
9495 	return 0;
9496 }
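
/*
 * Summary of the ranges accepted above, with a worked example (an
 * editorial sketch, not upstream text): period and any finite quota
 * must be at least min_cfs_quota_period (1ms), period at most
 * max_cfs_quota_period (1s), and a finite quota requires burst <= quota
 * with quota + burst <= max_cfs_runtime. E.g. cgroup1 values of
 * cfs_period_us == 100000 and cfs_quota_us == 50000 arrive here as
 * period == 100ms and quota == 50ms, limiting the group to half a CPU
 * per period.
 */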
9497 
9498 static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
9499 {
9500 	u64 quota, period, burst;
9501 
9502 	period = ktime_to_ns(tg->cfs_bandwidth.period);
9503 	burst = tg->cfs_bandwidth.burst;
9504 	if (cfs_quota_us < 0)
9505 		quota = RUNTIME_INF;
9506 	else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC)
9507 		quota = (u64)cfs_quota_us * NSEC_PER_USEC;
9508 	else
9509 		return -EINVAL;
9510 
9511 	return tg_set_cfs_bandwidth(tg, period, quota, burst);
9512 }
9513 
9514 static long tg_get_cfs_quota(struct task_group *tg)
9515 {
9516 	u64 quota_us;
9517 
9518 	if (tg->cfs_bandwidth.quota == RUNTIME_INF)
9519 		return -1;
9520 
9521 	quota_us = tg->cfs_bandwidth.quota;
9522 	do_div(quota_us, NSEC_PER_USEC);
9523 
9524 	return quota_us;
9525 }
9526 
9527 static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
9528 {
9529 	u64 quota, period, burst;
9530 
9531 	if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC)
9532 		return -EINVAL;
9533 
9534 	period = (u64)cfs_period_us * NSEC_PER_USEC;
9535 	quota = tg->cfs_bandwidth.quota;
9536 	burst = tg->cfs_bandwidth.burst;
9537 
9538 	return tg_set_cfs_bandwidth(tg, period, quota, burst);
9539 }
9540 
9541 static long tg_get_cfs_period(struct task_group *tg)
9542 {
9543 	u64 cfs_period_us;
9544 
9545 	cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
9546 	do_div(cfs_period_us, NSEC_PER_USEC);
9547 
9548 	return cfs_period_us;
9549 }
9550 
9551 static int tg_set_cfs_burst(struct task_group *tg, long cfs_burst_us)
9552 {
9553 	u64 quota, period, burst;
9554 
9555 	if ((u64)cfs_burst_us > U64_MAX / NSEC_PER_USEC)
9556 		return -EINVAL;
9557 
9558 	burst = (u64)cfs_burst_us * NSEC_PER_USEC;
9559 	period = ktime_to_ns(tg->cfs_bandwidth.period);
9560 	quota = tg->cfs_bandwidth.quota;
9561 
9562 	return tg_set_cfs_bandwidth(tg, period, quota, burst);
9563 }
9564 
9565 static long tg_get_cfs_burst(struct task_group *tg)
9566 {
9567 	u64 burst_us;
9568 
9569 	burst_us = tg->cfs_bandwidth.burst;
9570 	do_div(burst_us, NSEC_PER_USEC);
9571 
9572 	return burst_us;
9573 }
9574 
9575 static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
9576 				  struct cftype *cft)
9577 {
9578 	return tg_get_cfs_quota(css_tg(css));
9579 }
9580 
9581 static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
9582 				   struct cftype *cftype, s64 cfs_quota_us)
9583 {
9584 	return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
9585 }
9586 
9587 static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
9588 				   struct cftype *cft)
9589 {
9590 	return tg_get_cfs_period(css_tg(css));
9591 }
9592 
9593 static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
9594 				    struct cftype *cftype, u64 cfs_period_us)
9595 {
9596 	return tg_set_cfs_period(css_tg(css), cfs_period_us);
9597 }
9598 
9599 static u64 cpu_cfs_burst_read_u64(struct cgroup_subsys_state *css,
9600 				  struct cftype *cft)
9601 {
9602 	return tg_get_cfs_burst(css_tg(css));
9603 }
9604 
9605 static int cpu_cfs_burst_write_u64(struct cgroup_subsys_state *css,
9606 				   struct cftype *cftype, u64 cfs_burst_us)
9607 {
9608 	return tg_set_cfs_burst(css_tg(css), cfs_burst_us);
9609 }
9610 
9611 struct cfs_schedulable_data {
9612 	struct task_group *tg;
9613 	u64 period, quota;
9614 };
9615 
9616 /*
9617  * normalize group quota/period to be quota/max_period
9618  * note: units are usecs
9619  */
9620 static u64 normalize_cfs_quota(struct task_group *tg,
9621 			       struct cfs_schedulable_data *d)
9622 {
9623 	u64 quota, period;
9624 
9625 	if (tg == d->tg) {
9626 		period = d->period;
9627 		quota = d->quota;
9628 	} else {
9629 		period = tg_get_cfs_period(tg);
9630 		quota = tg_get_cfs_quota(tg);
9631 	}
9632 
9633 	/* note: these should typically be equivalent */
9634 	if (quota == RUNTIME_INF || quota == -1)
9635 		return RUNTIME_INF;
9636 
9637 	return to_ratio(period, quota);
9638 }
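
/*
 * Worked example (a sketch; assumes to_ratio() from sched.h returns
 * quota/period in BW_SHIFT (20-bit) fixed point, cf. the max_cfs_runtime
 * comment above): quota == 50000us over period == 100000us yields
 * (50000 << 20) / 100000 == 524288, i.e. 0.5 in fixed point, which then
 * feeds the hierarchical quota check in tg_cfs_schedulable_down().
 */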
9639 
9640 static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
9641 {
9642 	struct cfs_schedulable_data *d = data;
9643 	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9644 	s64 quota = 0, parent_quota = -1;
9645 
9646 	if (!tg->parent) {
9647 		quota = RUNTIME_INF;
9648 	} else {
9649 		struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
9650 
9651 		quota = normalize_cfs_quota(tg, d);
9652 		parent_quota = parent_b->hierarchical_quota;
9653 
9654 		/*
9655 		 * Ensure max(child_quota) <= parent_quota.  On cgroup2,
9656 		 * always take the non-RUNTIME_INF min.  On cgroup1, only
9657 		 * inherit when no limit is set. In both cases this is used
9658 		 * by the scheduler to determine if a given CFS task has a
9659 		 * bandwidth constraint at some higher level.
9660 		 */
9661 		if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) {
9662 			if (quota == RUNTIME_INF)
9663 				quota = parent_quota;
9664 			else if (parent_quota != RUNTIME_INF)
9665 				quota = min(quota, parent_quota);
9666 		} else {
9667 			if (quota == RUNTIME_INF)
9668 				quota = parent_quota;
9669 			else if (parent_quota != RUNTIME_INF && quota > parent_quota)
9670 				return -EINVAL;
9671 		}
9672 	}
9673 	cfs_b->hierarchical_quota = quota;
9674 
9675 	return 0;
9676 }
9677 
9678 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
9679 {
9680 	struct cfs_schedulable_data data = {
9681 		.tg = tg,
9682 		.period = period,
9683 		.quota = quota,
9684 	};
9685 
9686 	if (quota != RUNTIME_INF) {
9687 		do_div(data.period, NSEC_PER_USEC);
9688 		do_div(data.quota, NSEC_PER_USEC);
9689 	}
9690 
9691 	guard(rcu)();
9692 	return walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
9693 }
9694 
9695 static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
9696 {
9697 	struct task_group *tg = css_tg(seq_css(sf));
9698 	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9699 
9700 	seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
9701 	seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
9702 	seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
9703 
9704 	if (schedstat_enabled() && tg != &root_task_group) {
9705 		struct sched_statistics *stats;
9706 		u64 ws = 0;
9707 		int i;
9708 
9709 		for_each_possible_cpu(i) {
9710 			stats = __schedstats_from_se(tg->se[i]);
9711 			ws += schedstat_val(stats->wait_sum);
9712 		}
9713 
9714 		seq_printf(sf, "wait_sum %llu\n", ws);
9715 	}
9716 
9717 	seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst);
9718 	seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time);
9719 
9720 	return 0;
9721 }
9722 
9723 static u64 throttled_time_self(struct task_group *tg)
9724 {
9725 	int i;
9726 	u64 total = 0;
9727 
9728 	for_each_possible_cpu(i) {
9729 		total += READ_ONCE(tg->cfs_rq[i]->throttled_clock_self_time);
9730 	}
9731 
9732 	return total;
9733 }
9734 
9735 static int cpu_cfs_local_stat_show(struct seq_file *sf, void *v)
9736 {
9737 	struct task_group *tg = css_tg(seq_css(sf));
9738 
9739 	seq_printf(sf, "throttled_time %llu\n", throttled_time_self(tg));
9740 
9741 	return 0;
9742 }
9743 #endif /* CONFIG_CFS_BANDWIDTH */
9744 
9745 #ifdef CONFIG_RT_GROUP_SCHED
9746 static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
9747 				struct cftype *cft, s64 val)
9748 {
9749 	return sched_group_set_rt_runtime(css_tg(css), val);
9750 }
9751 
9752 static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
9753 			       struct cftype *cft)
9754 {
9755 	return sched_group_rt_runtime(css_tg(css));
9756 }
9757 
9758 static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
9759 				    struct cftype *cftype, u64 rt_period_us)
9760 {
9761 	return sched_group_set_rt_period(css_tg(css), rt_period_us);
9762 }
9763 
9764 static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
9765 				   struct cftype *cft)
9766 {
9767 	return sched_group_rt_period(css_tg(css));
9768 }
9769 #endif /* CONFIG_RT_GROUP_SCHED */
9770 
9771 #ifdef CONFIG_GROUP_SCHED_WEIGHT
9772 static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css,
9773 			       struct cftype *cft)
9774 {
9775 	return css_tg(css)->idle;
9776 }
9777 
9778 static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
9779 				struct cftype *cft, s64 idle)
9780 {
9781 	int ret;
9782 
9783 	ret = sched_group_set_idle(css_tg(css), idle);
9784 	if (!ret)
9785 		scx_group_set_idle(css_tg(css), idle);
9786 	return ret;
9787 }
9788 #endif
9789 
9790 static struct cftype cpu_legacy_files[] = {
9791 #ifdef CONFIG_GROUP_SCHED_WEIGHT
9792 	{
9793 		.name = "shares",
9794 		.read_u64 = cpu_shares_read_u64,
9795 		.write_u64 = cpu_shares_write_u64,
9796 	},
9797 	{
9798 		.name = "idle",
9799 		.read_s64 = cpu_idle_read_s64,
9800 		.write_s64 = cpu_idle_write_s64,
9801 	},
9802 #endif
9803 #ifdef CONFIG_CFS_BANDWIDTH
9804 	{
9805 		.name = "cfs_quota_us",
9806 		.read_s64 = cpu_cfs_quota_read_s64,
9807 		.write_s64 = cpu_cfs_quota_write_s64,
9808 	},
9809 	{
9810 		.name = "cfs_period_us",
9811 		.read_u64 = cpu_cfs_period_read_u64,
9812 		.write_u64 = cpu_cfs_period_write_u64,
9813 	},
9814 	{
9815 		.name = "cfs_burst_us",
9816 		.read_u64 = cpu_cfs_burst_read_u64,
9817 		.write_u64 = cpu_cfs_burst_write_u64,
9818 	},
9819 	{
9820 		.name = "stat",
9821 		.seq_show = cpu_cfs_stat_show,
9822 	},
9823 	{
9824 		.name = "stat.local",
9825 		.seq_show = cpu_cfs_local_stat_show,
9826 	},
9827 #endif
9828 #ifdef CONFIG_RT_GROUP_SCHED
9829 	{
9830 		.name = "rt_runtime_us",
9831 		.read_s64 = cpu_rt_runtime_read,
9832 		.write_s64 = cpu_rt_runtime_write,
9833 	},
9834 	{
9835 		.name = "rt_period_us",
9836 		.read_u64 = cpu_rt_period_read_uint,
9837 		.write_u64 = cpu_rt_period_write_uint,
9838 	},
9839 #endif
9840 #ifdef CONFIG_UCLAMP_TASK_GROUP
9841 	{
9842 		.name = "uclamp.min",
9843 		.flags = CFTYPE_NOT_ON_ROOT,
9844 		.seq_show = cpu_uclamp_min_show,
9845 		.write = cpu_uclamp_min_write,
9846 	},
9847 	{
9848 		.name = "uclamp.max",
9849 		.flags = CFTYPE_NOT_ON_ROOT,
9850 		.seq_show = cpu_uclamp_max_show,
9851 		.write = cpu_uclamp_max_write,
9852 	},
9853 #endif
9854 	{ }	/* Terminate */
9855 };
9856 
9857 static int cpu_extra_stat_show(struct seq_file *sf,
9858 			       struct cgroup_subsys_state *css)
9859 {
9860 #ifdef CONFIG_CFS_BANDWIDTH
9861 	{
9862 		struct task_group *tg = css_tg(css);
9863 		struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
9864 		u64 throttled_usec, burst_usec;
9865 
9866 		throttled_usec = cfs_b->throttled_time;
9867 		do_div(throttled_usec, NSEC_PER_USEC);
9868 		burst_usec = cfs_b->burst_time;
9869 		do_div(burst_usec, NSEC_PER_USEC);
9870 
9871 		seq_printf(sf, "nr_periods %d\n"
9872 			   "nr_throttled %d\n"
9873 			   "throttled_usec %llu\n"
9874 			   "nr_bursts %d\n"
9875 			   "burst_usec %llu\n",
9876 			   cfs_b->nr_periods, cfs_b->nr_throttled,
9877 			   throttled_usec, cfs_b->nr_burst, burst_usec);
9878 	}
9879 #endif
9880 	return 0;
9881 }
9882 
9883 static int cpu_local_stat_show(struct seq_file *sf,
9884 			       struct cgroup_subsys_state *css)
9885 {
9886 #ifdef CONFIG_CFS_BANDWIDTH
9887 	{
9888 		struct task_group *tg = css_tg(css);
9889 		u64 throttled_self_usec;
9890 
9891 		throttled_self_usec = throttled_time_self(tg);
9892 		do_div(throttled_self_usec, NSEC_PER_USEC);
9893 
9894 		seq_printf(sf, "throttled_usec %llu\n",
9895 			   throttled_self_usec);
9896 	}
9897 #endif
9898 	return 0;
9899 }
9900 
9901 #ifdef CONFIG_GROUP_SCHED_WEIGHT
9902 
9903 static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
9904 			       struct cftype *cft)
9905 {
9906 	return sched_weight_to_cgroup(tg_weight(css_tg(css)));
9907 }
9908 
9909 static int cpu_weight_write_u64(struct cgroup_subsys_state *css,
9910 				struct cftype *cft, u64 cgrp_weight)
9911 {
9912 	unsigned long weight;
9913 	int ret;
9914 
9915 	if (cgrp_weight < CGROUP_WEIGHT_MIN || cgrp_weight > CGROUP_WEIGHT_MAX)
9916 		return -ERANGE;
9917 
9918 	weight = sched_weight_from_cgroup(cgrp_weight);
9919 
9920 	ret = sched_group_set_shares(css_tg(css), scale_load(weight));
9921 	if (!ret)
9922 		scx_group_set_weight(css_tg(css), cgrp_weight);
9923 	return ret;
9924 }
9925 
9926 static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css,
9927 				    struct cftype *cft)
9928 {
9929 	unsigned long weight = tg_weight(css_tg(css));
9930 	int last_delta = INT_MAX;
9931 	int prio, delta;
9932 
9933 	/* find the closest nice value to the current weight */
9934 	for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) {
9935 		delta = abs(sched_prio_to_weight[prio] - weight);
9936 		if (delta >= last_delta)
9937 			break;
9938 		last_delta = delta;
9939 	}
9940 
9941 	return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO);
9942 }
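
/*
 * Example of the search above: for tg_weight() == 850 the deltas shrink
 * until sched_prio_to_weight[21] == 820 (delta 30) and grow again at
 * index 22 (655, delta 195), so the loop breaks with prio == 22 and
 * PRIO_TO_NICE(21 + MAX_RT_PRIO) == 1 is returned: a weight of 850 is
 * reported as nice 1.
 */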
9943 
9944 static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
9945 				     struct cftype *cft, s64 nice)
9946 {
9947 	unsigned long weight;
9948 	int idx, ret;
9949 
9950 	if (nice < MIN_NICE || nice > MAX_NICE)
9951 		return -ERANGE;
9952 
9953 	idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO;
9954 	idx = array_index_nospec(idx, 40);
9955 	weight = sched_prio_to_weight[idx];
9956 
9957 	ret = sched_group_set_shares(css_tg(css), scale_load(weight));
9958 	if (!ret)
9959 		scx_group_set_weight(css_tg(css),
9960 				     sched_weight_to_cgroup(weight));
9961 	return ret;
9962 }
9963 #endif /* CONFIG_GROUP_SCHED_WEIGHT */
9964 
9965 static void __maybe_unused cpu_period_quota_print(struct seq_file *sf,
9966 						  long period, long quota)
9967 {
9968 	if (quota < 0)
9969 		seq_puts(sf, "max");
9970 	else
9971 		seq_printf(sf, "%ld", quota);
9972 
9973 	seq_printf(sf, " %ld\n", period);
9974 }
9975 
9976 /* caller should put the current value in *@periodp before calling */
9977 static int __maybe_unused cpu_period_quota_parse(char *buf,
9978 						 u64 *periodp, u64 *quotap)
9979 {
9980 	char tok[21];	/* U64_MAX */
9981 
9982 	if (sscanf(buf, "%20s %llu", tok, periodp) < 1)
9983 		return -EINVAL;
9984 
9985 	*periodp *= NSEC_PER_USEC;
9986 
9987 	if (sscanf(tok, "%llu", quotap))
9988 		*quotap *= NSEC_PER_USEC;
9989 	else if (!strcmp(tok, "max"))
9990 		*quotap = RUNTIME_INF;
9991 	else
9992 		return -EINVAL;
9993 
9994 	return 0;
9995 }
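
/*
 * Examples of input accepted by the parser above (cgroup2 cpu.max
 * syntax): "max 100000" selects an infinite quota with a 100ms period;
 * "50000 100000" selects quota == 50ms, period == 100ms; a bare "50000"
 * sets only the quota and leaves *periodp at the value the caller
 * pre-loaded.
 */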
9996 
9997 #ifdef CONFIG_CFS_BANDWIDTH
9998 static int cpu_max_show(struct seq_file *sf, void *v)
9999 {
10000 	struct task_group *tg = css_tg(seq_css(sf));
10001 
10002 	cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg));
10003 	return 0;
10004 }
10005 
10006 static ssize_t cpu_max_write(struct kernfs_open_file *of,
10007 			     char *buf, size_t nbytes, loff_t off)
10008 {
10009 	struct task_group *tg = css_tg(of_css(of));
10010 	u64 period = tg_get_cfs_period(tg);
10011 	u64 burst = tg->cfs_bandwidth.burst;
10012 	u64 quota;
10013 	int ret;
10014 
10015 	ret = cpu_period_quota_parse(buf, &period, &quota);
10016 	if (!ret)
10017 		ret = tg_set_cfs_bandwidth(tg, period, quota, burst);
10018 	return ret ?: nbytes;
10019 }
10020 #endif
10021 
10022 static struct cftype cpu_files[] = {
10023 #ifdef CONFIG_GROUP_SCHED_WEIGHT
10024 	{
10025 		.name = "weight",
10026 		.flags = CFTYPE_NOT_ON_ROOT,
10027 		.read_u64 = cpu_weight_read_u64,
10028 		.write_u64 = cpu_weight_write_u64,
10029 	},
10030 	{
10031 		.name = "weight.nice",
10032 		.flags = CFTYPE_NOT_ON_ROOT,
10033 		.read_s64 = cpu_weight_nice_read_s64,
10034 		.write_s64 = cpu_weight_nice_write_s64,
10035 	},
10036 	{
10037 		.name = "idle",
10038 		.flags = CFTYPE_NOT_ON_ROOT,
10039 		.read_s64 = cpu_idle_read_s64,
10040 		.write_s64 = cpu_idle_write_s64,
10041 	},
10042 #endif
10043 #ifdef CONFIG_CFS_BANDWIDTH
10044 	{
10045 		.name = "max",
10046 		.flags = CFTYPE_NOT_ON_ROOT,
10047 		.seq_show = cpu_max_show,
10048 		.write = cpu_max_write,
10049 	},
10050 	{
10051 		.name = "max.burst",
10052 		.flags = CFTYPE_NOT_ON_ROOT,
10053 		.read_u64 = cpu_cfs_burst_read_u64,
10054 		.write_u64 = cpu_cfs_burst_write_u64,
10055 	},
10056 #endif
10057 #ifdef CONFIG_UCLAMP_TASK_GROUP
10058 	{
10059 		.name = "uclamp.min",
10060 		.flags = CFTYPE_NOT_ON_ROOT,
10061 		.seq_show = cpu_uclamp_min_show,
10062 		.write = cpu_uclamp_min_write,
10063 	},
10064 	{
10065 		.name = "uclamp.max",
10066 		.flags = CFTYPE_NOT_ON_ROOT,
10067 		.seq_show = cpu_uclamp_max_show,
10068 		.write = cpu_uclamp_max_write,
10069 	},
10070 #endif
10071 	{ }	/* terminate */
10072 };
10073 
10074 struct cgroup_subsys cpu_cgrp_subsys = {
10075 	.css_alloc	= cpu_cgroup_css_alloc,
10076 	.css_online	= cpu_cgroup_css_online,
10077 	.css_offline	= cpu_cgroup_css_offline,
10078 	.css_released	= cpu_cgroup_css_released,
10079 	.css_free	= cpu_cgroup_css_free,
10080 	.css_extra_stat_show = cpu_extra_stat_show,
10081 	.css_local_stat_show = cpu_local_stat_show,
10082 	.can_attach	= cpu_cgroup_can_attach,
10083 	.attach		= cpu_cgroup_attach,
10084 	.cancel_attach	= cpu_cgroup_cancel_attach,
10085 	.legacy_cftypes	= cpu_legacy_files,
10086 	.dfl_cftypes	= cpu_files,
10087 	.early_init	= true,
10088 	.threaded	= true,
10089 };
10090 
10091 #endif	/* CONFIG_CGROUP_SCHED */
10092 
10093 void dump_cpu_task(int cpu)
10094 {
10095 	if (in_hardirq() && cpu == smp_processor_id()) {
10096 		struct pt_regs *regs;
10097 
10098 		regs = get_irq_regs();
10099 		if (regs) {
10100 			show_regs(regs);
10101 			return;
10102 		}
10103 	}
10104 
10105 	if (trigger_single_cpu_backtrace(cpu))
10106 		return;
10107 
10108 	pr_info("Task dump for CPU %d:\n", cpu);
10109 	sched_show_task(cpu_curr(cpu));
10110 }
10111 
10112 /*
10113  * Nice levels are multiplicative, with a gentle 10% change for every
10114  * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
10115  * nice 1, it will get ~10% less CPU time than another CPU-bound task
10116  * that remained on nice 0.
10117  *
10118  * The "10% effect" is relative and cumulative: from _any_ nice level,
10119  * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
10120  * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
10121  * If a task goes up by ~10% and another task goes down by ~10% then
10122  * the relative distance between them is ~25%.)
10123  */
10124 const int sched_prio_to_weight[40] = {
10125  /* -20 */     88761,     71755,     56483,     46273,     36291,
10126  /* -15 */     29154,     23254,     18705,     14949,     11916,
10127  /* -10 */      9548,      7620,      6100,      4904,      3906,
10128  /*  -5 */      3121,      2501,      1991,      1586,      1277,
10129  /*   0 */      1024,       820,       655,       526,       423,
10130  /*   5 */       335,       272,       215,       172,       137,
10131  /*  10 */       110,        87,        70,        56,        45,
10132  /*  15 */        36,        29,        23,        18,        15,
10133 };
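
/*
 * Worked example of the "10% effect": two nice-0 tasks each get
 * 1024/2048 == 50% of the CPU. Renice one of them to nice 1 (weight
 * 820) and the split becomes 820/1844 ~= 44.5% vs 1024/1844 ~= 55.5%:
 * the reniced task lost about 10% of its CPU time, and the gap between
 * the two tasks is the ~25% implied by the 1.25 multiplier
 * (1024/820 ~= 1.25).
 */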
10134 
10135 /*
10136  * Inverse (2^32/x) values of the sched_prio_to_weight[] array, pre-calculated.
10137  *
10138  * In cases where the weight does not change often, we can use the
10139  * pre-calculated inverse to speed up arithmetics by turning divisions
10140  * into multiplications:
10141  */
10142 const u32 sched_prio_to_wmult[40] = {
10143  /* -20 */     48388,     59856,     76040,     92818,    118348,
10144  /* -15 */    147320,    184698,    229616,    287308,    360437,
10145  /* -10 */    449829,    563644,    704093,    875809,   1099582,
10146  /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
10147  /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
10148  /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
10149  /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
10150  /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
10151 };
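
/*
 * Example of the division-to-multiplication trick: for nice 0,
 * wmult == 4194304 == 2^32 / 1024, so delta / weight can be computed as
 * (delta * 4194304) >> 32; e.g. 6000000 / 1024 == 5859 either way
 * (modulo the rounding of the pre-computed inverse).
 */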
10152 
10153 void call_trace_sched_update_nr_running(struct rq *rq, int count)
10154 {
10155 	trace_sched_update_nr_running_tp(rq, count);
10156 }
10157 
10158 #ifdef CONFIG_SCHED_MM_CID
10159 
10160 /*
10161  * @cid_lock: Guarantee forward-progress of cid allocation.
10162  *
10163  * Concurrency ID allocation within a bitmap is mostly lock-free. The cid_lock
10164  * is only used when contention is detected by the lock-free allocation so
10165  * forward progress can be guaranteed.
10166  */
10167 DEFINE_RAW_SPINLOCK(cid_lock);
10168 
10169 /*
10170  * @use_cid_lock: Select cid allocation behavior: lock-free vs spinlock.
10171  *
10172  * When @use_cid_lock is 0, the cid allocation is lock-free. When contention is
10173  * detected, it is set to 1 to ensure that all newly coming allocations are
10174  * serialized by @cid_lock until the allocation which detected contention
10175  * completes and sets @use_cid_lock back to 0. This guarantees forward progress
10176  * of a cid allocation.
10177  */
10178 int use_cid_lock;
10179 
10180 /*
10181  * mm_cid remote-clear implements a lock-free algorithm to clear per-mm/cpu cid
10182  * concurrently with respect to the execution of the source runqueue context
10183  * switch.
10184  *
10185  * There is one basic property we want to guarantee here:
10186  *
10187  * (1) Remote-clear should _never_ mark a per-cpu cid UNSET when it is actively
10188  * used by a task. That would lead to concurrent allocation of the cid and
10189  * userspace corruption.
10190  *
10191  * Provide this guarantee by introducing a Dekker memory ordering to guarantee
10192  * that a pair of loads observe at least one of a pair of stores, which can be
10193  * shown as:
10194  *
10195  *      X = Y = 0
10196  *
10197  *      w[X]=1          w[Y]=1
10198  *      MB              MB
10199  *      r[Y]=y          r[X]=x
10200  *
10201  * Which guarantees that x==0 && y==0 is impossible. But rather than using
10202  * values 0 and 1, this algorithm cares about specific state transitions of the
10203  * runqueue current task (as updated by the scheduler context switch), and the
10204  * per-mm/cpu cid value.
10205  *
10206  * Let's introduce task (Y) which has task->mm == mm and task (N) which has
10207  * task->mm != mm for the rest of the discussion. There are two scheduler state
10208  * transitions on context switch we care about:
10209  *
10210  * (TSA) Store to rq->curr with transition from (N) to (Y)
10211  *
10212  * (TSB) Store to rq->curr with transition from (Y) to (N)
10213  *
10214  * On the remote-clear side, there is one transition we care about:
10215  *
10216  * (TMA) cmpxchg to *pcpu_cid to set the LAZY flag
10217  *
10218  * There is also a transition to UNSET state which can be performed from all
10219  * sides (scheduler, remote-clear). It is always performed with a cmpxchg which
10220  * guarantees that only a single thread will succeed:
10221  *
10222  * (TMB) cmpxchg to *pcpu_cid to mark UNSET
10223  *
10224  * Just to be clear, what we do _not_ want to happen is a transition to UNSET
10225  * when a thread is actively using the cid (property (1)).
10226  *
10227  * Let's look at the relevant combinations of TSA/TSB, and TMA transitions.
10228  *
10229  * Scenario A) (TSA)+(TMA) (from next task perspective)
10230  *
10231  * CPU0                                      CPU1
10232  *
10233  * Context switch CS-1                       Remote-clear
10234  *   - store to rq->curr: (N)->(Y) (TSA)     - cmpxchg to *pcpu_id to LAZY (TMA)
10235  *                                             (implied barrier after cmpxchg)
10236  *   - switch_mm_cid()
10237  *     - memory barrier (see switch_mm_cid()
10238  *       comment explaining how this barrier
10239  *       is combined with other scheduler
10240  *       barriers)
10241  *     - mm_cid_get (next)
10242  *       - READ_ONCE(*pcpu_cid)              - rcu_dereference(src_rq->curr)
10243  *
10244  * This Dekker ensures that either task (Y) is observed by the
10245  * rcu_dereference() or the LAZY flag is observed by READ_ONCE(), or both are
10246  * observed.
10247  *
10248  * If task (Y) store is observed by rcu_dereference(), it means that there is
10249  * still an active task on the cpu. Remote-clear will therefore not transition
10250  * to UNSET, which fulfills property (1).
10251  *
10252  * If task (Y) is not observed, but the lazy flag is observed by READ_ONCE(),
10253  * it will move its state to UNSET, which clears the percpu cid perhaps
10254  * uselessly (which is not an issue for correctness). Because task (Y) is not
10255  * observed, CPU1 can move ahead to set the state to UNSET. Because moving
10256  * state to UNSET is done with a cmpxchg expecting that the old state has the
10257  * LAZY flag set, only one thread will successfully UNSET.
10258  *
10259  * If both states (LAZY flag and task (Y)) are observed, the thread on CPU0
10260  * will observe the LAZY flag and transition to UNSET (perhaps uselessly), and
10261  * CPU1 will observe task (Y) and do nothing more, which is fine.
10262  *
10263  * What we are effectively preventing with this Dekker is a scenario where
10264  * neither LAZY flag nor store (Y) are observed, which would fail property (1)
10265  * because this would UNSET a cid which is actively used.
10266  */
10267 
10268 void sched_mm_cid_migrate_from(struct task_struct *t)
10269 {
10270 	t->migrate_from_cpu = task_cpu(t);
10271 }
10272 
10273 static
10274 int __sched_mm_cid_migrate_from_fetch_cid(struct rq *src_rq,
10275 					  struct task_struct *t,
10276 					  struct mm_cid *src_pcpu_cid)
10277 {
10278 	struct mm_struct *mm = t->mm;
10279 	struct task_struct *src_task;
10280 	int src_cid, last_mm_cid;
10281 
10282 	if (!mm)
10283 		return -1;
10284 
10285 	last_mm_cid = t->last_mm_cid;
10286 	/*
10287 	 * If the migrated task has no last cid, or if the current
10288 	 * task on src rq uses the cid, it means the source cid does not need
10289 	 * to be moved to the destination cpu.
10290 	 */
10291 	if (last_mm_cid == -1)
10292 		return -1;
10293 	src_cid = READ_ONCE(src_pcpu_cid->cid);
10294 	if (!mm_cid_is_valid(src_cid) || last_mm_cid != src_cid)
10295 		return -1;
10296 
10297 	/*
10298 	 * If we observe an active task using the mm on this rq, it means we
10299 	 * are not the last task to be migrated from this cpu for this mm, so
10300 	 * there is no need to move src_cid to the destination cpu.
10301 	 */
10302 	guard(rcu)();
10303 	src_task = rcu_dereference(src_rq->curr);
10304 	if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
10305 		t->last_mm_cid = -1;
10306 		return -1;
10307 	}
10308 
10309 	return src_cid;
10310 }
10311 
10312 static
10313 int __sched_mm_cid_migrate_from_try_steal_cid(struct rq *src_rq,
10314 					      struct task_struct *t,
10315 					      struct mm_cid *src_pcpu_cid,
10316 					      int src_cid)
10317 {
10318 	struct task_struct *src_task;
10319 	struct mm_struct *mm = t->mm;
10320 	int lazy_cid;
10321 
10322 	if (src_cid == -1)
10323 		return -1;
10324 
10325 	/*
10326 	 * Attempt to clear the source cpu cid to move it to the destination
10327 	 * cpu.
10328 	 */
10329 	lazy_cid = mm_cid_set_lazy_put(src_cid);
10330 	if (!try_cmpxchg(&src_pcpu_cid->cid, &src_cid, lazy_cid))
10331 		return -1;
10332 
10333 	/*
10334 	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
10335 	 * rq->curr->mm matches the scheduler barrier in context_switch()
10336 	 * between store to rq->curr and load of prev and next task's
10337 	 * per-mm/cpu cid.
10338 	 *
10339 	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
10340 	 * rq->curr->mm_cid_active matches the barrier in
10341 	 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
10342 	 * sched_mm_cid_after_execve() between store to t->mm_cid_active and
10343 	 * load of per-mm/cpu cid.
10344 	 */
10345 
10346 	/*
10347 	 * If we observe an active task using the mm on this rq after setting
10348 	 * the lazy-put flag, that task will be responsible for transitioning
10349 	 * from lazy-put flag set to MM_CID_UNSET.
10350 	 */
10351 	scoped_guard (rcu) {
10352 		src_task = rcu_dereference(src_rq->curr);
10353 		if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
10354 			/*
10355 			 * We observed an active task for this mm; there is therefore
10356 			 * no point in moving this cid to the destination cpu.
10357 			 */
10358 			t->last_mm_cid = -1;
10359 			return -1;
10360 		}
10361 	}
10362 
10363 	/*
10364 	 * The src_cid is unused, so it can be unset.
10365 	 */
10366 	if (!try_cmpxchg(&src_pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
10367 		return -1;
10368 	WRITE_ONCE(src_pcpu_cid->recent_cid, MM_CID_UNSET);
10369 	return src_cid;
10370 }
10371 
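/*
 * Standalone sketch (assumed encoding, not the kernel's) of the two-step
 * lazy-put protocol implemented above: a cid moves VALID -> VALID|LAZY ->
 * UNSET, each step a cmpxchg from the expected prior state, so however
 * many threads race, at most one wins each transition and at most one
 * ends up owning (and freeing) the cid.
 */
#if 0	/* sketch only */
#include <stdatomic.h>

#define CID_UNSET	(-1)
#define CID_LAZY_FLAG	0x40000000	/* hypothetical flag bit */

static int lazy_put(_Atomic int *pcid, int cid)	/* hypothetical */
{
	int lazy = cid | CID_LAZY_FLAG;

	/* step 1: VALID -> VALID|LAZY; fails if someone else moved it */
	if (!atomic_compare_exchange_strong(pcid, &cid, lazy))
		return -1;
	/* ... recheck for an active user here, as the code above does ... */
	/* step 2: VALID|LAZY -> UNSET; at most one thread can win this */
	if (!atomic_compare_exchange_strong(pcid, &lazy, CID_UNSET))
		return -1;
	return cid;	/* caller now owns the cid and may free it */
}
#endif
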
10372 /*
10373  * Migration to dst cpu. Called with dst_rq lock held.
10374  * Interrupts are disabled, which keeps small the window during which the
10375  * cid is owned without holding the source rq lock.
10376  */
10377 void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t)
10378 {
10379 	struct mm_cid *src_pcpu_cid, *dst_pcpu_cid;
10380 	struct mm_struct *mm = t->mm;
10381 	int src_cid, src_cpu;
10382 	bool dst_cid_is_set;
10383 	struct rq *src_rq;
10384 
10385 	lockdep_assert_rq_held(dst_rq);
10386 
10387 	if (!mm)
10388 		return;
10389 	src_cpu = t->migrate_from_cpu;
10390 	if (src_cpu == -1) {
10391 		t->last_mm_cid = -1;
10392 		return;
10393 	}
10394 	/*
10395 	 * Move the src cid if the dst cid is unset. This keeps id
10396 	 * allocation closest to 0 in cases where few threads migrate around
10397 	 * many CPUs.
10398 	 *
10399 	 * If the destination cid or recent cid is already set, we may just
10400 	 * clear the src cid to keep cid allocation compact in
10401 	 * frequent-migration scenarios.
10402 	 *
10403 	 * It is not useful to clear the src cid when the number of threads is
10404 	 * greater than or equal to the number of allowed CPUs, because
10405 	 * user-space can expect the number of cids in use to reach the number
10406 	 * of allowed CPUs.
10407 	 */
10408 	dst_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(dst_rq));
10409 	dst_cid_is_set = !mm_cid_is_unset(READ_ONCE(dst_pcpu_cid->cid)) ||
10410 			 !mm_cid_is_unset(READ_ONCE(dst_pcpu_cid->recent_cid));
10411 	if (dst_cid_is_set && atomic_read(&mm->mm_users) >= READ_ONCE(mm->nr_cpus_allowed))
10412 		return;
10413 	src_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, src_cpu);
10414 	src_rq = cpu_rq(src_cpu);
10415 	src_cid = __sched_mm_cid_migrate_from_fetch_cid(src_rq, t, src_pcpu_cid);
10416 	if (src_cid == -1)
10417 		return;
10418 	src_cid = __sched_mm_cid_migrate_from_try_steal_cid(src_rq, t, src_pcpu_cid,
10419 							    src_cid);
10420 	if (src_cid == -1)
10421 		return;
10422 	if (dst_cid_is_set) {
10423 		__mm_cid_put(mm, src_cid);
10424 		return;
10425 	}
10426 	/* Move src_cid to dst cpu. */
10427 	mm_cid_snapshot_time(dst_rq, mm);
10428 	WRITE_ONCE(dst_pcpu_cid->cid, src_cid);
10429 	WRITE_ONCE(dst_pcpu_cid->recent_cid, src_cid);
10430 }
10431 
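/*
 * Worked example of the compactness heuristic above (numbers assumed for
 * illustration): an mm with 2 threads allowed on 64 CPUs. A thread
 * migrating from CPU 3 to CPU 57 finds the destination cid unset, so its
 * cid (say 1) follows it and the mm keeps using the compact set {0, 1}.
 * With 64 threads on 64 CPUs instead, mm_users >= nr_cpus_allowed, so
 * the source cid is left alone: every cid up to the number of allowed
 * CPUs may legitimately be in use, and moving it buys no compactness.
 */
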
10432 static void sched_mm_cid_remote_clear(struct mm_struct *mm, struct mm_cid *pcpu_cid,
10433 				      int cpu)
10434 {
10435 	struct rq *rq = cpu_rq(cpu);
10436 	struct task_struct *t;
10437 	int cid, lazy_cid;
10438 
10439 	cid = READ_ONCE(pcpu_cid->cid);
10440 	if (!mm_cid_is_valid(cid))
10441 		return;
10442 
10443 	/*
10444 	 * If the cpu cid is set, clear it to keep cid allocation compact. If
10445 	 * there happen to be other tasks left on the source cpu using this
10446 	 * mm, the next task using this mm will reallocate its cid on context
10447 	 * switch.
10448 	 */
10449 	lazy_cid = mm_cid_set_lazy_put(cid);
10450 	if (!try_cmpxchg(&pcpu_cid->cid, &cid, lazy_cid))
10451 		return;
10452 
10453 	/*
10454 	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
10455 	 * rq->curr->mm matches the scheduler barrier in context_switch()
10456 	 * between store to rq->curr and load of prev and next task's
10457 	 * per-mm/cpu cid.
10458 	 *
10459 	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
10460 	 * rq->curr->mm_cid_active matches the barrier in
10461 	 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
10462 	 * sched_mm_cid_after_execve() between store to t->mm_cid_active and
10463 	 * load of per-mm/cpu cid.
10464 	 */
10465 
10466 	/*
10467 	 * If we observe an active task using the mm on this rq after setting
10468 	 * the lazy-put flag, that task will be responsible for transitioning
10469 	 * from lazy-put flag set to MM_CID_UNSET.
10470 	 */
10471 	scoped_guard (rcu) {
10472 		t = rcu_dereference(rq->curr);
10473 		if (READ_ONCE(t->mm_cid_active) && t->mm == mm)
10474 			return;
10475 	}
10476 
10477 	/*
10478 	 * The cid is unused, so it can be unset.
10479 	 * Disable interrupts to keep the window of cid ownership without rq
10480 	 * Disable interrupts to keep small the window during which the cid
10481 	 * is owned without holding the rq lock.
10482 	scoped_guard (irqsave) {
10483 		if (try_cmpxchg(&pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
10484 			__mm_cid_put(mm, cid);
10485 	}
10486 }
10487 
10488 static void sched_mm_cid_remote_clear_old(struct mm_struct *mm, int cpu)
10489 {
10490 	struct rq *rq = cpu_rq(cpu);
10491 	struct mm_cid *pcpu_cid;
10492 	struct task_struct *curr;
10493 	u64 rq_clock;
10494 
10495 	/*
10496 	 * The rq->clock load is racy on 32-bit, but one spurious clear once
10497 	 * in a while is harmless.
10498 	 */
10499 	rq_clock = READ_ONCE(rq->clock);
10500 	pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
10501 
10502 	/*
10503 	 * In order to take care of infrequently scheduled tasks, bump the time
10504 	 * snapshot associated with this cid if an active task using the mm is
10505 	 * observed on this rq.
10506 	 */
10507 	scoped_guard (rcu) {
10508 		curr = rcu_dereference(rq->curr);
10509 		if (READ_ONCE(curr->mm_cid_active) && curr->mm == mm) {
10510 			WRITE_ONCE(pcpu_cid->time, rq_clock);
10511 			return;
10512 		}
10513 	}
10514 
10515 	if (rq_clock < pcpu_cid->time + SCHED_MM_CID_PERIOD_NS)
10516 		return;
10517 	sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
10518 }
10519 
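/*
 * Worked timeline for the age check above (period value assumed; see
 * SCHED_MM_CID_PERIOD_NS): if an active user of the mm is running on the
 * rq, the snapshot is refreshed and the cid survives; otherwise, once
 * rq->clock has advanced a full period past pcpu_cid->time, the cid is
 * lazily cleared. Busy cids are thus never reclaimed, while cids of
 * infrequently scheduled tasks are reclaimed after roughly one period.
 */
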
10520 static void sched_mm_cid_remote_clear_weight(struct mm_struct *mm, int cpu,
10521 					     int weight)
10522 {
10523 	struct mm_cid *pcpu_cid;
10524 	int cid;
10525 
10526 	pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
10527 	cid = READ_ONCE(pcpu_cid->cid);
10528 	if (!mm_cid_is_valid(cid) || cid < weight)	/* cids below weight are already compact */
10529 		return;
10530 	sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
10531 }
10532 
10533 static void task_mm_cid_work(struct callback_head *work)
10534 {
10535 	unsigned long now = jiffies, old_scan, next_scan;
10536 	struct task_struct *t = current;
10537 	struct cpumask *cidmask;
10538 	struct mm_struct *mm;
10539 	int weight, cpu;
10540 
10541 	SCHED_WARN_ON(t != container_of(work, struct task_struct, cid_work));
10542 
10543 	work->next = work;	/* Prevent double-add */
10544 	if (t->flags & PF_EXITING)
10545 		return;
10546 	mm = t->mm;
10547 	if (!mm)
10548 		return;
10549 	old_scan = READ_ONCE(mm->mm_cid_next_scan);
10550 	next_scan = now + msecs_to_jiffies(MM_CID_SCAN_DELAY);
10551 	if (!old_scan) {
10552 		unsigned long res;
10553 
10554 		res = cmpxchg(&mm->mm_cid_next_scan, old_scan, next_scan);
10555 		if (res != old_scan)
10556 			old_scan = res;
10557 		else
10558 			old_scan = next_scan;
10559 	}
10560 	if (time_before(now, old_scan))
10561 		return;
10562 	if (!try_cmpxchg(&mm->mm_cid_next_scan, &old_scan, next_scan))
10563 		return;
10564 	cidmask = mm_cidmask(mm);
10565 	/* Clear cids that were not recently used. */
10566 	for_each_possible_cpu(cpu)
10567 		sched_mm_cid_remote_clear_old(mm, cpu);
10568 	weight = cpumask_weight(cidmask);
10569 	/*
10570 	 * Clear cids that are greater than or equal to the cidmask weight to
10571 	 * recompact it.
10572 	 */
10573 	for_each_possible_cpu(cpu)
10574 		sched_mm_cid_remote_clear_weight(mm, cpu, weight);
10575 }
10576 
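/*
 * Standalone sketch of the scan throttle in task_mm_cid_work() above
 * (assumed simplification which skips the first-scan initialization):
 * any number of tasks may run the work, but only the one whose cmpxchg
 * on the deadline wins advances it by one period and performs the scan.
 */
#if 0	/* sketch only */
#include <stdatomic.h>

static _Atomic unsigned long next_scan;	/* stands in for mm->mm_cid_next_scan */

static int should_scan(unsigned long now, unsigned long period)	/* hypothetical */
{
	unsigned long old = atomic_load(&next_scan);

	if ((long)(now - old) < 0)	/* like time_before(now, old) */
		return 0;
	/* the single winner advances the deadline and scans */
	return atomic_compare_exchange_strong(&next_scan, &old, now + period);
}
#endif
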
10577 void init_sched_mm_cid(struct task_struct *t)
10578 {
10579 	struct mm_struct *mm = t->mm;
10580 	int mm_users = 0;
10581 
10582 	if (mm) {
10583 		mm_users = atomic_read(&mm->mm_users);
10584 		if (mm_users == 1)
10585 			mm->mm_cid_next_scan = jiffies + msecs_to_jiffies(MM_CID_SCAN_DELAY);
10586 	}
10587 	t->cid_work.next = &t->cid_work;	/* Protect against double add */
10588 	init_task_work(&t->cid_work, task_mm_cid_work);
10589 }
10590 
10591 void task_tick_mm_cid(struct rq *rq, struct task_struct *curr)
10592 {
10593 	struct callback_head *work = &curr->cid_work;
10594 	unsigned long now = jiffies;
10595 
10596 	if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) ||
10597 	    work->next != work)
10598 		return;
10599 	if (time_before(now, READ_ONCE(curr->mm->mm_cid_next_scan)))
10600 		return;
10601 
10602 	/* No page allocation under rq lock */
10603 	task_work_add(curr, work, TWA_RESUME);
10604 }
10605 
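/*
 * Note on the work->next != work test above: a callback_head whose ->next
 * points to itself is the "not queued" sentinel established in
 * init_sched_mm_cid() and restored at the top of task_mm_cid_work(), so
 * the tick only queues the work when it is not already pending.
 */
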
10606 void sched_mm_cid_exit_signals(struct task_struct *t)
10607 {
10608 	struct mm_struct *mm = t->mm;
10609 	struct rq *rq;
10610 
10611 	if (!mm)
10612 		return;
10613 
10614 	preempt_disable();
10615 	rq = this_rq();
10616 	guard(rq_lock_irqsave)(rq);
10617 	preempt_enable_no_resched();	/* holding spinlock */
10618 	WRITE_ONCE(t->mm_cid_active, 0);
10619 	/*
10620 	 * Store t->mm_cid_active before loading per-mm/cpu cid.
10621 	 * Matches barrier in sched_mm_cid_remote_clear_old().
10622 	 */
10623 	smp_mb();
10624 	mm_cid_put(mm);
10625 	t->last_mm_cid = t->mm_cid = -1;
10626 }
10627 
10628 void sched_mm_cid_before_execve(struct task_struct *t)
10629 {
10630 	struct mm_struct *mm = t->mm;
10631 	struct rq *rq;
10632 
10633 	if (!mm)
10634 		return;
10635 
10636 	preempt_disable();
10637 	rq = this_rq();
10638 	guard(rq_lock_irqsave)(rq);
10639 	preempt_enable_no_resched();	/* holding spinlock */
10640 	WRITE_ONCE(t->mm_cid_active, 0);
10641 	/*
10642 	 * Store t->mm_cid_active before loading per-mm/cpu cid.
10643 	 * Matches barrier in sched_mm_cid_remote_clear_old().
10644 	 */
10645 	smp_mb();
10646 	mm_cid_put(mm);
10647 	t->last_mm_cid = t->mm_cid = -1;
10648 }
10649 
10650 void sched_mm_cid_after_execve(struct task_struct *t)
10651 {
10652 	struct mm_struct *mm = t->mm;
10653 	struct rq *rq;
10654 
10655 	if (!mm)
10656 		return;
10657 
10658 	preempt_disable();
10659 	rq = this_rq();
10660 	scoped_guard (rq_lock_irqsave, rq) {
10661 		preempt_enable_no_resched();	/* holding spinlock */
10662 		WRITE_ONCE(t->mm_cid_active, 1);
10663 		/*
10664 		 * Store t->mm_cid_active before loading per-mm/cpu cid.
10665 		 * Matches barrier in sched_mm_cid_remote_clear_old().
10666 		 */
10667 		smp_mb();
10668 		t->last_mm_cid = t->mm_cid = mm_cid_get(rq, t, mm);
10669 	}
10670 	rseq_set_notify_resume(t);
10671 }
10672 
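/*
 * Note on the preempt_disable()/preempt_enable_no_resched() dance in the
 * three functions above: this_rq() is only meaningful with preemption
 * off, and once the rq lock is held the spinlock itself keeps preemption
 * disabled, so the outer preempt count can be dropped without a resched
 * check for as long as the lock is held.
 */
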
10673 void sched_mm_cid_fork(struct task_struct *t)
10674 {
10675 	WARN_ON_ONCE(!t->mm || t->mm_cid != -1);
10676 	t->mm_cid_active = 1;
10677 }
10678 #endif /* CONFIG_SCHED_MM_CID */
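
/*
 * Userspace consumer sketch (standalone; registration of the rseq area
 * is assumed, not shown): the concurrency ids managed above are exposed
 * through the mm_cid field of the registered struct rseq, giving each
 * running thread of a process an index that the scans above keep packed
 * near [0, nr_allowed_cpus), e.g. for indexing per-"CPU" userspace data
 * without reserving one slot per possible CPU.
 */
#if 0	/* sketch only */
#include <linux/rseq.h>

extern struct rseq *rseq_area;	/* hypothetical: registered via rseq(2) */

static inline unsigned int my_mm_cid(void)
{
	/* the kernel refreshes mm_cid when the thread returns to userspace */
	return __atomic_load_n(&rseq_area->mm_cid, __ATOMIC_RELAXED);
}
#endif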
10679 
10680 #ifdef CONFIG_SCHED_CLASS_EXT
10681 void sched_deq_and_put_task(struct task_struct *p, int queue_flags,
10682 			    struct sched_enq_and_set_ctx *ctx)
10683 {
10684 	struct rq *rq = task_rq(p);
10685 
10686 	lockdep_assert_rq_held(rq);
10687 
10688 	*ctx = (struct sched_enq_and_set_ctx){
10689 		.p = p,
10690 		.queue_flags = queue_flags,
10691 		.queued = task_on_rq_queued(p),
10692 		.running = task_current(rq, p),
10693 	};
10694 
10695 	update_rq_clock(rq);
10696 	if (ctx->queued)
10697 		dequeue_task(rq, p, queue_flags | DEQUEUE_NOCLOCK);
10698 	if (ctx->running)
10699 		put_prev_task(rq, p);
10700 }
10701 
10702 void sched_enq_and_set_task(struct sched_enq_and_set_ctx *ctx)
10703 {
10704 	struct rq *rq = task_rq(ctx->p);
10705 
10706 	lockdep_assert_rq_held(rq);
10707 
10708 	if (ctx->queued)
10709 		enqueue_task(rq, ctx->p, ctx->queue_flags | ENQUEUE_NOCLOCK);
10710 	if (ctx->running)
10711 		set_next_task(rq, ctx->p);
10712 }
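
/*
 * Usage sketch for the pair above (hypothetical caller; rq lock held):
 * dequeue and "put" the task, change a scheduling property, then requeue
 * and restore it, with ctx preserving the queued/running state across
 * the change. DEQUEUE_SAVE | DEQUEUE_MOVE mirrors how other property
 * changes in this file dequeue a task they intend to requeue.
 */
#if 0	/* sketch only */
static void change_task_property(struct rq *rq, struct task_struct *p)
{
	struct sched_enq_and_set_ctx ctx;

	lockdep_assert_rq_held(rq);

	sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
	/* ... modify p's scheduling properties here ... */
	sched_enq_and_set_task(&ctx);
}
#endif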
10713 #endif	/* CONFIG_SCHED_CLASS_EXT */
10714