xref: /linux/kernel/sched/rt.c (revision a06c3fad49a50d5d5eb078f93e70f4d3eca5d5a5)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
4  * policies)
5  */
6 
7 int sched_rr_timeslice = RR_TIMESLICE;
8 /* More than 4 hours if BW_SHIFT equals 20. */
9 static const u64 max_rt_runtime = MAX_BW;
10 
11 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
12 
13 struct rt_bandwidth def_rt_bandwidth;
14 
15 /*
16  * period over which we measure -rt task CPU usage in us.
17  * default: 1s
18  */
19 int sysctl_sched_rt_period = 1000000;
20 
21 /*
22  * part of the period that we allow rt tasks to run in us.
23  * default: 0.95s
24  */
25 int sysctl_sched_rt_runtime = 950000;
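
/*
 * Worked example with the defaults above: RT tasks may consume at
 * most 950 ms of every 1000 ms period, leaving 50 ms per second for
 * non-RT tasks so a runaway RT loop cannot monopolize a CPU.
 * Writing -1 to sched_rt_runtime_us (RUNTIME_INF) disables the
 * throttling altogether.
 */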
26 
27 #ifdef CONFIG_SYSCTL
28 static int sysctl_sched_rr_timeslice = (MSEC_PER_SEC * RR_TIMESLICE) / HZ;
29 static int sched_rt_handler(const struct ctl_table *table, int write, void *buffer,
30 		size_t *lenp, loff_t *ppos);
31 static int sched_rr_handler(const struct ctl_table *table, int write, void *buffer,
32 		size_t *lenp, loff_t *ppos);
33 static struct ctl_table sched_rt_sysctls[] = {
34 	{
35 		.procname       = "sched_rt_period_us",
36 		.data           = &sysctl_sched_rt_period,
37 		.maxlen         = sizeof(int),
38 		.mode           = 0644,
39 		.proc_handler   = sched_rt_handler,
40 		.extra1         = SYSCTL_ONE,
41 		.extra2         = SYSCTL_INT_MAX,
42 	},
43 	{
44 		.procname       = "sched_rt_runtime_us",
45 		.data           = &sysctl_sched_rt_runtime,
46 		.maxlen         = sizeof(int),
47 		.mode           = 0644,
48 		.proc_handler   = sched_rt_handler,
49 		.extra1         = SYSCTL_NEG_ONE,
50 		.extra2         = (void *)&sysctl_sched_rt_period,
51 	},
52 	{
53 		.procname       = "sched_rr_timeslice_ms",
54 		.data           = &sysctl_sched_rr_timeslice,
55 		.maxlen         = sizeof(int),
56 		.mode           = 0644,
57 		.proc_handler   = sched_rr_handler,
58 	},
59 };
60 
61 static int __init sched_rt_sysctl_init(void)
62 {
63 	register_sysctl_init("kernel", sched_rt_sysctls);
64 	return 0;
65 }
66 late_initcall(sched_rt_sysctl_init);
67 #endif
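
/*
 * Tuning sketch via the sysctls registered above (values are
 * illustrative, not recommendations):
 *
 *   echo 800000 > /proc/sys/kernel/sched_rt_runtime_us   # RT gets 80%
 *   echo -1     > /proc/sys/kernel/sched_rt_runtime_us   # no throttling
 *   echo 50     > /proc/sys/kernel/sched_rr_timeslice_ms # 50 ms RR slice
 */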
68 
69 static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
70 {
71 	struct rt_bandwidth *rt_b =
72 		container_of(timer, struct rt_bandwidth, rt_period_timer);
73 	int idle = 0;
74 	int overrun;
75 
76 	raw_spin_lock(&rt_b->rt_runtime_lock);
77 	for (;;) {
78 		overrun = hrtimer_forward_now(timer, rt_b->rt_period);
79 		if (!overrun)
80 			break;
81 
82 		raw_spin_unlock(&rt_b->rt_runtime_lock);
83 		idle = do_sched_rt_period_timer(rt_b, overrun);
84 		raw_spin_lock(&rt_b->rt_runtime_lock);
85 	}
86 	if (idle)
87 		rt_b->rt_period_active = 0;
88 	raw_spin_unlock(&rt_b->rt_runtime_lock);
89 
90 	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
91 }
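
/*
 * Overrun example: if this handler is delayed and, say, three full
 * periods have elapsed, hrtimer_forward_now() returns 3 and
 * do_sched_rt_period_timer() replenishes three periods' worth of
 * runtime in one call; the loop repeats until the timer expiry has
 * been pushed past "now".
 */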
92 
93 void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
94 {
95 	rt_b->rt_period = ns_to_ktime(period);
96 	rt_b->rt_runtime = runtime;
97 
98 	raw_spin_lock_init(&rt_b->rt_runtime_lock);
99 
100 	hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC,
101 		     HRTIMER_MODE_REL_HARD);
102 	rt_b->rt_period_timer.function = sched_rt_period_timer;
103 }
104 
105 static inline void do_start_rt_bandwidth(struct rt_bandwidth *rt_b)
106 {
107 	raw_spin_lock(&rt_b->rt_runtime_lock);
108 	if (!rt_b->rt_period_active) {
109 		rt_b->rt_period_active = 1;
110 		/*
111 		 * SCHED_DEADLINE updates the bandwidth, as a runaway
112 		 * RT task with a DL task could hog a CPU. But DL does
113 		 * not reset the period. If a deadline task was running
114 		 * without an RT task running, it can cause RT tasks to
115 		 * throttle when they start up. Kick the timer right away
116 		 * to update the period.
117 		 */
118 		hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
119 		hrtimer_start_expires(&rt_b->rt_period_timer,
120 				      HRTIMER_MODE_ABS_PINNED_HARD);
121 	}
122 	raw_spin_unlock(&rt_b->rt_runtime_lock);
123 }
124 
125 static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
126 {
127 	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
128 		return;
129 
130 	do_start_rt_bandwidth(rt_b);
131 }
132 
133 void init_rt_rq(struct rt_rq *rt_rq)
134 {
135 	struct rt_prio_array *array;
136 	int i;
137 
138 	array = &rt_rq->active;
139 	for (i = 0; i < MAX_RT_PRIO; i++) {
140 		INIT_LIST_HEAD(array->queue + i);
141 		__clear_bit(i, array->bitmap);
142 	}
143 	/* delimiter for bit-search: */
144 	__set_bit(MAX_RT_PRIO, array->bitmap);
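	/*
	 * Example: with the queues empty, only this delimiter bit is
	 * set, so sched_find_first_bit() terminates at MAX_RT_PRIO
	 * instead of scanning past the end of the bitmap.
	 */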
145 
146 #if defined CONFIG_SMP
147 	rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
148 	rt_rq->highest_prio.next = MAX_RT_PRIO-1;
149 	rt_rq->overloaded = 0;
150 	plist_head_init(&rt_rq->pushable_tasks);
151 #endif /* CONFIG_SMP */
152 	/* We start in dequeued state, because no RT tasks are queued */
153 	rt_rq->rt_queued = 0;
154 
155 	rt_rq->rt_time = 0;
156 	rt_rq->rt_throttled = 0;
157 	rt_rq->rt_runtime = 0;
158 	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
159 }
160 
161 #ifdef CONFIG_RT_GROUP_SCHED
162 static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
163 {
164 	hrtimer_cancel(&rt_b->rt_period_timer);
165 }
166 
167 #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
168 
169 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
170 {
171 #ifdef CONFIG_SCHED_DEBUG
172 	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
173 #endif
174 	return container_of(rt_se, struct task_struct, rt);
175 }
176 
177 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
178 {
179 	return rt_rq->rq;
180 }
181 
182 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
183 {
184 	return rt_se->rt_rq;
185 }
186 
187 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
188 {
189 	struct rt_rq *rt_rq = rt_se->rt_rq;
190 
191 	return rt_rq->rq;
192 }
193 
194 void unregister_rt_sched_group(struct task_group *tg)
195 {
196 	if (tg->rt_se)
197 		destroy_rt_bandwidth(&tg->rt_bandwidth);
198 
199 }
200 
201 void free_rt_sched_group(struct task_group *tg)
202 {
203 	int i;
204 
205 	for_each_possible_cpu(i) {
206 		if (tg->rt_rq)
207 			kfree(tg->rt_rq[i]);
208 		if (tg->rt_se)
209 			kfree(tg->rt_se[i]);
210 	}
211 
212 	kfree(tg->rt_rq);
213 	kfree(tg->rt_se);
214 }
215 
216 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
217 		struct sched_rt_entity *rt_se, int cpu,
218 		struct sched_rt_entity *parent)
219 {
220 	struct rq *rq = cpu_rq(cpu);
221 
222 	rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
223 	rt_rq->rt_nr_boosted = 0;
224 	rt_rq->rq = rq;
225 	rt_rq->tg = tg;
226 
227 	tg->rt_rq[cpu] = rt_rq;
228 	tg->rt_se[cpu] = rt_se;
229 
230 	if (!rt_se)
231 		return;
232 
233 	if (!parent)
234 		rt_se->rt_rq = &rq->rt;
235 	else
236 		rt_se->rt_rq = parent->my_q;
237 
238 	rt_se->my_q = rt_rq;
239 	rt_se->parent = parent;
240 	INIT_LIST_HEAD(&rt_se->run_list);
241 }
242 
243 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
244 {
245 	struct rt_rq *rt_rq;
246 	struct sched_rt_entity *rt_se;
247 	int i;
248 
249 	tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
250 	if (!tg->rt_rq)
251 		goto err;
252 	tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL);
253 	if (!tg->rt_se)
254 		goto err;
255 
256 	init_rt_bandwidth(&tg->rt_bandwidth,
257 			ktime_to_ns(def_rt_bandwidth.rt_period), 0);
258 
259 	for_each_possible_cpu(i) {
260 		rt_rq = kzalloc_node(sizeof(struct rt_rq),
261 				     GFP_KERNEL, cpu_to_node(i));
262 		if (!rt_rq)
263 			goto err;
264 
265 		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
266 				     GFP_KERNEL, cpu_to_node(i));
267 		if (!rt_se)
268 			goto err_free_rq;
269 
270 		init_rt_rq(rt_rq);
271 		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
272 		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
273 	}
274 
275 	return 1;
276 
277 err_free_rq:
278 	kfree(rt_rq);
279 err:
280 	return 0;
281 }
282 
283 #else /* CONFIG_RT_GROUP_SCHED */
284 
285 #define rt_entity_is_task(rt_se) (1)
286 
287 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
288 {
289 	return container_of(rt_se, struct task_struct, rt);
290 }
291 
292 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
293 {
294 	return container_of(rt_rq, struct rq, rt);
295 }
296 
297 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
298 {
299 	struct task_struct *p = rt_task_of(rt_se);
300 
301 	return task_rq(p);
302 }
303 
304 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
305 {
306 	struct rq *rq = rq_of_rt_se(rt_se);
307 
308 	return &rq->rt;
309 }
310 
311 void unregister_rt_sched_group(struct task_group *tg) { }
312 
313 void free_rt_sched_group(struct task_group *tg) { }
314 
315 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
316 {
317 	return 1;
318 }
319 #endif /* CONFIG_RT_GROUP_SCHED */
320 
321 #ifdef CONFIG_SMP
322 
323 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
324 {
325 	/* Try to pull RT tasks here if we lower this rq's prio */
326 	return rq->online && rq->rt.highest_prio.curr > prev->prio;
327 }
328 
329 static inline int rt_overloaded(struct rq *rq)
330 {
331 	return atomic_read(&rq->rd->rto_count);
332 }
333 
334 static inline void rt_set_overload(struct rq *rq)
335 {
336 	if (!rq->online)
337 		return;
338 
339 	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
340 	/*
341 	 * Make sure the mask is visible before we set
342 	 * the overload count. That is checked to determine
343 	 * if we should look at the mask. It would be a shame
344 	 * if we looked at the mask, but the mask was not
345 	 * updated yet.
346 	 *
347 	 * Matched by the barrier in pull_rt_task().
348 	 */
349 	smp_wmb();
350 	atomic_inc(&rq->rd->rto_count);
351 }
352 
353 static inline void rt_clear_overload(struct rq *rq)
354 {
355 	if (!rq->online)
356 		return;
357 
358 	/* the order here really doesn't matter */
359 	atomic_dec(&rq->rd->rto_count);
360 	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
361 }
362 
363 static inline int has_pushable_tasks(struct rq *rq)
364 {
365 	return !plist_head_empty(&rq->rt.pushable_tasks);
366 }
367 
368 static DEFINE_PER_CPU(struct balance_callback, rt_push_head);
369 static DEFINE_PER_CPU(struct balance_callback, rt_pull_head);
370 
371 static void push_rt_tasks(struct rq *);
372 static void pull_rt_task(struct rq *);
373 
374 static inline void rt_queue_push_tasks(struct rq *rq)
375 {
376 	if (!has_pushable_tasks(rq))
377 		return;
378 
379 	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
380 }
381 
382 static inline void rt_queue_pull_task(struct rq *rq)
383 {
384 	queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
385 }
386 
387 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
388 {
389 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
390 	plist_node_init(&p->pushable_tasks, p->prio);
391 	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
392 
393 	/* Update the highest prio pushable task */
394 	if (p->prio < rq->rt.highest_prio.next)
395 		rq->rt.highest_prio.next = p->prio;
396 
397 	if (!rq->rt.overloaded) {
398 		rt_set_overload(rq);
399 		rq->rt.overloaded = 1;
400 	}
401 }
402 
403 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
404 {
405 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
406 
407 	/* Update the new highest prio pushable task */
408 	if (has_pushable_tasks(rq)) {
409 		p = plist_first_entry(&rq->rt.pushable_tasks,
410 				      struct task_struct, pushable_tasks);
411 		rq->rt.highest_prio.next = p->prio;
412 	} else {
413 		rq->rt.highest_prio.next = MAX_RT_PRIO-1;
414 
415 		if (rq->rt.overloaded) {
416 			rt_clear_overload(rq);
417 			rq->rt.overloaded = 0;
418 		}
419 	}
420 }
421 
422 #else
423 
424 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
425 {
426 }
427 
428 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
429 {
430 }
431 
432 static inline void rt_queue_push_tasks(struct rq *rq)
433 {
434 }
435 #endif /* CONFIG_SMP */
436 
437 static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
438 static void dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count);
439 
440 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
441 {
442 	return rt_se->on_rq;
443 }
444 
445 #ifdef CONFIG_UCLAMP_TASK
446 /*
447  * Verify the fitness of task @p to run on @cpu taking into account the uclamp
448  * settings.
449  *
450  * This check is only important for heterogeneous systems where uclamp_min value
451  * is higher than the capacity of a @cpu. For non-heterogeneous systems this
452  * function will always return true.
453  *
454  * The function will return true if the capacity of the @cpu is >= the
455  * uclamp_min and false otherwise.
456  *
457  * Note that uclamp_min will be clamped to uclamp_max if uclamp_min
458  * > uclamp_max.
459  */
460 static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
461 {
462 	unsigned int min_cap;
463 	unsigned int max_cap;
464 	unsigned int cpu_cap;
465 
466 	/* Only heterogeneous systems can benefit from this check */
467 	if (!sched_asym_cpucap_active())
468 		return true;
469 
470 	min_cap = uclamp_eff_value(p, UCLAMP_MIN);
471 	max_cap = uclamp_eff_value(p, UCLAMP_MAX);
472 
473 	cpu_cap = arch_scale_cpu_capacity(cpu);
474 
475 	return cpu_cap >= min(min_cap, max_cap);
476 }
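
/*
 * Example on an asymmetric (e.g. big.LITTLE) system: a task with
 * uclamp_min == 512 and uclamp_max == 1024 fits a CPU of capacity
 * 1024 but not one of capacity 410, since 410 < min(512, 1024).
 * If uclamp_min > uclamp_max, the min() above effectively clamps
 * the requirement down to uclamp_max.
 */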
477 #else
478 static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
479 {
480 	return true;
481 }
482 #endif
483 
484 #ifdef CONFIG_RT_GROUP_SCHED
485 
486 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
487 {
488 	if (!rt_rq->tg)
489 		return RUNTIME_INF;
490 
491 	return rt_rq->rt_runtime;
492 }
493 
494 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
495 {
496 	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
497 }
498 
499 typedef struct task_group *rt_rq_iter_t;
500 
501 static inline struct task_group *next_task_group(struct task_group *tg)
502 {
503 	do {
504 		tg = list_entry_rcu(tg->list.next,
505 			typeof(struct task_group), list);
506 	} while (&tg->list != &task_groups && task_group_is_autogroup(tg));
507 
508 	if (&tg->list == &task_groups)
509 		tg = NULL;
510 
511 	return tg;
512 }
513 
514 #define for_each_rt_rq(rt_rq, iter, rq)					\
515 	for (iter = container_of(&task_groups, typeof(*iter), list);	\
516 		(iter = next_task_group(iter)) &&			\
517 		(rt_rq = iter->rt_rq[cpu_of(rq)]);)
518 
519 #define for_each_sched_rt_entity(rt_se) \
520 	for (; rt_se; rt_se = rt_se->parent)
521 
522 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
523 {
524 	return rt_se->my_q;
525 }
526 
527 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
528 static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
529 
530 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
531 {
532 	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
533 	struct rq *rq = rq_of_rt_rq(rt_rq);
534 	struct sched_rt_entity *rt_se;
535 
536 	int cpu = cpu_of(rq);
537 
538 	rt_se = rt_rq->tg->rt_se[cpu];
539 
540 	if (rt_rq->rt_nr_running) {
541 		if (!rt_se)
542 			enqueue_top_rt_rq(rt_rq);
543 		else if (!on_rt_rq(rt_se))
544 			enqueue_rt_entity(rt_se, 0);
545 
546 		if (rt_rq->highest_prio.curr < curr->prio)
547 			resched_curr(rq);
548 	}
549 }
550 
551 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
552 {
553 	struct sched_rt_entity *rt_se;
554 	int cpu = cpu_of(rq_of_rt_rq(rt_rq));
555 
556 	rt_se = rt_rq->tg->rt_se[cpu];
557 
558 	if (!rt_se) {
559 		dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
560 		/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
561 		cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
562 	}
563 	else if (on_rt_rq(rt_se))
564 		dequeue_rt_entity(rt_se, 0);
565 }
566 
567 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
568 {
569 	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
570 }
571 
572 static int rt_se_boosted(struct sched_rt_entity *rt_se)
573 {
574 	struct rt_rq *rt_rq = group_rt_rq(rt_se);
575 	struct task_struct *p;
576 
577 	if (rt_rq)
578 		return !!rt_rq->rt_nr_boosted;
579 
580 	p = rt_task_of(rt_se);
581 	return p->prio != p->normal_prio;
582 }
583 
584 #ifdef CONFIG_SMP
585 static inline const struct cpumask *sched_rt_period_mask(void)
586 {
587 	return this_rq()->rd->span;
588 }
589 #else
590 static inline const struct cpumask *sched_rt_period_mask(void)
591 {
592 	return cpu_online_mask;
593 }
594 #endif
595 
596 static inline
597 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
598 {
599 	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
600 }
601 
602 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
603 {
604 	return &rt_rq->tg->rt_bandwidth;
605 }
606 
607 #else /* !CONFIG_RT_GROUP_SCHED */
608 
609 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
610 {
611 	return rt_rq->rt_runtime;
612 }
613 
614 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
615 {
616 	return ktime_to_ns(def_rt_bandwidth.rt_period);
617 }
618 
619 typedef struct rt_rq *rt_rq_iter_t;
620 
621 #define for_each_rt_rq(rt_rq, iter, rq) \
622 	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
623 
624 #define for_each_sched_rt_entity(rt_se) \
625 	for (; rt_se; rt_se = NULL)
626 
627 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
628 {
629 	return NULL;
630 }
631 
632 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
633 {
634 	struct rq *rq = rq_of_rt_rq(rt_rq);
635 
636 	if (!rt_rq->rt_nr_running)
637 		return;
638 
639 	enqueue_top_rt_rq(rt_rq);
640 	resched_curr(rq);
641 }
642 
643 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
644 {
645 	dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
646 }
647 
648 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
649 {
650 	return rt_rq->rt_throttled;
651 }
652 
653 static inline const struct cpumask *sched_rt_period_mask(void)
654 {
655 	return cpu_online_mask;
656 }
657 
658 static inline
659 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
660 {
661 	return &cpu_rq(cpu)->rt;
662 }
663 
664 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
665 {
666 	return &def_rt_bandwidth;
667 }
668 
669 #endif /* CONFIG_RT_GROUP_SCHED */
670 
671 bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
672 {
673 	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
674 
675 	return (hrtimer_active(&rt_b->rt_period_timer) ||
676 		rt_rq->rt_time < rt_b->rt_runtime);
677 }
678 
679 #ifdef CONFIG_SMP
680 /*
681  * We ran out of runtime, see if we can borrow some from our neighbours.
682  */
683 static void do_balance_runtime(struct rt_rq *rt_rq)
684 {
685 	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
686 	struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
687 	int i, weight;
688 	u64 rt_period;
689 
690 	weight = cpumask_weight(rd->span);
691 
692 	raw_spin_lock(&rt_b->rt_runtime_lock);
693 	rt_period = ktime_to_ns(rt_b->rt_period);
694 	for_each_cpu(i, rd->span) {
695 		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
696 		s64 diff;
697 
698 		if (iter == rt_rq)
699 			continue;
700 
701 		raw_spin_lock(&iter->rt_runtime_lock);
702 		/*
703 		 * Either all rqs have inf runtime and there's nothing to steal
704 		 * or __disable_runtime() below sets a specific rq to inf to
705 		 * indicate it's been disabled and disallow stealing.
706 		 */
707 		if (iter->rt_runtime == RUNTIME_INF)
708 			goto next;
709 
710 		/*
711 		 * From runqueues with spare time, take 1/n part of their
712 		 * spare time, but no more than our period.
713 		 */
714 		diff = iter->rt_runtime - iter->rt_time;
715 		if (diff > 0) {
716 			diff = div_u64((u64)diff, weight);
717 			if (rt_rq->rt_runtime + diff > rt_period)
718 				diff = rt_period - rt_rq->rt_runtime;
719 			iter->rt_runtime -= diff;
720 			rt_rq->rt_runtime += diff;
721 			if (rt_rq->rt_runtime == rt_period) {
722 				raw_spin_unlock(&iter->rt_runtime_lock);
723 				break;
724 			}
725 		}
726 next:
727 		raw_spin_unlock(&iter->rt_runtime_lock);
728 	}
729 	raw_spin_unlock(&rt_b->rt_runtime_lock);
730 }
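
/*
 * Borrowing example: on a root domain spanning 4 CPUs (weight == 4),
 * a neighbour with 40 ms of unused runtime donates 1/4 of it, i.e.
 * 10 ms, to the depleted rt_rq. Repeated over all neighbours, the
 * borrowed total is still capped so rt_runtime never exceeds the
 * period.
 */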
731 
732 /*
733  * Ensure this RQ takes back all the runtime it lent to its neighbours.
734  */
735 static void __disable_runtime(struct rq *rq)
736 {
737 	struct root_domain *rd = rq->rd;
738 	rt_rq_iter_t iter;
739 	struct rt_rq *rt_rq;
740 
741 	if (unlikely(!scheduler_running))
742 		return;
743 
744 	for_each_rt_rq(rt_rq, iter, rq) {
745 		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
746 		s64 want;
747 		int i;
748 
749 		raw_spin_lock(&rt_b->rt_runtime_lock);
750 		raw_spin_lock(&rt_rq->rt_runtime_lock);
751 		/*
752 		 * Either we're all inf and nobody needs to borrow, or we're
753 		 * already disabled and thus have nothing to do, or we have
754 		 * exactly the right amount of runtime to take out.
755 		 */
756 		if (rt_rq->rt_runtime == RUNTIME_INF ||
757 				rt_rq->rt_runtime == rt_b->rt_runtime)
758 			goto balanced;
759 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
760 
761 		/*
762 		 * Calculate the difference between what we started out with
763 		 * and what we currently have; that's the amount of runtime
764 		 * we lent out and now have to reclaim.
765 		 */
766 		want = rt_b->rt_runtime - rt_rq->rt_runtime;
767 
768 		/*
769 		 * Greedy reclaim, take back as much as we can.
770 		 */
771 		for_each_cpu(i, rd->span) {
772 			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
773 			s64 diff;
774 
775 			/*
776 			 * Can't reclaim from ourselves or disabled runqueues.
777 			 */
778 			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
779 				continue;
780 
781 			raw_spin_lock(&iter->rt_runtime_lock);
782 			if (want > 0) {
783 				diff = min_t(s64, iter->rt_runtime, want);
784 				iter->rt_runtime -= diff;
785 				want -= diff;
786 			} else {
787 				iter->rt_runtime -= want;
788 				want -= want;
789 			}
790 			raw_spin_unlock(&iter->rt_runtime_lock);
791 
792 			if (!want)
793 				break;
794 		}
795 
796 		raw_spin_lock(&rt_rq->rt_runtime_lock);
797 		/*
798 		 * We cannot be left wanting - that would mean some runtime
799 		 * leaked out of the system.
800 		 */
801 		WARN_ON_ONCE(want);
802 balanced:
803 		/*
804 		 * Disable all the borrow logic by pretending we have inf
805 		 * runtime - in which case borrowing doesn't make sense.
806 		 */
807 		rt_rq->rt_runtime = RUNTIME_INF;
808 		rt_rq->rt_throttled = 0;
809 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
810 		raw_spin_unlock(&rt_b->rt_runtime_lock);
811 
812 		/* Make rt_rq available for pick_next_task() */
813 		sched_rt_rq_enqueue(rt_rq);
814 	}
815 }
816 
817 static void __enable_runtime(struct rq *rq)
818 {
819 	rt_rq_iter_t iter;
820 	struct rt_rq *rt_rq;
821 
822 	if (unlikely(!scheduler_running))
823 		return;
824 
825 	/*
826 	 * Reset each runqueue's bandwidth settings
827 	 */
828 	for_each_rt_rq(rt_rq, iter, rq) {
829 		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
830 
831 		raw_spin_lock(&rt_b->rt_runtime_lock);
832 		raw_spin_lock(&rt_rq->rt_runtime_lock);
833 		rt_rq->rt_runtime = rt_b->rt_runtime;
834 		rt_rq->rt_time = 0;
835 		rt_rq->rt_throttled = 0;
836 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
837 		raw_spin_unlock(&rt_b->rt_runtime_lock);
838 	}
839 }
840 
841 static void balance_runtime(struct rt_rq *rt_rq)
842 {
843 	if (!sched_feat(RT_RUNTIME_SHARE))
844 		return;
845 
846 	if (rt_rq->rt_time > rt_rq->rt_runtime) {
847 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
848 		do_balance_runtime(rt_rq);
849 		raw_spin_lock(&rt_rq->rt_runtime_lock);
850 	}
851 }
852 #else /* !CONFIG_SMP */
853 static inline void balance_runtime(struct rt_rq *rt_rq) {}
854 #endif /* CONFIG_SMP */
855 
856 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
857 {
858 	int i, idle = 1, throttled = 0;
859 	const struct cpumask *span;
860 
861 	span = sched_rt_period_mask();
862 #ifdef CONFIG_RT_GROUP_SCHED
863 	/*
864 	 * FIXME: isolated CPUs should really leave the root task group,
865 	 * whether they are isolcpus or were isolated via cpusets, lest
866 	 * the timer run on a CPU which does not service all runqueues,
867 	 * potentially leaving other CPUs indefinitely throttled.  If
868 	 * isolation is really required, the user will turn the throttle
869 	 * off to kill the perturbations it causes anyway.  Meanwhile,
870 	 * this maintains functionality for boot and/or troubleshooting.
871 	 */
872 	if (rt_b == &root_task_group.rt_bandwidth)
873 		span = cpu_online_mask;
874 #endif
875 	for_each_cpu(i, span) {
876 		int enqueue = 0;
877 		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
878 		struct rq *rq = rq_of_rt_rq(rt_rq);
879 		struct rq_flags rf;
880 		int skip;
881 
882 		/*
883 		 * When span == cpu_online_mask, taking each rq->lock
884 		 * can be time-consuming. Try to avoid it when possible.
885 		 */
886 		raw_spin_lock(&rt_rq->rt_runtime_lock);
887 		if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
888 			rt_rq->rt_runtime = rt_b->rt_runtime;
889 		skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
890 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
891 		if (skip)
892 			continue;
893 
894 		rq_lock(rq, &rf);
895 		update_rq_clock(rq);
896 
897 		if (rt_rq->rt_time) {
898 			u64 runtime;
899 
900 			raw_spin_lock(&rt_rq->rt_runtime_lock);
901 			if (rt_rq->rt_throttled)
902 				balance_runtime(rt_rq);
903 			runtime = rt_rq->rt_runtime;
904 			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
905 			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
906 				rt_rq->rt_throttled = 0;
907 				enqueue = 1;
908 
909 				/*
910 				 * When we're idle and a woken (rt) task is
911 				 * throttled, wakeup_preempt() will set
912 				 * skip_update and the time between the wakeup
913 				 * and this unthrottle will get accounted as
914 				 * 'runtime'.
915 				 */
916 				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
917 					rq_clock_cancel_skipupdate(rq);
918 			}
919 			if (rt_rq->rt_time || rt_rq->rt_nr_running)
920 				idle = 0;
921 			raw_spin_unlock(&rt_rq->rt_runtime_lock);
922 		} else if (rt_rq->rt_nr_running) {
923 			idle = 0;
924 			if (!rt_rq_throttled(rt_rq))
925 				enqueue = 1;
926 		}
927 		if (rt_rq->rt_throttled)
928 			throttled = 1;
929 
930 		if (enqueue)
931 			sched_rt_rq_enqueue(rt_rq);
932 		rq_unlock(rq, &rf);
933 	}
934 
935 	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
936 		return 1;
937 
938 	return idle;
939 }
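
/*
 * Replenishment example with the defaults: runtime == 950 ms, so a
 * single overrun subtracts up to 950 ms from rt_time; as soon as
 * rt_time drops below the runtime again, the rt_rq is unthrottled
 * and re-enqueued above.
 */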
940 
941 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
942 {
943 #ifdef CONFIG_RT_GROUP_SCHED
944 	struct rt_rq *rt_rq = group_rt_rq(rt_se);
945 
946 	if (rt_rq)
947 		return rt_rq->highest_prio.curr;
948 #endif
949 
950 	return rt_task_of(rt_se)->prio;
951 }
952 
953 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
954 {
955 	u64 runtime = sched_rt_runtime(rt_rq);
956 
957 	if (rt_rq->rt_throttled)
958 		return rt_rq_throttled(rt_rq);
959 
960 	if (runtime >= sched_rt_period(rt_rq))
961 		return 0;
962 
963 	balance_runtime(rt_rq);
964 	runtime = sched_rt_runtime(rt_rq);
965 	if (runtime == RUNTIME_INF)
966 		return 0;
967 
968 	if (rt_rq->rt_time > runtime) {
969 		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
970 
971 		/*
972 		 * Don't actually throttle groups that have no runtime assigned
973 		 * but accrue some time due to boosting.
974 		 */
975 		if (likely(rt_b->rt_runtime)) {
976 			rt_rq->rt_throttled = 1;
977 			printk_deferred_once("sched: RT throttling activated\n");
978 		} else {
979 			/*
980 			 * In case we did anyway, make it go away;
981 			 * replenishment is a joke, since it will replenish us
982 			 * with exactly 0 ns.
983 			 */
984 			rt_rq->rt_time = 0;
985 		}
986 
987 		if (rt_rq_throttled(rt_rq)) {
988 			sched_rt_rq_dequeue(rt_rq);
989 			return 1;
990 		}
991 	}
992 
993 	return 0;
994 }
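
/*
 * Putting it together: update_curr_rt() below charges execution time
 * to rt_time; once rt_time exceeds the runtime, the rt_rq is marked
 * throttled and dequeued here, and it stays off the runqueue until
 * do_sched_rt_period_timer() replenishes it.
 */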
995 
996 /*
997  * Update the current task's runtime statistics. Skip current tasks that
998  * are not in our scheduling class.
999  */
1000 static void update_curr_rt(struct rq *rq)
1001 {
1002 	struct task_struct *curr = rq->curr;
1003 	struct sched_rt_entity *rt_se = &curr->rt;
1004 	s64 delta_exec;
1005 
1006 	if (curr->sched_class != &rt_sched_class)
1007 		return;
1008 
1009 	delta_exec = update_curr_common(rq);
1010 	if (unlikely(delta_exec <= 0))
1011 		return;
1012 
1013 	if (!rt_bandwidth_enabled())
1014 		return;
1015 
1016 	for_each_sched_rt_entity(rt_se) {
1017 		struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1018 		int exceeded;
1019 
1020 		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
1021 			raw_spin_lock(&rt_rq->rt_runtime_lock);
1022 			rt_rq->rt_time += delta_exec;
1023 			exceeded = sched_rt_runtime_exceeded(rt_rq);
1024 			if (exceeded)
1025 				resched_curr(rq);
1026 			raw_spin_unlock(&rt_rq->rt_runtime_lock);
1027 			if (exceeded)
1028 				do_start_rt_bandwidth(sched_rt_bandwidth(rt_rq));
1029 		}
1030 	}
1031 }
1032 
1033 static void
1034 dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count)
1035 {
1036 	struct rq *rq = rq_of_rt_rq(rt_rq);
1037 
1038 	BUG_ON(&rq->rt != rt_rq);
1039 
1040 	if (!rt_rq->rt_queued)
1041 		return;
1042 
1043 	BUG_ON(!rq->nr_running);
1044 
1045 	sub_nr_running(rq, count);
1046 	rt_rq->rt_queued = 0;
1047 
1048 }
1049 
1050 static void
1051 enqueue_top_rt_rq(struct rt_rq *rt_rq)
1052 {
1053 	struct rq *rq = rq_of_rt_rq(rt_rq);
1054 
1055 	BUG_ON(&rq->rt != rt_rq);
1056 
1057 	if (rt_rq->rt_queued)
1058 		return;
1059 
1060 	if (rt_rq_throttled(rt_rq))
1061 		return;
1062 
1063 	if (rt_rq->rt_nr_running) {
1064 		add_nr_running(rq, rt_rq->rt_nr_running);
1065 		rt_rq->rt_queued = 1;
1066 	}
1067 
1068 	/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
1069 	cpufreq_update_util(rq, 0);
1070 }
1071 
1072 #if defined CONFIG_SMP
1073 
1074 static void
1075 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1076 {
1077 	struct rq *rq = rq_of_rt_rq(rt_rq);
1078 
1079 #ifdef CONFIG_RT_GROUP_SCHED
1080 	/*
1081 	 * Change rq's cpupri only if rt_rq is the top queue.
1082 	 */
1083 	if (&rq->rt != rt_rq)
1084 		return;
1085 #endif
1086 	if (rq->online && prio < prev_prio)
1087 		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1088 }
1089 
1090 static void
1091 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1092 {
1093 	struct rq *rq = rq_of_rt_rq(rt_rq);
1094 
1095 #ifdef CONFIG_RT_GROUP_SCHED
1096 	/*
1097 	 * Change rq's cpupri only if rt_rq is the top queue.
1098 	 */
1099 	if (&rq->rt != rt_rq)
1100 		return;
1101 #endif
1102 	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
1103 		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
1104 }
1105 
1106 #else /* CONFIG_SMP */
1107 
1108 static inline
1109 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1110 static inline
1111 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1112 
1113 #endif /* CONFIG_SMP */
1114 
1115 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
1116 static void
1117 inc_rt_prio(struct rt_rq *rt_rq, int prio)
1118 {
1119 	int prev_prio = rt_rq->highest_prio.curr;
1120 
1121 	if (prio < prev_prio)
1122 		rt_rq->highest_prio.curr = prio;
1123 
1124 	inc_rt_prio_smp(rt_rq, prio, prev_prio);
1125 }
1126 
1127 static void
1128 dec_rt_prio(struct rt_rq *rt_rq, int prio)
1129 {
1130 	int prev_prio = rt_rq->highest_prio.curr;
1131 
1132 	if (rt_rq->rt_nr_running) {
1133 
1134 		WARN_ON(prio < prev_prio);
1135 
1136 		/*
1137 		 * This may have been our highest task, and therefore
1138 		 * we may have some re-computation to do
1139 		 */
1140 		if (prio == prev_prio) {
1141 			struct rt_prio_array *array = &rt_rq->active;
1142 
1143 			rt_rq->highest_prio.curr =
1144 				sched_find_first_bit(array->bitmap);
1145 		}
1146 
1147 	} else {
1148 		rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
1149 	}
1150 
1151 	dec_rt_prio_smp(rt_rq, prio, prev_prio);
1152 }
1153 
1154 #else
1155 
1156 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1157 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1158 
1159 #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1160 
1161 #ifdef CONFIG_RT_GROUP_SCHED
1162 
1163 static void
1164 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1165 {
1166 	if (rt_se_boosted(rt_se))
1167 		rt_rq->rt_nr_boosted++;
1168 
1169 	if (rt_rq->tg)
1170 		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1171 }
1172 
1173 static void
1174 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1175 {
1176 	if (rt_se_boosted(rt_se))
1177 		rt_rq->rt_nr_boosted--;
1178 
1179 	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1180 }
1181 
1182 #else /* CONFIG_RT_GROUP_SCHED */
1183 
1184 static void
1185 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1186 {
1187 	start_rt_bandwidth(&def_rt_bandwidth);
1188 }
1189 
1190 static inline
1191 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1192 
1193 #endif /* CONFIG_RT_GROUP_SCHED */
1194 
1195 static inline
1196 unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
1197 {
1198 	struct rt_rq *group_rq = group_rt_rq(rt_se);
1199 
1200 	if (group_rq)
1201 		return group_rq->rt_nr_running;
1202 	else
1203 		return 1;
1204 }
1205 
1206 static inline
1207 unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
1208 {
1209 	struct rt_rq *group_rq = group_rt_rq(rt_se);
1210 	struct task_struct *tsk;
1211 
1212 	if (group_rq)
1213 		return group_rq->rr_nr_running;
1214 
1215 	tsk = rt_task_of(rt_se);
1216 
1217 	return (tsk->policy == SCHED_RR) ? 1 : 0;
1218 }
1219 
1220 static inline
1221 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1222 {
1223 	int prio = rt_se_prio(rt_se);
1224 
1225 	WARN_ON(!rt_prio(prio));
1226 	rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
1227 	rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
1228 
1229 	inc_rt_prio(rt_rq, prio);
1230 	inc_rt_group(rt_se, rt_rq);
1231 }
1232 
1233 static inline
1234 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1235 {
1236 	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1237 	WARN_ON(!rt_rq->rt_nr_running);
1238 	rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
1239 	rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
1240 
1241 	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1242 	dec_rt_group(rt_se, rt_rq);
1243 }
1244 
1245 /*
1246  * Change rt_se->run_list location unless SAVE && !MOVE
1247  *
1248  * assumes ENQUEUE/DEQUEUE flags match
1249  */
1250 static inline bool move_entity(unsigned int flags)
1251 {
1252 	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
1253 		return false;
1254 
1255 	return true;
1256 }
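
/*
 * Example: flags with DEQUEUE_SAVE set but DEQUEUE_MOVE clear (a
 * temporary dequeue that will requeue in place) return false, so
 * the entity keeps its position in its priority queue; every other
 * combination relocates it.
 */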
1257 
1258 static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
1259 {
1260 	list_del_init(&rt_se->run_list);
1261 
1262 	if (list_empty(array->queue + rt_se_prio(rt_se)))
1263 		__clear_bit(rt_se_prio(rt_se), array->bitmap);
1264 
1265 	rt_se->on_list = 0;
1266 }
1267 
1268 static inline struct sched_statistics *
1269 __schedstats_from_rt_se(struct sched_rt_entity *rt_se)
1270 {
1271 #ifdef CONFIG_RT_GROUP_SCHED
1272 	/* schedstats is not supported for rt group. */
1273 	if (!rt_entity_is_task(rt_se))
1274 		return NULL;
1275 #endif
1276 
1277 	return &rt_task_of(rt_se)->stats;
1278 }
1279 
1280 static inline void
1281 update_stats_wait_start_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
1282 {
1283 	struct sched_statistics *stats;
1284 	struct task_struct *p = NULL;
1285 
1286 	if (!schedstat_enabled())
1287 		return;
1288 
1289 	if (rt_entity_is_task(rt_se))
1290 		p = rt_task_of(rt_se);
1291 
1292 	stats = __schedstats_from_rt_se(rt_se);
1293 	if (!stats)
1294 		return;
1295 
1296 	__update_stats_wait_start(rq_of_rt_rq(rt_rq), p, stats);
1297 }
1298 
1299 static inline void
1300 update_stats_enqueue_sleeper_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
1301 {
1302 	struct sched_statistics *stats;
1303 	struct task_struct *p = NULL;
1304 
1305 	if (!schedstat_enabled())
1306 		return;
1307 
1308 	if (rt_entity_is_task(rt_se))
1309 		p = rt_task_of(rt_se);
1310 
1311 	stats = __schedstats_from_rt_se(rt_se);
1312 	if (!stats)
1313 		return;
1314 
1315 	__update_stats_enqueue_sleeper(rq_of_rt_rq(rt_rq), p, stats);
1316 }
1317 
1318 static inline void
1319 update_stats_enqueue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se,
1320 			int flags)
1321 {
1322 	if (!schedstat_enabled())
1323 		return;
1324 
1325 	if (flags & ENQUEUE_WAKEUP)
1326 		update_stats_enqueue_sleeper_rt(rt_rq, rt_se);
1327 }
1328 
1329 static inline void
1330 update_stats_wait_end_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
1331 {
1332 	struct sched_statistics *stats;
1333 	struct task_struct *p = NULL;
1334 
1335 	if (!schedstat_enabled())
1336 		return;
1337 
1338 	if (rt_entity_is_task(rt_se))
1339 		p = rt_task_of(rt_se);
1340 
1341 	stats = __schedstats_from_rt_se(rt_se);
1342 	if (!stats)
1343 		return;
1344 
1345 	__update_stats_wait_end(rq_of_rt_rq(rt_rq), p, stats);
1346 }
1347 
1348 static inline void
1349 update_stats_dequeue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se,
1350 			int flags)
1351 {
1352 	struct task_struct *p = NULL;
1353 
1354 	if (!schedstat_enabled())
1355 		return;
1356 
1357 	if (rt_entity_is_task(rt_se))
1358 		p = rt_task_of(rt_se);
1359 
1360 	if ((flags & DEQUEUE_SLEEP) && p) {
1361 		unsigned int state;
1362 
1363 		state = READ_ONCE(p->__state);
1364 		if (state & TASK_INTERRUPTIBLE)
1365 			__schedstat_set(p->stats.sleep_start,
1366 					rq_clock(rq_of_rt_rq(rt_rq)));
1367 
1368 		if (state & TASK_UNINTERRUPTIBLE)
1369 			__schedstat_set(p->stats.block_start,
1370 					rq_clock(rq_of_rt_rq(rt_rq)));
1371 	}
1372 }
1373 
1374 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1375 {
1376 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1377 	struct rt_prio_array *array = &rt_rq->active;
1378 	struct rt_rq *group_rq = group_rt_rq(rt_se);
1379 	struct list_head *queue = array->queue + rt_se_prio(rt_se);
1380 
1381 	/*
1382 	 * Don't enqueue the group if it's throttled, or when empty.
1383 	 * The latter is a consequence of the former when a child group
1384 	 * gets throttled and the current group doesn't have any other
1385 	 * active members.
1386 	 */
1387 	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
1388 		if (rt_se->on_list)
1389 			__delist_rt_entity(rt_se, array);
1390 		return;
1391 	}
1392 
1393 	if (move_entity(flags)) {
1394 		WARN_ON_ONCE(rt_se->on_list);
1395 		if (flags & ENQUEUE_HEAD)
1396 			list_add(&rt_se->run_list, queue);
1397 		else
1398 			list_add_tail(&rt_se->run_list, queue);
1399 
1400 		__set_bit(rt_se_prio(rt_se), array->bitmap);
1401 		rt_se->on_list = 1;
1402 	}
1403 	rt_se->on_rq = 1;
1404 
1405 	inc_rt_tasks(rt_se, rt_rq);
1406 }
1407 
1408 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1409 {
1410 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1411 	struct rt_prio_array *array = &rt_rq->active;
1412 
1413 	if (move_entity(flags)) {
1414 		WARN_ON_ONCE(!rt_se->on_list);
1415 		__delist_rt_entity(rt_se, array);
1416 	}
1417 	rt_se->on_rq = 0;
1418 
1419 	dec_rt_tasks(rt_se, rt_rq);
1420 }
1421 
1422 /*
1423  * Because the prio of an upper entry depends on the lower
1424  * entries, we must remove entries top-down.
1425  */
1426 static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
1427 {
1428 	struct sched_rt_entity *back = NULL;
1429 	unsigned int rt_nr_running;
1430 
1431 	for_each_sched_rt_entity(rt_se) {
1432 		rt_se->back = back;
1433 		back = rt_se;
1434 	}
1435 
1436 	rt_nr_running = rt_rq_of_se(back)->rt_nr_running;
1437 
1438 	for (rt_se = back; rt_se; rt_se = rt_se->back) {
1439 		if (on_rt_rq(rt_se))
1440 			__dequeue_rt_entity(rt_se, flags);
1441 	}
1442 
1443 	dequeue_top_rt_rq(rt_rq_of_se(back), rt_nr_running);
1444 }
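
/*
 * Example: for a task in group B, itself a child of group A, the
 * first loop wires up ->back so the chain runs A -> B -> task; the
 * second loop then dequeues A's entity, B's entity, and finally the
 * task itself, top-down as required.
 */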
1445 
1446 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1447 {
1448 	struct rq *rq = rq_of_rt_se(rt_se);
1449 
1450 	update_stats_enqueue_rt(rt_rq_of_se(rt_se), rt_se, flags);
1451 
1452 	dequeue_rt_stack(rt_se, flags);
1453 	for_each_sched_rt_entity(rt_se)
1454 		__enqueue_rt_entity(rt_se, flags);
1455 	enqueue_top_rt_rq(&rq->rt);
1456 }
1457 
1458 static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1459 {
1460 	struct rq *rq = rq_of_rt_se(rt_se);
1461 
1462 	update_stats_dequeue_rt(rt_rq_of_se(rt_se), rt_se, flags);
1463 
1464 	dequeue_rt_stack(rt_se, flags);
1465 
1466 	for_each_sched_rt_entity(rt_se) {
1467 		struct rt_rq *rt_rq = group_rt_rq(rt_se);
1468 
1469 		if (rt_rq && rt_rq->rt_nr_running)
1470 			__enqueue_rt_entity(rt_se, flags);
1471 	}
1472 	enqueue_top_rt_rq(&rq->rt);
1473 }
1474 
1475 /*
1476  * Adding/removing a task to/from a priority array:
1477  */
1478 static void
1479 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1480 {
1481 	struct sched_rt_entity *rt_se = &p->rt;
1482 
1483 	if (flags & ENQUEUE_WAKEUP)
1484 		rt_se->timeout = 0;
1485 
1486 	check_schedstat_required();
1487 	update_stats_wait_start_rt(rt_rq_of_se(rt_se), rt_se);
1488 
1489 	enqueue_rt_entity(rt_se, flags);
1490 
1491 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1492 		enqueue_pushable_task(rq, p);
1493 }
1494 
1495 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1496 {
1497 	struct sched_rt_entity *rt_se = &p->rt;
1498 
1499 	update_curr_rt(rq);
1500 	dequeue_rt_entity(rt_se, flags);
1501 
1502 	dequeue_pushable_task(rq, p);
1503 }
1504 
1505 /*
1506  * Put task to the head or the end of the run list without the overhead of
1507  * dequeue followed by enqueue.
1508  */
1509 static void
1510 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1511 {
1512 	if (on_rt_rq(rt_se)) {
1513 		struct rt_prio_array *array = &rt_rq->active;
1514 		struct list_head *queue = array->queue + rt_se_prio(rt_se);
1515 
1516 		if (head)
1517 			list_move(&rt_se->run_list, queue);
1518 		else
1519 			list_move_tail(&rt_se->run_list, queue);
1520 	}
1521 }
1522 
1523 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1524 {
1525 	struct sched_rt_entity *rt_se = &p->rt;
1526 	struct rt_rq *rt_rq;
1527 
1528 	for_each_sched_rt_entity(rt_se) {
1529 		rt_rq = rt_rq_of_se(rt_se);
1530 		requeue_rt_entity(rt_rq, rt_se, head);
1531 	}
1532 }
1533 
1534 static void yield_task_rt(struct rq *rq)
1535 {
1536 	requeue_task_rt(rq, rq->curr, 0);
1537 }
1538 
1539 #ifdef CONFIG_SMP
1540 static int find_lowest_rq(struct task_struct *task);
1541 
1542 static int
1543 select_task_rq_rt(struct task_struct *p, int cpu, int flags)
1544 {
1545 	struct task_struct *curr;
1546 	struct rq *rq;
1547 	bool test;
1548 
1549 	/* For anything but wake ups, just return the task_cpu */
1550 	/* For anything but wake-ups, just return the task_cpu */
1551 		goto out;
1552 
1553 	rq = cpu_rq(cpu);
1554 
1555 	rcu_read_lock();
1556 	curr = READ_ONCE(rq->curr); /* unlocked access */
1557 
1558 	/*
1559 	 * If the current task on @p's runqueue is an RT task, then
1560 	 * try to see if we can wake this RT task up on another
1561 	 * runqueue. Otherwise simply start this RT task
1562 	 * on its current runqueue.
1563 	 *
1564 	 * We want to avoid overloading runqueues. If the woken
1565 	 * task is of higher priority, then it will stay on this CPU
1566 	 * and the lower prio task should be moved to another CPU.
1567 	 * Even though this will probably make the lower prio task
1568 	 * lose its cache, we do not want to bounce a higher prio task
1569 	 * around just because it gave up its CPU, perhaps for a
1570 	 * lock?
1571 	 *
1572 	 * For equal prio tasks, we just let the scheduler sort it out.
1573 	 *
1574 	 * Otherwise, just let it ride on the affine RQ and the
1575 	 * post-schedule router will push the preempted task away
1576 	 *
1577 	 * This test is optimistic, if we get it wrong the load-balancer
1578 	 * will have to sort it out.
1579 	 *
1580 	 * We take into account the capacity of the CPU to ensure it fits the
1581 	 * requirement of the task - which is only important on heterogeneous
1582 	 * systems like big.LITTLE.
1583 	 */
1584 	test = curr &&
1585 	       unlikely(rt_task(curr)) &&
1586 	       (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio);
1587 
1588 	if (test || !rt_task_fits_capacity(p, cpu)) {
1589 		int target = find_lowest_rq(p);
1590 
1591 		/*
1592 		 * Bail out if we were forcing a migration to find a better
1593 		 * fitting CPU but our search failed.
1594 		 */
1595 		if (!test && target != -1 && !rt_task_fits_capacity(p, target))
1596 			goto out_unlock;
1597 
1598 		/*
1599 		 * Don't bother moving it if the destination CPU is
1600 		 * not running a lower priority task.
1601 		 */
1602 		if (target != -1 &&
1603 		    p->prio < cpu_rq(target)->rt.highest_prio.curr)
1604 			cpu = target;
1605 	}
1606 
1607 out_unlock:
1608 	rcu_read_unlock();
1609 
1610 out:
1611 	return cpu;
1612 }
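
/*
 * Example: p (prio 90) wakes on a CPU whose current is an RT task at
 * prio 85 (numerically lower means more important). The test above
 * fires, and if find_lowest_rq() finds a CPU whose top RT priority
 * is less important than p's, the wakeup is redirected there rather
 * than queueing p behind the more important task.
 */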
1613 
1614 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1615 {
1616 	/*
1617 	 * Current can't be migrated, useless to reschedule,
1618 	 * let's hope p can move out.
1619 	 */
1620 	if (rq->curr->nr_cpus_allowed == 1 ||
1621 	    !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1622 		return;
1623 
1624 	/*
1625 	 * p is migratable, so let's not schedule it and
1626 	 * see if it is pushed or pulled somewhere else.
1627 	 */
1628 	if (p->nr_cpus_allowed != 1 &&
1629 	    cpupri_find(&rq->rd->cpupri, p, NULL))
1630 		return;
1631 
1632 	/*
1633 	 * There appear to be other CPUs that can accept
1634 	 * the current task but none can run 'p', so let's reschedule
1635 	 * to try and push the current task away:
1636 	 */
1637 	requeue_task_rt(rq, p, 1);
1638 	resched_curr(rq);
1639 }
1640 
1641 static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1642 {
1643 	if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
1644 		/*
1645 		 * This is OK, because current is on_cpu, which avoids it being
1646 		 * picked for load-balance and preemption/IRQs are still
1647 		 * disabled avoiding further scheduler activity on it and we've
1648 		 * not yet started the picking loop.
1649 		 */
1650 		rq_unpin_lock(rq, rf);
1651 		pull_rt_task(rq);
1652 		rq_repin_lock(rq, rf);
1653 	}
1654 
1655 	return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq);
1656 }
1657 #endif /* CONFIG_SMP */
1658 
1659 /*
1660  * Preempt the current task with a newly woken task if needed:
1661  */
1662 static void wakeup_preempt_rt(struct rq *rq, struct task_struct *p, int flags)
1663 {
1664 	if (p->prio < rq->curr->prio) {
1665 		resched_curr(rq);
1666 		return;
1667 	}
1668 
1669 #ifdef CONFIG_SMP
1670 	/*
1671 	 * If:
1672 	 *
1673 	 * - the newly woken task is of equal priority to the current task
1674 	 * - the newly woken task is non-migratable while current is migratable
1675 	 * - current will be preempted on the next reschedule
1676 	 *
1677 	 * we should check to see if current can readily move to a different
1678 	 * cpu.  If so, we will reschedule to allow the push logic to try
1679 	 * to move current somewhere else, making room for our non-migratable
1680 	 * task.
1681 	 */
1682 	if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1683 		check_preempt_equal_prio(rq, p);
1684 #endif
1685 }
1686 
1687 static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
1688 {
1689 	struct sched_rt_entity *rt_se = &p->rt;
1690 	struct rt_rq *rt_rq = &rq->rt;
1691 
1692 	p->se.exec_start = rq_clock_task(rq);
1693 	if (on_rt_rq(&p->rt))
1694 		update_stats_wait_end_rt(rt_rq, rt_se);
1695 
1696 	/* The running task is never eligible for pushing */
1697 	dequeue_pushable_task(rq, p);
1698 
1699 	if (!first)
1700 		return;
1701 
1702 	/*
1703 	 * If prev task was rt, put_prev_task() has already updated the
1704 	 * utilization. We only care about the case where we start to
1705 	 * schedule an rt task.
1706 	 */
1707 	if (rq->curr->sched_class != &rt_sched_class)
1708 		update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
1709 
1710 	rt_queue_push_tasks(rq);
1711 }
1712 
1713 static struct sched_rt_entity *pick_next_rt_entity(struct rt_rq *rt_rq)
1714 {
1715 	struct rt_prio_array *array = &rt_rq->active;
1716 	struct sched_rt_entity *next = NULL;
1717 	struct list_head *queue;
1718 	int idx;
1719 
1720 	idx = sched_find_first_bit(array->bitmap);
1721 	BUG_ON(idx >= MAX_RT_PRIO);
1722 
1723 	queue = array->queue + idx;
1724 	if (SCHED_WARN_ON(list_empty(queue)))
1725 		return NULL;
1726 	next = list_entry(queue->next, struct sched_rt_entity, run_list);
1727 
1728 	return next;
1729 }
1730 
1731 static struct task_struct *_pick_next_task_rt(struct rq *rq)
1732 {
1733 	struct sched_rt_entity *rt_se;
1734 	struct rt_rq *rt_rq  = &rq->rt;
1735 
1736 	do {
1737 		rt_se = pick_next_rt_entity(rt_rq);
1738 		if (unlikely(!rt_se))
1739 			return NULL;
1740 		rt_rq = group_rt_rq(rt_se);
1741 	} while (rt_rq);
1742 
1743 	return rt_task_of(rt_se);
1744 }
1745 
1746 static struct task_struct *pick_task_rt(struct rq *rq)
1747 {
1748 	struct task_struct *p;
1749 
1750 	if (!sched_rt_runnable(rq))
1751 		return NULL;
1752 
1753 	p = _pick_next_task_rt(rq);
1754 
1755 	return p;
1756 }
1757 
1758 static struct task_struct *pick_next_task_rt(struct rq *rq)
1759 {
1760 	struct task_struct *p = pick_task_rt(rq);
1761 
1762 	if (p)
1763 		set_next_task_rt(rq, p, true);
1764 
1765 	return p;
1766 }
1767 
1768 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1769 {
1770 	struct sched_rt_entity *rt_se = &p->rt;
1771 	struct rt_rq *rt_rq = &rq->rt;
1772 
1773 	if (on_rt_rq(&p->rt))
1774 		update_stats_wait_start_rt(rt_rq, rt_se);
1775 
1776 	update_curr_rt(rq);
1777 
1778 	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
1779 
1780 	/*
1781 	 * The previous task needs to be made eligible for pushing
1782 	 * if it is still active
1783 	 */
1784 	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1785 		enqueue_pushable_task(rq, p);
1786 }
1787 
1788 #ifdef CONFIG_SMP
1789 
1790 /* Only try algorithms three times */
1791 #define RT_MAX_TRIES 3
1792 
1793 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1794 {
1795 	if (!task_on_cpu(rq, p) &&
1796 	    cpumask_test_cpu(cpu, &p->cpus_mask))
1797 		return 1;
1798 
1799 	return 0;
1800 }
1801 
1802 /*
1803  * Return the highest-priority pushable task on this rq that is
1804  * suitable to run on @cpu, or NULL if there is none.
1805  */
1806 static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1807 {
1808 	struct plist_head *head = &rq->rt.pushable_tasks;
1809 	struct task_struct *p;
1810 
1811 	if (!has_pushable_tasks(rq))
1812 		return NULL;
1813 
1814 	plist_for_each_entry(p, head, pushable_tasks) {
1815 		if (pick_rt_task(rq, p, cpu))
1816 			return p;
1817 	}
1818 
1819 	return NULL;
1820 }
1821 
1822 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1823 
1824 static int find_lowest_rq(struct task_struct *task)
1825 {
1826 	struct sched_domain *sd;
1827 	struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
1828 	int this_cpu = smp_processor_id();
1829 	int cpu      = task_cpu(task);
1830 	int ret;
1831 
1832 	/* Make sure the mask is initialized first */
1833 	if (unlikely(!lowest_mask))
1834 		return -1;
1835 
1836 	if (task->nr_cpus_allowed == 1)
1837 		return -1; /* No other targets possible */
1838 
1839 	/*
1840 	 * If we're on an asym system, ensure we consider the different capacities
1841 	 * of the CPUs when searching for the lowest_mask.
1842 	 */
1843 	if (sched_asym_cpucap_active()) {
1844 
1845 		ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,
1846 					  task, lowest_mask,
1847 					  rt_task_fits_capacity);
1848 	} else {
1849 
1850 		ret = cpupri_find(&task_rq(task)->rd->cpupri,
1851 				  task, lowest_mask);
1852 	}
1853 
1854 	if (!ret)
1855 		return -1; /* No targets found */
1856 
1857 	/*
1858 	 * At this point we have built a mask of CPUs representing the
1859 	 * lowest priority tasks in the system.  Now we want to elect
1860 	 * the best one based on our affinity and topology.
1861 	 *
1862 	 * We prioritize the last CPU that the task executed on since
1863 	 * it is most likely cache-hot in that location.
1864 	 */
1865 	if (cpumask_test_cpu(cpu, lowest_mask))
1866 		return cpu;
1867 
1868 	/*
1869 	 * Otherwise, we consult the sched_domains span maps to figure
1870 	 * out which CPU is logically closest to our hot cache data.
1871 	 */
1872 	if (!cpumask_test_cpu(this_cpu, lowest_mask))
1873 		this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1874 
1875 	rcu_read_lock();
1876 	for_each_domain(cpu, sd) {
1877 		if (sd->flags & SD_WAKE_AFFINE) {
1878 			int best_cpu;
1879 
1880 			/*
1881 			 * "this_cpu" is cheaper to preempt than a
1882 			 * remote processor.
1883 			 */
1884 			if (this_cpu != -1 &&
1885 			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1886 				rcu_read_unlock();
1887 				return this_cpu;
1888 			}
1889 
1890 			best_cpu = cpumask_any_and_distribute(lowest_mask,
1891 							      sched_domain_span(sd));
1892 			if (best_cpu < nr_cpu_ids) {
1893 				rcu_read_unlock();
1894 				return best_cpu;
1895 			}
1896 		}
1897 	}
1898 	rcu_read_unlock();
1899 
1900 	/*
1901 	 * And finally, if there were no matches within the domains
1902 	 * just give the caller *something* to work with from the compatible
1903 	 * locations.
1904 	 */
1905 	if (this_cpu != -1)
1906 		return this_cpu;
1907 
1908 	cpu = cpumask_any_distribute(lowest_mask);
1909 	if (cpu < nr_cpu_ids)
1910 		return cpu;
1911 
1912 	return -1;
1913 }
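
/*
 * Search order illustrated: (1) the task's previous CPU if it is in
 * lowest_mask (likely cache-hot), (2) this_cpu if it is also in
 * lowest_mask and shares a SD_WAKE_AFFINE domain with the previous
 * CPU, (3) any lowest_mask CPU within such a domain, (4) this_cpu
 * as a fallback, (5) any CPU from lowest_mask, and otherwise -1
 * for "no candidate".
 */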
1914 
1915 /* Will lock the rq it finds */
1916 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1917 {
1918 	struct rq *lowest_rq = NULL;
1919 	int tries;
1920 	int cpu;
1921 
1922 	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1923 		cpu = find_lowest_rq(task);
1924 
1925 		if ((cpu == -1) || (cpu == rq->cpu))
1926 			break;
1927 
1928 		lowest_rq = cpu_rq(cpu);
1929 
1930 		if (lowest_rq->rt.highest_prio.curr <= task->prio) {
1931 			/*
1932 			 * Target rq has tasks of equal or higher priority,
1933 			 * retrying does not release any lock and is unlikely
1934 			 * to yield a different result.
1935 			 */
1936 			lowest_rq = NULL;
1937 			break;
1938 		}
1939 
1940 		/* if the prio of this runqueue changed, try again */
1941 		if (double_lock_balance(rq, lowest_rq)) {
1942 			/*
1943 			 * We had to unlock the run queue. In
1944 			 * the meantime, the task could have
1945 			 * migrated already or had its affinity changed.
1946 			 * Also make sure that it wasn't scheduled on its rq.
1947 			 * It is possible the task was scheduled, set
1948 			 * "migrate_disabled" and then got preempted, so we must
1949 			 * check the task migration disable flag here too.
1950 			 */
1951 			if (unlikely(task_rq(task) != rq ||
1952 				     !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) ||
1953 				     task_on_cpu(rq, task) ||
1954 				     !rt_task(task) ||
1955 				     is_migration_disabled(task) ||
1956 				     !task_on_rq_queued(task))) {
1957 
1958 				double_unlock_balance(rq, lowest_rq);
1959 				lowest_rq = NULL;
1960 				break;
1961 			}
1962 		}
1963 
1964 		/* If this rq is still suitable use it. */
1965 		if (lowest_rq->rt.highest_prio.curr > task->prio)
1966 			break;
1967 
1968 		/* try again */
1969 		double_unlock_balance(rq, lowest_rq);
1970 		lowest_rq = NULL;
1971 	}
1972 
1973 	return lowest_rq;
1974 }
1975 
1976 static struct task_struct *pick_next_pushable_task(struct rq *rq)
1977 {
1978 	struct task_struct *p;
1979 
1980 	if (!has_pushable_tasks(rq))
1981 		return NULL;
1982 
1983 	p = plist_first_entry(&rq->rt.pushable_tasks,
1984 			      struct task_struct, pushable_tasks);
1985 
1986 	BUG_ON(rq->cpu != task_cpu(p));
1987 	BUG_ON(task_current(rq, p));
1988 	BUG_ON(p->nr_cpus_allowed <= 1);
1989 
1990 	BUG_ON(!task_on_rq_queued(p));
1991 	BUG_ON(!rt_task(p));
1992 
1993 	return p;
1994 }
1995 
1996 /*
1997  * If the current CPU has more than one RT task, see if the non-
1998  * running task can migrate over to a CPU that is running a task
1999  * of lesser priority.
2000  */
2001 static int push_rt_task(struct rq *rq, bool pull)
2002 {
2003 	struct task_struct *next_task;
2004 	struct rq *lowest_rq;
2005 	int ret = 0;
2006 
2007 	if (!rq->rt.overloaded)
2008 		return 0;
2009 
2010 	next_task = pick_next_pushable_task(rq);
2011 	if (!next_task)
2012 		return 0;
2013 
2014 retry:
2015 	/*
2016 	 * It's possible that next_task slipped in with a
2017 	 * higher priority than current. If that's the case,
2018 	 * just reschedule current.
2019 	 */
2020 	if (unlikely(next_task->prio < rq->curr->prio)) {
2021 		resched_curr(rq);
2022 		return 0;
2023 	}
2024 
2025 	if (is_migration_disabled(next_task)) {
2026 		struct task_struct *push_task = NULL;
2027 		int cpu;
2028 
2029 		if (!pull || rq->push_busy)
2030 			return 0;
2031 
2032 		/*
2033 		 * Invoking find_lowest_rq() on anything but an RT task doesn't
2034 		 * make sense. Per the above priority check, curr has to
2035 		 * be of equal or higher priority than next_task, so there
2036 		 * is no need to reschedule when bailing out.
2037 		 *
2038 		 * Note that the stoppers are masqueraded as SCHED_FIFO
2039 		 * (cf. sched_set_stop_task()), so we can't rely on rt_task().
2040 		 */
2041 		if (rq->curr->sched_class != &rt_sched_class)
2042 			return 0;
2043 
2044 		cpu = find_lowest_rq(rq->curr);
2045 		if (cpu == -1 || cpu == rq->cpu)
2046 			return 0;
2047 
2048 		/*
2049 		 * Since we found a CPU with lower priority than @next_task,
2050 		 * @next_task should be running. However, we cannot migrate it
2051 		 * to that other CPU; instead attempt to push away the task
2052 		 * currently running on this CPU.
2053 		 */
2054 		push_task = get_push_task(rq);
2055 		if (push_task) {
2056 			preempt_disable();
2057 			raw_spin_rq_unlock(rq);
2058 			stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
2059 					    push_task, &rq->push_work);
2060 			preempt_enable();
2061 			raw_spin_rq_lock(rq);
2062 		}
2063 
2064 		return 0;
2065 	}
2066 
2067 	if (WARN_ON(next_task == rq->curr))
2068 		return 0;
2069 
2070 	/* We might release rq lock */
2071 	get_task_struct(next_task);
2072 
2073 	/* find_lock_lowest_rq locks the rq if found */
2074 	lowest_rq = find_lock_lowest_rq(next_task, rq);
2075 	if (!lowest_rq) {
2076 		struct task_struct *task;
2077 		/*
2078 		 * find_lock_lowest_rq releases rq->lock
2079 		 * so it is possible that next_task has migrated.
2080 		 *
2081 		 * We need to make sure that the task is still on the same
2082 		 * run-queue and is also still the next task eligible for
2083 		 * pushing.
2084 		 */
2085 		task = pick_next_pushable_task(rq);
2086 		if (task == next_task) {
2087 			/*
2088 			 * The task hasn't migrated, and is still the next
2089 			 * eligible task, but we failed to find a run-queue
2090 			 * to push it to.  Do not retry in this case, since
2091 			 * other CPUs will pull from us when ready.
2092 			 */
2093 			goto out;
2094 		}
2095 
2096 		if (!task)
2097 			/* No more tasks, just exit */
2098 			goto out;
2099 
2100 		/*
2101 		 * Something has shifted, try again.
2102 		 */
2103 		put_task_struct(next_task);
2104 		next_task = task;
2105 		goto retry;
2106 	}
2107 
2108 	deactivate_task(rq, next_task, 0);
2109 	set_task_cpu(next_task, lowest_rq->cpu);
2110 	activate_task(lowest_rq, next_task, 0);
2111 	resched_curr(lowest_rq);
2112 	ret = 1;
2113 
2114 	double_unlock_balance(rq, lowest_rq);
2115 out:
2116 	put_task_struct(next_task);
2117 
2118 	return ret;
2119 }
2120 
2121 static void push_rt_tasks(struct rq *rq)
2122 {
2123 	/* push_rt_task() will return true if it moved an RT task */
2124 	while (push_rt_task(rq, false))
2125 		;
2126 }
2127 
2128 #ifdef HAVE_RT_PUSH_IPI
2129 
2130 /*
2131  * When a high priority task schedules out from a CPU and a lower priority
2132  * task is scheduled in, a check is made to see if there are any RT tasks
2133  * on other CPUs that are waiting to run because a higher priority RT task
2134  * is currently running on its CPU. In this case, the CPU with multiple RT
2135  * tasks queued on it (overloaded) needs to be notified that a CPU has opened
2136  * up that may be able to run one of its non-running queued RT tasks.
2137  *
2138  * All CPUs with overloaded RT tasks need to be notified, as there is currently
2139  * no way to know which of these CPUs has the highest priority task waiting
2140  * to run. Instead of trying to take a spinlock on each of these CPUs,
2141  * which has been shown to cause large latency on machines with many
2142  * CPUs, an IPI is sent to the CPUs to have them push off the overloaded
2143  * RT tasks waiting to run.
2144  *
2145  * Just sending an IPI to each of the CPUs is also an issue, as on machines
2146  * with a large CPU count this can cause an IPI storm on a CPU, especially
2147  * if it's the only CPU with multiple RT tasks queued and a large number
2148  * of CPUs schedule a lower priority task at the same time.
2149  *
2150  * Each root domain has its own IRQ work function that can iterate over
2151  * all CPUs with RT overloaded tasks. Since every CPU with overloaded RT
2152  * tasks must be checked, whether one or many CPUs are lowering
2153  * their priority, there's a single IRQ work iterator that will try to
2154  * push off the RT tasks that are waiting to run.
2155  *
2156  * When a CPU schedules a lower priority task, it will kick off the
2157  * IRQ work iterator that will jump to each CPU with overloaded RT tasks.
2158  * As it only takes the first CPU that schedules a lower priority task
2159  * to start the process, the rto_start variable is incremented and if
2160  * the atomic result is one, then that CPU will try to take the rto_lock.
2161  * This prevents high contention on the lock as the process handles all
2162  * CPUs scheduling lower priority tasks.
2163  *
2164  * All CPUs that are scheduling a lower priority task will increment the
2165  * rto_loop_next variable. This will make sure that the IRQ work iterator
2166  * checks all RT overloaded CPUs whenever a CPU schedules a new lower
2167  * priority task, even if the iterator is in the middle of a scan.
2168  * Incrementing rto_loop_next will cause the iterator to perform another
2169  * scan.
2170  */
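/*
 * A rough sketch of the resulting IPI chain (illustrative only):
 *
 *   CPU lowering its priority:
 *     tell_cpu_to_push()
 *       -> rto_loop_next++              (keeps a running scan going)
 *       -> rto_start_trylock()          (only the first caller proceeds)
 *       -> rto_next_cpu()               (first CPU in rto_mask)
 *       -> irq_work_queue_on(cpu)
 *
 *   Overloaded CPU receiving the IRQ work:
 *     rto_push_irq_work_func()
 *       -> push_rt_task() in a loop     (push off waiting RT tasks)
 *       -> rto_next_cpu()               (pass the IPI along, or stop once
 *       -> irq_work_queue_on(cpu)        rto_loop catches rto_loop_next)
 */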
2171 static int rto_next_cpu(struct root_domain *rd)
2172 {
2173 	int next;
2174 	int cpu;
2175 
2176 	/*
2177 	 * When starting the IPI RT pushing, the rto_cpu is set to -1,
2178 	 * and rto_next_cpu() will simply return the first CPU found in
2179 	 * the rto_mask.
2180 	 *
2181 	 * If rto_next_cpu() is called while rto_cpu is a valid CPU, it
2182 	 * will return the next CPU found in the rto_mask.
2183 	 *
2184 	 * If there are no more CPUs left in the rto_mask, then a check is made
2185 	 * against rto_loop and rto_loop_next. rto_loop is only updated with
2186 	 * the rto_lock held, but any CPU may increment the rto_loop_next
2187 	 * without any locking.
2188 	 */
2189 	for (;;) {
2190 
2191 		/* When rto_cpu is -1 this acts like cpumask_first() */
2192 		cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
2193 
2194 		rd->rto_cpu = cpu;
2195 
2196 		if (cpu < nr_cpu_ids)
2197 			return cpu;
2198 
2199 		rd->rto_cpu = -1;
2200 
2201 		/*
2202 		 * ACQUIRE ensures we see the @rto_mask changes
2203 		 * made prior to the @next value observed.
2204 		 *
2205 		 * Matches WMB in rt_set_overload().
2206 		 */
2207 		next = atomic_read_acquire(&rd->rto_loop_next);
2208 
2209 		if (rd->rto_loop == next)
2210 			break;
2211 
2212 		rd->rto_loop = next;
2213 	}
2214 
2215 	return -1;
2216 }
2217 
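/*
 * rto_loop_start acts as a simple test-and-set lock: the first caller to
 * flip it from 0 to 1 may start the IPI loop; everybody else bails out.
 */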
2218 static inline bool rto_start_trylock(atomic_t *v)
2219 {
2220 	return !atomic_cmpxchg_acquire(v, 0, 1);
2221 }
2222 
2223 static inline void rto_start_unlock(atomic_t *v)
2224 {
2225 	atomic_set_release(v, 0);
2226 }
2227 
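/*
 * Called when this CPU schedules a lower priority task: start (or keep
 * alive) the IPI chain that asks the overloaded CPUs to push off their
 * waiting RT tasks.
 */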
2228 static void tell_cpu_to_push(struct rq *rq)
2229 {
2230 	int cpu = -1;
2231 
2232 	/* Keep the loop going if the IPI is currently active */
2233 	atomic_inc(&rq->rd->rto_loop_next);
2234 
2235 	/* Only one CPU can initiate a loop at a time */
2236 	if (!rto_start_trylock(&rq->rd->rto_loop_start))
2237 		return;
2238 
2239 	raw_spin_lock(&rq->rd->rto_lock);
2240 
2241 	/*
2242 	 * The rto_cpu is updated under the lock. If it holds a valid CPU,
2243 	 * then the IPI is still running and will continue due to the
2244 	 * update to rto_loop_next, and nothing needs to be done here.
2245 	 * Otherwise it is finishing up and an IPI needs to be sent.
2246 	 */
2247 	if (rq->rd->rto_cpu < 0)
2248 		cpu = rto_next_cpu(rq->rd);
2249 
2250 	raw_spin_unlock(&rq->rd->rto_lock);
2251 
2252 	rto_start_unlock(&rq->rd->rto_loop_start);
2253 
2254 	if (cpu >= 0) {
2255 		/* Make sure the rd does not get freed while pushing */
2256 		sched_get_rd(rq->rd);
2257 		irq_work_queue_on(&rq->rd->rto_push_work, cpu);
2258 	}
2259 }
2260 
2261 /* Called from hardirq context */
2262 void rto_push_irq_work_func(struct irq_work *work)
2263 {
2264 	struct root_domain *rd =
2265 		container_of(work, struct root_domain, rto_push_work);
2266 	struct rq *rq;
2267 	int cpu;
2268 
2269 	rq = this_rq();
2270 
2271 	/*
2272 	 * We do not need to grab the lock to check has_pushable_tasks():
2273 	 * when the list gets updated, a check is made whether a push is possible.
2274 	 */
2275 	if (has_pushable_tasks(rq)) {
2276 		raw_spin_rq_lock(rq);
2277 		while (push_rt_task(rq, true))
2278 			;
2279 		raw_spin_rq_unlock(rq);
2280 	}
2281 
2282 	raw_spin_lock(&rd->rto_lock);
2283 
2284 	/* Pass the IPI to the next rt overloaded queue */
2285 	cpu = rto_next_cpu(rd);
2286 
2287 	raw_spin_unlock(&rd->rto_lock);
2288 
2289 	if (cpu < 0) {
2290 		sched_put_rd(rd);
2291 		return;
2292 	}
2293 
2294 	/* Try the next RT overloaded CPU */
2295 	irq_work_queue_on(&rd->rto_push_work, cpu);
2296 }
2297 #endif /* HAVE_RT_PUSH_IPI */
2298 
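/*
 * Look at the other runqueues in our root domain that are RT overloaded
 * and try to pull over a task of higher priority than what we are
 * currently running. A migration-disabled candidate cannot be pulled;
 * instead its own CPU is asked, via the stopper, to push it away.
 */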
2299 static void pull_rt_task(struct rq *this_rq)
2300 {
2301 	int this_cpu = this_rq->cpu, cpu;
2302 	bool resched = false;
2303 	struct task_struct *p, *push_task;
2304 	struct rq *src_rq;
2305 	int rt_overload_count = rt_overloaded(this_rq);
2306 
2307 	if (likely(!rt_overload_count))
2308 		return;
2309 
2310 	/*
2311 	 * Match the barrier from rt_set_overload(); this guarantees that if we
2312 	 * see overloaded we must also see the rto_mask bit.
2313 	 */
2314 	smp_rmb();
2315 
2316 	/* If we are the only overloaded CPU do nothing */
2317 	if (rt_overload_count == 1 &&
2318 	    cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
2319 		return;
2320 
2321 #ifdef HAVE_RT_PUSH_IPI
2322 	if (sched_feat(RT_PUSH_IPI)) {
2323 		tell_cpu_to_push(this_rq);
2324 		return;
2325 	}
2326 #endif
2327 
2328 	for_each_cpu(cpu, this_rq->rd->rto_mask) {
2329 		if (this_cpu == cpu)
2330 			continue;
2331 
2332 		src_rq = cpu_rq(cpu);
2333 
2334 		/*
2335 		 * Don't bother taking the src_rq->lock if the next highest
2336 		 * task is known to be lower-priority than our current task.
2337 		 * This may look racy, but if this value is about to go
2338 		 * logically higher, the src_rq will push this task away.
2339 		 * And if it's going logically lower, we do not care.
2340 		 */
2341 		if (src_rq->rt.highest_prio.next >=
2342 		    this_rq->rt.highest_prio.curr)
2343 			continue;
2344 
2345 		/*
2346 		 * We can potentially drop this_rq's lock in
2347 		 * double_lock_balance, and another CPU could
2348 		 * alter this_rq
2349 		 */
2350 		push_task = NULL;
2351 		double_lock_balance(this_rq, src_rq);
2352 
2353 		/*
2354 		 * We can only pull a task that is pushable
2355 		 * on its rq, and no others.
2356 		 */
2357 		p = pick_highest_pushable_task(src_rq, this_cpu);
2358 
2359 		/*
2360 		 * Do we have an RT task that preempts
2361 		 * the to-be-scheduled task?
2362 		 */
2363 		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
2364 			WARN_ON(p == src_rq->curr);
2365 			WARN_ON(!task_on_rq_queued(p));
2366 
2367 			/*
2368 			 * There's a chance that p is higher in priority
2369 			 * than what's currently running on its CPU.
2370 			 * This can happen when p is waking up and hasn't
2371 			 * had a chance to schedule yet. We only pull
2372 			 * p if it is lower in priority than the
2373 			 * current task on its run queue.
2374 			 */
2375 			if (p->prio < src_rq->curr->prio)
2376 				goto skip;
2377 
2378 			if (is_migration_disabled(p)) {
2379 				push_task = get_push_task(src_rq);
2380 			} else {
2381 				deactivate_task(src_rq, p, 0);
2382 				set_task_cpu(p, this_cpu);
2383 				activate_task(this_rq, p, 0);
2384 				resched = true;
2385 			}
2386 			/*
2387 			 * We continue with the search, just in
2388 			 * case there's an even higher prio task
2389 			 * in another runqueue. (low likelihood
2390 			 * but possible)
2391 			 */
2392 		}
2393 skip:
2394 		double_unlock_balance(this_rq, src_rq);
2395 
2396 		if (push_task) {
2397 			preempt_disable();
2398 			raw_spin_rq_unlock(this_rq);
2399 			stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
2400 					    push_task, &src_rq->push_work);
2401 			preempt_enable();
2402 			raw_spin_rq_lock(this_rq);
2403 		}
2404 	}
2405 
2406 	if (resched)
2407 		resched_curr(this_rq);
2408 }
2409 
2410 /*
2411  * If we are not running and we are not going to reschedule soon, we should
2412  * try to push tasks away now.
2413  */
2414 static void task_woken_rt(struct rq *rq, struct task_struct *p)
2415 {
2416 	bool need_to_push = !task_on_cpu(rq, p) &&
2417 			    !test_tsk_need_resched(rq->curr) &&
2418 			    p->nr_cpus_allowed > 1 &&
2419 			    (dl_task(rq->curr) || rt_task(rq->curr)) &&
2420 			    (rq->curr->nr_cpus_allowed < 2 ||
2421 			     rq->curr->prio <= p->prio);
2422 
2423 	if (need_to_push)
2424 		push_rt_tasks(rq);
2425 }
2426 
2427 /* Assumes rq->lock is held */
2428 static void rq_online_rt(struct rq *rq)
2429 {
2430 	if (rq->rt.overloaded)
2431 		rt_set_overload(rq);
2432 
2433 	__enable_runtime(rq);
2434 
2435 	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2436 }
2437 
2438 /* Assumes rq->lock is held */
2439 static void rq_offline_rt(struct rq *rq)
2440 {
2441 	if (rq->rt.overloaded)
2442 		rt_clear_overload(rq);
2443 
2444 	__disable_runtime(rq);
2445 
2446 	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
2447 }
2448 
2449 /*
2450  * When switching from the rt queue, we bring ourselves into a position
2451  * where we might want to pull RT tasks from other runqueues.
2452  */
2453 static void switched_from_rt(struct rq *rq, struct task_struct *p)
2454 {
2455 	/*
2456 	 * If there are other RT tasks then we will reschedule
2457 	 * and the scheduling of the other RT tasks will handle
2458 	 * the balancing. But if we are the last RT task
2459 	 * we may need to handle the pulling of RT tasks
2460 	 * now.
2461 	 */
2462 	if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
2463 		return;
2464 
2465 	rt_queue_pull_task(rq);
2466 }
2467 
2468 void __init init_sched_rt_class(void)
2469 {
2470 	unsigned int i;
2471 
2472 	for_each_possible_cpu(i) {
2473 		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
2474 					GFP_KERNEL, cpu_to_node(i));
2475 	}
2476 }
2477 #endif /* CONFIG_SMP */
2478 
2479 /*
2480  * When switching a task to RT, we may overload the runqueue
2481  * with RT tasks. In this case we try to push them off to
2482  * other runqueues.
2483  */
2484 static void switched_to_rt(struct rq *rq, struct task_struct *p)
2485 {
2486 	/*
2487 	 * If we are running, update the avg_rt tracking, as the running time
2488 	 * will from now on be accounted there.
2489 	 */
2490 	if (task_current(rq, p)) {
2491 		update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
2492 		return;
2493 	}
2494 
2495 	/*
2496 	 * If we are not running we may need to preempt the current
2497 	 * running task. If that current running task is also an RT task
2498 	 * then see if we can move to another run queue.
2499 	 */
2500 	if (task_on_rq_queued(p)) {
2501 #ifdef CONFIG_SMP
2502 		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
2503 			rt_queue_push_tasks(rq);
2504 #endif /* CONFIG_SMP */
2505 		if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
2506 			resched_curr(rq);
2507 	}
2508 }
2509 
2510 /*
2511  * Priority of the task has changed. This may cause
2512  * us to initiate a push or pull.
2513  */
2514 static void
2515 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
2516 {
2517 	if (!task_on_rq_queued(p))
2518 		return;
2519 
2520 	if (task_current(rq, p)) {
2521 #ifdef CONFIG_SMP
2522 		/*
2523 		 * If our priority decreases while running, we
2524 		 * may need to pull tasks to this runqueue.
2525 		 */
2526 		if (oldprio < p->prio)
2527 			rt_queue_pull_task(rq);
2528 
2529 		/*
2530 		 * If there's a higher priority task waiting to run
2531 		 * then reschedule.
2532 		 */
2533 		if (p->prio > rq->rt.highest_prio.curr)
2534 			resched_curr(rq);
2535 #else
2536 		/* For UP simply resched on drop of prio */
2537 		if (oldprio < p->prio)
2538 			resched_curr(rq);
2539 #endif /* CONFIG_SMP */
2540 	} else {
2541 		/*
2542 		 * This task is not running, but if its priority is
2543 		 * higher than that of the current running task,
2544 		 * then reschedule.
2545 		 */
2546 		if (p->prio < rq->curr->prio)
2547 			resched_curr(rq);
2548 	}
2549 }
2550 
2551 #ifdef CONFIG_POSIX_TIMERS
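/*
 * RLIMIT_RTTIME watchdog: count the ticks this RT task has spent running
 * and kick the POSIX CPU-timer machinery once the soft limit is exceeded.
 * A hypothetical userspace sketch of arming the limit (not taken from
 * this file):
 *
 *	struct rlimit rl = { .rlim_cur = 500000, .rlim_max = 1000000 };
 *	setrlimit(RLIMIT_RTTIME, &rl);	(limits are in us of CPU time)
 */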
2552 static void watchdog(struct rq *rq, struct task_struct *p)
2553 {
2554 	unsigned long soft, hard;
2555 
2556 	/* max may change after cur was read; this will be fixed on the next tick */
2557 	soft = task_rlimit(p, RLIMIT_RTTIME);
2558 	hard = task_rlimit_max(p, RLIMIT_RTTIME);
2559 
2560 	if (soft != RLIM_INFINITY) {
2561 		unsigned long next;
2562 
2563 		if (p->rt.watchdog_stamp != jiffies) {
2564 			p->rt.timeout++;
2565 			p->rt.watchdog_stamp = jiffies;
2566 		}
2567 
2568 		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
2569 		if (p->rt.timeout > next) {
2570 			posix_cputimers_rt_watchdog(&p->posix_cputimers,
2571 						    p->se.sum_exec_runtime);
2572 		}
2573 	}
2574 }
2575 #else
2576 static inline void watchdog(struct rq *rq, struct task_struct *p) { }
2577 #endif
2578 
2579 /*
2580  * scheduler tick hitting a task of our scheduling class.
2581  *
2582  * NOTE: This function can be called remotely by the tick offload that
2583  * goes along with full dynticks. Therefore no local assumption can be made
2584  * and everything must be accessed through the @rq and @curr passed in
2585  * parameters.
2586  */
2587 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2588 {
2589 	struct sched_rt_entity *rt_se = &p->rt;
2590 
2591 	update_curr_rt(rq);
2592 	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
2593 
2594 	watchdog(rq, p);
2595 
2596 	/*
2597 	 * RR tasks need a special form of time-slice management.
2598 	 * FIFO tasks have no timeslices.
2599 	 */
2600 	if (p->policy != SCHED_RR)
2601 		return;
2602 
2603 	if (--p->rt.time_slice)
2604 		return;
2605 
2606 	p->rt.time_slice = sched_rr_timeslice;
2607 
2608 	/*
2609 	 * Requeue to the end of the queue if we (and all of our ancestors) are
2610 	 * not the only element on the queue.
2611 	 */
2612 	for_each_sched_rt_entity(rt_se) {
2613 		if (rt_se->run_list.prev != rt_se->run_list.next) {
2614 			requeue_task_rt(rq, p, 0);
2615 			resched_curr(rq);
2616 			return;
2617 		}
2618 	}
2619 }
2620 
2621 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2622 {
2623 	/*
2624 	 * Time slice is 0 for SCHED_FIFO tasks
2625 	 */
2626 	if (task->policy == SCHED_RR)
2627 		return sched_rr_timeslice;
2628 	else
2629 		return 0;
2630 }
2631 
2632 #ifdef CONFIG_SCHED_CORE
2633 static int task_is_throttled_rt(struct task_struct *p, int cpu)
2634 {
2635 	struct rt_rq *rt_rq;
2636 
2637 #ifdef CONFIG_RT_GROUP_SCHED
2638 	rt_rq = task_group(p)->rt_rq[cpu];
2639 #else
2640 	rt_rq = &cpu_rq(cpu)->rt;
2641 #endif
2642 
2643 	return rt_rq_throttled(rt_rq);
2644 }
2645 #endif
2646 
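/*
 * The sched_class method table that hooks the SCHED_FIFO/SCHED_RR
 * policies into the core scheduler.
 */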
2647 DEFINE_SCHED_CLASS(rt) = {
2648 
2649 	.enqueue_task		= enqueue_task_rt,
2650 	.dequeue_task		= dequeue_task_rt,
2651 	.yield_task		= yield_task_rt,
2652 
2653 	.wakeup_preempt		= wakeup_preempt_rt,
2654 
2655 	.pick_next_task		= pick_next_task_rt,
2656 	.put_prev_task		= put_prev_task_rt,
2657 	.set_next_task          = set_next_task_rt,
2658 
2659 #ifdef CONFIG_SMP
2660 	.balance		= balance_rt,
2661 	.pick_task		= pick_task_rt,
2662 	.select_task_rq		= select_task_rq_rt,
2663 	.set_cpus_allowed       = set_cpus_allowed_common,
2664 	.rq_online              = rq_online_rt,
2665 	.rq_offline             = rq_offline_rt,
2666 	.task_woken		= task_woken_rt,
2667 	.switched_from		= switched_from_rt,
2668 	.find_lock_rq		= find_lock_lowest_rq,
2669 #endif
2670 
2671 	.task_tick		= task_tick_rt,
2672 
2673 	.get_rr_interval	= get_rr_interval_rt,
2674 
2675 	.prio_changed		= prio_changed_rt,
2676 	.switched_to		= switched_to_rt,
2677 
2678 	.update_curr		= update_curr_rt,
2679 
2680 #ifdef CONFIG_SCHED_CORE
2681 	.task_is_throttled	= task_is_throttled_rt,
2682 #endif
2683 
2684 #ifdef CONFIG_UCLAMP_TASK
2685 	.uclamp_enabled		= 1,
2686 #endif
2687 };
2688 
2689 #ifdef CONFIG_RT_GROUP_SCHED
2690 /*
2691  * Ensure that the real time constraints are schedulable.
2692  */
2693 static DEFINE_MUTEX(rt_constraints_mutex);
2694 
2695 static inline int tg_has_rt_tasks(struct task_group *tg)
2696 {
2697 	struct task_struct *task;
2698 	struct css_task_iter it;
2699 	int ret = 0;
2700 
2701 	/*
2702 	 * Autogroups do not have RT tasks; see autogroup_create().
2703 	 */
2704 	if (task_group_is_autogroup(tg))
2705 		return 0;
2706 
2707 	css_task_iter_start(&tg->css, 0, &it);
2708 	while (!ret && (task = css_task_iter_next(&it)))
2709 		ret |= rt_task(task);
2710 	css_task_iter_end(&it);
2711 
2712 	return ret;
2713 }
2714 
2715 struct rt_schedulable_data {
2716 	struct task_group *tg;
2717 	u64 rt_period;
2718 	u64 rt_runtime;
2719 };
2720 
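/*
 * Worked example of the admission test below, with illustrative numbers:
 * to_ratio() turns runtime/period into a fixed-point fraction, so a group
 * asking for 600000us out of a 1000000us period gets a ratio of 0.6.
 * Two children asking for 0.4 each would sum to 0.8 > 0.6 and make
 * tg_rt_schedulable() fail with -EINVAL.
 */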
2721 static int tg_rt_schedulable(struct task_group *tg, void *data)
2722 {
2723 	struct rt_schedulable_data *d = data;
2724 	struct task_group *child;
2725 	unsigned long total, sum = 0;
2726 	u64 period, runtime;
2727 
2728 	period = ktime_to_ns(tg->rt_bandwidth.rt_period);
2729 	runtime = tg->rt_bandwidth.rt_runtime;
2730 
2731 	if (tg == d->tg) {
2732 		period = d->rt_period;
2733 		runtime = d->rt_runtime;
2734 	}
2735 
2736 	/*
2737 	 * Cannot have more runtime than the period.
2738 	 */
2739 	if (runtime > period && runtime != RUNTIME_INF)
2740 		return -EINVAL;
2741 
2742 	/*
2743 	 * Ensure we don't starve existing RT tasks if runtime turns zero.
2744 	 */
2745 	if (rt_bandwidth_enabled() && !runtime &&
2746 	    tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg))
2747 		return -EBUSY;
2748 
2749 	total = to_ratio(period, runtime);
2750 
2751 	/*
2752 	 * Nobody can have more than the global setting allows.
2753 	 */
2754 	if (total > to_ratio(global_rt_period(), global_rt_runtime()))
2755 		return -EINVAL;
2756 
2757 	/*
2758 	 * The sum of our children's runtime should not exceed our own.
2759 	 */
2760 	list_for_each_entry_rcu(child, &tg->children, siblings) {
2761 		period = ktime_to_ns(child->rt_bandwidth.rt_period);
2762 		runtime = child->rt_bandwidth.rt_runtime;
2763 
2764 		if (child == d->tg) {
2765 			period = d->rt_period;
2766 			runtime = d->rt_runtime;
2767 		}
2768 
2769 		sum += to_ratio(period, runtime);
2770 	}
2771 
2772 	if (sum > total)
2773 		return -EINVAL;
2774 
2775 	return 0;
2776 }
2777 
2778 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
2779 {
2780 	int ret;
2781 
2782 	struct rt_schedulable_data data = {
2783 		.tg = tg,
2784 		.rt_period = period,
2785 		.rt_runtime = runtime,
2786 	};
2787 
2788 	rcu_read_lock();
2789 	ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
2790 	rcu_read_unlock();
2791 
2792 	return ret;
2793 }
2794 
2795 static int tg_set_rt_bandwidth(struct task_group *tg,
2796 		u64 rt_period, u64 rt_runtime)
2797 {
2798 	int i, err = 0;
2799 
2800 	/*
2801 	 * Disallowing the root group RT runtime is BAD; it would disallow the
2802 	 * kernel from creating (and/or operating) RT threads.
2803 	 */
2804 	if (tg == &root_task_group && rt_runtime == 0)
2805 		return -EINVAL;
2806 
2807 	/* A zero period doesn't make any sense. */
2808 	if (rt_period == 0)
2809 		return -EINVAL;
2810 
2811 	/*
2812 	 * Bound the quota to defend against overflow during the bandwidth shift.
2813 	 */
2814 	if (rt_runtime != RUNTIME_INF && rt_runtime > max_rt_runtime)
2815 		return -EINVAL;
2816 
2817 	mutex_lock(&rt_constraints_mutex);
2818 	err = __rt_schedulable(tg, rt_period, rt_runtime);
2819 	if (err)
2820 		goto unlock;
2821 
2822 	raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
2823 	tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
2824 	tg->rt_bandwidth.rt_runtime = rt_runtime;
2825 
2826 	for_each_possible_cpu(i) {
2827 		struct rt_rq *rt_rq = tg->rt_rq[i];
2828 
2829 		raw_spin_lock(&rt_rq->rt_runtime_lock);
2830 		rt_rq->rt_runtime = rt_runtime;
2831 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
2832 	}
2833 	raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
2834 unlock:
2835 	mutex_unlock(&rt_constraints_mutex);
2836 
2837 	return err;
2838 }
2839 
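/*
 * Group bandwidth interface: a negative rt_runtime_us means unlimited
 * (RUNTIME_INF). A hypothetical cgroup-v1 usage sketch (the mount point
 * may differ):
 *
 *	# echo 300000 > /sys/fs/cgroup/cpu/g1/cpu.rt_runtime_us
 *	# echo 1000000 > /sys/fs/cgroup/cpu/g1/cpu.rt_period_us
 */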
2840 int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
2841 {
2842 	u64 rt_runtime, rt_period;
2843 
2844 	rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
2845 	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
2846 	if (rt_runtime_us < 0)
2847 		rt_runtime = RUNTIME_INF;
2848 	else if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC)
2849 		return -EINVAL;
2850 
2851 	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
2852 }
2853 
2854 long sched_group_rt_runtime(struct task_group *tg)
2855 {
2856 	u64 rt_runtime_us;
2857 
2858 	if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
2859 		return -1;
2860 
2861 	rt_runtime_us = tg->rt_bandwidth.rt_runtime;
2862 	do_div(rt_runtime_us, NSEC_PER_USEC);
2863 	return rt_runtime_us;
2864 }
2865 
2866 int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
2867 {
2868 	u64 rt_runtime, rt_period;
2869 
2870 	if (rt_period_us > U64_MAX / NSEC_PER_USEC)
2871 		return -EINVAL;
2872 
2873 	rt_period = rt_period_us * NSEC_PER_USEC;
2874 	rt_runtime = tg->rt_bandwidth.rt_runtime;
2875 
2876 	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
2877 }
2878 
2879 long sched_group_rt_period(struct task_group *tg)
2880 {
2881 	u64 rt_period_us;
2882 
2883 	rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
2884 	do_div(rt_period_us, NSEC_PER_USEC);
2885 	return rt_period_us;
2886 }
2887 
2888 #ifdef CONFIG_SYSCTL
2889 static int sched_rt_global_constraints(void)
2890 {
2891 	int ret = 0;
2892 
2893 	mutex_lock(&rt_constraints_mutex);
2894 	ret = __rt_schedulable(NULL, 0, 0);
2895 	mutex_unlock(&rt_constraints_mutex);
2896 
2897 	return ret;
2898 }
2899 #endif /* CONFIG_SYSCTL */
2900 
2901 int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
2902 {
2903 	/* Don't accept real-time tasks when there is no way for them to run */
2904 	if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
2905 		return 0;
2906 
2907 	return 1;
2908 }
2909 
2910 #else /* !CONFIG_RT_GROUP_SCHED */
2911 
2912 #ifdef CONFIG_SYSCTL
2913 static int sched_rt_global_constraints(void)
2914 {
2915 	unsigned long flags;
2916 	int i;
2917 
2918 	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
2919 	for_each_possible_cpu(i) {
2920 		struct rt_rq *rt_rq = &cpu_rq(i)->rt;
2921 
2922 		raw_spin_lock(&rt_rq->rt_runtime_lock);
2923 		rt_rq->rt_runtime = global_rt_runtime();
2924 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
2925 	}
2926 	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
2927 
2928 	return 0;
2929 }
2930 #endif /* CONFIG_SYSCTL */
2931 #endif /* CONFIG_RT_GROUP_SCHED */
2932 
2933 #ifdef CONFIG_SYSCTL
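/*
 * Global limits: the runtime must not exceed the period, and the runtime
 * in nanoseconds must stay below max_rt_runtime so that the bandwidth
 * shift cannot overflow. For example, a runtime of 500000us against a
 * period of 1000000us caps RT tasks at 50% of each period.
 */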
2934 static int sched_rt_global_validate(void)
2935 {
2936 	if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
2937 		((sysctl_sched_rt_runtime > sysctl_sched_rt_period) ||
2938 		 ((u64)sysctl_sched_rt_runtime *
2939 			NSEC_PER_USEC > max_rt_runtime)))
2940 		return -EINVAL;
2941 
2942 	return 0;
2943 }
2944 
2945 static void sched_rt_do_global(void)
2946 {
2947 	unsigned long flags;
2948 
2949 	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
2950 	def_rt_bandwidth.rt_runtime = global_rt_runtime();
2951 	def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
2952 	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
2953 }
2954 
2955 static int sched_rt_handler(const struct ctl_table *table, int write, void *buffer,
2956 		size_t *lenp, loff_t *ppos)
2957 {
2958 	int old_period, old_runtime;
2959 	static DEFINE_MUTEX(mutex);
2960 	int ret;
2961 
2962 	mutex_lock(&mutex);
2963 	old_period = sysctl_sched_rt_period;
2964 	old_runtime = sysctl_sched_rt_runtime;
2965 
2966 	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2967 
2968 	if (!ret && write) {
2969 		ret = sched_rt_global_validate();
2970 		if (ret)
2971 			goto undo;
2972 
2973 		ret = sched_dl_global_validate();
2974 		if (ret)
2975 			goto undo;
2976 
2977 		ret = sched_rt_global_constraints();
2978 		if (ret)
2979 			goto undo;
2980 
2981 		sched_rt_do_global();
2982 		sched_dl_do_global();
2983 	}
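	/*
	 * Error path: the 'if (0)' block below is only reachable through
	 * the undo label and restores the previous sysctl values.
	 */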
2984 	if (0) {
2985 undo:
2986 		sysctl_sched_rt_period = old_period;
2987 		sysctl_sched_rt_runtime = old_runtime;
2988 	}
2989 	mutex_unlock(&mutex);
2990 
2991 	return ret;
2992 }
2993 
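/*
 * sysctl handler for sched_rr_timeslice_ms: userspace reads and writes
 * milliseconds while sched_rr_timeslice itself is kept in jiffies.
 * A hypothetical usage sketch:
 *
 *	# echo 100 > /proc/sys/kernel/sched_rr_timeslice_ms
 *	# echo 0 > /proc/sys/kernel/sched_rr_timeslice_ms	(reset to default)
 */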
2994 static int sched_rr_handler(const struct ctl_table *table, int write, void *buffer,
2995 		size_t *lenp, loff_t *ppos)
2996 {
2997 	int ret;
2998 	static DEFINE_MUTEX(mutex);
2999 
3000 	mutex_lock(&mutex);
3001 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
3002 	/*
3003 	 * Make sure that internally we keep jiffies.
3004 	 * Also, writing zero resets the time-slice to the default.
3005 	 */
3006 	if (!ret && write) {
3007 		sched_rr_timeslice =
3008 			sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
3009 			msecs_to_jiffies(sysctl_sched_rr_timeslice);
3010 
3011 		if (sysctl_sched_rr_timeslice <= 0)
3012 			sysctl_sched_rr_timeslice = jiffies_to_msecs(RR_TIMESLICE);
3013 	}
3014 	mutex_unlock(&mutex);
3015 
3016 	return ret;
3017 }
3018 #endif /* CONFIG_SYSCTL */
3019 
3020 #ifdef CONFIG_SCHED_DEBUG
3021 void print_rt_stats(struct seq_file *m, int cpu)
3022 {
3023 	rt_rq_iter_t iter;
3024 	struct rt_rq *rt_rq;
3025 
3026 	rcu_read_lock();
3027 	for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
3028 		print_rt_rq(m, cpu, rt_rq);
3029 	rcu_read_unlock();
3030 }
3031 #endif /* CONFIG_SCHED_DEBUG */
3032