xref: /linux/kernel/sched/rt.c (revision 26b0d14106954ae46d2f4f7eec3481828a210f7d)
1 /*
2  * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
3  * policies)
4  */
5 
6 #include "sched.h"
7 
8 #include <linux/slab.h>
9 
10 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
11 
12 struct rt_bandwidth def_rt_bandwidth;
13 
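/*
 * Periodic replenishment timer for an rt_bandwidth: forward the timer by
 * whole periods and hand each overrun to do_sched_rt_period_timer();
 * keep the timer running unless every runqueue in the period mask was idle.
 */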
14 static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
15 {
16 	struct rt_bandwidth *rt_b =
17 		container_of(timer, struct rt_bandwidth, rt_period_timer);
18 	ktime_t now;
19 	int overrun;
20 	int idle = 0;
21 
22 	for (;;) {
23 		now = hrtimer_cb_get_time(timer);
24 		overrun = hrtimer_forward(timer, now, rt_b->rt_period);
25 
26 		if (!overrun)
27 			break;
28 
29 		idle = do_sched_rt_period_timer(rt_b, overrun);
30 	}
31 
32 	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
33 }
34 
35 void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
36 {
37 	rt_b->rt_period = ns_to_ktime(period);
38 	rt_b->rt_runtime = runtime;
39 
40 	raw_spin_lock_init(&rt_b->rt_runtime_lock);
41 
42 	hrtimer_init(&rt_b->rt_period_timer,
43 			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
44 	rt_b->rt_period_timer.function = sched_rt_period_timer;
45 }
46 
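/*
 * Arm the periodic replenishment timer for this bandwidth pool, unless RT
 * throttling is disabled, the runtime is unlimited, or the timer is
 * already pending.
 */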
47 static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
48 {
49 	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
50 		return;
51 
52 	if (hrtimer_active(&rt_b->rt_period_timer))
53 		return;
54 
55 	raw_spin_lock(&rt_b->rt_runtime_lock);
56 	start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
57 	raw_spin_unlock(&rt_b->rt_runtime_lock);
58 }
59 
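/*
 * Set up a per-runqueue RT structure: an empty priority array (with the
 * MAX_RT_PRIO delimiter bit set for the bitmap search), the SMP push/pull
 * state, and the throttling bookkeeping.
 */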
60 void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
61 {
62 	struct rt_prio_array *array;
63 	int i;
64 
65 	array = &rt_rq->active;
66 	for (i = 0; i < MAX_RT_PRIO; i++) {
67 		INIT_LIST_HEAD(array->queue + i);
68 		__clear_bit(i, array->bitmap);
69 	}
70 	/* delimiter for bitsearch: */
71 	__set_bit(MAX_RT_PRIO, array->bitmap);
72 
73 #if defined CONFIG_SMP
74 	rt_rq->highest_prio.curr = MAX_RT_PRIO;
75 	rt_rq->highest_prio.next = MAX_RT_PRIO;
76 	rt_rq->rt_nr_migratory = 0;
77 	rt_rq->overloaded = 0;
78 	plist_head_init(&rt_rq->pushable_tasks);
79 #endif
80 
81 	rt_rq->rt_time = 0;
82 	rt_rq->rt_throttled = 0;
83 	rt_rq->rt_runtime = 0;
84 	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
85 }
86 
87 #ifdef CONFIG_RT_GROUP_SCHED
88 static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
89 {
90 	hrtimer_cancel(&rt_b->rt_period_timer);
91 }
92 
93 #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
94 
95 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
96 {
97 #ifdef CONFIG_SCHED_DEBUG
98 	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
99 #endif
100 	return container_of(rt_se, struct task_struct, rt);
101 }
102 
103 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
104 {
105 	return rt_rq->rq;
106 }
107 
108 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
109 {
110 	return rt_se->rt_rq;
111 }
112 
113 void free_rt_sched_group(struct task_group *tg)
114 {
115 	int i;
116 
117 	if (tg->rt_se)
118 		destroy_rt_bandwidth(&tg->rt_bandwidth);
119 
120 	for_each_possible_cpu(i) {
121 		if (tg->rt_rq)
122 			kfree(tg->rt_rq[i]);
123 		if (tg->rt_se)
124 			kfree(tg->rt_se[i]);
125 	}
126 
127 	kfree(tg->rt_rq);
128 	kfree(tg->rt_se);
129 }
130 
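/*
 * Wire up one CPU's rt_rq and scheduling entity for a task group, linking
 * the entity to its parent's queue (or to the root rt_rq for top-level
 * groups).
 */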
131 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
132 		struct sched_rt_entity *rt_se, int cpu,
133 		struct sched_rt_entity *parent)
134 {
135 	struct rq *rq = cpu_rq(cpu);
136 
137 	rt_rq->highest_prio.curr = MAX_RT_PRIO;
138 	rt_rq->rt_nr_boosted = 0;
139 	rt_rq->rq = rq;
140 	rt_rq->tg = tg;
141 
142 	tg->rt_rq[cpu] = rt_rq;
143 	tg->rt_se[cpu] = rt_se;
144 
145 	if (!rt_se)
146 		return;
147 
148 	if (!parent)
149 		rt_se->rt_rq = &rq->rt;
150 	else
151 		rt_se->rt_rq = parent->my_q;
152 
153 	rt_se->my_q = rt_rq;
154 	rt_se->parent = parent;
155 	INIT_LIST_HEAD(&rt_se->run_list);
156 }
157 
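/*
 * Allocate the per-CPU rt_rq/rt_se pairs for a new task group, using the
 * default period and zero runtime until bandwidth is configured.
 * Returns 1 on success, 0 on allocation failure.
 */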
158 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
159 {
160 	struct rt_rq *rt_rq;
161 	struct sched_rt_entity *rt_se;
162 	int i;
163 
164 	tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
165 	if (!tg->rt_rq)
166 		goto err;
167 	tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
168 	if (!tg->rt_se)
169 		goto err;
170 
171 	init_rt_bandwidth(&tg->rt_bandwidth,
172 			ktime_to_ns(def_rt_bandwidth.rt_period), 0);
173 
174 	for_each_possible_cpu(i) {
175 		rt_rq = kzalloc_node(sizeof(struct rt_rq),
176 				     GFP_KERNEL, cpu_to_node(i));
177 		if (!rt_rq)
178 			goto err;
179 
180 		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
181 				     GFP_KERNEL, cpu_to_node(i));
182 		if (!rt_se)
183 			goto err_free_rq;
184 
185 		init_rt_rq(rt_rq, cpu_rq(i));
186 		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
187 		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
188 	}
189 
190 	return 1;
191 
192 err_free_rq:
193 	kfree(rt_rq);
194 err:
195 	return 0;
196 }
197 
198 #else /* CONFIG_RT_GROUP_SCHED */
199 
200 #define rt_entity_is_task(rt_se) (1)
201 
202 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
203 {
204 	return container_of(rt_se, struct task_struct, rt);
205 }
206 
207 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
208 {
209 	return container_of(rt_rq, struct rq, rt);
210 }
211 
212 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
213 {
214 	struct task_struct *p = rt_task_of(rt_se);
215 	struct rq *rq = task_rq(p);
216 
217 	return &rq->rt;
218 }
219 
220 void free_rt_sched_group(struct task_group *tg) { }
221 
222 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
223 {
224 	return 1;
225 }
226 #endif /* CONFIG_RT_GROUP_SCHED */
227 
228 #ifdef CONFIG_SMP
229 
230 static inline int rt_overloaded(struct rq *rq)
231 {
232 	return atomic_read(&rq->rd->rto_count);
233 }
234 
235 static inline void rt_set_overload(struct rq *rq)
236 {
237 	if (!rq->online)
238 		return;
239 
240 	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
241 	/*
242 	 * Make sure the mask is visible before we set
243 	 * the overload count. That is checked to determine
244 	 * if we should look at the mask. It would be a shame
245 	 * if we looked at the mask, but the mask was not
246 	 * updated yet.
247 	 */
248 	wmb();
249 	atomic_inc(&rq->rd->rto_count);
250 }
251 
252 static inline void rt_clear_overload(struct rq *rq)
253 {
254 	if (!rq->online)
255 		return;
256 
257 	/* the order here really doesn't matter */
258 	atomic_dec(&rq->rd->rto_count);
259 	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
260 }
261 
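/*
 * A runqueue is RT-overloaded when it has more than one runnable RT task
 * and at least one of them can migrate; set or clear the root-domain
 * overload state to match.
 */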
262 static void update_rt_migration(struct rt_rq *rt_rq)
263 {
264 	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
265 		if (!rt_rq->overloaded) {
266 			rt_set_overload(rq_of_rt_rq(rt_rq));
267 			rt_rq->overloaded = 1;
268 		}
269 	} else if (rt_rq->overloaded) {
270 		rt_clear_overload(rq_of_rt_rq(rt_rq));
271 		rt_rq->overloaded = 0;
272 	}
273 }
274 
275 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
276 {
277 	struct task_struct *p;
278 
279 	if (!rt_entity_is_task(rt_se))
280 		return;
281 
282 	p = rt_task_of(rt_se);
283 	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
284 
285 	rt_rq->rt_nr_total++;
286 	if (p->nr_cpus_allowed > 1)
287 		rt_rq->rt_nr_migratory++;
288 
289 	update_rt_migration(rt_rq);
290 }
291 
292 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
293 {
294 	struct task_struct *p;
295 
296 	if (!rt_entity_is_task(rt_se))
297 		return;
298 
299 	p = rt_task_of(rt_se);
300 	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
301 
302 	rt_rq->rt_nr_total--;
303 	if (p->nr_cpus_allowed > 1)
304 		rt_rq->rt_nr_migratory--;
305 
306 	update_rt_migration(rt_rq);
307 }
308 
309 static inline int has_pushable_tasks(struct rq *rq)
310 {
311 	return !plist_head_empty(&rq->rt.pushable_tasks);
312 }
313 
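/*
 * Maintain the priority-ordered list of tasks that could be pushed to
 * another CPU; highest_prio.next caches the priority of the best
 * candidate on it.
 */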
314 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
315 {
316 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
317 	plist_node_init(&p->pushable_tasks, p->prio);
318 	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
319 
320 	/* Update the highest prio pushable task */
321 	if (p->prio < rq->rt.highest_prio.next)
322 		rq->rt.highest_prio.next = p->prio;
323 }
324 
325 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
326 {
327 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
328 
329 	/* Update the new highest prio pushable task */
330 	if (has_pushable_tasks(rq)) {
331 		p = plist_first_entry(&rq->rt.pushable_tasks,
332 				      struct task_struct, pushable_tasks);
333 		rq->rt.highest_prio.next = p->prio;
334 	} else
335 		rq->rt.highest_prio.next = MAX_RT_PRIO;
336 }
337 
338 #else
339 
340 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
341 {
342 }
343 
344 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
345 {
346 }
347 
348 static inline
349 void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
350 {
351 }
352 
353 static inline
354 void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
355 {
356 }
357 
358 #endif /* CONFIG_SMP */
359 
360 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
361 {
362 	return !list_empty(&rt_se->run_list);
363 }
364 
365 #ifdef CONFIG_RT_GROUP_SCHED
366 
367 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
368 {
369 	if (!rt_rq->tg)
370 		return RUNTIME_INF;
371 
372 	return rt_rq->rt_runtime;
373 }
374 
375 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
376 {
377 	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
378 }
379 
380 typedef struct task_group *rt_rq_iter_t;
381 
382 static inline struct task_group *next_task_group(struct task_group *tg)
383 {
384 	do {
385 		tg = list_entry_rcu(tg->list.next,
386 			typeof(struct task_group), list);
387 	} while (&tg->list != &task_groups && task_group_is_autogroup(tg));
388 
389 	if (&tg->list == &task_groups)
390 		tg = NULL;
391 
392 	return tg;
393 }
394 
395 #define for_each_rt_rq(rt_rq, iter, rq)					\
396 	for (iter = container_of(&task_groups, typeof(*iter), list);	\
397 		(iter = next_task_group(iter)) &&			\
398 		(rt_rq = iter->rt_rq[cpu_of(rq)]);)
399 
400 static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
401 {
402 	list_add_rcu(&rt_rq->leaf_rt_rq_list,
403 			&rq_of_rt_rq(rt_rq)->leaf_rt_rq_list);
404 }
405 
406 static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
407 {
408 	list_del_rcu(&rt_rq->leaf_rt_rq_list);
409 }
410 
411 #define for_each_leaf_rt_rq(rt_rq, rq) \
412 	list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
413 
414 #define for_each_sched_rt_entity(rt_se) \
415 	for (; rt_se; rt_se = rt_se->parent)
416 
417 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
418 {
419 	return rt_se->my_q;
420 }
421 
422 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
423 static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
424 
425 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
426 {
427 	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
428 	struct sched_rt_entity *rt_se;
429 
430 	int cpu = cpu_of(rq_of_rt_rq(rt_rq));
431 
432 	rt_se = rt_rq->tg->rt_se[cpu];
433 
434 	if (rt_rq->rt_nr_running) {
435 		if (rt_se && !on_rt_rq(rt_se))
436 			enqueue_rt_entity(rt_se, false);
437 		if (rt_rq->highest_prio.curr < curr->prio)
438 			resched_task(curr);
439 	}
440 }
441 
442 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
443 {
444 	struct sched_rt_entity *rt_se;
445 	int cpu = cpu_of(rq_of_rt_rq(rt_rq));
446 
447 	rt_se = rt_rq->tg->rt_se[cpu];
448 
449 	if (rt_se && on_rt_rq(rt_se))
450 		dequeue_rt_entity(rt_se);
451 }
452 
453 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
454 {
455 	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
456 }
457 
458 static int rt_se_boosted(struct sched_rt_entity *rt_se)
459 {
460 	struct rt_rq *rt_rq = group_rt_rq(rt_se);
461 	struct task_struct *p;
462 
463 	if (rt_rq)
464 		return !!rt_rq->rt_nr_boosted;
465 
466 	p = rt_task_of(rt_se);
467 	return p->prio != p->normal_prio;
468 }
469 
470 #ifdef CONFIG_SMP
471 static inline const struct cpumask *sched_rt_period_mask(void)
472 {
473 	return cpu_rq(smp_processor_id())->rd->span;
474 }
475 #else
476 static inline const struct cpumask *sched_rt_period_mask(void)
477 {
478 	return cpu_online_mask;
479 }
480 #endif
481 
482 static inline
483 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
484 {
485 	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
486 }
487 
488 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
489 {
490 	return &rt_rq->tg->rt_bandwidth;
491 }
492 
493 #else /* !CONFIG_RT_GROUP_SCHED */
494 
495 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
496 {
497 	return rt_rq->rt_runtime;
498 }
499 
500 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
501 {
502 	return ktime_to_ns(def_rt_bandwidth.rt_period);
503 }
504 
505 typedef struct rt_rq *rt_rq_iter_t;
506 
507 #define for_each_rt_rq(rt_rq, iter, rq) \
508 	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
509 
510 static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
511 {
512 }
513 
514 static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
515 {
516 }
517 
518 #define for_each_leaf_rt_rq(rt_rq, rq) \
519 	for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
520 
521 #define for_each_sched_rt_entity(rt_se) \
522 	for (; rt_se; rt_se = NULL)
523 
524 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
525 {
526 	return NULL;
527 }
528 
529 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
530 {
531 	if (rt_rq->rt_nr_running)
532 		resched_task(rq_of_rt_rq(rt_rq)->curr);
533 }
534 
535 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
536 {
537 }
538 
539 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
540 {
541 	return rt_rq->rt_throttled;
542 }
543 
544 static inline const struct cpumask *sched_rt_period_mask(void)
545 {
546 	return cpu_online_mask;
547 }
548 
549 static inline
550 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
551 {
552 	return &cpu_rq(cpu)->rt;
553 }
554 
555 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
556 {
557 	return &def_rt_bandwidth;
558 }
559 
560 #endif /* CONFIG_RT_GROUP_SCHED */
561 
562 #ifdef CONFIG_SMP
563 /*
564  * We ran out of runtime, see if we can borrow some from our neighbours.
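 *
 * Each neighbour with spare time (rt_runtime - rt_time) contributes at
 * most 1/n of that spare amount, where n is the number of CPUs in the
 * root domain, and we stop borrowing once our runtime reaches one full
 * period.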
565  */
566 static int do_balance_runtime(struct rt_rq *rt_rq)
567 {
568 	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
569 	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
570 	int i, weight, more = 0;
571 	u64 rt_period;
572 
573 	weight = cpumask_weight(rd->span);
574 
575 	raw_spin_lock(&rt_b->rt_runtime_lock);
576 	rt_period = ktime_to_ns(rt_b->rt_period);
577 	for_each_cpu(i, rd->span) {
578 		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
579 		s64 diff;
580 
581 		if (iter == rt_rq)
582 			continue;
583 
584 		raw_spin_lock(&iter->rt_runtime_lock);
585 		/*
586 		 * Either all rqs have inf runtime and there's nothing to steal
587 		 * or __disable_runtime() below sets a specific rq to inf to
588 		 * indicate it's been disabled and disallow stealing.
589 		 */
590 		if (iter->rt_runtime == RUNTIME_INF)
591 			goto next;
592 
593 		/*
594 		 * From runqueues with spare time, take 1/n part of their
595 		 * spare time, but no more than our period.
596 		 */
597 		diff = iter->rt_runtime - iter->rt_time;
598 		if (diff > 0) {
599 			diff = div_u64((u64)diff, weight);
600 			if (rt_rq->rt_runtime + diff > rt_period)
601 				diff = rt_period - rt_rq->rt_runtime;
602 			iter->rt_runtime -= diff;
603 			rt_rq->rt_runtime += diff;
604 			more = 1;
605 			if (rt_rq->rt_runtime == rt_period) {
606 				raw_spin_unlock(&iter->rt_runtime_lock);
607 				break;
608 			}
609 		}
610 next:
611 		raw_spin_unlock(&iter->rt_runtime_lock);
612 	}
613 	raw_spin_unlock(&rt_b->rt_runtime_lock);
614 
615 	return more;
616 }
617 
618 /*
619  * Ensure this RQ takes back all the runtime it lent to its neighbours.
620  */
621 static void __disable_runtime(struct rq *rq)
622 {
623 	struct root_domain *rd = rq->rd;
624 	rt_rq_iter_t iter;
625 	struct rt_rq *rt_rq;
626 
627 	if (unlikely(!scheduler_running))
628 		return;
629 
630 	for_each_rt_rq(rt_rq, iter, rq) {
631 		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
632 		s64 want;
633 		int i;
634 
635 		raw_spin_lock(&rt_b->rt_runtime_lock);
636 		raw_spin_lock(&rt_rq->rt_runtime_lock);
637 		/*
638 		 * Either we're all inf and nobody needs to borrow, or we're
639 		 * already disabled and thus have nothing to do, or we have
640 		 * exactly the right amount of runtime to take out.
641 		 */
642 		if (rt_rq->rt_runtime == RUNTIME_INF ||
643 				rt_rq->rt_runtime == rt_b->rt_runtime)
644 			goto balanced;
645 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
646 
647 		/*
648 		 * Calculate the difference between what we started out with
649 		 * and what we currently have; that's the amount of runtime
650 		 * we lent out and now have to reclaim.
651 		 */
652 		want = rt_b->rt_runtime - rt_rq->rt_runtime;
653 
654 		/*
655 		 * Greedy reclaim, take back as much as we can.
656 		 */
657 		for_each_cpu(i, rd->span) {
658 			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
659 			s64 diff;
660 
661 			/*
662 			 * Can't reclaim from ourselves or disabled runqueues.
663 			 */
664 			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
665 				continue;
666 
667 			raw_spin_lock(&iter->rt_runtime_lock);
668 			if (want > 0) {
669 				diff = min_t(s64, iter->rt_runtime, want);
670 				iter->rt_runtime -= diff;
671 				want -= diff;
672 			} else {
673 				iter->rt_runtime -= want;
674 				want -= want;
675 			}
676 			raw_spin_unlock(&iter->rt_runtime_lock);
677 
678 			if (!want)
679 				break;
680 		}
681 
682 		raw_spin_lock(&rt_rq->rt_runtime_lock);
683 		/*
684 		 * We cannot be left wanting - that would mean some runtime
685 		 * leaked out of the system.
686 		 */
687 		BUG_ON(want);
688 balanced:
689 		/*
690 		 * Disable all the borrow logic by pretending we have inf
691 		 * runtime - in which case borrowing doesn't make sense.
692 		 */
693 		rt_rq->rt_runtime = RUNTIME_INF;
694 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
695 		raw_spin_unlock(&rt_b->rt_runtime_lock);
696 	}
697 }
698 
699 static void disable_runtime(struct rq *rq)
700 {
701 	unsigned long flags;
702 
703 	raw_spin_lock_irqsave(&rq->lock, flags);
704 	__disable_runtime(rq);
705 	raw_spin_unlock_irqrestore(&rq->lock, flags);
706 }
707 
708 static void __enable_runtime(struct rq *rq)
709 {
710 	rt_rq_iter_t iter;
711 	struct rt_rq *rt_rq;
712 
713 	if (unlikely(!scheduler_running))
714 		return;
715 
716 	/*
717 	 * Reset each runqueue's bandwidth settings
718 	 */
719 	for_each_rt_rq(rt_rq, iter, rq) {
720 		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
721 
722 		raw_spin_lock(&rt_b->rt_runtime_lock);
723 		raw_spin_lock(&rt_rq->rt_runtime_lock);
724 		rt_rq->rt_runtime = rt_b->rt_runtime;
725 		rt_rq->rt_time = 0;
726 		rt_rq->rt_throttled = 0;
727 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
728 		raw_spin_unlock(&rt_b->rt_runtime_lock);
729 	}
730 }
731 
732 static void enable_runtime(struct rq *rq)
733 {
734 	unsigned long flags;
735 
736 	raw_spin_lock_irqsave(&rq->lock, flags);
737 	__enable_runtime(rq);
738 	raw_spin_unlock_irqrestore(&rq->lock, flags);
739 }
740 
741 int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu)
742 {
743 	int cpu = (int)(long)hcpu;
744 
745 	switch (action) {
746 	case CPU_DOWN_PREPARE:
747 	case CPU_DOWN_PREPARE_FROZEN:
748 		disable_runtime(cpu_rq(cpu));
749 		return NOTIFY_OK;
750 
751 	case CPU_DOWN_FAILED:
752 	case CPU_DOWN_FAILED_FROZEN:
753 	case CPU_ONLINE:
754 	case CPU_ONLINE_FROZEN:
755 		enable_runtime(cpu_rq(cpu));
756 		return NOTIFY_OK;
757 
758 	default:
759 		return NOTIFY_DONE;
760 	}
761 }
762 
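/*
 * If the RT_RUNTIME_SHARE feature is enabled and we have overrun our
 * budget, drop rt_runtime_lock and try to borrow runtime from the other
 * CPUs in the root domain.
 */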
763 static int balance_runtime(struct rt_rq *rt_rq)
764 {
765 	int more = 0;
766 
767 	if (!sched_feat(RT_RUNTIME_SHARE))
768 		return more;
769 
770 	if (rt_rq->rt_time > rt_rq->rt_runtime) {
771 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
772 		more = do_balance_runtime(rt_rq);
773 		raw_spin_lock(&rt_rq->rt_runtime_lock);
774 	}
775 
776 	return more;
777 }
778 #else /* !CONFIG_SMP */
779 static inline int balance_runtime(struct rt_rq *rt_rq)
780 {
781 	return 0;
782 }
783 #endif /* CONFIG_SMP */
784 
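/*
 * Runs once per bandwidth period (possibly covering several overruns):
 * on every CPU in the period mask, decay the accumulated rt_time by up to
 * overrun periods' worth of runtime, unthrottle and re-enqueue rt_rqs that
 * are back under their limit, and report whether everything was idle so
 * the timer can be stopped.
 */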
785 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
786 {
787 	int i, idle = 1, throttled = 0;
788 	const struct cpumask *span;
789 
790 	span = sched_rt_period_mask();
791 	for_each_cpu(i, span) {
792 		int enqueue = 0;
793 		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
794 		struct rq *rq = rq_of_rt_rq(rt_rq);
795 
796 		raw_spin_lock(&rq->lock);
797 		if (rt_rq->rt_time) {
798 			u64 runtime;
799 
800 			raw_spin_lock(&rt_rq->rt_runtime_lock);
801 			if (rt_rq->rt_throttled)
802 				balance_runtime(rt_rq);
803 			runtime = rt_rq->rt_runtime;
804 			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
805 			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
806 				rt_rq->rt_throttled = 0;
807 				enqueue = 1;
808 
809 				/*
810 				 * Force a clock update if the CPU was idle,
811 				 * lest wakeup -> unthrottle time accumulate.
812 				 */
813 				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
814 					rq->skip_clock_update = -1;
815 			}
816 			if (rt_rq->rt_time || rt_rq->rt_nr_running)
817 				idle = 0;
818 			raw_spin_unlock(&rt_rq->rt_runtime_lock);
819 		} else if (rt_rq->rt_nr_running) {
820 			idle = 0;
821 			if (!rt_rq_throttled(rt_rq))
822 				enqueue = 1;
823 		}
824 		if (rt_rq->rt_throttled)
825 			throttled = 1;
826 
827 		if (enqueue)
828 			sched_rt_rq_enqueue(rt_rq);
829 		raw_spin_unlock(&rq->lock);
830 	}
831 
832 	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
833 		return 1;
834 
835 	return idle;
836 }
837 
838 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
839 {
840 #ifdef CONFIG_RT_GROUP_SCHED
841 	struct rt_rq *rt_rq = group_rt_rq(rt_se);
842 
843 	if (rt_rq)
844 		return rt_rq->highest_prio.curr;
845 #endif
846 
847 	return rt_task_of(rt_se)->prio;
848 }
849 
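/*
 * Check whether this rt_rq has used up its runtime for the current period.
 * Try to borrow spare runtime from other CPUs first; if we are still over
 * the limit, mark the rt_rq throttled and dequeue it so that tasks from
 * lower scheduling classes can run until the next replenishment.
 */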
850 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
851 {
852 	u64 runtime = sched_rt_runtime(rt_rq);
853 
854 	if (rt_rq->rt_throttled)
855 		return rt_rq_throttled(rt_rq);
856 
857 	if (runtime >= sched_rt_period(rt_rq))
858 		return 0;
859 
860 	balance_runtime(rt_rq);
861 	runtime = sched_rt_runtime(rt_rq);
862 	if (runtime == RUNTIME_INF)
863 		return 0;
864 
865 	if (rt_rq->rt_time > runtime) {
866 		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
867 
868 		/*
869 		 * Don't actually throttle groups that have no runtime assigned
870 		 * but accrue some time due to boosting.
871 		 */
872 		if (likely(rt_b->rt_runtime)) {
873 			static bool once = false;
874 
875 			rt_rq->rt_throttled = 1;
876 
877 			if (!once) {
878 				once = true;
879 				printk_sched("sched: RT throttling activated\n");
880 			}
881 		} else {
882 			/*
883 			 * In case we did anyway, make it go away;
884 			 * replenishment is a joke, since it will replenish us
885 			 * with exactly 0 ns.
886 			 */
887 			rt_rq->rt_time = 0;
888 		}
889 
890 		if (rt_rq_throttled(rt_rq)) {
891 			sched_rt_rq_dequeue(rt_rq);
892 			return 1;
893 		}
894 	}
895 
896 	return 0;
897 }
898 
899 /*
900  * Update the current task's runtime statistics. Skip current tasks that
901  * are not in our scheduling class.
902  */
903 static void update_curr_rt(struct rq *rq)
904 {
905 	struct task_struct *curr = rq->curr;
906 	struct sched_rt_entity *rt_se = &curr->rt;
907 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
908 	u64 delta_exec;
909 
910 	if (curr->sched_class != &rt_sched_class)
911 		return;
912 
913 	delta_exec = rq->clock_task - curr->se.exec_start;
914 	if (unlikely((s64)delta_exec < 0))
915 		delta_exec = 0;
916 
917 	schedstat_set(curr->se.statistics.exec_max,
918 		      max(curr->se.statistics.exec_max, delta_exec));
919 
920 	curr->se.sum_exec_runtime += delta_exec;
921 	account_group_exec_runtime(curr, delta_exec);
922 
923 	curr->se.exec_start = rq->clock_task;
924 	cpuacct_charge(curr, delta_exec);
925 
926 	sched_rt_avg_update(rq, delta_exec);
927 
928 	if (!rt_bandwidth_enabled())
929 		return;
930 
931 	for_each_sched_rt_entity(rt_se) {
932 		rt_rq = rt_rq_of_se(rt_se);
933 
934 		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
935 			raw_spin_lock(&rt_rq->rt_runtime_lock);
936 			rt_rq->rt_time += delta_exec;
937 			if (sched_rt_runtime_exceeded(rt_rq))
938 				resched_task(curr);
939 			raw_spin_unlock(&rt_rq->rt_runtime_lock);
940 		}
941 	}
942 }
943 
944 #if defined CONFIG_SMP
945 
946 static void
947 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
948 {
949 	struct rq *rq = rq_of_rt_rq(rt_rq);
950 
951 	if (rq->online && prio < prev_prio)
952 		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
953 }
954 
955 static void
956 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
957 {
958 	struct rq *rq = rq_of_rt_rq(rt_rq);
959 
960 	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
961 		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
962 }
963 
964 #else /* CONFIG_SMP */
965 
966 static inline
967 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
968 static inline
969 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
970 
971 #endif /* CONFIG_SMP */
972 
973 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
974 static void
975 inc_rt_prio(struct rt_rq *rt_rq, int prio)
976 {
977 	int prev_prio = rt_rq->highest_prio.curr;
978 
979 	if (prio < prev_prio)
980 		rt_rq->highest_prio.curr = prio;
981 
982 	inc_rt_prio_smp(rt_rq, prio, prev_prio);
983 }
984 
985 static void
986 dec_rt_prio(struct rt_rq *rt_rq, int prio)
987 {
988 	int prev_prio = rt_rq->highest_prio.curr;
989 
990 	if (rt_rq->rt_nr_running) {
991 
992 		WARN_ON(prio < prev_prio);
993 
994 		/*
995 		 * This may have been our highest task, and therefore
996 		 * we may have some recomputation to do
997 		 */
998 		if (prio == prev_prio) {
999 			struct rt_prio_array *array = &rt_rq->active;
1000 
1001 			rt_rq->highest_prio.curr =
1002 				sched_find_first_bit(array->bitmap);
1003 		}
1004 
1005 	} else
1006 		rt_rq->highest_prio.curr = MAX_RT_PRIO;
1007 
1008 	dec_rt_prio_smp(rt_rq, prio, prev_prio);
1009 }
1010 
1011 #else
1012 
1013 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1014 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1015 
1016 #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1017 
1018 #ifdef CONFIG_RT_GROUP_SCHED
1019 
1020 static void
1021 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1022 {
1023 	if (rt_se_boosted(rt_se))
1024 		rt_rq->rt_nr_boosted++;
1025 
1026 	if (rt_rq->tg)
1027 		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1028 }
1029 
1030 static void
1031 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1032 {
1033 	if (rt_se_boosted(rt_se))
1034 		rt_rq->rt_nr_boosted--;
1035 
1036 	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1037 }
1038 
1039 #else /* CONFIG_RT_GROUP_SCHED */
1040 
1041 static void
1042 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1043 {
1044 	start_rt_bandwidth(&def_rt_bandwidth);
1045 }
1046 
1047 static inline
1048 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1049 
1050 #endif /* CONFIG_RT_GROUP_SCHED */
1051 
1052 static inline
1053 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1054 {
1055 	int prio = rt_se_prio(rt_se);
1056 
1057 	WARN_ON(!rt_prio(prio));
1058 	rt_rq->rt_nr_running++;
1059 
1060 	inc_rt_prio(rt_rq, prio);
1061 	inc_rt_migration(rt_se, rt_rq);
1062 	inc_rt_group(rt_se, rt_rq);
1063 }
1064 
1065 static inline
1066 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1067 {
1068 	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1069 	WARN_ON(!rt_rq->rt_nr_running);
1070 	rt_rq->rt_nr_running--;
1071 
1072 	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1073 	dec_rt_migration(rt_se, rt_rq);
1074 	dec_rt_group(rt_se, rt_rq);
1075 }
1076 
1077 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1078 {
1079 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1080 	struct rt_prio_array *array = &rt_rq->active;
1081 	struct rt_rq *group_rq = group_rt_rq(rt_se);
1082 	struct list_head *queue = array->queue + rt_se_prio(rt_se);
1083 
1084 	/*
1085 	 * Don't enqueue the group if it's throttled, or when empty.
1086 	 * The latter is a consequence of the former when a child group
1087 	 * gets throttled and the current group doesn't have any other
1088 	 * active members.
1089 	 */
1090 	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
1091 		return;
1092 
1093 	if (!rt_rq->rt_nr_running)
1094 		list_add_leaf_rt_rq(rt_rq);
1095 
1096 	if (head)
1097 		list_add(&rt_se->run_list, queue);
1098 	else
1099 		list_add_tail(&rt_se->run_list, queue);
1100 	__set_bit(rt_se_prio(rt_se), array->bitmap);
1101 
1102 	inc_rt_tasks(rt_se, rt_rq);
1103 }
1104 
1105 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
1106 {
1107 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1108 	struct rt_prio_array *array = &rt_rq->active;
1109 
1110 	list_del_init(&rt_se->run_list);
1111 	if (list_empty(array->queue + rt_se_prio(rt_se)))
1112 		__clear_bit(rt_se_prio(rt_se), array->bitmap);
1113 
1114 	dec_rt_tasks(rt_se, rt_rq);
1115 	if (!rt_rq->rt_nr_running)
1116 		list_del_leaf_rt_rq(rt_rq);
1117 }
1118 
1119 /*
1120  * Because the prio of an upper entry depends on the lower
1121  * entries, we must remove entries top-down.
1122  */
1123 static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
1124 {
1125 	struct sched_rt_entity *back = NULL;
1126 
1127 	for_each_sched_rt_entity(rt_se) {
1128 		rt_se->back = back;
1129 		back = rt_se;
1130 	}
1131 
1132 	for (rt_se = back; rt_se; rt_se = rt_se->back) {
1133 		if (on_rt_rq(rt_se))
1134 			__dequeue_rt_entity(rt_se);
1135 	}
1136 }
1137 
1138 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1139 {
1140 	dequeue_rt_stack(rt_se);
1141 	for_each_sched_rt_entity(rt_se)
1142 		__enqueue_rt_entity(rt_se, head);
1143 }
1144 
1145 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
1146 {
1147 	dequeue_rt_stack(rt_se);
1148 
1149 	for_each_sched_rt_entity(rt_se) {
1150 		struct rt_rq *rt_rq = group_rt_rq(rt_se);
1151 
1152 		if (rt_rq && rt_rq->rt_nr_running)
1153 			__enqueue_rt_entity(rt_se, false);
1154 	}
1155 }
1156 
1157 /*
1158  * Adding/removing a task to/from a priority array:
1159  */
1160 static void
1161 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1162 {
1163 	struct sched_rt_entity *rt_se = &p->rt;
1164 
1165 	if (flags & ENQUEUE_WAKEUP)
1166 		rt_se->timeout = 0;
1167 
1168 	enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
1169 
1170 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1171 		enqueue_pushable_task(rq, p);
1172 
1173 	inc_nr_running(rq);
1174 }
1175 
1176 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1177 {
1178 	struct sched_rt_entity *rt_se = &p->rt;
1179 
1180 	update_curr_rt(rq);
1181 	dequeue_rt_entity(rt_se);
1182 
1183 	dequeue_pushable_task(rq, p);
1184 
1185 	dec_nr_running(rq);
1186 }
1187 
1188 /*
1189  * Put the task at the head or the tail of the run list without the
1190  * overhead of a full dequeue followed by an enqueue.
1191  */
1192 static void
1193 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1194 {
1195 	if (on_rt_rq(rt_se)) {
1196 		struct rt_prio_array *array = &rt_rq->active;
1197 		struct list_head *queue = array->queue + rt_se_prio(rt_se);
1198 
1199 		if (head)
1200 			list_move(&rt_se->run_list, queue);
1201 		else
1202 			list_move_tail(&rt_se->run_list, queue);
1203 	}
1204 }
1205 
1206 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1207 {
1208 	struct sched_rt_entity *rt_se = &p->rt;
1209 	struct rt_rq *rt_rq;
1210 
1211 	for_each_sched_rt_entity(rt_se) {
1212 		rt_rq = rt_rq_of_se(rt_se);
1213 		requeue_rt_entity(rt_rq, rt_se, head);
1214 	}
1215 }
1216 
1217 static void yield_task_rt(struct rq *rq)
1218 {
1219 	requeue_task_rt(rq, rq->curr, 0);
1220 }
1221 
1222 #ifdef CONFIG_SMP
1223 static int find_lowest_rq(struct task_struct *task);
1224 
1225 static int
1226 select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
1227 {
1228 	struct task_struct *curr;
1229 	struct rq *rq;
1230 	int cpu;
1231 
1232 	cpu = task_cpu(p);
1233 
1234 	if (p->nr_cpus_allowed == 1)
1235 		goto out;
1236 
1237 	/* For anything but wake ups, just return the task_cpu */
1238 	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1239 		goto out;
1240 
1241 	rq = cpu_rq(cpu);
1242 
1243 	rcu_read_lock();
1244 	curr = ACCESS_ONCE(rq->curr); /* unlocked access */
1245 
1246 	/*
1247 	 * If the current task on @p's runqueue is an RT task, then
1248 	 * try to see if we can wake this RT task up on another
1249 	 * runqueue. Otherwise simply start this RT task
1250 	 * on its current runqueue.
1251 	 *
1252 	 * We want to avoid overloading runqueues. If the woken
1253 	 * task is of a higher priority, then it will stay on this CPU
1254 	 * and the lower prio task should be moved to another CPU.
1255 	 * Even though this will probably make the lower prio task
1256 	 * lose its cache, we do not want to bounce a higher task
1257 	 * around just because it gave up its CPU, perhaps for a
1258 	 * lock?
1259 	 *
1260 	 * For equal prio tasks, we just let the scheduler sort it out.
1261 	 *
1262 	 * Otherwise, just let it ride on the affined RQ and the
1263 	 * post-schedule router will push the preempted task away
1264 	 *
1265 	 * This test is optimistic, if we get it wrong the load-balancer
1266 	 * will have to sort it out.
1267 	 */
1268 	if (curr && unlikely(rt_task(curr)) &&
1269 	    (curr->nr_cpus_allowed < 2 ||
1270 	     curr->prio <= p->prio) &&
1271 	    (p->nr_cpus_allowed > 1)) {
1272 		int target = find_lowest_rq(p);
1273 
1274 		if (target != -1)
1275 			cpu = target;
1276 	}
1277 	rcu_read_unlock();
1278 
1279 out:
1280 	return cpu;
1281 }
1282 
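/*
 * An equal-priority task has been woken on this runqueue.  If current can
 * be pushed to another CPU but the woken task cannot (or has nowhere
 * better to go), requeue the woken task at the head and reschedule so the
 * push logic can move current out of the way.
 */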
1283 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1284 {
1285 	if (rq->curr->nr_cpus_allowed == 1)
1286 		return;
1287 
1288 	if (p->nr_cpus_allowed != 1
1289 	    && cpupri_find(&rq->rd->cpupri, p, NULL))
1290 		return;
1291 
1292 	if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1293 		return;
1294 
1295 	/*
1296 	 * There appear to be other CPUs that can accept
1297 	 * current and none to run 'p', so let's reschedule
1298 	 * to try and push current away:
1299 	 */
1300 	requeue_task_rt(rq, p, 1);
1301 	resched_task(rq->curr);
1302 }
1303 
1304 #endif /* CONFIG_SMP */
1305 
1306 /*
1307  * Preempt the current task with a newly woken task if needed:
1308  */
1309 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1310 {
1311 	if (p->prio < rq->curr->prio) {
1312 		resched_task(rq->curr);
1313 		return;
1314 	}
1315 
1316 #ifdef CONFIG_SMP
1317 	/*
1318 	 * If:
1319 	 *
1320 	 * - the newly woken task is of equal priority to the current task
1321 	 * - the newly woken task is non-migratable while current is migratable
1322 	 * - current will be preempted on the next reschedule
1323 	 *
1324 	 * we should check to see if current can readily move to a different
1325 	 * cpu.  If so, we will reschedule to allow the push logic to try
1326 	 * to move current somewhere else, making room for our non-migratable
1327 	 * task.
1328 	 */
1329 	if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1330 		check_preempt_equal_prio(rq, p);
1331 #endif
1332 }
1333 
1334 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1335 						   struct rt_rq *rt_rq)
1336 {
1337 	struct rt_prio_array *array = &rt_rq->active;
1338 	struct sched_rt_entity *next = NULL;
1339 	struct list_head *queue;
1340 	int idx;
1341 
1342 	idx = sched_find_first_bit(array->bitmap);
1343 	BUG_ON(idx >= MAX_RT_PRIO);
1344 
1345 	queue = array->queue + idx;
1346 	next = list_entry(queue->next, struct sched_rt_entity, run_list);
1347 
1348 	return next;
1349 }
1350 
1351 static struct task_struct *_pick_next_task_rt(struct rq *rq)
1352 {
1353 	struct sched_rt_entity *rt_se;
1354 	struct task_struct *p;
1355 	struct rt_rq *rt_rq;
1356 
1357 	rt_rq = &rq->rt;
1358 
1359 	if (!rt_rq->rt_nr_running)
1360 		return NULL;
1361 
1362 	if (rt_rq_throttled(rt_rq))
1363 		return NULL;
1364 
1365 	do {
1366 		rt_se = pick_next_rt_entity(rq, rt_rq);
1367 		BUG_ON(!rt_se);
1368 		rt_rq = group_rt_rq(rt_se);
1369 	} while (rt_rq);
1370 
1371 	p = rt_task_of(rt_se);
1372 	p->se.exec_start = rq->clock_task;
1373 
1374 	return p;
1375 }
1376 
1377 static struct task_struct *pick_next_task_rt(struct rq *rq)
1378 {
1379 	struct task_struct *p = _pick_next_task_rt(rq);
1380 
1381 	/* The running task is never eligible for pushing */
1382 	if (p)
1383 		dequeue_pushable_task(rq, p);
1384 
1385 #ifdef CONFIG_SMP
1386 	/*
1387 	 * We detect this state here so that we can avoid taking the RQ
1388 	 * lock again later if there is no need to push
1389 	 */
1390 	rq->post_schedule = has_pushable_tasks(rq);
1391 #endif
1392 
1393 	return p;
1394 }
1395 
1396 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1397 {
1398 	update_curr_rt(rq);
1399 
1400 	/*
1401 	 * The previous task needs to be made eligible for pushing
1402 	 * if it is still active
1403 	 */
1404 	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1405 		enqueue_pushable_task(rq, p);
1406 }
1407 
1408 #ifdef CONFIG_SMP
1409 
1410 /* Only try algorithms three times */
1411 #define RT_MAX_TRIES 3
1412 
1413 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1414 {
1415 	if (!task_running(rq, p) &&
1416 	    (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) &&
1417 	    (p->nr_cpus_allowed > 1))
1418 		return 1;
1419 	return 0;
1420 }
1421 
1422 /* Return the second highest RT task, NULL otherwise */
1423 static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
1424 {
1425 	struct task_struct *next = NULL;
1426 	struct sched_rt_entity *rt_se;
1427 	struct rt_prio_array *array;
1428 	struct rt_rq *rt_rq;
1429 	int idx;
1430 
1431 	for_each_leaf_rt_rq(rt_rq, rq) {
1432 		array = &rt_rq->active;
1433 		idx = sched_find_first_bit(array->bitmap);
1434 next_idx:
1435 		if (idx >= MAX_RT_PRIO)
1436 			continue;
1437 		if (next && next->prio <= idx)
1438 			continue;
1439 		list_for_each_entry(rt_se, array->queue + idx, run_list) {
1440 			struct task_struct *p;
1441 
1442 			if (!rt_entity_is_task(rt_se))
1443 				continue;
1444 
1445 			p = rt_task_of(rt_se);
1446 			if (pick_rt_task(rq, p, cpu)) {
1447 				next = p;
1448 				break;
1449 			}
1450 		}
1451 		if (!next) {
1452 			idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
1453 			goto next_idx;
1454 		}
1455 	}
1456 
1457 	return next;
1458 }
1459 
1460 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1461 
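/*
 * Find the best CPU for pushing @task: among the CPUs currently running
 * the lowest-priority work (as tracked by cpupri), prefer the task's last
 * CPU, then a cache-affine CPU, then anything usable.
 */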
1462 static int find_lowest_rq(struct task_struct *task)
1463 {
1464 	struct sched_domain *sd;
1465 	struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
1466 	int this_cpu = smp_processor_id();
1467 	int cpu      = task_cpu(task);
1468 
1469 	/* Make sure the mask is initialized first */
1470 	if (unlikely(!lowest_mask))
1471 		return -1;
1472 
1473 	if (task->nr_cpus_allowed == 1)
1474 		return -1; /* No other targets possible */
1475 
1476 	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1477 		return -1; /* No targets found */
1478 
1479 	/*
1480 	 * At this point we have built a mask of cpus representing the
1481 	 * lowest priority tasks in the system.  Now we want to elect
1482 	 * the best one based on our affinity and topology.
1483 	 *
1484 	 * We prioritize the last cpu that the task executed on since
1485 	 * it is most likely cache-hot in that location.
1486 	 */
1487 	if (cpumask_test_cpu(cpu, lowest_mask))
1488 		return cpu;
1489 
1490 	/*
1491 	 * Otherwise, we consult the sched_domains span maps to figure
1492 	 * out which cpu is logically closest to our hot cache data.
1493 	 */
1494 	if (!cpumask_test_cpu(this_cpu, lowest_mask))
1495 		this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1496 
1497 	rcu_read_lock();
1498 	for_each_domain(cpu, sd) {
1499 		if (sd->flags & SD_WAKE_AFFINE) {
1500 			int best_cpu;
1501 
1502 			/*
1503 			 * "this_cpu" is cheaper to preempt than a
1504 			 * remote processor.
1505 			 */
1506 			if (this_cpu != -1 &&
1507 			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1508 				rcu_read_unlock();
1509 				return this_cpu;
1510 			}
1511 
1512 			best_cpu = cpumask_first_and(lowest_mask,
1513 						     sched_domain_span(sd));
1514 			if (best_cpu < nr_cpu_ids) {
1515 				rcu_read_unlock();
1516 				return best_cpu;
1517 			}
1518 		}
1519 	}
1520 	rcu_read_unlock();
1521 
1522 	/*
1523 	 * And finally, if there were no matches within the domains
1524 	 * just give the caller *something* to work with from the compatible
1525 	 * locations.
1526 	 */
1527 	if (this_cpu != -1)
1528 		return this_cpu;
1529 
1530 	cpu = cpumask_any(lowest_mask);
1531 	if (cpu < nr_cpu_ids)
1532 		return cpu;
1533 	return -1;
1534 }
1535 
1536 /* Will lock the rq it finds */
1537 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1538 {
1539 	struct rq *lowest_rq = NULL;
1540 	int tries;
1541 	int cpu;
1542 
1543 	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1544 		cpu = find_lowest_rq(task);
1545 
1546 		if ((cpu == -1) || (cpu == rq->cpu))
1547 			break;
1548 
1549 		lowest_rq = cpu_rq(cpu);
1550 
1551 		/* if the prio of this runqueue changed, try again */
1552 		if (double_lock_balance(rq, lowest_rq)) {
1553 			/*
1554 			 * We had to unlock the run queue. In
1555 			 * the mean time, task could have
1556 			 * the meantime, the task could have
1557 			 * Also make sure that it wasn't scheduled on its rq.
1558 			 */
1559 			if (unlikely(task_rq(task) != rq ||
1560 				     !cpumask_test_cpu(lowest_rq->cpu,
1561 						       tsk_cpus_allowed(task)) ||
1562 				     task_running(rq, task) ||
1563 				     !task->on_rq)) {
1564 
1565 				double_unlock_balance(rq, lowest_rq);
1566 				lowest_rq = NULL;
1567 				break;
1568 			}
1569 		}
1570 
1571 		/* If this rq is still suitable use it. */
1572 		if (lowest_rq->rt.highest_prio.curr > task->prio)
1573 			break;
1574 
1575 		/* try again */
1576 		double_unlock_balance(rq, lowest_rq);
1577 		lowest_rq = NULL;
1578 	}
1579 
1580 	return lowest_rq;
1581 }
1582 
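/*
 * Return the highest-priority task on this runqueue's pushable list
 * (with sanity checks), or NULL if there is nothing to push.
 */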
1583 static struct task_struct *pick_next_pushable_task(struct rq *rq)
1584 {
1585 	struct task_struct *p;
1586 
1587 	if (!has_pushable_tasks(rq))
1588 		return NULL;
1589 
1590 	p = plist_first_entry(&rq->rt.pushable_tasks,
1591 			      struct task_struct, pushable_tasks);
1592 
1593 	BUG_ON(rq->cpu != task_cpu(p));
1594 	BUG_ON(task_current(rq, p));
1595 	BUG_ON(p->nr_cpus_allowed <= 1);
1596 
1597 	BUG_ON(!p->on_rq);
1598 	BUG_ON(!rt_task(p));
1599 
1600 	return p;
1601 }
1602 
1603 /*
1604  * If the current CPU has more than one RT task, see if the non-running
1605  * task can migrate over to a CPU that is running a task
1606  * of lesser priority.
1607  */
1608 static int push_rt_task(struct rq *rq)
1609 {
1610 	struct task_struct *next_task;
1611 	struct rq *lowest_rq;
1612 	int ret = 0;
1613 
1614 	if (!rq->rt.overloaded)
1615 		return 0;
1616 
1617 	next_task = pick_next_pushable_task(rq);
1618 	if (!next_task)
1619 		return 0;
1620 
1621 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
1622 	if (unlikely(task_running(rq, next_task)))
1623 		return 0;
1624 #endif
1625 
1626 retry:
1627 	if (unlikely(next_task == rq->curr)) {
1628 		WARN_ON(1);
1629 		return 0;
1630 	}
1631 
1632 	/*
1633 	 * It's possible that the next_task slipped in with a
1634 	 * higher priority than current. If that's the case,
1635 	 * just reschedule current.
1636 	 */
1637 	if (unlikely(next_task->prio < rq->curr->prio)) {
1638 		resched_task(rq->curr);
1639 		return 0;
1640 	}
1641 
1642 	/* We might release rq lock */
1643 	get_task_struct(next_task);
1644 
1645 	/* find_lock_lowest_rq locks the rq if found */
1646 	lowest_rq = find_lock_lowest_rq(next_task, rq);
1647 	if (!lowest_rq) {
1648 		struct task_struct *task;
1649 		/*
1650 		 * find_lock_lowest_rq releases rq->lock
1651 		 * so it is possible that next_task has migrated.
1652 		 *
1653 		 * We need to make sure that the task is still on the same
1654 		 * run-queue and is also still the next task eligible for
1655 		 * pushing.
1656 		 */
1657 		task = pick_next_pushable_task(rq);
1658 		if (task_cpu(next_task) == rq->cpu && task == next_task) {
1659 			/*
1660 			 * The task hasn't migrated, and is still the next
1661 			 * eligible task, but we failed to find a run-queue
1662 			 * to push it to.  Do not retry in this case, since
1663 			 * other cpus will pull from us when ready.
1664 			 */
1665 			goto out;
1666 		}
1667 
1668 		if (!task)
1669 			/* No more tasks, just exit */
1670 			goto out;
1671 
1672 		/*
1673 		 * Something has shifted, try again.
1674 		 */
1675 		put_task_struct(next_task);
1676 		next_task = task;
1677 		goto retry;
1678 	}
1679 
1680 	deactivate_task(rq, next_task, 0);
1681 	set_task_cpu(next_task, lowest_rq->cpu);
1682 	activate_task(lowest_rq, next_task, 0);
1683 	ret = 1;
1684 
1685 	resched_task(lowest_rq->curr);
1686 
1687 	double_unlock_balance(rq, lowest_rq);
1688 
1689 out:
1690 	put_task_struct(next_task);
1691 
1692 	return ret;
1693 }
1694 
1695 static void push_rt_tasks(struct rq *rq)
1696 {
1697 	/* push_rt_task will return true if it moved an RT task */
1698 	while (push_rt_task(rq))
1699 		;
1700 }
1701 
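/*
 * Scan the overloaded runqueues in our root domain and pull over any
 * queued RT task that has a higher priority than the highest-priority
 * task we currently have.  Returns 1 if at least one task was pulled.
 */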
1702 static int pull_rt_task(struct rq *this_rq)
1703 {
1704 	int this_cpu = this_rq->cpu, ret = 0, cpu;
1705 	struct task_struct *p;
1706 	struct rq *src_rq;
1707 
1708 	if (likely(!rt_overloaded(this_rq)))
1709 		return 0;
1710 
1711 	for_each_cpu(cpu, this_rq->rd->rto_mask) {
1712 		if (this_cpu == cpu)
1713 			continue;
1714 
1715 		src_rq = cpu_rq(cpu);
1716 
1717 		/*
1718 		 * Don't bother taking the src_rq->lock if the next highest
1719 		 * task is known to be lower-priority than our current task.
1720 		 * This may look racy, but if this value is about to go
1721 		 * logically higher, the src_rq will push this task away.
1722 		 * And if it's going logically lower, we do not care.
1723 		 */
1724 		if (src_rq->rt.highest_prio.next >=
1725 		    this_rq->rt.highest_prio.curr)
1726 			continue;
1727 
1728 		/*
1729 		 * We can potentially drop this_rq's lock in
1730 		 * double_lock_balance, and another CPU could
1731 		 * alter this_rq
1732 		 */
1733 		double_lock_balance(this_rq, src_rq);
1734 
1735 		/*
1736 		 * Are there still pullable RT tasks?
1737 		 */
1738 		if (src_rq->rt.rt_nr_running <= 1)
1739 			goto skip;
1740 
1741 		p = pick_next_highest_task_rt(src_rq, this_cpu);
1742 
1743 		/*
1744 		 * Do we have an RT task that preempts
1745 		 * the to-be-scheduled task?
1746 		 */
1747 		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
1748 			WARN_ON(p == src_rq->curr);
1749 			WARN_ON(!p->on_rq);
1750 
1751 			/*
1752 			 * There's a chance that p is higher in priority
1753 			 * than what's currently running on its cpu.
1754 			 * This is just that p is waking up and hasn't
1755 			 * had a chance to schedule. We only pull
1756 			 * p if it is lower in priority than the
1757 			 * current task on the run queue.
1758 			 */
1759 			if (p->prio < src_rq->curr->prio)
1760 				goto skip;
1761 
1762 			ret = 1;
1763 
1764 			deactivate_task(src_rq, p, 0);
1765 			set_task_cpu(p, this_cpu);
1766 			activate_task(this_rq, p, 0);
1767 			/*
1768 			 * We continue with the search, just in
1769 			 * case there's an even higher prio task
1770 			 * in another runqueue. (low likelihood
1771 			 * but possible)
1772 			 */
1773 		}
1774 skip:
1775 		double_unlock_balance(this_rq, src_rq);
1776 	}
1777 
1778 	return ret;
1779 }
1780 
1781 static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
1782 {
1783 	/* Try to pull RT tasks here if we lower this rq's prio */
1784 	if (rq->rt.highest_prio.curr > prev->prio)
1785 		pull_rt_task(rq);
1786 }
1787 
1788 static void post_schedule_rt(struct rq *rq)
1789 {
1790 	push_rt_tasks(rq);
1791 }
1792 
1793 /*
1794  * If we are not running and we are not going to reschedule soon, we should
1795  * try to push tasks away now.
1796  */
1797 static void task_woken_rt(struct rq *rq, struct task_struct *p)
1798 {
1799 	if (!task_running(rq, p) &&
1800 	    !test_tsk_need_resched(rq->curr) &&
1801 	    has_pushable_tasks(rq) &&
1802 	    p->nr_cpus_allowed > 1 &&
1803 	    rt_task(rq->curr) &&
1804 	    (rq->curr->nr_cpus_allowed < 2 ||
1805 	     rq->curr->prio <= p->prio))
1806 		push_rt_tasks(rq);
1807 }
1808 
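/*
 * The task's allowed-CPU mask is changing: if this flips whether it can
 * migrate at all, update the runqueue's migratory-task count and its
 * presence on the pushable list, then recompute the overload state.
 */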
1809 static void set_cpus_allowed_rt(struct task_struct *p,
1810 				const struct cpumask *new_mask)
1811 {
1812 	struct rq *rq;
1813 	int weight;
1814 
1815 	BUG_ON(!rt_task(p));
1816 
1817 	if (!p->on_rq)
1818 		return;
1819 
1820 	weight = cpumask_weight(new_mask);
1821 
1822 	/*
1823 	 * Only update if the process actually changes whether it can
1824 	 * migrate or not.
1825 	 */
1826 	if ((p->nr_cpus_allowed > 1) == (weight > 1))
1827 		return;
1828 
1829 	rq = task_rq(p);
1830 
1831 	/*
1832 	 * The process used to be able to migrate OR it can now migrate
1833 	 */
1834 	if (weight <= 1) {
1835 		if (!task_current(rq, p))
1836 			dequeue_pushable_task(rq, p);
1837 		BUG_ON(!rq->rt.rt_nr_migratory);
1838 		rq->rt.rt_nr_migratory--;
1839 	} else {
1840 		if (!task_current(rq, p))
1841 			enqueue_pushable_task(rq, p);
1842 		rq->rt.rt_nr_migratory++;
1843 	}
1844 
1845 	update_rt_migration(&rq->rt);
1846 }
1847 
1848 /* Assumes rq->lock is held */
1849 static void rq_online_rt(struct rq *rq)
1850 {
1851 	if (rq->rt.overloaded)
1852 		rt_set_overload(rq);
1853 
1854 	__enable_runtime(rq);
1855 
1856 	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1857 }
1858 
1859 /* Assumes rq->lock is held */
1860 static void rq_offline_rt(struct rq *rq)
1861 {
1862 	if (rq->rt.overloaded)
1863 		rt_clear_overload(rq);
1864 
1865 	__disable_runtime(rq);
1866 
1867 	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
1868 }
1869 
1870 /*
1871  * When switching away from the RT queue, we may end up in a position
1872  * where we want to pull RT tasks from other runqueues.
1873  */
1874 static void switched_from_rt(struct rq *rq, struct task_struct *p)
1875 {
1876 	/*
1877 	 * If there are other RT tasks then we will reschedule
1878 	 * and the scheduling of the other RT tasks will handle
1879 	 * the balancing. But if we are the last RT task
1880 	 * we may need to handle the pulling of RT tasks
1881 	 * now.
1882 	 */
1883 	if (p->on_rq && !rq->rt.rt_nr_running)
1884 		pull_rt_task(rq);
1885 }
1886 
1887 void init_sched_rt_class(void)
1888 {
1889 	unsigned int i;
1890 
1891 	for_each_possible_cpu(i) {
1892 		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
1893 					GFP_KERNEL, cpu_to_node(i));
1894 	}
1895 }
1896 #endif /* CONFIG_SMP */
1897 
1898 /*
1899  * When switching a task to RT, we may overload the runqueue
1900  * with RT tasks. In this case we try to push them off to
1901  * other runqueues.
1902  */
1903 static void switched_to_rt(struct rq *rq, struct task_struct *p)
1904 {
1905 	int check_resched = 1;
1906 
1907 	/*
1908 	 * If we are already running, then there's nothing
1909 	 * that needs to be done. But if we are not running
1910 	 * we may need to preempt the current running task.
1911 	 * If that current running task is also an RT task
1912 	 * then see if we can move to another run queue.
1913 	 */
1914 	if (p->on_rq && rq->curr != p) {
1915 #ifdef CONFIG_SMP
1916 		if (rq->rt.overloaded && push_rt_task(rq) &&
1917 		    /* Don't resched if we changed runqueues */
1918 		    rq != task_rq(p))
1919 			check_resched = 0;
1920 #endif /* CONFIG_SMP */
1921 		if (check_resched && p->prio < rq->curr->prio)
1922 			resched_task(rq->curr);
1923 	}
1924 }
1925 
1926 /*
1927  * Priority of the task has changed. This may cause
1928  * us to initiate a push or pull.
1929  */
1930 static void
1931 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
1932 {
1933 	if (!p->on_rq)
1934 		return;
1935 
1936 	if (rq->curr == p) {
1937 #ifdef CONFIG_SMP
1938 		/*
1939 		 * If our priority decreases while running, we
1940 		 * may need to pull tasks to this runqueue.
1941 		 */
1942 		if (oldprio < p->prio)
1943 			pull_rt_task(rq);
1944 		/*
1945 		 * If there's a higher priority task waiting to run
1946 		 * then reschedule. Note, the above pull_rt_task
1947 		 * can release the rq lock and p could migrate.
1948 		 * Only reschedule if p is still on the same runqueue.
1949 		 */
1950 		if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
1951 			resched_task(p);
1952 #else
1953 		/* For UP simply resched on drop of prio */
1954 		if (oldprio < p->prio)
1955 			resched_task(p);
1956 #endif /* CONFIG_SMP */
1957 	} else {
1958 		/*
1959 		 * This task is not running, but if it is
1960 		 * greater than the current running task
1961 		 * then reschedule.
1962 		 */
1963 		if (p->prio < rq->curr->prio)
1964 			resched_task(rq->curr);
1965 	}
1966 }
1967 
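/*
 * RLIMIT_RTTIME watchdog: count, in ticks, how long the task has run
 * without blocking; once the (soft) limit is exceeded, set
 * cputime_expires.sched_exp so the per-thread CPU-time code notices and
 * can act on it (typically by sending a signal).
 */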
1968 static void watchdog(struct rq *rq, struct task_struct *p)
1969 {
1970 	unsigned long soft, hard;
1971 
1972 	/* max may change after cur was read, this will be fixed next tick */
1973 	soft = task_rlimit(p, RLIMIT_RTTIME);
1974 	hard = task_rlimit_max(p, RLIMIT_RTTIME);
1975 
1976 	if (soft != RLIM_INFINITY) {
1977 		unsigned long next;
1978 
1979 		p->rt.timeout++;
1980 		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
1981 		if (p->rt.timeout > next)
1982 			p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
1983 	}
1984 }
1985 
1986 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
1987 {
1988 	struct sched_rt_entity *rt_se = &p->rt;
1989 
1990 	update_curr_rt(rq);
1991 
1992 	watchdog(rq, p);
1993 
1994 	/*
1995 	 * RR tasks need a special form of timeslice management.
1996 	 * FIFO tasks have no timeslices.
1997 	 */
1998 	if (p->policy != SCHED_RR)
1999 		return;
2000 
2001 	if (--p->rt.time_slice)
2002 		return;
2003 
2004 	p->rt.time_slice = RR_TIMESLICE;
2005 
2006 	/*
2007 	 * Requeue to the end of the queue if we (and all of our ancestors) are
2008 	 * the only element on the queue.
2009 	 */
2010 	for_each_sched_rt_entity(rt_se) {
2011 		if (rt_se->run_list.prev != rt_se->run_list.next) {
2012 			requeue_task_rt(rq, p, 0);
2013 			set_tsk_need_resched(p);
2014 			return;
2015 		}
2016 	}
2017 }
2018 
2019 static void set_curr_task_rt(struct rq *rq)
2020 {
2021 	struct task_struct *p = rq->curr;
2022 
2023 	p->se.exec_start = rq->clock_task;
2024 
2025 	/* The running task is never eligible for pushing */
2026 	dequeue_pushable_task(rq, p);
2027 }
2028 
2029 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2030 {
2031 	/*
2032 	 * Time slice is 0 for SCHED_FIFO tasks
2033 	 */
2034 	if (task->policy == SCHED_RR)
2035 		return RR_TIMESLICE;
2036 	else
2037 		return 0;
2038 }
2039 
2040 const struct sched_class rt_sched_class = {
2041 	.next			= &fair_sched_class,
2042 	.enqueue_task		= enqueue_task_rt,
2043 	.dequeue_task		= dequeue_task_rt,
2044 	.yield_task		= yield_task_rt,
2045 
2046 	.check_preempt_curr	= check_preempt_curr_rt,
2047 
2048 	.pick_next_task		= pick_next_task_rt,
2049 	.put_prev_task		= put_prev_task_rt,
2050 
2051 #ifdef CONFIG_SMP
2052 	.select_task_rq		= select_task_rq_rt,
2053 
2054 	.set_cpus_allowed       = set_cpus_allowed_rt,
2055 	.rq_online              = rq_online_rt,
2056 	.rq_offline             = rq_offline_rt,
2057 	.pre_schedule		= pre_schedule_rt,
2058 	.post_schedule		= post_schedule_rt,
2059 	.task_woken		= task_woken_rt,
2060 	.switched_from		= switched_from_rt,
2061 #endif
2062 
2063 	.set_curr_task          = set_curr_task_rt,
2064 	.task_tick		= task_tick_rt,
2065 
2066 	.get_rr_interval	= get_rr_interval_rt,
2067 
2068 	.prio_changed		= prio_changed_rt,
2069 	.switched_to		= switched_to_rt,
2070 };
2071 
2072 #ifdef CONFIG_SCHED_DEBUG
2073 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2074 
2075 void print_rt_stats(struct seq_file *m, int cpu)
2076 {
2077 	rt_rq_iter_t iter;
2078 	struct rt_rq *rt_rq;
2079 
2080 	rcu_read_lock();
2081 	for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2082 		print_rt_rq(m, cpu, rt_rq);
2083 	rcu_read_unlock();
2084 }
2085 #endif /* CONFIG_SCHED_DEBUG */
2086