xref: /linux/kernel/sched/deadline.c (revision 97f0b13452198290799fd6780f05fbaa74f927d3)
1 /*
2  * Deadline Scheduling Class (SCHED_DEADLINE)
3  *
4  * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
5  *
6  * Tasks that periodically execute their instances for less than their
7  * runtime won't miss any of their deadlines.
8  * Tasks that are not periodic or sporadic or that try to execute more
9  * than their reserved bandwidth will be slowed down (and may potentially
10  * miss some of their deadlines), and won't affect any other task.
11  *
12  * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
13  *                    Juri Lelli <juri.lelli@gmail.com>,
14  *                    Michael Trimarchi <michael@amarulasolutions.com>,
15  *                    Fabio Checconi <fchecconi@gmail.com>
16  */
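/*
 * Illustrative sketch only (the snippet and numbers below are an example,
 * not something this file defines): userspace typically requests this
 * policy through the sched_setattr() syscall (there is no glibc wrapper,
 * so it is usually invoked via syscall(__NR_sched_setattr, ...)), passing
 * the reservation in nanoseconds in struct sched_attr:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	= 10 * 1000 * 1000,
 *		.sched_deadline	= 30 * 1000 * 1000,
 *		.sched_period	= 30 * 1000 * 1000,
 *	};
 *
 * i.e. "10ms of runtime every 30ms, due within 30ms of each activation".
 * See Documentation/scheduler/sched-deadline.txt for the authoritative
 * description of these parameters.
 */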
17 #include "sched.h"
18 
19 #include <linux/slab.h>
20 
21 struct dl_bandwidth def_dl_bandwidth;
22 
23 static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
24 {
25 	return container_of(dl_se, struct task_struct, dl);
26 }
27 
28 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
29 {
30 	return container_of(dl_rq, struct rq, dl);
31 }
32 
33 static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
34 {
35 	struct task_struct *p = dl_task_of(dl_se);
36 	struct rq *rq = task_rq(p);
37 
38 	return &rq->dl;
39 }
40 
41 static inline int on_dl_rq(struct sched_dl_entity *dl_se)
42 {
43 	return !RB_EMPTY_NODE(&dl_se->rb_node);
44 }
45 
46 static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
47 {
48 	struct sched_dl_entity *dl_se = &p->dl;
49 
50 	return dl_rq->rb_leftmost == &dl_se->rb_node;
51 }
52 
53 void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
54 {
55 	raw_spin_lock_init(&dl_b->dl_runtime_lock);
56 	dl_b->dl_period = period;
57 	dl_b->dl_runtime = runtime;
58 }
59 
60 void init_dl_bw(struct dl_bw *dl_b)
61 {
62 	raw_spin_lock_init(&dl_b->lock);
63 	raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
64 	if (global_rt_runtime() == RUNTIME_INF)
65 		dl_b->bw = -1;
66 	else
67 		dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
68 	raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
69 	dl_b->total_bw = 0;
70 }
71 
72 void init_dl_rq(struct dl_rq *dl_rq)
73 {
74 	dl_rq->rb_root = RB_ROOT;
75 
76 #ifdef CONFIG_SMP
77 	/* zero means no -deadline tasks */
78 	dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
79 
80 	dl_rq->dl_nr_migratory = 0;
81 	dl_rq->overloaded = 0;
82 	dl_rq->pushable_dl_tasks_root = RB_ROOT;
83 #else
84 	init_dl_bw(&dl_rq->dl_bw);
85 #endif
86 }
87 
88 #ifdef CONFIG_SMP
89 
90 static inline int dl_overloaded(struct rq *rq)
91 {
92 	return atomic_read(&rq->rd->dlo_count);
93 }
94 
95 static inline void dl_set_overload(struct rq *rq)
96 {
97 	if (!rq->online)
98 		return;
99 
100 	cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
101 	/*
102 	 * The dlo_mask bit must be visible before the overload
103 	 * count is incremented (as in sched_rt.c).
104 	 *
105 	 * Matched by the barrier in pull_dl_task().
106 	 */
107 	smp_wmb();
108 	atomic_inc(&rq->rd->dlo_count);
109 }
110 
111 static inline void dl_clear_overload(struct rq *rq)
112 {
113 	if (!rq->online)
114 		return;
115 
116 	atomic_dec(&rq->rd->dlo_count);
117 	cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
118 }
119 
120 static void update_dl_migration(struct dl_rq *dl_rq)
121 {
122 	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
123 		if (!dl_rq->overloaded) {
124 			dl_set_overload(rq_of_dl_rq(dl_rq));
125 			dl_rq->overloaded = 1;
126 		}
127 	} else if (dl_rq->overloaded) {
128 		dl_clear_overload(rq_of_dl_rq(dl_rq));
129 		dl_rq->overloaded = 0;
130 	}
131 }
132 
133 static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
134 {
135 	struct task_struct *p = dl_task_of(dl_se);
136 
137 	if (p->nr_cpus_allowed > 1)
138 		dl_rq->dl_nr_migratory++;
139 
140 	update_dl_migration(dl_rq);
141 }
142 
143 static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
144 {
145 	struct task_struct *p = dl_task_of(dl_se);
146 
147 	if (p->nr_cpus_allowed > 1)
148 		dl_rq->dl_nr_migratory--;
149 
150 	update_dl_migration(dl_rq);
151 }
152 
153 /*
154  * The list of pushable -deadline tasks is not a plist, as in
155  * sched_rt.c; it is an rb-tree with tasks ordered by deadline.
156  */
157 static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
158 {
159 	struct dl_rq *dl_rq = &rq->dl;
160 	struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node;
161 	struct rb_node *parent = NULL;
162 	struct task_struct *entry;
163 	int leftmost = 1;
164 
165 	BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
166 
167 	while (*link) {
168 		parent = *link;
169 		entry = rb_entry(parent, struct task_struct,
170 				 pushable_dl_tasks);
171 		if (dl_entity_preempt(&p->dl, &entry->dl))
172 			link = &parent->rb_left;
173 		else {
174 			link = &parent->rb_right;
175 			leftmost = 0;
176 		}
177 	}
178 
179 	if (leftmost)
180 		dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;
181 
182 	rb_link_node(&p->pushable_dl_tasks, parent, link);
183 	rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
184 }
185 
186 static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
187 {
188 	struct dl_rq *dl_rq = &rq->dl;
189 
190 	if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
191 		return;
192 
193 	if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) {
194 		struct rb_node *next_node;
195 
196 		next_node = rb_next(&p->pushable_dl_tasks);
197 		dl_rq->pushable_dl_tasks_leftmost = next_node;
198 	}
199 
200 	rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
201 	RB_CLEAR_NODE(&p->pushable_dl_tasks);
202 }
203 
204 static inline int has_pushable_dl_tasks(struct rq *rq)
205 {
206 	return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root);
207 }
208 
209 static int push_dl_task(struct rq *rq);
210 
211 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
212 {
213 	return dl_task(prev);
214 }
215 
216 static inline void set_post_schedule(struct rq *rq)
217 {
218 	rq->post_schedule = has_pushable_dl_tasks(rq);
219 }
220 
221 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
222 
223 static void dl_task_offline_migration(struct rq *rq, struct task_struct *p)
224 {
225 	struct rq *later_rq = NULL;
226 	bool fallback = false;
227 
228 	later_rq = find_lock_later_rq(p, rq);
229 
230 	if (!later_rq) {
231 		int cpu;
232 
233 		/*
234 		 * If we cannot preempt any rq, fall back to pick any
235 		 * online cpu.
236 		 */
237 		fallback = true;
238 		cpu = cpumask_any_and(cpu_active_mask, tsk_cpus_allowed(p));
239 		if (cpu >= nr_cpu_ids) {
240 			/*
241 			 * Failed to find any suitable cpu.
242 			 * The task will never come back!
243 			 */
244 			BUG_ON(dl_bandwidth_enabled());
245 
246 			/*
247 			 * If admission control is disabled we
248 			 * try a little harder to let the task
249 			 * run.
250 			 */
251 			cpu = cpumask_any(cpu_active_mask);
252 		}
253 		later_rq = cpu_rq(cpu);
254 		double_lock_balance(rq, later_rq);
255 	}
256 
257 	deactivate_task(rq, p, 0);
258 	set_task_cpu(p, later_rq->cpu);
259 	activate_task(later_rq, p, ENQUEUE_REPLENISH);
260 
261 	if (!fallback)
262 		resched_curr(later_rq);
263 
264 	double_unlock_balance(rq, later_rq);
265 }
266 
267 #else
268 
269 static inline
270 void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
271 {
272 }
273 
274 static inline
275 void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
276 {
277 }
278 
279 static inline
280 void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
281 {
282 }
283 
284 static inline
285 void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
286 {
287 }
288 
289 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
290 {
291 	return false;
292 }
293 
294 static inline int pull_dl_task(struct rq *rq)
295 {
296 	return 0;
297 }
298 
299 static inline void set_post_schedule(struct rq *rq)
300 {
301 }
302 #endif /* CONFIG_SMP */
303 
304 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
305 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
306 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
307 				  int flags);
308 
309 /*
310  * We are being explicitly informed that a new instance is starting,
311  * and this means that:
312  *  - the absolute deadline of the entity has to be placed at
313  *    current time + relative deadline;
314  *  - the runtime of the entity has to be set to the maximum value.
315  *
316  * The ability to specify such an event is useful whenever a -deadline
317  * entity wants to (try to!) synchronize its behaviour with the
318  * scheduler's, and to (try to!) reconcile itself with its own scheduling
319  * parameters.
320  */
321 static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
322 				       struct sched_dl_entity *pi_se)
323 {
324 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
325 	struct rq *rq = rq_of_dl_rq(dl_rq);
326 
327 	WARN_ON(!dl_se->dl_new || dl_se->dl_throttled);
328 
329 	/*
330 	 * We use the regular wall clock time to set deadlines in the
331 	 * future; in fact, we must consider execution overheads (time
332 	 * spent on hardirq context, etc.).
333 	 */
334 	dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
335 	dl_se->runtime = pi_se->dl_runtime;
336 	dl_se->dl_new = 0;
337 }
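/*
 * Illustrative numbers only: a task with dl_deadline = 30ms whose new
 * instance starts when rq_clock() reads 2000ms gets an absolute deadline
 * of 2030ms and a full dl_runtime budget for this instance.
 */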
338 
339 /*
340  * Pure Earliest Deadline First (EDF) scheduling does not deal with the
341  * possibility of an entity lasting longer than it declared, and thus
342  * exhausting its runtime.
343  *
344  * Here we are interested in making runtime overruns possible, but we do
345  * not want a misbehaving entity to affect the scheduling of all the
346  * other entities.
347  * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
348  * is used, in order to confine each entity within its own bandwidth.
349  *
350  * This function deals exactly with that, and ensures that when the runtime
351  * of an entity is replenished, its deadline is also postponed. That ensures
352  * the overrunning entity can't interfere with the other entities in the
353  * system and can't make them miss their deadlines. Typical reasons for such
354  * an overrun are an entity voluntarily trying to exceed its runtime, or
355  * having underestimated it in sched_setattr().
356  */
357 static void replenish_dl_entity(struct sched_dl_entity *dl_se,
358 				struct sched_dl_entity *pi_se)
359 {
360 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
361 	struct rq *rq = rq_of_dl_rq(dl_rq);
362 
363 	BUG_ON(pi_se->dl_runtime <= 0);
364 
365 	/*
366 	 * This could be the case for a !-dl task that is boosted.
367 	 * Just go with full inherited parameters.
368 	 */
369 	if (dl_se->dl_deadline == 0) {
370 		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
371 		dl_se->runtime = pi_se->dl_runtime;
372 	}
373 
374 	/*
375 	 * We keep moving the deadline away until we get some
376 	 * available runtime for the entity. This ensures correct
377 	 * handling of situations where the runtime overrun is
378 	 * arbitrarily large.
379 	 */
380 	while (dl_se->runtime <= 0) {
381 		dl_se->deadline += pi_se->dl_period;
382 		dl_se->runtime += pi_se->dl_runtime;
383 	}
384 
385 	/*
386 	 * At this point, the deadline really should be "in
387 	 * the future" with respect to rq->clock. If it's
388 	 * not, we are, for some reason, lagging too much!
389 	 * Anyway, after having warned userspace about that,
390 	 * we still try to keep things running by
391 	 * resetting the deadline and the budget of the
392 	 * entity.
393 	 */
394 	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
395 		printk_deferred_once("sched: DL replenish lagged too much\n");
396 		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
397 		dl_se->runtime = pi_se->dl_runtime;
398 	}
399 
400 	if (dl_se->dl_yielded)
401 		dl_se->dl_yielded = 0;
402 	if (dl_se->dl_throttled)
403 		dl_se->dl_throttled = 0;
404 }
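/*
 * Worked example with made-up numbers: assume dl_runtime = 5ms and
 * dl_period = 20ms, and the entity reaches the loop above with
 * runtime = -7ms after an overrun. Two iterations bring the runtime to
 * -2ms and then +3ms, while the absolute deadline is pushed 2 * 20ms
 * further into the future: the overrun is paid for by delaying the
 * entity, not by stealing bandwidth from the other entities.
 */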
405 
406 /*
407  * Here we check if --at time t-- an entity (which is probably being
408  * [re]activated or, in general, enqueued) can use its remaining runtime
409  * and its current deadline _without_ exceeding the bandwidth it is
410  * assigned (function returns true if it can't). We are in fact applying
411  * one of the CBS rules: when a task wakes up, if the residual runtime
412  * over residual deadline fits within the allocated bandwidth, then we
413  * can keep the current (absolute) deadline and residual budget without
414  * disrupting the schedulability of the system. Otherwise, we should
415  * refill the runtime and set the deadline a period in the future,
416  * because keeping the current (absolute) deadline of the task would
417  * result in breaking guarantees promised to other tasks (refer to
418  * Documentation/scheduler/sched-deadline.txt for more information).
419  *
420  * This function returns true if:
421  *
422  *   runtime / (deadline - t) > dl_runtime / dl_period ,
423  *
424  * IOW we can't recycle current parameters.
425  *
426  * Notice that the bandwidth check is done against the period. For
427  * tasks with deadline equal to period this is the same as using
428  * dl_deadline instead of dl_period in the equation above.
429  */
430 static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
431 			       struct sched_dl_entity *pi_se, u64 t)
432 {
433 	u64 left, right;
434 
435 	/*
436 	 * left and right are the two sides of the equation above,
437 	 * after a bit of shuffling to use multiplications instead
438 	 * of divisions.
439 	 *
440 	 * Note that none of the time values involved in the two
441 	 * multiplications are absolute: dl_deadline and dl_runtime
442 	 * are the relative deadline and the maximum runtime of each
443 	 * instance, runtime is the runtime left for the last instance
444 	 * and (deadline - t), since t is rq->clock, is the time left
445 	 * to the (absolute) deadline. Even if overflowing the u64 type
446 	 * is very unlikely to occur in either case, here we scale down
447 	 * as we want to avoid that risk altogether. Scaling down by 10
448 	 * bits means that we reduce granularity to 1us. We are fine with it,
449 	 * since this is only a true/false check and, anyway, thinking
450 	 * of anything below microsecond resolution is actually fiction
451 	 * (but still we want to give the user that illusion >;).
452 	 */
453 	left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
454 	right = ((dl_se->deadline - t) >> DL_SCALE) *
455 		(pi_se->dl_runtime >> DL_SCALE);
456 
457 	return dl_time_before(right, left);
458 }
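/*
 * Worked example with made-up numbers: a task with dl_runtime = 10ms and
 * dl_period = 100ms (bandwidth 0.1) wakes up with runtime = 4ms left and
 * 20ms to go until its old absolute deadline. Its residual bandwidth,
 * 4ms / 20ms = 0.2, exceeds 0.1, so this function returns true and the
 * caller hands out a fresh deadline and a full runtime instead of
 * recycling the old pair.
 */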
459 
460 /*
461  * When a -deadline entity is queued back on the runqueue, its runtime and
462  * deadline might need updating.
463  *
464  * The policy here is that we update the deadline of the entity only if:
465  *  - the current deadline is in the past,
466  *  - using the remaining runtime with the current deadline would make
467  *    the entity exceed its bandwidth.
468  */
469 static void update_dl_entity(struct sched_dl_entity *dl_se,
470 			     struct sched_dl_entity *pi_se)
471 {
472 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
473 	struct rq *rq = rq_of_dl_rq(dl_rq);
474 
475 	/*
476 	 * The arrival of a new instance needs special treatment, i.e.,
477 	 * the actual scheduling parameters have to be "renewed".
478 	 */
479 	if (dl_se->dl_new) {
480 		setup_new_dl_entity(dl_se, pi_se);
481 		return;
482 	}
483 
484 	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
485 	    dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
486 		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
487 		dl_se->runtime = pi_se->dl_runtime;
488 	}
489 }
490 
491 /*
492  * If the entity depleted all its runtime, and if we want it to sleep
493  * while waiting for some new execution time to become available, we
494  * set the bandwidth enforcement timer to the replenishment instant
495  * and try to activate it.
496  *
497  * Notice that it is important for the caller to know if the timer
498  * actually started or not (i.e., the replenishment instant is in
499  * the future or in the past).
500  */
501 static int start_dl_timer(struct sched_dl_entity *dl_se, bool boosted)
502 {
503 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
504 	struct rq *rq = rq_of_dl_rq(dl_rq);
505 	ktime_t now, act;
506 	s64 delta;
507 
508 	if (boosted)
509 		return 0;
510 	/*
511 	 * We want the timer to fire at the deadline, but considering
512 	 * that the deadline is expressed in rq->clock time and not in
513 	 * the hrtimer's time base.
514 	 */
515 	act = ns_to_ktime(dl_se->deadline);
516 	now = hrtimer_cb_get_time(&dl_se->dl_timer);
517 	delta = ktime_to_ns(now) - rq_clock(rq);
518 	act = ktime_add_ns(act, delta);
519 
520 	/*
521 	 * If the expiry time already passed, e.g., because the value
522 	 * chosen as the deadline is too small, don't even try to
523 	 * start the timer in the past!
524 	 */
525 	if (ktime_us_delta(act, now) < 0)
526 		return 0;
527 
528 	hrtimer_start(&dl_se->dl_timer, act, HRTIMER_MODE_ABS);
529 
530 	return 1;
531 }
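/*
 * Illustrative numbers only: if rq_clock() currently reads 5,000,000ns
 * while the hrtimer base reads 5,000,300ns, then delta = 300ns and a
 * deadline of 8,000,000ns (expressed in rq_clock() terms) is armed as an
 * 8,000,300ns expiry in the hrtimer's own time base.
 */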
532 
533 /*
534  * This is the bandwidth enforcement timer callback. If here, we know
535  * a task is not on its dl_rq, since the fact that the timer was running
536  * means the task is throttled and needs a runtime replenishment.
537  *
538  * However, what we actually do depends on whether the task is still
539  * active (i.e., it is on its rq) or has been removed from there by a call to
540  * dequeue_task_dl(). In the former case we must issue the runtime
541  * replenishment and add the task back to the dl_rq; in the latter, we just
542  * do nothing but clear dl_throttled, so that runtime and deadline
543  * updating (and the queueing back to dl_rq) will be done by the
544  * next call to enqueue_task_dl().
545  */
546 static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
547 {
548 	struct sched_dl_entity *dl_se = container_of(timer,
549 						     struct sched_dl_entity,
550 						     dl_timer);
551 	struct task_struct *p = dl_task_of(dl_se);
552 	unsigned long flags;
553 	struct rq *rq;
554 
555 	rq = task_rq_lock(p, &flags);
556 
557 	/*
558 	 * We need to take care of several possible races here:
559 	 *
560 	 *   - the task might have changed its scheduling policy
561 	 *     to something different than SCHED_DEADLINE
562 	 *   - the task might have changed its reservation parameters
563 	 *     (through sched_setattr())
564 	 *   - the task might have been boosted by someone else and
565 	 *     might be in the boosting/deboosting path
566 	 *
567 	 * In all these cases we bail out, as the task is already
568 	 * in the runqueue or is going to be enqueued back anyway.
569 	 */
570 	if (!dl_task(p) || dl_se->dl_new ||
571 	    dl_se->dl_boosted || !dl_se->dl_throttled)
572 		goto unlock;
573 
574 	sched_clock_tick();
575 	update_rq_clock(rq);
576 
577 #ifdef CONFIG_SMP
578 	/*
579 	 * If we find that the rq the task was on is no longer
580 	 * available, we need to select a new rq.
581 	 */
582 	if (unlikely(!rq->online)) {
583 		dl_task_offline_migration(rq, p);
584 		goto unlock;
585 	}
586 #endif
587 
588 	/*
589 	 * If the throttle happened during sched-out, like:
590 	 *
591 	 *   schedule()
592 	 *     deactivate_task()
593 	 *       dequeue_task_dl()
594 	 *         update_curr_dl()
595 	 *           start_dl_timer()
596 	 *         __dequeue_task_dl()
597 	 *     prev->on_rq = 0;
598 	 *
599 	 * We can be both throttled and !queued. Replenish the counter
600 	 * but do not enqueue -- wait for our wakeup to do that.
601 	 */
602 	if (!task_on_rq_queued(p)) {
603 		replenish_dl_entity(dl_se, dl_se);
604 		goto unlock;
605 	}
606 
607 	enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
608 	if (dl_task(rq->curr))
609 		check_preempt_curr_dl(rq, p, 0);
610 	else
611 		resched_curr(rq);
612 #ifdef CONFIG_SMP
613 	/*
614 	 * Queueing this task back might have overloaded the rq;
615 	 * check if we need to kick someone away.
616 	 */
617 	if (has_pushable_dl_tasks(rq))
618 		push_dl_task(rq);
619 #endif
620 unlock:
621 	task_rq_unlock(rq, p, &flags);
622 
623 	return HRTIMER_NORESTART;
624 }
625 
626 void init_dl_task_timer(struct sched_dl_entity *dl_se)
627 {
628 	struct hrtimer *timer = &dl_se->dl_timer;
629 
630 	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
631 	timer->function = dl_task_timer;
632 }
633 
634 static
635 int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
636 {
637 	return (dl_se->runtime <= 0);
638 }
639 
640 extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
641 
642 /*
643  * Update the current task's runtime statistics (provided it is still
644  * a -deadline task and has not been removed from the dl_rq).
645  */
646 static void update_curr_dl(struct rq *rq)
647 {
648 	struct task_struct *curr = rq->curr;
649 	struct sched_dl_entity *dl_se = &curr->dl;
650 	u64 delta_exec;
651 
652 	if (!dl_task(curr) || !on_dl_rq(dl_se))
653 		return;
654 
655 	/*
656 	 * Consumed budget is computed considering the time as
657 	 * observed by schedulable tasks (excluding time spent
658 	 * in hardirq context, etc.). Deadlines are instead
659 	 * computed using hard walltime. This seems to be the more
660 	 * natural solution, but the full ramifications of this
661 	 * approach need further study.
662 	 */
663 	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
664 	if (unlikely((s64)delta_exec <= 0))
665 		return;
666 
667 	schedstat_set(curr->se.statistics.exec_max,
668 		      max(curr->se.statistics.exec_max, delta_exec));
669 
670 	curr->se.sum_exec_runtime += delta_exec;
671 	account_group_exec_runtime(curr, delta_exec);
672 
673 	curr->se.exec_start = rq_clock_task(rq);
674 	cpuacct_charge(curr, delta_exec);
675 
676 	sched_rt_avg_update(rq, delta_exec);
677 
678 	dl_se->runtime -= dl_se->dl_yielded ? 0 : delta_exec;
679 	if (dl_runtime_exceeded(dl_se)) {
680 		dl_se->dl_throttled = 1;
681 		__dequeue_task_dl(rq, curr, 0);
682 		if (unlikely(!start_dl_timer(dl_se, curr->dl.dl_boosted)))
683 			enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
684 
685 		if (!is_leftmost(curr, &rq->dl))
686 			resched_curr(rq);
687 	}
688 
689 	/*
690 	 * Because -- for now -- we share the rt bandwidth, we need to
691 	 * account our runtime there too, otherwise actual rt tasks
692 	 * would be able to exceed the shared quota.
693 	 *
694 	 * Account to the root rt group for now.
695 	 *
696 	 * The solution we're working towards is having the RT groups scheduled
697 	 * using deadline servers -- however there are a few nasties to figure
698 	 * out before that can happen.
699 	 */
700 	if (rt_bandwidth_enabled()) {
701 		struct rt_rq *rt_rq = &rq->rt;
702 
703 		raw_spin_lock(&rt_rq->rt_runtime_lock);
704 		/*
705 		 * We'll let actual RT tasks worry about the overflow here, we
706 		 * have our own CBS to keep us in line; only account when RT
707 		 * bandwidth is relevant.
708 		 */
709 		if (sched_rt_bandwidth_account(rt_rq))
710 			rt_rq->rt_time += delta_exec;
711 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
712 	}
713 }
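/*
 * Illustrative throttling scenario (made-up numbers, common non-boosted
 * path): a task with dl_runtime = 10ms that has executed 12ms of its
 * current instance reaches dl_runtime_exceeded() with runtime = -2ms.
 * It is marked dl_throttled, removed from the dl_rq, and its dl_timer is
 * armed at the current absolute deadline; when the timer fires,
 * dl_task_timer() replenishes the runtime (to 8ms here, with the deadline
 * pushed one period ahead) and puts the task back on the dl_rq.
 */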
714 
715 #ifdef CONFIG_SMP
716 
717 static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu);
718 
719 static inline u64 next_deadline(struct rq *rq)
720 {
721 	struct task_struct *next = pick_next_earliest_dl_task(rq, rq->cpu);
722 
723 	if (next && dl_prio(next->prio))
724 		return next->dl.deadline;
725 	else
726 		return 0;
727 }
728 
729 static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
730 {
731 	struct rq *rq = rq_of_dl_rq(dl_rq);
732 
733 	if (dl_rq->earliest_dl.curr == 0 ||
734 	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
735 		/*
736 		 * If the dl_rq had no -deadline tasks, or if the new task
737 		 * has a shorter deadline than the current one on dl_rq, we
738 		 * know that the previous earliest becomes our next earliest,
739 		 * as the new task becomes the earliest itself.
740 		 */
741 		dl_rq->earliest_dl.next = dl_rq->earliest_dl.curr;
742 		dl_rq->earliest_dl.curr = deadline;
743 		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
744 	} else if (dl_rq->earliest_dl.next == 0 ||
745 		   dl_time_before(deadline, dl_rq->earliest_dl.next)) {
746 		/*
747 		 * On the other hand, if the new -deadline task has a
748 		 * later deadline than the earliest one on dl_rq, but
749 		 * it is earlier than the next (if any), we must
750 		 * recompute the next-earliest.
751 		 */
752 		dl_rq->earliest_dl.next = next_deadline(rq);
753 	}
754 }
755 
756 static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
757 {
758 	struct rq *rq = rq_of_dl_rq(dl_rq);
759 
760 	/*
761 	 * Since we may have removed our earliest (and/or next earliest)
762 	 * task we must recompute them.
763 	 */
764 	if (!dl_rq->dl_nr_running) {
765 		dl_rq->earliest_dl.curr = 0;
766 		dl_rq->earliest_dl.next = 0;
767 		cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
768 	} else {
769 		struct rb_node *leftmost = dl_rq->rb_leftmost;
770 		struct sched_dl_entity *entry;
771 
772 		entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
773 		dl_rq->earliest_dl.curr = entry->deadline;
774 		dl_rq->earliest_dl.next = next_deadline(rq);
775 		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
776 	}
777 }
778 
779 #else
780 
781 static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
782 static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
783 
784 #endif /* CONFIG_SMP */
785 
786 static inline
787 void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
788 {
789 	int prio = dl_task_of(dl_se)->prio;
790 	u64 deadline = dl_se->deadline;
791 
792 	WARN_ON(!dl_prio(prio));
793 	dl_rq->dl_nr_running++;
794 	add_nr_running(rq_of_dl_rq(dl_rq), 1);
795 
796 	inc_dl_deadline(dl_rq, deadline);
797 	inc_dl_migration(dl_se, dl_rq);
798 }
799 
800 static inline
801 void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
802 {
803 	int prio = dl_task_of(dl_se)->prio;
804 
805 	WARN_ON(!dl_prio(prio));
806 	WARN_ON(!dl_rq->dl_nr_running);
807 	dl_rq->dl_nr_running--;
808 	sub_nr_running(rq_of_dl_rq(dl_rq), 1);
809 
810 	dec_dl_deadline(dl_rq, dl_se->deadline);
811 	dec_dl_migration(dl_se, dl_rq);
812 }
813 
814 static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
815 {
816 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
817 	struct rb_node **link = &dl_rq->rb_root.rb_node;
818 	struct rb_node *parent = NULL;
819 	struct sched_dl_entity *entry;
820 	int leftmost = 1;
821 
822 	BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
823 
824 	while (*link) {
825 		parent = *link;
826 		entry = rb_entry(parent, struct sched_dl_entity, rb_node);
827 		if (dl_time_before(dl_se->deadline, entry->deadline))
828 			link = &parent->rb_left;
829 		else {
830 			link = &parent->rb_right;
831 			leftmost = 0;
832 		}
833 	}
834 
835 	if (leftmost)
836 		dl_rq->rb_leftmost = &dl_se->rb_node;
837 
838 	rb_link_node(&dl_se->rb_node, parent, link);
839 	rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root);
840 
841 	inc_dl_tasks(dl_se, dl_rq);
842 }
843 
844 static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
845 {
846 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
847 
848 	if (RB_EMPTY_NODE(&dl_se->rb_node))
849 		return;
850 
851 	if (dl_rq->rb_leftmost == &dl_se->rb_node) {
852 		struct rb_node *next_node;
853 
854 		next_node = rb_next(&dl_se->rb_node);
855 		dl_rq->rb_leftmost = next_node;
856 	}
857 
858 	rb_erase(&dl_se->rb_node, &dl_rq->rb_root);
859 	RB_CLEAR_NODE(&dl_se->rb_node);
860 
861 	dec_dl_tasks(dl_se, dl_rq);
862 }
863 
864 static void
865 enqueue_dl_entity(struct sched_dl_entity *dl_se,
866 		  struct sched_dl_entity *pi_se, int flags)
867 {
868 	BUG_ON(on_dl_rq(dl_se));
869 
870 	/*
871 	 * If this is a wakeup or a new instance, the scheduling
872 	 * parameters of the task might need updating. Otherwise,
873 	 * we want a replenishment of its runtime.
874 	 */
875 	if (dl_se->dl_new || flags & ENQUEUE_WAKEUP)
876 		update_dl_entity(dl_se, pi_se);
877 	else if (flags & ENQUEUE_REPLENISH)
878 		replenish_dl_entity(dl_se, pi_se);
879 
880 	__enqueue_dl_entity(dl_se);
881 }
882 
883 static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
884 {
885 	__dequeue_dl_entity(dl_se);
886 }
887 
888 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
889 {
890 	struct task_struct *pi_task = rt_mutex_get_top_task(p);
891 	struct sched_dl_entity *pi_se = &p->dl;
892 
893 	/*
894 	 * Use the scheduling parameters of the top pi-waiter
895 	 * task if we have one and its (relative) deadline is
896 	 * smaller than ours; otherwise we keep our runtime and
897 	 * deadline.
898 	 */
899 	if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio)) {
900 		pi_se = &pi_task->dl;
901 	} else if (!dl_prio(p->normal_prio)) {
902 		/*
903 		 * Special case in which we have a !SCHED_DEADLINE task
904 		 * that is going to be deboosted, but exceeds its
905 		 * runtime while doing so. No point in replenishing
906 		 * it, as it's going to return to its original
907 		 * scheduling class after this.
908 		 */
909 		BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
910 		return;
911 	}
912 
913 	/*
914 	 * If p is throttled, we do nothing. In fact, if it exhausted
915 	 * its budget it needs a replenishment and, since it now is on
916 	 * its rq, the bandwidth timer callback (which clearly has not
917 	 * run yet) will take care of this.
918 	 */
919 	if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH))
920 		return;
921 
922 	enqueue_dl_entity(&p->dl, pi_se, flags);
923 
924 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
925 		enqueue_pushable_dl_task(rq, p);
926 }
927 
928 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
929 {
930 	dequeue_dl_entity(&p->dl);
931 	dequeue_pushable_dl_task(rq, p);
932 }
933 
934 static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
935 {
936 	update_curr_dl(rq);
937 	__dequeue_task_dl(rq, p, flags);
938 }
939 
940 /*
941  * The yield semantic for -deadline tasks is:
942  *
943  *   get off the CPU until our next instance, with
944  *   a new runtime. This is of little use now, since we
945  *   don't have a bandwidth reclaiming mechanism. Anyway,
946  *   bandwidth reclaiming is planned for the future, and
947  *   yield_task_dl will indicate that some spare budget
948  *   is available for other task instances to use.
949  */
950 static void yield_task_dl(struct rq *rq)
951 {
952 	struct task_struct *p = rq->curr;
953 
954 	/*
955 	 * We make the task go to sleep until its current deadline by
956 	 * forcing its runtime to zero. This way, update_curr_dl() stops
957 	 * it and the bandwidth timer will wake it up and will give it
958 	 * new scheduling parameters (thanks to dl_yielded=1).
959 	 */
960 	if (p->dl.runtime > 0) {
961 		rq->curr->dl.dl_yielded = 1;
962 		p->dl.runtime = 0;
963 	}
964 	update_rq_clock(rq);
965 	update_curr_dl(rq);
966 	/*
967 	 * Tell update_rq_clock() that we've just updated,
968 	 * so we don't do microscopic update in schedule()
969 	 * and double the fastpath cost.
970 	 */
971 	rq_clock_skip_update(rq, true);
972 }
973 
974 #ifdef CONFIG_SMP
975 
976 static int find_later_rq(struct task_struct *task);
977 
978 static int
979 select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
980 {
981 	struct task_struct *curr;
982 	struct rq *rq;
983 
984 	if (sd_flag != SD_BALANCE_WAKE)
985 		goto out;
986 
987 	rq = cpu_rq(cpu);
988 
989 	rcu_read_lock();
990 	curr = READ_ONCE(rq->curr); /* unlocked access */
991 
992 	/*
993 	 * If we are dealing with a -deadline task, we must
994 	 * decide where to wake it up.
995 	 * If it has a later deadline and the current task
996 	 * on this rq can't move (provided the waking task
997 	 * can!) we prefer to send it somewhere else. On the
998 	 * other hand, if it has a shorter deadline, we
999 	 * try to make it stay here; it might be important.
1000 	 */
1001 	if (unlikely(dl_task(curr)) &&
1002 	    (curr->nr_cpus_allowed < 2 ||
1003 	     !dl_entity_preempt(&p->dl, &curr->dl)) &&
1004 	    (p->nr_cpus_allowed > 1)) {
1005 		int target = find_later_rq(p);
1006 
1007 		if (target != -1 &&
1008 				dl_time_before(p->dl.deadline,
1009 					cpu_rq(target)->dl.earliest_dl.curr))
1010 			cpu = target;
1011 	}
1012 	rcu_read_unlock();
1013 
1014 out:
1015 	return cpu;
1016 }
1017 
1018 static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1019 {
1020 	/*
1021 	 * Current can't be migrated, useless to reschedule,
1022 	 * let's hope p can move out.
1023 	 */
1024 	if (rq->curr->nr_cpus_allowed == 1 ||
1025 	    cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
1026 		return;
1027 
1028 	/*
1029 	 * p is migratable, so let's not schedule it and
1030 	 * see if it is pushed or pulled somewhere else.
1031 	 */
1032 	if (p->nr_cpus_allowed != 1 &&
1033 	    cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
1034 		return;
1035 
1036 	resched_curr(rq);
1037 }
1038 
1039 static int pull_dl_task(struct rq *this_rq);
1040 
1041 #endif /* CONFIG_SMP */
1042 
1043 /*
1044  * Only called when both the current and waking task are -deadline
1045  * tasks.
1046  */
1047 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
1048 				  int flags)
1049 {
1050 	if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
1051 		resched_curr(rq);
1052 		return;
1053 	}
1054 
1055 #ifdef CONFIG_SMP
1056 	/*
1057 	 * In the unlikely case current and p have the same deadline
1058 	 * let us try to decide what's the best thing to do...
1059 	 */
1060 	if ((p->dl.deadline == rq->curr->dl.deadline) &&
1061 	    !test_tsk_need_resched(rq->curr))
1062 		check_preempt_equal_dl(rq, p);
1063 #endif /* CONFIG_SMP */
1064 }
1065 
1066 #ifdef CONFIG_SCHED_HRTICK
1067 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1068 {
1069 	hrtick_start(rq, p->dl.runtime);
1070 }
1071 #else /* !CONFIG_SCHED_HRTICK */
1072 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1073 {
1074 }
1075 #endif
1076 
1077 static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
1078 						   struct dl_rq *dl_rq)
1079 {
1080 	struct rb_node *left = dl_rq->rb_leftmost;
1081 
1082 	if (!left)
1083 		return NULL;
1084 
1085 	return rb_entry(left, struct sched_dl_entity, rb_node);
1086 }
1087 
1088 struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
1089 {
1090 	struct sched_dl_entity *dl_se;
1091 	struct task_struct *p;
1092 	struct dl_rq *dl_rq;
1093 
1094 	dl_rq = &rq->dl;
1095 
1096 	if (need_pull_dl_task(rq, prev)) {
1097 		pull_dl_task(rq);
1098 		/*
1099 		 * pull_dl_task() can drop (and re-acquire) rq->lock; this
1100 		 * means a stop task can slip in, in which case we need to
1101 		 * re-start task selection.
1102 		 */
1103 		if (rq->stop && task_on_rq_queued(rq->stop))
1104 			return RETRY_TASK;
1105 	}
1106 
1107 	/*
1108 	 * When prev is DL, we may throttle it in put_prev_task().
1109 	 * So, we update time before we check for dl_nr_running.
1110 	 */
1111 	if (prev->sched_class == &dl_sched_class)
1112 		update_curr_dl(rq);
1113 
1114 	if (unlikely(!dl_rq->dl_nr_running))
1115 		return NULL;
1116 
1117 	put_prev_task(rq, prev);
1118 
1119 	dl_se = pick_next_dl_entity(rq, dl_rq);
1120 	BUG_ON(!dl_se);
1121 
1122 	p = dl_task_of(dl_se);
1123 	p->se.exec_start = rq_clock_task(rq);
1124 
1125 	/* Running task will never be pushed. */
1126 	dequeue_pushable_dl_task(rq, p);
1127 
1128 	if (hrtick_enabled(rq))
1129 		start_hrtick_dl(rq, p);
1130 
1131 	set_post_schedule(rq);
1132 
1133 	return p;
1134 }
1135 
1136 static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
1137 {
1138 	update_curr_dl(rq);
1139 
1140 	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
1141 		enqueue_pushable_dl_task(rq, p);
1142 }
1143 
1144 static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
1145 {
1146 	update_curr_dl(rq);
1147 
1148 	/*
1149 	 * Even when we have runtime, update_curr_dl() might have resulted in us
1150 	 * not being the leftmost task anymore. In that case NEED_RESCHED will
1151 	 * be set and schedule() will start a new hrtick for the next task.
1152 	 */
1153 	if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
1154 	    is_leftmost(p, &rq->dl))
1155 		start_hrtick_dl(rq, p);
1156 }
1157 
1158 static void task_fork_dl(struct task_struct *p)
1159 {
1160 	/*
1161 	 * SCHED_DEADLINE tasks cannot fork; this is enforced in
1162 	 * sched_fork().
1163 	 */
1164 }
1165 
1166 static void task_dead_dl(struct task_struct *p)
1167 {
1168 	struct hrtimer *timer = &p->dl.dl_timer;
1169 	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1170 
1171 	/*
1172 	 * Since we are TASK_DEAD we won't slip out of the domain!
1173 	 */
1174 	raw_spin_lock_irq(&dl_b->lock);
1175 	/* XXX we should retain the bw until 0-lag */
1176 	dl_b->total_bw -= p->dl.dl_bw;
1177 	raw_spin_unlock_irq(&dl_b->lock);
1178 
1179 	hrtimer_cancel(timer);
1180 }
1181 
1182 static void set_curr_task_dl(struct rq *rq)
1183 {
1184 	struct task_struct *p = rq->curr;
1185 
1186 	p->se.exec_start = rq_clock_task(rq);
1187 
1188 	/* You can't push away the running task */
1189 	dequeue_pushable_dl_task(rq, p);
1190 }
1191 
1192 #ifdef CONFIG_SMP
1193 
1194 /* Only try algorithms three times */
1195 #define DL_MAX_TRIES 3
1196 
1197 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
1198 {
1199 	if (!task_running(rq, p) &&
1200 	    cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1201 		return 1;
1202 	return 0;
1203 }
1204 
1205 /* Returns the second earliest -deadline task, NULL otherwise */
1206 static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu)
1207 {
1208 	struct rb_node *next_node = rq->dl.rb_leftmost;
1209 	struct sched_dl_entity *dl_se;
1210 	struct task_struct *p = NULL;
1211 
1212 next_node:
1213 	next_node = rb_next(next_node);
1214 	if (next_node) {
1215 		dl_se = rb_entry(next_node, struct sched_dl_entity, rb_node);
1216 		p = dl_task_of(dl_se);
1217 
1218 		if (pick_dl_task(rq, p, cpu))
1219 			return p;
1220 
1221 		goto next_node;
1222 	}
1223 
1224 	return NULL;
1225 }
1226 
1227 /*
1228  * Return this rq's earliest pushable task that is suitable to be executed
1229  * on the given CPU, or NULL if there is none:
1230  */
1231 static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
1232 {
1233 	struct rb_node *next_node = rq->dl.pushable_dl_tasks_leftmost;
1234 	struct task_struct *p = NULL;
1235 
1236 	if (!has_pushable_dl_tasks(rq))
1237 		return NULL;
1238 
1239 next_node:
1240 	if (next_node) {
1241 		p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
1242 
1243 		if (pick_dl_task(rq, p, cpu))
1244 			return p;
1245 
1246 		next_node = rb_next(next_node);
1247 		goto next_node;
1248 	}
1249 
1250 	return NULL;
1251 }
1252 
1253 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
1254 
1255 static int find_later_rq(struct task_struct *task)
1256 {
1257 	struct sched_domain *sd;
1258 	struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
1259 	int this_cpu = smp_processor_id();
1260 	int best_cpu, cpu = task_cpu(task);
1261 
1262 	/* Make sure the mask is initialized first */
1263 	if (unlikely(!later_mask))
1264 		return -1;
1265 
1266 	if (task->nr_cpus_allowed == 1)
1267 		return -1;
1268 
1269 	/*
1270 	 * We have to consider system topology and task affinity
1271 	 * first, then we can look for a suitable cpu.
1272 	 */
1273 	best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
1274 			task, later_mask);
1275 	if (best_cpu == -1)
1276 		return -1;
1277 
1278 	/*
1279 	 * If we are here, some target has been found,
1280 	 * the most suitable of which is cached in best_cpu.
1281 	 * This is, among the runqueues where the current tasks
1282 	 * have later deadlines than the task's one, the rq
1283 	 * with the latest possible one.
1284 	 *
1285 	 * Now we check how well this matches with task's
1286 	 * affinity and system topology.
1287 	 *
1288 	 * The last cpu where the task ran is our first
1289 	 * guess, since it is most likely cache-hot there.
1290 	 */
1291 	if (cpumask_test_cpu(cpu, later_mask))
1292 		return cpu;
1293 	/*
1294 	 * Check if this_cpu is to be skipped (i.e., it is
1295 	 * not in the mask) or not.
1296 	 */
1297 	if (!cpumask_test_cpu(this_cpu, later_mask))
1298 		this_cpu = -1;
1299 
1300 	rcu_read_lock();
1301 	for_each_domain(cpu, sd) {
1302 		if (sd->flags & SD_WAKE_AFFINE) {
1303 
1304 			/*
1305 			 * If possible, preempting this_cpu is
1306 			 * cheaper than migrating.
1307 			 */
1308 			if (this_cpu != -1 &&
1309 			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1310 				rcu_read_unlock();
1311 				return this_cpu;
1312 			}
1313 
1314 			/*
1315 			 * Last chance: if best_cpu is valid and is
1316 			 * in the mask, that becomes our choice.
1317 			 */
1318 			if (best_cpu < nr_cpu_ids &&
1319 			    cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
1320 				rcu_read_unlock();
1321 				return best_cpu;
1322 			}
1323 		}
1324 	}
1325 	rcu_read_unlock();
1326 
1327 	/*
1328 	 * At this point, all our guesses failed, we just return
1329 	 * 'something', and let the caller sort the things out.
1330 	 */
1331 	if (this_cpu != -1)
1332 		return this_cpu;
1333 
1334 	cpu = cpumask_any(later_mask);
1335 	if (cpu < nr_cpu_ids)
1336 		return cpu;
1337 
1338 	return -1;
1339 }
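/*
 * To summarize the selection order used above: (1) the cpu the task last
 * ran on, if it is in later_mask (likely cache-hot); (2) this_cpu, when it
 * is in the mask and shares an SD_WAKE_AFFINE domain with that cpu;
 * (3) best_cpu as returned by cpudl_find(), under the same domain
 * condition; (4) this_cpu, if it is in the mask at all; (5) any remaining
 * cpu in later_mask; (6) -1 when nothing suitable is found.
 */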
1340 
1341 /* Locks the rq it finds */
1342 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
1343 {
1344 	struct rq *later_rq = NULL;
1345 	int tries;
1346 	int cpu;
1347 
1348 	for (tries = 0; tries < DL_MAX_TRIES; tries++) {
1349 		cpu = find_later_rq(task);
1350 
1351 		if ((cpu == -1) || (cpu == rq->cpu))
1352 			break;
1353 
1354 		later_rq = cpu_rq(cpu);
1355 
1356 		if (!dl_time_before(task->dl.deadline,
1357 					later_rq->dl.earliest_dl.curr)) {
1358 			/*
1359 			 * Target rq has tasks of equal or earlier deadline,
1360 			 * retrying does not release any lock and is unlikely
1361 			 * to yield a different result.
1362 			 */
1363 			later_rq = NULL;
1364 			break;
1365 		}
1366 
1367 		/* Retry if something changed. */
1368 		if (double_lock_balance(rq, later_rq)) {
1369 			if (unlikely(task_rq(task) != rq ||
1370 				     !cpumask_test_cpu(later_rq->cpu,
1371 				                       &task->cpus_allowed) ||
1372 				     task_running(rq, task) ||
1373 				     !task_on_rq_queued(task))) {
1374 				double_unlock_balance(rq, later_rq);
1375 				later_rq = NULL;
1376 				break;
1377 			}
1378 		}
1379 
1380 		/*
1381 		 * If the rq we found has no -deadline task, or
1382 		 * its earliest one has a later deadline than our
1383 		 * task, the rq is a good one.
1384 		 */
1385 		if (!later_rq->dl.dl_nr_running ||
1386 		    dl_time_before(task->dl.deadline,
1387 				   later_rq->dl.earliest_dl.curr))
1388 			break;
1389 
1390 		/* Otherwise we try again. */
1391 		double_unlock_balance(rq, later_rq);
1392 		later_rq = NULL;
1393 	}
1394 
1395 	return later_rq;
1396 }
1397 
1398 static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
1399 {
1400 	struct task_struct *p;
1401 
1402 	if (!has_pushable_dl_tasks(rq))
1403 		return NULL;
1404 
1405 	p = rb_entry(rq->dl.pushable_dl_tasks_leftmost,
1406 		     struct task_struct, pushable_dl_tasks);
1407 
1408 	BUG_ON(rq->cpu != task_cpu(p));
1409 	BUG_ON(task_current(rq, p));
1410 	BUG_ON(p->nr_cpus_allowed <= 1);
1411 
1412 	BUG_ON(!task_on_rq_queued(p));
1413 	BUG_ON(!dl_task(p));
1414 
1415 	return p;
1416 }
1417 
1418 /*
1419  * See if the non-running -deadline tasks on this rq
1420  * can be sent to some other CPU where they can preempt
1421  * and start executing.
1422  */
1423 static int push_dl_task(struct rq *rq)
1424 {
1425 	struct task_struct *next_task;
1426 	struct rq *later_rq;
1427 	int ret = 0;
1428 
1429 	if (!rq->dl.overloaded)
1430 		return 0;
1431 
1432 	next_task = pick_next_pushable_dl_task(rq);
1433 	if (!next_task)
1434 		return 0;
1435 
1436 retry:
1437 	if (unlikely(next_task == rq->curr)) {
1438 		WARN_ON(1);
1439 		return 0;
1440 	}
1441 
1442 	/*
1443 	 * If next_task preempts rq->curr, and rq->curr
1444 	 * can move away, it makes sense to just reschedule
1445 	 * without going further in pushing next_task.
1446 	 */
1447 	if (dl_task(rq->curr) &&
1448 	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
1449 	    rq->curr->nr_cpus_allowed > 1) {
1450 		resched_curr(rq);
1451 		return 0;
1452 	}
1453 
1454 	/* We might release rq lock */
1455 	get_task_struct(next_task);
1456 
1457 	/* Will lock the rq it'll find */
1458 	later_rq = find_lock_later_rq(next_task, rq);
1459 	if (!later_rq) {
1460 		struct task_struct *task;
1461 
1462 		/*
1463 		 * We must check all this again, since
1464 		 * find_lock_later_rq releases rq->lock and it is
1465 		 * then possible that next_task has migrated.
1466 		 */
1467 		task = pick_next_pushable_dl_task(rq);
1468 		if (task_cpu(next_task) == rq->cpu && task == next_task) {
1469 			/*
1470 			 * The task is still there. We don't try
1471 			 * again, some other cpu will pull it when ready.
1472 			 */
1473 			goto out;
1474 		}
1475 
1476 		if (!task)
1477 			/* No more tasks */
1478 			goto out;
1479 
1480 		put_task_struct(next_task);
1481 		next_task = task;
1482 		goto retry;
1483 	}
1484 
1485 	deactivate_task(rq, next_task, 0);
1486 	set_task_cpu(next_task, later_rq->cpu);
1487 	activate_task(later_rq, next_task, 0);
1488 	ret = 1;
1489 
1490 	resched_curr(later_rq);
1491 
1492 	double_unlock_balance(rq, later_rq);
1493 
1494 out:
1495 	put_task_struct(next_task);
1496 
1497 	return ret;
1498 }
1499 
1500 static void push_dl_tasks(struct rq *rq)
1501 {
1502 	/* Terminates as it moves a -deadline task */
1503 	while (push_dl_task(rq))
1504 		;
1505 }
1506 
1507 static int pull_dl_task(struct rq *this_rq)
1508 {
1509 	int this_cpu = this_rq->cpu, ret = 0, cpu;
1510 	struct task_struct *p;
1511 	struct rq *src_rq;
1512 	u64 dmin = LONG_MAX;
1513 
1514 	if (likely(!dl_overloaded(this_rq)))
1515 		return 0;
1516 
1517 	/*
1518 	 * Match the barrier from dl_set_overload(); this guarantees that if we
1519 	 * see overloaded we must also see the dlo_mask bit.
1520 	 */
1521 	smp_rmb();
1522 
1523 	for_each_cpu(cpu, this_rq->rd->dlo_mask) {
1524 		if (this_cpu == cpu)
1525 			continue;
1526 
1527 		src_rq = cpu_rq(cpu);
1528 
1529 		/*
1530 		 * It looks racy, and it is! However, as in sched_rt.c,
1531 		 * we are fine with this.
1532 		 */
1533 		if (this_rq->dl.dl_nr_running &&
1534 		    dl_time_before(this_rq->dl.earliest_dl.curr,
1535 				   src_rq->dl.earliest_dl.next))
1536 			continue;
1537 
1538 		/* Might drop this_rq->lock */
1539 		double_lock_balance(this_rq, src_rq);
1540 
1541 		/*
1542 		 * If there are no more pullable tasks on the
1543 		 * rq, we're done with it.
1544 		 */
1545 		if (src_rq->dl.dl_nr_running <= 1)
1546 			goto skip;
1547 
1548 		p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
1549 
1550 		/*
1551 		 * We found a task to be pulled if:
1552 		 *  - it preempts our current (if there's one),
1553 		 *  - it will preempt the last one we pulled (if any).
1554 		 */
1555 		if (p && dl_time_before(p->dl.deadline, dmin) &&
1556 		    (!this_rq->dl.dl_nr_running ||
1557 		     dl_time_before(p->dl.deadline,
1558 				    this_rq->dl.earliest_dl.curr))) {
1559 			WARN_ON(p == src_rq->curr);
1560 			WARN_ON(!task_on_rq_queued(p));
1561 
1562 			 * We only pull p if it does not have an earlier
1563 			 * deadline than the current task of its runqueue.
1564 			 * deadline than the current task of its runqueue.
1565 			 */
1566 			if (dl_time_before(p->dl.deadline,
1567 					   src_rq->curr->dl.deadline))
1568 				goto skip;
1569 
1570 			ret = 1;
1571 
1572 			deactivate_task(src_rq, p, 0);
1573 			set_task_cpu(p, this_cpu);
1574 			activate_task(this_rq, p, 0);
1575 			dmin = p->dl.deadline;
1576 
1577 			/* Is there any other task even earlier? */
1578 		}
1579 skip:
1580 		double_unlock_balance(this_rq, src_rq);
1581 	}
1582 
1583 	return ret;
1584 }
1585 
1586 static void post_schedule_dl(struct rq *rq)
1587 {
1588 	push_dl_tasks(rq);
1589 }
1590 
1591 /*
1592  * Since the task is not running and a reschedule is not going to happen
1593  * anytime soon on its runqueue, we try pushing it away now.
1594  */
1595 static void task_woken_dl(struct rq *rq, struct task_struct *p)
1596 {
1597 	if (!task_running(rq, p) &&
1598 	    !test_tsk_need_resched(rq->curr) &&
1599 	    has_pushable_dl_tasks(rq) &&
1600 	    p->nr_cpus_allowed > 1 &&
1601 	    dl_task(rq->curr) &&
1602 	    (rq->curr->nr_cpus_allowed < 2 ||
1603 	     !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
1604 		push_dl_tasks(rq);
1605 	}
1606 }
1607 
1608 static void set_cpus_allowed_dl(struct task_struct *p,
1609 				const struct cpumask *new_mask)
1610 {
1611 	struct rq *rq;
1612 	struct root_domain *src_rd;
1613 	int weight;
1614 
1615 	BUG_ON(!dl_task(p));
1616 
1617 	rq = task_rq(p);
1618 	src_rd = rq->rd;
1619 	/*
1620 	 * Migrating a SCHED_DEADLINE task between exclusive
1621 	 * cpusets (different root_domains) entails a bandwidth
1622 	 * update. We already made space for us in the destination
1623 	 * domain (see cpuset_can_attach()).
1624 	 */
1625 	if (!cpumask_intersects(src_rd->span, new_mask)) {
1626 		struct dl_bw *src_dl_b;
1627 
1628 		src_dl_b = dl_bw_of(cpu_of(rq));
1629 		/*
1630 		 * We now free resources of the root_domain we are migrating
1631 		 * off. In the worst case, sched_setattr() may temporarily fail
1632 		 * until we complete the update.
1633 		 */
1634 		raw_spin_lock(&src_dl_b->lock);
1635 		__dl_clear(src_dl_b, p->dl.dl_bw);
1636 		raw_spin_unlock(&src_dl_b->lock);
1637 	}
1638 
1639 	/*
1640 	 * Update only if the task is actually running (i.e.,
1641 	 * it is on the rq AND it is not throttled).
1642 	 */
1643 	if (!on_dl_rq(&p->dl))
1644 		return;
1645 
1646 	weight = cpumask_weight(new_mask);
1647 
1648 	/*
1649 	 * Only update if the task actually changes whether it
1650 	 * can migrate or not.
1651 	 */
1652 	if ((p->nr_cpus_allowed > 1) == (weight > 1))
1653 		return;
1654 
1655 	/*
1656 	 * The process used to be able to migrate OR it can now migrate
1657 	 */
1658 	if (weight <= 1) {
1659 		if (!task_current(rq, p))
1660 			dequeue_pushable_dl_task(rq, p);
1661 		BUG_ON(!rq->dl.dl_nr_migratory);
1662 		rq->dl.dl_nr_migratory--;
1663 	} else {
1664 		if (!task_current(rq, p))
1665 			enqueue_pushable_dl_task(rq, p);
1666 		rq->dl.dl_nr_migratory++;
1667 	}
1668 
1669 	update_dl_migration(&rq->dl);
1670 }
1671 
1672 /* Assumes rq->lock is held */
1673 static void rq_online_dl(struct rq *rq)
1674 {
1675 	if (rq->dl.overloaded)
1676 		dl_set_overload(rq);
1677 
1678 	cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
1679 	if (rq->dl.dl_nr_running > 0)
1680 		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
1681 }
1682 
1683 /* Assumes rq->lock is held */
1684 static void rq_offline_dl(struct rq *rq)
1685 {
1686 	if (rq->dl.overloaded)
1687 		dl_clear_overload(rq);
1688 
1689 	cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
1690 	cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
1691 }
1692 
1693 void __init init_sched_dl_class(void)
1694 {
1695 	unsigned int i;
1696 
1697 	for_each_possible_cpu(i)
1698 		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
1699 					GFP_KERNEL, cpu_to_node(i));
1700 }
1701 
1702 #endif /* CONFIG_SMP */
1703 
1704 /*
1705  *  Ensure p's dl_timer is cancelled. May drop rq->lock for a while.
1706  */
1707 static void cancel_dl_timer(struct rq *rq, struct task_struct *p)
1708 {
1709 	struct hrtimer *dl_timer = &p->dl.dl_timer;
1710 
1711 	/* Nobody will change task's class if pi_lock is held */
1712 	lockdep_assert_held(&p->pi_lock);
1713 
1714 	if (hrtimer_active(dl_timer)) {
1715 		int ret = hrtimer_try_to_cancel(dl_timer);
1716 
1717 		if (unlikely(ret == -1)) {
1718 			/*
1719 			 * Note, p may migrate OR new deadline tasks
1720 			 * may appear in rq when we are unlocking it.
1721 			 * A caller of us must be fine with that.
1722 			 * Our caller must be fine with that.
1723 			raw_spin_unlock(&rq->lock);
1724 			hrtimer_cancel(dl_timer);
1725 			raw_spin_lock(&rq->lock);
1726 		}
1727 	}
1728 }
1729 
1730 static void switched_from_dl(struct rq *rq, struct task_struct *p)
1731 {
1732 	/* XXX we should retain the bw until 0-lag */
1733 	cancel_dl_timer(rq, p);
1734 	__dl_clear_params(p);
1735 
1736 	/*
1737 	 * Since p might have been the last -deadline task on the rq,
1738 	 * this is the right place to try to pull some other one
1739 	 * from an overloaded cpu, if any.
1740 	 */
1741 	if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
1742 		return;
1743 
1744 	if (pull_dl_task(rq))
1745 		resched_curr(rq);
1746 }
1747 
1748 /*
1749  * When switching to -deadline, we may overload the rq, then
1750  * we try to push someone off, if possible.
1751  */
1752 static void switched_to_dl(struct rq *rq, struct task_struct *p)
1753 {
1754 	int check_resched = 1;
1755 
1756 	if (task_on_rq_queued(p) && rq->curr != p) {
1757 #ifdef CONFIG_SMP
1758 		if (p->nr_cpus_allowed > 1 && rq->dl.overloaded &&
1759 			push_dl_task(rq) && rq != task_rq(p))
1760 			/* Only reschedule if pushing failed */
1761 			check_resched = 0;
1762 #endif /* CONFIG_SMP */
1763 		if (check_resched) {
1764 			if (dl_task(rq->curr))
1765 				check_preempt_curr_dl(rq, p, 0);
1766 			else
1767 				resched_curr(rq);
1768 		}
1769 	}
1770 }
1771 
1772 /*
1773  * If the scheduling parameters of a -deadline task changed,
1774  * a push or pull operation might be needed.
1775  */
1776 static void prio_changed_dl(struct rq *rq, struct task_struct *p,
1777 			    int oldprio)
1778 {
1779 	if (task_on_rq_queued(p) || rq->curr == p) {
1780 #ifdef CONFIG_SMP
1781 		/*
1782 		 * This might be too much, but unfortunately
1783 		 * we don't have the old deadline value, and
1784 		 * we can't tell whether the task is increasing
1785 		 * or lowering its prio, so...
1786 		 */
1787 		if (!rq->dl.overloaded)
1788 			pull_dl_task(rq);
1789 
1790 		/*
1791 		 * If we now have an earlier deadline task than p,
1792 		 * then reschedule, provided p is still on this
1793 		 * runqueue.
1794 		 */
1795 		if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline) &&
1796 		    rq->curr == p)
1797 			resched_curr(rq);
1798 #else
1799 		/*
1800 		 * Again, we don't know if p has an earlier
1801 		 * or later deadline, so let's blindly set a
1802 		 * (maybe not needed) rescheduling point.
1803 		 */
1804 		resched_curr(rq);
1805 #endif /* CONFIG_SMP */
1806 	} else
1807 		switched_to_dl(rq, p);
1808 }
1809 
1810 const struct sched_class dl_sched_class = {
1811 	.next			= &rt_sched_class,
1812 	.enqueue_task		= enqueue_task_dl,
1813 	.dequeue_task		= dequeue_task_dl,
1814 	.yield_task		= yield_task_dl,
1815 
1816 	.check_preempt_curr	= check_preempt_curr_dl,
1817 
1818 	.pick_next_task		= pick_next_task_dl,
1819 	.put_prev_task		= put_prev_task_dl,
1820 
1821 #ifdef CONFIG_SMP
1822 	.select_task_rq		= select_task_rq_dl,
1823 	.set_cpus_allowed       = set_cpus_allowed_dl,
1824 	.rq_online              = rq_online_dl,
1825 	.rq_offline             = rq_offline_dl,
1826 	.post_schedule		= post_schedule_dl,
1827 	.task_woken		= task_woken_dl,
1828 #endif
1829 
1830 	.set_curr_task		= set_curr_task_dl,
1831 	.task_tick		= task_tick_dl,
1832 	.task_fork              = task_fork_dl,
1833 	.task_dead		= task_dead_dl,
1834 
1835 	.prio_changed           = prio_changed_dl,
1836 	.switched_from		= switched_from_dl,
1837 	.switched_to		= switched_to_dl,
1838 
1839 	.update_curr		= update_curr_dl,
1840 };
1841 
1842 #ifdef CONFIG_SCHED_DEBUG
1843 extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
1844 
1845 void print_dl_stats(struct seq_file *m, int cpu)
1846 {
1847 	print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
1848 }
1849 #endif /* CONFIG_SCHED_DEBUG */
1850