1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Deadline Scheduling Class (SCHED_DEADLINE)
4 *
5 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
6 *
 * Tasks that periodically execute their instances for less than their
 * runtime won't miss any of their deadlines.
 * Tasks that are not periodic or sporadic, or that try to execute more
 * than their reserved bandwidth, will be slowed down (and may potentially
 * miss some of their deadlines), and won't affect any other task.
12 *
13 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
14 * Juri Lelli <juri.lelli@gmail.com>,
15 * Michael Trimarchi <michael@amarulasolutions.com>,
16 * Fabio Checconi <fchecconi@gmail.com>
17 */
18
19 #include <linux/cpuset.h>
20 #include <linux/sched/clock.h>
21 #include <uapi/linux/sched/types.h>
22 #include "sched.h"
23 #include "pelt.h"
24
25 /*
26 * Default limits for DL period; on the top end we guard against small util
27 * tasks still getting ridiculously long effective runtimes, on the bottom end we
28 * guard against timer DoS.
29 */
30 static unsigned int sysctl_sched_dl_period_max = 1 << 22; /* ~4 seconds */
31 static unsigned int sysctl_sched_dl_period_min = 100; /* 100 us */
32 #ifdef CONFIG_SYSCTL
33 static const struct ctl_table sched_dl_sysctls[] = {
34 {
35 .procname = "sched_deadline_period_max_us",
36 .data = &sysctl_sched_dl_period_max,
37 .maxlen = sizeof(unsigned int),
38 .mode = 0644,
39 .proc_handler = proc_douintvec_minmax,
40 .extra1 = (void *)&sysctl_sched_dl_period_min,
41 },
42 {
43 .procname = "sched_deadline_period_min_us",
44 .data = &sysctl_sched_dl_period_min,
45 .maxlen = sizeof(unsigned int),
46 .mode = 0644,
47 .proc_handler = proc_douintvec_minmax,
48 .extra2 = (void *)&sysctl_sched_dl_period_max,
49 },
50 };
51
static int __init sched_dl_sysctl_init(void)
53 {
54 register_sysctl_init("kernel", sched_dl_sysctls);
55 return 0;
56 }
57 late_initcall(sched_dl_sysctl_init);
58 #endif /* CONFIG_SYSCTL */
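
/*
 * Illustrative usage (values made up for the example): with the default
 * sysctl_sched_dl_period_max of 1 << 22 us (~4.19 s), a sched_setattr()
 * request asking for a 10 s period is rejected when the parameters are
 * validated. Lowering the limit at run time, e.g.:
 *
 *   echo 1000000 > /proc/sys/kernel/sched_deadline_period_max_us
 *
 * additionally rejects any new reservation with a period above 1 s;
 * already admitted tasks keep their parameters.
 */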
59
static bool dl_server(struct sched_dl_entity *dl_se)
61 {
62 return dl_se->dl_server;
63 }
64
static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
66 {
67 BUG_ON(dl_server(dl_se));
68 return container_of(dl_se, struct task_struct, dl);
69 }
70
static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
72 {
73 return container_of(dl_rq, struct rq, dl);
74 }
75
static inline struct rq *rq_of_dl_se(struct sched_dl_entity *dl_se)
77 {
78 struct rq *rq = dl_se->rq;
79
80 if (!dl_server(dl_se))
81 rq = task_rq(dl_task_of(dl_se));
82
83 return rq;
84 }
85
static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
87 {
88 return &rq_of_dl_se(dl_se)->dl;
89 }
90
static inline int on_dl_rq(struct sched_dl_entity *dl_se)
92 {
93 return !RB_EMPTY_NODE(&dl_se->rb_node);
94 }
95
96 #ifdef CONFIG_RT_MUTEXES
static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
98 {
99 return dl_se->pi_se;
100 }
101
static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
103 {
104 return pi_of(dl_se) != dl_se;
105 }
106 #else /* !CONFIG_RT_MUTEXES: */
static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
108 {
109 return dl_se;
110 }
111
static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
113 {
114 return false;
115 }
116 #endif /* !CONFIG_RT_MUTEXES */
117
static inline struct dl_bw *dl_bw_of(int i)
119 {
120 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
121 "sched RCU must be held");
122 return &cpu_rq(i)->rd->dl_bw;
123 }
124
static inline int dl_bw_cpus(int i)
126 {
127 struct root_domain *rd = cpu_rq(i)->rd;
128
129 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
130 "sched RCU must be held");
131
132 return cpumask_weight_and(rd->span, cpu_active_mask);
133 }
134
static inline unsigned long __dl_bw_capacity(const struct cpumask *mask)
136 {
137 unsigned long cap = 0;
138 int i;
139
140 for_each_cpu_and(i, mask, cpu_active_mask)
141 cap += arch_scale_cpu_capacity(i);
142
143 return cap;
144 }
145
146 /*
147 * XXX Fix: If 'rq->rd == def_root_domain' perform AC against capacity
 * of the CPU the task is running on rather than rd's \Sum CPU capacity.
149 */
static inline unsigned long dl_bw_capacity(int i)
151 {
152 if (!sched_asym_cpucap_active() &&
153 arch_scale_cpu_capacity(i) == SCHED_CAPACITY_SCALE) {
154 return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT;
155 } else {
156 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
157 "sched RCU must be held");
158
159 return __dl_bw_capacity(cpu_rq(i)->rd->span);
160 }
161 }
162
bool dl_bw_visited(int cpu, u64 cookie)
164 {
165 struct root_domain *rd = cpu_rq(cpu)->rd;
166
167 if (rd->visit_cookie == cookie)
168 return true;
169
170 rd->visit_cookie = cookie;
171 return false;
172 }
173
174 static inline
void __dl_update(struct dl_bw *dl_b, s64 bw)
176 {
177 struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
178 int i;
179
180 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
181 "sched RCU must be held");
182 for_each_cpu_and(i, rd->span, cpu_active_mask) {
183 struct rq *rq = cpu_rq(i);
184
185 rq->dl.extra_bw += bw;
186 }
187 }
188
189 static inline
void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
191 {
192 dl_b->total_bw -= tsk_bw;
193 __dl_update(dl_b, (s32)tsk_bw / cpus);
194 }
195
196 static inline
void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
198 {
199 dl_b->total_bw += tsk_bw;
200 __dl_update(dl_b, -((s32)tsk_bw / cpus));
201 }
202
203 static inline bool
__dl_overflow(struct dl_bw *dl_b, unsigned long cap, u64 old_bw, u64 new_bw)
205 {
206 return dl_b->bw != -1 &&
207 cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw;
208 }
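
/*
 * A worked example with illustrative numbers: on a root domain of two
 * full-capacity CPUs, cap = 2 * SCHED_CAPACITY_SCALE = 2048, so with
 * dl_b->bw ~= 0.95 in BW_SHIFT fixed point the scaled limit is
 * cap_scale(0.95 << 20, 2048) ~= 1.90 << 20. A new reservation of 30 ms
 * every 100 ms (new_bw ~= 0.30 << 20) is admitted as long as
 * total_bw - old_bw + new_bw stays at or below that limit.
 */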
209
210 static inline
void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
212 {
213 u64 old = dl_rq->running_bw;
214
215 lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
216 dl_rq->running_bw += dl_bw;
217 WARN_ON_ONCE(dl_rq->running_bw < old); /* overflow */
218 WARN_ON_ONCE(dl_rq->running_bw > dl_rq->this_bw);
219 /* kick cpufreq (see the comment in kernel/sched/sched.h). */
220 cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
221 }
222
223 static inline
void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
225 {
226 u64 old = dl_rq->running_bw;
227
228 lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
229 dl_rq->running_bw -= dl_bw;
230 WARN_ON_ONCE(dl_rq->running_bw > old); /* underflow */
231 if (dl_rq->running_bw > old)
232 dl_rq->running_bw = 0;
233 /* kick cpufreq (see the comment in kernel/sched/sched.h). */
234 cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
235 }
236
237 static inline
void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
239 {
240 u64 old = dl_rq->this_bw;
241
242 lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
243 dl_rq->this_bw += dl_bw;
244 WARN_ON_ONCE(dl_rq->this_bw < old); /* overflow */
245 }
246
247 static inline
void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
249 {
250 u64 old = dl_rq->this_bw;
251
252 lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
253 dl_rq->this_bw -= dl_bw;
254 WARN_ON_ONCE(dl_rq->this_bw > old); /* underflow */
255 if (dl_rq->this_bw > old)
256 dl_rq->this_bw = 0;
257 WARN_ON_ONCE(dl_rq->running_bw > dl_rq->this_bw);
258 }
259
260 static inline
void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
262 {
263 if (!dl_entity_is_special(dl_se))
264 __add_rq_bw(dl_se->dl_bw, dl_rq);
265 }
266
267 static inline
void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
269 {
270 if (!dl_entity_is_special(dl_se))
271 __sub_rq_bw(dl_se->dl_bw, dl_rq);
272 }
273
274 static inline
void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
276 {
277 if (!dl_entity_is_special(dl_se))
278 __add_running_bw(dl_se->dl_bw, dl_rq);
279 }
280
281 static inline
void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
283 {
284 if (!dl_entity_is_special(dl_se))
285 __sub_running_bw(dl_se->dl_bw, dl_rq);
286 }
287
static void dl_rq_change_utilization(struct rq *rq, struct sched_dl_entity *dl_se, u64 new_bw)
289 {
290 if (dl_se->dl_non_contending) {
291 sub_running_bw(dl_se, &rq->dl);
292 dl_se->dl_non_contending = 0;
293
294 /*
295 * If the timer handler is currently running and the
296 * timer cannot be canceled, inactive_task_timer()
 * will see that dl_non_contending is not set, and
298 * will not touch the rq's active utilization,
299 * so we are still safe.
300 */
301 if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1) {
302 if (!dl_server(dl_se))
303 put_task_struct(dl_task_of(dl_se));
304 }
305 }
306 __sub_rq_bw(dl_se->dl_bw, &rq->dl);
307 __add_rq_bw(new_bw, &rq->dl);
308 }
309
310 static __always_inline
void cancel_dl_timer(struct sched_dl_entity *dl_se, struct hrtimer *timer)
312 {
313 /*
314 * If the timer callback was running (hrtimer_try_to_cancel == -1),
315 * it will eventually call put_task_struct().
316 */
317 if (hrtimer_try_to_cancel(timer) == 1 && !dl_server(dl_se))
318 put_task_struct(dl_task_of(dl_se));
319 }
320
321 static __always_inline
void cancel_replenish_timer(struct sched_dl_entity *dl_se)
323 {
324 cancel_dl_timer(dl_se, &dl_se->dl_timer);
325 }
326
327 static __always_inline
void cancel_inactive_timer(struct sched_dl_entity *dl_se)
329 {
330 cancel_dl_timer(dl_se, &dl_se->inactive_timer);
331 }
332
static void dl_change_utilization(struct task_struct *p, u64 new_bw)
334 {
335 WARN_ON_ONCE(p->dl.flags & SCHED_FLAG_SUGOV);
336
337 if (task_on_rq_queued(p))
338 return;
339
340 dl_rq_change_utilization(task_rq(p), &p->dl, new_bw);
341 }
342
343 static void __dl_clear_params(struct sched_dl_entity *dl_se);
344
345 /*
346 * The utilization of a task cannot be immediately removed from
347 * the rq active utilization (running_bw) when the task blocks.
348 * Instead, we have to wait for the so called "0-lag time".
349 *
350 * If a task blocks before the "0-lag time", a timer (the inactive
351 * timer) is armed, and running_bw is decreased when the timer
352 * fires.
353 *
354 * If the task wakes up again before the inactive timer fires,
355 * the timer is canceled, whereas if the task wakes up after the
356 * inactive timer fired (and running_bw has been decreased) the
357 * task's utilization has to be added to running_bw again.
358 * A flag in the deadline scheduling entity (dl_non_contending)
359 * is used to avoid race conditions between the inactive timer handler
360 * and task wakeups.
361 *
362 * The following diagram shows how running_bw is updated. A task is
363 * "ACTIVE" when its utilization contributes to running_bw; an
364 * "ACTIVE contending" task is in the TASK_RUNNING state, while an
365 * "ACTIVE non contending" task is a blocked task for which the "0-lag time"
366 * has not passed yet. An "INACTIVE" task is a task for which the "0-lag"
367 * time already passed, which does not contribute to running_bw anymore.
368 * +------------------+
369 * wakeup | ACTIVE |
370 * +------------------>+ contending |
371 * | add_running_bw | |
372 * | +----+------+------+
373 * | | ^
374 * | dequeue | |
375 * +--------+-------+ | |
376 * | | t >= 0-lag | | wakeup
377 * | INACTIVE |<---------------+ |
378 * | | sub_running_bw | |
379 * +--------+-------+ | |
380 * ^ | |
381 * | t < 0-lag | |
382 * | | |
383 * | V |
384 * | +----+------+------+
385 * | sub_running_bw | ACTIVE |
386 * +-------------------+ |
387 * inactive timer | non contending |
388 * fired +------------------+
389 *
390 * The task_non_contending() function is invoked when a task
391 * blocks, and checks if the 0-lag time already passed or
392 * not (in the first case, it directly updates running_bw;
393 * in the second case, it arms the inactive timer).
394 *
395 * The task_contending() function is invoked when a task wakes
396 * up, and checks if the task is still in the "ACTIVE non contending"
397 * state or not (in the second case, it updates running_bw).
398 */
static void task_non_contending(struct sched_dl_entity *dl_se, bool dl_task)
400 {
401 struct hrtimer *timer = &dl_se->inactive_timer;
402 struct rq *rq = rq_of_dl_se(dl_se);
403 struct dl_rq *dl_rq = &rq->dl;
404 s64 zerolag_time;
405
406 /*
407 * If this is a non-deadline task that has been boosted,
408 * do nothing
409 */
410 if (dl_se->dl_runtime == 0)
411 return;
412
413 if (dl_entity_is_special(dl_se))
414 return;
415
416 WARN_ON(dl_se->dl_non_contending);
417
418 zerolag_time = dl_se->deadline -
419 div64_long((dl_se->runtime * dl_se->dl_period),
420 dl_se->dl_runtime);
421
422 /*
 * Using relative times instead of the absolute "0-lag time"
 * allows us to simplify the code.
425 */
426 zerolag_time -= rq_clock(rq);
427
428 /*
429 * If the "0-lag time" already passed, decrease the active
430 * utilization now, instead of starting a timer
431 */
432 if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
433 if (dl_server(dl_se)) {
434 sub_running_bw(dl_se, dl_rq);
435 } else {
436 struct task_struct *p = dl_task_of(dl_se);
437
438 if (dl_task)
439 sub_running_bw(dl_se, dl_rq);
440
441 if (!dl_task || READ_ONCE(p->__state) == TASK_DEAD) {
442 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
443
444 if (READ_ONCE(p->__state) == TASK_DEAD)
445 sub_rq_bw(dl_se, &rq->dl);
446 raw_spin_lock(&dl_b->lock);
447 __dl_sub(dl_b, dl_se->dl_bw, dl_bw_cpus(task_cpu(p)));
448 raw_spin_unlock(&dl_b->lock);
449 __dl_clear_params(dl_se);
450 }
451 }
452
453 return;
454 }
455
456 dl_se->dl_non_contending = 1;
457 if (!dl_server(dl_se))
458 get_task_struct(dl_task_of(dl_se));
459
460 hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD);
461 }
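
/*
 * A worked example with illustrative parameters: a task with
 * dl_runtime = 10 ms and dl_period = 100 ms blocks with 4 ms of runtime
 * left and its absolute deadline 50 ms away. Its "0-lag time" is
 * deadline - (4 ms * 100 ms) / 10 ms = deadline - 40 ms, i.e. 10 ms from
 * now, so the inactive timer is armed for 10 ms and the task keeps
 * contributing to running_bw until the timer fires or it wakes up again.
 */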
462
static void task_contending(struct sched_dl_entity *dl_se, int flags)
464 {
465 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
466
467 /*
468 * If this is a non-deadline task that has been boosted,
469 * do nothing
470 */
471 if (dl_se->dl_runtime == 0)
472 return;
473
474 if (flags & ENQUEUE_MIGRATED)
475 add_rq_bw(dl_se, dl_rq);
476
477 if (dl_se->dl_non_contending) {
478 dl_se->dl_non_contending = 0;
479 /*
480 * If the timer handler is currently running and the
481 * timer cannot be canceled, inactive_task_timer()
 * will see that dl_non_contending is not set, and
483 * will not touch the rq's active utilization,
484 * so we are still safe.
485 */
486 cancel_inactive_timer(dl_se);
487 } else {
488 /*
489 * Since "dl_non_contending" is not set, the
490 * task's utilization has already been removed from
 * active utilization (either when the task blocked,
 * or when the "inactive timer" fired).
493 * So, add it back.
494 */
495 add_running_bw(dl_se, dl_rq);
496 }
497 }
498
static inline int is_leftmost(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
500 {
501 return rb_first_cached(&dl_rq->root) == &dl_se->rb_node;
502 }
503
504 static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);
505
void init_dl_bw(struct dl_bw *dl_b)
507 {
508 raw_spin_lock_init(&dl_b->lock);
509 if (global_rt_runtime() == RUNTIME_INF)
510 dl_b->bw = -1;
511 else
512 dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
513 dl_b->total_bw = 0;
514 }
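
/*
 * A worked example, assuming the default rt limits (not values defined in
 * this file): with global_rt_runtime() = 950000 us and
 * global_rt_period() = 1000000 us, to_ratio() yields
 * 0.95 * 2^BW_SHIFT = 0.95 * 2^20 ~= 996147, i.e. at most 95% of the
 * available bandwidth may be reserved by -deadline tasks. With
 * RUNTIME_INF, bw is set to -1 and admission control is disabled.
 */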
515
void init_dl_rq(struct dl_rq *dl_rq)
517 {
518 dl_rq->root = RB_ROOT_CACHED;
519
520 /* zero means no -deadline tasks */
521 dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
522
523 dl_rq->overloaded = 0;
524 dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
525
526 dl_rq->running_bw = 0;
527 dl_rq->this_bw = 0;
528 init_dl_rq_bw_ratio(dl_rq);
529 }
530
static inline int dl_overloaded(struct rq *rq)
532 {
533 return atomic_read(&rq->rd->dlo_count);
534 }
535
static inline void dl_set_overload(struct rq *rq)
537 {
538 if (!rq->online)
539 return;
540
541 cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
542 /*
543 * Must be visible before the overload count is
544 * set (as in sched_rt.c).
545 *
546 * Matched by the barrier in pull_dl_task().
547 */
548 smp_wmb();
549 atomic_inc(&rq->rd->dlo_count);
550 }
551
static inline void dl_clear_overload(struct rq *rq)
553 {
554 if (!rq->online)
555 return;
556
557 atomic_dec(&rq->rd->dlo_count);
558 cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
559 }
560
561 #define __node_2_pdl(node) \
562 rb_entry((node), struct task_struct, pushable_dl_tasks)
563
static inline bool __pushable_less(struct rb_node *a, const struct rb_node *b)
565 {
566 return dl_entity_preempt(&__node_2_pdl(a)->dl, &__node_2_pdl(b)->dl);
567 }
568
static inline int has_pushable_dl_tasks(struct rq *rq)
570 {
571 return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
572 }
573
574 /*
 * The list of pushable -deadline tasks is not a plist, like in
576 * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
577 */
static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
579 {
580 struct rb_node *leftmost;
581
582 WARN_ON_ONCE(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
583
584 leftmost = rb_add_cached(&p->pushable_dl_tasks,
585 &rq->dl.pushable_dl_tasks_root,
586 __pushable_less);
587 if (leftmost)
588 rq->dl.earliest_dl.next = p->dl.deadline;
589
590 if (!rq->dl.overloaded) {
591 dl_set_overload(rq);
592 rq->dl.overloaded = 1;
593 }
594 }
595
static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
597 {
598 struct dl_rq *dl_rq = &rq->dl;
599 struct rb_root_cached *root = &dl_rq->pushable_dl_tasks_root;
600 struct rb_node *leftmost;
601
602 if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
603 return;
604
605 leftmost = rb_erase_cached(&p->pushable_dl_tasks, root);
606 if (leftmost)
607 dl_rq->earliest_dl.next = __node_2_pdl(leftmost)->dl.deadline;
608
609 RB_CLEAR_NODE(&p->pushable_dl_tasks);
610
611 if (!has_pushable_dl_tasks(rq) && rq->dl.overloaded) {
612 dl_clear_overload(rq);
613 rq->dl.overloaded = 0;
614 }
615 }
616
617 static int push_dl_task(struct rq *rq);
618
static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
620 {
621 return rq->online && dl_task(prev);
622 }
623
624 static DEFINE_PER_CPU(struct balance_callback, dl_push_head);
625 static DEFINE_PER_CPU(struct balance_callback, dl_pull_head);
626
627 static void push_dl_tasks(struct rq *);
628 static void pull_dl_task(struct rq *);
629
static inline void deadline_queue_push_tasks(struct rq *rq)
631 {
632 if (!has_pushable_dl_tasks(rq))
633 return;
634
635 queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
636 }
637
static inline void deadline_queue_pull_task(struct rq *rq)
639 {
640 queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
641 }
642
643 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
644
static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
646 {
647 struct rq *later_rq = NULL;
648 struct dl_bw *dl_b;
649
650 later_rq = find_lock_later_rq(p, rq);
651 if (!later_rq) {
652 int cpu;
653
654 /*
655 * If we cannot preempt any rq, fall back to pick any
656 * online CPU:
657 */
658 cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
659 if (cpu >= nr_cpu_ids) {
660 /*
661 * Failed to find any suitable CPU.
662 * The task will never come back!
663 */
664 WARN_ON_ONCE(dl_bandwidth_enabled());
665
666 /*
667 * If admission control is disabled we
668 * try a little harder to let the task
669 * run.
670 */
671 cpu = cpumask_any(cpu_active_mask);
672 }
673 later_rq = cpu_rq(cpu);
674 double_lock_balance(rq, later_rq);
675 }
676
677 if (p->dl.dl_non_contending || p->dl.dl_throttled) {
678 /*
679 * Inactive timer is armed (or callback is running, but
680 * waiting for us to release rq locks). In any case, when it
 * fires (or continues), it will see running_bw of this
682 * task migrated to later_rq (and correctly handle it).
683 */
684 sub_running_bw(&p->dl, &rq->dl);
685 sub_rq_bw(&p->dl, &rq->dl);
686
687 add_rq_bw(&p->dl, &later_rq->dl);
688 add_running_bw(&p->dl, &later_rq->dl);
689 } else {
690 sub_rq_bw(&p->dl, &rq->dl);
691 add_rq_bw(&p->dl, &later_rq->dl);
692 }
693
694 /*
695 * And we finally need to fix up root_domain(s) bandwidth accounting,
696 * since p is still hanging out in the old (now moved to default) root
697 * domain.
698 */
699 dl_b = &rq->rd->dl_bw;
700 raw_spin_lock(&dl_b->lock);
701 __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
702 raw_spin_unlock(&dl_b->lock);
703
704 dl_b = &later_rq->rd->dl_bw;
705 raw_spin_lock(&dl_b->lock);
706 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
707 raw_spin_unlock(&dl_b->lock);
708
709 set_task_cpu(p, later_rq->cpu);
710 double_unlock_balance(later_rq, rq);
711
712 return later_rq;
713 }
714
715 static void
716 enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags);
717 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
718 static void dequeue_dl_entity(struct sched_dl_entity *dl_se, int flags);
719 static void wakeup_preempt_dl(struct rq *rq, struct task_struct *p, int flags);
720
static inline void replenish_dl_new_period(struct sched_dl_entity *dl_se,
722 struct rq *rq)
723 {
724 /* for non-boosted task, pi_of(dl_se) == dl_se */
725 dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
726 dl_se->runtime = pi_of(dl_se)->dl_runtime;
727
728 /*
729 * If it is a deferred reservation, and the server
 * is not handling a starvation case, defer it.
731 */
732 if (dl_se->dl_defer && !dl_se->dl_defer_running) {
733 dl_se->dl_throttled = 1;
734 dl_se->dl_defer_armed = 1;
735 }
736 }
737
738 /*
739 * We are being explicitly informed that a new instance is starting,
740 * and this means that:
741 * - the absolute deadline of the entity has to be placed at
742 * current time + relative deadline;
743 * - the runtime of the entity has to be set to the maximum value.
744 *
 * The ability to specify such an event is useful whenever a -deadline
 * entity wants to (try to!) synchronize its behaviour with the
 * scheduler's, and to (try to!) reconcile itself with its own scheduling
 * parameters.
749 */
static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
751 {
752 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
753 struct rq *rq = rq_of_dl_rq(dl_rq);
754
755 WARN_ON(is_dl_boosted(dl_se));
756 WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
757
758 /*
759 * We are racing with the deadline timer. So, do nothing because
760 * the deadline timer handler will take care of properly recharging
761 * the runtime and postponing the deadline
762 */
763 if (dl_se->dl_throttled)
764 return;
765
766 /*
767 * We use the regular wall clock time to set deadlines in the
768 * future; in fact, we must consider execution overheads (time
769 * spent on hardirq context, etc.).
770 */
771 replenish_dl_new_period(dl_se, rq);
772 }
773
774 static int start_dl_timer(struct sched_dl_entity *dl_se);
775 static bool dl_entity_overflow(struct sched_dl_entity *dl_se, u64 t);
776
777 /*
 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 * possibility of an entity lasting more than what it declared, and thus
 * exhausting its runtime.
 *
 * Here we are interested in making runtime overrun possible, but we do
 * not want an entity which is misbehaving to affect the scheduling of all
 * other entities.
 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 * is used, in order to confine each entity within its own bandwidth.
 *
 * This function deals exactly with that, and ensures that when the runtime
 * of an entity is replenished, its deadline is also postponed. That ensures
 * the overrunning entity can't interfere with other entities in the system
 * and can't make them miss their deadlines. Typical reasons why such
 * overruns occur are an entity voluntarily trying to exceed its runtime,
 * or having underestimated it during sched_setattr().
794 */
static void replenish_dl_entity(struct sched_dl_entity *dl_se)
796 {
797 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
798 struct rq *rq = rq_of_dl_rq(dl_rq);
799
800 WARN_ON_ONCE(pi_of(dl_se)->dl_runtime <= 0);
801
802 /*
803 * This could be the case for a !-dl task that is boosted.
804 * Just go with full inherited parameters.
805 *
806 * Or, it could be the case of a deferred reservation that
 * was not able to consume its runtime in the background and
808 * reached this point with current u > U.
809 *
810 * In both cases, set a new period.
811 */
812 if (dl_se->dl_deadline == 0 ||
813 (dl_se->dl_defer_armed && dl_entity_overflow(dl_se, rq_clock(rq)))) {
814 dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
815 dl_se->runtime = pi_of(dl_se)->dl_runtime;
816 }
817
818 if (dl_se->dl_yielded && dl_se->runtime > 0)
819 dl_se->runtime = 0;
820
821 /*
822 * We keep moving the deadline away until we get some
823 * available runtime for the entity. This ensures correct
824 * handling of situations where the runtime overrun is
 * arbitrarily large.
826 */
827 while (dl_se->runtime <= 0) {
828 dl_se->deadline += pi_of(dl_se)->dl_period;
829 dl_se->runtime += pi_of(dl_se)->dl_runtime;
830 }
831
832 /*
833 * At this point, the deadline really should be "in
834 * the future" with respect to rq->clock. If it's
835 * not, we are, for some reason, lagging too much!
 * Anyway, after having warned userspace about that,
 * we still try to keep things running by
 * resetting the deadline and the budget of the
839 * entity.
840 */
841 if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
842 printk_deferred_once("sched: DL replenish lagged too much\n");
843 replenish_dl_new_period(dl_se, rq);
844 }
845
846 if (dl_se->dl_yielded)
847 dl_se->dl_yielded = 0;
848 if (dl_se->dl_throttled)
849 dl_se->dl_throttled = 0;
850
851 /*
852 * If this is the replenishment of a deferred reservation,
853 * clear the flag and return.
854 */
855 if (dl_se->dl_defer_armed) {
856 dl_se->dl_defer_armed = 0;
857 return;
858 }
859
860 /*
 * At this point, if the deferred server is not armed, the deadline
 * is in the future, and it is not already running, throttle the server
863 * and arm the defer timer.
864 */
865 if (dl_se->dl_defer && !dl_se->dl_defer_running &&
866 dl_time_before(rq_clock(dl_se->rq), dl_se->deadline - dl_se->runtime)) {
867 if (!is_dl_boosted(dl_se)) {
868
869 /*
870 * Set dl_se->dl_defer_armed and dl_throttled variables to
871 * inform the start_dl_timer() that this is a deferred
872 * activation.
873 */
874 dl_se->dl_defer_armed = 1;
875 dl_se->dl_throttled = 1;
876 if (!start_dl_timer(dl_se)) {
877 /*
878 * If for whatever reason (delays), a previous timer was
879 * queued but not serviced, cancel it and clean the
880 * deferrable server variables intended for start_dl_timer().
881 */
882 hrtimer_try_to_cancel(&dl_se->dl_timer);
883 dl_se->dl_defer_armed = 0;
884 dl_se->dl_throttled = 0;
885 }
886 }
887 }
888 }
889
890 /*
891 * Here we check if --at time t-- an entity (which is probably being
892 * [re]activated or, in general, enqueued) can use its remaining runtime
893 * and its current deadline _without_ exceeding the bandwidth it is
894 * assigned (function returns true if it can't). We are in fact applying
895 * one of the CBS rules: when a task wakes up, if the residual runtime
896 * over residual deadline fits within the allocated bandwidth, then we
897 * can keep the current (absolute) deadline and residual budget without
898 * disrupting the schedulability of the system. Otherwise, we should
899 * refill the runtime and set the deadline a period in the future,
900 * because keeping the current (absolute) deadline of the task would
901 * result in breaking guarantees promised to other tasks (refer to
902 * Documentation/scheduler/sched-deadline.rst for more information).
903 *
904 * This function returns true if:
905 *
906 * runtime / (deadline - t) > dl_runtime / dl_deadline ,
907 *
908 * IOW we can't recycle current parameters.
909 *
910 * Notice that the bandwidth check is done against the deadline. For
 * a task with deadline equal to period this is the same as using
912 * dl_period instead of dl_deadline in the equation above.
913 */
static bool dl_entity_overflow(struct sched_dl_entity *dl_se, u64 t)
915 {
916 u64 left, right;
917
918 /*
919 * left and right are the two sides of the equation above,
920 * after a bit of shuffling to use multiplications instead
921 * of divisions.
922 *
923 * Note that none of the time values involved in the two
924 * multiplications are absolute: dl_deadline and dl_runtime
925 * are the relative deadline and the maximum runtime of each
926 * instance, runtime is the runtime left for the last instance
927 * and (deadline - t), since t is rq->clock, is the time left
928 * to the (absolute) deadline. Even if overflowing the u64 type
929 * is very unlikely to occur in both cases, here we scale down
930 * as we want to avoid that risk at all. Scaling down by 10
931 * means that we reduce granularity to 1us. We are fine with it,
932 * since this is only a true/false check and, anyway, thinking
 * of anything below microsecond resolution is actually fiction
934 * (but still we want to give the user that illusion >;).
935 */
936 left = (pi_of(dl_se)->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
937 right = ((dl_se->deadline - t) >> DL_SCALE) *
938 (pi_of(dl_se)->dl_runtime >> DL_SCALE);
939
940 return dl_time_before(right, left);
941 }
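
/*
 * A worked example with illustrative numbers: a task with
 * dl_runtime = 10 ms and dl_deadline = 100 ms (bandwidth 0.1) wakes up
 * with 8 ms of runtime left and 20 ms to its old absolute deadline.
 * Since 8 / 20 = 0.4 > 0.1, keeping the old deadline would exceed the
 * reserved bandwidth, so the function returns true and the parameters
 * are refreshed. Had it woken up 90 ms before the deadline
 * (8 / 90 < 0.1), the old deadline and runtime could have been reused.
 */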
942
943 /*
 * Revised wakeup rule [1]: For self-suspending tasks, rather than
 * re-initializing the task's runtime and deadline, the revised wakeup
 * rule adjusts the task's runtime to avoid the task overrunning its
 * density.
948 *
949 * Reasoning: a task may overrun the density if:
950 * runtime / (deadline - t) > dl_runtime / dl_deadline
951 *
952 * Therefore, runtime can be adjusted to:
953 * runtime = (dl_runtime / dl_deadline) * (deadline - t)
954 *
 * In this way, the runtime is the maximum amount the task can use
 * without breaking the density rule.
957 *
958 * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
959 * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
960 */
961 static void
update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
963 {
964 u64 laxity = dl_se->deadline - rq_clock(rq);
965
966 /*
967 * If the task has deadline < period, and the deadline is in the past,
968 * it should already be throttled before this check.
969 *
970 * See update_dl_entity() comments for further details.
971 */
972 WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
973
974 dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
975 }
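
/*
 * A worked example with illustrative numbers: a constrained task with
 * dl_runtime = 5 ms and dl_deadline = 10 ms (density 0.5) wakes up 4 ms
 * before its old deadline (laxity = 4 ms). Its runtime is trimmed to
 * 0.5 * 4 ms = 2 ms, so even running flat out until the deadline it
 * cannot exceed its declared density.
 */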
976
977 /*
978 * Regarding the deadline, a task with implicit deadline has a relative
979 * deadline == relative period. A task with constrained deadline has a
980 * relative deadline <= relative period.
981 *
982 * We support constrained deadline tasks. However, there are some restrictions
983 * applied only for tasks which do not have an implicit deadline. See
984 * update_dl_entity() to know more about such restrictions.
985 *
986 * The dl_is_implicit() returns true if the task has an implicit deadline.
987 */
static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
989 {
990 return dl_se->dl_deadline == dl_se->dl_period;
991 }
992
993 /*
994 * When a deadline entity is placed in the runqueue, its runtime and deadline
995 * might need to be updated. This is done by a CBS wake up rule. There are two
 * different rules: 1) the original CBS; and 2) the Revised CBS.
997 *
998 * When the task is starting a new period, the Original CBS is used. In this
999 * case, the runtime is replenished and a new absolute deadline is set.
1000 *
 * When a task is queued before the beginning of the next period, using the
 * remaining runtime and deadline could make the entity overflow, see
 * dl_entity_overflow() to find out more about runtime overflow. When such a
 * case is detected, the runtime and deadline need to be updated.
1005 *
1006 * If the task has an implicit deadline, i.e., deadline == period, the Original
1007 * CBS is applied. The runtime is replenished and a new absolute deadline is
1008 * set, as in the previous cases.
1009 *
1010 * However, the Original CBS does not work properly for tasks with
1011 * deadline < period, which are said to have a constrained deadline. By
1012 * applying the Original CBS, a constrained deadline task would be able to run
1013 * runtime/deadline in a period. With deadline < period, the task would
1014 * overrun the runtime/period allowed bandwidth, breaking the admission test.
1015 *
 * In order to prevent this misbehavior, the Revised CBS is used for
 * constrained deadline tasks when a runtime overflow is detected. In the
 * Revised CBS, rather than replenishing & setting a new absolute deadline,
 * the remaining runtime of the task is reduced to avoid runtime overflow.
 * Please refer to the comments of the update_dl_revised_wakeup() function
 * to find out more about the Revised CBS rule.
1022 */
static void update_dl_entity(struct sched_dl_entity *dl_se)
1024 {
1025 struct rq *rq = rq_of_dl_se(dl_se);
1026
1027 if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
1028 dl_entity_overflow(dl_se, rq_clock(rq))) {
1029
1030 if (unlikely(!dl_is_implicit(dl_se) &&
1031 !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1032 !is_dl_boosted(dl_se))) {
1033 update_dl_revised_wakeup(dl_se, rq);
1034 return;
1035 }
1036
1037 /*
1038 * When [4] D->A is followed by [1] A->B, dl_defer_running
1039 * needs to be cleared, otherwise it will fail to properly
1040 * start the zero-laxity timer.
1041 */
1042 dl_se->dl_defer_running = 0;
1043 replenish_dl_new_period(dl_se, rq);
1044 } else if (dl_server(dl_se) && dl_se->dl_defer) {
1045 /*
1046 * The server can still use its previous deadline, so check if
1047 * it left the dl_defer_running state.
1048 */
1049 if (!dl_se->dl_defer_running) {
1050 dl_se->dl_defer_armed = 1;
1051 dl_se->dl_throttled = 1;
1052 }
1053 }
1054 }
1055
static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
1057 {
1058 return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
1059 }
1060
1061 /*
1062 * If the entity depleted all its runtime, and if we want it to sleep
1063 * while waiting for some new execution time to become available, we
1064 * set the bandwidth replenishment timer to the replenishment instant
1065 * and try to activate it.
1066 *
1067 * Notice that it is important for the caller to know if the timer
1068 * actually started or not (i.e., the replenishment instant is in
1069 * the future or in the past).
1070 */
static int start_dl_timer(struct sched_dl_entity *dl_se)
1072 {
1073 struct hrtimer *timer = &dl_se->dl_timer;
1074 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1075 struct rq *rq = rq_of_dl_rq(dl_rq);
1076 ktime_t now, act;
1077 s64 delta;
1078
1079 lockdep_assert_rq_held(rq);
1080
1081 /*
1082 * We want the timer to fire at the deadline, but considering
1083 * that it is actually coming from rq->clock and not from
1084 * hrtimer's time base reading.
1085 *
1086 * The deferred reservation will have its timer set to
1087 * (deadline - runtime). At that point, the CBS rule will decide
1088 * if the current deadline can be used, or if a replenishment is
 * required to avoid adding too much pressure on the system
1090 * (current u > U).
1091 */
1092 if (dl_se->dl_defer_armed) {
1093 WARN_ON_ONCE(!dl_se->dl_throttled);
1094 act = ns_to_ktime(dl_se->deadline - dl_se->runtime);
1095 } else {
1096 /* act = deadline - rel-deadline + period */
1097 act = ns_to_ktime(dl_next_period(dl_se));
1098 }
1099
1100 now = hrtimer_cb_get_time(timer);
1101 delta = ktime_to_ns(now) - rq_clock(rq);
1102 act = ktime_add_ns(act, delta);
1103
1104 /*
1105 * If the expiry time already passed, e.g., because the value
1106 * chosen as the deadline is too small, don't even try to
1107 * start the timer in the past!
1108 */
1109 if (ktime_us_delta(act, now) < 0)
1110 return 0;
1111
1112 /*
1113 * !enqueued will guarantee another callback; even if one is already in
1114 * progress. This ensures a balanced {get,put}_task_struct().
1115 *
1116 * The race against __run_timer() clearing the enqueued state is
1117 * harmless because we're holding task_rq()->lock, therefore the timer
1118 * expiring after we've done the check will wait on its task_rq_lock()
1119 * and observe our state.
1120 */
1121 if (!hrtimer_is_queued(timer)) {
1122 if (!dl_server(dl_se))
1123 get_task_struct(dl_task_of(dl_se));
1124 hrtimer_start(timer, act, HRTIMER_MODE_ABS_HARD);
1125 }
1126
1127 return 1;
1128 }
1129
static void __push_dl_task(struct rq *rq, struct rq_flags *rf)
1131 {
1132 /*
1133 * Queueing this task back might have overloaded rq, check if we need
1134 * to kick someone away.
1135 */
1136 if (has_pushable_dl_tasks(rq)) {
1137 /*
 * Nothing relies on rq->lock after this, so it's safe to drop
1139 * rq->lock.
1140 */
1141 rq_unpin_lock(rq, rf);
1142 push_dl_task(rq);
1143 rq_repin_lock(rq, rf);
1144 }
1145 }
1146
1147 /* a defer timer will not be reset if the runtime consumed was < dl_server_min_res */
1148 static const u64 dl_server_min_res = 1 * NSEC_PER_MSEC;
1149
static enum hrtimer_restart dl_server_timer(struct hrtimer *timer, struct sched_dl_entity *dl_se)
1151 {
1152 struct rq *rq = rq_of_dl_se(dl_se);
1153 u64 fw;
1154
1155 scoped_guard (rq_lock, rq) {
1156 struct rq_flags *rf = &scope.rf;
1157
1158 if (!dl_se->dl_throttled || !dl_se->dl_runtime)
1159 return HRTIMER_NORESTART;
1160
1161 sched_clock_tick();
1162 update_rq_clock(rq);
1163
1164 /*
1165 * Make sure current has propagated its pending runtime into
1166 * any relevant server through calling dl_server_update() and
1167 * friends.
1168 */
1169 rq->donor->sched_class->update_curr(rq);
1170
1171 if (dl_se->dl_defer_idle) {
1172 dl_server_stop(dl_se);
1173 return HRTIMER_NORESTART;
1174 }
1175
1176 if (dl_se->dl_defer_armed) {
1177 /*
 * First check if the server could consume runtime in the background.
1179 * If so, it is possible to push the defer timer for this amount
1180 * of time. The dl_server_min_res serves as a limit to avoid
1181 * forwarding the timer for a too small amount of time.
1182 */
1183 if (dl_time_before(rq_clock(dl_se->rq),
1184 (dl_se->deadline - dl_se->runtime - dl_server_min_res))) {
1185
1186 /* reset the defer timer */
1187 fw = dl_se->deadline - rq_clock(dl_se->rq) - dl_se->runtime;
1188
1189 hrtimer_forward_now(timer, ns_to_ktime(fw));
1190 return HRTIMER_RESTART;
1191 }
1192
1193 dl_se->dl_defer_running = 1;
1194 }
1195
1196 enqueue_dl_entity(dl_se, ENQUEUE_REPLENISH);
1197
1198 if (!dl_task(dl_se->rq->curr) || dl_entity_preempt(dl_se, &dl_se->rq->curr->dl))
1199 resched_curr(rq);
1200
1201 __push_dl_task(rq, rf);
1202 }
1203
1204 return HRTIMER_NORESTART;
1205 }
1206
1207 /*
1208 * This is the bandwidth enforcement timer callback. If here, we know
1209 * a task is not on its dl_rq, since the fact that the timer was running
1210 * means the task is throttled and needs a runtime replenishment.
1211 *
 * However, what we actually do depends on whether the task is active
1213 * (it is on its rq) or has been removed from there by a call to
1214 * dequeue_task_dl(). In the former case we must issue the runtime
1215 * replenishment and add the task back to the dl_rq; in the latter, we just
1216 * do nothing but clearing dl_throttled, so that runtime and deadline
1217 * updating (and the queueing back to dl_rq) will be done by the
1218 * next call to enqueue_task_dl().
1219 */
static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
1221 {
1222 struct sched_dl_entity *dl_se = container_of(timer,
1223 struct sched_dl_entity,
1224 dl_timer);
1225 struct task_struct *p;
1226 struct rq_flags rf;
1227 struct rq *rq;
1228
1229 if (dl_server(dl_se))
1230 return dl_server_timer(timer, dl_se);
1231
1232 p = dl_task_of(dl_se);
1233 rq = task_rq_lock(p, &rf);
1234
1235 /*
1236 * The task might have changed its scheduling policy to something
1237 * different than SCHED_DEADLINE (through switched_from_dl()).
1238 */
1239 if (!dl_task(p))
1240 goto unlock;
1241
1242 /*
 * The task might have been boosted by someone else and might be in the
 * boosting/deboosting path, in which case it's not throttled.
1245 */
1246 if (is_dl_boosted(dl_se))
1247 goto unlock;
1248
1249 /*
1250 * Spurious timer due to start_dl_timer() race; or we already received
1251 * a replenishment from rt_mutex_setprio().
1252 */
1253 if (!dl_se->dl_throttled)
1254 goto unlock;
1255
1256 sched_clock_tick();
1257 update_rq_clock(rq);
1258
1259 /*
1260 * If the throttle happened during sched-out; like:
1261 *
1262 * schedule()
1263 * deactivate_task()
1264 * dequeue_task_dl()
1265 * update_curr_dl()
1266 * start_dl_timer()
1267 * __dequeue_task_dl()
1268 * prev->on_rq = 0;
1269 *
1270 * We can be both throttled and !queued. Replenish the counter
1271 * but do not enqueue -- wait for our wakeup to do that.
1272 */
1273 if (!task_on_rq_queued(p)) {
1274 replenish_dl_entity(dl_se);
1275 goto unlock;
1276 }
1277
1278 if (unlikely(!rq->online)) {
1279 /*
1280 * If the runqueue is no longer available, migrate the
1281 * task elsewhere. This necessarily changes rq.
1282 */
1283 lockdep_unpin_lock(__rq_lockp(rq), rf.cookie);
1284 rq = dl_task_offline_migration(rq, p);
1285 rf.cookie = lockdep_pin_lock(__rq_lockp(rq));
1286 update_rq_clock(rq);
1287
1288 /*
1289 * Now that the task has been migrated to the new RQ and we
1290 * have that locked, proceed as normal and enqueue the task
1291 * there.
1292 */
1293 }
1294
1295 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
1296 if (dl_task(rq->donor))
1297 wakeup_preempt_dl(rq, p, 0);
1298 else
1299 resched_curr(rq);
1300
1301 __push_dl_task(rq, &rf);
1302
1303 unlock:
1304 task_rq_unlock(rq, p, &rf);
1305
1306 /*
1307 * This can free the task_struct, including this hrtimer, do not touch
1308 * anything related to that after this.
1309 */
1310 put_task_struct(p);
1311
1312 return HRTIMER_NORESTART;
1313 }
1314
static void init_dl_task_timer(struct sched_dl_entity *dl_se)
1316 {
1317 struct hrtimer *timer = &dl_se->dl_timer;
1318
1319 hrtimer_setup(timer, dl_task_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
1320 }
1321
1322 /*
1323 * During the activation, CBS checks if it can reuse the current task's
1324 * runtime and period. If the deadline of the task is in the past, CBS
1325 * cannot use the runtime, and so it replenishes the task. This rule
1326 * works fine for implicit deadline tasks (deadline == period), and the
1327 * CBS was designed for implicit deadline tasks. However, a task with
1328 * constrained deadline (deadline < period) might be awakened after the
1329 * deadline, but before the next period. In this case, replenishing the
1330 * task would allow it to run for runtime / deadline. As in this case
1331 * deadline < period, CBS enables a task to run for more than the
1332 * runtime / period. In a very loaded system, this can cause a domino
1333 * effect, making other tasks miss their deadlines.
1334 *
1335 * To avoid this problem, in the activation of a constrained deadline
1336 * task after the deadline but before the next period, throttle the
 * task and set the replenishing timer to the beginning of the next period,
1338 * unless it is boosted.
1339 */
static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
1341 {
1342 struct rq *rq = rq_of_dl_se(dl_se);
1343
1344 if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1345 dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
1346 if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(dl_se)))
1347 return;
1348 dl_se->dl_throttled = 1;
1349 if (dl_se->runtime > 0)
1350 dl_se->runtime = 0;
1351 }
1352 }
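
/*
 * A worked example with illustrative parameters: a constrained task with
 * dl_deadline = 10 ms and dl_period = 100 ms, whose current absolute
 * deadline was at t = 10 ms, wakes up at t = 30 ms. The next period only
 * starts at dl_next_period() = 10 ms - 10 ms + 100 ms = 100 ms, so the
 * task is throttled, its leftover runtime is discarded, and the
 * replenishment timer is set to fire at t = 100 ms.
 */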
1353
1354 static
int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
1356 {
1357 return (dl_se->runtime <= 0);
1358 }
1359
1360 /*
1361 * This function implements the GRUB accounting rule. According to the
1362 * GRUB reclaiming algorithm, the runtime is not decreased as "dq = -dt",
1363 * but as "dq = -(max{u, (Umax - Uinact - Uextra)} / Umax) dt",
1364 * where u is the utilization of the task, Umax is the maximum reclaimable
1365 * utilization, Uinact is the (per-runqueue) inactive utilization, computed
1366 * as the difference between the "total runqueue utilization" and the
1367 * "runqueue active utilization", and Uextra is the (per runqueue) extra
1368 * reclaimable utilization.
1369 * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations multiplied
1370 * by 2^BW_SHIFT, the result has to be shifted right by BW_SHIFT.
1371 * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT, dl_bw
1372 * is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
1373 * Since delta is a 64 bit variable, to have an overflow its value should be
1374 * larger than 2^(64 - 20 - 8), which is more than 64 seconds. So, overflow is
1375 * not an issue here.
1376 */
static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
1378 {
1379 u64 u_act;
1380 u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
1381
1382 /*
1383 * Instead of computing max{u, (u_max - u_inact - u_extra)}, we
1384 * compare u_inact + u_extra with u_max - u, because u_inact + u_extra
1385 * can be larger than u_max. So, u_max - u_inact - u_extra would be
1386 * negative leading to wrong results.
1387 */
1388 if (u_inact + rq->dl.extra_bw > rq->dl.max_bw - dl_se->dl_bw)
1389 u_act = dl_se->dl_bw;
1390 else
1391 u_act = rq->dl.max_bw - u_inact - rq->dl.extra_bw;
1392
1393 u_act = (u_act * rq->dl.bw_ratio) >> RATIO_SHIFT;
1394 return (delta * u_act) >> BW_SHIFT;
1395 }
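
/*
 * A worked example with illustrative utilizations (stored in BW_SHIFT
 * fixed point, written here as fractions): max_bw = 0.95, the task's
 * dl_bw = 0.25, this_bw = 0.5 and running_bw = 0.4, so u_inact = 0.1;
 * assume extra_bw = 0.45. Since 0.1 + 0.45 = 0.55 does not exceed
 * 0.95 - 0.25 = 0.70, u_act = 0.95 - 0.1 - 0.45 = 0.4, and after the
 * division by Umax (bw_ratio ~= 1/0.95) the entity consumes only about
 * 0.42 ms of runtime per 1 ms it actually runs, i.e. it reclaims the
 * bandwidth left unused by the other reservations.
 */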
1396
s64 dl_scaled_delta_exec(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec)
1398 {
1399 s64 scaled_delta_exec;
1400
1401 /*
1402 * For tasks that participate in GRUB, we implement GRUB-PA: the
1403 * spare reclaimed bandwidth is used to clock down frequency.
1404 *
1405 * For the others, we still need to scale reservation parameters
1406 * according to current frequency and CPU maximum capacity.
1407 */
1408 if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
1409 scaled_delta_exec = grub_reclaim(delta_exec, rq, dl_se);
1410 } else {
1411 int cpu = cpu_of(rq);
1412 unsigned long scale_freq = arch_scale_freq_capacity(cpu);
1413 unsigned long scale_cpu = arch_scale_cpu_capacity(cpu);
1414
1415 scaled_delta_exec = cap_scale(delta_exec, scale_freq);
1416 scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
1417 }
1418
1419 return scaled_delta_exec;
1420 }
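
/*
 * A worked example for the non-reclaiming branch, with illustrative
 * values: on a CPU currently running at half of its maximum frequency
 * (scale_freq = 512) and with full capacity (scale_cpu = 1024), 1 ms of
 * observed execution is accounted as 1 ms * 512/1024 * 1024/1024 = 0.5 ms
 * of consumed runtime, since the reservation parameters are defined with
 * respect to the CPU running at maximum frequency and capacity.
 */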
1421
1422 static inline void
1423 update_stats_dequeue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se, int flags);
1424
static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec)
1426 {
1427 bool idle = idle_rq(rq);
1428 s64 scaled_delta_exec;
1429
1430 if (unlikely(delta_exec <= 0)) {
1431 if (unlikely(dl_se->dl_yielded))
1432 goto throttle;
1433 return;
1434 }
1435
1436 if (dl_server(dl_se) && dl_se->dl_throttled && !dl_se->dl_defer)
1437 return;
1438
1439 if (dl_entity_is_special(dl_se))
1440 return;
1441
1442 scaled_delta_exec = delta_exec;
1443 if (!dl_server(dl_se))
1444 scaled_delta_exec = dl_scaled_delta_exec(rq, dl_se, delta_exec);
1445
1446 dl_se->runtime -= scaled_delta_exec;
1447
1448 if (dl_se->dl_defer_idle && !idle)
1449 dl_se->dl_defer_idle = 0;
1450
1451 /*
1452 * The DL server can consume its runtime while throttled (not
1453 * queued / running as regular CFS).
1454 *
 * If the server consumes its entire runtime in this state, it
 * is not required for the current period. Thus, reset the server by
 * starting a new period, pushing the activation forward.
1458 */
1459 if (dl_se->dl_defer && dl_se->dl_throttled && dl_runtime_exceeded(dl_se)) {
1460 /*
1461 * Non-servers would never get time accounted while throttled.
1462 */
1463 WARN_ON_ONCE(!dl_server(dl_se));
1464
1465 /*
1466 * While the server is marked idle, do not push out the
1467 * activation further, instead wait for the period timer
1468 * to lapse and stop the server.
1469 */
1470 if (dl_se->dl_defer_idle && idle) {
1471 /*
1472 * The timer is at the zero-laxity point, this means
1473 * dl_server_stop() / dl_server_start() can happen
1474 * while now < deadline. This means update_dl_entity()
1475 * will not replenish. Additionally start_dl_timer()
1476 * will be set for 'deadline - runtime'. Negative
1477 * runtime will not do.
1478 */
1479 dl_se->runtime = 0;
1480 return;
1481 }
1482
1483 /*
 * If the server was previously activated - the starving condition
 * took place - at this point it went away because the fair scheduler
 * was able to get runtime in the background. So return to the initial
1487 * state.
1488 */
1489 dl_se->dl_defer_running = 0;
1490
1491 hrtimer_try_to_cancel(&dl_se->dl_timer);
1492
1493 replenish_dl_new_period(dl_se, dl_se->rq);
1494
1495 if (idle)
1496 dl_se->dl_defer_idle = 1;
1497
1498 /*
1499 * Not being able to start the timer seems problematic. If it could not
1500 * be started for whatever reason, we need to "unthrottle" the DL server
1501 * and queue right away. Otherwise nothing might queue it. That's similar
1502 * to what enqueue_dl_entity() does on start_dl_timer==0. For now, just warn.
1503 */
1504 WARN_ON_ONCE(!start_dl_timer(dl_se));
1505
1506 return;
1507 }
1508
1509 throttle:
1510 if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
1511 dl_se->dl_throttled = 1;
1512
1513 /* If requested, inform the user about runtime overruns. */
1514 if (dl_runtime_exceeded(dl_se) &&
1515 (dl_se->flags & SCHED_FLAG_DL_OVERRUN))
1516 dl_se->dl_overrun = 1;
1517
1518 dequeue_dl_entity(dl_se, 0);
1519 if (!dl_server(dl_se)) {
1520 update_stats_dequeue_dl(&rq->dl, dl_se, 0);
1521 dequeue_pushable_dl_task(rq, dl_task_of(dl_se));
1522 }
1523
1524 if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(dl_se))) {
1525 if (dl_server(dl_se)) {
1526 replenish_dl_new_period(dl_se, rq);
1527 start_dl_timer(dl_se);
1528 } else {
1529 enqueue_task_dl(rq, dl_task_of(dl_se), ENQUEUE_REPLENISH);
1530 }
1531 }
1532
1533 if (!is_leftmost(dl_se, &rq->dl))
1534 resched_curr(rq);
1535 }
1536
1537 /*
1538 * The dl_server does not account for real-time workload because it
1539 * is running fair work.
1540 */
1541 if (dl_se->dl_server)
1542 return;
1543
1544 #ifdef CONFIG_RT_GROUP_SCHED
1545 /*
1546 * Because -- for now -- we share the rt bandwidth, we need to
1547 * account our runtime there too, otherwise actual rt tasks
1548 * would be able to exceed the shared quota.
1549 *
1550 * Account to the root rt group for now.
1551 *
1552 * The solution we're working towards is having the RT groups scheduled
1553 * using deadline servers -- however there's a few nasties to figure
1554 * out before that can happen.
1555 */
1556 if (rt_bandwidth_enabled()) {
1557 struct rt_rq *rt_rq = &rq->rt;
1558
1559 raw_spin_lock(&rt_rq->rt_runtime_lock);
1560 /*
1561 * We'll let actual RT tasks worry about the overflow here, we
1562 * have our own CBS to keep us inline; only account when RT
1563 * bandwidth is relevant.
1564 */
1565 if (sched_rt_bandwidth_account(rt_rq))
1566 rt_rq->rt_time += delta_exec;
1567 raw_spin_unlock(&rt_rq->rt_runtime_lock);
1568 }
1569 #endif /* CONFIG_RT_GROUP_SCHED */
1570 }
1571
1572 /*
1573 * In the non-defer mode, the idle time is not accounted, as the
1574 * server provides a guarantee.
1575 *
1576 * If the dl_server is in defer mode, the idle time is also considered as
1577 * time available for the dl_server, avoiding a penalty for the rt
 * scheduler that did not consume that time.
1579 */
void dl_server_update_idle(struct sched_dl_entity *dl_se, s64 delta_exec)
1581 {
1582 if (dl_se->dl_server_active && dl_se->dl_runtime && dl_se->dl_defer)
1583 update_curr_dl_se(dl_se->rq, dl_se, delta_exec);
1584 }
1585
void dl_server_update(struct sched_dl_entity *dl_se, s64 delta_exec)
1587 {
1588 /* 0 runtime = fair server disabled */
1589 if (dl_se->dl_server_active && dl_se->dl_runtime)
1590 update_curr_dl_se(dl_se->rq, dl_se, delta_exec);
1591 }
1592
1593 /*
1594 * dl_server && dl_defer:
1595 *
1596 * 6
1597 * +--------------------+
1598 * v |
1599 * +-------------+ 4 +-----------+ 5 +------------------+
1600 * +-> | A:init | <--- | D:running | -----> | E:replenish-wait |
1601 * | +-------------+ +-----------+ +------------------+
1602 * | | | 1 ^ ^ |
1603 * | | 1 +----------+ | 3 |
1604 * | v | |
1605 * | +--------------------------------+ 2 |
1606 * | | | ----+ |
1607 * | 8 | B:zero_laxity-wait | | |
1608 * | | | <---+ |
1609 * | +--------------------------------+ |
1610 * | | ^ ^ 2 |
1611 * | | 7 | 2, 1 +----------------+
1612 * | v |
1613 * | +-------------+ |
1614 * +-- | C:idle-wait | -+
1615 * +-------------+
1616 * ^ 7 |
1617 * +---------+
1618 *
1619 *
1620 * [A] - init
1621 * dl_server_active = 0
1622 * dl_throttled = 0
1623 * dl_defer_armed = 0
1624 * dl_defer_running = 0/1
1625 * dl_defer_idle = 0
1626 *
1627 * [B] - zero_laxity-wait
1628 * dl_server_active = 1
1629 * dl_throttled = 1
1630 * dl_defer_armed = 1
1631 * dl_defer_running = 0
1632 * dl_defer_idle = 0
1633 *
1634 * [C] - idle-wait
1635 * dl_server_active = 1
1636 * dl_throttled = 1
1637 * dl_defer_armed = 1
1638 * dl_defer_running = 0
1639 * dl_defer_idle = 1
1640 *
1641 * [D] - running
1642 * dl_server_active = 1
1643 * dl_throttled = 0
1644 * dl_defer_armed = 0
1645 * dl_defer_running = 1
1646 * dl_defer_idle = 0
1647 *
1648 * [E] - replenish-wait
1649 * dl_server_active = 1
1650 * dl_throttled = 1
1651 * dl_defer_armed = 0
1652 * dl_defer_running = 1
1653 * dl_defer_idle = 0
1654 *
1655 *
1656 * [1] A->B, A->D, C->B
1657 * dl_server_start()
1658 * dl_defer_idle = 0;
1659 * if (dl_server_active)
1660 * return; // [B]
1661 * dl_server_active = 1;
1662 * enqueue_dl_entity()
1663 * update_dl_entity(WAKEUP)
1664 * if (dl_time_before() || dl_entity_overflow())
1665 * dl_defer_running = 0;
1666 * replenish_dl_new_period();
1667 * // fwd period
1668 * dl_throttled = 1;
1669 * dl_defer_armed = 1;
1670 * if (!dl_defer_running)
1671 * dl_defer_armed = 1;
1672 * dl_throttled = 1;
1673 * if (dl_throttled && start_dl_timer())
1674 * return; // [B]
1675 * __enqueue_dl_entity();
1676 * // [D]
1677 *
1678 * // deplete server runtime from client-class
1679 * [2] B->B, C->B, E->B
1680 * dl_server_update()
1681 * update_curr_dl_se() // idle = false
1682 * if (dl_defer_idle)
1683 * dl_defer_idle = 0;
1684 * if (dl_defer && dl_throttled && dl_runtime_exceeded())
1685 * dl_defer_running = 0;
1686 * hrtimer_try_to_cancel(); // stop timer
1687 * replenish_dl_new_period()
1688 * // fwd period
1689 * dl_throttled = 1;
1690 * dl_defer_armed = 1;
1691 * start_dl_timer(); // restart timer
1692 * // [B]
1693 *
1694 * // timer actually fires means we have runtime
1695 * [3] B->D
1696 * dl_server_timer()
1697 * if (dl_defer_armed)
1698 * dl_defer_running = 1;
1699 * enqueue_dl_entity(REPLENISH)
1700 * replenish_dl_entity()
1701 * // fwd period
1702 * if (dl_throttled)
1703 * dl_throttled = 0;
1704 * if (dl_defer_armed)
1705 * dl_defer_armed = 0;
1706 * __enqueue_dl_entity();
1707 * // [D]
1708 *
1709 * // schedule server
1710 * [4] D->A
1711 * pick_task_dl()
1712 * p = server_pick_task();
1713 * if (!p)
1714 * dl_server_stop()
1715 * dequeue_dl_entity();
1716 * hrtimer_try_to_cancel();
1717 * dl_defer_armed = 0;
1718 * dl_throttled = 0;
1719 * dl_server_active = 0;
1720 * // [A]
1721 * return p;
1722 *
1723 * // server running
1724 * [5] D->E
1725 * update_curr_dl_se()
1726 * if (dl_runtime_exceeded())
1727 * dl_throttled = 1;
1728 * dequeue_dl_entity();
1729 * start_dl_timer();
1730 * // [E]
1731 *
1732 * // server replenished
1733 * [6] E->D
1734 * dl_server_timer()
1735 * enqueue_dl_entity(REPLENISH)
1736 * replenish_dl_entity()
1737 * fwd-period
1738 * if (dl_throttled)
1739 * dl_throttled = 0;
1740 * __enqueue_dl_entity();
1741 * // [D]
1742 *
1743 * // deplete server runtime from idle
1744 * [7] B->C, C->C
1745 * dl_server_update_idle()
1746 * update_curr_dl_se() // idle = true
1747 * if (dl_defer && dl_throttled && dl_runtime_exceeded())
1748 * if (dl_defer_idle)
1749 * return;
1750 * dl_defer_running = 0;
1751 * hrtimer_try_to_cancel();
1752 * replenish_dl_new_period()
1753 * // fwd period
1754 * dl_throttled = 1;
1755 * dl_defer_armed = 1;
1756 * dl_defer_idle = 1;
1757 * start_dl_timer(); // restart timer
1758 * // [C]
1759 *
1760 * // stop idle server
1761 * [8] C->A
1762 * dl_server_timer()
1763 * if (dl_defer_idle)
1764 * dl_server_stop();
1765 * // [A]
1766 *
1767 *
1768 * digraph dl_server {
1769 * "A:init" -> "B:zero_laxity-wait" [label="1:dl_server_start"]
1770 * "A:init" -> "D:running" [label="1:dl_server_start"]
1771 * "B:zero_laxity-wait" -> "B:zero_laxity-wait" [label="2:dl_server_update"]
1772 * "B:zero_laxity-wait" -> "C:idle-wait" [label="7:dl_server_update_idle"]
1773 * "B:zero_laxity-wait" -> "D:running" [label="3:dl_server_timer"]
1774 * "C:idle-wait" -> "A:init" [label="8:dl_server_timer"]
1775 * "C:idle-wait" -> "B:zero_laxity-wait" [label="1:dl_server_start"]
1776 * "C:idle-wait" -> "B:zero_laxity-wait" [label="2:dl_server_update"]
1777 * "C:idle-wait" -> "C:idle-wait" [label="7:dl_server_update_idle"]
1778 * "D:running" -> "A:init" [label="4:pick_task_dl"]
1779 * "D:running" -> "E:replenish-wait" [label="5:update_curr_dl_se"]
1780 * "E:replenish-wait" -> "B:zero_laxity-wait" [label="2:dl_server_update"]
1781 * "E:replenish-wait" -> "D:running" [label="6:dl_server_timer"]
1782 * }
1783 *
1784 *
1785 * Notes:
1786 *
1787 * - When there are fair tasks running, the most likely loop is [2]->[2]:
1788 * the dl_server never actually runs and the timer never fires.
1789 *
1790 * - When there is actual fair starvation, the timer fires and starts the
1791 * dl_server. This will then throttle and replenish like a normal DL
1792 * task. Notably it will not 'defer' again.
1793 *
1794 * - When idle it will push the activation forward once, and then wait
1795 * for the timer to hit or a non-idle update to restart things.
1796 */
1797 void dl_server_start(struct sched_dl_entity *dl_se)
1798 {
1799 struct rq *rq = dl_se->rq;
1800
1801 dl_se->dl_defer_idle = 0;
1802 if (!dl_server(dl_se) || dl_se->dl_server_active || !dl_se->dl_runtime)
1803 return;
1804
1805 /*
1806 * Update the current task to 'now'.
1807 */
1808 rq->donor->sched_class->update_curr(rq);
1809
1810 if (WARN_ON_ONCE(!cpu_online(cpu_of(rq))))
1811 return;
1812
1813 dl_se->dl_server_active = 1;
1814 enqueue_dl_entity(dl_se, ENQUEUE_WAKEUP);
1815 if (!dl_task(dl_se->rq->curr) || dl_entity_preempt(dl_se, &rq->curr->dl))
1816 resched_curr(dl_se->rq);
1817 }
1818
1819 void dl_server_stop(struct sched_dl_entity *dl_se)
1820 {
1821 if (!dl_server(dl_se) || !dl_server_active(dl_se))
1822 return;
1823
1824 dequeue_dl_entity(dl_se, DEQUEUE_SLEEP);
1825 hrtimer_try_to_cancel(&dl_se->dl_timer);
1826 dl_se->dl_defer_armed = 0;
1827 dl_se->dl_throttled = 0;
1828 dl_se->dl_defer_idle = 0;
1829 dl_se->dl_server_active = 0;
1830 }
1831
1832 void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq,
1833 dl_server_pick_f pick_task)
1834 {
1835 dl_se->rq = rq;
1836 dl_se->server_pick_task = pick_task;
1837 }
1838
1839 void sched_init_dl_servers(void)
1840 {
1841 int cpu;
1842 struct rq *rq;
1843 struct sched_dl_entity *dl_se;
1844
1845 for_each_online_cpu(cpu) {
1846 u64 runtime = 50 * NSEC_PER_MSEC;
1847 u64 period = 1000 * NSEC_PER_MSEC;
1848
1849 rq = cpu_rq(cpu);
1850
1851 guard(rq_lock_irq)(rq);
1852 update_rq_clock(rq);
1853
1854 dl_se = &rq->fair_server;
1855
1856 WARN_ON(dl_server(dl_se));
1857
1858 dl_server_apply_params(dl_se, runtime, period, 1);
1859
1860 dl_se->dl_server = 1;
1861 dl_se->dl_defer = 1;
1862 setup_new_dl_entity(dl_se);
1863
1864 #ifdef CONFIG_SCHED_CLASS_EXT
1865 dl_se = &rq->ext_server;
1866
1867 WARN_ON(dl_server(dl_se));
1868
1869 dl_server_apply_params(dl_se, runtime, period, 1);
1870
1871 dl_se->dl_server = 1;
1872 dl_se->dl_defer = 1;
1873 setup_new_dl_entity(dl_se);
1874 #endif
1875 }
1876 }
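/*
 * Worked example for the defaults above (a sketch; the exact timer
 * placement is decided by start_dl_timer()): runtime = 50ms and
 * period = deadline = 1s reserve
 *
 *	dl_bw = to_ratio(period, runtime) = (50 << BW_SHIFT) / 1000
 *	      ~ 5% of one CPU
 *
 * per server. Because dl_defer is set, the server does not run right
 * away: it is armed around the zero-laxity point of the instance
 * (roughly deadline - runtime, i.e. ~950ms in) and only starts injecting
 * time if the client class is still starved by then.
 */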
1877
1878 void __dl_server_attach_root(struct sched_dl_entity *dl_se, struct rq *rq)
1879 {
1880 u64 new_bw = dl_se->dl_bw;
1881 int cpu = cpu_of(rq);
1882 struct dl_bw *dl_b;
1883
1884 dl_b = dl_bw_of(cpu_of(rq));
1885 guard(raw_spinlock)(&dl_b->lock);
1886
1887 if (!dl_bw_cpus(cpu))
1888 return;
1889
1890 __dl_add(dl_b, new_bw, dl_bw_cpus(cpu));
1891 }
1892
1893 int dl_server_apply_params(struct sched_dl_entity *dl_se, u64 runtime, u64 period, bool init)
1894 {
1895 u64 old_bw = init ? 0 : to_ratio(dl_se->dl_period, dl_se->dl_runtime);
1896 u64 new_bw = to_ratio(period, runtime);
1897 struct rq *rq = dl_se->rq;
1898 int cpu = cpu_of(rq);
1899 struct dl_bw *dl_b;
1900 unsigned long cap;
1901 int cpus;
1902
1903 dl_b = dl_bw_of(cpu);
1904 guard(raw_spinlock)(&dl_b->lock);
1905
1906 cpus = dl_bw_cpus(cpu);
1907 cap = dl_bw_capacity(cpu);
1908
1909 if (__dl_overflow(dl_b, cap, old_bw, new_bw))
1910 return -EBUSY;
1911
1912 if (init) {
1913 __add_rq_bw(new_bw, &rq->dl);
1914 __dl_add(dl_b, new_bw, cpus);
1915 } else {
1916 __dl_sub(dl_b, dl_se->dl_bw, cpus);
1917 __dl_add(dl_b, new_bw, cpus);
1918
1919 dl_rq_change_utilization(rq, dl_se, new_bw);
1920 }
1921
1922 dl_se->dl_runtime = runtime;
1923 dl_se->dl_deadline = period;
1924 dl_se->dl_period = period;
1925
1926 dl_se->runtime = 0;
1927 dl_se->deadline = 0;
1928
1929 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
1930 dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
1931
1932 return 0;
1933 }
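/*
 * Example of the admission step above (hypothetical numbers): growing a
 * server on one CPU from 50ms/1s to 300ms/1s asks for new_bw ~ 30% of a
 * CPU. The change is applied only if total_bw - old_bw + new_bw still
 * passes the __dl_overflow() check for this root domain's capacity;
 * otherwise -EBUSY is returned and the old parameters are kept.
 */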
1934
1935 /*
1936 * Update the current task's runtime statistics (provided it is still
1937 * a -deadline task and has not been removed from the dl_rq).
1938 */
1939 static void update_curr_dl(struct rq *rq)
1940 {
1941 struct task_struct *donor = rq->donor;
1942 struct sched_dl_entity *dl_se = &donor->dl;
1943 s64 delta_exec;
1944
1945 if (!dl_task(donor) || !on_dl_rq(dl_se))
1946 return;
1947
1948 /*
1949 * Consumed budget is computed considering the time as
1950 * observed by schedulable tasks (excluding time spent
1951 * in hardirq context, etc.). Deadlines are instead
1952 * computed using hard walltime. This seems to be the more
1953 * natural solution, but the full ramifications of this
1954 * approach need further study.
1955 */
1956 delta_exec = update_curr_common(rq);
1957 update_curr_dl_se(rq, dl_se, delta_exec);
1958 }
1959
1960 static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
1961 {
1962 struct sched_dl_entity *dl_se = container_of(timer,
1963 struct sched_dl_entity,
1964 inactive_timer);
1965 struct task_struct *p = NULL;
1966 struct rq_flags rf;
1967 struct rq *rq;
1968
1969 if (!dl_server(dl_se)) {
1970 p = dl_task_of(dl_se);
1971 rq = task_rq_lock(p, &rf);
1972 } else {
1973 rq = dl_se->rq;
1974 rq_lock(rq, &rf);
1975 }
1976
1977 sched_clock_tick();
1978 update_rq_clock(rq);
1979
1980 if (dl_server(dl_se))
1981 goto no_task;
1982
1983 if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
1984 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1985
1986 if (READ_ONCE(p->__state) == TASK_DEAD && dl_se->dl_non_contending) {
1987 sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
1988 sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
1989 dl_se->dl_non_contending = 0;
1990 }
1991
1992 raw_spin_lock(&dl_b->lock);
1993 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
1994 raw_spin_unlock(&dl_b->lock);
1995 __dl_clear_params(dl_se);
1996
1997 goto unlock;
1998 }
1999
2000 no_task:
2001 if (dl_se->dl_non_contending == 0)
2002 goto unlock;
2003
2004 sub_running_bw(dl_se, &rq->dl);
2005 dl_se->dl_non_contending = 0;
2006 unlock:
2007
2008 if (!dl_server(dl_se)) {
2009 task_rq_unlock(rq, p, &rf);
2010 put_task_struct(p);
2011 } else {
2012 rq_unlock(rq, &rf);
2013 }
2014
2015 return HRTIMER_NORESTART;
2016 }
2017
2018 static void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
2019 {
2020 struct hrtimer *timer = &dl_se->inactive_timer;
2021
2022 hrtimer_setup(timer, inactive_task_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
2023 }
2024
2025 #define __node_2_dle(node) \
2026 rb_entry((node), struct sched_dl_entity, rb_node)
2027
2028 static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
2029 {
2030 struct rq *rq = rq_of_dl_rq(dl_rq);
2031
2032 if (dl_rq->earliest_dl.curr == 0 ||
2033 dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
2034 if (dl_rq->earliest_dl.curr == 0)
2035 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_HIGHER);
2036 dl_rq->earliest_dl.curr = deadline;
2037 cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
2038 }
2039 }
2040
2041 static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
2042 {
2043 struct rq *rq = rq_of_dl_rq(dl_rq);
2044
2045 /*
2046 * Since we may have removed our earliest (and/or next earliest)
2047 * task we must recompute them.
2048 */
2049 if (!dl_rq->dl_nr_running) {
2050 dl_rq->earliest_dl.curr = 0;
2051 dl_rq->earliest_dl.next = 0;
2052 cpudl_clear(&rq->rd->cpudl, rq->cpu, rq->online);
2053 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2054 } else {
2055 struct rb_node *leftmost = rb_first_cached(&dl_rq->root);
2056 struct sched_dl_entity *entry = __node_2_dle(leftmost);
2057
2058 dl_rq->earliest_dl.curr = entry->deadline;
2059 cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
2060 }
2061 }
2062
2063 static inline
2064 void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
2065 {
2066 u64 deadline = dl_se->deadline;
2067
2068 dl_rq->dl_nr_running++;
2069
2070 if (!dl_server(dl_se))
2071 add_nr_running(rq_of_dl_rq(dl_rq), 1);
2072
2073 inc_dl_deadline(dl_rq, deadline);
2074 }
2075
2076 static inline
2077 void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
2078 {
2079 WARN_ON(!dl_rq->dl_nr_running);
2080 dl_rq->dl_nr_running--;
2081
2082 if (!dl_server(dl_se))
2083 sub_nr_running(rq_of_dl_rq(dl_rq), 1);
2084
2085 dec_dl_deadline(dl_rq, dl_se->deadline);
2086 }
2087
2088 static inline bool __dl_less(struct rb_node *a, const struct rb_node *b)
2089 {
2090 return dl_time_before(__node_2_dle(a)->deadline, __node_2_dle(b)->deadline);
2091 }
2092
2093 static __always_inline struct sched_statistics *
2094 __schedstats_from_dl_se(struct sched_dl_entity *dl_se)
2095 {
2096 if (!schedstat_enabled())
2097 return NULL;
2098
2099 if (dl_server(dl_se))
2100 return NULL;
2101
2102 return &dl_task_of(dl_se)->stats;
2103 }
2104
2105 static inline void
2106 update_stats_wait_start_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
2107 {
2108 struct sched_statistics *stats = __schedstats_from_dl_se(dl_se);
2109 if (stats)
2110 __update_stats_wait_start(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
2111 }
2112
2113 static inline void
2114 update_stats_wait_end_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
2115 {
2116 struct sched_statistics *stats = __schedstats_from_dl_se(dl_se);
2117 if (stats)
2118 __update_stats_wait_end(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
2119 }
2120
2121 static inline void
2122 update_stats_enqueue_sleeper_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
2123 {
2124 struct sched_statistics *stats = __schedstats_from_dl_se(dl_se);
2125 if (stats)
2126 __update_stats_enqueue_sleeper(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
2127 }
2128
2129 static inline void
2130 update_stats_enqueue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
2131 int flags)
2132 {
2133 if (!schedstat_enabled())
2134 return;
2135
2136 if (flags & ENQUEUE_WAKEUP)
2137 update_stats_enqueue_sleeper_dl(dl_rq, dl_se);
2138 }
2139
2140 static inline void
2141 update_stats_dequeue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
2142 int flags)
2143 {
2144 struct task_struct *p = dl_task_of(dl_se);
2145
2146 if (!schedstat_enabled())
2147 return;
2148
2149 if ((flags & DEQUEUE_SLEEP)) {
2150 unsigned int state;
2151
2152 state = READ_ONCE(p->__state);
2153 if (state & TASK_INTERRUPTIBLE)
2154 __schedstat_set(p->stats.sleep_start,
2155 rq_clock(rq_of_dl_rq(dl_rq)));
2156
2157 if (state & TASK_UNINTERRUPTIBLE)
2158 __schedstat_set(p->stats.block_start,
2159 rq_clock(rq_of_dl_rq(dl_rq)));
2160 }
2161 }
2162
2163 static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
2164 {
2165 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
2166
2167 WARN_ON_ONCE(!RB_EMPTY_NODE(&dl_se->rb_node));
2168
2169 rb_add_cached(&dl_se->rb_node, &dl_rq->root, __dl_less);
2170
2171 inc_dl_tasks(dl_se, dl_rq);
2172 }
2173
2174 static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
2175 {
2176 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
2177
2178 if (RB_EMPTY_NODE(&dl_se->rb_node))
2179 return;
2180
2181 rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
2182
2183 RB_CLEAR_NODE(&dl_se->rb_node);
2184
2185 dec_dl_tasks(dl_se, dl_rq);
2186 }
2187
2188 static void
2189 enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
2190 {
2191 WARN_ON_ONCE(on_dl_rq(dl_se));
2192
2193 update_stats_enqueue_dl(dl_rq_of_se(dl_se), dl_se, flags);
2194
2195 /*
2196 * Check if a constrained deadline task was activated
2197 * after the deadline but before the next period.
2198 * If that is the case, the task will be throttled and
2199 * the replenishment timer will be set to the next period.
2200 */
2201 if (!dl_se->dl_throttled && !dl_is_implicit(dl_se))
2202 dl_check_constrained_dl(dl_se);
2203
2204 if (flags & (ENQUEUE_RESTORE|ENQUEUE_MIGRATING)) {
2205 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
2206
2207 add_rq_bw(dl_se, dl_rq);
2208 add_running_bw(dl_se, dl_rq);
2209 }
2210
2211 /*
2212 * If p is throttled, we do not enqueue it. In fact, if it exhausted
2213 * its budget it needs a replenishment and, since it now is on
2214 * its rq, the bandwidth timer callback (which clearly has not
2215 * run yet) will take care of this.
2216 * However, the active utilization does not depend on the fact
2217 * that the task is on the runqueue or not (but depends on the
2218 * task's state - in GRUB parlance, "inactive" vs "active contending").
2219 * In other words, even if a task is throttled its utilization must
2220 * be counted in the active utilization; hence, we need to call
2221 * add_running_bw().
2222 */
2223 if (!dl_se->dl_defer && dl_se->dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
2224 if (flags & ENQUEUE_WAKEUP)
2225 task_contending(dl_se, flags);
2226
2227 return;
2228 }
2229
2230 /*
2231 * If this is a wakeup or a new instance, the scheduling
2232 * parameters of the task might need updating. Otherwise,
2233 * we want a replenishment of its runtime.
2234 */
2235 if (flags & ENQUEUE_WAKEUP) {
2236 task_contending(dl_se, flags);
2237 update_dl_entity(dl_se);
2238 } else if (flags & ENQUEUE_REPLENISH) {
2239 replenish_dl_entity(dl_se);
2240 } else if ((flags & ENQUEUE_MOVE) &&
2241 !is_dl_boosted(dl_se) &&
2242 dl_time_before(dl_se->deadline, rq_clock(rq_of_dl_se(dl_se)))) {
2243 setup_new_dl_entity(dl_se);
2244 }
2245
2246 /*
2247 * If the reservation is still throttled, e.g., it got replenished but is a
2248 * deferred task and still got to wait, don't enqueue.
2249 */
2250 if (dl_se->dl_throttled && start_dl_timer(dl_se))
2251 return;
2252
2253 /*
2254 * We're about to enqueue, make sure we're not ->dl_throttled!
2255 * In case the timer was not started, say because the defer time
2256 * has passed, mark as not throttled and mark unarmed.
2257 * Also cancel earlier timers, since letting those run is pointless.
2258 */
2259 if (dl_se->dl_throttled) {
2260 hrtimer_try_to_cancel(&dl_se->dl_timer);
2261 dl_se->dl_defer_armed = 0;
2262 dl_se->dl_throttled = 0;
2263 }
2264
2265 __enqueue_dl_entity(dl_se);
2266 }
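/*
 * Example for the constrained-deadline check at the top of this function
 * (made-up numbers): runtime = 5ms, deadline = 20ms, period = 100ms.
 * A wakeup 50ms into the period -- past the deadline but before the next
 * period -- is throttled by dl_check_constrained_dl() and the
 * replenishment timer pushes the activation to the next period boundary,
 * rather than handing out fresh budget in a window where the guarantees
 * no longer hold.
 */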
2267
2268 static void dequeue_dl_entity(struct sched_dl_entity *dl_se, int flags)
2269 {
2270 __dequeue_dl_entity(dl_se);
2271
2272 if (flags & (DEQUEUE_SAVE|DEQUEUE_MIGRATING)) {
2273 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
2274
2275 sub_running_bw(dl_se, dl_rq);
2276 sub_rq_bw(dl_se, dl_rq);
2277 }
2278
2279 /*
2280 * This check makes it possible to start the inactive timer (or to
2281 * immediately decrease the active utilization, if needed) in two cases:
2282 * when the task blocks and when it is terminating
2283 * (p->state == TASK_DEAD). We can handle the two cases in the same
2284 * way, because from GRUB's point of view the same thing is happening
2285 * (the task moves from "active contending" to "active non contending"
2286 * or "inactive")
2287 */
2288 if (flags & DEQUEUE_SLEEP)
2289 task_non_contending(dl_se, true);
2290 }
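/*
 * For reference, the 0-lag time used by task_non_contending() is,
 * roughly, the instant at which the remaining budget consumed at the
 * reserved bandwidth reaches zero lag:
 *
 *	zero_lag = deadline - runtime * dl_period / dl_runtime
 *
 * E.g. (hypothetical numbers) a 10ms/100ms reservation that blocks with
 * 4ms of budget left and 30ms to its deadline has its 0-lag point 40ms
 * before the deadline, i.e. already in the past, so the running
 * bandwidth can be released immediately instead of arming the inactive
 * timer.
 */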
2291
2292 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
2293 {
2294 if (is_dl_boosted(&p->dl)) {
2295 /*
2296 * Because of delays in the detection of the overrun of a
2297 * thread's runtime, it might be the case that a thread
2298 * goes to sleep in a rt mutex with negative runtime. As
2299 * a consequence, the thread will be throttled.
2300 *
2301 * While waiting for the mutex, this thread can also be
2302 * boosted via PI, resulting in a thread that is throttled
2303 * and boosted at the same time.
2304 *
2305 * In this case, the boost overrides the throttle.
2306 */
2307 if (p->dl.dl_throttled) {
2308 /*
2309 * The replenish timer needs to be canceled. No
2310 * problem if it fires concurrently: boosted threads
2311 * are ignored in dl_task_timer().
2312 */
2313 cancel_replenish_timer(&p->dl);
2314 p->dl.dl_throttled = 0;
2315 }
2316 } else if (!dl_prio(p->normal_prio)) {
2317 /*
2318 * Special case in which we have a !SCHED_DEADLINE task that is going
2319 * to be deboosted, but exceeds its runtime while doing so. No point in
2320 * replenishing it, as it's going to return to its original
2321 * scheduling class after this. If it has been throttled, we need to
2322 * clear the flag, otherwise the task may wake up as throttled after
2323 * being boosted again with no means to replenish the runtime and clear
2324 * the throttle.
2325 */
2326 p->dl.dl_throttled = 0;
2327 if (!(flags & ENQUEUE_REPLENISH))
2328 printk_deferred_once("sched: DL de-boosted task PID %d: REPLENISH flag missing\n",
2329 task_pid_nr(p));
2330
2331 return;
2332 }
2333
2334 check_schedstat_required();
2335 update_stats_wait_start_dl(dl_rq_of_se(&p->dl), &p->dl);
2336
2337 if (p->on_rq == TASK_ON_RQ_MIGRATING)
2338 flags |= ENQUEUE_MIGRATING;
2339
2340 enqueue_dl_entity(&p->dl, flags);
2341
2342 if (dl_server(&p->dl))
2343 return;
2344
2345 if (task_is_blocked(p))
2346 return;
2347
2348 if (!task_current(rq, p) && !p->dl.dl_throttled && p->nr_cpus_allowed > 1)
2349 enqueue_pushable_dl_task(rq, p);
2350 }
2351
2352 static bool dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
2353 {
2354 update_curr_dl(rq);
2355
2356 if (p->on_rq == TASK_ON_RQ_MIGRATING)
2357 flags |= DEQUEUE_MIGRATING;
2358
2359 dequeue_dl_entity(&p->dl, flags);
2360 if (!p->dl.dl_throttled && !dl_server(&p->dl))
2361 dequeue_pushable_dl_task(rq, p);
2362
2363 return true;
2364 }
2365
2366 /*
2367 * Yield task semantic for -deadline tasks is:
2368 *
2369 * get off from the CPU until our next instance, with
2370 * a new runtime. This is of little use now, since we
2371 * don't have a bandwidth reclaiming mechanism. Anyway,
2372 * bandwidth reclaiming is planned for the future, and
2373 * yield_task_dl will indicate that some spare budget
2374 * is available for other task instances to use.
2375 */
2376 static void yield_task_dl(struct rq *rq)
2377 {
2378 /*
2379 * We make the task go to sleep until its current deadline by
2380 * forcing its runtime to zero. This way, update_curr_dl() stops
2381 * it and the bandwidth timer will wake it up and will give it
2382 * new scheduling parameters (thanks to dl_yielded=1).
2383 */
2384 rq->donor->dl.dl_yielded = 1;
2385
2386 update_rq_clock(rq);
2387 update_curr_dl(rq);
2388 /*
2389 * Tell update_rq_clock() that we've just updated,
2390 * so we don't do microscopic update in schedule()
2391 * and double the fastpath cost.
2392 */
2393 rq_clock_skip_update(rq);
2394 }
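/*
 * From user space this is reached through plain sched_yield(). A minimal
 * sketch of a periodic SCHED_DEADLINE task handing back the rest of its
 * current instance (parameters are made up, and sched_setattr() is the
 * raw syscall, typically invoked via syscall(2)):
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * 1000 * 1000,	//  10 ms
 *		.sched_deadline	= 100 * 1000 * 1000,	// 100 ms
 *		.sched_period	= 100 * 1000 * 1000,	// 100 ms
 *	};
 *
 *	sched_setattr(0, &attr, 0);
 *	for (;;) {
 *		do_work();
 *		sched_yield();	// block until the next replenishment
 *	}
 */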
2395
2396 static inline bool dl_task_is_earliest_deadline(struct task_struct *p,
2397 struct rq *rq)
2398 {
2399 return (!rq->dl.dl_nr_running ||
2400 dl_time_before(p->dl.deadline,
2401 rq->dl.earliest_dl.curr));
2402 }
2403
2404 static int find_later_rq(struct task_struct *task);
2405
2406 static int
2407 select_task_rq_dl(struct task_struct *p, int cpu, int flags)
2408 {
2409 struct task_struct *curr, *donor;
2410 bool select_rq;
2411 struct rq *rq;
2412
2413 if (!(flags & WF_TTWU))
2414 return cpu;
2415
2416 rq = cpu_rq(cpu);
2417
2418 rcu_read_lock();
2419 curr = READ_ONCE(rq->curr); /* unlocked access */
2420 donor = READ_ONCE(rq->donor);
2421
2422 /*
2423 * If we are dealing with a -deadline task, we must
2424 * decide where to wake it up.
2425 * If it has a later deadline and the current task
2426 * on this rq can't move (provided the waking task
2427 * can!) we prefer to send it somewhere else. On the
2428 * other hand, if it has a shorter deadline, we
2429 * try to make it stay here, it might be important.
2430 */
2431 select_rq = unlikely(dl_task(donor)) &&
2432 (curr->nr_cpus_allowed < 2 ||
2433 !dl_entity_preempt(&p->dl, &donor->dl)) &&
2434 p->nr_cpus_allowed > 1;
2435
2436 /*
2437 * Take the capacity of the CPU into account to
2438 * ensure it fits the requirement of the task.
2439 */
2440 if (sched_asym_cpucap_active())
2441 select_rq |= !dl_task_fits_capacity(p, cpu);
2442
2443 if (select_rq) {
2444 int target = find_later_rq(p);
2445
2446 if (target != -1 &&
2447 dl_task_is_earliest_deadline(p, cpu_rq(target)))
2448 cpu = target;
2449 }
2450 rcu_read_unlock();
2451
2452 return cpu;
2453 }
2454
2455 static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
2456 {
2457 struct rq_flags rf;
2458 struct rq *rq;
2459
2460 if (READ_ONCE(p->__state) != TASK_WAKING)
2461 return;
2462
2463 rq = task_rq(p);
2464 /*
2465 * Since p->state == TASK_WAKING, set_task_cpu() has been called
2466 * from try_to_wake_up(). Hence, p->pi_lock is locked, but
2467 * rq->lock is not... So, lock it
2468 */
2469 rq_lock(rq, &rf);
2470 if (p->dl.dl_non_contending) {
2471 update_rq_clock(rq);
2472 sub_running_bw(&p->dl, &rq->dl);
2473 p->dl.dl_non_contending = 0;
2474 /*
2475 * If the timer handler is currently running and the
2476 * timer cannot be canceled, inactive_task_timer()
2477 * will see that dl_non_contending is not set, and
2478 * will not touch the rq's active utilization,
2479 * so we are still safe.
2480 */
2481 cancel_inactive_timer(&p->dl);
2482 }
2483 sub_rq_bw(&p->dl, &rq->dl);
2484 rq_unlock(rq, &rf);
2485 }
2486
2487 static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
2488 {
2489 /*
2490 * Current can't be migrated, useless to reschedule,
2491 * let's hope p can move out.
2492 */
2493 if (rq->curr->nr_cpus_allowed == 1 ||
2494 !cpudl_find(&rq->rd->cpudl, rq->donor, NULL))
2495 return;
2496
2497 /*
2498 * p is migratable, so let's not schedule it and
2499 * see if it is pushed or pulled somewhere else.
2500 */
2501 if (p->nr_cpus_allowed != 1 &&
2502 cpudl_find(&rq->rd->cpudl, p, NULL))
2503 return;
2504
2505 resched_curr(rq);
2506 }
2507
2508 static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
2509 {
2510 if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
2511 /*
2512 * This is OK, because current is on_cpu, which avoids it being
2513 * picked for load-balance and preemption/IRQs are still
2514 * disabled avoiding further scheduler activity on it and we've
2515 * not yet started the picking loop.
2516 */
2517 rq_unpin_lock(rq, rf);
2518 pull_dl_task(rq);
2519 rq_repin_lock(rq, rf);
2520 }
2521
2522 return sched_stop_runnable(rq) || sched_dl_runnable(rq);
2523 }
2524
2525 /*
2526 * Only called when both the current and waking task are -deadline
2527 * tasks.
2528 */
2529 static void wakeup_preempt_dl(struct rq *rq, struct task_struct *p, int flags)
2530 {
2531 /*
2532 * Can only get preempted by stop-class, and those should be
2533 * few and short lived, doesn't really make sense to push
2534 * anything away for that.
2535 */
2536 if (p->sched_class != &dl_sched_class)
2537 return;
2538
2539 if (dl_entity_preempt(&p->dl, &rq->donor->dl)) {
2540 resched_curr(rq);
2541 return;
2542 }
2543
2544 /*
2545 * In the unlikely case current and p have the same deadline
2546 * let us try to decide what's the best thing to do...
2547 */
2548 if ((p->dl.deadline == rq->donor->dl.deadline) &&
2549 !test_tsk_need_resched(rq->curr))
2550 check_preempt_equal_dl(rq, p);
2551 }
2552
2553 #ifdef CONFIG_SCHED_HRTICK
2554 static void start_hrtick_dl(struct rq *rq, struct sched_dl_entity *dl_se)
2555 {
2556 hrtick_start(rq, dl_se->runtime);
2557 }
2558 #else /* !CONFIG_SCHED_HRTICK: */
2559 static void start_hrtick_dl(struct rq *rq, struct sched_dl_entity *dl_se)
2560 {
2561 }
2562 #endif /* !CONFIG_SCHED_HRTICK */
2563
2564 static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
2565 {
2566 struct sched_dl_entity *dl_se = &p->dl;
2567 struct dl_rq *dl_rq = &rq->dl;
2568
2569 p->se.exec_start = rq_clock_task(rq);
2570 if (on_dl_rq(&p->dl))
2571 update_stats_wait_end_dl(dl_rq, dl_se);
2572
2573 /* You can't push away the running task */
2574 dequeue_pushable_dl_task(rq, p);
2575
2576 if (!first)
2577 return;
2578
2579 if (rq->donor->sched_class != &dl_sched_class)
2580 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
2581
2582 deadline_queue_push_tasks(rq);
2583
2584 if (hrtick_enabled_dl(rq))
2585 start_hrtick_dl(rq, &p->dl);
2586 }
2587
2588 static struct sched_dl_entity *pick_next_dl_entity(struct dl_rq *dl_rq)
2589 {
2590 struct rb_node *left = rb_first_cached(&dl_rq->root);
2591
2592 if (!left)
2593 return NULL;
2594
2595 return __node_2_dle(left);
2596 }
2597
2598 /*
2599 * __pick_task_dl - Helper to pick the next -deadline task to run.
2600 * @rq: The runqueue to pick the next task from.
2601 */
2602 static struct task_struct *__pick_task_dl(struct rq *rq, struct rq_flags *rf)
2603 {
2604 struct sched_dl_entity *dl_se;
2605 struct dl_rq *dl_rq = &rq->dl;
2606 struct task_struct *p;
2607
2608 again:
2609 if (!sched_dl_runnable(rq))
2610 return NULL;
2611
2612 dl_se = pick_next_dl_entity(dl_rq);
2613 WARN_ON_ONCE(!dl_se);
2614
2615 if (dl_server(dl_se)) {
2616 p = dl_se->server_pick_task(dl_se, rf);
2617 if (!p) {
2618 dl_server_stop(dl_se);
2619 goto again;
2620 }
2621 rq->dl_server = dl_se;
2622 } else {
2623 p = dl_task_of(dl_se);
2624 }
2625
2626 return p;
2627 }
2628
2629 static struct task_struct *pick_task_dl(struct rq *rq, struct rq_flags *rf)
2630 {
2631 return __pick_task_dl(rq, rf);
2632 }
2633
2634 static void put_prev_task_dl(struct rq *rq, struct task_struct *p, struct task_struct *next)
2635 {
2636 struct sched_dl_entity *dl_se = &p->dl;
2637 struct dl_rq *dl_rq = &rq->dl;
2638
2639 if (on_dl_rq(&p->dl))
2640 update_stats_wait_start_dl(dl_rq, dl_se);
2641
2642 update_curr_dl(rq);
2643
2644 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
2645
2646 if (task_is_blocked(p))
2647 return;
2648
2649 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
2650 enqueue_pushable_dl_task(rq, p);
2651 }
2652
2653 /*
2654 * scheduler tick hitting a task of our scheduling class.
2655 *
2656 * NOTE: This function can be called remotely by the tick offload that
2657 * goes along full dynticks. Therefore no local assumption can be made
2658 * and everything must be accessed through the @rq and @curr passed in
2659 * parameters.
2660 */
2661 static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
2662 {
2663 update_curr_dl(rq);
2664
2665 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
2666 /*
2667 * Even when we have runtime, update_curr_dl() might have resulted in us
2668 * not being the leftmost task anymore. In that case NEED_RESCHED will
2669 * be set and schedule() will start a new hrtick for the next task.
2670 */
2671 if (hrtick_enabled_dl(rq) && queued && p->dl.runtime > 0 &&
2672 is_leftmost(&p->dl, &rq->dl))
2673 start_hrtick_dl(rq, &p->dl);
2674 }
2675
2676 static void task_fork_dl(struct task_struct *p)
2677 {
2678 /*
2679 * SCHED_DEADLINE tasks cannot fork and this is achieved through
2680 * sched_fork()
2681 */
2682 }
2683
2684 /* Only try algorithms three times */
2685 #define DL_MAX_TRIES 3
2686
2687 /*
2688 * Return the earliest task on the rq's pushable list that is suitable
2689 * to be executed on the given CPU, or NULL otherwise:
2690 */
2691 static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
2692 {
2693 struct task_struct *p = NULL;
2694 struct rb_node *next_node;
2695
2696 if (!has_pushable_dl_tasks(rq))
2697 return NULL;
2698
2699 next_node = rb_first_cached(&rq->dl.pushable_dl_tasks_root);
2700 while (next_node) {
2701 p = __node_2_pdl(next_node);
2702
2703 if (task_is_pushable(rq, p, cpu))
2704 return p;
2705
2706 next_node = rb_next(next_node);
2707 }
2708
2709 return NULL;
2710 }
2711
2712 /* Access rule: must be called on local CPU with preemption disabled */
2713 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
2714
2715 static int find_later_rq(struct task_struct *task)
2716 {
2717 struct sched_domain *sd;
2718 struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
2719 int this_cpu = smp_processor_id();
2720 int cpu = task_cpu(task);
2721
2722 /* Make sure the mask is initialized first */
2723 if (unlikely(!later_mask))
2724 return -1;
2725
2726 if (task->nr_cpus_allowed == 1)
2727 return -1;
2728
2729 /*
2730 * We have to consider system topology and task affinity
2731 * first, then we can look for a suitable CPU.
2732 */
2733 if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
2734 return -1;
2735
2736 /*
2737 * If we are here, some targets have been found, including
2738 * the most suitable one, which is, among the runqueues whose
2739 * current tasks have later deadlines than our task's, the
2740 * rq with the latest possible one.
2741 *
2742 * Now we check how well this matches with task's
2743 * affinity and system topology.
2744 *
2745 * The last CPU where the task ran is our first
2746 * guess, since it is most likely cache-hot there.
2747 */
2748 if (cpumask_test_cpu(cpu, later_mask))
2749 return cpu;
2750 /*
2751 * Check if this_cpu is to be skipped (i.e., it is
2752 * not in the mask) or not.
2753 */
2754 if (!cpumask_test_cpu(this_cpu, later_mask))
2755 this_cpu = -1;
2756
2757 rcu_read_lock();
2758 for_each_domain(cpu, sd) {
2759 if (sd->flags & SD_WAKE_AFFINE) {
2760 int best_cpu;
2761
2762 /*
2763 * If possible, preempting this_cpu is
2764 * cheaper than migrating.
2765 */
2766 if (this_cpu != -1 &&
2767 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
2768 rcu_read_unlock();
2769 return this_cpu;
2770 }
2771
2772 best_cpu = cpumask_any_and_distribute(later_mask,
2773 sched_domain_span(sd));
2774 /*
2775 * Last chance: if a CPU in both later_mask
2776 * and the current sd span is valid, that becomes our
2777 * choice. Of course, the latest possible CPU is
2778 * already under consideration through later_mask.
2779 */
2780 if (best_cpu < nr_cpu_ids) {
2781 rcu_read_unlock();
2782 return best_cpu;
2783 }
2784 }
2785 }
2786 rcu_read_unlock();
2787
2788 /*
2789 * At this point, all our guesses failed, we just return
2790 * 'something', and let the caller sort the things out.
2791 */
2792 if (this_cpu != -1)
2793 return this_cpu;
2794
2795 cpu = cpumask_any_distribute(later_mask);
2796 if (cpu < nr_cpu_ids)
2797 return cpu;
2798
2799 return -1;
2800 }
2801
2802 static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
2803 {
2804 struct task_struct *p;
2805
2806 if (!has_pushable_dl_tasks(rq))
2807 return NULL;
2808
2809 p = __node_2_pdl(rb_first_cached(&rq->dl.pushable_dl_tasks_root));
2810
2811 WARN_ON_ONCE(rq->cpu != task_cpu(p));
2812 WARN_ON_ONCE(task_current(rq, p));
2813 WARN_ON_ONCE(p->nr_cpus_allowed <= 1);
2814
2815 WARN_ON_ONCE(!task_on_rq_queued(p));
2816 WARN_ON_ONCE(!dl_task(p));
2817
2818 return p;
2819 }
2820
2821 /* Locks the rq it finds */
2822 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
2823 {
2824 struct rq *later_rq = NULL;
2825 int tries;
2826 int cpu;
2827
2828 for (tries = 0; tries < DL_MAX_TRIES; tries++) {
2829 cpu = find_later_rq(task);
2830
2831 if ((cpu == -1) || (cpu == rq->cpu))
2832 break;
2833
2834 later_rq = cpu_rq(cpu);
2835
2836 if (!dl_task_is_earliest_deadline(task, later_rq)) {
2837 /*
2838 * Target rq has tasks of equal or earlier deadline,
2839 * retrying does not release any lock and is unlikely
2840 * to yield a different result.
2841 */
2842 later_rq = NULL;
2843 break;
2844 }
2845
2846 /* Retry if something changed. */
2847 if (double_lock_balance(rq, later_rq)) {
2848 /*
2849 * double_lock_balance had to release rq->lock, in the
2850 * meantime, task may no longer be fit to be migrated.
2851 * Check the following to ensure that the task is
2852 * still suitable for migration:
2853 * 1. It is possible the task was scheduled,
2854 * migrate_disabled was set and then got preempted,
2855 * so we must check the task migration disable
2856 * flag.
2857 * 2. The CPU picked is in the task's affinity.
2858 * 3. For throttled task (dl_task_offline_migration),
2859 * check the following:
2860 * - the task is not on the rq anymore (it was
2861 * migrated)
2862 * - the task is not on CPU anymore
2863 * - the task is still a dl task
2864 * - the task is not queued on the rq anymore
2865 * 4. For the non-throttled task (push_dl_task), the
2866 * check to ensure that this task is still at the
2867 * head of the pushable tasks list is enough.
2868 */
2869 if (unlikely(is_migration_disabled(task) ||
2870 !cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) ||
2871 (task->dl.dl_throttled &&
2872 (task_rq(task) != rq ||
2873 task_on_cpu(rq, task) ||
2874 !dl_task(task) ||
2875 !task_on_rq_queued(task))) ||
2876 (!task->dl.dl_throttled &&
2877 task != pick_next_pushable_dl_task(rq)))) {
2878
2879 double_unlock_balance(rq, later_rq);
2880 later_rq = NULL;
2881 break;
2882 }
2883 }
2884
2885 /*
2886 * If the rq we found has no -deadline task, or
2887 * its earliest one has a later deadline than our
2888 * task, the rq is a good one.
2889 */
2890 if (dl_task_is_earliest_deadline(task, later_rq))
2891 break;
2892
2893 /* Otherwise we try again. */
2894 double_unlock_balance(rq, later_rq);
2895 later_rq = NULL;
2896 }
2897
2898 return later_rq;
2899 }
2900
2901 /*
2902 * See if the non running -deadline tasks on this rq
2903 * can be sent to some other CPU where they can preempt
2904 * and start executing.
2905 */
2906 static int push_dl_task(struct rq *rq)
2907 {
2908 struct task_struct *next_task;
2909 struct rq *later_rq;
2910 int ret = 0;
2911
2912 next_task = pick_next_pushable_dl_task(rq);
2913 if (!next_task)
2914 return 0;
2915
2916 retry:
2917 /*
2918 * If next_task preempts rq->curr, and rq->curr
2919 * can move away, it makes sense to just reschedule
2920 * without going further in pushing next_task.
2921 */
2922 if (dl_task(rq->donor) &&
2923 dl_time_before(next_task->dl.deadline, rq->donor->dl.deadline) &&
2924 rq->curr->nr_cpus_allowed > 1) {
2925 resched_curr(rq);
2926 return 0;
2927 }
2928
2929 if (is_migration_disabled(next_task))
2930 return 0;
2931
2932 if (WARN_ON(next_task == rq->curr))
2933 return 0;
2934
2935 /* We might release rq lock */
2936 get_task_struct(next_task);
2937
2938 /* Will lock the rq it'll find */
2939 later_rq = find_lock_later_rq(next_task, rq);
2940 if (!later_rq) {
2941 struct task_struct *task;
2942
2943 /*
2944 * We must check all this again, since
2945 * find_lock_later_rq releases rq->lock and it is
2946 * then possible that next_task has migrated.
2947 */
2948 task = pick_next_pushable_dl_task(rq);
2949 if (task == next_task) {
2950 /*
2951 * The task is still there. We don't try
2952 * again, some other CPU will pull it when ready.
2953 */
2954 goto out;
2955 }
2956
2957 if (!task)
2958 /* No more tasks */
2959 goto out;
2960
2961 put_task_struct(next_task);
2962 next_task = task;
2963 goto retry;
2964 }
2965
2966 move_queued_task_locked(rq, later_rq, next_task);
2967 ret = 1;
2968
2969 resched_curr(later_rq);
2970
2971 double_unlock_balance(rq, later_rq);
2972
2973 out:
2974 put_task_struct(next_task);
2975
2976 return ret;
2977 }
2978
2979 static void push_dl_tasks(struct rq *rq)
2980 {
2981 /* push_dl_task() will return true if it moved a -deadline task */
2982 while (push_dl_task(rq))
2983 ;
2984 }
2985
2986 static void pull_dl_task(struct rq *this_rq)
2987 {
2988 int this_cpu = this_rq->cpu, cpu;
2989 struct task_struct *p, *push_task;
2990 bool resched = false;
2991 struct rq *src_rq;
2992 u64 dmin = LONG_MAX;
2993
2994 if (likely(!dl_overloaded(this_rq)))
2995 return;
2996
2997 /*
2998 * Match the barrier from dl_set_overload(); this guarantees that if we
2999 * see overloaded we must also see the dlo_mask bit.
3000 */
3001 smp_rmb();
3002
3003 for_each_cpu(cpu, this_rq->rd->dlo_mask) {
3004 if (this_cpu == cpu)
3005 continue;
3006
3007 src_rq = cpu_rq(cpu);
3008
3009 /*
3010 * It looks racy, and it is! However, as in sched_rt.c,
3011 * we are fine with this.
3012 */
3013 if (this_rq->dl.dl_nr_running &&
3014 dl_time_before(this_rq->dl.earliest_dl.curr,
3015 src_rq->dl.earliest_dl.next))
3016 continue;
3017
3018 /* Might drop this_rq->lock */
3019 push_task = NULL;
3020 double_lock_balance(this_rq, src_rq);
3021
3022 /*
3023 * If there are no more pullable tasks on the
3024 * rq, we're done with it.
3025 */
3026 if (src_rq->dl.dl_nr_running <= 1)
3027 goto skip;
3028
3029 p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
3030
3031 /*
3032 * We found a task to be pulled if:
3033 * - it preempts our current (if there's one),
3034 * - it will preempt the last one we pulled (if any).
3035 */
3036 if (p && dl_time_before(p->dl.deadline, dmin) &&
3037 dl_task_is_earliest_deadline(p, this_rq)) {
3038 WARN_ON(p == src_rq->curr);
3039 WARN_ON(!task_on_rq_queued(p));
3040
3041 /*
3042 * Then we pull iff p has actually an earlier
3043 * deadline than the current task of its runqueue.
3044 */
3045 if (dl_time_before(p->dl.deadline,
3046 src_rq->donor->dl.deadline))
3047 goto skip;
3048
3049 if (is_migration_disabled(p)) {
3050 push_task = get_push_task(src_rq);
3051 } else {
3052 move_queued_task_locked(src_rq, this_rq, p);
3053 dmin = p->dl.deadline;
3054 resched = true;
3055 }
3056
3057 /* Is there any other task even earlier? */
3058 }
3059 skip:
3060 double_unlock_balance(this_rq, src_rq);
3061
3062 if (push_task) {
3063 preempt_disable();
3064 raw_spin_rq_unlock(this_rq);
3065 stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
3066 push_task, &src_rq->push_work);
3067 preempt_enable();
3068 raw_spin_rq_lock(this_rq);
3069 }
3070 }
3071
3072 if (resched)
3073 resched_curr(this_rq);
3074 }
3075
3076 /*
3077 * Since the task is not running and a reschedule is not going to happen
3078 * anytime soon on its runqueue, we try pushing it away now.
3079 */
3080 static void task_woken_dl(struct rq *rq, struct task_struct *p)
3081 {
3082 if (!task_on_cpu(rq, p) &&
3083 !test_tsk_need_resched(rq->curr) &&
3084 p->nr_cpus_allowed > 1 &&
3085 dl_task(rq->donor) &&
3086 (rq->curr->nr_cpus_allowed < 2 ||
3087 !dl_entity_preempt(&p->dl, &rq->donor->dl))) {
3088 push_dl_tasks(rq);
3089 }
3090 }
3091
3092 static void set_cpus_allowed_dl(struct task_struct *p,
3093 struct affinity_context *ctx)
3094 {
3095 struct root_domain *src_rd;
3096 struct rq *rq;
3097
3098 WARN_ON_ONCE(!dl_task(p));
3099
3100 rq = task_rq(p);
3101 src_rd = rq->rd;
3102 /*
3103 * Migrating a SCHED_DEADLINE task between exclusive
3104 * cpusets (different root_domains) entails a bandwidth
3105 * update. We already made space for us in the destination
3106 * domain (see cpuset_can_attach()).
3107 */
3108 if (!cpumask_intersects(src_rd->span, ctx->new_mask)) {
3109 struct dl_bw *src_dl_b;
3110
3111 src_dl_b = dl_bw_of(cpu_of(rq));
3112 /*
3113 * We now free resources of the root_domain we are migrating
3114 * off. In the worst case, sched_setattr() may temporarily fail
3115 * until we complete the update.
3116 */
3117 raw_spin_lock(&src_dl_b->lock);
3118 __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
3119 raw_spin_unlock(&src_dl_b->lock);
3120 }
3121
3122 set_cpus_allowed_common(p, ctx);
3123 }
3124
3125 /* Assumes rq->lock is held */
3126 static void rq_online_dl(struct rq *rq)
3127 {
3128 if (rq->dl.overloaded)
3129 dl_set_overload(rq);
3130
3131 if (rq->dl.dl_nr_running > 0)
3132 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
3133 else
3134 cpudl_clear(&rq->rd->cpudl, rq->cpu, true);
3135 }
3136
3137 /* Assumes rq->lock is held */
3138 static void rq_offline_dl(struct rq *rq)
3139 {
3140 if (rq->dl.overloaded)
3141 dl_clear_overload(rq);
3142
3143 cpudl_clear(&rq->rd->cpudl, rq->cpu, false);
3144 }
3145
3146 void __init init_sched_dl_class(void)
3147 {
3148 unsigned int i;
3149
3150 for_each_possible_cpu(i)
3151 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
3152 GFP_KERNEL, cpu_to_node(i));
3153 }
3154
3155 /*
3156 * This function always returns a non-empty bitmap in @cpus. This is because
3157 * if a root domain has reserved bandwidth for DL tasks, the DL bandwidth
3158 * check will prevent CPU hotplug from deactivating all CPUs in that domain.
3159 */
3160 static void dl_get_task_effective_cpus(struct task_struct *p, struct cpumask *cpus)
3161 {
3162 const struct cpumask *hk_msk;
3163
3164 hk_msk = housekeeping_cpumask(HK_TYPE_DOMAIN);
3165 if (housekeeping_enabled(HK_TYPE_DOMAIN)) {
3166 if (!cpumask_intersects(p->cpus_ptr, hk_msk)) {
3167 /*
3168 * CPUs isolated by isolcpus="domain" always belong to
3169 * def_root_domain.
3170 */
3171 cpumask_andnot(cpus, cpu_active_mask, hk_msk);
3172 return;
3173 }
3174 }
3175
3176 /*
3177 * If a root domain holds a DL task, it must have active CPUs. So
3178 * active CPUs can always be found by walking up the task's cpuset
3179 * hierarchy up to the partition root.
3180 */
3181 cpuset_cpus_allowed_locked(p, cpus);
3182 }
3183
3184 /* The caller should hold cpuset_mutex */
3185 void dl_add_task_root_domain(struct task_struct *p)
3186 {
3187 struct rq_flags rf;
3188 struct rq *rq;
3189 struct dl_bw *dl_b;
3190 unsigned int cpu;
3191 struct cpumask *msk;
3192
3193 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
3194 if (!dl_task(p) || dl_entity_is_special(&p->dl)) {
3195 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
3196 return;
3197 }
3198
3199 msk = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
3200 dl_get_task_effective_cpus(p, msk);
3201 cpu = cpumask_first_and(cpu_active_mask, msk);
3202 BUG_ON(cpu >= nr_cpu_ids);
3203 rq = cpu_rq(cpu);
3204 dl_b = &rq->rd->dl_bw;
3205
3206 raw_spin_lock(&dl_b->lock);
3207 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
3208 raw_spin_unlock(&dl_b->lock);
3209 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
3210 }
3211
3212 static void dl_server_add_bw(struct root_domain *rd, int cpu)
3213 {
3214 struct sched_dl_entity *dl_se;
3215
3216 dl_se = &cpu_rq(cpu)->fair_server;
3217 if (dl_server(dl_se) && cpu_active(cpu))
3218 __dl_add(&rd->dl_bw, dl_se->dl_bw, dl_bw_cpus(cpu));
3219
3220 #ifdef CONFIG_SCHED_CLASS_EXT
3221 dl_se = &cpu_rq(cpu)->ext_server;
3222 if (dl_server(dl_se) && cpu_active(cpu))
3223 __dl_add(&rd->dl_bw, dl_se->dl_bw, dl_bw_cpus(cpu));
3224 #endif
3225 }
3226
3227 static u64 dl_server_read_bw(int cpu)
3228 {
3229 u64 dl_bw = 0;
3230
3231 if (cpu_rq(cpu)->fair_server.dl_server)
3232 dl_bw += cpu_rq(cpu)->fair_server.dl_bw;
3233
3234 #ifdef CONFIG_SCHED_CLASS_EXT
3235 if (cpu_rq(cpu)->ext_server.dl_server)
3236 dl_bw += cpu_rq(cpu)->ext_server.dl_bw;
3237 #endif
3238
3239 return dl_bw;
3240 }
3241
3242 void dl_clear_root_domain(struct root_domain *rd)
3243 {
3244 int i;
3245
3246 guard(raw_spinlock_irqsave)(&rd->dl_bw.lock);
3247
3248 /*
3249 * Reset total_bw to zero and extra_bw to max_bw so that the next
3250 * loop will add the dl-server contributions back properly.
3251 */
3252 rd->dl_bw.total_bw = 0;
3253 for_each_cpu(i, rd->span)
3254 cpu_rq(i)->dl.extra_bw = cpu_rq(i)->dl.max_bw;
3255
3256 /*
3257 * dl_servers are not tasks. Since dl_add_task_root_domain ignores
3258 * them, we need to account for them here explicitly.
3259 */
3260 for_each_cpu(i, rd->span)
3261 dl_server_add_bw(rd, i);
3262 }
3263
3264 void dl_clear_root_domain_cpu(int cpu)
3265 {
3266 dl_clear_root_domain(cpu_rq(cpu)->rd);
3267 }
3268
3269 static void switched_from_dl(struct rq *rq, struct task_struct *p)
3270 {
3271 /*
3272 * task_non_contending() can start the "inactive timer" (if the 0-lag
3273 * time is in the future). If the task switches back to dl before
3274 * the "inactive timer" fires, it can continue to consume its current
3275 * runtime using its current deadline. If it stays outside of
3276 * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer()
3277 * will reset the task parameters.
3278 */
3279 if (task_on_rq_queued(p) && p->dl.dl_runtime)
3280 task_non_contending(&p->dl, false);
3281
3282 /*
3283 * In case a task is setscheduled out from SCHED_DEADLINE we need to
3284 * keep track of that on its cpuset (for correct bandwidth tracking).
3285 */
3286 dec_dl_tasks_cs(p);
3287
3288 if (!task_on_rq_queued(p)) {
3289 /*
3290 * Inactive timer is armed. However, p is leaving DEADLINE and
3291 * might migrate away from this rq while continuing to run on
3292 * some other class. We need to remove its contribution from
3293 * this rq running_bw now, or sub_rq_bw (below) will complain.
3294 */
3295 if (p->dl.dl_non_contending)
3296 sub_running_bw(&p->dl, &rq->dl);
3297 sub_rq_bw(&p->dl, &rq->dl);
3298 }
3299
3300 /*
3301 * We cannot use inactive_task_timer() to invoke sub_running_bw()
3302 * at the 0-lag time, because the task could have been migrated
3303 * while SCHED_OTHER in the meantime.
3304 */
3305 if (p->dl.dl_non_contending)
3306 p->dl.dl_non_contending = 0;
3307
3308 /*
3309 * Since this might be the only -deadline task on the rq,
3310 * this is the right place to try to pull some other one
3311 * from an overloaded CPU, if any.
3312 */
3313 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
3314 return;
3315
3316 deadline_queue_pull_task(rq);
3317 }
3318
3319 /*
3320 * When switching to -deadline, we may overload the rq, then
3321 * we try to push someone off, if possible.
3322 */
3323 static void switched_to_dl(struct rq *rq, struct task_struct *p)
3324 {
3325 cancel_inactive_timer(&p->dl);
3326
3327 /*
3328 * In case a task is setscheduled to SCHED_DEADLINE we need to keep
3329 * track of that on its cpuset (for correct bandwidth tracking).
3330 */
3331 inc_dl_tasks_cs(p);
3332
3333 /* If p is not queued we will update its parameters at next wakeup. */
3334 if (!task_on_rq_queued(p)) {
3335 add_rq_bw(&p->dl, &rq->dl);
3336
3337 return;
3338 }
3339
3340 if (rq->donor != p) {
3341 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
3342 deadline_queue_push_tasks(rq);
3343 if (dl_task(rq->donor))
3344 wakeup_preempt_dl(rq, p, 0);
3345 else
3346 resched_curr(rq);
3347 } else {
3348 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
3349 }
3350 }
3351
3352 static u64 get_prio_dl(struct rq *rq, struct task_struct *p)
3353 {
3354 /*
3355 * Make sure to update current so we don't return a stale value.
3356 */
3357 if (task_current_donor(rq, p))
3358 update_curr_dl(rq);
3359
3360 return p->dl.deadline;
3361 }
3362
3363 /*
3364 * If the scheduling parameters of a -deadline task changed,
3365 * a push or pull operation might be needed.
3366 */
3367 static void prio_changed_dl(struct rq *rq, struct task_struct *p, u64 old_deadline)
3368 {
3369 if (!task_on_rq_queued(p))
3370 return;
3371
3372 if (p->dl.deadline == old_deadline)
3373 return;
3374
3375 if (dl_time_before(old_deadline, p->dl.deadline))
3376 deadline_queue_pull_task(rq);
3377
3378 if (task_current_donor(rq, p)) {
3379 /*
3380 * If we now have an earlier deadline task than p,
3381 * then reschedule, provided p is still on this
3382 * runqueue.
3383 */
3384 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
3385 resched_curr(rq);
3386 } else {
3387 /*
3388 * Current may not be deadline in case p was throttled but we
3389 * have just replenished it (e.g. rt_mutex_setprio()).
3390 *
3391 * Otherwise, if p was given an earlier deadline, reschedule.
3392 */
3393 if (!dl_task(rq->curr) ||
3394 dl_time_before(p->dl.deadline, rq->curr->dl.deadline))
3395 resched_curr(rq);
3396 }
3397 }
3398
3399 #ifdef CONFIG_SCHED_CORE
3400 static int task_is_throttled_dl(struct task_struct *p, int cpu)
3401 {
3402 return p->dl.dl_throttled;
3403 }
3404 #endif
3405
3406 DEFINE_SCHED_CLASS(dl) = {
3407 .enqueue_task = enqueue_task_dl,
3408 .dequeue_task = dequeue_task_dl,
3409 .yield_task = yield_task_dl,
3410
3411 .wakeup_preempt = wakeup_preempt_dl,
3412
3413 .pick_task = pick_task_dl,
3414 .put_prev_task = put_prev_task_dl,
3415 .set_next_task = set_next_task_dl,
3416
3417 .balance = balance_dl,
3418 .select_task_rq = select_task_rq_dl,
3419 .migrate_task_rq = migrate_task_rq_dl,
3420 .set_cpus_allowed = set_cpus_allowed_dl,
3421 .rq_online = rq_online_dl,
3422 .rq_offline = rq_offline_dl,
3423 .task_woken = task_woken_dl,
3424 .find_lock_rq = find_lock_later_rq,
3425
3426 .task_tick = task_tick_dl,
3427 .task_fork = task_fork_dl,
3428
3429 .get_prio = get_prio_dl,
3430 .prio_changed = prio_changed_dl,
3431 .switched_from = switched_from_dl,
3432 .switched_to = switched_to_dl,
3433
3434 .update_curr = update_curr_dl,
3435 #ifdef CONFIG_SCHED_CORE
3436 .task_is_throttled = task_is_throttled_dl,
3437 #endif
3438 };
3439
3440 /*
3441 * Used for dl_bw check and update, used under sched_rt_handler()::mutex and
3442 * sched_domains_mutex.
3443 */
3444 u64 dl_cookie;
3445
3446 int sched_dl_global_validate(void)
3447 {
3448 u64 runtime = global_rt_runtime();
3449 u64 period = global_rt_period();
3450 u64 new_bw = to_ratio(period, runtime);
3451 u64 cookie = ++dl_cookie;
3452 struct dl_bw *dl_b;
3453 int cpu, cpus, ret = 0;
3454 unsigned long flags;
3455
3456 /*
3457 * Here we want to check that the bandwidth is not being set to some
3458 * value smaller than the currently allocated bandwidth in
3459 * any of the root_domains.
3460 */
3461 for_each_online_cpu(cpu) {
3462 rcu_read_lock_sched();
3463
3464 if (dl_bw_visited(cpu, cookie))
3465 goto next;
3466
3467 dl_b = dl_bw_of(cpu);
3468 cpus = dl_bw_cpus(cpu);
3469
3470 raw_spin_lock_irqsave(&dl_b->lock, flags);
3471 if (new_bw * cpus < dl_b->total_bw)
3472 ret = -EBUSY;
3473 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
3474
3475 next:
3476 rcu_read_unlock_sched();
3477
3478 if (ret)
3479 break;
3480 }
3481
3482 return ret;
3483 }
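/*
 * Example of the check above (made-up numbers): with 2 CPUs in a root
 * domain and 1.2 CPUs worth of -deadline bandwidth already admitted,
 * lowering kernel.sched_rt_runtime_us from 950000 to 500000 gives
 * new_bw * cpus = 1.0 CPU < 1.2, so the write is rejected with -EBUSY.
 */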
3484
3485 static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
3486 {
3487 if (global_rt_runtime() == RUNTIME_INF) {
3488 dl_rq->bw_ratio = 1 << RATIO_SHIFT;
3489 dl_rq->max_bw = dl_rq->extra_bw = 1 << BW_SHIFT;
3490 } else {
3491 dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
3492 global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
3493 dl_rq->max_bw = dl_rq->extra_bw =
3494 to_ratio(global_rt_period(), global_rt_runtime());
3495 }
3496 }
3497
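/*
 * Worked example (illustrative, assuming the default limits of
 * sched_rt_runtime_us = 950000 and sched_rt_period_us = 1000000, and
 * BW_SHIFT == 20, RATIO_SHIFT == 8 as in the scheduler headers):
 *
 *   max_bw = extra_bw = to_ratio(1000000, 950000)
 *                     = (950000 << 20) / 1000000  ~= 996147  (~0.95 of a CPU)
 *
 *   bw_ratio = to_ratio(950000, 1000000) >> (20 - 8)
 *            = ((1000000 << 20) / 950000) >> 12   ~= 269     (~1.053 << 8)
 *
 * i.e. max_bw caps deadline bandwidth at 95% of a CPU, while bw_ratio is its
 * reciprocal in the coarser RATIO_SHIFT format, used by the bandwidth
 * reclaiming (GRUB) code.
 */
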
3498 void sched_dl_do_global(void)
3499 {
3500 u64 new_bw = -1;
3501 u64 cookie = ++dl_cookie;
3502 struct dl_bw *dl_b;
3503 int cpu;
3504 unsigned long flags;
3505
3506 if (global_rt_runtime() != RUNTIME_INF)
3507 new_bw = to_ratio(global_rt_period(), global_rt_runtime());
3508
3509 for_each_possible_cpu(cpu)
3510 init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
3511
3512 for_each_possible_cpu(cpu) {
3513 rcu_read_lock_sched();
3514
3515 if (dl_bw_visited(cpu, cookie)) {
3516 rcu_read_unlock_sched();
3517 continue;
3518 }
3519
3520 dl_b = dl_bw_of(cpu);
3521
3522 raw_spin_lock_irqsave(&dl_b->lock, flags);
3523 dl_b->bw = new_bw;
3524 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
3525
3526 rcu_read_unlock_sched();
3527 }
3528 }
3529
3530 /*
3531 * We must be sure that accepting a new task (or allowing changing the
3532 * parameters of an existing one) is consistent with the bandwidth
3533 * constraints. If so, this function also updates the currently
3534 * allocated bandwidth accordingly, to reflect the new situation.
3535 *
3536 * This function is called while holding p's rq->lock.
3537 */
3538 int sched_dl_overflow(struct task_struct *p, int policy,
3539 const struct sched_attr *attr)
3540 {
3541 u64 period = attr->sched_period ?: attr->sched_deadline;
3542 u64 runtime = attr->sched_runtime;
3543 u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
3544 int cpus, err = -1, cpu = task_cpu(p);
3545 struct dl_bw *dl_b = dl_bw_of(cpu);
3546 unsigned long cap;
3547
3548 if (attr->sched_flags & SCHED_FLAG_SUGOV)
3549 return 0;
3550
3551 /* !deadline task may carry old deadline bandwidth */
3552 if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
3553 return 0;
3554
3555 /*
3556 * Whether a task enters, leaves, or stays -deadline but changes its
3557 * parameters, we may need to update the total allocated bandwidth
3558 * of the container accordingly.
3559 */
3560 raw_spin_lock(&dl_b->lock);
3561 cpus = dl_bw_cpus(cpu);
3562 cap = dl_bw_capacity(cpu);
3563
3564 if (dl_policy(policy) && !task_has_dl_policy(p) &&
3565 !__dl_overflow(dl_b, cap, 0, new_bw)) {
3566 if (hrtimer_active(&p->dl.inactive_timer))
3567 __dl_sub(dl_b, p->dl.dl_bw, cpus);
3568 __dl_add(dl_b, new_bw, cpus);
3569 err = 0;
3570 } else if (dl_policy(policy) && task_has_dl_policy(p) &&
3571 !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) {
3572 /*
3573 * XXX this is slightly incorrect: when the task
3574 * utilization decreases, we should delay the total
3575 * utilization change until the task's 0-lag point.
3576 * But this would require setting the task's "inactive
3577 * timer" when the task is not inactive.
3578 */
3579 __dl_sub(dl_b, p->dl.dl_bw, cpus);
3580 __dl_add(dl_b, new_bw, cpus);
3581 dl_change_utilization(p, new_bw);
3582 err = 0;
3583 } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
3584 /*
3585 * Do not decrease the total deadline utilization here,
3586 * switched_from_dl() will take care to do it at the correct
3587 * (0-lag) time.
3588 */
3589 err = 0;
3590 }
3591 raw_spin_unlock(&dl_b->lock);
3592
3593 return err;
3594 }
3595
3596 /*
3597 * This function initializes the sched_dl_entity of a task that is
3598 * becoming a SCHED_DEADLINE task.
3599 *
3600 * Only the static values are considered here; the actual runtime and the
3601 * absolute deadline will be properly calculated when the task is enqueued
3602 * for the first time with its new policy.
3603 */
3604 void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
3605 {
3606 struct sched_dl_entity *dl_se = &p->dl;
3607
3608 dl_se->dl_runtime = attr->sched_runtime;
3609 dl_se->dl_deadline = attr->sched_deadline;
3610 dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
3611 dl_se->flags = attr->sched_flags & SCHED_DL_FLAGS;
3612 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
3613 dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
3614 }
3615
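/*
 * Worked example (illustrative, assuming BW_SHIFT == 20): a task with
 * sched_runtime = 10ms, sched_deadline = 30ms and sched_period = 100ms gets
 *
 *   dl_bw      = to_ratio(100ms, 10ms) = (10 << 20) / 100 ~= 104857  (~0.10)
 *   dl_density = to_ratio(30ms, 10ms)  = (10 << 20) / 30  ~= 349525  (~0.33)
 *
 * dl_bw is the utilization used for admission control, while dl_density
 * (runtime/deadline) is the stricter fraction used when deciding whether a
 * wakeup may keep the current runtime and absolute deadline.
 */
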
3616 void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
3617 {
3618 struct sched_dl_entity *dl_se = &p->dl;
3619
3620 attr->sched_priority = p->rt_priority;
3621 attr->sched_runtime = dl_se->dl_runtime;
3622 attr->sched_deadline = dl_se->dl_deadline;
3623 attr->sched_period = dl_se->dl_period;
3624 attr->sched_flags &= ~SCHED_DL_FLAGS;
3625 attr->sched_flags |= dl_se->flags;
3626 }
3627
3628 /*
3629 * This function validates the new parameters of a -deadline task.
3630 * We require the deadline to be non-zero and greater than or equal
3631 * to the runtime, as well as the period to be either zero or greater
3632 * than or equal to the deadline. Furthermore, we have to be sure that
3633 * user parameters are above the internal resolution of 1us (we
3634 * check sched_runtime only since it is always the smaller one) and
3635 * below 2^63 ns (we have to check both sched_deadline and
3636 * sched_period, as the latter can be zero).
3637 */
3638 bool __checkparam_dl(const struct sched_attr *attr)
3639 {
3640 u64 period, max, min;
3641
3642 /* special dl tasks don't actually use any parameter */
3643 if (attr->sched_flags & SCHED_FLAG_SUGOV)
3644 return true;
3645
3646 /* deadline != 0 */
3647 if (attr->sched_deadline == 0)
3648 return false;
3649
3650 /*
3651 * Since we truncate DL_SCALE bits, make sure we're at least
3652 * that big.
3653 */
3654 if (attr->sched_runtime < (1ULL << DL_SCALE))
3655 return false;
3656
3657 /*
3658 * Since we use the MSB for wrap-around and sign issues, make
3659 * sure it's not set (mind that period can be equal to zero).
3660 */
3661 if (attr->sched_deadline & (1ULL << 63) ||
3662 attr->sched_period & (1ULL << 63))
3663 return false;
3664
3665 period = attr->sched_period;
3666 if (!period)
3667 period = attr->sched_deadline;
3668
3669 /* runtime <= deadline <= period (if period != 0) */
3670 if (period < attr->sched_deadline ||
3671 attr->sched_deadline < attr->sched_runtime)
3672 return false;
3673
3674 max = (u64)READ_ONCE(sysctl_sched_dl_period_max) * NSEC_PER_USEC;
3675 min = (u64)READ_ONCE(sysctl_sched_dl_period_min) * NSEC_PER_USEC;
3676
3677 if (period < min || period > max)
3678 return false;
3679
3680 return true;
3681 }
3682
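/*
 * User-space sketch (not kernel code) of requesting parameters that pass
 * __checkparam_dl() above: runtime <= deadline <= period, runtime of at
 * least 1us, and the period within the sched_deadline_period_{min,max}_us
 * sysctl bounds. It assumes a libc without a sched_setattr() wrapper and
 * uapi headers that provide struct sched_attr; error handling is minimal
 * and appropriate privileges (e.g. CAP_SYS_NICE) are required.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/sched.h>        /* SCHED_DEADLINE */
#include <linux/sched/types.h>  /* struct sched_attr */

static int sched_setattr_sketch(pid_t pid, struct sched_attr *attr,
                                unsigned int flags)
{
        return syscall(SYS_sched_setattr, pid, attr, flags);
}

int main(void)
{
        struct sched_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.sched_policy   = SCHED_DEADLINE;
        attr.sched_runtime  =  10 * 1000 * 1000;        /*  10 ms */
        attr.sched_deadline =  30 * 1000 * 1000;        /*  30 ms */
        attr.sched_period   = 100 * 1000 * 1000;        /* 100 ms */

        if (sched_setattr_sketch(0, &attr, 0))
                perror("sched_setattr");        /* e.g. EBUSY on bandwidth overflow */

        return 0;
}
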
3683 /*
3684 * This function clears the sched_dl_entity static params.
3685 */
3686 static void __dl_clear_params(struct sched_dl_entity *dl_se)
3687 {
3688 dl_se->dl_runtime = 0;
3689 dl_se->dl_deadline = 0;
3690 dl_se->dl_period = 0;
3691 dl_se->flags = 0;
3692 dl_se->dl_bw = 0;
3693 dl_se->dl_density = 0;
3694
3695 dl_se->dl_throttled = 0;
3696 dl_se->dl_yielded = 0;
3697 dl_se->dl_non_contending = 0;
3698 dl_se->dl_overrun = 0;
3699 dl_se->dl_server = 0;
3700 dl_se->dl_defer = 0;
3701 dl_se->dl_defer_running = 0;
3702 dl_se->dl_defer_armed = 0;
3703
3704 #ifdef CONFIG_RT_MUTEXES
3705 dl_se->pi_se = dl_se;
3706 #endif
3707 }
3708
3709 void init_dl_entity(struct sched_dl_entity *dl_se)
3710 {
3711 RB_CLEAR_NODE(&dl_se->rb_node);
3712 init_dl_task_timer(dl_se);
3713 init_dl_inactive_task_timer(dl_se);
3714 __dl_clear_params(dl_se);
3715 }
3716
3717 bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
3718 {
3719 struct sched_dl_entity *dl_se = &p->dl;
3720
3721 if (dl_se->dl_runtime != attr->sched_runtime ||
3722 dl_se->dl_deadline != attr->sched_deadline ||
3723 dl_se->dl_period != attr->sched_period ||
3724 dl_se->flags != (attr->sched_flags & SCHED_DL_FLAGS))
3725 return true;
3726
3727 return false;
3728 }
3729
3730 int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
3731 const struct cpumask *trial)
3732 {
3733 unsigned long flags, cap;
3734 struct dl_bw *cur_dl_b;
3735 int ret = 1;
3736
3737 rcu_read_lock_sched();
3738 cur_dl_b = dl_bw_of(cpumask_any(cur));
3739 cap = __dl_bw_capacity(trial);
3740 raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
3741 if (__dl_overflow(cur_dl_b, cap, 0, 0))
3742 ret = 0;
3743 raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
3744 rcu_read_unlock_sched();
3745
3746 return ret;
3747 }
3748
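/*
 * Worked example (illustrative, assuming SCHED_CAPACITY_SCALE == 1024 and a
 * global limit of 95% per CPU): shrinking a cpuset down to a trial mask of
 * two full-capacity CPUs leaves cap = 2048, i.e. room for roughly 1.9 CPUs
 * worth of deadline bandwidth. If the tasks currently admitted in the cpuset
 * already sum to, say, 2.1 CPUs of bandwidth, __dl_overflow() reports an
 * overflow and the shrink is refused (ret = 0).
 */
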
3749 enum dl_bw_request {
3750 dl_bw_req_deactivate = 0,
3751 dl_bw_req_alloc,
3752 dl_bw_req_free
3753 };
3754
3755 static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw)
3756 {
3757 unsigned long flags, cap;
3758 struct dl_bw *dl_b;
3759 bool overflow = 0;
3760 u64 dl_server_bw = 0;
3761
3762 rcu_read_lock_sched();
3763 dl_b = dl_bw_of(cpu);
3764 raw_spin_lock_irqsave(&dl_b->lock, flags);
3765
3766 cap = dl_bw_capacity(cpu);
3767 switch (req) {
3768 case dl_bw_req_free:
3769 __dl_sub(dl_b, dl_bw, dl_bw_cpus(cpu));
3770 break;
3771 case dl_bw_req_alloc:
3772 overflow = __dl_overflow(dl_b, cap, 0, dl_bw);
3773
3774 if (!overflow) {
3775 /*
3776 * We reserve space in the destination
3777 * root_domain, as we can't fail after this point.
3778 * We will free resources in the source root_domain
3779 * later on (see set_cpus_allowed_dl()).
3780 */
3781 __dl_add(dl_b, dl_bw, dl_bw_cpus(cpu));
3782 }
3783 break;
3784 case dl_bw_req_deactivate:
3785 /*
3786 * cpu is not off yet, but we need to do the math by
3787 * considering it off already (i.e., what would happen if we
3788 * turn cpu off?).
3789 */
3790 cap -= arch_scale_cpu_capacity(cpu);
3791
3792 /*
3793 * cpu is going offline and NORMAL and EXT tasks will be
3794 * moved away from it. We can thus discount dl_server
3795 * bandwidth contribution as it won't need to service
3796 * tasks after the cpu is off.
3797 */
3798 dl_server_bw = dl_server_read_bw(cpu);
3799
3800 /*
3801 * Not much to check if no DEADLINE bandwidth is present.
3802 * dl_servers we can discount, as tasks will be moved off the
3803 * offlined CPUs anyway.
3804 */
3805 if (dl_b->total_bw - dl_server_bw > 0) {
3806 /*
3807 * Leaving at least one CPU for DEADLINE tasks seems a
3808 * wise thing to do. As said above, cpu is not offline
3809 * yet, so account for that.
3810 */
3811 if (dl_bw_cpus(cpu) - 1)
3812 overflow = __dl_overflow(dl_b, cap, dl_server_bw, 0);
3813 else
3814 overflow = 1;
3815 }
3816
3817 break;
3818 }
3819
3820 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
3821 rcu_read_unlock_sched();
3822
3823 return overflow ? -EBUSY : 0;
3824 }
3825
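/*
 * Worked example for dl_bw_req_deactivate (illustrative): taking one CPU out
 * of a 4-CPU root domain leaves capacity for three CPUs. The dl_server
 * bandwidth of the outgoing CPU is discounted from the admitted total, since
 * the fair/ext tasks it was serving migrate away with it. If the remaining
 * DEADLINE bandwidth still fits within the global limit (95% by default) of
 * the remaining capacity, and at least one online CPU is left in the domain,
 * the hotplug operation is allowed; otherwise -EBUSY is returned.
 */
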
3826 int dl_bw_deactivate(int cpu)
3827 {
3828 return dl_bw_manage(dl_bw_req_deactivate, cpu, 0);
3829 }
3830
3831 int dl_bw_alloc(int cpu, u64 dl_bw)
3832 {
3833 return dl_bw_manage(dl_bw_req_alloc, cpu, dl_bw);
3834 }
3835
3836 void dl_bw_free(int cpu, u64 dl_bw)
3837 {
3838 dl_bw_manage(dl_bw_req_free, cpu, dl_bw);
3839 }
3840
3841 void print_dl_stats(struct seq_file *m, int cpu)
3842 {
3843 print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
3844 }
3845