1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Deadline Scheduling Class (SCHED_DEADLINE) 4 * 5 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS). 6 * 7 * Tasks that periodically executes their instances for less than their 8 * runtime won't miss any of their deadlines. 9 * Tasks that are not periodic or sporadic or that tries to execute more 10 * than their reserved bandwidth will be slowed down (and may potentially 11 * miss some of their deadlines), and won't affect any other task. 12 * 13 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>, 14 * Juri Lelli <juri.lelli@gmail.com>, 15 * Michael Trimarchi <michael@amarulasolutions.com>, 16 * Fabio Checconi <fchecconi@gmail.com> 17 */ 18 19 #include <linux/cpuset.h> 20 21 /* 22 * Default limits for DL period; on the top end we guard against small util 23 * tasks still getting ridiculously long effective runtimes, on the bottom end we 24 * guard against timer DoS. 25 */ 26 static unsigned int sysctl_sched_dl_period_max = 1 << 22; /* ~4 seconds */ 27 static unsigned int sysctl_sched_dl_period_min = 100; /* 100 us */ 28 #ifdef CONFIG_SYSCTL 29 static struct ctl_table sched_dl_sysctls[] = { 30 { 31 .procname = "sched_deadline_period_max_us", 32 .data = &sysctl_sched_dl_period_max, 33 .maxlen = sizeof(unsigned int), 34 .mode = 0644, 35 .proc_handler = proc_douintvec_minmax, 36 .extra1 = (void *)&sysctl_sched_dl_period_min, 37 }, 38 { 39 .procname = "sched_deadline_period_min_us", 40 .data = &sysctl_sched_dl_period_min, 41 .maxlen = sizeof(unsigned int), 42 .mode = 0644, 43 .proc_handler = proc_douintvec_minmax, 44 .extra2 = (void *)&sysctl_sched_dl_period_max, 45 }, 46 }; 47 48 static int __init sched_dl_sysctl_init(void) 49 { 50 register_sysctl_init("kernel", sched_dl_sysctls); 51 return 0; 52 } 53 late_initcall(sched_dl_sysctl_init); 54 #endif 55 56 static bool dl_server(struct sched_dl_entity *dl_se) 57 { 58 return dl_se->dl_server; 59 } 60 61 static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se) 62 { 63 BUG_ON(dl_server(dl_se)); 64 return container_of(dl_se, struct task_struct, dl); 65 } 66 67 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq) 68 { 69 return container_of(dl_rq, struct rq, dl); 70 } 71 72 static inline struct rq *rq_of_dl_se(struct sched_dl_entity *dl_se) 73 { 74 struct rq *rq = dl_se->rq; 75 76 if (!dl_server(dl_se)) 77 rq = task_rq(dl_task_of(dl_se)); 78 79 return rq; 80 } 81 82 static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se) 83 { 84 return &rq_of_dl_se(dl_se)->dl; 85 } 86 87 static inline int on_dl_rq(struct sched_dl_entity *dl_se) 88 { 89 return !RB_EMPTY_NODE(&dl_se->rb_node); 90 } 91 92 #ifdef CONFIG_RT_MUTEXES 93 static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se) 94 { 95 return dl_se->pi_se; 96 } 97 98 static inline bool is_dl_boosted(struct sched_dl_entity *dl_se) 99 { 100 return pi_of(dl_se) != dl_se; 101 } 102 #else 103 static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se) 104 { 105 return dl_se; 106 } 107 108 static inline bool is_dl_boosted(struct sched_dl_entity *dl_se) 109 { 110 return false; 111 } 112 #endif 113 114 #ifdef CONFIG_SMP 115 static inline struct dl_bw *dl_bw_of(int i) 116 { 117 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(), 118 "sched RCU must be held"); 119 return &cpu_rq(i)->rd->dl_bw; 120 } 121 122 static inline int dl_bw_cpus(int i) 123 { 124 struct root_domain *rd = cpu_rq(i)->rd; 125 int cpus; 126 127 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(), 128 "sched RCU must be held"); 
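	/*
	 * Fast path: if every CPU in this root domain is active, the span
	 * weight is already the number of CPUs usable for DL admission
	 * control; otherwise count only the CPUs that are both in the
	 * span and active.
	 */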
129 130 if (cpumask_subset(rd->span, cpu_active_mask)) 131 return cpumask_weight(rd->span); 132 133 cpus = 0; 134 135 for_each_cpu_and(i, rd->span, cpu_active_mask) 136 cpus++; 137 138 return cpus; 139 } 140 141 static inline unsigned long __dl_bw_capacity(const struct cpumask *mask) 142 { 143 unsigned long cap = 0; 144 int i; 145 146 for_each_cpu_and(i, mask, cpu_active_mask) 147 cap += arch_scale_cpu_capacity(i); 148 149 return cap; 150 } 151 152 /* 153 * XXX Fix: If 'rq->rd == def_root_domain' perform AC against capacity 154 * of the CPU the task is running on rather rd's \Sum CPU capacity. 155 */ 156 static inline unsigned long dl_bw_capacity(int i) 157 { 158 if (!sched_asym_cpucap_active() && 159 arch_scale_cpu_capacity(i) == SCHED_CAPACITY_SCALE) { 160 return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT; 161 } else { 162 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(), 163 "sched RCU must be held"); 164 165 return __dl_bw_capacity(cpu_rq(i)->rd->span); 166 } 167 } 168 169 static inline bool dl_bw_visited(int cpu, u64 gen) 170 { 171 struct root_domain *rd = cpu_rq(cpu)->rd; 172 173 if (rd->visit_gen == gen) 174 return true; 175 176 rd->visit_gen = gen; 177 return false; 178 } 179 180 static inline 181 void __dl_update(struct dl_bw *dl_b, s64 bw) 182 { 183 struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw); 184 int i; 185 186 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(), 187 "sched RCU must be held"); 188 for_each_cpu_and(i, rd->span, cpu_active_mask) { 189 struct rq *rq = cpu_rq(i); 190 191 rq->dl.extra_bw += bw; 192 } 193 } 194 #else 195 static inline struct dl_bw *dl_bw_of(int i) 196 { 197 return &cpu_rq(i)->dl.dl_bw; 198 } 199 200 static inline int dl_bw_cpus(int i) 201 { 202 return 1; 203 } 204 205 static inline unsigned long dl_bw_capacity(int i) 206 { 207 return SCHED_CAPACITY_SCALE; 208 } 209 210 static inline bool dl_bw_visited(int cpu, u64 gen) 211 { 212 return false; 213 } 214 215 static inline 216 void __dl_update(struct dl_bw *dl_b, s64 bw) 217 { 218 struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw); 219 220 dl->extra_bw += bw; 221 } 222 #endif 223 224 static inline 225 void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus) 226 { 227 dl_b->total_bw -= tsk_bw; 228 __dl_update(dl_b, (s32)tsk_bw / cpus); 229 } 230 231 static inline 232 void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus) 233 { 234 dl_b->total_bw += tsk_bw; 235 __dl_update(dl_b, -((s32)tsk_bw / cpus)); 236 } 237 238 static inline bool 239 __dl_overflow(struct dl_bw *dl_b, unsigned long cap, u64 old_bw, u64 new_bw) 240 { 241 return dl_b->bw != -1 && 242 cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw; 243 } 244 245 static inline 246 void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq) 247 { 248 u64 old = dl_rq->running_bw; 249 250 lockdep_assert_rq_held(rq_of_dl_rq(dl_rq)); 251 dl_rq->running_bw += dl_bw; 252 SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */ 253 SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw); 254 /* kick cpufreq (see the comment in kernel/sched/sched.h). */ 255 cpufreq_update_util(rq_of_dl_rq(dl_rq), 0); 256 } 257 258 static inline 259 void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq) 260 { 261 u64 old = dl_rq->running_bw; 262 263 lockdep_assert_rq_held(rq_of_dl_rq(dl_rq)); 264 dl_rq->running_bw -= dl_bw; 265 SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */ 266 if (dl_rq->running_bw > old) 267 dl_rq->running_bw = 0; 268 /* kick cpufreq (see the comment in kernel/sched/sched.h). 
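 * A change in running_bw changes the deadline bandwidth that the
 * schedutil governor factors into frequency selection, hence the
 * explicit kick.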
*/ 269 cpufreq_update_util(rq_of_dl_rq(dl_rq), 0); 270 } 271 272 static inline 273 void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq) 274 { 275 u64 old = dl_rq->this_bw; 276 277 lockdep_assert_rq_held(rq_of_dl_rq(dl_rq)); 278 dl_rq->this_bw += dl_bw; 279 SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */ 280 } 281 282 static inline 283 void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq) 284 { 285 u64 old = dl_rq->this_bw; 286 287 lockdep_assert_rq_held(rq_of_dl_rq(dl_rq)); 288 dl_rq->this_bw -= dl_bw; 289 SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */ 290 if (dl_rq->this_bw > old) 291 dl_rq->this_bw = 0; 292 SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw); 293 } 294 295 static inline 296 void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 297 { 298 if (!dl_entity_is_special(dl_se)) 299 __add_rq_bw(dl_se->dl_bw, dl_rq); 300 } 301 302 static inline 303 void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 304 { 305 if (!dl_entity_is_special(dl_se)) 306 __sub_rq_bw(dl_se->dl_bw, dl_rq); 307 } 308 309 static inline 310 void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 311 { 312 if (!dl_entity_is_special(dl_se)) 313 __add_running_bw(dl_se->dl_bw, dl_rq); 314 } 315 316 static inline 317 void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 318 { 319 if (!dl_entity_is_special(dl_se)) 320 __sub_running_bw(dl_se->dl_bw, dl_rq); 321 } 322 323 static void dl_rq_change_utilization(struct rq *rq, struct sched_dl_entity *dl_se, u64 new_bw) 324 { 325 if (dl_se->dl_non_contending) { 326 sub_running_bw(dl_se, &rq->dl); 327 dl_se->dl_non_contending = 0; 328 329 /* 330 * If the timer handler is currently running and the 331 * timer cannot be canceled, inactive_task_timer() 332 * will see that dl_not_contending is not set, and 333 * will not touch the rq's active utilization, 334 * so we are still safe. 335 */ 336 if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1) { 337 if (!dl_server(dl_se)) 338 put_task_struct(dl_task_of(dl_se)); 339 } 340 } 341 __sub_rq_bw(dl_se->dl_bw, &rq->dl); 342 __add_rq_bw(new_bw, &rq->dl); 343 } 344 345 static __always_inline 346 void cancel_dl_timer(struct sched_dl_entity *dl_se, struct hrtimer *timer) 347 { 348 /* 349 * If the timer callback was running (hrtimer_try_to_cancel == -1), 350 * it will eventually call put_task_struct(). 351 */ 352 if (hrtimer_try_to_cancel(timer) == 1 && !dl_server(dl_se)) 353 put_task_struct(dl_task_of(dl_se)); 354 } 355 356 static __always_inline 357 void cancel_replenish_timer(struct sched_dl_entity *dl_se) 358 { 359 cancel_dl_timer(dl_se, &dl_se->dl_timer); 360 } 361 362 static __always_inline 363 void cancel_inactive_timer(struct sched_dl_entity *dl_se) 364 { 365 cancel_dl_timer(dl_se, &dl_se->inactive_timer); 366 } 367 368 static void dl_change_utilization(struct task_struct *p, u64 new_bw) 369 { 370 WARN_ON_ONCE(p->dl.flags & SCHED_FLAG_SUGOV); 371 372 if (task_on_rq_queued(p)) 373 return; 374 375 dl_rq_change_utilization(task_rq(p), &p->dl, new_bw); 376 } 377 378 static void __dl_clear_params(struct sched_dl_entity *dl_se); 379 380 /* 381 * The utilization of a task cannot be immediately removed from 382 * the rq active utilization (running_bw) when the task blocks. 383 * Instead, we have to wait for the so called "0-lag time". 384 * 385 * If a task blocks before the "0-lag time", a timer (the inactive 386 * timer) is armed, and running_bw is decreased when the timer 387 * fires. 
 *
 * If the task wakes up again before the inactive timer fires,
 * the timer is canceled, whereas if the task wakes up after the
 * inactive timer has fired (and running_bw has been decreased), the
 * task's utilization has to be added to running_bw again.
 * A flag in the deadline scheduling entity (dl_non_contending)
 * is used to avoid race conditions between the inactive timer handler
 * and task wakeups.
 *
 * The following diagram shows how running_bw is updated. A task is
 * "ACTIVE" when its utilization contributes to running_bw; an
 * "ACTIVE contending" task is in the TASK_RUNNING state, while an
 * "ACTIVE non contending" task is a blocked task for which the "0-lag time"
 * has not passed yet. An "INACTIVE" task is one for which the "0-lag time"
 * has already passed; it does not contribute to running_bw anymore.
 *
 *                             +------------------+
 *            wakeup           |      ACTIVE      |
 *          +----------------->+    contending    |
 *          |  add_running_bw  |                  |
 *          |                  +----+------+------+
 *          |                       |      ^
 *          |       dequeue         |      |
 *  +-------+-------+               |      |
 *  |               |  t >= 0-lag   |      | wakeup
 *  |   INACTIVE    |<--------------+      |
 *  |               |sub_running_bw |      |
 *  +-------+-------+               |      |
 *          ^                       |      |
 *          |           t < 0-lag   |      |
 *          |                       |      |
 *          |                       V      |
 *          |                  +----+------+------+
 *          | sub_running_bw   |      ACTIVE      |
 *          +------------------+                  |
 *            inactive timer   |  non contending  |
 *               fired         +------------------+
 *
 * The task_non_contending() function is invoked when a task
 * blocks, and checks whether the 0-lag time already passed or
 * not (in the first case, it directly updates running_bw;
 * in the second case, it arms the inactive timer).
 *
 * The task_contending() function is invoked when a task wakes
 * up, and checks whether the task is still in the "ACTIVE non contending"
 * state or not (if it is not, meaning that the inactive timer already
 * fired, it adds the task's utilization back to running_bw).
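 *
 * The "0-lag time" of a task with parameters dl_runtime/dl_period and
 * "runtime" budget left is
 *
 *	deadline - runtime * dl_period / dl_runtime
 *
 * i.e. the instant from which consuming the remaining budget at exactly
 * the reserved bandwidth would end precisely at the deadline. As an
 * illustration (made-up numbers): a task with dl_runtime = 10ms and
 * dl_period = 100ms that blocks with 4ms of budget left has its 0-lag
 * time 4ms / (10/100) = 40ms before its absolute deadline;
 * task_non_contending() arms the inactive timer for that instant, or
 * decreases running_bw at once if it is already in the past.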
433 */ 434 static void task_non_contending(struct sched_dl_entity *dl_se) 435 { 436 struct hrtimer *timer = &dl_se->inactive_timer; 437 struct rq *rq = rq_of_dl_se(dl_se); 438 struct dl_rq *dl_rq = &rq->dl; 439 s64 zerolag_time; 440 441 /* 442 * If this is a non-deadline task that has been boosted, 443 * do nothing 444 */ 445 if (dl_se->dl_runtime == 0) 446 return; 447 448 if (dl_entity_is_special(dl_se)) 449 return; 450 451 WARN_ON(dl_se->dl_non_contending); 452 453 zerolag_time = dl_se->deadline - 454 div64_long((dl_se->runtime * dl_se->dl_period), 455 dl_se->dl_runtime); 456 457 /* 458 * Using relative times instead of the absolute "0-lag time" 459 * allows to simplify the code 460 */ 461 zerolag_time -= rq_clock(rq); 462 463 /* 464 * If the "0-lag time" already passed, decrease the active 465 * utilization now, instead of starting a timer 466 */ 467 if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) { 468 if (dl_server(dl_se)) { 469 sub_running_bw(dl_se, dl_rq); 470 } else { 471 struct task_struct *p = dl_task_of(dl_se); 472 473 if (dl_task(p)) 474 sub_running_bw(dl_se, dl_rq); 475 476 if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) { 477 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); 478 479 if (READ_ONCE(p->__state) == TASK_DEAD) 480 sub_rq_bw(dl_se, &rq->dl); 481 raw_spin_lock(&dl_b->lock); 482 __dl_sub(dl_b, dl_se->dl_bw, dl_bw_cpus(task_cpu(p))); 483 raw_spin_unlock(&dl_b->lock); 484 __dl_clear_params(dl_se); 485 } 486 } 487 488 return; 489 } 490 491 dl_se->dl_non_contending = 1; 492 if (!dl_server(dl_se)) 493 get_task_struct(dl_task_of(dl_se)); 494 495 hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD); 496 } 497 498 static void task_contending(struct sched_dl_entity *dl_se, int flags) 499 { 500 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); 501 502 /* 503 * If this is a non-deadline task that has been boosted, 504 * do nothing 505 */ 506 if (dl_se->dl_runtime == 0) 507 return; 508 509 if (flags & ENQUEUE_MIGRATED) 510 add_rq_bw(dl_se, dl_rq); 511 512 if (dl_se->dl_non_contending) { 513 dl_se->dl_non_contending = 0; 514 /* 515 * If the timer handler is currently running and the 516 * timer cannot be canceled, inactive_task_timer() 517 * will see that dl_not_contending is not set, and 518 * will not touch the rq's active utilization, 519 * so we are still safe. 520 */ 521 cancel_inactive_timer(dl_se); 522 } else { 523 /* 524 * Since "dl_non_contending" is not set, the 525 * task's utilization has already been removed from 526 * active utilization (either when the task blocked, 527 * when the "inactive timer" fired). 528 * So, add it back. 
529 */ 530 add_running_bw(dl_se, dl_rq); 531 } 532 } 533 534 static inline int is_leftmost(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 535 { 536 return rb_first_cached(&dl_rq->root) == &dl_se->rb_node; 537 } 538 539 static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq); 540 541 void init_dl_bw(struct dl_bw *dl_b) 542 { 543 raw_spin_lock_init(&dl_b->lock); 544 if (global_rt_runtime() == RUNTIME_INF) 545 dl_b->bw = -1; 546 else 547 dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime()); 548 dl_b->total_bw = 0; 549 } 550 551 void init_dl_rq(struct dl_rq *dl_rq) 552 { 553 dl_rq->root = RB_ROOT_CACHED; 554 555 #ifdef CONFIG_SMP 556 /* zero means no -deadline tasks */ 557 dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0; 558 559 dl_rq->overloaded = 0; 560 dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED; 561 #else 562 init_dl_bw(&dl_rq->dl_bw); 563 #endif 564 565 dl_rq->running_bw = 0; 566 dl_rq->this_bw = 0; 567 init_dl_rq_bw_ratio(dl_rq); 568 } 569 570 #ifdef CONFIG_SMP 571 572 static inline int dl_overloaded(struct rq *rq) 573 { 574 return atomic_read(&rq->rd->dlo_count); 575 } 576 577 static inline void dl_set_overload(struct rq *rq) 578 { 579 if (!rq->online) 580 return; 581 582 cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask); 583 /* 584 * Must be visible before the overload count is 585 * set (as in sched_rt.c). 586 * 587 * Matched by the barrier in pull_dl_task(). 588 */ 589 smp_wmb(); 590 atomic_inc(&rq->rd->dlo_count); 591 } 592 593 static inline void dl_clear_overload(struct rq *rq) 594 { 595 if (!rq->online) 596 return; 597 598 atomic_dec(&rq->rd->dlo_count); 599 cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask); 600 } 601 602 #define __node_2_pdl(node) \ 603 rb_entry((node), struct task_struct, pushable_dl_tasks) 604 605 static inline bool __pushable_less(struct rb_node *a, const struct rb_node *b) 606 { 607 return dl_entity_preempt(&__node_2_pdl(a)->dl, &__node_2_pdl(b)->dl); 608 } 609 610 static inline int has_pushable_dl_tasks(struct rq *rq) 611 { 612 return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root); 613 } 614 615 /* 616 * The list of pushable -deadline task is not a plist, like in 617 * sched_rt.c, it is an rb-tree with tasks ordered by deadline. 
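 * The leftmost (cached) node is the pushable task with the earliest
 * deadline; its deadline is mirrored into rq->dl.earliest_dl.next so
 * that remote CPUs can cheaply check whether this runqueue has a task
 * worth pulling.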
618 */ 619 static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p) 620 { 621 struct rb_node *leftmost; 622 623 WARN_ON_ONCE(!RB_EMPTY_NODE(&p->pushable_dl_tasks)); 624 625 leftmost = rb_add_cached(&p->pushable_dl_tasks, 626 &rq->dl.pushable_dl_tasks_root, 627 __pushable_less); 628 if (leftmost) 629 rq->dl.earliest_dl.next = p->dl.deadline; 630 631 if (!rq->dl.overloaded) { 632 dl_set_overload(rq); 633 rq->dl.overloaded = 1; 634 } 635 } 636 637 static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p) 638 { 639 struct dl_rq *dl_rq = &rq->dl; 640 struct rb_root_cached *root = &dl_rq->pushable_dl_tasks_root; 641 struct rb_node *leftmost; 642 643 if (RB_EMPTY_NODE(&p->pushable_dl_tasks)) 644 return; 645 646 leftmost = rb_erase_cached(&p->pushable_dl_tasks, root); 647 if (leftmost) 648 dl_rq->earliest_dl.next = __node_2_pdl(leftmost)->dl.deadline; 649 650 RB_CLEAR_NODE(&p->pushable_dl_tasks); 651 652 if (!has_pushable_dl_tasks(rq) && rq->dl.overloaded) { 653 dl_clear_overload(rq); 654 rq->dl.overloaded = 0; 655 } 656 } 657 658 static int push_dl_task(struct rq *rq); 659 660 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev) 661 { 662 return rq->online && dl_task(prev); 663 } 664 665 static DEFINE_PER_CPU(struct balance_callback, dl_push_head); 666 static DEFINE_PER_CPU(struct balance_callback, dl_pull_head); 667 668 static void push_dl_tasks(struct rq *); 669 static void pull_dl_task(struct rq *); 670 671 static inline void deadline_queue_push_tasks(struct rq *rq) 672 { 673 if (!has_pushable_dl_tasks(rq)) 674 return; 675 676 queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks); 677 } 678 679 static inline void deadline_queue_pull_task(struct rq *rq) 680 { 681 queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task); 682 } 683 684 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq); 685 686 static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p) 687 { 688 struct rq *later_rq = NULL; 689 struct dl_bw *dl_b; 690 691 later_rq = find_lock_later_rq(p, rq); 692 if (!later_rq) { 693 int cpu; 694 695 /* 696 * If we cannot preempt any rq, fall back to pick any 697 * online CPU: 698 */ 699 cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr); 700 if (cpu >= nr_cpu_ids) { 701 /* 702 * Failed to find any suitable CPU. 703 * The task will never come back! 704 */ 705 WARN_ON_ONCE(dl_bandwidth_enabled()); 706 707 /* 708 * If admission control is disabled we 709 * try a little harder to let the task 710 * run. 711 */ 712 cpu = cpumask_any(cpu_active_mask); 713 } 714 later_rq = cpu_rq(cpu); 715 double_lock_balance(rq, later_rq); 716 } 717 718 if (p->dl.dl_non_contending || p->dl.dl_throttled) { 719 /* 720 * Inactive timer is armed (or callback is running, but 721 * waiting for us to release rq locks). In any case, when it 722 * will fire (or continue), it will see running_bw of this 723 * task migrated to later_rq (and correctly handle it). 724 */ 725 sub_running_bw(&p->dl, &rq->dl); 726 sub_rq_bw(&p->dl, &rq->dl); 727 728 add_rq_bw(&p->dl, &later_rq->dl); 729 add_running_bw(&p->dl, &later_rq->dl); 730 } else { 731 sub_rq_bw(&p->dl, &rq->dl); 732 add_rq_bw(&p->dl, &later_rq->dl); 733 } 734 735 /* 736 * And we finally need to fix up root_domain(s) bandwidth accounting, 737 * since p is still hanging out in the old (now moved to default) root 738 * domain. 
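 * In other words, transfer p's bandwidth from the old root domain's
 * dl_bw to the new one, each under its own lock, so that per-domain
 * admission control keeps seeing a consistent total_bw.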
739 */ 740 dl_b = &rq->rd->dl_bw; 741 raw_spin_lock(&dl_b->lock); 742 __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span)); 743 raw_spin_unlock(&dl_b->lock); 744 745 dl_b = &later_rq->rd->dl_bw; 746 raw_spin_lock(&dl_b->lock); 747 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span)); 748 raw_spin_unlock(&dl_b->lock); 749 750 set_task_cpu(p, later_rq->cpu); 751 double_unlock_balance(later_rq, rq); 752 753 return later_rq; 754 } 755 756 #else 757 758 static inline 759 void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p) 760 { 761 } 762 763 static inline 764 void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p) 765 { 766 } 767 768 static inline 769 void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 770 { 771 } 772 773 static inline 774 void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 775 { 776 } 777 778 static inline void deadline_queue_push_tasks(struct rq *rq) 779 { 780 } 781 782 static inline void deadline_queue_pull_task(struct rq *rq) 783 { 784 } 785 #endif /* CONFIG_SMP */ 786 787 static void 788 enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags); 789 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags); 790 static void dequeue_dl_entity(struct sched_dl_entity *dl_se, int flags); 791 static void wakeup_preempt_dl(struct rq *rq, struct task_struct *p, int flags); 792 793 static inline void replenish_dl_new_period(struct sched_dl_entity *dl_se, 794 struct rq *rq) 795 { 796 /* for non-boosted task, pi_of(dl_se) == dl_se */ 797 dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline; 798 dl_se->runtime = pi_of(dl_se)->dl_runtime; 799 800 /* 801 * If it is a deferred reservation, and the server 802 * is not handling an starvation case, defer it. 803 */ 804 if (dl_se->dl_defer && !dl_se->dl_defer_running) { 805 dl_se->dl_throttled = 1; 806 dl_se->dl_defer_armed = 1; 807 } 808 } 809 810 /* 811 * We are being explicitly informed that a new instance is starting, 812 * and this means that: 813 * - the absolute deadline of the entity has to be placed at 814 * current time + relative deadline; 815 * - the runtime of the entity has to be set to the maximum value. 816 * 817 * The capability of specifying such event is useful whenever a -deadline 818 * entity wants to (try to!) synchronize its behaviour with the scheduler's 819 * one, and to (try to!) reconcile itself with its own scheduling 820 * parameters. 821 */ 822 static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se) 823 { 824 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); 825 struct rq *rq = rq_of_dl_rq(dl_rq); 826 827 WARN_ON(is_dl_boosted(dl_se)); 828 WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline)); 829 830 /* 831 * We are racing with the deadline timer. So, do nothing because 832 * the deadline timer handler will take care of properly recharging 833 * the runtime and postponing the deadline 834 */ 835 if (dl_se->dl_throttled) 836 return; 837 838 /* 839 * We use the regular wall clock time to set deadlines in the 840 * future; in fact, we must consider execution overheads (time 841 * spent on hardirq context, etc.). 842 */ 843 replenish_dl_new_period(dl_se, rq); 844 } 845 846 static int start_dl_timer(struct sched_dl_entity *dl_se); 847 static bool dl_entity_overflow(struct sched_dl_entity *dl_se, u64 t); 848 849 /* 850 * Pure Earliest Deadline First (EDF) scheduling does not deal with the 851 * possibility of a entity lasting more than what it declared, and thus 852 * exhausting its runtime. 
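 *
 * As an illustration (made-up numbers): an entity granted 10ms every
 * 100ms that finishes an instance 15ms in the red would, under pure
 * EDF, simply have taken that time from other reservations. The
 * replenishment rule implemented below charges the overrun to the
 * entity itself: the while loop pushes its deadline two periods ahead
 * while granting two more runtimes, leaving it 5ms of budget, so its
 * long-term bandwidth stays at 10ms/100ms.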
 *
 * Here we are interested in making runtime overrun possible, but we do
 * not want an entity which is misbehaving to affect the scheduling of all
 * other entities.
 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 * is used, in order to confine each entity within its own bandwidth.
 *
 * This function deals exactly with that, and ensures that when the runtime
 * of an entity is replenished, its deadline is also postponed. That ensures
 * the overrunning entity can't interfere with other entities in the system and
 * can't make them miss their deadlines. Reasons why this kind of overrun
 * could happen are, typically, an entity voluntarily trying to exceed its
 * runtime, or having underestimated it during sched_setattr().
 */
static void replenish_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	WARN_ON_ONCE(pi_of(dl_se)->dl_runtime <= 0);

	/*
	 * This could be the case for a !-dl task that is boosted.
	 * Just go with full inherited parameters.
	 *
	 * Or, it could be the case of a deferred reservation that
	 * was not able to consume its runtime in the background and
	 * reached this point with current u > U.
	 *
	 * In both cases, set a new period.
	 */
	if (dl_se->dl_deadline == 0 ||
	    (dl_se->dl_defer_armed && dl_entity_overflow(dl_se, rq_clock(rq)))) {
		dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
		dl_se->runtime = pi_of(dl_se)->dl_runtime;
	}

	if (dl_se->dl_yielded && dl_se->runtime > 0)
		dl_se->runtime = 0;

	/*
	 * We keep moving the deadline away until we get some
	 * available runtime for the entity. This ensures correct
	 * handling of situations where the runtime overrun is
	 * arbitrarily large.
	 */
	while (dl_se->runtime <= 0) {
		dl_se->deadline += pi_of(dl_se)->dl_period;
		dl_se->runtime += pi_of(dl_se)->dl_runtime;
	}

	/*
	 * At this point, the deadline really should be "in
	 * the future" with respect to rq->clock. If it's
	 * not, we are, for some reason, lagging too much!
	 * Anyway, after having warned userspace about that,
	 * we still try to keep things running by
	 * resetting the deadline and the budget of the
	 * entity.
	 */
	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
		printk_deferred_once("sched: DL replenish lagged too much\n");
		replenish_dl_new_period(dl_se, rq);
	}

	if (dl_se->dl_yielded)
		dl_se->dl_yielded = 0;
	if (dl_se->dl_throttled)
		dl_se->dl_throttled = 0;

	/*
	 * If this is the replenishment of a deferred reservation,
	 * clear the flag and return.
	 */
	if (dl_se->dl_defer_armed) {
		dl_se->dl_defer_armed = 0;
		return;
	}

	/*
	 * At this point, if the deferred server is not armed, the deadline
	 * is in the future and the server is not running already, throttle
	 * the server and arm the defer timer.
	 */
	if (dl_se->dl_defer && !dl_se->dl_defer_running &&
	    dl_time_before(rq_clock(dl_se->rq), dl_se->deadline - dl_se->runtime)) {
		if (!is_dl_boosted(dl_se) && dl_se->server_has_tasks(dl_se)) {

			/*
			 * Set the dl_se->dl_defer_armed and dl_throttled variables
			 * to inform start_dl_timer() that this is a deferred
			 * activation.
945 */ 946 dl_se->dl_defer_armed = 1; 947 dl_se->dl_throttled = 1; 948 if (!start_dl_timer(dl_se)) { 949 /* 950 * If for whatever reason (delays), a previous timer was 951 * queued but not serviced, cancel it and clean the 952 * deferrable server variables intended for start_dl_timer(). 953 */ 954 hrtimer_try_to_cancel(&dl_se->dl_timer); 955 dl_se->dl_defer_armed = 0; 956 dl_se->dl_throttled = 0; 957 } 958 } 959 } 960 } 961 962 /* 963 * Here we check if --at time t-- an entity (which is probably being 964 * [re]activated or, in general, enqueued) can use its remaining runtime 965 * and its current deadline _without_ exceeding the bandwidth it is 966 * assigned (function returns true if it can't). We are in fact applying 967 * one of the CBS rules: when a task wakes up, if the residual runtime 968 * over residual deadline fits within the allocated bandwidth, then we 969 * can keep the current (absolute) deadline and residual budget without 970 * disrupting the schedulability of the system. Otherwise, we should 971 * refill the runtime and set the deadline a period in the future, 972 * because keeping the current (absolute) deadline of the task would 973 * result in breaking guarantees promised to other tasks (refer to 974 * Documentation/scheduler/sched-deadline.rst for more information). 975 * 976 * This function returns true if: 977 * 978 * runtime / (deadline - t) > dl_runtime / dl_deadline , 979 * 980 * IOW we can't recycle current parameters. 981 * 982 * Notice that the bandwidth check is done against the deadline. For 983 * task with deadline equal to period this is the same of using 984 * dl_period instead of dl_deadline in the equation above. 985 */ 986 static bool dl_entity_overflow(struct sched_dl_entity *dl_se, u64 t) 987 { 988 u64 left, right; 989 990 /* 991 * left and right are the two sides of the equation above, 992 * after a bit of shuffling to use multiplications instead 993 * of divisions. 994 * 995 * Note that none of the time values involved in the two 996 * multiplications are absolute: dl_deadline and dl_runtime 997 * are the relative deadline and the maximum runtime of each 998 * instance, runtime is the runtime left for the last instance 999 * and (deadline - t), since t is rq->clock, is the time left 1000 * to the (absolute) deadline. Even if overflowing the u64 type 1001 * is very unlikely to occur in both cases, here we scale down 1002 * as we want to avoid that risk at all. Scaling down by 10 1003 * means that we reduce granularity to 1us. We are fine with it, 1004 * since this is only a true/false check and, anyway, thinking 1005 * of anything below microseconds resolution is actually fiction 1006 * (but still we want to give the user that illusion >;). 1007 */ 1008 left = (pi_of(dl_se)->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE); 1009 right = ((dl_se->deadline - t) >> DL_SCALE) * 1010 (pi_of(dl_se)->dl_runtime >> DL_SCALE); 1011 1012 return dl_time_before(right, left); 1013 } 1014 1015 /* 1016 * Revised wakeup rule [1]: For self-suspending tasks, rather then 1017 * re-initializing task's runtime and deadline, the revised wakeup 1018 * rule adjusts the task's runtime to avoid the task to overrun its 1019 * density. 
 *
 * Reasoning: a task may overrun the density if:
 *	runtime / (deadline - t) > dl_runtime / dl_deadline
 *
 * Therefore, runtime can be adjusted to:
 *	runtime = (dl_runtime / dl_deadline) * (deadline - t)
 *
 * This way, runtime is exactly the maximum amount the task can consume
 * without breaking the density rule.
 *
 * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
 * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
 */
static void
update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
{
	u64 laxity = dl_se->deadline - rq_clock(rq);

	/*
	 * If the task has deadline < period, and the deadline is in the past,
	 * it should already be throttled before this check.
	 *
	 * See update_dl_entity() comments for further details.
	 */
	WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));

	dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
}

/*
 * Regarding the deadline, a task with implicit deadline has a relative
 * deadline == relative period. A task with constrained deadline has a
 * relative deadline <= relative period.
 *
 * We support constrained deadline tasks. However, there are some restrictions
 * applied only to tasks which do not have an implicit deadline. See
 * update_dl_entity() to know more about such restrictions.
 *
 * dl_is_implicit() returns true if the task has an implicit deadline.
 */
static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
{
	return dl_se->dl_deadline == dl_se->dl_period;
}

/*
 * When a deadline entity is placed in the runqueue, its runtime and deadline
 * might need to be updated. This is done by a CBS wake up rule. There are two
 * different rules: 1) the original CBS; and 2) the Revised CBS.
 *
 * When the task is starting a new period, the original CBS is used. In this
 * case, the runtime is replenished and a new absolute deadline is set.
 *
 * When a task is queued before the beginning of the next period, using the
 * remaining runtime and deadline could cause the entity to overflow, see
 * dl_entity_overflow() for more on runtime overflow. When such a case
 * is detected, the runtime and deadline need to be updated.
 *
 * If the task has an implicit deadline, i.e., deadline == period, the original
 * CBS is applied. The runtime is replenished and a new absolute deadline is
 * set, as in the previous cases.
 *
 * However, the original CBS does not work properly for tasks with
 * deadline < period, which are said to have a constrained deadline. By
 * applying the original CBS, a constrained deadline task would be able to run
 * runtime/deadline in a period. With deadline < period, the task would
 * overrun the runtime/period allowed bandwidth, breaking the admission test.
 *
 * In order to prevent this misbehavior, the Revised CBS is used for
 * constrained deadline tasks when a runtime overflow is detected. In the
 * Revised CBS, rather than replenishing & setting a new absolute deadline,
 * the remaining runtime of the task is reduced to avoid runtime overflow.
 * Please refer to the comments in update_dl_revised_wakeup() for more on
 * the Revised CBS rule.
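 *
 * As a worked example (made-up numbers): a constrained task with
 * dl_runtime = 5ms, dl_deadline = 10ms, dl_period = 100ms wakes up with
 * 4ms of runtime left and 6ms to its absolute deadline. Since
 * 4/6 > 5/10, dl_entity_overflow() reports an overflow. The original
 * CBS would hand out a fresh 5ms budget with a new deadline 10ms away,
 * letting the task exceed its 5ms/100ms reservation; the Revised CBS
 * instead trims the remaining runtime to density * laxity =
 * (5/10) * 6ms = 3ms and keeps the current deadline.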
1094 */ 1095 static void update_dl_entity(struct sched_dl_entity *dl_se) 1096 { 1097 struct rq *rq = rq_of_dl_se(dl_se); 1098 1099 if (dl_time_before(dl_se->deadline, rq_clock(rq)) || 1100 dl_entity_overflow(dl_se, rq_clock(rq))) { 1101 1102 if (unlikely(!dl_is_implicit(dl_se) && 1103 !dl_time_before(dl_se->deadline, rq_clock(rq)) && 1104 !is_dl_boosted(dl_se))) { 1105 update_dl_revised_wakeup(dl_se, rq); 1106 return; 1107 } 1108 1109 replenish_dl_new_period(dl_se, rq); 1110 } else if (dl_server(dl_se) && dl_se->dl_defer) { 1111 /* 1112 * The server can still use its previous deadline, so check if 1113 * it left the dl_defer_running state. 1114 */ 1115 if (!dl_se->dl_defer_running) { 1116 dl_se->dl_defer_armed = 1; 1117 dl_se->dl_throttled = 1; 1118 } 1119 } 1120 } 1121 1122 static inline u64 dl_next_period(struct sched_dl_entity *dl_se) 1123 { 1124 return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period; 1125 } 1126 1127 /* 1128 * If the entity depleted all its runtime, and if we want it to sleep 1129 * while waiting for some new execution time to become available, we 1130 * set the bandwidth replenishment timer to the replenishment instant 1131 * and try to activate it. 1132 * 1133 * Notice that it is important for the caller to know if the timer 1134 * actually started or not (i.e., the replenishment instant is in 1135 * the future or in the past). 1136 */ 1137 static int start_dl_timer(struct sched_dl_entity *dl_se) 1138 { 1139 struct hrtimer *timer = &dl_se->dl_timer; 1140 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); 1141 struct rq *rq = rq_of_dl_rq(dl_rq); 1142 ktime_t now, act; 1143 s64 delta; 1144 1145 lockdep_assert_rq_held(rq); 1146 1147 /* 1148 * We want the timer to fire at the deadline, but considering 1149 * that it is actually coming from rq->clock and not from 1150 * hrtimer's time base reading. 1151 * 1152 * The deferred reservation will have its timer set to 1153 * (deadline - runtime). At that point, the CBS rule will decide 1154 * if the current deadline can be used, or if a replenishment is 1155 * required to avoid add too much pressure on the system 1156 * (current u > U). 1157 */ 1158 if (dl_se->dl_defer_armed) { 1159 WARN_ON_ONCE(!dl_se->dl_throttled); 1160 act = ns_to_ktime(dl_se->deadline - dl_se->runtime); 1161 } else { 1162 /* act = deadline - rel-deadline + period */ 1163 act = ns_to_ktime(dl_next_period(dl_se)); 1164 } 1165 1166 now = hrtimer_cb_get_time(timer); 1167 delta = ktime_to_ns(now) - rq_clock(rq); 1168 act = ktime_add_ns(act, delta); 1169 1170 /* 1171 * If the expiry time already passed, e.g., because the value 1172 * chosen as the deadline is too small, don't even try to 1173 * start the timer in the past! 1174 */ 1175 if (ktime_us_delta(act, now) < 0) 1176 return 0; 1177 1178 /* 1179 * !enqueued will guarantee another callback; even if one is already in 1180 * progress. This ensures a balanced {get,put}_task_struct(). 1181 * 1182 * The race against __run_timer() clearing the enqueued state is 1183 * harmless because we're holding task_rq()->lock, therefore the timer 1184 * expiring after we've done the check will wait on its task_rq_lock() 1185 * and observe our state. 
1186 */ 1187 if (!hrtimer_is_queued(timer)) { 1188 if (!dl_server(dl_se)) 1189 get_task_struct(dl_task_of(dl_se)); 1190 hrtimer_start(timer, act, HRTIMER_MODE_ABS_HARD); 1191 } 1192 1193 return 1; 1194 } 1195 1196 static void __push_dl_task(struct rq *rq, struct rq_flags *rf) 1197 { 1198 #ifdef CONFIG_SMP 1199 /* 1200 * Queueing this task back might have overloaded rq, check if we need 1201 * to kick someone away. 1202 */ 1203 if (has_pushable_dl_tasks(rq)) { 1204 /* 1205 * Nothing relies on rq->lock after this, so its safe to drop 1206 * rq->lock. 1207 */ 1208 rq_unpin_lock(rq, rf); 1209 push_dl_task(rq); 1210 rq_repin_lock(rq, rf); 1211 } 1212 #endif 1213 } 1214 1215 /* a defer timer will not be reset if the runtime consumed was < dl_server_min_res */ 1216 static const u64 dl_server_min_res = 1 * NSEC_PER_MSEC; 1217 1218 static enum hrtimer_restart dl_server_timer(struct hrtimer *timer, struct sched_dl_entity *dl_se) 1219 { 1220 struct rq *rq = rq_of_dl_se(dl_se); 1221 u64 fw; 1222 1223 scoped_guard (rq_lock, rq) { 1224 struct rq_flags *rf = &scope.rf; 1225 1226 if (!dl_se->dl_throttled || !dl_se->dl_runtime) 1227 return HRTIMER_NORESTART; 1228 1229 sched_clock_tick(); 1230 update_rq_clock(rq); 1231 1232 if (!dl_se->dl_runtime) 1233 return HRTIMER_NORESTART; 1234 1235 if (!dl_se->server_has_tasks(dl_se)) { 1236 replenish_dl_entity(dl_se); 1237 return HRTIMER_NORESTART; 1238 } 1239 1240 if (dl_se->dl_defer_armed) { 1241 /* 1242 * First check if the server could consume runtime in background. 1243 * If so, it is possible to push the defer timer for this amount 1244 * of time. The dl_server_min_res serves as a limit to avoid 1245 * forwarding the timer for a too small amount of time. 1246 */ 1247 if (dl_time_before(rq_clock(dl_se->rq), 1248 (dl_se->deadline - dl_se->runtime - dl_server_min_res))) { 1249 1250 /* reset the defer timer */ 1251 fw = dl_se->deadline - rq_clock(dl_se->rq) - dl_se->runtime; 1252 1253 hrtimer_forward_now(timer, ns_to_ktime(fw)); 1254 return HRTIMER_RESTART; 1255 } 1256 1257 dl_se->dl_defer_running = 1; 1258 } 1259 1260 enqueue_dl_entity(dl_se, ENQUEUE_REPLENISH); 1261 1262 if (!dl_task(dl_se->rq->curr) || dl_entity_preempt(dl_se, &dl_se->rq->curr->dl)) 1263 resched_curr(rq); 1264 1265 __push_dl_task(rq, rf); 1266 } 1267 1268 return HRTIMER_NORESTART; 1269 } 1270 1271 /* 1272 * This is the bandwidth enforcement timer callback. If here, we know 1273 * a task is not on its dl_rq, since the fact that the timer was running 1274 * means the task is throttled and needs a runtime replenishment. 1275 * 1276 * However, what we actually do depends on the fact the task is active, 1277 * (it is on its rq) or has been removed from there by a call to 1278 * dequeue_task_dl(). In the former case we must issue the runtime 1279 * replenishment and add the task back to the dl_rq; in the latter, we just 1280 * do nothing but clearing dl_throttled, so that runtime and deadline 1281 * updating (and the queueing back to dl_rq) will be done by the 1282 * next call to enqueue_task_dl(). 
1283 */ 1284 static enum hrtimer_restart dl_task_timer(struct hrtimer *timer) 1285 { 1286 struct sched_dl_entity *dl_se = container_of(timer, 1287 struct sched_dl_entity, 1288 dl_timer); 1289 struct task_struct *p; 1290 struct rq_flags rf; 1291 struct rq *rq; 1292 1293 if (dl_server(dl_se)) 1294 return dl_server_timer(timer, dl_se); 1295 1296 p = dl_task_of(dl_se); 1297 rq = task_rq_lock(p, &rf); 1298 1299 /* 1300 * The task might have changed its scheduling policy to something 1301 * different than SCHED_DEADLINE (through switched_from_dl()). 1302 */ 1303 if (!dl_task(p)) 1304 goto unlock; 1305 1306 /* 1307 * The task might have been boosted by someone else and might be in the 1308 * boosting/deboosting path, its not throttled. 1309 */ 1310 if (is_dl_boosted(dl_se)) 1311 goto unlock; 1312 1313 /* 1314 * Spurious timer due to start_dl_timer() race; or we already received 1315 * a replenishment from rt_mutex_setprio(). 1316 */ 1317 if (!dl_se->dl_throttled) 1318 goto unlock; 1319 1320 sched_clock_tick(); 1321 update_rq_clock(rq); 1322 1323 /* 1324 * If the throttle happened during sched-out; like: 1325 * 1326 * schedule() 1327 * deactivate_task() 1328 * dequeue_task_dl() 1329 * update_curr_dl() 1330 * start_dl_timer() 1331 * __dequeue_task_dl() 1332 * prev->on_rq = 0; 1333 * 1334 * We can be both throttled and !queued. Replenish the counter 1335 * but do not enqueue -- wait for our wakeup to do that. 1336 */ 1337 if (!task_on_rq_queued(p)) { 1338 replenish_dl_entity(dl_se); 1339 goto unlock; 1340 } 1341 1342 #ifdef CONFIG_SMP 1343 if (unlikely(!rq->online)) { 1344 /* 1345 * If the runqueue is no longer available, migrate the 1346 * task elsewhere. This necessarily changes rq. 1347 */ 1348 lockdep_unpin_lock(__rq_lockp(rq), rf.cookie); 1349 rq = dl_task_offline_migration(rq, p); 1350 rf.cookie = lockdep_pin_lock(__rq_lockp(rq)); 1351 update_rq_clock(rq); 1352 1353 /* 1354 * Now that the task has been migrated to the new RQ and we 1355 * have that locked, proceed as normal and enqueue the task 1356 * there. 1357 */ 1358 } 1359 #endif 1360 1361 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH); 1362 if (dl_task(rq->donor)) 1363 wakeup_preempt_dl(rq, p, 0); 1364 else 1365 resched_curr(rq); 1366 1367 __push_dl_task(rq, &rf); 1368 1369 unlock: 1370 task_rq_unlock(rq, p, &rf); 1371 1372 /* 1373 * This can free the task_struct, including this hrtimer, do not touch 1374 * anything related to that after this. 1375 */ 1376 put_task_struct(p); 1377 1378 return HRTIMER_NORESTART; 1379 } 1380 1381 static void init_dl_task_timer(struct sched_dl_entity *dl_se) 1382 { 1383 struct hrtimer *timer = &dl_se->dl_timer; 1384 1385 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); 1386 timer->function = dl_task_timer; 1387 } 1388 1389 /* 1390 * During the activation, CBS checks if it can reuse the current task's 1391 * runtime and period. If the deadline of the task is in the past, CBS 1392 * cannot use the runtime, and so it replenishes the task. This rule 1393 * works fine for implicit deadline tasks (deadline == period), and the 1394 * CBS was designed for implicit deadline tasks. However, a task with 1395 * constrained deadline (deadline < period) might be awakened after the 1396 * deadline, but before the next period. In this case, replenishing the 1397 * task would allow it to run for runtime / deadline. As in this case 1398 * deadline < period, CBS enables a task to run for more than the 1399 * runtime / period. 
In a very loaded system, this can cause a domino 1400 * effect, making other tasks miss their deadlines. 1401 * 1402 * To avoid this problem, in the activation of a constrained deadline 1403 * task after the deadline but before the next period, throttle the 1404 * task and set the replenishing timer to the begin of the next period, 1405 * unless it is boosted. 1406 */ 1407 static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se) 1408 { 1409 struct rq *rq = rq_of_dl_se(dl_se); 1410 1411 if (dl_time_before(dl_se->deadline, rq_clock(rq)) && 1412 dl_time_before(rq_clock(rq), dl_next_period(dl_se))) { 1413 if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(dl_se))) 1414 return; 1415 dl_se->dl_throttled = 1; 1416 if (dl_se->runtime > 0) 1417 dl_se->runtime = 0; 1418 } 1419 } 1420 1421 static 1422 int dl_runtime_exceeded(struct sched_dl_entity *dl_se) 1423 { 1424 return (dl_se->runtime <= 0); 1425 } 1426 1427 /* 1428 * This function implements the GRUB accounting rule. According to the 1429 * GRUB reclaiming algorithm, the runtime is not decreased as "dq = -dt", 1430 * but as "dq = -(max{u, (Umax - Uinact - Uextra)} / Umax) dt", 1431 * where u is the utilization of the task, Umax is the maximum reclaimable 1432 * utilization, Uinact is the (per-runqueue) inactive utilization, computed 1433 * as the difference between the "total runqueue utilization" and the 1434 * "runqueue active utilization", and Uextra is the (per runqueue) extra 1435 * reclaimable utilization. 1436 * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations multiplied 1437 * by 2^BW_SHIFT, the result has to be shifted right by BW_SHIFT. 1438 * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT, dl_bw 1439 * is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT. 1440 * Since delta is a 64 bit variable, to have an overflow its value should be 1441 * larger than 2^(64 - 20 - 8), which is more than 64 seconds. So, overflow is 1442 * not an issue here. 1443 */ 1444 static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se) 1445 { 1446 u64 u_act; 1447 u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */ 1448 1449 /* 1450 * Instead of computing max{u, (u_max - u_inact - u_extra)}, we 1451 * compare u_inact + u_extra with u_max - u, because u_inact + u_extra 1452 * can be larger than u_max. So, u_max - u_inact - u_extra would be 1453 * negative leading to wrong results. 1454 */ 1455 if (u_inact + rq->dl.extra_bw > rq->dl.max_bw - dl_se->dl_bw) 1456 u_act = dl_se->dl_bw; 1457 else 1458 u_act = rq->dl.max_bw - u_inact - rq->dl.extra_bw; 1459 1460 u_act = (u_act * rq->dl.bw_ratio) >> RATIO_SHIFT; 1461 return (delta * u_act) >> BW_SHIFT; 1462 } 1463 1464 s64 dl_scaled_delta_exec(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec) 1465 { 1466 s64 scaled_delta_exec; 1467 1468 /* 1469 * For tasks that participate in GRUB, we implement GRUB-PA: the 1470 * spare reclaimed bandwidth is used to clock down frequency. 1471 * 1472 * For the others, we still need to scale reservation parameters 1473 * according to current frequency and CPU maximum capacity. 
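 *
 * E.g. (made-up numbers): at half of the maximum frequency, on a CPU
 * whose capacity is half that of the biggest CPU in the system, 1ms of
 * wall-clock execution consumes only 1ms * 1/2 * 1/2 = 0.25ms of the
 * reserved runtime.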
1474 */ 1475 if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) { 1476 scaled_delta_exec = grub_reclaim(delta_exec, rq, dl_se); 1477 } else { 1478 int cpu = cpu_of(rq); 1479 unsigned long scale_freq = arch_scale_freq_capacity(cpu); 1480 unsigned long scale_cpu = arch_scale_cpu_capacity(cpu); 1481 1482 scaled_delta_exec = cap_scale(delta_exec, scale_freq); 1483 scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu); 1484 } 1485 1486 return scaled_delta_exec; 1487 } 1488 1489 static inline void 1490 update_stats_dequeue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se, 1491 int flags); 1492 static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec) 1493 { 1494 s64 scaled_delta_exec; 1495 1496 if (unlikely(delta_exec <= 0)) { 1497 if (unlikely(dl_se->dl_yielded)) 1498 goto throttle; 1499 return; 1500 } 1501 1502 if (dl_server(dl_se) && dl_se->dl_throttled && !dl_se->dl_defer) 1503 return; 1504 1505 if (dl_entity_is_special(dl_se)) 1506 return; 1507 1508 scaled_delta_exec = dl_scaled_delta_exec(rq, dl_se, delta_exec); 1509 1510 dl_se->runtime -= scaled_delta_exec; 1511 1512 /* 1513 * The fair server can consume its runtime while throttled (not queued/ 1514 * running as regular CFS). 1515 * 1516 * If the server consumes its entire runtime in this state. The server 1517 * is not required for the current period. Thus, reset the server by 1518 * starting a new period, pushing the activation. 1519 */ 1520 if (dl_se->dl_defer && dl_se->dl_throttled && dl_runtime_exceeded(dl_se)) { 1521 /* 1522 * If the server was previously activated - the starving condition 1523 * took place, it this point it went away because the fair scheduler 1524 * was able to get runtime in background. So return to the initial 1525 * state. 1526 */ 1527 dl_se->dl_defer_running = 0; 1528 1529 hrtimer_try_to_cancel(&dl_se->dl_timer); 1530 1531 replenish_dl_new_period(dl_se, dl_se->rq); 1532 1533 /* 1534 * Not being able to start the timer seems problematic. If it could not 1535 * be started for whatever reason, we need to "unthrottle" the DL server 1536 * and queue right away. Otherwise nothing might queue it. That's similar 1537 * to what enqueue_dl_entity() does on start_dl_timer==0. For now, just warn. 1538 */ 1539 WARN_ON_ONCE(!start_dl_timer(dl_se)); 1540 1541 return; 1542 } 1543 1544 throttle: 1545 if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) { 1546 dl_se->dl_throttled = 1; 1547 1548 /* If requested, inform the user about runtime overruns. */ 1549 if (dl_runtime_exceeded(dl_se) && 1550 (dl_se->flags & SCHED_FLAG_DL_OVERRUN)) 1551 dl_se->dl_overrun = 1; 1552 1553 dequeue_dl_entity(dl_se, 0); 1554 if (!dl_server(dl_se)) { 1555 update_stats_dequeue_dl(&rq->dl, dl_se, 0); 1556 dequeue_pushable_dl_task(rq, dl_task_of(dl_se)); 1557 } 1558 1559 if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(dl_se))) { 1560 if (dl_server(dl_se)) 1561 enqueue_dl_entity(dl_se, ENQUEUE_REPLENISH); 1562 else 1563 enqueue_task_dl(rq, dl_task_of(dl_se), ENQUEUE_REPLENISH); 1564 } 1565 1566 if (!is_leftmost(dl_se, &rq->dl)) 1567 resched_curr(rq); 1568 } 1569 1570 /* 1571 * The fair server (sole dl_server) does not account for real-time 1572 * workload because it is running fair work. 1573 */ 1574 if (dl_se == &rq->fair_server) 1575 return; 1576 1577 #ifdef CONFIG_RT_GROUP_SCHED 1578 /* 1579 * Because -- for now -- we share the rt bandwidth, we need to 1580 * account our runtime there too, otherwise actual rt tasks 1581 * would be able to exceed the shared quota. 
1582 * 1583 * Account to the root rt group for now. 1584 * 1585 * The solution we're working towards is having the RT groups scheduled 1586 * using deadline servers -- however there's a few nasties to figure 1587 * out before that can happen. 1588 */ 1589 if (rt_bandwidth_enabled()) { 1590 struct rt_rq *rt_rq = &rq->rt; 1591 1592 raw_spin_lock(&rt_rq->rt_runtime_lock); 1593 /* 1594 * We'll let actual RT tasks worry about the overflow here, we 1595 * have our own CBS to keep us inline; only account when RT 1596 * bandwidth is relevant. 1597 */ 1598 if (sched_rt_bandwidth_account(rt_rq)) 1599 rt_rq->rt_time += delta_exec; 1600 raw_spin_unlock(&rt_rq->rt_runtime_lock); 1601 } 1602 #endif 1603 } 1604 1605 /* 1606 * In the non-defer mode, the idle time is not accounted, as the 1607 * server provides a guarantee. 1608 * 1609 * If the dl_server is in defer mode, the idle time is also considered 1610 * as time available for the fair server, avoiding a penalty for the 1611 * rt scheduler that did not consumed that time. 1612 */ 1613 void dl_server_update_idle_time(struct rq *rq, struct task_struct *p) 1614 { 1615 s64 delta_exec, scaled_delta_exec; 1616 1617 if (!rq->fair_server.dl_defer) 1618 return; 1619 1620 /* no need to discount more */ 1621 if (rq->fair_server.runtime < 0) 1622 return; 1623 1624 delta_exec = rq_clock_task(rq) - p->se.exec_start; 1625 if (delta_exec < 0) 1626 return; 1627 1628 scaled_delta_exec = dl_scaled_delta_exec(rq, &rq->fair_server, delta_exec); 1629 1630 rq->fair_server.runtime -= scaled_delta_exec; 1631 1632 if (rq->fair_server.runtime < 0) { 1633 rq->fair_server.dl_defer_running = 0; 1634 rq->fair_server.runtime = 0; 1635 } 1636 1637 p->se.exec_start = rq_clock_task(rq); 1638 } 1639 1640 void dl_server_update(struct sched_dl_entity *dl_se, s64 delta_exec) 1641 { 1642 /* 0 runtime = fair server disabled */ 1643 if (dl_se->dl_runtime) 1644 update_curr_dl_se(dl_se->rq, dl_se, delta_exec); 1645 } 1646 1647 void dl_server_start(struct sched_dl_entity *dl_se) 1648 { 1649 struct rq *rq = dl_se->rq; 1650 1651 /* 1652 * XXX: the apply do not work fine at the init phase for the 1653 * fair server because things are not yet set. We need to improve 1654 * this before getting generic. 
1655 */ 1656 if (!dl_server(dl_se)) { 1657 u64 runtime = 50 * NSEC_PER_MSEC; 1658 u64 period = 1000 * NSEC_PER_MSEC; 1659 1660 dl_server_apply_params(dl_se, runtime, period, 1); 1661 1662 dl_se->dl_server = 1; 1663 dl_se->dl_defer = 1; 1664 setup_new_dl_entity(dl_se); 1665 } 1666 1667 if (!dl_se->dl_runtime) 1668 return; 1669 1670 dl_se->dl_server_active = 1; 1671 enqueue_dl_entity(dl_se, ENQUEUE_WAKEUP); 1672 if (!dl_task(dl_se->rq->curr) || dl_entity_preempt(dl_se, &rq->curr->dl)) 1673 resched_curr(dl_se->rq); 1674 } 1675 1676 void dl_server_stop(struct sched_dl_entity *dl_se) 1677 { 1678 if (!dl_se->dl_runtime) 1679 return; 1680 1681 dequeue_dl_entity(dl_se, DEQUEUE_SLEEP); 1682 hrtimer_try_to_cancel(&dl_se->dl_timer); 1683 dl_se->dl_defer_armed = 0; 1684 dl_se->dl_throttled = 0; 1685 dl_se->dl_server_active = 0; 1686 } 1687 1688 void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq, 1689 dl_server_has_tasks_f has_tasks, 1690 dl_server_pick_f pick_task) 1691 { 1692 dl_se->rq = rq; 1693 dl_se->server_has_tasks = has_tasks; 1694 dl_se->server_pick_task = pick_task; 1695 } 1696 1697 void __dl_server_attach_root(struct sched_dl_entity *dl_se, struct rq *rq) 1698 { 1699 u64 new_bw = dl_se->dl_bw; 1700 int cpu = cpu_of(rq); 1701 struct dl_bw *dl_b; 1702 1703 dl_b = dl_bw_of(cpu_of(rq)); 1704 guard(raw_spinlock)(&dl_b->lock); 1705 1706 if (!dl_bw_cpus(cpu)) 1707 return; 1708 1709 __dl_add(dl_b, new_bw, dl_bw_cpus(cpu)); 1710 } 1711 1712 int dl_server_apply_params(struct sched_dl_entity *dl_se, u64 runtime, u64 period, bool init) 1713 { 1714 u64 old_bw = init ? 0 : to_ratio(dl_se->dl_period, dl_se->dl_runtime); 1715 u64 new_bw = to_ratio(period, runtime); 1716 struct rq *rq = dl_se->rq; 1717 int cpu = cpu_of(rq); 1718 struct dl_bw *dl_b; 1719 unsigned long cap; 1720 int retval = 0; 1721 int cpus; 1722 1723 dl_b = dl_bw_of(cpu); 1724 guard(raw_spinlock)(&dl_b->lock); 1725 1726 cpus = dl_bw_cpus(cpu); 1727 cap = dl_bw_capacity(cpu); 1728 1729 if (__dl_overflow(dl_b, cap, old_bw, new_bw)) 1730 return -EBUSY; 1731 1732 if (init) { 1733 __add_rq_bw(new_bw, &rq->dl); 1734 __dl_add(dl_b, new_bw, cpus); 1735 } else { 1736 __dl_sub(dl_b, dl_se->dl_bw, cpus); 1737 __dl_add(dl_b, new_bw, cpus); 1738 1739 dl_rq_change_utilization(rq, dl_se, new_bw); 1740 } 1741 1742 dl_se->dl_runtime = runtime; 1743 dl_se->dl_deadline = period; 1744 dl_se->dl_period = period; 1745 1746 dl_se->runtime = 0; 1747 dl_se->deadline = 0; 1748 1749 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime); 1750 dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime); 1751 1752 return retval; 1753 } 1754 1755 /* 1756 * Update the current task's runtime statistics (provided it is still 1757 * a -deadline task and has not been removed from the dl_rq). 1758 */ 1759 static void update_curr_dl(struct rq *rq) 1760 { 1761 struct task_struct *donor = rq->donor; 1762 struct sched_dl_entity *dl_se = &donor->dl; 1763 s64 delta_exec; 1764 1765 if (!dl_task(donor) || !on_dl_rq(dl_se)) 1766 return; 1767 1768 /* 1769 * Consumed budget is computed considering the time as 1770 * observed by schedulable tasks (excluding time spent 1771 * in hardirq context, etc.). Deadlines are instead 1772 * computed using hard walltime. This seems to be the more 1773 * natural solution, but the full ramifications of this 1774 * approach need further study. 
1775 */ 1776 delta_exec = update_curr_common(rq); 1777 update_curr_dl_se(rq, dl_se, delta_exec); 1778 } 1779 1780 static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer) 1781 { 1782 struct sched_dl_entity *dl_se = container_of(timer, 1783 struct sched_dl_entity, 1784 inactive_timer); 1785 struct task_struct *p = NULL; 1786 struct rq_flags rf; 1787 struct rq *rq; 1788 1789 if (!dl_server(dl_se)) { 1790 p = dl_task_of(dl_se); 1791 rq = task_rq_lock(p, &rf); 1792 } else { 1793 rq = dl_se->rq; 1794 rq_lock(rq, &rf); 1795 } 1796 1797 sched_clock_tick(); 1798 update_rq_clock(rq); 1799 1800 if (dl_server(dl_se)) 1801 goto no_task; 1802 1803 if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) { 1804 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); 1805 1806 if (READ_ONCE(p->__state) == TASK_DEAD && dl_se->dl_non_contending) { 1807 sub_running_bw(&p->dl, dl_rq_of_se(&p->dl)); 1808 sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl)); 1809 dl_se->dl_non_contending = 0; 1810 } 1811 1812 raw_spin_lock(&dl_b->lock); 1813 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); 1814 raw_spin_unlock(&dl_b->lock); 1815 __dl_clear_params(dl_se); 1816 1817 goto unlock; 1818 } 1819 1820 no_task: 1821 if (dl_se->dl_non_contending == 0) 1822 goto unlock; 1823 1824 sub_running_bw(dl_se, &rq->dl); 1825 dl_se->dl_non_contending = 0; 1826 unlock: 1827 1828 if (!dl_server(dl_se)) { 1829 task_rq_unlock(rq, p, &rf); 1830 put_task_struct(p); 1831 } else { 1832 rq_unlock(rq, &rf); 1833 } 1834 1835 return HRTIMER_NORESTART; 1836 } 1837 1838 static void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se) 1839 { 1840 struct hrtimer *timer = &dl_se->inactive_timer; 1841 1842 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); 1843 timer->function = inactive_task_timer; 1844 } 1845 1846 #define __node_2_dle(node) \ 1847 rb_entry((node), struct sched_dl_entity, rb_node) 1848 1849 #ifdef CONFIG_SMP 1850 1851 static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) 1852 { 1853 struct rq *rq = rq_of_dl_rq(dl_rq); 1854 1855 if (dl_rq->earliest_dl.curr == 0 || 1856 dl_time_before(deadline, dl_rq->earliest_dl.curr)) { 1857 if (dl_rq->earliest_dl.curr == 0) 1858 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_HIGHER); 1859 dl_rq->earliest_dl.curr = deadline; 1860 cpudl_set(&rq->rd->cpudl, rq->cpu, deadline); 1861 } 1862 } 1863 1864 static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) 1865 { 1866 struct rq *rq = rq_of_dl_rq(dl_rq); 1867 1868 /* 1869 * Since we may have removed our earliest (and/or next earliest) 1870 * task we must recompute them. 
1871 */ 1872 if (!dl_rq->dl_nr_running) { 1873 dl_rq->earliest_dl.curr = 0; 1874 dl_rq->earliest_dl.next = 0; 1875 cpudl_clear(&rq->rd->cpudl, rq->cpu); 1876 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr); 1877 } else { 1878 struct rb_node *leftmost = rb_first_cached(&dl_rq->root); 1879 struct sched_dl_entity *entry = __node_2_dle(leftmost); 1880 1881 dl_rq->earliest_dl.curr = entry->deadline; 1882 cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline); 1883 } 1884 } 1885 1886 #else 1887 1888 static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {} 1889 static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {} 1890 1891 #endif /* CONFIG_SMP */ 1892 1893 static inline 1894 void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 1895 { 1896 u64 deadline = dl_se->deadline; 1897 1898 dl_rq->dl_nr_running++; 1899 add_nr_running(rq_of_dl_rq(dl_rq), 1); 1900 1901 inc_dl_deadline(dl_rq, deadline); 1902 } 1903 1904 static inline 1905 void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 1906 { 1907 WARN_ON(!dl_rq->dl_nr_running); 1908 dl_rq->dl_nr_running--; 1909 sub_nr_running(rq_of_dl_rq(dl_rq), 1); 1910 1911 dec_dl_deadline(dl_rq, dl_se->deadline); 1912 } 1913 1914 static inline bool __dl_less(struct rb_node *a, const struct rb_node *b) 1915 { 1916 return dl_time_before(__node_2_dle(a)->deadline, __node_2_dle(b)->deadline); 1917 } 1918 1919 static __always_inline struct sched_statistics * 1920 __schedstats_from_dl_se(struct sched_dl_entity *dl_se) 1921 { 1922 if (!schedstat_enabled()) 1923 return NULL; 1924 1925 if (dl_server(dl_se)) 1926 return NULL; 1927 1928 return &dl_task_of(dl_se)->stats; 1929 } 1930 1931 static inline void 1932 update_stats_wait_start_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se) 1933 { 1934 struct sched_statistics *stats = __schedstats_from_dl_se(dl_se); 1935 if (stats) 1936 __update_stats_wait_start(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats); 1937 } 1938 1939 static inline void 1940 update_stats_wait_end_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se) 1941 { 1942 struct sched_statistics *stats = __schedstats_from_dl_se(dl_se); 1943 if (stats) 1944 __update_stats_wait_end(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats); 1945 } 1946 1947 static inline void 1948 update_stats_enqueue_sleeper_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se) 1949 { 1950 struct sched_statistics *stats = __schedstats_from_dl_se(dl_se); 1951 if (stats) 1952 __update_stats_enqueue_sleeper(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats); 1953 } 1954 1955 static inline void 1956 update_stats_enqueue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se, 1957 int flags) 1958 { 1959 if (!schedstat_enabled()) 1960 return; 1961 1962 if (flags & ENQUEUE_WAKEUP) 1963 update_stats_enqueue_sleeper_dl(dl_rq, dl_se); 1964 } 1965 1966 static inline void 1967 update_stats_dequeue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se, 1968 int flags) 1969 { 1970 struct task_struct *p = dl_task_of(dl_se); 1971 1972 if (!schedstat_enabled()) 1973 return; 1974 1975 if ((flags & DEQUEUE_SLEEP)) { 1976 unsigned int state; 1977 1978 state = READ_ONCE(p->__state); 1979 if (state & TASK_INTERRUPTIBLE) 1980 __schedstat_set(p->stats.sleep_start, 1981 rq_clock(rq_of_dl_rq(dl_rq))); 1982 1983 if (state & TASK_UNINTERRUPTIBLE) 1984 __schedstat_set(p->stats.block_start, 1985 rq_clock(rq_of_dl_rq(dl_rq))); 1986 } 1987 } 1988 1989 static void __enqueue_dl_entity(struct sched_dl_entity *dl_se) 1990 { 1991 struct dl_rq *dl_rq = 
dl_rq_of_se(dl_se); 1992 1993 WARN_ON_ONCE(!RB_EMPTY_NODE(&dl_se->rb_node)); 1994 1995 rb_add_cached(&dl_se->rb_node, &dl_rq->root, __dl_less); 1996 1997 inc_dl_tasks(dl_se, dl_rq); 1998 } 1999 2000 static void __dequeue_dl_entity(struct sched_dl_entity *dl_se) 2001 { 2002 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); 2003 2004 if (RB_EMPTY_NODE(&dl_se->rb_node)) 2005 return; 2006 2007 rb_erase_cached(&dl_se->rb_node, &dl_rq->root); 2008 2009 RB_CLEAR_NODE(&dl_se->rb_node); 2010 2011 dec_dl_tasks(dl_se, dl_rq); 2012 } 2013 2014 static void 2015 enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags) 2016 { 2017 WARN_ON_ONCE(on_dl_rq(dl_se)); 2018 2019 update_stats_enqueue_dl(dl_rq_of_se(dl_se), dl_se, flags); 2020 2021 /* 2022 * Check if a constrained deadline task was activated 2023 * after the deadline but before the next period. 2024 * If that is the case, the task will be throttled and 2025 * the replenishment timer will be set to the next period. 2026 */ 2027 if (!dl_se->dl_throttled && !dl_is_implicit(dl_se)) 2028 dl_check_constrained_dl(dl_se); 2029 2030 if (flags & (ENQUEUE_RESTORE|ENQUEUE_MIGRATING)) { 2031 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); 2032 2033 add_rq_bw(dl_se, dl_rq); 2034 add_running_bw(dl_se, dl_rq); 2035 } 2036 2037 /* 2038 * If p is throttled, we do not enqueue it. In fact, if it exhausted 2039 * its budget it needs a replenishment and, since it now is on 2040 * its rq, the bandwidth timer callback (which clearly has not 2041 * run yet) will take care of this. 2042 * However, the active utilization does not depend on the fact 2043 * that the task is on the runqueue or not (but depends on the 2044 * task's state - in GRUB parlance, "inactive" vs "active contending"). 2045 * In other words, even if a task is throttled its utilization must 2046 * be counted in the active utilization; hence, we need to call 2047 * add_running_bw(). 2048 */ 2049 if (!dl_se->dl_defer && dl_se->dl_throttled && !(flags & ENQUEUE_REPLENISH)) { 2050 if (flags & ENQUEUE_WAKEUP) 2051 task_contending(dl_se, flags); 2052 2053 return; 2054 } 2055 2056 /* 2057 * If this is a wakeup or a new instance, the scheduling 2058 * parameters of the task might need updating. Otherwise, 2059 * we want a replenishment of its runtime. 2060 */ 2061 if (flags & ENQUEUE_WAKEUP) { 2062 task_contending(dl_se, flags); 2063 update_dl_entity(dl_se); 2064 } else if (flags & ENQUEUE_REPLENISH) { 2065 replenish_dl_entity(dl_se); 2066 } else if ((flags & ENQUEUE_RESTORE) && 2067 !is_dl_boosted(dl_se) && 2068 dl_time_before(dl_se->deadline, rq_clock(rq_of_dl_se(dl_se)))) { 2069 setup_new_dl_entity(dl_se); 2070 } 2071 2072 /* 2073 * If the reservation is still throttled, e.g., it got replenished but is a 2074 * deferred task and still got to wait, don't enqueue. 2075 */ 2076 if (dl_se->dl_throttled && start_dl_timer(dl_se)) 2077 return; 2078 2079 /* 2080 * We're about to enqueue, make sure we're not ->dl_throttled! 2081 * In case the timer was not started, say because the defer time 2082 * has passed, mark as not throttled and mark unarmed. 2083 * Also cancel earlier timers, since letting those run is pointless. 
2084 */ 2085 if (dl_se->dl_throttled) { 2086 hrtimer_try_to_cancel(&dl_se->dl_timer); 2087 dl_se->dl_defer_armed = 0; 2088 dl_se->dl_throttled = 0; 2089 } 2090 2091 __enqueue_dl_entity(dl_se); 2092 } 2093 2094 static void dequeue_dl_entity(struct sched_dl_entity *dl_se, int flags) 2095 { 2096 __dequeue_dl_entity(dl_se); 2097 2098 if (flags & (DEQUEUE_SAVE|DEQUEUE_MIGRATING)) { 2099 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); 2100 2101 sub_running_bw(dl_se, dl_rq); 2102 sub_rq_bw(dl_se, dl_rq); 2103 } 2104 2105 /* 2106 * This check allows to start the inactive timer (or to immediately 2107 * decrease the active utilization, if needed) in two cases: 2108 * when the task blocks and when it is terminating 2109 * (p->state == TASK_DEAD). We can handle the two cases in the same 2110 * way, because from GRUB's point of view the same thing is happening 2111 * (the task moves from "active contending" to "active non contending" 2112 * or "inactive") 2113 */ 2114 if (flags & DEQUEUE_SLEEP) 2115 task_non_contending(dl_se); 2116 } 2117 2118 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) 2119 { 2120 if (is_dl_boosted(&p->dl)) { 2121 /* 2122 * Because of delays in the detection of the overrun of a 2123 * thread's runtime, it might be the case that a thread 2124 * goes to sleep in a rt mutex with negative runtime. As 2125 * a consequence, the thread will be throttled. 2126 * 2127 * While waiting for the mutex, this thread can also be 2128 * boosted via PI, resulting in a thread that is throttled 2129 * and boosted at the same time. 2130 * 2131 * In this case, the boost overrides the throttle. 2132 */ 2133 if (p->dl.dl_throttled) { 2134 /* 2135 * The replenish timer needs to be canceled. No 2136 * problem if it fires concurrently: boosted threads 2137 * are ignored in dl_task_timer(). 2138 */ 2139 cancel_replenish_timer(&p->dl); 2140 p->dl.dl_throttled = 0; 2141 } 2142 } else if (!dl_prio(p->normal_prio)) { 2143 /* 2144 * Special case in which we have a !SCHED_DEADLINE task that is going 2145 * to be deboosted, but exceeds its runtime while doing so. No point in 2146 * replenishing it, as it's going to return back to its original 2147 * scheduling class after this. If it has been throttled, we need to 2148 * clear the flag, otherwise the task may wake up as throttled after 2149 * being boosted again with no means to replenish the runtime and clear 2150 * the throttle. 2151 */ 2152 p->dl.dl_throttled = 0; 2153 if (!(flags & ENQUEUE_REPLENISH)) 2154 printk_deferred_once("sched: DL de-boosted task PID %d: REPLENISH flag missing\n", 2155 task_pid_nr(p)); 2156 2157 return; 2158 } 2159 2160 check_schedstat_required(); 2161 update_stats_wait_start_dl(dl_rq_of_se(&p->dl), &p->dl); 2162 2163 if (p->on_rq == TASK_ON_RQ_MIGRATING) 2164 flags |= ENQUEUE_MIGRATING; 2165 2166 enqueue_dl_entity(&p->dl, flags); 2167 2168 if (dl_server(&p->dl)) 2169 return; 2170 2171 if (!task_current(rq, p) && !p->dl.dl_throttled && p->nr_cpus_allowed > 1) 2172 enqueue_pushable_dl_task(rq, p); 2173 } 2174 2175 static bool dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) 2176 { 2177 update_curr_dl(rq); 2178 2179 if (p->on_rq == TASK_ON_RQ_MIGRATING) 2180 flags |= DEQUEUE_MIGRATING; 2181 2182 dequeue_dl_entity(&p->dl, flags); 2183 if (!p->dl.dl_throttled && !dl_server(&p->dl)) 2184 dequeue_pushable_dl_task(rq, p); 2185 2186 return true; 2187 } 2188 2189 /* 2190 * Yield task semantic for -deadline tasks is: 2191 * 2192 * get off from the CPU until our next instance, with 2193 * a new runtime. 
This is of little use now, since we 2194 * don't have a bandwidth reclaiming mechanism. Anyway, 2195 * bandwidth reclaiming is planned for the future, and 2196 * yield_task_dl will indicate that some spare budget 2197 * is available for other task instances to use it. 2198 */ 2199 static void yield_task_dl(struct rq *rq) 2200 { 2201 /* 2202 * We make the task go to sleep until its current deadline by 2203 * forcing its runtime to zero. This way, update_curr_dl() stops 2204 * it and the bandwidth timer will wake it up and will give it 2205 * new scheduling parameters (thanks to dl_yielded=1). 2206 */ 2207 rq->curr->dl.dl_yielded = 1; 2208 2209 update_rq_clock(rq); 2210 update_curr_dl(rq); 2211 /* 2212 * Tell update_rq_clock() that we've just updated, 2213 * so we don't do microscopic update in schedule() 2214 * and double the fastpath cost. 2215 */ 2216 rq_clock_skip_update(rq); 2217 } 2218 2219 #ifdef CONFIG_SMP 2220 2221 static inline bool dl_task_is_earliest_deadline(struct task_struct *p, 2222 struct rq *rq) 2223 { 2224 return (!rq->dl.dl_nr_running || 2225 dl_time_before(p->dl.deadline, 2226 rq->dl.earliest_dl.curr)); 2227 } 2228 2229 static int find_later_rq(struct task_struct *task); 2230 2231 static int 2232 select_task_rq_dl(struct task_struct *p, int cpu, int flags) 2233 { 2234 struct task_struct *curr, *donor; 2235 bool select_rq; 2236 struct rq *rq; 2237 2238 if (!(flags & WF_TTWU)) 2239 goto out; 2240 2241 rq = cpu_rq(cpu); 2242 2243 rcu_read_lock(); 2244 curr = READ_ONCE(rq->curr); /* unlocked access */ 2245 donor = READ_ONCE(rq->donor); 2246 2247 /* 2248 * If we are dealing with a -deadline task, we must 2249 * decide where to wake it up. 2250 * If it has a later deadline and the current task 2251 * on this rq can't move (provided the waking task 2252 * can!) we prefer to send it somewhere else. On the 2253 * other hand, if it has a shorter deadline, we 2254 * try to make it stay here, it might be important. 2255 */ 2256 select_rq = unlikely(dl_task(donor)) && 2257 (curr->nr_cpus_allowed < 2 || 2258 !dl_entity_preempt(&p->dl, &donor->dl)) && 2259 p->nr_cpus_allowed > 1; 2260 2261 /* 2262 * Take the capacity of the CPU into account to 2263 * ensure it fits the requirement of the task. 2264 */ 2265 if (sched_asym_cpucap_active()) 2266 select_rq |= !dl_task_fits_capacity(p, cpu); 2267 2268 if (select_rq) { 2269 int target = find_later_rq(p); 2270 2271 if (target != -1 && 2272 dl_task_is_earliest_deadline(p, cpu_rq(target))) 2273 cpu = target; 2274 } 2275 rcu_read_unlock(); 2276 2277 out: 2278 return cpu; 2279 } 2280 2281 static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused) 2282 { 2283 struct rq_flags rf; 2284 struct rq *rq; 2285 2286 if (READ_ONCE(p->__state) != TASK_WAKING) 2287 return; 2288 2289 rq = task_rq(p); 2290 /* 2291 * Since p->state == TASK_WAKING, set_task_cpu() has been called 2292 * from try_to_wake_up(). Hence, p->pi_lock is locked, but 2293 * rq->lock is not... So, lock it 2294 */ 2295 rq_lock(rq, &rf); 2296 if (p->dl.dl_non_contending) { 2297 update_rq_clock(rq); 2298 sub_running_bw(&p->dl, &rq->dl); 2299 p->dl.dl_non_contending = 0; 2300 /* 2301 * If the timer handler is currently running and the 2302 * timer cannot be canceled, inactive_task_timer() 2303 * will see that dl_not_contending is not set, and 2304 * will not touch the rq's active utilization, 2305 * so we are still safe. 
2306 */
2307 cancel_inactive_timer(&p->dl);
2308 }
2309 sub_rq_bw(&p->dl, &rq->dl);
2310 rq_unlock(rq, &rf);
2311 }
2312
2313 static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
2314 {
2315 /*
2316 * Current can't be migrated, useless to reschedule,
2317 * let's hope p can move out.
2318 */
2319 if (rq->curr->nr_cpus_allowed == 1 ||
2320 !cpudl_find(&rq->rd->cpudl, rq->donor, NULL))
2321 return;
2322
2323 /*
2324 * p is migratable, so let's not schedule it and
2325 * see if it is pushed or pulled somewhere else.
2326 */
2327 if (p->nr_cpus_allowed != 1 &&
2328 cpudl_find(&rq->rd->cpudl, p, NULL))
2329 return;
2330
2331 resched_curr(rq);
2332 }
2333
2334 static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
2335 {
2336 if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
2337 /*
2338 * This is OK, because current is on_cpu, which avoids it being
2339 * picked for load-balance and preemption/IRQs are still
2340 * disabled avoiding further scheduler activity on it and we've
2341 * not yet started the picking loop.
2342 */
2343 rq_unpin_lock(rq, rf);
2344 pull_dl_task(rq);
2345 rq_repin_lock(rq, rf);
2346 }
2347
2348 return sched_stop_runnable(rq) || sched_dl_runnable(rq);
2349 }
2350 #endif /* CONFIG_SMP */
2351
2352 /*
2353 * Only called when both the current and waking task are -deadline
2354 * tasks.
2355 */
2356 static void wakeup_preempt_dl(struct rq *rq, struct task_struct *p,
2357 int flags)
2358 {
2359 if (dl_entity_preempt(&p->dl, &rq->donor->dl)) {
2360 resched_curr(rq);
2361 return;
2362 }
2363
2364 #ifdef CONFIG_SMP
2365 /*
2366 * In the unlikely case current and p have the same deadline
2367 * let us try to decide what's the best thing to do...
2368 */
2369 if ((p->dl.deadline == rq->donor->dl.deadline) &&
2370 !test_tsk_need_resched(rq->curr))
2371 check_preempt_equal_dl(rq, p);
2372 #endif /* CONFIG_SMP */
2373 }
2374
2375 #ifdef CONFIG_SCHED_HRTICK
2376 static void start_hrtick_dl(struct rq *rq, struct sched_dl_entity *dl_se)
2377 {
2378 hrtick_start(rq, dl_se->runtime);
2379 }
2380 #else /* !CONFIG_SCHED_HRTICK */
2381 static void start_hrtick_dl(struct rq *rq, struct sched_dl_entity *dl_se)
2382 {
2383 }
2384 #endif
2385
2386 static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
2387 {
2388 struct sched_dl_entity *dl_se = &p->dl;
2389 struct dl_rq *dl_rq = &rq->dl;
2390
2391 p->se.exec_start = rq_clock_task(rq);
2392 if (on_dl_rq(&p->dl))
2393 update_stats_wait_end_dl(dl_rq, dl_se);
2394
2395 /* You can't push away the running task */
2396 dequeue_pushable_dl_task(rq, p);
2397
2398 if (!first)
2399 return;
2400
2401 if (rq->donor->sched_class != &dl_sched_class)
2402 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
2403
2404 deadline_queue_push_tasks(rq);
2405
2406 if (hrtick_enabled_dl(rq))
2407 start_hrtick_dl(rq, &p->dl);
2408 }
2409
2410 static struct sched_dl_entity *pick_next_dl_entity(struct dl_rq *dl_rq)
2411 {
2412 struct rb_node *left = rb_first_cached(&dl_rq->root);
2413
2414 if (!left)
2415 return NULL;
2416
2417 return __node_2_dle(left);
2418 }
2419
2420 /*
2421 * __pick_task_dl - Helper to pick the next -deadline task to run.
2422 * @rq: The runqueue to pick the next task from.
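*
* Return: the next -deadline task, either one backing the earliest-deadline
* entity or one picked from a dl_server; NULL if no -deadline entity is
* runnable.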
2423 */ 2424 static struct task_struct *__pick_task_dl(struct rq *rq) 2425 { 2426 struct sched_dl_entity *dl_se; 2427 struct dl_rq *dl_rq = &rq->dl; 2428 struct task_struct *p; 2429 2430 again: 2431 if (!sched_dl_runnable(rq)) 2432 return NULL; 2433 2434 dl_se = pick_next_dl_entity(dl_rq); 2435 WARN_ON_ONCE(!dl_se); 2436 2437 if (dl_server(dl_se)) { 2438 p = dl_se->server_pick_task(dl_se); 2439 if (!p) { 2440 if (dl_server_active(dl_se)) { 2441 dl_se->dl_yielded = 1; 2442 update_curr_dl_se(rq, dl_se, 0); 2443 } 2444 goto again; 2445 } 2446 rq->dl_server = dl_se; 2447 } else { 2448 p = dl_task_of(dl_se); 2449 } 2450 2451 return p; 2452 } 2453 2454 static struct task_struct *pick_task_dl(struct rq *rq) 2455 { 2456 return __pick_task_dl(rq); 2457 } 2458 2459 static void put_prev_task_dl(struct rq *rq, struct task_struct *p, struct task_struct *next) 2460 { 2461 struct sched_dl_entity *dl_se = &p->dl; 2462 struct dl_rq *dl_rq = &rq->dl; 2463 2464 if (on_dl_rq(&p->dl)) 2465 update_stats_wait_start_dl(dl_rq, dl_se); 2466 2467 update_curr_dl(rq); 2468 2469 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1); 2470 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1) 2471 enqueue_pushable_dl_task(rq, p); 2472 } 2473 2474 /* 2475 * scheduler tick hitting a task of our scheduling class. 2476 * 2477 * NOTE: This function can be called remotely by the tick offload that 2478 * goes along full dynticks. Therefore no local assumption can be made 2479 * and everything must be accessed through the @rq and @curr passed in 2480 * parameters. 2481 */ 2482 static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued) 2483 { 2484 update_curr_dl(rq); 2485 2486 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1); 2487 /* 2488 * Even when we have runtime, update_curr_dl() might have resulted in us 2489 * not being the leftmost task anymore. In that case NEED_RESCHED will 2490 * be set and schedule() will start a new hrtick for the next task. 
2491 */ 2492 if (hrtick_enabled_dl(rq) && queued && p->dl.runtime > 0 && 2493 is_leftmost(&p->dl, &rq->dl)) 2494 start_hrtick_dl(rq, &p->dl); 2495 } 2496 2497 static void task_fork_dl(struct task_struct *p) 2498 { 2499 /* 2500 * SCHED_DEADLINE tasks cannot fork and this is achieved through 2501 * sched_fork() 2502 */ 2503 } 2504 2505 #ifdef CONFIG_SMP 2506 2507 /* Only try algorithms three times */ 2508 #define DL_MAX_TRIES 3 2509 2510 /* 2511 * Return the earliest pushable rq's task, which is suitable to be executed 2512 * on the CPU, NULL otherwise: 2513 */ 2514 static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu) 2515 { 2516 struct task_struct *p = NULL; 2517 struct rb_node *next_node; 2518 2519 if (!has_pushable_dl_tasks(rq)) 2520 return NULL; 2521 2522 next_node = rb_first_cached(&rq->dl.pushable_dl_tasks_root); 2523 while (next_node) { 2524 p = __node_2_pdl(next_node); 2525 2526 if (task_is_pushable(rq, p, cpu)) 2527 return p; 2528 2529 next_node = rb_next(next_node); 2530 } 2531 2532 return NULL; 2533 } 2534 2535 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl); 2536 2537 static int find_later_rq(struct task_struct *task) 2538 { 2539 struct sched_domain *sd; 2540 struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl); 2541 int this_cpu = smp_processor_id(); 2542 int cpu = task_cpu(task); 2543 2544 /* Make sure the mask is initialized first */ 2545 if (unlikely(!later_mask)) 2546 return -1; 2547 2548 if (task->nr_cpus_allowed == 1) 2549 return -1; 2550 2551 /* 2552 * We have to consider system topology and task affinity 2553 * first, then we can look for a suitable CPU. 2554 */ 2555 if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask)) 2556 return -1; 2557 2558 /* 2559 * If we are here, some targets have been found, including 2560 * the most suitable which is, among the runqueues where the 2561 * current tasks have later deadlines than the task's one, the 2562 * rq with the latest possible one. 2563 * 2564 * Now we check how well this matches with task's 2565 * affinity and system topology. 2566 * 2567 * The last CPU where the task run is our first 2568 * guess, since it is most likely cache-hot there. 2569 */ 2570 if (cpumask_test_cpu(cpu, later_mask)) 2571 return cpu; 2572 /* 2573 * Check if this_cpu is to be skipped (i.e., it is 2574 * not in the mask) or not. 2575 */ 2576 if (!cpumask_test_cpu(this_cpu, later_mask)) 2577 this_cpu = -1; 2578 2579 rcu_read_lock(); 2580 for_each_domain(cpu, sd) { 2581 if (sd->flags & SD_WAKE_AFFINE) { 2582 int best_cpu; 2583 2584 /* 2585 * If possible, preempting this_cpu is 2586 * cheaper than migrating. 2587 */ 2588 if (this_cpu != -1 && 2589 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) { 2590 rcu_read_unlock(); 2591 return this_cpu; 2592 } 2593 2594 best_cpu = cpumask_any_and_distribute(later_mask, 2595 sched_domain_span(sd)); 2596 /* 2597 * Last chance: if a CPU being in both later_mask 2598 * and current sd span is valid, that becomes our 2599 * choice. Of course, the latest possible CPU is 2600 * already under consideration through later_mask. 2601 */ 2602 if (best_cpu < nr_cpu_ids) { 2603 rcu_read_unlock(); 2604 return best_cpu; 2605 } 2606 } 2607 } 2608 rcu_read_unlock(); 2609 2610 /* 2611 * At this point, all our guesses failed, we just return 2612 * 'something', and let the caller sort the things out. 
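* Preference order: this_cpu if it is still in later_mask, then any other
* CPU from later_mask, otherwise -1.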
2613 */ 2614 if (this_cpu != -1) 2615 return this_cpu; 2616 2617 cpu = cpumask_any_distribute(later_mask); 2618 if (cpu < nr_cpu_ids) 2619 return cpu; 2620 2621 return -1; 2622 } 2623 2624 /* Locks the rq it finds */ 2625 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq) 2626 { 2627 struct rq *later_rq = NULL; 2628 int tries; 2629 int cpu; 2630 2631 for (tries = 0; tries < DL_MAX_TRIES; tries++) { 2632 cpu = find_later_rq(task); 2633 2634 if ((cpu == -1) || (cpu == rq->cpu)) 2635 break; 2636 2637 later_rq = cpu_rq(cpu); 2638 2639 if (!dl_task_is_earliest_deadline(task, later_rq)) { 2640 /* 2641 * Target rq has tasks of equal or earlier deadline, 2642 * retrying does not release any lock and is unlikely 2643 * to yield a different result. 2644 */ 2645 later_rq = NULL; 2646 break; 2647 } 2648 2649 /* Retry if something changed. */ 2650 if (double_lock_balance(rq, later_rq)) { 2651 if (unlikely(task_rq(task) != rq || 2652 !cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) || 2653 task_on_cpu(rq, task) || 2654 !dl_task(task) || 2655 is_migration_disabled(task) || 2656 !task_on_rq_queued(task))) { 2657 double_unlock_balance(rq, later_rq); 2658 later_rq = NULL; 2659 break; 2660 } 2661 } 2662 2663 /* 2664 * If the rq we found has no -deadline task, or 2665 * its earliest one has a later deadline than our 2666 * task, the rq is a good one. 2667 */ 2668 if (dl_task_is_earliest_deadline(task, later_rq)) 2669 break; 2670 2671 /* Otherwise we try again. */ 2672 double_unlock_balance(rq, later_rq); 2673 later_rq = NULL; 2674 } 2675 2676 return later_rq; 2677 } 2678 2679 static struct task_struct *pick_next_pushable_dl_task(struct rq *rq) 2680 { 2681 struct task_struct *p; 2682 2683 if (!has_pushable_dl_tasks(rq)) 2684 return NULL; 2685 2686 p = __node_2_pdl(rb_first_cached(&rq->dl.pushable_dl_tasks_root)); 2687 2688 WARN_ON_ONCE(rq->cpu != task_cpu(p)); 2689 WARN_ON_ONCE(task_current(rq, p)); 2690 WARN_ON_ONCE(p->nr_cpus_allowed <= 1); 2691 2692 WARN_ON_ONCE(!task_on_rq_queued(p)); 2693 WARN_ON_ONCE(!dl_task(p)); 2694 2695 return p; 2696 } 2697 2698 /* 2699 * See if the non running -deadline tasks on this rq 2700 * can be sent to some other CPU where they can preempt 2701 * and start executing. 2702 */ 2703 static int push_dl_task(struct rq *rq) 2704 { 2705 struct task_struct *next_task; 2706 struct rq *later_rq; 2707 int ret = 0; 2708 2709 next_task = pick_next_pushable_dl_task(rq); 2710 if (!next_task) 2711 return 0; 2712 2713 retry: 2714 /* 2715 * If next_task preempts rq->curr, and rq->curr 2716 * can move away, it makes sense to just reschedule 2717 * without going further in pushing next_task. 2718 */ 2719 if (dl_task(rq->donor) && 2720 dl_time_before(next_task->dl.deadline, rq->donor->dl.deadline) && 2721 rq->curr->nr_cpus_allowed > 1) { 2722 resched_curr(rq); 2723 return 0; 2724 } 2725 2726 if (is_migration_disabled(next_task)) 2727 return 0; 2728 2729 if (WARN_ON(next_task == rq->curr)) 2730 return 0; 2731 2732 /* We might release rq lock */ 2733 get_task_struct(next_task); 2734 2735 /* Will lock the rq it'll find */ 2736 later_rq = find_lock_later_rq(next_task, rq); 2737 if (!later_rq) { 2738 struct task_struct *task; 2739 2740 /* 2741 * We must check all this again, since 2742 * find_lock_later_rq releases rq->lock and it is 2743 * then possible that next_task has migrated. 2744 */ 2745 task = pick_next_pushable_dl_task(rq); 2746 if (task == next_task) { 2747 /* 2748 * The task is still there. We don't try 2749 * again, some other CPU will pull it when ready. 
2750 */ 2751 goto out; 2752 } 2753 2754 if (!task) 2755 /* No more tasks */ 2756 goto out; 2757 2758 put_task_struct(next_task); 2759 next_task = task; 2760 goto retry; 2761 } 2762 2763 move_queued_task_locked(rq, later_rq, next_task); 2764 ret = 1; 2765 2766 resched_curr(later_rq); 2767 2768 double_unlock_balance(rq, later_rq); 2769 2770 out: 2771 put_task_struct(next_task); 2772 2773 return ret; 2774 } 2775 2776 static void push_dl_tasks(struct rq *rq) 2777 { 2778 /* push_dl_task() will return true if it moved a -deadline task */ 2779 while (push_dl_task(rq)) 2780 ; 2781 } 2782 2783 static void pull_dl_task(struct rq *this_rq) 2784 { 2785 int this_cpu = this_rq->cpu, cpu; 2786 struct task_struct *p, *push_task; 2787 bool resched = false; 2788 struct rq *src_rq; 2789 u64 dmin = LONG_MAX; 2790 2791 if (likely(!dl_overloaded(this_rq))) 2792 return; 2793 2794 /* 2795 * Match the barrier from dl_set_overloaded; this guarantees that if we 2796 * see overloaded we must also see the dlo_mask bit. 2797 */ 2798 smp_rmb(); 2799 2800 for_each_cpu(cpu, this_rq->rd->dlo_mask) { 2801 if (this_cpu == cpu) 2802 continue; 2803 2804 src_rq = cpu_rq(cpu); 2805 2806 /* 2807 * It looks racy, and it is! However, as in sched_rt.c, 2808 * we are fine with this. 2809 */ 2810 if (this_rq->dl.dl_nr_running && 2811 dl_time_before(this_rq->dl.earliest_dl.curr, 2812 src_rq->dl.earliest_dl.next)) 2813 continue; 2814 2815 /* Might drop this_rq->lock */ 2816 push_task = NULL; 2817 double_lock_balance(this_rq, src_rq); 2818 2819 /* 2820 * If there are no more pullable tasks on the 2821 * rq, we're done with it. 2822 */ 2823 if (src_rq->dl.dl_nr_running <= 1) 2824 goto skip; 2825 2826 p = pick_earliest_pushable_dl_task(src_rq, this_cpu); 2827 2828 /* 2829 * We found a task to be pulled if: 2830 * - it preempts our current (if there's one), 2831 * - it will preempt the last one we pulled (if any). 2832 */ 2833 if (p && dl_time_before(p->dl.deadline, dmin) && 2834 dl_task_is_earliest_deadline(p, this_rq)) { 2835 WARN_ON(p == src_rq->curr); 2836 WARN_ON(!task_on_rq_queued(p)); 2837 2838 /* 2839 * Then we pull iff p has actually an earlier 2840 * deadline than the current task of its runqueue. 2841 */ 2842 if (dl_time_before(p->dl.deadline, 2843 src_rq->donor->dl.deadline)) 2844 goto skip; 2845 2846 if (is_migration_disabled(p)) { 2847 push_task = get_push_task(src_rq); 2848 } else { 2849 move_queued_task_locked(src_rq, this_rq, p); 2850 dmin = p->dl.deadline; 2851 resched = true; 2852 } 2853 2854 /* Is there any other task even earlier? */ 2855 } 2856 skip: 2857 double_unlock_balance(this_rq, src_rq); 2858 2859 if (push_task) { 2860 preempt_disable(); 2861 raw_spin_rq_unlock(this_rq); 2862 stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop, 2863 push_task, &src_rq->push_work); 2864 preempt_enable(); 2865 raw_spin_rq_lock(this_rq); 2866 } 2867 } 2868 2869 if (resched) 2870 resched_curr(this_rq); 2871 } 2872 2873 /* 2874 * Since the task is not running and a reschedule is not going to happen 2875 * anytime soon on its runqueue, we try pushing it away now. 
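* We only bother when the woken task can migrate and either the current
* task cannot move away or the woken task would not preempt it anyway.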
2876 */
2877 static void task_woken_dl(struct rq *rq, struct task_struct *p)
2878 {
2879 if (!task_on_cpu(rq, p) &&
2880 !test_tsk_need_resched(rq->curr) &&
2881 p->nr_cpus_allowed > 1 &&
2882 dl_task(rq->donor) &&
2883 (rq->curr->nr_cpus_allowed < 2 ||
2884 !dl_entity_preempt(&p->dl, &rq->donor->dl))) {
2885 push_dl_tasks(rq);
2886 }
2887 }
2888
2889 static void set_cpus_allowed_dl(struct task_struct *p,
2890 struct affinity_context *ctx)
2891 {
2892 struct root_domain *src_rd;
2893 struct rq *rq;
2894
2895 WARN_ON_ONCE(!dl_task(p));
2896
2897 rq = task_rq(p);
2898 src_rd = rq->rd;
2899 /*
2900 * Migrating a SCHED_DEADLINE task between exclusive
2901 * cpusets (different root_domains) entails a bandwidth
2902 * update. We already made space for us in the destination
2903 * domain (see cpuset_can_attach()).
2904 */
2905 if (!cpumask_intersects(src_rd->span, ctx->new_mask)) {
2906 struct dl_bw *src_dl_b;
2907
2908 src_dl_b = dl_bw_of(cpu_of(rq));
2909 /*
2910 * We now free resources of the root_domain we are migrating
2911 * off. In the worst case, sched_setattr() may temporarily fail
2912 * until we complete the update.
2913 */
2914 raw_spin_lock(&src_dl_b->lock);
2915 __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
2916 raw_spin_unlock(&src_dl_b->lock);
2917 }
2918
2919 set_cpus_allowed_common(p, ctx);
2920 }
2921
2922 /* Assumes rq->lock is held */
2923 static void rq_online_dl(struct rq *rq)
2924 {
2925 if (rq->dl.overloaded)
2926 dl_set_overload(rq);
2927
2928 cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
2929 if (rq->dl.dl_nr_running > 0)
2930 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
2931 }
2932
2933 /* Assumes rq->lock is held */
2934 static void rq_offline_dl(struct rq *rq)
2935 {
2936 if (rq->dl.overloaded)
2937 dl_clear_overload(rq);
2938
2939 cpudl_clear(&rq->rd->cpudl, rq->cpu);
2940 cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
2941 }
2942
2943 void __init init_sched_dl_class(void)
2944 {
2945 unsigned int i;
2946
2947 for_each_possible_cpu(i)
2948 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
2949 GFP_KERNEL, cpu_to_node(i));
2950 }
2951
2952 void dl_add_task_root_domain(struct task_struct *p)
2953 {
2954 struct rq_flags rf;
2955 struct rq *rq;
2956 struct dl_bw *dl_b;
2957
2958 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
2959 if (!dl_task(p)) {
2960 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
2961 return;
2962 }
2963
2964 rq = __task_rq_lock(p, &rf);
2965
2966 dl_b = &rq->rd->dl_bw;
2967 raw_spin_lock(&dl_b->lock);
2968
2969 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
2970
2971 raw_spin_unlock(&dl_b->lock);
2972
2973 task_rq_unlock(rq, p, &rf);
2974 }
2975
2976 void dl_clear_root_domain(struct root_domain *rd)
2977 {
2978 int i;
2979
2980 guard(raw_spinlock_irqsave)(&rd->dl_bw.lock);
2981 rd->dl_bw.total_bw = 0;
2982
2983 /*
2984 * dl_server bandwidth is only restored when CPUs are attached to root
2985 * domains (after domains are created or CPUs moved back to the
2986 * default root domain).
2987 */
2988 for_each_cpu(i, rd->span) {
2989 struct sched_dl_entity *dl_se = &cpu_rq(i)->fair_server;
2990
2991 if (dl_server(dl_se) && cpu_active(i))
2992 rd->dl_bw.total_bw += dl_se->dl_bw;
2993 }
2994 }
2995
2996 #endif /* CONFIG_SMP */
2997
2998 static void switched_from_dl(struct rq *rq, struct task_struct *p)
2999 {
3000 /*
3001 * task_non_contending() can start the "inactive timer" (if the 0-lag
3002 * time is in the future).
If the task switches back to dl before 3003 * the "inactive timer" fires, it can continue to consume its current 3004 * runtime using its current deadline. If it stays outside of 3005 * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer() 3006 * will reset the task parameters. 3007 */ 3008 if (task_on_rq_queued(p) && p->dl.dl_runtime) 3009 task_non_contending(&p->dl); 3010 3011 /* 3012 * In case a task is setscheduled out from SCHED_DEADLINE we need to 3013 * keep track of that on its cpuset (for correct bandwidth tracking). 3014 */ 3015 dec_dl_tasks_cs(p); 3016 3017 if (!task_on_rq_queued(p)) { 3018 /* 3019 * Inactive timer is armed. However, p is leaving DEADLINE and 3020 * might migrate away from this rq while continuing to run on 3021 * some other class. We need to remove its contribution from 3022 * this rq running_bw now, or sub_rq_bw (below) will complain. 3023 */ 3024 if (p->dl.dl_non_contending) 3025 sub_running_bw(&p->dl, &rq->dl); 3026 sub_rq_bw(&p->dl, &rq->dl); 3027 } 3028 3029 /* 3030 * We cannot use inactive_task_timer() to invoke sub_running_bw() 3031 * at the 0-lag time, because the task could have been migrated 3032 * while SCHED_OTHER in the meanwhile. 3033 */ 3034 if (p->dl.dl_non_contending) 3035 p->dl.dl_non_contending = 0; 3036 3037 /* 3038 * Since this might be the only -deadline task on the rq, 3039 * this is the right place to try to pull some other one 3040 * from an overloaded CPU, if any. 3041 */ 3042 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running) 3043 return; 3044 3045 deadline_queue_pull_task(rq); 3046 } 3047 3048 /* 3049 * When switching to -deadline, we may overload the rq, then 3050 * we try to push someone off, if possible. 3051 */ 3052 static void switched_to_dl(struct rq *rq, struct task_struct *p) 3053 { 3054 cancel_inactive_timer(&p->dl); 3055 3056 /* 3057 * In case a task is setscheduled to SCHED_DEADLINE we need to keep 3058 * track of that on its cpuset (for correct bandwidth tracking). 3059 */ 3060 inc_dl_tasks_cs(p); 3061 3062 /* If p is not queued we will update its parameters at next wakeup. */ 3063 if (!task_on_rq_queued(p)) { 3064 add_rq_bw(&p->dl, &rq->dl); 3065 3066 return; 3067 } 3068 3069 if (rq->donor != p) { 3070 #ifdef CONFIG_SMP 3071 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded) 3072 deadline_queue_push_tasks(rq); 3073 #endif 3074 if (dl_task(rq->donor)) 3075 wakeup_preempt_dl(rq, p, 0); 3076 else 3077 resched_curr(rq); 3078 } else { 3079 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0); 3080 } 3081 } 3082 3083 /* 3084 * If the scheduling parameters of a -deadline task changed, 3085 * a push or pull operation might be needed. 3086 */ 3087 static void prio_changed_dl(struct rq *rq, struct task_struct *p, 3088 int oldprio) 3089 { 3090 if (!task_on_rq_queued(p)) 3091 return; 3092 3093 #ifdef CONFIG_SMP 3094 /* 3095 * This might be too much, but unfortunately 3096 * we don't have the old deadline value, and 3097 * we can't argue if the task is increasing 3098 * or lowering its prio, so... 3099 */ 3100 if (!rq->dl.overloaded) 3101 deadline_queue_pull_task(rq); 3102 3103 if (task_current_donor(rq, p)) { 3104 /* 3105 * If we now have a earlier deadline task than p, 3106 * then reschedule, provided p is still on this 3107 * runqueue. 3108 */ 3109 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline)) 3110 resched_curr(rq); 3111 } else { 3112 /* 3113 * Current may not be deadline in case p was throttled but we 3114 * have just replenished it (e.g. rt_mutex_setprio()). 
3115 *
3116 * Otherwise, if p was given an earlier deadline, reschedule.
3117 */
3118 if (!dl_task(rq->curr) ||
3119 dl_time_before(p->dl.deadline, rq->curr->dl.deadline))
3120 resched_curr(rq);
3121 }
3122 #else
3123 /*
3124 * We don't know if p has an earlier or later deadline, so let's blindly
3125 * set a (maybe not needed) rescheduling point.
3126 */
3127 resched_curr(rq);
3128 #endif
3129 }
3130
3131 #ifdef CONFIG_SCHED_CORE
3132 static int task_is_throttled_dl(struct task_struct *p, int cpu)
3133 {
3134 return p->dl.dl_throttled;
3135 }
3136 #endif
3137
3138 DEFINE_SCHED_CLASS(dl) = {
3139
3140 .enqueue_task = enqueue_task_dl,
3141 .dequeue_task = dequeue_task_dl,
3142 .yield_task = yield_task_dl,
3143
3144 .wakeup_preempt = wakeup_preempt_dl,
3145
3146 .pick_task = pick_task_dl,
3147 .put_prev_task = put_prev_task_dl,
3148 .set_next_task = set_next_task_dl,
3149
3150 #ifdef CONFIG_SMP
3151 .balance = balance_dl,
3152 .select_task_rq = select_task_rq_dl,
3153 .migrate_task_rq = migrate_task_rq_dl,
3154 .set_cpus_allowed = set_cpus_allowed_dl,
3155 .rq_online = rq_online_dl,
3156 .rq_offline = rq_offline_dl,
3157 .task_woken = task_woken_dl,
3158 .find_lock_rq = find_lock_later_rq,
3159 #endif
3160
3161 .task_tick = task_tick_dl,
3162 .task_fork = task_fork_dl,
3163
3164 .prio_changed = prio_changed_dl,
3165 .switched_from = switched_from_dl,
3166 .switched_to = switched_to_dl,
3167
3168 .update_curr = update_curr_dl,
3169 #ifdef CONFIG_SCHED_CORE
3170 .task_is_throttled = task_is_throttled_dl,
3171 #endif
3172 };
3173
3174 /* Used for dl_bw check and update, used under sched_rt_handler()::mutex */
3175 static u64 dl_generation;
3176
3177 int sched_dl_global_validate(void)
3178 {
3179 u64 runtime = global_rt_runtime();
3180 u64 period = global_rt_period();
3181 u64 new_bw = to_ratio(period, runtime);
3182 u64 gen = ++dl_generation;
3183 struct dl_bw *dl_b;
3184 int cpu, cpus, ret = 0;
3185 unsigned long flags;
3186
3187 /*
3188 * Here we want to check that the bandwidth is not being set to a
3189 * value smaller than the currently allocated bandwidth in any of
3190 * the root_domains.
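* IOW, for each root domain we require new_bw * cpus >= total_bw, where
* new_bw is the per-CPU bandwidth implied by the new rt period/runtime.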
3191 */
3192 for_each_possible_cpu(cpu) {
3193 rcu_read_lock_sched();
3194
3195 if (dl_bw_visited(cpu, gen))
3196 goto next;
3197
3198 dl_b = dl_bw_of(cpu);
3199 cpus = dl_bw_cpus(cpu);
3200
3201 raw_spin_lock_irqsave(&dl_b->lock, flags);
3202 if (new_bw * cpus < dl_b->total_bw)
3203 ret = -EBUSY;
3204 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
3205
3206 next:
3207 rcu_read_unlock_sched();
3208
3209 if (ret)
3210 break;
3211 }
3212
3213 return ret;
3214 }
3215
3216 static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
3217 {
3218 if (global_rt_runtime() == RUNTIME_INF) {
3219 dl_rq->bw_ratio = 1 << RATIO_SHIFT;
3220 dl_rq->max_bw = dl_rq->extra_bw = 1 << BW_SHIFT;
3221 } else {
3222 dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
3223 global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
3224 dl_rq->max_bw = dl_rq->extra_bw =
3225 to_ratio(global_rt_period(), global_rt_runtime());
3226 }
3227 }
3228
3229 void sched_dl_do_global(void)
3230 {
3231 u64 new_bw = -1;
3232 u64 gen = ++dl_generation;
3233 struct dl_bw *dl_b;
3234 int cpu;
3235 unsigned long flags;
3236
3237 if (global_rt_runtime() != RUNTIME_INF)
3238 new_bw = to_ratio(global_rt_period(), global_rt_runtime());
3239
3240 for_each_possible_cpu(cpu) {
3241 rcu_read_lock_sched();
3242
3243 if (dl_bw_visited(cpu, gen)) {
3244 rcu_read_unlock_sched();
3245 continue;
3246 }
3247
3248 dl_b = dl_bw_of(cpu);
3249
3250 raw_spin_lock_irqsave(&dl_b->lock, flags);
3251 dl_b->bw = new_bw;
3252 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
3253
3254 rcu_read_unlock_sched();
3255 init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
3256 }
3257 }
3258
3259 /*
3260 * We must be sure that accepting a new task (or allowing changing the
3261 * parameters of an existing one) is consistent with the bandwidth
3262 * constraints. If so, this function also updates the currently allocated
3263 * bandwidth accordingly, to reflect the new situation.
3264 *
3265 * This function is called while holding p's rq->lock.
3266 */
3267 int sched_dl_overflow(struct task_struct *p, int policy,
3268 const struct sched_attr *attr)
3269 {
3270 u64 period = attr->sched_period ?: attr->sched_deadline;
3271 u64 runtime = attr->sched_runtime;
3272 u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
3273 int cpus, err = -1, cpu = task_cpu(p);
3274 struct dl_bw *dl_b = dl_bw_of(cpu);
3275 unsigned long cap;
3276
3277 if (attr->sched_flags & SCHED_FLAG_SUGOV)
3278 return 0;
3279
3280 /* !deadline task may carry old deadline bandwidth */
3281 if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
3282 return 0;
3283
3284 /*
3285 * Whether a task enters, leaves, or stays -deadline but changes
3286 * its parameters, we may need to update the total allocated
3287 * bandwidth of its container accordingly.
3288 */
3289 raw_spin_lock(&dl_b->lock);
3290 cpus = dl_bw_cpus(cpu);
3291 cap = dl_bw_capacity(cpu);
3292
3293 if (dl_policy(policy) && !task_has_dl_policy(p) &&
3294 !__dl_overflow(dl_b, cap, 0, new_bw)) {
3295 if (hrtimer_active(&p->dl.inactive_timer))
3296 __dl_sub(dl_b, p->dl.dl_bw, cpus);
3297 __dl_add(dl_b, new_bw, cpus);
3298 err = 0;
3299 } else if (dl_policy(policy) && task_has_dl_policy(p) &&
3300 !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) {
3301 /*
3302 * XXX this is slightly incorrect: when the task
3303 * utilization decreases, we should delay the total
3304 * utilization change until the task's 0-lag point.
3305 * But this would require setting the task's "inactive
3306 * timer" when the task is not inactive.
3307 */ 3308 __dl_sub(dl_b, p->dl.dl_bw, cpus); 3309 __dl_add(dl_b, new_bw, cpus); 3310 dl_change_utilization(p, new_bw); 3311 err = 0; 3312 } else if (!dl_policy(policy) && task_has_dl_policy(p)) { 3313 /* 3314 * Do not decrease the total deadline utilization here, 3315 * switched_from_dl() will take care to do it at the correct 3316 * (0-lag) time. 3317 */ 3318 err = 0; 3319 } 3320 raw_spin_unlock(&dl_b->lock); 3321 3322 return err; 3323 } 3324 3325 /* 3326 * This function initializes the sched_dl_entity of a newly becoming 3327 * SCHED_DEADLINE task. 3328 * 3329 * Only the static values are considered here, the actual runtime and the 3330 * absolute deadline will be properly calculated when the task is enqueued 3331 * for the first time with its new policy. 3332 */ 3333 void __setparam_dl(struct task_struct *p, const struct sched_attr *attr) 3334 { 3335 struct sched_dl_entity *dl_se = &p->dl; 3336 3337 dl_se->dl_runtime = attr->sched_runtime; 3338 dl_se->dl_deadline = attr->sched_deadline; 3339 dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline; 3340 dl_se->flags = attr->sched_flags & SCHED_DL_FLAGS; 3341 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime); 3342 dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime); 3343 } 3344 3345 void __getparam_dl(struct task_struct *p, struct sched_attr *attr) 3346 { 3347 struct sched_dl_entity *dl_se = &p->dl; 3348 3349 attr->sched_priority = p->rt_priority; 3350 attr->sched_runtime = dl_se->dl_runtime; 3351 attr->sched_deadline = dl_se->dl_deadline; 3352 attr->sched_period = dl_se->dl_period; 3353 attr->sched_flags &= ~SCHED_DL_FLAGS; 3354 attr->sched_flags |= dl_se->flags; 3355 } 3356 3357 /* 3358 * This function validates the new parameters of a -deadline task. 3359 * We ask for the deadline not being zero, and greater or equal 3360 * than the runtime, as well as the period of being zero or 3361 * greater than deadline. Furthermore, we have to be sure that 3362 * user parameters are above the internal resolution of 1us (we 3363 * check sched_runtime only since it is always the smaller one) and 3364 * below 2^63 ns (we have to check both sched_deadline and 3365 * sched_period, as the latter can be zero). 3366 */ 3367 bool __checkparam_dl(const struct sched_attr *attr) 3368 { 3369 u64 period, max, min; 3370 3371 /* special dl tasks don't actually use any parameter */ 3372 if (attr->sched_flags & SCHED_FLAG_SUGOV) 3373 return true; 3374 3375 /* deadline != 0 */ 3376 if (attr->sched_deadline == 0) 3377 return false; 3378 3379 /* 3380 * Since we truncate DL_SCALE bits, make sure we're at least 3381 * that big. 3382 */ 3383 if (attr->sched_runtime < (1ULL << DL_SCALE)) 3384 return false; 3385 3386 /* 3387 * Since we use the MSB for wrap-around and sign issues, make 3388 * sure it's not set (mind that period can be equal to zero). 3389 */ 3390 if (attr->sched_deadline & (1ULL << 63) || 3391 attr->sched_period & (1ULL << 63)) 3392 return false; 3393 3394 period = attr->sched_period; 3395 if (!period) 3396 period = attr->sched_deadline; 3397 3398 /* runtime <= deadline <= period (if period != 0) */ 3399 if (period < attr->sched_deadline || 3400 attr->sched_deadline < attr->sched_runtime) 3401 return false; 3402 3403 max = (u64)READ_ONCE(sysctl_sched_dl_period_max) * NSEC_PER_USEC; 3404 min = (u64)READ_ONCE(sysctl_sched_dl_period_min) * NSEC_PER_USEC; 3405 3406 if (period < min || period > max) 3407 return false; 3408 3409 return true; 3410 } 3411 3412 /* 3413 * This function clears the sched_dl_entity static params. 
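* It also resets the dynamic state flags (throttled, yielded,
* non-contending, overrun, server) and, under CONFIG_RT_MUTEXES, points
* pi_se back at the entity itself.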
3414 */ 3415 static void __dl_clear_params(struct sched_dl_entity *dl_se) 3416 { 3417 dl_se->dl_runtime = 0; 3418 dl_se->dl_deadline = 0; 3419 dl_se->dl_period = 0; 3420 dl_se->flags = 0; 3421 dl_se->dl_bw = 0; 3422 dl_se->dl_density = 0; 3423 3424 dl_se->dl_throttled = 0; 3425 dl_se->dl_yielded = 0; 3426 dl_se->dl_non_contending = 0; 3427 dl_se->dl_overrun = 0; 3428 dl_se->dl_server = 0; 3429 3430 #ifdef CONFIG_RT_MUTEXES 3431 dl_se->pi_se = dl_se; 3432 #endif 3433 } 3434 3435 void init_dl_entity(struct sched_dl_entity *dl_se) 3436 { 3437 RB_CLEAR_NODE(&dl_se->rb_node); 3438 init_dl_task_timer(dl_se); 3439 init_dl_inactive_task_timer(dl_se); 3440 __dl_clear_params(dl_se); 3441 } 3442 3443 bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr) 3444 { 3445 struct sched_dl_entity *dl_se = &p->dl; 3446 3447 if (dl_se->dl_runtime != attr->sched_runtime || 3448 dl_se->dl_deadline != attr->sched_deadline || 3449 dl_se->dl_period != attr->sched_period || 3450 dl_se->flags != (attr->sched_flags & SCHED_DL_FLAGS)) 3451 return true; 3452 3453 return false; 3454 } 3455 3456 #ifdef CONFIG_SMP 3457 int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, 3458 const struct cpumask *trial) 3459 { 3460 unsigned long flags, cap; 3461 struct dl_bw *cur_dl_b; 3462 int ret = 1; 3463 3464 rcu_read_lock_sched(); 3465 cur_dl_b = dl_bw_of(cpumask_any(cur)); 3466 cap = __dl_bw_capacity(trial); 3467 raw_spin_lock_irqsave(&cur_dl_b->lock, flags); 3468 if (__dl_overflow(cur_dl_b, cap, 0, 0)) 3469 ret = 0; 3470 raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags); 3471 rcu_read_unlock_sched(); 3472 3473 return ret; 3474 } 3475 3476 enum dl_bw_request { 3477 dl_bw_req_deactivate = 0, 3478 dl_bw_req_alloc, 3479 dl_bw_req_free 3480 }; 3481 3482 static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw) 3483 { 3484 unsigned long flags, cap; 3485 struct dl_bw *dl_b; 3486 bool overflow = 0; 3487 u64 fair_server_bw = 0; 3488 3489 rcu_read_lock_sched(); 3490 dl_b = dl_bw_of(cpu); 3491 raw_spin_lock_irqsave(&dl_b->lock, flags); 3492 3493 cap = dl_bw_capacity(cpu); 3494 switch (req) { 3495 case dl_bw_req_free: 3496 __dl_sub(dl_b, dl_bw, dl_bw_cpus(cpu)); 3497 break; 3498 case dl_bw_req_alloc: 3499 overflow = __dl_overflow(dl_b, cap, 0, dl_bw); 3500 3501 if (!overflow) { 3502 /* 3503 * We reserve space in the destination 3504 * root_domain, as we can't fail after this point. 3505 * We will free resources in the source root_domain 3506 * later on (see set_cpus_allowed_dl()). 3507 */ 3508 __dl_add(dl_b, dl_bw, dl_bw_cpus(cpu)); 3509 } 3510 break; 3511 case dl_bw_req_deactivate: 3512 /* 3513 * cpu is not off yet, but we need to do the math by 3514 * considering it off already (i.e., what would happen if we 3515 * turn cpu off?). 3516 */ 3517 cap -= arch_scale_cpu_capacity(cpu); 3518 3519 /* 3520 * cpu is going offline and NORMAL tasks will be moved away 3521 * from it. We can thus discount dl_server bandwidth 3522 * contribution as it won't need to be servicing tasks after 3523 * the cpu is off. 3524 */ 3525 if (cpu_rq(cpu)->fair_server.dl_server) 3526 fair_server_bw = cpu_rq(cpu)->fair_server.dl_bw; 3527 3528 /* 3529 * Not much to check if no DEADLINE bandwidth is present. 3530 * dl_servers we can discount, as tasks will be moved out the 3531 * offlined CPUs anyway. 3532 */ 3533 if (dl_b->total_bw - fair_server_bw > 0) { 3534 /* 3535 * Leaving at least one CPU for DEADLINE tasks seems a 3536 * wise thing to do. As said above, cpu is not offline 3537 * yet, so account for that. 
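* If this is the only remaining CPU in the root domain while DEADLINE
* bandwidth (beyond the local dl_server) is still allocated, report
* overflow unconditionally.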
3538 */ 3539 if (dl_bw_cpus(cpu) - 1) 3540 overflow = __dl_overflow(dl_b, cap, fair_server_bw, 0); 3541 else 3542 overflow = 1; 3543 } 3544 3545 break; 3546 } 3547 3548 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 3549 rcu_read_unlock_sched(); 3550 3551 return overflow ? -EBUSY : 0; 3552 } 3553 3554 int dl_bw_deactivate(int cpu) 3555 { 3556 return dl_bw_manage(dl_bw_req_deactivate, cpu, 0); 3557 } 3558 3559 int dl_bw_alloc(int cpu, u64 dl_bw) 3560 { 3561 return dl_bw_manage(dl_bw_req_alloc, cpu, dl_bw); 3562 } 3563 3564 void dl_bw_free(int cpu, u64 dl_bw) 3565 { 3566 dl_bw_manage(dl_bw_req_free, cpu, dl_bw); 3567 } 3568 #endif 3569 3570 #ifdef CONFIG_SCHED_DEBUG 3571 void print_dl_stats(struct seq_file *m, int cpu) 3572 { 3573 print_dl_rq(m, cpu, &cpu_rq(cpu)->dl); 3574 } 3575 #endif /* CONFIG_SCHED_DEBUG */ 3576