// SPDX-License-Identifier: GPL-2.0
/*
 * Deadline Scheduling Class (SCHED_DEADLINE)
 *
 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
 *
 * Tasks that periodically execute their instances for less than their
 * runtime won't miss any of their deadlines.
 * Tasks that are not periodic or sporadic, or that try to execute more
 * than their reserved bandwidth, will be slowed down (and may potentially
 * miss some of their deadlines), and won't affect any other task.
 *
 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
 *                    Juri Lelli <juri.lelli@gmail.com>,
 *                    Michael Trimarchi <michael@amarulasolutions.com>,
 *                    Fabio Checconi <fchecconi@gmail.com>
 */

#include <linux/cpuset.h>
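
/*
 * Illustrative sketch (userspace, not part of this file): admission into
 * SCHED_DEADLINE is requested via sched_setattr(2), passing the runtime,
 * deadline and period (in nanoseconds) that the code below accounts for,
 * as documented in Documentation/scheduler/sched-deadline.rst. A task
 * asking for 10ms of runtime every 100ms:
 *
 *	struct sched_attr attr = {
 *		.size           = sizeof(attr),
 *		.sched_policy   = SCHED_DEADLINE,
 *		.sched_runtime  =  10 * 1000 * 1000,	// 10 ms
 *		.sched_deadline = 100 * 1000 * 1000,	// 100 ms
 *		.sched_period   = 100 * 1000 * 1000,	// 100 ms
 *	};
 *
 *	if (syscall(SYS_sched_setattr, 0, &attr, 0))
 *		perror("sched_setattr");	// e.g. admission control refused
 *
 * Such a task has bandwidth dl_bw = runtime/period = 0.1; the admission
 * test below rejects it if the sum of bandwidths would exceed the limit.
 */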

/*
 * Default limits for DL period; on the top end we guard against small util
 * tasks still getting ridiculously long effective runtimes, on the bottom
 * end we guard against timer DoS.
 */
static unsigned int sysctl_sched_dl_period_max = 1 << 22; /* ~4 seconds */
static unsigned int sysctl_sched_dl_period_min = 100;     /* 100 us */
#ifdef CONFIG_SYSCTL
static struct ctl_table sched_dl_sysctls[] = {
	{
		.procname	= "sched_deadline_period_max_us",
		.data		= &sysctl_sched_dl_period_max,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_douintvec_minmax,
		.extra1		= (void *)&sysctl_sched_dl_period_min,
	},
	{
		.procname	= "sched_deadline_period_min_us",
		.data		= &sysctl_sched_dl_period_min,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_douintvec_minmax,
		.extra2		= (void *)&sysctl_sched_dl_period_max,
	},
};

static int __init sched_dl_sysctl_init(void)
{
	register_sysctl_init("kernel", sched_dl_sysctls);
	return 0;
}
late_initcall(sched_dl_sysctl_init);
#endif

static bool dl_server(struct sched_dl_entity *dl_se)
{
	return dl_se->dl_server;
}

static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
{
	BUG_ON(dl_server(dl_se));
	return container_of(dl_se, struct task_struct, dl);
}

static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
	return container_of(dl_rq, struct rq, dl);
}

static inline struct rq *rq_of_dl_se(struct sched_dl_entity *dl_se)
{
	struct rq *rq = dl_se->rq;

	if (!dl_server(dl_se))
		rq = task_rq(dl_task_of(dl_se));

	return rq;
}

static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
{
	return &rq_of_dl_se(dl_se)->dl;
}

static inline int on_dl_rq(struct sched_dl_entity *dl_se)
{
	return !RB_EMPTY_NODE(&dl_se->rb_node);
}

#ifdef CONFIG_RT_MUTEXES
static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
{
	return dl_se->pi_se;
}

static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
{
	return pi_of(dl_se) != dl_se;
}
#else
static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
{
	return dl_se;
}

static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
{
	return false;
}
#endif

#ifdef CONFIG_SMP
static inline struct dl_bw *dl_bw_of(int i)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
			 "sched RCU must be held");
	return &cpu_rq(i)->rd->dl_bw;
}

static inline int dl_bw_cpus(int i)
{
	struct root_domain *rd = cpu_rq(i)->rd;
	int cpus;

	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
			 "sched RCU must be held");

	if (cpumask_subset(rd->span, cpu_active_mask))
		return cpumask_weight(rd->span);

	cpus = 0;

	for_each_cpu_and(i, rd->span, cpu_active_mask)
		cpus++;

	return cpus;
}

static inline unsigned long __dl_bw_capacity(const struct cpumask *mask)
{
	unsigned long cap = 0;
	int i;

	for_each_cpu_and(i, mask, cpu_active_mask)
		cap += arch_scale_cpu_capacity(i);

	return cap;
}

/*
 * XXX Fix: If 'rq->rd == def_root_domain' perform AC against capacity
 * of the CPU the task is running on rather than rd's \Sum CPU capacity.
 */
static inline unsigned long dl_bw_capacity(int i)
{
	if (!sched_asym_cpucap_active() &&
	    arch_scale_cpu_capacity(i) == SCHED_CAPACITY_SCALE) {
		return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT;
	} else {
		RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
				 "sched RCU must be held");

		return __dl_bw_capacity(cpu_rq(i)->rd->span);
	}
}

static inline bool dl_bw_visited(int cpu, u64 gen)
{
	struct root_domain *rd = cpu_rq(cpu)->rd;

	if (rd->visit_gen == gen)
		return true;

	rd->visit_gen = gen;
	return false;
}

static inline
void __dl_update(struct dl_bw *dl_b, s64 bw)
{
	struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
	int i;

	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
			 "sched RCU must be held");
	for_each_cpu_and(i, rd->span, cpu_active_mask) {
		struct rq *rq = cpu_rq(i);

		rq->dl.extra_bw += bw;
	}
}
#else
static inline struct dl_bw *dl_bw_of(int i)
{
	return &cpu_rq(i)->dl.dl_bw;
}

static inline int dl_bw_cpus(int i)
{
	return 1;
}

static inline unsigned long dl_bw_capacity(int i)
{
	return SCHED_CAPACITY_SCALE;
}

static inline bool dl_bw_visited(int cpu, u64 gen)
{
	return false;
}

static inline
void __dl_update(struct dl_bw *dl_b, s64 bw)
{
	struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);

	dl->extra_bw += bw;
}
#endif

static inline
void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
	dl_b->total_bw -= tsk_bw;
	__dl_update(dl_b, (s32)tsk_bw / cpus);
}

static inline
void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
	dl_b->total_bw += tsk_bw;
	__dl_update(dl_b, -((s32)tsk_bw / cpus));
}

static inline bool
__dl_overflow(struct dl_bw *dl_b, unsigned long cap, u64 old_bw, u64 new_bw)
{
	return dl_b->bw != -1 &&
	       cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw;
}
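
/*
 * Worked example for __dl_overflow() (illustrative figures only): with
 * BW_SHIFT = 20, bandwidths are fixed-point values scaled by 2^20, so a
 * 10ms/100ms task has dl_bw ~= 0.1 * 2^20. With the default 95% limit
 * (from the rt period/runtime sysctls) encoded in dl_b->bw, admitting a
 * new task (old_bw = 0) overflows iff:
 *
 *	0.95 * 2^20 * cap  <  (total_bw + new_bw) * SCHED_CAPACITY_SCALE
 *
 * where cap_scale() folds the capacity ratio in. On two full-capacity
 * CPUs the limit is effectively 1.9 * 2^20: nineteen such 0.1 tasks are
 * admitted, the twentieth is rejected.
 */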

static inline
void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	u64 old = dl_rq->running_bw;

	lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
	dl_rq->running_bw += dl_bw;
	SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
	cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
}

static inline
void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	u64 old = dl_rq->running_bw;

	lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
	dl_rq->running_bw -= dl_bw;
	SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
	if (dl_rq->running_bw > old)
		dl_rq->running_bw = 0;
	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
	cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
}

static inline
void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	u64 old = dl_rq->this_bw;

	lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
	dl_rq->this_bw += dl_bw;
	SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
}

static inline
void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	u64 old = dl_rq->this_bw;

	lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
	dl_rq->this_bw -= dl_bw;
	SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
	if (dl_rq->this_bw > old)
		dl_rq->this_bw = 0;
	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
}

static inline
void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	if (!dl_entity_is_special(dl_se))
		__add_rq_bw(dl_se->dl_bw, dl_rq);
}

static inline
void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	if (!dl_entity_is_special(dl_se))
		__sub_rq_bw(dl_se->dl_bw, dl_rq);
}

static inline
void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	if (!dl_entity_is_special(dl_se))
		__add_running_bw(dl_se->dl_bw, dl_rq);
}

static inline
void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	if (!dl_entity_is_special(dl_se))
		__sub_running_bw(dl_se->dl_bw, dl_rq);
}

static void dl_change_utilization(struct task_struct *p, u64 new_bw)
{
	struct rq *rq;

	WARN_ON_ONCE(p->dl.flags & SCHED_FLAG_SUGOV);

	if (task_on_rq_queued(p))
		return;

	rq = task_rq(p);
	if (p->dl.dl_non_contending) {
		sub_running_bw(&p->dl, &rq->dl);
		p->dl.dl_non_contending = 0;
		/*
		 * If the timer handler is currently running and the
		 * timer cannot be canceled, inactive_task_timer()
		 * will see that dl_non_contending is not set, and
		 * will not touch the rq's active utilization,
		 * so we are still safe.
		 */
		if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
			put_task_struct(p);
	}
	__sub_rq_bw(p->dl.dl_bw, &rq->dl);
	__add_rq_bw(new_bw, &rq->dl);
}

static void __dl_clear_params(struct sched_dl_entity *dl_se);

/*
 * The utilization of a task cannot be immediately removed from
 * the rq active utilization (running_bw) when the task blocks.
 * Instead, we have to wait for the so called "0-lag time".
 *
 * If a task blocks before the "0-lag time", a timer (the inactive
 * timer) is armed, and running_bw is decreased when the timer
 * fires.
 *
 * If the task wakes up again before the inactive timer fires,
 * the timer is canceled, whereas if the task wakes up after the
 * inactive timer fired (and running_bw has been decreased) the
 * task's utilization has to be added to running_bw again.
 * A flag in the deadline scheduling entity (dl_non_contending)
 * is used to avoid race conditions between the inactive timer handler
 * and task wakeups.
 *
 * The following diagram shows how running_bw is updated. A task is
 * "ACTIVE" when its utilization contributes to running_bw; an
 * "ACTIVE contending" task is in the TASK_RUNNING state, while an
 * "ACTIVE non contending" task is a blocked task for which the "0-lag time"
 * has not passed yet. An "INACTIVE" task is a task for which the "0-lag"
 * time already passed, which does not contribute to running_bw anymore.
 *                              +------------------+
 *             wakeup           |    ACTIVE        |
 *          +------------------>+   contending     |
 *          | add_running_bw    |                  |
 *          |                   +----+------+------+
 *          |                        |      ^
 *          |                dequeue |      |
 * +--------+-------+                |      |
 * |                |   t >= 0-lag   |      | wakeup
 * |    INACTIVE    |<---------------+      |
 * |                | sub_running_bw |      |
 * +--------+-------+                |      |
 *          ^                        |      |
 *          |              t < 0-lag |      |
 *          |                        |      |
 *          |                        V      |
 *          |                   +----+------+------+
 *          | sub_running_bw    |    ACTIVE        |
 *          +-------------------+                  |
 *            inactive timer    |  non contending  |
 *            fired             +------------------+
 *
 * The task_non_contending() function is invoked when a task
 * blocks, and checks if the 0-lag time already passed or
 * not (in the first case, it directly updates running_bw;
 * in the second case, it arms the inactive timer).
 *
 * The task_contending() function is invoked when a task wakes
 * up, and checks if the task is still in the "ACTIVE non contending"
 * state or not (in the second case, it updates running_bw).
 */
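
/*
 * Worked 0-lag example (illustrative numbers): a task with dl_runtime =
 * 10ms and dl_period = 100ms blocks at time t with runtime = 4ms left and
 * deadline = t + 60ms. Using the formula in task_non_contending() below:
 *
 *	0-lag = deadline - runtime * dl_period / dl_runtime
 *	      = (t + 60ms) - 4ms * 100 / 10 = t + 20ms
 *
 * so the inactive timer is armed 20ms in the future; if the task is still
 * asleep by then, its 0.1 of utilization leaves running_bw.
 */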
static void task_non_contending(struct sched_dl_entity *dl_se)
{
	struct hrtimer *timer = &dl_se->inactive_timer;
	struct rq *rq = rq_of_dl_se(dl_se);
	struct dl_rq *dl_rq = &rq->dl;
	s64 zerolag_time;

	/*
	 * If this is a non-deadline task that has been boosted,
	 * do nothing
	 */
	if (dl_se->dl_runtime == 0)
		return;

	if (dl_entity_is_special(dl_se))
		return;

	WARN_ON(dl_se->dl_non_contending);

	zerolag_time = dl_se->deadline -
		 div64_long((dl_se->runtime * dl_se->dl_period),
			dl_se->dl_runtime);

	/*
	 * Using relative times instead of the absolute "0-lag time"
	 * allows us to simplify the code
	 */
	zerolag_time -= rq_clock(rq);

	/*
	 * If the "0-lag time" already passed, decrease the active
	 * utilization now, instead of starting a timer
	 */
	if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
		if (dl_server(dl_se)) {
			sub_running_bw(dl_se, dl_rq);
		} else {
			struct task_struct *p = dl_task_of(dl_se);

			if (dl_task(p))
				sub_running_bw(dl_se, dl_rq);

			if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
				struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

				if (READ_ONCE(p->__state) == TASK_DEAD)
					sub_rq_bw(dl_se, &rq->dl);
				raw_spin_lock(&dl_b->lock);
				__dl_sub(dl_b, dl_se->dl_bw, dl_bw_cpus(task_cpu(p)));
				raw_spin_unlock(&dl_b->lock);
				__dl_clear_params(dl_se);
			}
		}

		return;
	}

	dl_se->dl_non_contending = 1;
	if (!dl_server(dl_se))
		get_task_struct(dl_task_of(dl_se));

	hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD);
}

static void task_contending(struct sched_dl_entity *dl_se, int flags)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

	/*
	 * If this is a non-deadline task that has been boosted,
	 * do nothing
	 */
	if (dl_se->dl_runtime == 0)
		return;

	if (flags & ENQUEUE_MIGRATED)
		add_rq_bw(dl_se, dl_rq);

	if (dl_se->dl_non_contending) {
		dl_se->dl_non_contending = 0;
		/*
		 * If the timer handler is currently running and the
		 * timer cannot be canceled, inactive_task_timer()
		 * will see that dl_non_contending is not set, and
		 * will not touch the rq's active utilization,
		 * so we are still safe.
		 */
		if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1) {
			if (!dl_server(dl_se))
				put_task_struct(dl_task_of(dl_se));
		}
	} else {
		/*
		 * Since "dl_non_contending" is not set, the
		 * task's utilization has already been removed from
		 * active utilization (either when the task blocked
		 * or when the "inactive timer" fired).
		 * So, add it back.
		 */
		add_running_bw(dl_se, dl_rq);
	}
}
492 */ 493 if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1) { 494 if (!dl_server(dl_se)) 495 put_task_struct(dl_task_of(dl_se)); 496 } 497 } else { 498 /* 499 * Since "dl_non_contending" is not set, the 500 * task's utilization has already been removed from 501 * active utilization (either when the task blocked, 502 * when the "inactive timer" fired). 503 * So, add it back. 504 */ 505 add_running_bw(dl_se, dl_rq); 506 } 507 } 508 509 static inline int is_leftmost(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 510 { 511 return rb_first_cached(&dl_rq->root) == &dl_se->rb_node; 512 } 513 514 static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq); 515 516 void init_dl_bw(struct dl_bw *dl_b) 517 { 518 raw_spin_lock_init(&dl_b->lock); 519 if (global_rt_runtime() == RUNTIME_INF) 520 dl_b->bw = -1; 521 else 522 dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime()); 523 dl_b->total_bw = 0; 524 } 525 526 void init_dl_rq(struct dl_rq *dl_rq) 527 { 528 dl_rq->root = RB_ROOT_CACHED; 529 530 #ifdef CONFIG_SMP 531 /* zero means no -deadline tasks */ 532 dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0; 533 534 dl_rq->overloaded = 0; 535 dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED; 536 #else 537 init_dl_bw(&dl_rq->dl_bw); 538 #endif 539 540 dl_rq->running_bw = 0; 541 dl_rq->this_bw = 0; 542 init_dl_rq_bw_ratio(dl_rq); 543 } 544 545 #ifdef CONFIG_SMP 546 547 static inline int dl_overloaded(struct rq *rq) 548 { 549 return atomic_read(&rq->rd->dlo_count); 550 } 551 552 static inline void dl_set_overload(struct rq *rq) 553 { 554 if (!rq->online) 555 return; 556 557 cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask); 558 /* 559 * Must be visible before the overload count is 560 * set (as in sched_rt.c). 561 * 562 * Matched by the barrier in pull_dl_task(). 563 */ 564 smp_wmb(); 565 atomic_inc(&rq->rd->dlo_count); 566 } 567 568 static inline void dl_clear_overload(struct rq *rq) 569 { 570 if (!rq->online) 571 return; 572 573 atomic_dec(&rq->rd->dlo_count); 574 cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask); 575 } 576 577 #define __node_2_pdl(node) \ 578 rb_entry((node), struct task_struct, pushable_dl_tasks) 579 580 static inline bool __pushable_less(struct rb_node *a, const struct rb_node *b) 581 { 582 return dl_entity_preempt(&__node_2_pdl(a)->dl, &__node_2_pdl(b)->dl); 583 } 584 585 static inline int has_pushable_dl_tasks(struct rq *rq) 586 { 587 return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root); 588 } 589 590 /* 591 * The list of pushable -deadline task is not a plist, like in 592 * sched_rt.c, it is an rb-tree with tasks ordered by deadline. 
593 */ 594 static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p) 595 { 596 struct rb_node *leftmost; 597 598 WARN_ON_ONCE(!RB_EMPTY_NODE(&p->pushable_dl_tasks)); 599 600 leftmost = rb_add_cached(&p->pushable_dl_tasks, 601 &rq->dl.pushable_dl_tasks_root, 602 __pushable_less); 603 if (leftmost) 604 rq->dl.earliest_dl.next = p->dl.deadline; 605 606 if (!rq->dl.overloaded) { 607 dl_set_overload(rq); 608 rq->dl.overloaded = 1; 609 } 610 } 611 612 static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p) 613 { 614 struct dl_rq *dl_rq = &rq->dl; 615 struct rb_root_cached *root = &dl_rq->pushable_dl_tasks_root; 616 struct rb_node *leftmost; 617 618 if (RB_EMPTY_NODE(&p->pushable_dl_tasks)) 619 return; 620 621 leftmost = rb_erase_cached(&p->pushable_dl_tasks, root); 622 if (leftmost) 623 dl_rq->earliest_dl.next = __node_2_pdl(leftmost)->dl.deadline; 624 625 RB_CLEAR_NODE(&p->pushable_dl_tasks); 626 627 if (!has_pushable_dl_tasks(rq) && rq->dl.overloaded) { 628 dl_clear_overload(rq); 629 rq->dl.overloaded = 0; 630 } 631 } 632 633 static int push_dl_task(struct rq *rq); 634 635 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev) 636 { 637 return rq->online && dl_task(prev); 638 } 639 640 static DEFINE_PER_CPU(struct balance_callback, dl_push_head); 641 static DEFINE_PER_CPU(struct balance_callback, dl_pull_head); 642 643 static void push_dl_tasks(struct rq *); 644 static void pull_dl_task(struct rq *); 645 646 static inline void deadline_queue_push_tasks(struct rq *rq) 647 { 648 if (!has_pushable_dl_tasks(rq)) 649 return; 650 651 queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks); 652 } 653 654 static inline void deadline_queue_pull_task(struct rq *rq) 655 { 656 queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task); 657 } 658 659 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq); 660 661 static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p) 662 { 663 struct rq *later_rq = NULL; 664 struct dl_bw *dl_b; 665 666 later_rq = find_lock_later_rq(p, rq); 667 if (!later_rq) { 668 int cpu; 669 670 /* 671 * If we cannot preempt any rq, fall back to pick any 672 * online CPU: 673 */ 674 cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr); 675 if (cpu >= nr_cpu_ids) { 676 /* 677 * Failed to find any suitable CPU. 678 * The task will never come back! 679 */ 680 WARN_ON_ONCE(dl_bandwidth_enabled()); 681 682 /* 683 * If admission control is disabled we 684 * try a little harder to let the task 685 * run. 686 */ 687 cpu = cpumask_any(cpu_active_mask); 688 } 689 later_rq = cpu_rq(cpu); 690 double_lock_balance(rq, later_rq); 691 } 692 693 if (p->dl.dl_non_contending || p->dl.dl_throttled) { 694 /* 695 * Inactive timer is armed (or callback is running, but 696 * waiting for us to release rq locks). In any case, when it 697 * will fire (or continue), it will see running_bw of this 698 * task migrated to later_rq (and correctly handle it). 699 */ 700 sub_running_bw(&p->dl, &rq->dl); 701 sub_rq_bw(&p->dl, &rq->dl); 702 703 add_rq_bw(&p->dl, &later_rq->dl); 704 add_running_bw(&p->dl, &later_rq->dl); 705 } else { 706 sub_rq_bw(&p->dl, &rq->dl); 707 add_rq_bw(&p->dl, &later_rq->dl); 708 } 709 710 /* 711 * And we finally need to fix up root_domain(s) bandwidth accounting, 712 * since p is still hanging out in the old (now moved to default) root 713 * domain. 
	dl_b = &rq->rd->dl_bw;
	raw_spin_lock(&dl_b->lock);
	__dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
	raw_spin_unlock(&dl_b->lock);

	dl_b = &later_rq->rd->dl_bw;
	raw_spin_lock(&dl_b->lock);
	__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
	raw_spin_unlock(&dl_b->lock);

	set_task_cpu(p, later_rq->cpu);
	double_unlock_balance(later_rq, rq);

	return later_rq;
}

#else

static inline
void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline
void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline void deadline_queue_push_tasks(struct rq *rq)
{
}

static inline void deadline_queue_pull_task(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

static void
enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags);
static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void dequeue_dl_entity(struct sched_dl_entity *dl_se, int flags);
static void wakeup_preempt_dl(struct rq *rq, struct task_struct *p, int flags);

static inline void replenish_dl_new_period(struct sched_dl_entity *dl_se,
					    struct rq *rq)
{
	/* for non-boosted task, pi_of(dl_se) == dl_se */
	dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
	dl_se->runtime = pi_of(dl_se)->dl_runtime;
}

/*
 * We are being explicitly informed that a new instance is starting,
 * and this means that:
 *  - the absolute deadline of the entity has to be placed at
 *    current time + relative deadline;
 *  - the runtime of the entity has to be set to the maximum value.
 *
 * The capability of specifying such an event is useful whenever a -deadline
 * entity wants to (try to!) synchronize its behaviour with the scheduler's
 * one, and to (try to!) reconcile itself with its own scheduling
 * parameters.
 */
static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	WARN_ON(is_dl_boosted(dl_se));
	WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));

	/*
	 * We are racing with the deadline timer. So, do nothing because
	 * the deadline timer handler will take care of properly recharging
	 * the runtime and postponing the deadline
	 */
	if (dl_se->dl_throttled)
		return;

	/*
	 * We use the regular wall clock time to set deadlines in the
	 * future; in fact, we must consider execution overheads (time
	 * spent on hardirq context, etc.).
	 */
	replenish_dl_new_period(dl_se, rq);
}

/*
 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 * possibility of an entity lasting more than what it declared, and thus
 * exhausting its runtime.
 *
 * Here we are interested in making runtime overrun possible, but we do
 * not want an entity which is misbehaving to affect the scheduling of all
 * other entities.
 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 * is used, in order to confine each entity within its own bandwidth.
 *
 * This function deals exactly with that, and ensures that when the runtime
 * of an entity is replenished, its deadline is also postponed. That ensures
 * the overrunning entity can't interfere with other entities in the system and
 * can't make them miss their deadlines. Reasons why this kind of overruns
 * could happen are, typically, an entity voluntarily trying to exceed its
 * runtime, or it just underestimated it during sched_setattr().
 */
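
/*
 * Worked replenishment example (illustrative numbers): an entity with
 * dl_runtime = 10ms and dl_period = 100ms is found with runtime = -3ms
 * (it overran by 3ms). One pass of the loop below gives:
 *
 *	deadline += 100ms;	runtime = -3ms + 10ms = 7ms
 *
 * i.e. the 3ms of overrun are paid back out of the next instance's budget,
 * so the entity's long-term bandwidth still converges to 10/100.
 */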
static void replenish_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	WARN_ON_ONCE(pi_of(dl_se)->dl_runtime <= 0);

	/*
	 * This could be the case for a !-dl task that is boosted.
	 * Just go with full inherited parameters.
	 */
	if (dl_se->dl_deadline == 0)
		replenish_dl_new_period(dl_se, rq);

	if (dl_se->dl_yielded && dl_se->runtime > 0)
		dl_se->runtime = 0;

	/*
	 * We keep moving the deadline away until we get some
	 * available runtime for the entity. This ensures correct
	 * handling of situations where the runtime overrun is
	 * arbitrarily large.
	 */
	while (dl_se->runtime <= 0) {
		dl_se->deadline += pi_of(dl_se)->dl_period;
		dl_se->runtime += pi_of(dl_se)->dl_runtime;
	}

	/*
	 * At this point, the deadline really should be "in
	 * the future" with respect to rq->clock. If it's
	 * not, we are, for some reason, lagging too much!
	 * Anyway, after having warned userspace about that,
	 * we still try to keep things running by
	 * resetting the deadline and the budget of the
	 * entity.
	 */
	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
		printk_deferred_once("sched: DL replenish lagged too much\n");
		replenish_dl_new_period(dl_se, rq);
	}

	if (dl_se->dl_yielded)
		dl_se->dl_yielded = 0;
	if (dl_se->dl_throttled)
		dl_se->dl_throttled = 0;
}

/*
 * Here we check if --at time t-- an entity (which is probably being
 * [re]activated or, in general, enqueued) can use its remaining runtime
 * and its current deadline _without_ exceeding the bandwidth it is
 * assigned (function returns true if it can't). We are in fact applying
 * one of the CBS rules: when a task wakes up, if the residual runtime
 * over residual deadline fits within the allocated bandwidth, then we
 * can keep the current (absolute) deadline and residual budget without
 * disrupting the schedulability of the system. Otherwise, we should
 * refill the runtime and set the deadline a period in the future,
 * because keeping the current (absolute) deadline of the task would
 * result in breaking guarantees promised to other tasks (refer to
 * Documentation/scheduler/sched-deadline.rst for more information).
 *
 * This function returns true if:
 *
 *   runtime / (deadline - t) > dl_runtime / dl_deadline ,
 *
 * IOW we can't recycle current parameters.
 *
 * Notice that the bandwidth check is done against the deadline. For
 * tasks with deadline equal to period this is the same as using
 * dl_period instead of dl_deadline in the equation above.
 */
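
/*
 * Worked example for the check above (illustrative numbers): take
 * dl_runtime = 10ms and dl_deadline = 100ms, i.e. a bandwidth of 0.1.
 * The task wakes with runtime = 5ms left:
 *
 *  - 20ms before its deadline:  5/20  = 0.25 > 0.1  -> overflow, so the
 *    parameters cannot be recycled;
 *  - 80ms before its deadline:  5/80 ~= 0.06 <= 0.1 -> the residual
 *    runtime and current deadline can be kept.
 */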
static bool dl_entity_overflow(struct sched_dl_entity *dl_se, u64 t)
{
	u64 left, right;

	/*
	 * left and right are the two sides of the equation above,
	 * after a bit of shuffling to use multiplications instead
	 * of divisions.
	 *
	 * Note that none of the time values involved in the two
	 * multiplications are absolute: dl_deadline and dl_runtime
	 * are the relative deadline and the maximum runtime of each
	 * instance, runtime is the runtime left for the last instance
	 * and (deadline - t), since t is rq->clock, is the time left
	 * to the (absolute) deadline. Even if overflowing the u64 type
	 * is very unlikely to occur in both cases, here we scale down
	 * as we want to avoid that risk at all. Scaling down by 10
	 * means that we reduce granularity to 1us. We are fine with it,
	 * since this is only a true/false check and, anyway, thinking
	 * of anything below microseconds resolution is actually fiction
	 * (but still we want to give the user that illusion >;).
	 */
	left = (pi_of(dl_se)->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
	right = ((dl_se->deadline - t) >> DL_SCALE) *
		(pi_of(dl_se)->dl_runtime >> DL_SCALE);

	return dl_time_before(right, left);
}

/*
 * Revised wakeup rule [1]: For self-suspending tasks, rather than
 * re-initializing the task's runtime and deadline, the revised wakeup
 * rule adjusts the task's runtime to prevent the task from overrunning
 * its density.
 *
 * Reasoning: a task may overrun the density if:
 *	runtime / (deadline - t) > dl_runtime / dl_deadline
 *
 * Therefore, runtime can be adjusted to:
 *	runtime = (dl_runtime / dl_deadline) * (deadline - t)
 *
 * so that runtime will be equal to the maximum density
 * the task can use without breaking any rule.
 *
 * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
 * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
 */
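
/*
 * Illustrative numbers for the revised rule: with dl_runtime = 10ms and
 * dl_deadline = 100ms, dl_density encodes 0.1. If the task wakes up 50ms
 * before its current deadline (laxity = 50ms), update_dl_revised_wakeup()
 * trims the residual runtime to 0.1 * 50ms = 5ms instead of granting a
 * full 10ms replenishment against the old deadline.
 */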
static void
update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
{
	u64 laxity = dl_se->deadline - rq_clock(rq);

	/*
	 * If the task has deadline < period, and the deadline is in the past,
	 * it should already be throttled before this check.
	 *
	 * See update_dl_entity() comments for further details.
	 */
	WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));

	dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
}

/*
 * Regarding the deadline, a task with implicit deadline has a relative
 * deadline == relative period. A task with constrained deadline has a
 * relative deadline <= relative period.
 *
 * We support constrained deadline tasks. However, there are some restrictions
 * applied only for tasks which do not have an implicit deadline. See
 * update_dl_entity() to know more about such restrictions.
 *
 * dl_is_implicit() returns true if the task has an implicit deadline.
 */
static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
{
	return dl_se->dl_deadline == dl_se->dl_period;
}

/*
 * When a deadline entity is placed in the runqueue, its runtime and deadline
 * might need to be updated. This is done by a CBS wake up rule. There are two
 * different rules: 1) the original CBS; and 2) the Revised CBS.
 *
 * When the task is starting a new period, the Original CBS is used. In this
 * case, the runtime is replenished and a new absolute deadline is set.
 *
 * When a task is queued before the beginning of the next period, using the
 * remaining runtime and deadline could cause the entity to overflow, see
 * dl_entity_overflow() to find more about runtime overflow. When such a case
 * is detected, the runtime and deadline need to be updated.
 *
 * If the task has an implicit deadline, i.e., deadline == period, the Original
 * CBS is applied. The runtime is replenished and a new absolute deadline is
 * set, as in the previous cases.
 *
 * However, the Original CBS does not work properly for tasks with
 * deadline < period, which are said to have a constrained deadline. By
 * applying the Original CBS, a constrained deadline task would be able to run
 * runtime/deadline in a period. With deadline < period, the task would
 * overrun the runtime/period allowed bandwidth, breaking the admission test.
 *
 * In order to prevent this misbehavior, the Revised CBS is used for
 * constrained deadline tasks when a runtime overflow is detected. In the
 * Revised CBS, rather than replenishing & setting a new absolute deadline,
 * the remaining runtime of the task is reduced to avoid runtime overflow.
 * Please refer to the comments of the update_dl_revised_wakeup() function to
 * find more about the Revised CBS rule.
 */
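
/*
 * Putting the two rules together (illustrative walk-through): a task with
 * dl_runtime = 10ms, dl_deadline = 50ms, dl_period = 100ms (constrained)
 * wakes up at time t; update_dl_entity() below then picks:
 *
 *  - deadline in the past, or implicit/boosted entity overflowing	->
 *    Original CBS: deadline = t + dl_deadline, runtime = dl_runtime;
 *  - constrained, deadline in the future, but overflowing		->
 *    Revised CBS: keep the deadline, trim runtime to density * laxity;
 *  - no overflow							->
 *    keep both the residual runtime and the current deadline.
 */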
static void update_dl_entity(struct sched_dl_entity *dl_se)
{
	struct rq *rq = rq_of_dl_se(dl_se);

	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
	    dl_entity_overflow(dl_se, rq_clock(rq))) {

		if (unlikely(!dl_is_implicit(dl_se) &&
			     !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
			     !is_dl_boosted(dl_se))) {
			update_dl_revised_wakeup(dl_se, rq);
			return;
		}

		replenish_dl_new_period(dl_se, rq);
	}
}

static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
{
	return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
}

/*
 * If the entity depleted all its runtime, and if we want it to sleep
 * while waiting for some new execution time to become available, we
 * set the bandwidth replenishment timer to the replenishment instant
 * and try to activate it.
 *
 * Notice that it is important for the caller to know if the timer
 * actually started or not (i.e., the replenishment instant is in
 * the future or in the past).
 */
static int start_dl_timer(struct sched_dl_entity *dl_se)
{
	struct hrtimer *timer = &dl_se->dl_timer;
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);
	ktime_t now, act;
	s64 delta;

	lockdep_assert_rq_held(rq);

	/*
	 * We want the timer to fire at the deadline, but considering
	 * that it is actually coming from rq->clock and not from
	 * hrtimer's time base reading.
	 */
	act = ns_to_ktime(dl_next_period(dl_se));
	now = hrtimer_cb_get_time(timer);
	delta = ktime_to_ns(now) - rq_clock(rq);
	act = ktime_add_ns(act, delta);

	/*
	 * If the expiry time already passed, e.g., because the value
	 * chosen as the deadline is too small, don't even try to
	 * start the timer in the past!
	 */
	if (ktime_us_delta(act, now) < 0)
		return 0;

	/*
	 * !enqueued will guarantee another callback; even if one is already in
	 * progress. This ensures a balanced {get,put}_task_struct().
	 *
	 * The race against __run_timer() clearing the enqueued state is
	 * harmless because we're holding task_rq()->lock, therefore the timer
	 * expiring after we've done the check will wait on its task_rq_lock()
	 * and observe our state.
	 */
	if (!hrtimer_is_queued(timer)) {
		if (!dl_server(dl_se))
			get_task_struct(dl_task_of(dl_se));
		hrtimer_start(timer, act, HRTIMER_MODE_ABS_HARD);
	}

	return 1;
}

static void __push_dl_task(struct rq *rq, struct rq_flags *rf)
{
#ifdef CONFIG_SMP
	/*
	 * Queueing this task back might have overloaded rq, check if we need
	 * to kick someone away.
	 */
	if (has_pushable_dl_tasks(rq)) {
		/*
		 * Nothing relies on rq->lock after this, so it's safe to drop
		 * rq->lock.
		 */
		rq_unpin_lock(rq, rf);
		push_dl_task(rq);
		rq_repin_lock(rq, rf);
	}
#endif
}

/*
 * This is the bandwidth enforcement timer callback. If here, we know
 * a task is not on its dl_rq, since the fact that the timer was running
 * means the task is throttled and needs a runtime replenishment.
 *
 * However, what we actually do depends on whether the task is active
 * (it is on its rq) or has been removed from there by a call to
 * dequeue_task_dl(). In the former case we must issue the runtime
 * replenishment and add the task back to the dl_rq; in the latter, we just
 * do nothing but clearing dl_throttled, so that runtime and deadline
 * updating (and the queueing back to dl_rq) will be done by the
 * next call to enqueue_task_dl().
 */
static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
{
	struct sched_dl_entity *dl_se = container_of(timer,
						     struct sched_dl_entity,
						     dl_timer);
	struct task_struct *p;
	struct rq_flags rf;
	struct rq *rq;

	if (dl_server(dl_se)) {
		struct rq *rq = rq_of_dl_se(dl_se);
		struct rq_flags rf;

		rq_lock(rq, &rf);
		if (dl_se->dl_throttled) {
			sched_clock_tick();
			update_rq_clock(rq);

			if (dl_se->server_has_tasks(dl_se)) {
				enqueue_dl_entity(dl_se, ENQUEUE_REPLENISH);
				resched_curr(rq);
				__push_dl_task(rq, &rf);
			} else {
				replenish_dl_entity(dl_se);
			}

		}
		rq_unlock(rq, &rf);

		return HRTIMER_NORESTART;
	}

	p = dl_task_of(dl_se);
	rq = task_rq_lock(p, &rf);

	/*
	 * The task might have changed its scheduling policy to something
	 * different than SCHED_DEADLINE (through switched_from_dl()).
	 */
	if (!dl_task(p))
		goto unlock;

	/*
	 * The task might have been boosted by someone else and might be in the
	 * boosting/deboosting path; in that case it's not throttled.
	 */
	if (is_dl_boosted(dl_se))
		goto unlock;

	/*
	 * Spurious timer due to start_dl_timer() race; or we already received
	 * a replenishment from rt_mutex_setprio().
	 */
	if (!dl_se->dl_throttled)
		goto unlock;

	sched_clock_tick();
	update_rq_clock(rq);

	/*
	 * If the throttle happened during sched-out; like:
	 *
	 *   schedule()
	 *     deactivate_task()
	 *       dequeue_task_dl()
	 *         update_curr_dl()
	 *           start_dl_timer()
	 *         __dequeue_task_dl()
	 *     prev->on_rq = 0;
	 *
	 * We can be both throttled and !queued. Replenish the counter
	 * but do not enqueue -- wait for our wakeup to do that.
	 */
	if (!task_on_rq_queued(p)) {
		replenish_dl_entity(dl_se);
		goto unlock;
	}

#ifdef CONFIG_SMP
	if (unlikely(!rq->online)) {
		/*
		 * If the runqueue is no longer available, migrate the
		 * task elsewhere. This necessarily changes rq.
		 */
		lockdep_unpin_lock(__rq_lockp(rq), rf.cookie);
		rq = dl_task_offline_migration(rq, p);
		rf.cookie = lockdep_pin_lock(__rq_lockp(rq));
		update_rq_clock(rq);

		/*
		 * Now that the task has been migrated to the new RQ and we
		 * have that locked, proceed as normal and enqueue the task
		 * there.
		 */
	}
#endif

	enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
	if (dl_task(rq->curr))
		wakeup_preempt_dl(rq, p, 0);
	else
		resched_curr(rq);

	__push_dl_task(rq, &rf);

unlock:
	task_rq_unlock(rq, p, &rf);

	/*
	 * This can free the task_struct, including this hrtimer, do not touch
	 * anything related to that after this.
	 */
	put_task_struct(p);

	return HRTIMER_NORESTART;
}

static void init_dl_task_timer(struct sched_dl_entity *dl_se)
{
	struct hrtimer *timer = &dl_se->dl_timer;

	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	timer->function = dl_task_timer;
}

/*
 * During the activation, CBS checks if it can reuse the current task's
 * runtime and period. If the deadline of the task is in the past, CBS
 * cannot use the runtime, and so it replenishes the task. This rule
 * works fine for implicit deadline tasks (deadline == period), and the
 * CBS was designed for implicit deadline tasks. However, a task with
 * constrained deadline (deadline < period) might be awakened after the
 * deadline, but before the next period. In this case, replenishing the
 * task would allow it to run for runtime / deadline. As in this case
 * deadline < period, CBS enables a task to run for more than the
 * runtime / period. In a very loaded system, this can cause a domino
 * effect, making other tasks miss their deadlines.
 *
 * To avoid this problem, in the activation of a constrained deadline
 * task after the deadline but before the next period, throttle the
 * task and set the replenishing timer to the beginning of the next period,
 * unless it is boosted.
 */
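
/*
 * Timeline sketch for the rule above (illustrative): dl_deadline = 50ms,
 * dl_period = 100ms. The instance started at t0, so the absolute deadline
 * is t0+50 and the next period begins at t0+100. A wakeup at t0+70 falls
 * in the (deadline, next period) window: the task is throttled, runtime is
 * zeroed and dl_timer is armed for t0+100, where a fresh instance starts.
 */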
static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
{
	struct rq *rq = rq_of_dl_se(dl_se);

	if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
	    dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
		if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(dl_se)))
			return;
		dl_se->dl_throttled = 1;
		if (dl_se->runtime > 0)
			dl_se->runtime = 0;
	}
}

static
int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
{
	return (dl_se->runtime <= 0);
}

/*
 * This function implements the GRUB accounting rule. According to the
 * GRUB reclaiming algorithm, the runtime is not decreased as "dq = -dt",
 * but as "dq = -(max{u, (Umax - Uinact - Uextra)} / Umax) dt",
 * where u is the utilization of the task, Umax is the maximum reclaimable
 * utilization, Uinact is the (per-runqueue) inactive utilization, computed
 * as the difference between the "total runqueue utilization" and the
 * "runqueue active utilization", and Uextra is the (per runqueue) extra
 * reclaimable utilization.
 * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations multiplied
 * by 2^BW_SHIFT, the result has to be shifted right by BW_SHIFT.
 * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT, dl_bw
 * is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
 * Since delta is a 64 bit variable, to have an overflow its value should be
 * larger than 2^(64 - 20 - 8), which is more than 64 seconds. So, overflow is
 * not an issue here.
 */
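
/*
 * GRUB worked example (illustrative, taking Umax = 1 for simplicity): a
 * SCHED_FLAG_RECLAIM task with u = dl_bw = 0.25 runs while another 0.25
 * of bandwidth is inactive (Uinact = 0.25, Uextra = 0). Then:
 *
 *	u_act = max{0.25, 1 - 0.25 - 0} = 0.75
 *
 * so grub_reclaim() charges only 0.75us of budget per microsecond of wall
 * clock time: the task reclaims the bandwidth nobody else is using.
 */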
static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
{
	u64 u_act;
	u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */

	/*
	 * Instead of computing max{u, (u_max - u_inact - u_extra)}, we
	 * compare u_inact + u_extra with u_max - u, because u_inact + u_extra
	 * can be larger than u_max. So, u_max - u_inact - u_extra would be
	 * negative leading to wrong results.
	 */
	if (u_inact + rq->dl.extra_bw > rq->dl.max_bw - dl_se->dl_bw)
		u_act = dl_se->dl_bw;
	else
		u_act = rq->dl.max_bw - u_inact - rq->dl.extra_bw;

	u_act = (u_act * rq->dl.bw_ratio) >> RATIO_SHIFT;
	return (delta * u_act) >> BW_SHIFT;
}

static inline void
update_stats_dequeue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
			int flags);
static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec)
{
	s64 scaled_delta_exec;

	if (unlikely(delta_exec <= 0)) {
		if (unlikely(dl_se->dl_yielded))
			goto throttle;
		return;
	}

	if (dl_entity_is_special(dl_se))
		return;

	/*
	 * For tasks that participate in GRUB, we implement GRUB-PA: the
	 * spare reclaimed bandwidth is used to clock down frequency.
	 *
	 * For the others, we still need to scale reservation parameters
	 * according to current frequency and CPU maximum capacity.
	 */
	if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
		scaled_delta_exec = grub_reclaim(delta_exec, rq, dl_se);
	} else {
		int cpu = cpu_of(rq);
		unsigned long scale_freq = arch_scale_freq_capacity(cpu);
		unsigned long scale_cpu = arch_scale_cpu_capacity(cpu);

		scaled_delta_exec = cap_scale(delta_exec, scale_freq);
		scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
	}

	dl_se->runtime -= scaled_delta_exec;

throttle:
	if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
		dl_se->dl_throttled = 1;

		/* If requested, inform the user about runtime overruns. */
		if (dl_runtime_exceeded(dl_se) &&
		    (dl_se->flags & SCHED_FLAG_DL_OVERRUN))
			dl_se->dl_overrun = 1;

		dequeue_dl_entity(dl_se, 0);
		if (!dl_server(dl_se)) {
			update_stats_dequeue_dl(&rq->dl, dl_se, 0);
			dequeue_pushable_dl_task(rq, dl_task_of(dl_se));
		}

		if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(dl_se))) {
			if (dl_server(dl_se))
				enqueue_dl_entity(dl_se, ENQUEUE_REPLENISH);
			else
				enqueue_task_dl(rq, dl_task_of(dl_se), ENQUEUE_REPLENISH);
		}

		if (!is_leftmost(dl_se, &rq->dl))
			resched_curr(rq);
	}

	/*
	 * Because -- for now -- we share the rt bandwidth, we need to
	 * account our runtime there too, otherwise actual rt tasks
	 * would be able to exceed the shared quota.
	 *
	 * Account to the root rt group for now.
	 *
	 * The solution we're working towards is having the RT groups scheduled
	 * using deadline servers -- however there's a few nasties to figure
	 * out before that can happen.
	 */
	if (rt_bandwidth_enabled()) {
		struct rt_rq *rt_rq = &rq->rt;

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We'll let actual RT tasks worry about the overflow here, we
		 * have our own CBS to keep us in line; only account when RT
		 * bandwidth is relevant.
		 */
		if (sched_rt_bandwidth_account(rt_rq))
			rt_rq->rt_time += delta_exec;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
}
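
/*
 * Scaling example for the !RECLAIM path above (illustrative): on a CPU
 * currently at half frequency (scale_freq = 512) with half of the maximum
 * system capacity (scale_cpu = 512), cap_scale() halves delta_exec twice:
 *
 *	scaled_delta_exec = 1ms * 512/1024 * 512/1024 = 0.25ms
 *
 * i.e. 1ms of wall clock on a slow CPU at a low frequency consumes only a
 * quarter of the budget, which was reserved assuming a full-speed CPU.
 */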
void dl_server_update(struct sched_dl_entity *dl_se, s64 delta_exec)
{
	update_curr_dl_se(dl_se->rq, dl_se, delta_exec);
}

void dl_server_start(struct sched_dl_entity *dl_se)
{
	if (!dl_server(dl_se)) {
		dl_se->dl_server = 1;
		setup_new_dl_entity(dl_se);
	}
	enqueue_dl_entity(dl_se, ENQUEUE_WAKEUP);
}

void dl_server_stop(struct sched_dl_entity *dl_se)
{
	dequeue_dl_entity(dl_se, DEQUEUE_SLEEP);
}

void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq,
		    dl_server_has_tasks_f has_tasks,
		    dl_server_pick_f pick)
{
	dl_se->rq = rq;
	dl_se->server_has_tasks = has_tasks;
	dl_se->server_pick = pick;
}

/*
 * Update the current task's runtime statistics (provided it is still
 * a -deadline task and has not been removed from the dl_rq).
 */
static void update_curr_dl(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_dl_entity *dl_se = &curr->dl;
	s64 delta_exec;

	if (!dl_task(curr) || !on_dl_rq(dl_se))
		return;

	/*
	 * Consumed budget is computed considering the time as
	 * observed by schedulable tasks (excluding time spent
	 * in hardirq context, etc.). Deadlines are instead
	 * computed using hard walltime. This seems to be the more
	 * natural solution, but the full ramifications of this
	 * approach need further study.
	 */
	delta_exec = update_curr_common(rq);
	update_curr_dl_se(rq, dl_se, delta_exec);
}

static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
{
	struct sched_dl_entity *dl_se = container_of(timer,
						     struct sched_dl_entity,
						     inactive_timer);
	struct task_struct *p = NULL;
	struct rq_flags rf;
	struct rq *rq;

	if (!dl_server(dl_se)) {
		p = dl_task_of(dl_se);
		rq = task_rq_lock(p, &rf);
	} else {
		rq = dl_se->rq;
		rq_lock(rq, &rf);
	}

	sched_clock_tick();
	update_rq_clock(rq);

	if (dl_server(dl_se))
		goto no_task;

	if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
		struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

		if (READ_ONCE(p->__state) == TASK_DEAD && dl_se->dl_non_contending) {
			sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
			sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
			dl_se->dl_non_contending = 0;
		}

		raw_spin_lock(&dl_b->lock);
		__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
		raw_spin_unlock(&dl_b->lock);
		__dl_clear_params(dl_se);

		goto unlock;
	}

no_task:
	if (dl_se->dl_non_contending == 0)
		goto unlock;

	sub_running_bw(dl_se, &rq->dl);
	dl_se->dl_non_contending = 0;
unlock:

	if (!dl_server(dl_se)) {
		task_rq_unlock(rq, p, &rf);
		put_task_struct(p);
	} else {
		rq_unlock(rq, &rf);
	}

	return HRTIMER_NORESTART;
}

static void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
{
	struct hrtimer *timer = &dl_se->inactive_timer;

	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	timer->function = inactive_task_timer;
}

#define __node_2_dle(node) \
	rb_entry((node), struct sched_dl_entity, rb_node)

#ifdef CONFIG_SMP

static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
	struct rq *rq = rq_of_dl_rq(dl_rq);

	if (dl_rq->earliest_dl.curr == 0 ||
	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
		if (dl_rq->earliest_dl.curr == 0)
			cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_HIGHER);
		dl_rq->earliest_dl.curr = deadline;
		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
	}
}

static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
	struct rq *rq = rq_of_dl_rq(dl_rq);

	/*
	 * Since we may have removed our earliest (and/or next earliest)
	 * task we must recompute them.
	 */
	if (!dl_rq->dl_nr_running) {
		dl_rq->earliest_dl.curr = 0;
		dl_rq->earliest_dl.next = 0;
		cpudl_clear(&rq->rd->cpudl, rq->cpu);
		cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
	} else {
		struct rb_node *leftmost = rb_first_cached(&dl_rq->root);
		struct sched_dl_entity *entry = __node_2_dle(leftmost);

		dl_rq->earliest_dl.curr = entry->deadline;
		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
	}
}

#else

static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}

#endif /* CONFIG_SMP */

static inline
void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	u64 deadline = dl_se->deadline;

	dl_rq->dl_nr_running++;
	add_nr_running(rq_of_dl_rq(dl_rq), 1);

	inc_dl_deadline(dl_rq, deadline);
}

static inline
void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	WARN_ON(!dl_rq->dl_nr_running);
	dl_rq->dl_nr_running--;
	sub_nr_running(rq_of_dl_rq(dl_rq), 1);

	dec_dl_deadline(dl_rq, dl_se->deadline);
}

static inline bool __dl_less(struct rb_node *a, const struct rb_node *b)
{
	return dl_time_before(__node_2_dle(a)->deadline, __node_2_dle(b)->deadline);
}

static inline struct sched_statistics *
__schedstats_from_dl_se(struct sched_dl_entity *dl_se)
{
	return &dl_task_of(dl_se)->stats;
}

static inline void
update_stats_wait_start_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
{
	struct sched_statistics *stats;

	if (!schedstat_enabled())
		return;

	stats = __schedstats_from_dl_se(dl_se);
	__update_stats_wait_start(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
}

static inline void
update_stats_wait_end_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
{
	struct sched_statistics *stats;

	if (!schedstat_enabled())
		return;

	stats = __schedstats_from_dl_se(dl_se);
	__update_stats_wait_end(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
}

static inline void
update_stats_enqueue_sleeper_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
{
	struct sched_statistics *stats;

	if (!schedstat_enabled())
		return;

	stats = __schedstats_from_dl_se(dl_se);
	__update_stats_enqueue_sleeper(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
}

static inline void
update_stats_enqueue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
			int flags)
{
	if (!schedstat_enabled())
		return;

	if (flags & ENQUEUE_WAKEUP)
		update_stats_enqueue_sleeper_dl(dl_rq, dl_se);
}

static inline void
update_stats_dequeue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
			int flags)
{
	struct task_struct *p = dl_task_of(dl_se);

	if (!schedstat_enabled())
		return;

	if ((flags & DEQUEUE_SLEEP)) {
		unsigned int state;

		state = READ_ONCE(p->__state);
		if (state & TASK_INTERRUPTIBLE)
			__schedstat_set(p->stats.sleep_start,
					rq_clock(rq_of_dl_rq(dl_rq)));

		if (state & TASK_UNINTERRUPTIBLE)
			__schedstat_set(p->stats.block_start,
					rq_clock(rq_of_dl_rq(dl_rq)));
	}
}

static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

	WARN_ON_ONCE(!RB_EMPTY_NODE(&dl_se->rb_node));

	rb_add_cached(&dl_se->rb_node, &dl_rq->root, __dl_less);

	inc_dl_tasks(dl_se, dl_rq);
}

static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

	if (RB_EMPTY_NODE(&dl_se->rb_node))
		return;

	rb_erase_cached(&dl_se->rb_node, &dl_rq->root);

	RB_CLEAR_NODE(&dl_se->rb_node);

	dec_dl_tasks(dl_se, dl_rq);
}

static void
enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
{
	WARN_ON_ONCE(on_dl_rq(dl_se));

	update_stats_enqueue_dl(dl_rq_of_se(dl_se), dl_se, flags);

	/*
	 * Check if a constrained deadline task was activated
	 * after the deadline but before the next period.
	 * If that is the case, the task will be throttled and
	 * the replenishment timer will be set to the next period.
	 */
	if (!dl_se->dl_throttled && !dl_is_implicit(dl_se))
		dl_check_constrained_dl(dl_se);

	if (flags & (ENQUEUE_RESTORE|ENQUEUE_MIGRATING)) {
		struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

		add_rq_bw(dl_se, dl_rq);
		add_running_bw(dl_se, dl_rq);
	}

	/*
	 * If p is throttled, we do not enqueue it. In fact, if it exhausted
	 * its budget it needs a replenishment and, since it now is on
	 * its rq, the bandwidth timer callback (which clearly has not
	 * run yet) will take care of this.
	 * However, the active utilization does not depend on the fact
	 * that the task is on the runqueue or not (but depends on the
	 * task's state - in GRUB parlance, "inactive" vs "active contending").
	 * In other words, even if a task is throttled its utilization must
	 * be counted in the active utilization; hence, we need to call
	 * add_running_bw().
	 */
	if (dl_se->dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
		if (flags & ENQUEUE_WAKEUP)
			task_contending(dl_se, flags);

		return;
	}

	/*
	 * If this is a wakeup or a new instance, the scheduling
	 * parameters of the task might need updating. Otherwise,
	 * we want a replenishment of its runtime.
	 */
	if (flags & ENQUEUE_WAKEUP) {
		task_contending(dl_se, flags);
		update_dl_entity(dl_se);
	} else if (flags & ENQUEUE_REPLENISH) {
		replenish_dl_entity(dl_se);
	} else if ((flags & ENQUEUE_RESTORE) &&
		   dl_time_before(dl_se->deadline, rq_clock(rq_of_dl_se(dl_se)))) {
		setup_new_dl_entity(dl_se);
	}

	__enqueue_dl_entity(dl_se);
}

static void dequeue_dl_entity(struct sched_dl_entity *dl_se, int flags)
{
	__dequeue_dl_entity(dl_se);

	if (flags & (DEQUEUE_SAVE|DEQUEUE_MIGRATING)) {
		struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

		sub_running_bw(dl_se, dl_rq);
		sub_rq_bw(dl_se, dl_rq);
	}

	/*
	 * This check allows us to start the inactive timer (or to immediately
	 * decrease the active utilization, if needed) in two cases:
	 * when the task blocks and when it is terminating
	 * (p->state == TASK_DEAD). We can handle the two cases in the same
	 * way, because from GRUB's point of view the same thing is happening
	 * (the task moves from "active contending" to "active non contending"
	 * or "inactive")
	 */
	if (flags & DEQUEUE_SLEEP)
		task_non_contending(dl_se);
}

static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	if (is_dl_boosted(&p->dl)) {
		/*
		 * Because of delays in the detection of the overrun of a
		 * thread's runtime, it might be the case that a thread
		 * goes to sleep in a rt mutex with negative runtime. As
		 * a consequence, the thread will be throttled.
		 *
		 * While waiting for the mutex, this thread can also be
		 * boosted via PI, resulting in a thread that is throttled
		 * and boosted at the same time.
		 *
		 * In this case, the boost overrides the throttle.
		 */
		if (p->dl.dl_throttled) {
			/*
			 * The replenish timer needs to be canceled. No
			 * problem if it fires concurrently: boosted threads
			 * are ignored in dl_task_timer().
			 *
			 * If the timer callback was running (hrtimer_try_to_cancel == -1),
			 * it will eventually call put_task_struct().
			 */
			if (hrtimer_try_to_cancel(&p->dl.dl_timer) == 1 &&
			    !dl_server(&p->dl))
				put_task_struct(p);
			p->dl.dl_throttled = 0;
		}
	} else if (!dl_prio(p->normal_prio)) {
		/*
		 * Special case in which we have a !SCHED_DEADLINE task that is going
		 * to be deboosted, but exceeds its runtime while doing so. No point in
		 * replenishing it, as it's going to return back to its original
		 * scheduling class after this. If it has been throttled, we need to
		 * clear the flag, otherwise the task may wake up as throttled after
		 * being boosted again with no means to replenish the runtime and clear
		 * the throttle.
		 */
		p->dl.dl_throttled = 0;
		if (!(flags & ENQUEUE_REPLENISH))
			printk_deferred_once("sched: DL de-boosted task PID %d: REPLENISH flag missing\n",
					     task_pid_nr(p));

		return;
	}

	check_schedstat_required();
	update_stats_wait_start_dl(dl_rq_of_se(&p->dl), &p->dl);

	if (p->on_rq == TASK_ON_RQ_MIGRATING)
		flags |= ENQUEUE_MIGRATING;

	enqueue_dl_entity(&p->dl, flags);

	if (dl_server(&p->dl))
		return;

	if (!task_current(rq, p) && !p->dl.dl_throttled && p->nr_cpus_allowed > 1)
		enqueue_pushable_dl_task(rq, p);
}

static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	update_curr_dl(rq);

	if (p->on_rq == TASK_ON_RQ_MIGRATING)
		flags |= DEQUEUE_MIGRATING;

	dequeue_dl_entity(&p->dl, flags);
	if (!p->dl.dl_throttled && !dl_server(&p->dl))
		dequeue_pushable_dl_task(rq, p);
}

/*
 * The yield semantics for -deadline tasks are:
 *
 *   get off from the CPU until our next instance, with
 *   a new runtime. This is of little use now, since we
 *   don't have a bandwidth reclaiming mechanism. Anyway,
 *   bandwidth reclaiming is planned for the future, and
 *   yield_task_dl will indicate that some spare budget
 *   is available for other task instances to use.
 */
static void yield_task_dl(struct rq *rq)
{
	/*
	 * We make the task go to sleep until its current deadline by
	 * forcing its runtime to zero. This way, update_curr_dl() stops
	 * it and the bandwidth timer will wake it up and will give it
	 * new scheduling parameters (thanks to dl_yielded=1).
	 */
	rq->curr->dl.dl_yielded = 1;

	update_rq_clock(rq);
	update_curr_dl(rq);
	/*
	 * Tell update_rq_clock() that we've just updated,
	 * so we don't do microscopic update in schedule()
	 * and double the fastpath cost.
	 */
	rq_clock_skip_update(rq);
}
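
/*
 * Illustrative userspace pattern (not part of this file): a periodic
 * SCHED_DEADLINE task that finishes its job early can call sched_yield()
 * to give up the remaining budget until the next instance:
 *
 *	for (;;) {
 *		do_periodic_work();	// hypothetical job function
 *		sched_yield();		// sleep until the next period
 *	}
 *
 * as described in Documentation/scheduler/sched-deadline.rst.
 */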
#ifdef CONFIG_SMP

static inline bool dl_task_is_earliest_deadline(struct task_struct *p,
                                                struct rq *rq)
{
        return (!rq->dl.dl_nr_running ||
                dl_time_before(p->dl.deadline,
                               rq->dl.earliest_dl.curr));
}

static int find_later_rq(struct task_struct *task);

static int
select_task_rq_dl(struct task_struct *p, int cpu, int flags)
{
        struct task_struct *curr;
        bool select_rq;
        struct rq *rq;

        if (!(flags & WF_TTWU))
                goto out;

        rq = cpu_rq(cpu);

        rcu_read_lock();
        curr = READ_ONCE(rq->curr); /* unlocked access */

        /*
         * If we are dealing with a -deadline task, we must
         * decide where to wake it up.
         * If it has a later deadline and the current task
         * on this rq can't move (provided the waking task
         * can!) we prefer to send it somewhere else. On the
         * other hand, if it has a shorter deadline, we
         * try to make it stay here, it might be important.
         */
        select_rq = unlikely(dl_task(curr)) &&
                    (curr->nr_cpus_allowed < 2 ||
                     !dl_entity_preempt(&p->dl, &curr->dl)) &&
                    p->nr_cpus_allowed > 1;

        /*
         * Take the capacity of the CPU into account to
         * ensure it fits the requirement of the task.
         */
        if (sched_asym_cpucap_active())
                select_rq |= !dl_task_fits_capacity(p, cpu);

        if (select_rq) {
                int target = find_later_rq(p);

                if (target != -1 &&
                    dl_task_is_earliest_deadline(p, cpu_rq(target)))
                        cpu = target;
        }
        rcu_read_unlock();

out:
        return cpu;
}

static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
{
        struct rq_flags rf;
        struct rq *rq;

        if (READ_ONCE(p->__state) != TASK_WAKING)
                return;

        rq = task_rq(p);
        /*
         * Since p->state == TASK_WAKING, set_task_cpu() has been called
         * from try_to_wake_up(). Hence, p->pi_lock is locked, but
         * rq->lock is not... So, lock it.
         */
        rq_lock(rq, &rf);
        if (p->dl.dl_non_contending) {
                update_rq_clock(rq);
                sub_running_bw(&p->dl, &rq->dl);
                p->dl.dl_non_contending = 0;
                /*
                 * If the timer handler is currently running and the
                 * timer cannot be canceled, inactive_task_timer()
                 * will see that dl_non_contending is not set, and
                 * will not touch the rq's active utilization,
                 * so we are still safe.
                 */
                if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
                        put_task_struct(p);
        }
        sub_rq_bw(&p->dl, &rq->dl);
        rq_unlock(rq, &rf);
}
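/*
 * The wakeup placement rule in select_task_rq_dl() can be read as the
 * following predicate (illustrative rewrite using the same names):
 *
 *        bool move_away = dl_task(curr) &&
 *                         (curr->nr_cpus_allowed < 2 ||  // curr is pinned here
 *                          !dl_entity_preempt(&p->dl, &curr->dl)) && // p won't preempt curr
 *                         p->nr_cpus_allowed > 1;        // but p can migrate
 *
 * i.e. we only go looking for a later rq when keeping p here would
 * neither let it run soon nor let curr move out of its way.
 */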
static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
{
        /*
         * Current can't be migrated, useless to reschedule,
         * let's hope p can move out.
         */
        if (rq->curr->nr_cpus_allowed == 1 ||
            !cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
                return;

        /*
         * p is migratable, so let's not schedule it and
         * see if it is pushed or pulled somewhere else.
         */
        if (p->nr_cpus_allowed != 1 &&
            cpudl_find(&rq->rd->cpudl, p, NULL))
                return;

        resched_curr(rq);
}

static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
{
        if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
                /*
                 * This is OK, because current is on_cpu, which avoids it being
                 * picked for load-balance and preemption/IRQs are still
                 * disabled avoiding further scheduler activity on it and we've
                 * not yet started the picking loop.
                 */
                rq_unpin_lock(rq, rf);
                pull_dl_task(rq);
                rq_repin_lock(rq, rf);
        }

        return sched_stop_runnable(rq) || sched_dl_runnable(rq);
}
#endif /* CONFIG_SMP */

/*
 * Only called when both the current and waking task are -deadline
 * tasks.
 */
static void wakeup_preempt_dl(struct rq *rq, struct task_struct *p,
                              int flags)
{
        if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
                resched_curr(rq);
                return;
        }

#ifdef CONFIG_SMP
        /*
         * In the unlikely case current and p have the same deadline
         * let us try to decide what's the best thing to do...
         */
        if ((p->dl.deadline == rq->curr->dl.deadline) &&
            !test_tsk_need_resched(rq->curr))
                check_preempt_equal_dl(rq, p);
#endif /* CONFIG_SMP */
}

#ifdef CONFIG_SCHED_HRTICK
static void start_hrtick_dl(struct rq *rq, struct sched_dl_entity *dl_se)
{
        hrtick_start(rq, dl_se->runtime);
}
#else /* !CONFIG_SCHED_HRTICK */
static void start_hrtick_dl(struct rq *rq, struct sched_dl_entity *dl_se)
{
}
#endif

static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
{
        struct sched_dl_entity *dl_se = &p->dl;
        struct dl_rq *dl_rq = &rq->dl;

        p->se.exec_start = rq_clock_task(rq);
        if (on_dl_rq(&p->dl))
                update_stats_wait_end_dl(dl_rq, dl_se);

        /* You can't push away the running task */
        dequeue_pushable_dl_task(rq, p);

        if (!first)
                return;

        if (rq->curr->sched_class != &dl_sched_class)
                update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);

        deadline_queue_push_tasks(rq);
}

static struct sched_dl_entity *pick_next_dl_entity(struct dl_rq *dl_rq)
{
        struct rb_node *left = rb_first_cached(&dl_rq->root);

        if (!left)
                return NULL;

        return __node_2_dle(left);
}

static struct task_struct *pick_task_dl(struct rq *rq)
{
        struct sched_dl_entity *dl_se;
        struct dl_rq *dl_rq = &rq->dl;
        struct task_struct *p;

again:
        if (!sched_dl_runnable(rq))
                return NULL;

        dl_se = pick_next_dl_entity(dl_rq);
        WARN_ON_ONCE(!dl_se);

        if (dl_server(dl_se)) {
                p = dl_se->server_pick(dl_se);
                if (!p) {
                        WARN_ON_ONCE(1);
                        dl_se->dl_yielded = 1;
                        update_curr_dl_se(rq, dl_se, 0);
                        goto again;
                }
                p->dl_server = dl_se;
        } else {
                p = dl_task_of(dl_se);
        }

        return p;
}

static struct task_struct *pick_next_task_dl(struct rq *rq)
{
        struct task_struct *p;

        p = pick_task_dl(rq);
        if (!p)
                return p;

        if (!p->dl_server)
                set_next_task_dl(rq, p, true);

        if (hrtick_enabled(rq))
                start_hrtick_dl(rq, &p->dl);

        return p;
}
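/*
 * A dl_server entity has no task of its own: it serves tasks of another
 * scheduling class within a deadline reservation, via the server_pick
 * callback used in pick_task_dl() above. A hypothetical callback could
 * look like this (sketch only; the actual server implementations live
 * elsewhere):
 *
 *        static struct task_struct *my_server_pick(struct sched_dl_entity *dl_se)
 *        {
 *                // return a runnable task of the served class on
 *                // dl_se->rq, or NULL if there is nothing to serve
 *        }
 *
 * When the callback returns NULL the server yields: pick_task_dl()
 * charges it via update_curr_dl_se() and retries with the next entity.
 */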
static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
{
        struct sched_dl_entity *dl_se = &p->dl;
        struct dl_rq *dl_rq = &rq->dl;

        if (on_dl_rq(&p->dl))
                update_stats_wait_start_dl(dl_rq, dl_se);

        update_curr_dl(rq);

        update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
        if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
                enqueue_pushable_dl_task(rq, p);
}

/*
 * scheduler tick hitting a task of our scheduling class.
 *
 * NOTE: This function can be called remotely by the tick offload that
 * goes along full dynticks. Therefore no local assumption can be made
 * and everything must be accessed through the @rq and @curr passed in
 * parameters.
 */
static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
{
        update_curr_dl(rq);

        update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
        /*
         * Even when we have runtime, update_curr_dl() might have resulted in us
         * not being the leftmost task anymore. In that case NEED_RESCHED will
         * be set and schedule() will start a new hrtick for the next task.
         */
        if (hrtick_enabled_dl(rq) && queued && p->dl.runtime > 0 &&
            is_leftmost(&p->dl, &rq->dl))
                start_hrtick_dl(rq, &p->dl);
}
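/*
 * Numeric example for the hrtick interplay above: assume p has 1.3ms of
 * runtime left when it is picked. start_hrtick_dl() arms a
 * high-resolution timer 1.3ms into the future, so budget exhaustion is
 * detected with nanosecond resolution instead of waiting for the next
 * periodic tick (which can be up to 10ms away with HZ=100).
 */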
static void task_fork_dl(struct task_struct *p)
{
        /*
         * SCHED_DEADLINE tasks cannot fork and this is achieved through
         * sched_fork().
         */
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define DL_MAX_TRIES 3

static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
{
        if (!task_on_cpu(rq, p) &&
            cpumask_test_cpu(cpu, &p->cpus_mask))
                return 1;
        return 0;
}

/*
 * Return the earliest pushable task of the rq that is suitable to be
 * executed on the given CPU, NULL otherwise:
 */
static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
{
        struct task_struct *p = NULL;
        struct rb_node *next_node;

        if (!has_pushable_dl_tasks(rq))
                return NULL;

        next_node = rb_first_cached(&rq->dl.pushable_dl_tasks_root);

next_node:
        if (next_node) {
                p = __node_2_pdl(next_node);

                if (pick_dl_task(rq, p, cpu))
                        return p;

                next_node = rb_next(next_node);
                goto next_node;
        }

        return NULL;
}

static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);

static int find_later_rq(struct task_struct *task)
{
        struct sched_domain *sd;
        struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
        int this_cpu = smp_processor_id();
        int cpu = task_cpu(task);

        /* Make sure the mask is initialized first */
        if (unlikely(!later_mask))
                return -1;

        if (task->nr_cpus_allowed == 1)
                return -1;

        /*
         * We have to consider system topology and task affinity
         * first, then we can look for a suitable CPU.
         */
        if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
                return -1;

        /*
         * If we are here, some targets have been found. The most suitable
         * of them is the rq whose current task has the latest deadline,
         * among the runqueues whose current tasks have later deadlines
         * than our task's.
         *
         * Now we check how well this matches with task's
         * affinity and system topology.
         *
         * The last CPU where the task ran is our first
         * guess, since it is most likely cache-hot there.
         */
        if (cpumask_test_cpu(cpu, later_mask))
                return cpu;
        /*
         * Check if this_cpu is to be skipped (i.e., it is
         * not in the mask) or not.
         */
        if (!cpumask_test_cpu(this_cpu, later_mask))
                this_cpu = -1;

        rcu_read_lock();
        for_each_domain(cpu, sd) {
                if (sd->flags & SD_WAKE_AFFINE) {
                        int best_cpu;

                        /*
                         * If possible, preempting this_cpu is
                         * cheaper than migrating.
                         */
                        if (this_cpu != -1 &&
                            cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
                                rcu_read_unlock();
                                return this_cpu;
                        }

                        best_cpu = cpumask_any_and_distribute(later_mask,
                                                              sched_domain_span(sd));
                        /*
                         * Last chance: if a CPU that is in both later_mask
                         * and the current sd span is valid, that becomes our
                         * choice. Of course, the latest possible CPU is
                         * already under consideration through later_mask.
                         */
                        if (best_cpu < nr_cpu_ids) {
                                rcu_read_unlock();
                                return best_cpu;
                        }
                }
        }
        rcu_read_unlock();

        /*
         * At this point, all our guesses failed, we just return
         * 'something', and let the caller sort things out.
         */
        if (this_cpu != -1)
                return this_cpu;

        cpu = cpumask_any_distribute(later_mask);
        if (cpu < nr_cpu_ids)
                return cpu;

        return -1;
}
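/*
 * To summarize, find_later_rq() tries its candidates in this order
 * (illustrative restatement of the code above):
 *
 *        1. task_cpu(task), if it is in later_mask (likely cache-hot);
 *        2. this_cpu, if it is in later_mask and shares a SD_WAKE_AFFINE
 *           domain with task_cpu(task) (preemption is cheaper than
 *           migration);
 *        3. any later_mask CPU in the smallest SD_WAKE_AFFINE domain
 *           around task_cpu(task);
 *        4. this_cpu, if it was in later_mask at all;
 *        5. any CPU in later_mask;
 *        6. -1, if even that fails.
 */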
/* Locks the rq it finds */
static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
{
        struct rq *later_rq = NULL;
        int tries;
        int cpu;

        for (tries = 0; tries < DL_MAX_TRIES; tries++) {
                cpu = find_later_rq(task);

                if ((cpu == -1) || (cpu == rq->cpu))
                        break;

                later_rq = cpu_rq(cpu);

                if (!dl_task_is_earliest_deadline(task, later_rq)) {
                        /*
                         * Target rq has tasks of equal or earlier deadline,
                         * retrying does not release any lock and is unlikely
                         * to yield a different result.
                         */
                        later_rq = NULL;
                        break;
                }

                /* Retry if something changed. */
                if (double_lock_balance(rq, later_rq)) {
                        if (unlikely(task_rq(task) != rq ||
                                     !cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) ||
                                     task_on_cpu(rq, task) ||
                                     !dl_task(task) ||
                                     is_migration_disabled(task) ||
                                     !task_on_rq_queued(task))) {
                                double_unlock_balance(rq, later_rq);
                                later_rq = NULL;
                                break;
                        }
                }

                /*
                 * If the rq we found has no -deadline task, or
                 * its earliest one has a later deadline than our
                 * task, the rq is a good one.
                 */
                if (dl_task_is_earliest_deadline(task, later_rq))
                        break;

                /* Otherwise we try again. */
                double_unlock_balance(rq, later_rq);
                later_rq = NULL;
        }

        return later_rq;
}

static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
{
        struct task_struct *p;

        if (!has_pushable_dl_tasks(rq))
                return NULL;

        p = __node_2_pdl(rb_first_cached(&rq->dl.pushable_dl_tasks_root));

        WARN_ON_ONCE(rq->cpu != task_cpu(p));
        WARN_ON_ONCE(task_current(rq, p));
        WARN_ON_ONCE(p->nr_cpus_allowed <= 1);

        WARN_ON_ONCE(!task_on_rq_queued(p));
        WARN_ON_ONCE(!dl_task(p));

        return p;
}
/*
 * See if the non-running -deadline tasks on this rq
 * can be sent to some other CPU where they can preempt
 * and start executing.
 */
static int push_dl_task(struct rq *rq)
{
        struct task_struct *next_task;
        struct rq *later_rq;
        int ret = 0;

        next_task = pick_next_pushable_dl_task(rq);
        if (!next_task)
                return 0;

retry:
        /*
         * If next_task preempts rq->curr, and rq->curr
         * can move away, it makes sense to just reschedule
         * without going further in pushing next_task.
         */
        if (dl_task(rq->curr) &&
            dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
            rq->curr->nr_cpus_allowed > 1) {
                resched_curr(rq);
                return 0;
        }

        if (is_migration_disabled(next_task))
                return 0;

        if (WARN_ON(next_task == rq->curr))
                return 0;

        /* We might release rq lock */
        get_task_struct(next_task);

        /* Will lock the rq it'll find */
        later_rq = find_lock_later_rq(next_task, rq);
        if (!later_rq) {
                struct task_struct *task;

                /*
                 * We must check all this again, since
                 * find_lock_later_rq releases rq->lock and it is
                 * then possible that next_task has migrated.
                 */
                task = pick_next_pushable_dl_task(rq);
                if (task == next_task) {
                        /*
                         * The task is still there. We don't try
                         * again, some other CPU will pull it when ready.
                         */
                        goto out;
                }

                if (!task)
                        /* No more tasks */
                        goto out;

                put_task_struct(next_task);
                next_task = task;
                goto retry;
        }

        deactivate_task(rq, next_task, 0);
        set_task_cpu(next_task, later_rq->cpu);
        activate_task(later_rq, next_task, 0);
        ret = 1;

        resched_curr(later_rq);

        double_unlock_balance(rq, later_rq);

out:
        put_task_struct(next_task);

        return ret;
}

static void push_dl_tasks(struct rq *rq)
{
        /* push_dl_task() will return true if it moved a -deadline task */
        while (push_dl_task(rq))
                ;
}
static void pull_dl_task(struct rq *this_rq)
{
        int this_cpu = this_rq->cpu, cpu;
        struct task_struct *p, *push_task;
        bool resched = false;
        struct rq *src_rq;
        u64 dmin = LONG_MAX;

        if (likely(!dl_overloaded(this_rq)))
                return;

        /*
         * Match the barrier from dl_set_overloaded; this guarantees that if we
         * see overloaded we must also see the dlo_mask bit.
         */
        smp_rmb();

        for_each_cpu(cpu, this_rq->rd->dlo_mask) {
                if (this_cpu == cpu)
                        continue;

                src_rq = cpu_rq(cpu);

                /*
                 * It looks racy, and it is! However, as in sched_rt.c,
                 * we are fine with this.
                 */
                if (this_rq->dl.dl_nr_running &&
                    dl_time_before(this_rq->dl.earliest_dl.curr,
                                   src_rq->dl.earliest_dl.next))
                        continue;

                /* Might drop this_rq->lock */
                push_task = NULL;
                double_lock_balance(this_rq, src_rq);

                /*
                 * If there are no more pullable tasks on the
                 * rq, we're done with it.
                 */
                if (src_rq->dl.dl_nr_running <= 1)
                        goto skip;

                p = pick_earliest_pushable_dl_task(src_rq, this_cpu);

                /*
                 * We found a task to be pulled if:
                 *  - it preempts our current (if there's one),
                 *  - it will preempt the last one we pulled (if any).
                 */
                if (p && dl_time_before(p->dl.deadline, dmin) &&
                    dl_task_is_earliest_deadline(p, this_rq)) {
                        WARN_ON(p == src_rq->curr);
                        WARN_ON(!task_on_rq_queued(p));

                        /*
                         * However, if p's deadline is earlier than that of
                         * the task currently running on src_rq, then src_rq
                         * is about to reschedule to p anyway; pulling it
                         * here would only cause a pointless migration, so
                         * we skip it.
                         */
                        if (dl_time_before(p->dl.deadline,
                                           src_rq->curr->dl.deadline))
                                goto skip;

                        if (is_migration_disabled(p)) {
                                push_task = get_push_task(src_rq);
                        } else {
                                deactivate_task(src_rq, p, 0);
                                set_task_cpu(p, this_cpu);
                                activate_task(this_rq, p, 0);
                                dmin = p->dl.deadline;
                                resched = true;
                        }

                        /* Is there any other task even earlier? */
                }
skip:
                double_unlock_balance(this_rq, src_rq);

                if (push_task) {
                        preempt_disable();
                        raw_spin_rq_unlock(this_rq);
                        stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
                                            push_task, &src_rq->push_work);
                        preempt_enable();
                        raw_spin_rq_lock(this_rq);
                }
        }

        if (resched)
                resched_curr(this_rq);
}
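/*
 * Example for the skip above (deadlines in ms from now): src_rq runs
 * curr with deadline 10 while p, with deadline 5, sits on its pushable
 * list only because it just woke up and src_rq has not rescheduled yet.
 * Pulling p would migrate the very task src_rq is about to run;
 * skipping it lets the local preemption happen instead.
 */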
/*
 * Since the task is not running and a reschedule is not going to happen
 * anytime soon on its runqueue, we try pushing it away now.
 */
static void task_woken_dl(struct rq *rq, struct task_struct *p)
{
        if (!task_on_cpu(rq, p) &&
            !test_tsk_need_resched(rq->curr) &&
            p->nr_cpus_allowed > 1 &&
            dl_task(rq->curr) &&
            (rq->curr->nr_cpus_allowed < 2 ||
             !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
                push_dl_tasks(rq);
        }
}

static void set_cpus_allowed_dl(struct task_struct *p,
                                struct affinity_context *ctx)
{
        struct root_domain *src_rd;
        struct rq *rq;

        WARN_ON_ONCE(!dl_task(p));

        rq = task_rq(p);
        src_rd = rq->rd;
        /*
         * Migrating a SCHED_DEADLINE task between exclusive
         * cpusets (different root_domains) entails a bandwidth
         * update. We already made space for us in the destination
         * domain (see cpuset_can_attach()).
         */
        if (!cpumask_intersects(src_rd->span, ctx->new_mask)) {
                struct dl_bw *src_dl_b;

                src_dl_b = dl_bw_of(cpu_of(rq));
                /*
                 * We now free resources of the root_domain we are migrating
                 * off. In the worst case, sched_setattr() may temporarily fail
                 * until we complete the update.
                 */
                raw_spin_lock(&src_dl_b->lock);
                __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
                raw_spin_unlock(&src_dl_b->lock);
        }

        set_cpus_allowed_common(p, ctx);
}
/* Assumes rq->lock is held */
static void rq_online_dl(struct rq *rq)
{
        if (rq->dl.overloaded)
                dl_set_overload(rq);

        cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
        if (rq->dl.dl_nr_running > 0)
                cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
}

/* Assumes rq->lock is held */
static void rq_offline_dl(struct rq *rq)
{
        if (rq->dl.overloaded)
                dl_clear_overload(rq);

        cpudl_clear(&rq->rd->cpudl, rq->cpu);
        cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
}

void __init init_sched_dl_class(void)
{
        unsigned int i;

        for_each_possible_cpu(i)
                zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
                                        GFP_KERNEL, cpu_to_node(i));
}

void dl_add_task_root_domain(struct task_struct *p)
{
        struct rq_flags rf;
        struct rq *rq;
        struct dl_bw *dl_b;

        raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
        if (!dl_task(p)) {
                raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
                return;
        }

        rq = __task_rq_lock(p, &rf);

        dl_b = &rq->rd->dl_bw;
        raw_spin_lock(&dl_b->lock);

        __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));

        raw_spin_unlock(&dl_b->lock);

        task_rq_unlock(rq, p, &rf);
}

void dl_clear_root_domain(struct root_domain *rd)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&rd->dl_bw.lock, flags);
        rd->dl_bw.total_bw = 0;
        raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags);
}

#endif /* CONFIG_SMP */
static void switched_from_dl(struct rq *rq, struct task_struct *p)
{
        /*
         * task_non_contending() can start the "inactive timer" (if the 0-lag
         * time is in the future). If the task switches back to dl before
         * the "inactive timer" fires, it can continue to consume its current
         * runtime using its current deadline. If it stays outside of
         * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer()
         * will reset the task parameters.
         */
        if (task_on_rq_queued(p) && p->dl.dl_runtime)
                task_non_contending(&p->dl);

        /*
         * In case a task is setscheduled out from SCHED_DEADLINE we need to
         * keep track of that on its cpuset (for correct bandwidth tracking).
         */
        dec_dl_tasks_cs(p);

        if (!task_on_rq_queued(p)) {
                /*
                 * Inactive timer is armed. However, p is leaving DEADLINE and
                 * might migrate away from this rq while continuing to run on
                 * some other class. We need to remove its contribution from
                 * this rq running_bw now, or sub_rq_bw (below) will complain.
                 */
                if (p->dl.dl_non_contending)
                        sub_running_bw(&p->dl, &rq->dl);
                sub_rq_bw(&p->dl, &rq->dl);
        }

        /*
         * We cannot use inactive_task_timer() to invoke sub_running_bw()
         * at the 0-lag time, because the task could have been migrated
         * while SCHED_OTHER in the meanwhile.
         */
        if (p->dl.dl_non_contending)
                p->dl.dl_non_contending = 0;

        /*
         * Since this might be the only -deadline task on the rq,
         * this is the right place to try to pull some other one
         * from an overloaded CPU, if any.
         */
        if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
                return;

        deadline_queue_pull_task(rq);
}
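/*
 * The "0-lag time" used above is the instant at which the task's
 * remaining budget, consumed at its reserved bandwidth, would reach
 * zero:
 *
 *        t_0lag = deadline - runtime / (dl_runtime / dl_period)
 *
 * Worked example: with 2ms of runtime left, a 10ms/100ms reservation
 * (bandwidth 0.1) and a deadline 50ms away, the 0-lag time is
 * 50ms - 2ms/0.1 = 30ms from now; only from that point on can the
 * utilization be released without endangering other deadlines.
 */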
/*
 * When switching to -deadline, we may overload the rq, then
 * we try to push someone off, if possible.
 */
static void switched_to_dl(struct rq *rq, struct task_struct *p)
{
        if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
                put_task_struct(p);

        /*
         * In case a task is setscheduled to SCHED_DEADLINE we need to keep
         * track of that on its cpuset (for correct bandwidth tracking).
         */
        inc_dl_tasks_cs(p);

        /* If p is not queued we will update its parameters at next wakeup. */
        if (!task_on_rq_queued(p)) {
                add_rq_bw(&p->dl, &rq->dl);

                return;
        }

        if (rq->curr != p) {
#ifdef CONFIG_SMP
                if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
                        deadline_queue_push_tasks(rq);
#endif
                if (dl_task(rq->curr))
                        wakeup_preempt_dl(rq, p, 0);
                else
                        resched_curr(rq);
        } else {
                update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
        }
}

/*
 * If the scheduling parameters of a -deadline task changed,
 * a push or pull operation might be needed.
 */
static void prio_changed_dl(struct rq *rq, struct task_struct *p,
                            int oldprio)
{
        if (!task_on_rq_queued(p))
                return;

#ifdef CONFIG_SMP
        /*
         * This might be too much, but unfortunately
         * we don't have the old deadline value, and
         * we can't tell whether the task is raising
         * or lowering its prio, so...
         */
        if (!rq->dl.overloaded)
                deadline_queue_pull_task(rq);

        if (task_current(rq, p)) {
                /*
                 * If we now have an earlier deadline task than p,
                 * then reschedule, provided p is still on this
                 * runqueue.
                 */
                if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
                        resched_curr(rq);
        } else {
                /*
                 * Current may not be deadline in case p was throttled but we
                 * have just replenished it (e.g. rt_mutex_setprio()).
                 *
                 * Otherwise, if p was given an earlier deadline, reschedule.
                 */
                if (!dl_task(rq->curr) ||
                    dl_time_before(p->dl.deadline, rq->curr->dl.deadline))
                        resched_curr(rq);
        }
#else
        /*
         * We don't know if p has an earlier or later deadline, so let's
         * blindly set a (maybe not needed) rescheduling point.
         */
        resched_curr(rq);
#endif
}

#ifdef CONFIG_SCHED_CORE
static int task_is_throttled_dl(struct task_struct *p, int cpu)
{
        return p->dl.dl_throttled;
}
#endif

DEFINE_SCHED_CLASS(dl) = {

        .enqueue_task           = enqueue_task_dl,
        .dequeue_task           = dequeue_task_dl,
        .yield_task             = yield_task_dl,

        .wakeup_preempt         = wakeup_preempt_dl,

        .pick_next_task         = pick_next_task_dl,
        .put_prev_task          = put_prev_task_dl,
        .set_next_task          = set_next_task_dl,

#ifdef CONFIG_SMP
        .balance                = balance_dl,
        .pick_task              = pick_task_dl,
        .select_task_rq         = select_task_rq_dl,
        .migrate_task_rq        = migrate_task_rq_dl,
        .set_cpus_allowed       = set_cpus_allowed_dl,
        .rq_online              = rq_online_dl,
        .rq_offline             = rq_offline_dl,
        .task_woken             = task_woken_dl,
        .find_lock_rq           = find_lock_later_rq,
#endif

        .task_tick              = task_tick_dl,
        .task_fork              = task_fork_dl,

        .prio_changed           = prio_changed_dl,
        .switched_from          = switched_from_dl,
        .switched_to            = switched_to_dl,

        .update_curr            = update_curr_dl,
#ifdef CONFIG_SCHED_CORE
        .task_is_throttled      = task_is_throttled_dl,
#endif
};

/* Used for dl_bw check and update, used under sched_rt_handler()::mutex */
static u64 dl_generation;

int sched_dl_global_validate(void)
{
        u64 runtime = global_rt_runtime();
        u64 period = global_rt_period();
        u64 new_bw = to_ratio(period, runtime);
        u64 gen = ++dl_generation;
        struct dl_bw *dl_b;
        int cpu, cpus, ret = 0;
        unsigned long flags;

        /*
         * Here we want to check that the bandwidth is not being set to
         * some value smaller than the currently allocated bandwidth in
         * any of the root_domains.
         */
        for_each_possible_cpu(cpu) {
                rcu_read_lock_sched();

                if (dl_bw_visited(cpu, gen))
                        goto next;

                dl_b = dl_bw_of(cpu);
                cpus = dl_bw_cpus(cpu);

                raw_spin_lock_irqsave(&dl_b->lock, flags);
                if (new_bw * cpus < dl_b->total_bw)
                        ret = -EBUSY;
                raw_spin_unlock_irqrestore(&dl_b->lock, flags);

next:
                rcu_read_unlock_sched();

                if (ret)
                        break;
        }

        return ret;
}

static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
{
        if (global_rt_runtime() == RUNTIME_INF) {
                dl_rq->bw_ratio = 1 << RATIO_SHIFT;
                dl_rq->max_bw = dl_rq->extra_bw = 1 << BW_SHIFT;
        } else {
                dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
                                           global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
                dl_rq->max_bw = dl_rq->extra_bw =
                        to_ratio(global_rt_period(), global_rt_runtime());
        }
}

void sched_dl_do_global(void)
{
        u64 new_bw = -1;
        u64 gen = ++dl_generation;
        struct dl_bw *dl_b;
        int cpu;
        unsigned long flags;

        if (global_rt_runtime() != RUNTIME_INF)
                new_bw = to_ratio(global_rt_period(), global_rt_runtime());

        for_each_possible_cpu(cpu) {
                rcu_read_lock_sched();

                if (dl_bw_visited(cpu, gen)) {
                        rcu_read_unlock_sched();
                        continue;
                }

                dl_b = dl_bw_of(cpu);

                raw_spin_lock_irqsave(&dl_b->lock, flags);
                dl_b->bw = new_bw;
                raw_spin_unlock_irqrestore(&dl_b->lock, flags);

                rcu_read_unlock_sched();
                init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
        }
}

/*
 * We must be sure that accepting a new task (or allowing changing the
 * parameters of an existing one) is consistent with the bandwidth
 * constraints. If yes, this function also accordingly updates the currently
 * allocated bandwidth to reflect the new situation.
 *
 * This function is called while holding p's rq->lock.
 */
int sched_dl_overflow(struct task_struct *p, int policy,
                      const struct sched_attr *attr)
{
        u64 period = attr->sched_period ?: attr->sched_deadline;
        u64 runtime = attr->sched_runtime;
        u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
        int cpus, err = -1, cpu = task_cpu(p);
        struct dl_bw *dl_b = dl_bw_of(cpu);
        unsigned long cap;

        if (attr->sched_flags & SCHED_FLAG_SUGOV)
                return 0;

        /* !deadline task may carry old deadline bandwidth */
        if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
                return 0;

        /*
         * Whether a task enters, leaves, or stays -deadline but changes
         * its parameters, we may need to update the container's total
         * allocated bandwidth accordingly.
         */
        raw_spin_lock(&dl_b->lock);
        cpus = dl_bw_cpus(cpu);
        cap = dl_bw_capacity(cpu);

        if (dl_policy(policy) && !task_has_dl_policy(p) &&
            !__dl_overflow(dl_b, cap, 0, new_bw)) {
                if (hrtimer_active(&p->dl.inactive_timer))
                        __dl_sub(dl_b, p->dl.dl_bw, cpus);
                __dl_add(dl_b, new_bw, cpus);
                err = 0;
        } else if (dl_policy(policy) && task_has_dl_policy(p) &&
                   !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) {
                /*
                 * XXX this is slightly incorrect: when the task
                 * utilization decreases, we should delay the total
                 * utilization change until the task's 0-lag point.
                 * But this would require to set the task's "inactive
                 * timer" when the task is not inactive.
                 */
                __dl_sub(dl_b, p->dl.dl_bw, cpus);
                __dl_add(dl_b, new_bw, cpus);
                dl_change_utilization(p, new_bw);
                err = 0;
        } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
                /*
                 * Do not decrease the total deadline utilization here,
                 * switched_from_dl() will take care to do it at the correct
                 * (0-lag) time.
                 */
                err = 0;
        }
        raw_spin_unlock(&dl_b->lock);

        return err;
}
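/*
 * Worked example for the admission test above, roughly speaking
 * (capacity scaling aside), under the default sched_rt_period_us /
 * sched_rt_runtime_us of 1000000/950000: each CPU contributes 0.95 of
 * bandwidth, so a 4-CPU root domain admits up to 3.8 in total. A task
 * with runtime/period = 10ms/100ms asks for new_bw = 0.1 (both stored
 * as fixed point, << BW_SHIFT) and is accepted iff
 * total_bw + 0.1 <= 3.8.
 */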
/*
 * This function initializes the sched_dl_entity of a newly becoming
 * SCHED_DEADLINE task.
 *
 * Only the static values are considered here, the actual runtime and the
 * absolute deadline will be properly calculated when the task is enqueued
 * for the first time with its new policy.
 */
void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
{
        struct sched_dl_entity *dl_se = &p->dl;

        dl_se->dl_runtime = attr->sched_runtime;
        dl_se->dl_deadline = attr->sched_deadline;
        dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
        dl_se->flags = attr->sched_flags & SCHED_DL_FLAGS;
        dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
        dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
}

void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
{
        struct sched_dl_entity *dl_se = &p->dl;

        attr->sched_priority = p->rt_priority;
        attr->sched_runtime = dl_se->dl_runtime;
        attr->sched_deadline = dl_se->dl_deadline;
        attr->sched_period = dl_se->dl_period;
        attr->sched_flags &= ~SCHED_DL_FLAGS;
        attr->sched_flags |= dl_se->flags;
}

/*
 * This function validates the new parameters of a -deadline task.
 * We ask for the deadline to be non-zero and greater than or equal to
 * the runtime, and for the period to be either zero or greater than or
 * equal to the deadline. Furthermore, we have to be sure that
 * user parameters are above the internal resolution of 1us (we
 * check sched_runtime only since it is always the smaller one) and
 * below 2^63 ns (we have to check both sched_deadline and
 * sched_period, as the latter can be zero).
 */
bool __checkparam_dl(const struct sched_attr *attr)
{
        u64 period, max, min;

        /* special dl tasks don't actually use any parameter */
        if (attr->sched_flags & SCHED_FLAG_SUGOV)
                return true;

        /* deadline != 0 */
        if (attr->sched_deadline == 0)
                return false;

        /*
         * Since we truncate DL_SCALE bits, make sure we're at least
         * that big.
         */
        if (attr->sched_runtime < (1ULL << DL_SCALE))
                return false;

        /*
         * Since we use the MSB for wrap-around and sign issues, make
         * sure it's not set (mind that period can be equal to zero).
         */
        if (attr->sched_deadline & (1ULL << 63) ||
            attr->sched_period & (1ULL << 63))
                return false;

        period = attr->sched_period;
        if (!period)
                period = attr->sched_deadline;

        /* runtime <= deadline <= period (if period != 0) */
        if (period < attr->sched_deadline ||
            attr->sched_deadline < attr->sched_runtime)
                return false;

        max = (u64)READ_ONCE(sysctl_sched_dl_period_max) * NSEC_PER_USEC;
        min = (u64)READ_ONCE(sysctl_sched_dl_period_min) * NSEC_PER_USEC;

        if (period < min || period > max)
                return false;

        return true;
}

/*
 * This function clears the sched_dl_entity static params.
 */
static void __dl_clear_params(struct sched_dl_entity *dl_se)
{
        dl_se->dl_runtime = 0;
        dl_se->dl_deadline = 0;
        dl_se->dl_period = 0;
        dl_se->flags = 0;
        dl_se->dl_bw = 0;
        dl_se->dl_density = 0;

        dl_se->dl_throttled = 0;
        dl_se->dl_yielded = 0;
        dl_se->dl_non_contending = 0;
        dl_se->dl_overrun = 0;
        dl_se->dl_server = 0;

#ifdef CONFIG_RT_MUTEXES
        dl_se->pi_se = dl_se;
#endif
}

void init_dl_entity(struct sched_dl_entity *dl_se)
{
        RB_CLEAR_NODE(&dl_se->rb_node);
        init_dl_task_timer(dl_se);
        init_dl_inactive_task_timer(dl_se);
        __dl_clear_params(dl_se);
}

bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
{
        struct sched_dl_entity *dl_se = &p->dl;

        if (dl_se->dl_runtime != attr->sched_runtime ||
            dl_se->dl_deadline != attr->sched_deadline ||
            dl_se->dl_period != attr->sched_period ||
            dl_se->flags != (attr->sched_flags & SCHED_DL_FLAGS))
                return true;

        return false;
}

#ifdef CONFIG_SMP
int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
                                 const struct cpumask *trial)
{
        unsigned long flags, cap;
        struct dl_bw *cur_dl_b;
        int ret = 1;

        rcu_read_lock_sched();
        cur_dl_b = dl_bw_of(cpumask_any(cur));
        cap = __dl_bw_capacity(trial);
        raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
        if (__dl_overflow(cur_dl_b, cap, 0, 0))
                ret = 0;
        raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
        rcu_read_unlock_sched();

        return ret;
}

enum dl_bw_request {
        dl_bw_req_check_overflow = 0,
        dl_bw_req_alloc,
        dl_bw_req_free
};

static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw)
{
        unsigned long flags;
        struct dl_bw *dl_b;
        bool overflow = false;

        rcu_read_lock_sched();
        dl_b = dl_bw_of(cpu);
        raw_spin_lock_irqsave(&dl_b->lock, flags);

        if (req == dl_bw_req_free) {
                __dl_sub(dl_b, dl_bw, dl_bw_cpus(cpu));
        } else {
                unsigned long cap = dl_bw_capacity(cpu);

                overflow = __dl_overflow(dl_b, cap, 0, dl_bw);

                if (req == dl_bw_req_alloc && !overflow) {
                        /*
                         * We reserve space in the destination
                         * root_domain, as we can't fail after this point.
                         * We will free resources in the source root_domain
                         * later on (see set_cpus_allowed_dl()).
                         */
                        __dl_add(dl_b, dl_bw, dl_bw_cpus(cpu));
                }
        }

        raw_spin_unlock_irqrestore(&dl_b->lock, flags);
        rcu_read_unlock_sched();

        return overflow ? -EBUSY : 0;
}

int dl_bw_check_overflow(int cpu)
{
        return dl_bw_manage(dl_bw_req_check_overflow, cpu, 0);
}

int dl_bw_alloc(int cpu, u64 dl_bw)
{
        return dl_bw_manage(dl_bw_req_alloc, cpu, dl_bw);
}

void dl_bw_free(int cpu, u64 dl_bw)
{
        dl_bw_manage(dl_bw_req_free, cpu, dl_bw);
}
#endif

#ifdef CONFIG_SCHED_DEBUG
void print_dl_stats(struct seq_file *m, int cpu)
{
        print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
}
#endif /* CONFIG_SCHED_DEBUG */
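/*
 * For completeness, a minimal userspace program that passes the
 * __checkparam_dl() constraints above (runtime <= deadline <= period,
 * runtime >= 1us, period within the sysctl bounds). Illustrative sketch
 * only; glibc has no sched_setattr() wrapper, hence the raw syscall:
 *
 *        #define _GNU_SOURCE
 *        #include <linux/sched.h>
 *        #include <linux/sched/types.h>
 *        #include <sys/syscall.h>
 *        #include <unistd.h>
 *
 *        int main(void)
 *        {
 *                struct sched_attr attr = {
 *                        .size           = sizeof(attr),
 *                        .sched_policy   = SCHED_DEADLINE,
 *                        .sched_runtime  =  10 * 1000 * 1000,    //  10ms
 *                        .sched_deadline =  30 * 1000 * 1000,    //  30ms
 *                        .sched_period   = 100 * 1000 * 1000,    // 100ms
 *                };
 *
 *                if (syscall(SYS_sched_setattr, 0, &attr, 0))
 *                        return 1;       // e.g. -EBUSY: admission failed
 *
 *                for (;;) {
 *                        // do one instance of work, then:
 *                        sched_yield();
 *                }
 *        }
 */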