// SPDX-License-Identifier: GPL-2.0
/*
 * Deadline Scheduling Class (SCHED_DEADLINE)
 *
 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
 *
 * Tasks that periodically execute their instances for less than their
 * runtime won't miss any of their deadlines.
 * Tasks that are not periodic or sporadic or that try to execute more
 * than their reserved bandwidth will be slowed down (and may potentially
 * miss some of their deadlines), and won't affect any other task.
 *
 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
 *                    Juri Lelli <juri.lelli@gmail.com>,
 *                    Michael Trimarchi <michael@amarulasolutions.com>,
 *                    Fabio Checconi <fchecconi@gmail.com>
 */

#include <linux/cpuset.h>

/*
 * Default limits for DL period; on the top end we guard against small util
 * tasks still getting ridiculously long effective runtimes, on the bottom
 * end we guard against timer DoS.
 */
static unsigned int sysctl_sched_dl_period_max = 1 << 22; /* ~4 seconds */
static unsigned int sysctl_sched_dl_period_min = 100;     /* 100 us */
#ifdef CONFIG_SYSCTL
static struct ctl_table sched_dl_sysctls[] = {
	{
		.procname	= "sched_deadline_period_max_us",
		.data		= &sysctl_sched_dl_period_max,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_douintvec_minmax,
		.extra1		= (void *)&sysctl_sched_dl_period_min,
	},
	{
		.procname	= "sched_deadline_period_min_us",
		.data		= &sysctl_sched_dl_period_min,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_douintvec_minmax,
		.extra2		= (void *)&sysctl_sched_dl_period_max,
	},
};

static int __init sched_dl_sysctl_init(void)
{
	register_sysctl_init("kernel", sched_dl_sysctls);
	return 0;
}
late_initcall(sched_dl_sysctl_init);
#endif

static bool dl_server(struct sched_dl_entity *dl_se)
{
	return dl_se->dl_server;
}

static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
{
	BUG_ON(dl_server(dl_se));
	return container_of(dl_se, struct task_struct, dl);
}

static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
	return container_of(dl_rq, struct rq, dl);
}

static inline struct rq *rq_of_dl_se(struct sched_dl_entity *dl_se)
{
	struct rq *rq = dl_se->rq;

	if (!dl_server(dl_se))
		rq = task_rq(dl_task_of(dl_se));

	return rq;
}

static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
{
	return &rq_of_dl_se(dl_se)->dl;
}

static inline int on_dl_rq(struct sched_dl_entity *dl_se)
{
	return !RB_EMPTY_NODE(&dl_se->rb_node);
}

#ifdef CONFIG_RT_MUTEXES
static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
{
	return dl_se->pi_se;
}

static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
{
	return pi_of(dl_se) != dl_se;
}
#else
static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
{
	return dl_se;
}

static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
{
	return false;
}
#endif

#ifdef CONFIG_SMP
static inline struct dl_bw *dl_bw_of(int i)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
			 "sched RCU must be held");
	return &cpu_rq(i)->rd->dl_bw;
}

static inline int dl_bw_cpus(int i)
{
	struct root_domain *rd = cpu_rq(i)->rd;
	int cpus;

	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
			 "sched RCU must be held");
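	/*
	 * Editorial note: if every CPU in this root domain is still active,
	 * the span weight is the number of CPUs to account for; otherwise
	 * fall back to counting only the active CPUs in the span.
	 */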

	if (cpumask_subset(rd->span, cpu_active_mask))
		return cpumask_weight(rd->span);

	cpus = 0;

	for_each_cpu_and(i, rd->span, cpu_active_mask)
		cpus++;

	return cpus;
}

static inline unsigned long __dl_bw_capacity(const struct cpumask *mask)
{
	unsigned long cap = 0;
	int i;

	for_each_cpu_and(i, mask, cpu_active_mask)
		cap += arch_scale_cpu_capacity(i);

	return cap;
}

/*
 * XXX Fix: If 'rq->rd == def_root_domain' perform AC against capacity
 * of the CPU the task is running on rather than rd's \Sum CPU capacity.
 */
static inline unsigned long dl_bw_capacity(int i)
{
	if (!sched_asym_cpucap_active() &&
	    arch_scale_cpu_capacity(i) == SCHED_CAPACITY_SCALE) {
		return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT;
	} else {
		RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
				 "sched RCU must be held");

		return __dl_bw_capacity(cpu_rq(i)->rd->span);
	}
}

static inline bool dl_bw_visited(int cpu, u64 gen)
{
	struct root_domain *rd = cpu_rq(cpu)->rd;

	if (rd->visit_gen == gen)
		return true;

	rd->visit_gen = gen;
	return false;
}

static inline
void __dl_update(struct dl_bw *dl_b, s64 bw)
{
	struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
	int i;

	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
			 "sched RCU must be held");
	for_each_cpu_and(i, rd->span, cpu_active_mask) {
		struct rq *rq = cpu_rq(i);

		rq->dl.extra_bw += bw;
	}
}
#else
static inline struct dl_bw *dl_bw_of(int i)
{
	return &cpu_rq(i)->dl.dl_bw;
}

static inline int dl_bw_cpus(int i)
{
	return 1;
}

static inline unsigned long dl_bw_capacity(int i)
{
	return SCHED_CAPACITY_SCALE;
}

static inline bool dl_bw_visited(int cpu, u64 gen)
{
	return false;
}

static inline
void __dl_update(struct dl_bw *dl_b, s64 bw)
{
	struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);

	dl->extra_bw += bw;
}
#endif

static inline
void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
	dl_b->total_bw -= tsk_bw;
	__dl_update(dl_b, (s32)tsk_bw / cpus);
}

static inline
void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
	dl_b->total_bw += tsk_bw;
	__dl_update(dl_b, -((s32)tsk_bw / cpus));
}

static inline bool
__dl_overflow(struct dl_bw *dl_b, unsigned long cap, u64 old_bw, u64 new_bw)
{
	return dl_b->bw != -1 &&
	       cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw;
}

static inline
void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	u64 old = dl_rq->running_bw;

	lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
	dl_rq->running_bw += dl_bw;
	SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
	cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
}

static inline
void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	u64 old = dl_rq->running_bw;

	lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
	dl_rq->running_bw -= dl_bw;
	SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
	if (dl_rq->running_bw > old)
		dl_rq->running_bw = 0;
	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
	cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
}

static inline
void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	u64 old = dl_rq->this_bw;

	lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
	dl_rq->this_bw += dl_bw;
	SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
}

static inline
void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	u64 old = dl_rq->this_bw;

	lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
	dl_rq->this_bw -= dl_bw;
	SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
	if (dl_rq->this_bw > old)
		dl_rq->this_bw = 0;
	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
}

static inline
void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	if (!dl_entity_is_special(dl_se))
		__add_rq_bw(dl_se->dl_bw, dl_rq);
}

static inline
void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	if (!dl_entity_is_special(dl_se))
		__sub_rq_bw(dl_se->dl_bw, dl_rq);
}

static inline
void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	if (!dl_entity_is_special(dl_se))
		__add_running_bw(dl_se->dl_bw, dl_rq);
}

static inline
void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	if (!dl_entity_is_special(dl_se))
		__sub_running_bw(dl_se->dl_bw, dl_rq);
}

static void dl_rq_change_utilization(struct rq *rq, struct sched_dl_entity *dl_se, u64 new_bw)
{
	if (dl_se->dl_non_contending) {
		sub_running_bw(dl_se, &rq->dl);
		dl_se->dl_non_contending = 0;

		/*
		 * If the timer handler is currently running and the
		 * timer cannot be canceled, inactive_task_timer()
		 * will see that dl_non_contending is not set, and
		 * will not touch the rq's active utilization,
		 * so we are still safe.
		 */
		if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1) {
			if (!dl_server(dl_se))
				put_task_struct(dl_task_of(dl_se));
		}
	}
	__sub_rq_bw(dl_se->dl_bw, &rq->dl);
	__add_rq_bw(new_bw, &rq->dl);
}

static void dl_change_utilization(struct task_struct *p, u64 new_bw)
{
	WARN_ON_ONCE(p->dl.flags & SCHED_FLAG_SUGOV);

	if (task_on_rq_queued(p))
		return;

	dl_rq_change_utilization(task_rq(p), &p->dl, new_bw);
}

static void __dl_clear_params(struct sched_dl_entity *dl_se);

/*
 * The utilization of a task cannot be immediately removed from
 * the rq active utilization (running_bw) when the task blocks.
 * Instead, we have to wait for the so called "0-lag time".
 *
 * If a task blocks before the "0-lag time", a timer (the inactive
 * timer) is armed, and running_bw is decreased when the timer
 * fires.
 *
 * If the task wakes up again before the inactive timer fires,
 * the timer is canceled, whereas if the task wakes up after the
 * inactive timer fired (and running_bw has been decreased) the
 * task's utilization has to be added to running_bw again.
 * A flag in the deadline scheduling entity (dl_non_contending)
 * is used to avoid race conditions between the inactive timer handler
 * and task wakeups.
 *
 * The following diagram shows how running_bw is updated. A task is
 * "ACTIVE" when its utilization contributes to running_bw; an
 * "ACTIVE contending" task is in the TASK_RUNNING state, while an
 * "ACTIVE non contending" task is a blocked task for which the "0-lag time"
 * has not passed yet.
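 * (As a concrete reference, the "0-lag time" of an entity that blocks with
 * 'runtime' budget left and absolute deadline 'deadline' is computed in
 * task_non_contending() below as deadline - runtime * dl_period / dl_runtime.)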
An "INACTIVE" task is a task for which the "0-lag" 379 * time already passed, which does not contribute to running_bw anymore. 380 * +------------------+ 381 * wakeup | ACTIVE | 382 * +------------------>+ contending | 383 * | add_running_bw | | 384 * | +----+------+------+ 385 * | | ^ 386 * | dequeue | | 387 * +--------+-------+ | | 388 * | | t >= 0-lag | | wakeup 389 * | INACTIVE |<---------------+ | 390 * | | sub_running_bw | | 391 * +--------+-------+ | | 392 * ^ | | 393 * | t < 0-lag | | 394 * | | | 395 * | V | 396 * | +----+------+------+ 397 * | sub_running_bw | ACTIVE | 398 * +-------------------+ | 399 * inactive timer | non contending | 400 * fired +------------------+ 401 * 402 * The task_non_contending() function is invoked when a task 403 * blocks, and checks if the 0-lag time already passed or 404 * not (in the first case, it directly updates running_bw; 405 * in the second case, it arms the inactive timer). 406 * 407 * The task_contending() function is invoked when a task wakes 408 * up, and checks if the task is still in the "ACTIVE non contending" 409 * state or not (in the second case, it updates running_bw). 410 */ 411 static void task_non_contending(struct sched_dl_entity *dl_se) 412 { 413 struct hrtimer *timer = &dl_se->inactive_timer; 414 struct rq *rq = rq_of_dl_se(dl_se); 415 struct dl_rq *dl_rq = &rq->dl; 416 s64 zerolag_time; 417 418 /* 419 * If this is a non-deadline task that has been boosted, 420 * do nothing 421 */ 422 if (dl_se->dl_runtime == 0) 423 return; 424 425 if (dl_entity_is_special(dl_se)) 426 return; 427 428 WARN_ON(dl_se->dl_non_contending); 429 430 zerolag_time = dl_se->deadline - 431 div64_long((dl_se->runtime * dl_se->dl_period), 432 dl_se->dl_runtime); 433 434 /* 435 * Using relative times instead of the absolute "0-lag time" 436 * allows to simplify the code 437 */ 438 zerolag_time -= rq_clock(rq); 439 440 /* 441 * If the "0-lag time" already passed, decrease the active 442 * utilization now, instead of starting a timer 443 */ 444 if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) { 445 if (dl_server(dl_se)) { 446 sub_running_bw(dl_se, dl_rq); 447 } else { 448 struct task_struct *p = dl_task_of(dl_se); 449 450 if (dl_task(p)) 451 sub_running_bw(dl_se, dl_rq); 452 453 if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) { 454 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); 455 456 if (READ_ONCE(p->__state) == TASK_DEAD) 457 sub_rq_bw(dl_se, &rq->dl); 458 raw_spin_lock(&dl_b->lock); 459 __dl_sub(dl_b, dl_se->dl_bw, dl_bw_cpus(task_cpu(p))); 460 raw_spin_unlock(&dl_b->lock); 461 __dl_clear_params(dl_se); 462 } 463 } 464 465 return; 466 } 467 468 dl_se->dl_non_contending = 1; 469 if (!dl_server(dl_se)) 470 get_task_struct(dl_task_of(dl_se)); 471 472 hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD); 473 } 474 475 static void task_contending(struct sched_dl_entity *dl_se, int flags) 476 { 477 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); 478 479 /* 480 * If this is a non-deadline task that has been boosted, 481 * do nothing 482 */ 483 if (dl_se->dl_runtime == 0) 484 return; 485 486 if (flags & ENQUEUE_MIGRATED) 487 add_rq_bw(dl_se, dl_rq); 488 489 if (dl_se->dl_non_contending) { 490 dl_se->dl_non_contending = 0; 491 /* 492 * If the timer handler is currently running and the 493 * timer cannot be canceled, inactive_task_timer() 494 * will see that dl_not_contending is not set, and 495 * will not touch the rq's active utilization, 496 * so we are still safe. 
		 */
		if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1) {
			if (!dl_server(dl_se))
				put_task_struct(dl_task_of(dl_se));
		}
	} else {
		/*
		 * Since "dl_non_contending" is not set, the
		 * task's utilization has already been removed from
		 * active utilization (either when the task blocked,
		 * or when the "inactive timer" fired).
		 * So, add it back.
		 */
		add_running_bw(dl_se, dl_rq);
	}
}

static inline int is_leftmost(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	return rb_first_cached(&dl_rq->root) == &dl_se->rb_node;
}

static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);

void init_dl_bw(struct dl_bw *dl_b)
{
	raw_spin_lock_init(&dl_b->lock);
	if (global_rt_runtime() == RUNTIME_INF)
		dl_b->bw = -1;
	else
		dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
	dl_b->total_bw = 0;
}

void init_dl_rq(struct dl_rq *dl_rq)
{
	dl_rq->root = RB_ROOT_CACHED;

#ifdef CONFIG_SMP
	/* zero means no -deadline tasks */
	dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;

	dl_rq->overloaded = 0;
	dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
#else
	init_dl_bw(&dl_rq->dl_bw);
#endif

	dl_rq->running_bw = 0;
	dl_rq->this_bw = 0;
	init_dl_rq_bw_ratio(dl_rq);
}

#ifdef CONFIG_SMP

static inline int dl_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->dlo_count);
}

static inline void dl_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
	/*
	 * Must be visible before the overload count is
	 * set (as in sched_rt.c).
	 *
	 * Matched by the barrier in pull_dl_task().
	 */
	smp_wmb();
	atomic_inc(&rq->rd->dlo_count);
}

static inline void dl_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	atomic_dec(&rq->rd->dlo_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
}

#define __node_2_pdl(node) \
	rb_entry((node), struct task_struct, pushable_dl_tasks)

static inline bool __pushable_less(struct rb_node *a, const struct rb_node *b)
{
	return dl_entity_preempt(&__node_2_pdl(a)->dl, &__node_2_pdl(b)->dl);
}

static inline int has_pushable_dl_tasks(struct rq *rq)
{
	return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
}

/*
 * The list of pushable -deadline tasks is not a plist, like in
 * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
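 * The cached leftmost node is therefore the pushable task with the
 * earliest deadline; its deadline is mirrored into rq->dl.earliest_dl.next
 * by the enqueue/dequeue helpers below.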
 */
static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
	struct rb_node *leftmost;

	WARN_ON_ONCE(!RB_EMPTY_NODE(&p->pushable_dl_tasks));

	leftmost = rb_add_cached(&p->pushable_dl_tasks,
				 &rq->dl.pushable_dl_tasks_root,
				 __pushable_less);
	if (leftmost)
		rq->dl.earliest_dl.next = p->dl.deadline;

	if (!rq->dl.overloaded) {
		dl_set_overload(rq);
		rq->dl.overloaded = 1;
	}
}

static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
	struct dl_rq *dl_rq = &rq->dl;
	struct rb_root_cached *root = &dl_rq->pushable_dl_tasks_root;
	struct rb_node *leftmost;

	if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
		return;

	leftmost = rb_erase_cached(&p->pushable_dl_tasks, root);
	if (leftmost)
		dl_rq->earliest_dl.next = __node_2_pdl(leftmost)->dl.deadline;

	RB_CLEAR_NODE(&p->pushable_dl_tasks);

	if (!has_pushable_dl_tasks(rq) && rq->dl.overloaded) {
		dl_clear_overload(rq);
		rq->dl.overloaded = 0;
	}
}

static int push_dl_task(struct rq *rq);

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
	return rq->online && dl_task(prev);
}

static DEFINE_PER_CPU(struct balance_callback, dl_push_head);
static DEFINE_PER_CPU(struct balance_callback, dl_pull_head);

static void push_dl_tasks(struct rq *);
static void pull_dl_task(struct rq *);

static inline void deadline_queue_push_tasks(struct rq *rq)
{
	if (!has_pushable_dl_tasks(rq))
		return;

	queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
}

static inline void deadline_queue_pull_task(struct rq *rq)
{
	queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
}

static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);

static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
{
	struct rq *later_rq = NULL;
	struct dl_bw *dl_b;

	later_rq = find_lock_later_rq(p, rq);
	if (!later_rq) {
		int cpu;

		/*
		 * If we cannot preempt any rq, fall back to pick any
		 * online CPU:
		 */
		cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
		if (cpu >= nr_cpu_ids) {
			/*
			 * Failed to find any suitable CPU.
			 * The task will never come back!
			 */
			WARN_ON_ONCE(dl_bandwidth_enabled());

			/*
			 * If admission control is disabled we
			 * try a little harder to let the task
			 * run.
			 */
			cpu = cpumask_any(cpu_active_mask);
		}
		later_rq = cpu_rq(cpu);
		double_lock_balance(rq, later_rq);
	}

	if (p->dl.dl_non_contending || p->dl.dl_throttled) {
		/*
		 * Inactive timer is armed (or callback is running, but
		 * waiting for us to release rq locks). In any case, when it
		 * fires (or continues), it will see running_bw of this
		 * task migrated to later_rq (and correctly handle it).
		 */
		sub_running_bw(&p->dl, &rq->dl);
		sub_rq_bw(&p->dl, &rq->dl);

		add_rq_bw(&p->dl, &later_rq->dl);
		add_running_bw(&p->dl, &later_rq->dl);
	} else {
		sub_rq_bw(&p->dl, &rq->dl);
		add_rq_bw(&p->dl, &later_rq->dl);
	}

	/*
	 * And we finally need to fix up root_domain(s) bandwidth accounting,
	 * since p is still hanging out in the old (now moved to default) root
	 * domain.
	 */
	dl_b = &rq->rd->dl_bw;
	raw_spin_lock(&dl_b->lock);
	__dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
	raw_spin_unlock(&dl_b->lock);

	dl_b = &later_rq->rd->dl_bw;
	raw_spin_lock(&dl_b->lock);
	__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
	raw_spin_unlock(&dl_b->lock);

	set_task_cpu(p, later_rq->cpu);
	double_unlock_balance(later_rq, rq);

	return later_rq;
}

#else

static inline
void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline
void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline void deadline_queue_push_tasks(struct rq *rq)
{
}

static inline void deadline_queue_pull_task(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

static void
enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags);
static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void dequeue_dl_entity(struct sched_dl_entity *dl_se, int flags);
static void wakeup_preempt_dl(struct rq *rq, struct task_struct *p, int flags);

static inline void replenish_dl_new_period(struct sched_dl_entity *dl_se,
					    struct rq *rq)
{
	/* for non-boosted task, pi_of(dl_se) == dl_se */
	dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
	dl_se->runtime = pi_of(dl_se)->dl_runtime;

	/*
	 * If it is a deferred reservation, and the server
	 * is not handling a starvation case, defer it.
	 */
	if (dl_se->dl_defer && !dl_se->dl_defer_running) {
		dl_se->dl_throttled = 1;
		dl_se->dl_defer_armed = 1;
	}
}

/*
 * We are being explicitly informed that a new instance is starting,
 * and this means that:
 *  - the absolute deadline of the entity has to be placed at
 *    current time + relative deadline;
 *  - the runtime of the entity has to be set to the maximum value.
 *
 * The capability of specifying such an event is useful whenever a -deadline
 * entity wants to (try to!) synchronize its behaviour with the scheduler's
 * one, and to (try to!) reconcile itself with its own scheduling
 * parameters.
 */
static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	WARN_ON(is_dl_boosted(dl_se));
	WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));

	/*
	 * We are racing with the deadline timer. So, do nothing because
	 * the deadline timer handler will take care of properly recharging
	 * the runtime and postponing the deadline
	 */
	if (dl_se->dl_throttled)
		return;

	/*
	 * We use the regular wall clock time to set deadlines in the
	 * future; in fact, we must consider execution overheads (time
	 * spent on hardirq context, etc.).
	 */
	replenish_dl_new_period(dl_se, rq);
}

static int start_dl_timer(struct sched_dl_entity *dl_se);
static bool dl_entity_overflow(struct sched_dl_entity *dl_se, u64 t);

/*
 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 * possibility of an entity lasting more than what it declared, and thus
 * exhausting its runtime.
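 *
 * (Illustrative numbers for the replenishment rule implemented below: an
 * entity with dl_runtime = 10ms and dl_period = 100ms that ends up 15ms in
 * debt has its deadline pushed two periods ahead while 2 * 10ms of runtime
 * is added back, leaving 5ms of budget.)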
 *
 * Here we are interested in making runtime overrun possible, but we do
 * not want an entity which is misbehaving to affect the scheduling of all
 * other entities.
 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 * is used, in order to confine each entity within its own bandwidth.
 *
 * This function deals exactly with that, and ensures that when the runtime
 * of an entity is replenished, its deadline is also postponed. That ensures
 * the overrunning entity can't interfere with other entities in the system and
 * can't make them miss their deadlines. Reasons why this kind of overrun
 * could happen are, typically, an entity voluntarily trying to exceed its
 * runtime, or having underestimated it during sched_setattr().
 */
static void replenish_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	WARN_ON_ONCE(pi_of(dl_se)->dl_runtime <= 0);

	/*
	 * This could be the case for a !-dl task that is boosted.
	 * Just go with full inherited parameters.
	 *
	 * Or, it could be the case of a deferred reservation that
	 * was not able to consume its runtime in the background and
	 * reached this point with current u > U.
	 *
	 * In both cases, set a new period.
	 */
	if (dl_se->dl_deadline == 0 ||
	    (dl_se->dl_defer_armed && dl_entity_overflow(dl_se, rq_clock(rq)))) {
		dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
		dl_se->runtime = pi_of(dl_se)->dl_runtime;
	}

	if (dl_se->dl_yielded && dl_se->runtime > 0)
		dl_se->runtime = 0;

	/*
	 * We keep moving the deadline away until we get some
	 * available runtime for the entity. This ensures correct
	 * handling of situations where the runtime overrun is
	 * arbitrarily large.
	 */
	while (dl_se->runtime <= 0) {
		dl_se->deadline += pi_of(dl_se)->dl_period;
		dl_se->runtime += pi_of(dl_se)->dl_runtime;
	}

	/*
	 * At this point, the deadline really should be "in
	 * the future" with respect to rq->clock. If it's
	 * not, we are, for some reason, lagging too much!
	 * Anyway, after having warned userspace about that,
	 * we still try to keep things running by
	 * resetting the deadline and the budget of the
	 * entity.
	 */
	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
		printk_deferred_once("sched: DL replenish lagged too much\n");
		replenish_dl_new_period(dl_se, rq);
	}

	if (dl_se->dl_yielded)
		dl_se->dl_yielded = 0;
	if (dl_se->dl_throttled)
		dl_se->dl_throttled = 0;

	/*
	 * If this is the replenishment of a deferred reservation,
	 * clear the flag and return.
	 */
	if (dl_se->dl_defer_armed) {
		dl_se->dl_defer_armed = 0;
		return;
	}

	/*
	 * At this point, if the deferred server is not armed and the deadline
	 * is in the future, and it is not running already, throttle the server
	 * and arm the defer timer.
	 */
	if (dl_se->dl_defer && !dl_se->dl_defer_running &&
	    dl_time_before(rq_clock(dl_se->rq), dl_se->deadline - dl_se->runtime)) {
		if (!is_dl_boosted(dl_se) && dl_se->server_has_tasks(dl_se)) {

			/*
			 * Set dl_se->dl_defer_armed and dl_throttled variables to
			 * inform the start_dl_timer() that this is a deferred
			 * activation.
			 */
			dl_se->dl_defer_armed = 1;
			dl_se->dl_throttled = 1;
			if (!start_dl_timer(dl_se)) {
				/*
				 * If for whatever reason (delays), a previous timer was
				 * queued but not serviced, cancel it and clean the
				 * deferrable server variables intended for start_dl_timer().
				 */
				hrtimer_try_to_cancel(&dl_se->dl_timer);
				dl_se->dl_defer_armed = 0;
				dl_se->dl_throttled = 0;
			}
		}
	}
}

/*
 * Here we check if --at time t-- an entity (which is probably being
 * [re]activated or, in general, enqueued) can use its remaining runtime
 * and its current deadline _without_ exceeding the bandwidth it is
 * assigned (function returns true if it can't). We are in fact applying
 * one of the CBS rules: when a task wakes up, if the residual runtime
 * over residual deadline fits within the allocated bandwidth, then we
 * can keep the current (absolute) deadline and residual budget without
 * disrupting the schedulability of the system. Otherwise, we should
 * refill the runtime and set the deadline a period in the future,
 * because keeping the current (absolute) deadline of the task would
 * result in breaking guarantees promised to other tasks (refer to
 * Documentation/scheduler/sched-deadline.rst for more information).
 *
 * This function returns true if:
 *
 *   runtime / (deadline - t) > dl_runtime / dl_deadline ,
 *
 * IOW we can't recycle current parameters.
 *
 * Notice that the bandwidth check is done against the deadline. For
 * tasks with deadline equal to period this is the same as using
 * dl_period instead of dl_deadline in the equation above.
 */
static bool dl_entity_overflow(struct sched_dl_entity *dl_se, u64 t)
{
	u64 left, right;

	/*
	 * left and right are the two sides of the equation above,
	 * after a bit of shuffling to use multiplications instead
	 * of divisions.
	 *
	 * Note that none of the time values involved in the two
	 * multiplications are absolute: dl_deadline and dl_runtime
	 * are the relative deadline and the maximum runtime of each
	 * instance, runtime is the runtime left for the last instance
	 * and (deadline - t), since t is rq->clock, is the time left
	 * to the (absolute) deadline. Even if overflowing the u64 type
	 * is very unlikely to occur in both cases, here we scale down
	 * as we want to avoid that risk at all. Scaling down by 10
	 * means that we reduce granularity to 1us. We are fine with it,
	 * since this is only a true/false check and, anyway, thinking
	 * of anything below microseconds resolution is actually fiction
	 * (but still we want to give the user that illusion >;).
	 */
	left = (pi_of(dl_se)->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
	right = ((dl_se->deadline - t) >> DL_SCALE) *
		(pi_of(dl_se)->dl_runtime >> DL_SCALE);

	return dl_time_before(right, left);
}

/*
 * Revised wakeup rule [1]: For self-suspending tasks, rather than
 * re-initializing the task's runtime and deadline, the revised wakeup
 * rule adjusts the task's runtime to avoid the task overrunning its
 * density.
 *
 * Reasoning: a task may overrun the density if:
 *	runtime / (deadline - t) > dl_runtime / dl_deadline
 *
 * Therefore, runtime can be adjusted to:
 *	runtime = (dl_runtime / dl_deadline) * (deadline - t)
 *
 * In this way, runtime will be equal to the maximum density
 * the task can use without breaking any rule.
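 *
 * (Purely illustrative numbers: a task with dl_runtime = 5ms and
 * dl_deadline = 10ms that wakes up 4ms before its current absolute
 * deadline gets runtime = (5 / 10) * 4ms = 2ms, i.e. exactly the budget
 * that keeps it at its reserved density.)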
 *
 * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
 *     bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
 */
static void
update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
{
	u64 laxity = dl_se->deadline - rq_clock(rq);

	/*
	 * If the task has deadline < period, and the deadline is in the past,
	 * it should already be throttled before this check.
	 *
	 * See update_dl_entity() comments for further details.
	 */
	WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));

	dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
}

/*
 * Regarding the deadline, a task with implicit deadline has a relative
 * deadline == relative period. A task with constrained deadline has a
 * relative deadline <= relative period.
 *
 * We support constrained deadline tasks. However, there are some restrictions
 * applied only for tasks which do not have an implicit deadline. See
 * update_dl_entity() to know more about such restrictions.
 *
 * dl_is_implicit() returns true if the task has an implicit deadline.
 */
static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
{
	return dl_se->dl_deadline == dl_se->dl_period;
}

/*
 * When a deadline entity is placed in the runqueue, its runtime and deadline
 * might need to be updated. This is done by a CBS wake up rule. There are two
 * different rules: 1) the original CBS; and 2) the Revisited CBS.
 *
 * When the task is starting a new period, the Original CBS is used. In this
 * case, the runtime is replenished and a new absolute deadline is set.
 *
 * When a task is queued before the beginning of the next period, using the
 * remaining runtime and deadline could make the entity overflow, see
 * dl_entity_overflow() to find more about runtime overflow. When such a case
 * is detected, the runtime and deadline need to be updated.
 *
 * If the task has an implicit deadline, i.e., deadline == period, the Original
 * CBS is applied. The runtime is replenished and a new absolute deadline is
 * set, as in the previous cases.
 *
 * However, the Original CBS does not work properly for tasks with
 * deadline < period, which are said to have a constrained deadline. By
 * applying the Original CBS, a constrained deadline task would be able to run
 * runtime/deadline in a period. With deadline < period, the task would
 * overrun the runtime/period allowed bandwidth, breaking the admission test.
 *
 * In order to prevent this misbehavior, the Revisited CBS is used for
 * constrained deadline tasks when a runtime overflow is detected. In the
 * Revisited CBS, rather than replenishing & setting a new absolute deadline,
 * the remaining runtime of the task is reduced to avoid runtime overflow.
 * Please refer to the comments of the update_dl_revised_wakeup() function to
 * find more about the Revised CBS rule.
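 *
 * Summarizing the rule applied below: on activation, if the deadline is in
 * the past or reusing the current (runtime, deadline) pair would overflow
 * the reserved bandwidth, the entity gets a fresh pair via
 * replenish_dl_new_period(); the one exception is a non-boosted
 * constrained-deadline entity whose deadline is still in the future, for
 * which only the runtime is trimmed.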
 */
static void update_dl_entity(struct sched_dl_entity *dl_se)
{
	struct rq *rq = rq_of_dl_se(dl_se);

	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
	    dl_entity_overflow(dl_se, rq_clock(rq))) {

		if (unlikely(!dl_is_implicit(dl_se) &&
			     !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
			     !is_dl_boosted(dl_se))) {
			update_dl_revised_wakeup(dl_se, rq);
			return;
		}

		replenish_dl_new_period(dl_se, rq);
	} else if (dl_server(dl_se) && dl_se->dl_defer) {
		/*
		 * The server can still use its previous deadline, so check if
		 * it left the dl_defer_running state.
		 */
		if (!dl_se->dl_defer_running) {
			dl_se->dl_defer_armed = 1;
			dl_se->dl_throttled = 1;
		}
	}
}

static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
{
	return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
}

/*
 * If the entity depleted all its runtime, and if we want it to sleep
 * while waiting for some new execution time to become available, we
 * set the bandwidth replenishment timer to the replenishment instant
 * and try to activate it.
 *
 * Notice that it is important for the caller to know if the timer
 * actually started or not (i.e., the replenishment instant is in
 * the future or in the past).
 */
static int start_dl_timer(struct sched_dl_entity *dl_se)
{
	struct hrtimer *timer = &dl_se->dl_timer;
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);
	ktime_t now, act;
	s64 delta;

	lockdep_assert_rq_held(rq);

	/*
	 * We want the timer to fire at the deadline, but considering
	 * that it is actually coming from rq->clock and not from
	 * hrtimer's time base reading.
	 *
	 * The deferred reservation will have its timer set to
	 * (deadline - runtime). At that point, the CBS rule will decide
	 * if the current deadline can be used, or if a replenishment is
	 * required to avoid adding too much pressure on the system
	 * (current u > U).
	 */
	if (dl_se->dl_defer_armed) {
		WARN_ON_ONCE(!dl_se->dl_throttled);
		act = ns_to_ktime(dl_se->deadline - dl_se->runtime);
	} else {
		/* act = deadline - rel-deadline + period */
		act = ns_to_ktime(dl_next_period(dl_se));
	}

	now = hrtimer_cb_get_time(timer);
	delta = ktime_to_ns(now) - rq_clock(rq);
	act = ktime_add_ns(act, delta);

	/*
	 * If the expiry time already passed, e.g., because the value
	 * chosen as the deadline is too small, don't even try to
	 * start the timer in the past!
	 */
	if (ktime_us_delta(act, now) < 0)
		return 0;

	/*
	 * !enqueued will guarantee another callback; even if one is already in
	 * progress. This ensures a balanced {get,put}_task_struct().
	 *
	 * The race against __run_timer() clearing the enqueued state is
	 * harmless because we're holding task_rq()->lock, therefore the timer
	 * expiring after we've done the check will wait on its task_rq_lock()
	 * and observe our state.
	 */
	if (!hrtimer_is_queued(timer)) {
		if (!dl_server(dl_se))
			get_task_struct(dl_task_of(dl_se));
		hrtimer_start(timer, act, HRTIMER_MODE_ABS_HARD);
	}

	return 1;
}

static void __push_dl_task(struct rq *rq, struct rq_flags *rf)
{
#ifdef CONFIG_SMP
	/*
	 * Queueing this task back might have overloaded rq, check if we need
	 * to kick someone away.
	 */
	if (has_pushable_dl_tasks(rq)) {
		/*
		 * Nothing relies on rq->lock after this, so it's safe to drop
		 * rq->lock.
		 */
		rq_unpin_lock(rq, rf);
		push_dl_task(rq);
		rq_repin_lock(rq, rf);
	}
#endif
}

/* a defer timer will not be reset if the runtime consumed was < dl_server_min_res */
static const u64 dl_server_min_res = 1 * NSEC_PER_MSEC;

static enum hrtimer_restart dl_server_timer(struct hrtimer *timer, struct sched_dl_entity *dl_se)
{
	struct rq *rq = rq_of_dl_se(dl_se);
	u64 fw;

	scoped_guard (rq_lock, rq) {
		struct rq_flags *rf = &scope.rf;

		if (!dl_se->dl_throttled || !dl_se->dl_runtime)
			return HRTIMER_NORESTART;

		sched_clock_tick();
		update_rq_clock(rq);

		if (!dl_se->dl_runtime)
			return HRTIMER_NORESTART;

		if (!dl_se->server_has_tasks(dl_se)) {
			replenish_dl_entity(dl_se);
			return HRTIMER_NORESTART;
		}

		if (dl_se->dl_defer_armed) {
			/*
			 * First check if the server could consume runtime in the
			 * background. If so, it is possible to push the defer timer
			 * for this amount of time. The dl_server_min_res serves as
			 * a limit to avoid forwarding the timer for a too small
			 * amount of time.
			 */
			if (dl_time_before(rq_clock(dl_se->rq),
					   (dl_se->deadline - dl_se->runtime - dl_server_min_res))) {

				/* reset the defer timer */
				fw = dl_se->deadline - rq_clock(dl_se->rq) - dl_se->runtime;

				hrtimer_forward_now(timer, ns_to_ktime(fw));
				return HRTIMER_RESTART;
			}

			dl_se->dl_defer_running = 1;
		}

		enqueue_dl_entity(dl_se, ENQUEUE_REPLENISH);

		if (!dl_task(dl_se->rq->curr) || dl_entity_preempt(dl_se, &dl_se->rq->curr->dl))
			resched_curr(rq);

		__push_dl_task(rq, rf);
	}

	return HRTIMER_NORESTART;
}

/*
 * This is the bandwidth enforcement timer callback. If here, we know
 * a task is not on its dl_rq, since the fact that the timer was running
 * means the task is throttled and needs a runtime replenishment.
 *
 * However, what we actually do depends on whether the task is active
 * (it is on its rq) or has been removed from there by a call to
 * dequeue_task_dl(). In the former case we must issue the runtime
 * replenishment and add the task back to the dl_rq; in the latter, we just
 * do nothing but clearing dl_throttled, so that runtime and deadline
 * updating (and the queueing back to dl_rq) will be done by the
 * next call to enqueue_task_dl().
 */
static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
{
	struct sched_dl_entity *dl_se = container_of(timer,
						     struct sched_dl_entity,
						     dl_timer);
	struct task_struct *p;
	struct rq_flags rf;
	struct rq *rq;

	if (dl_server(dl_se))
		return dl_server_timer(timer, dl_se);

	p = dl_task_of(dl_se);
	rq = task_rq_lock(p, &rf);

	/*
	 * The task might have changed its scheduling policy to something
	 * different than SCHED_DEADLINE (through switched_from_dl()).
	 */
	if (!dl_task(p))
		goto unlock;

	/*
	 * The task might have been boosted by someone else and might be in the
	 * boosting/deboosting path, it's not throttled.
	 */
	if (is_dl_boosted(dl_se))
		goto unlock;

	/*
	 * Spurious timer due to start_dl_timer() race; or we already received
	 * a replenishment from rt_mutex_setprio().
	 */
	if (!dl_se->dl_throttled)
		goto unlock;

	sched_clock_tick();
	update_rq_clock(rq);

	/*
	 * If the throttle happened during sched-out; like:
	 *
	 *	schedule()
	 *	  deactivate_task()
	 *	    dequeue_task_dl()
	 *	      update_curr_dl()
	 *	        start_dl_timer()
	 *	    __dequeue_task_dl()
	 *	  prev->on_rq = 0;
	 *
	 * We can be both throttled and !queued. Replenish the counter
	 * but do not enqueue -- wait for our wakeup to do that.
	 */
	if (!task_on_rq_queued(p)) {
		replenish_dl_entity(dl_se);
		goto unlock;
	}

#ifdef CONFIG_SMP
	if (unlikely(!rq->online)) {
		/*
		 * If the runqueue is no longer available, migrate the
		 * task elsewhere. This necessarily changes rq.
		 */
		lockdep_unpin_lock(__rq_lockp(rq), rf.cookie);
		rq = dl_task_offline_migration(rq, p);
		rf.cookie = lockdep_pin_lock(__rq_lockp(rq));
		update_rq_clock(rq);

		/*
		 * Now that the task has been migrated to the new RQ and we
		 * have that locked, proceed as normal and enqueue the task
		 * there.
		 */
	}
#endif

	enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
	if (dl_task(rq->donor))
		wakeup_preempt_dl(rq, p, 0);
	else
		resched_curr(rq);

	__push_dl_task(rq, &rf);

unlock:
	task_rq_unlock(rq, p, &rf);

	/*
	 * This can free the task_struct, including this hrtimer, do not touch
	 * anything related to that after this.
	 */
	put_task_struct(p);

	return HRTIMER_NORESTART;
}

static void init_dl_task_timer(struct sched_dl_entity *dl_se)
{
	struct hrtimer *timer = &dl_se->dl_timer;

	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	timer->function = dl_task_timer;
}

/*
 * During the activation, CBS checks if it can reuse the current task's
 * runtime and period. If the deadline of the task is in the past, CBS
 * cannot use the runtime, and so it replenishes the task. This rule
 * works fine for implicit deadline tasks (deadline == period), and the
 * CBS was designed for implicit deadline tasks. However, a task with
 * constrained deadline (deadline < period) might be awakened after the
 * deadline, but before the next period. In this case, replenishing the
 * task would allow it to run for runtime / deadline. As in this case
 * deadline < period, CBS enables a task to run for more than the
 * runtime / period.
 * In a very loaded system, this can cause a domino effect, making other
 * tasks miss their deadlines.
 *
 * To avoid this problem, in the activation of a constrained deadline
 * task after the deadline but before the next period, throttle the
 * task and set the replenishing timer to the beginning of the next period,
 * unless it is boosted.
 */
static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
{
	struct rq *rq = rq_of_dl_se(dl_se);

	if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
	    dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
		if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(dl_se)))
			return;
		dl_se->dl_throttled = 1;
		if (dl_se->runtime > 0)
			dl_se->runtime = 0;
	}
}

static
int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
{
	return (dl_se->runtime <= 0);
}

/*
 * This function implements the GRUB accounting rule. According to the
 * GRUB reclaiming algorithm, the runtime is not decreased as "dq = -dt",
 * but as "dq = -(max{u, (Umax - Uinact - Uextra)} / Umax) dt",
 * where u is the utilization of the task, Umax is the maximum reclaimable
 * utilization, Uinact is the (per-runqueue) inactive utilization, computed
 * as the difference between the "total runqueue utilization" and the
 * "runqueue active utilization", and Uextra is the (per runqueue) extra
 * reclaimable utilization.
 * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations multiplied
 * by 2^BW_SHIFT, the result has to be shifted right by BW_SHIFT.
 * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT, dl_bw
 * is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
 * Since delta is a 64 bit variable, to have an overflow its value should be
 * larger than 2^(64 - 20 - 8), which is more than 64 seconds. So, overflow is
 * not an issue here.
 */
static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
{
	u64 u_act;
	u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */

	/*
	 * Instead of computing max{u, (u_max - u_inact - u_extra)}, we
	 * compare u_inact + u_extra with u_max - u, because u_inact + u_extra
	 * can be larger than u_max. So, u_max - u_inact - u_extra would be
	 * negative leading to wrong results.
	 */
	if (u_inact + rq->dl.extra_bw > rq->dl.max_bw - dl_se->dl_bw)
		u_act = dl_se->dl_bw;
	else
		u_act = rq->dl.max_bw - u_inact - rq->dl.extra_bw;

	u_act = (u_act * rq->dl.bw_ratio) >> RATIO_SHIFT;
	return (delta * u_act) >> BW_SHIFT;
}

s64 dl_scaled_delta_exec(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec)
{
	s64 scaled_delta_exec;

	/*
	 * For tasks that participate in GRUB, we implement GRUB-PA: the
	 * spare reclaimed bandwidth is used to clock down frequency.
	 *
	 * For the others, we still need to scale reservation parameters
	 * according to current frequency and CPU maximum capacity.
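	 *
	 * (Illustrative numbers, using cap_scale(v, s) = v * s /
	 * SCHED_CAPACITY_SCALE: 1ms of wall-clock execution at half of the
	 * maximum frequency on a CPU with half of the maximum capacity
	 * consumes only 1ms * 0.5 * 0.5 = 0.25ms of reserved runtime.)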
	 */
	if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
		scaled_delta_exec = grub_reclaim(delta_exec, rq, dl_se);
	} else {
		int cpu = cpu_of(rq);
		unsigned long scale_freq = arch_scale_freq_capacity(cpu);
		unsigned long scale_cpu = arch_scale_cpu_capacity(cpu);

		scaled_delta_exec = cap_scale(delta_exec, scale_freq);
		scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
	}

	return scaled_delta_exec;
}

static inline void
update_stats_dequeue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
			int flags);
static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec)
{
	s64 scaled_delta_exec;

	if (unlikely(delta_exec <= 0)) {
		if (unlikely(dl_se->dl_yielded))
			goto throttle;
		return;
	}

	if (dl_server(dl_se) && dl_se->dl_throttled && !dl_se->dl_defer)
		return;

	if (dl_entity_is_special(dl_se))
		return;

	scaled_delta_exec = dl_scaled_delta_exec(rq, dl_se, delta_exec);

	dl_se->runtime -= scaled_delta_exec;

	/*
	 * The fair server can consume its runtime while throttled (not queued/
	 * running as regular CFS).
	 *
	 * If the server consumes its entire runtime in this state, it is not
	 * required for the current period. Thus, reset the server by starting
	 * a new period, pushing the activation.
	 */
	if (dl_se->dl_defer && dl_se->dl_throttled && dl_runtime_exceeded(dl_se)) {
		/*
		 * If the server was previously activated because the starving
		 * condition took place, at this point the condition went away
		 * because the fair scheduler was able to get runtime in the
		 * background. So return to the initial state.
		 */
		dl_se->dl_defer_running = 0;

		hrtimer_try_to_cancel(&dl_se->dl_timer);

		replenish_dl_new_period(dl_se, dl_se->rq);

		/*
		 * Not being able to start the timer seems problematic. If it could not
		 * be started for whatever reason, we need to "unthrottle" the DL server
		 * and queue right away. Otherwise nothing might queue it. That's similar
		 * to what enqueue_dl_entity() does on start_dl_timer==0. For now, just warn.
		 */
		WARN_ON_ONCE(!start_dl_timer(dl_se));

		return;
	}

throttle:
	if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
		dl_se->dl_throttled = 1;

		/* If requested, inform the user about runtime overruns. */
		if (dl_runtime_exceeded(dl_se) &&
		    (dl_se->flags & SCHED_FLAG_DL_OVERRUN))
			dl_se->dl_overrun = 1;

		dequeue_dl_entity(dl_se, 0);
		if (!dl_server(dl_se)) {
			update_stats_dequeue_dl(&rq->dl, dl_se, 0);
			dequeue_pushable_dl_task(rq, dl_task_of(dl_se));
		}

		if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(dl_se))) {
			if (dl_server(dl_se))
				enqueue_dl_entity(dl_se, ENQUEUE_REPLENISH);
			else
				enqueue_task_dl(rq, dl_task_of(dl_se), ENQUEUE_REPLENISH);
		}

		if (!is_leftmost(dl_se, &rq->dl))
			resched_curr(rq);
	}

	/*
	 * The fair server (sole dl_server) does not account for real-time
	 * workload because it is running fair work.
	 */
	if (dl_se == &rq->fair_server)
		return;

#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Because -- for now -- we share the rt bandwidth, we need to
	 * account our runtime there too, otherwise actual rt tasks
	 * would be able to exceed the shared quota.
	 *
	 * Account to the root rt group for now.
	 *
	 * The solution we're working towards is having the RT groups scheduled
	 * using deadline servers -- however there are a few nasties to figure
	 * out before that can happen.
	 */
	if (rt_bandwidth_enabled()) {
		struct rt_rq *rt_rq = &rq->rt;

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We'll let actual RT tasks worry about the overflow here, we
		 * have our own CBS to keep us inline; only account when RT
		 * bandwidth is relevant.
		 */
		if (sched_rt_bandwidth_account(rt_rq))
			rt_rq->rt_time += delta_exec;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
#endif
}

/*
 * In the non-defer mode, the idle time is not accounted, as the
 * server provides a guarantee.
 *
 * If the dl_server is in defer mode, the idle time is also considered
 * as time available for the fair server, avoiding a penalty for the
 * rt scheduler that did not consume that time.
 */
void dl_server_update_idle_time(struct rq *rq, struct task_struct *p)
{
	s64 delta_exec, scaled_delta_exec;

	if (!rq->fair_server.dl_defer)
		return;

	/* no need to discount more */
	if (rq->fair_server.runtime < 0)
		return;

	delta_exec = rq_clock_task(rq) - p->se.exec_start;
	if (delta_exec < 0)
		return;

	scaled_delta_exec = dl_scaled_delta_exec(rq, &rq->fair_server, delta_exec);

	rq->fair_server.runtime -= scaled_delta_exec;

	if (rq->fair_server.runtime < 0) {
		rq->fair_server.dl_defer_running = 0;
		rq->fair_server.runtime = 0;
	}

	p->se.exec_start = rq_clock_task(rq);
}

void dl_server_update(struct sched_dl_entity *dl_se, s64 delta_exec)
{
	/* 0 runtime = fair server disabled */
	if (dl_se->dl_runtime)
		update_curr_dl_se(dl_se->rq, dl_se, delta_exec);
}

void dl_server_start(struct sched_dl_entity *dl_se)
{
	struct rq *rq = dl_se->rq;

	/*
	 * XXX: dl_server_apply_params() does not work correctly during the
	 * init phase for the fair server because things are not yet set
	 * up. We need to improve this before making it generic.
	 */
	if (!dl_server(dl_se)) {
		u64 runtime =  50 * NSEC_PER_MSEC;
		u64 period = 1000 * NSEC_PER_MSEC;

		dl_server_apply_params(dl_se, runtime, period, 1);

		dl_se->dl_server = 1;
		dl_se->dl_defer = 1;
		setup_new_dl_entity(dl_se);
	}

	if (!dl_se->dl_runtime)
		return;

	enqueue_dl_entity(dl_se, ENQUEUE_WAKEUP);
	if (!dl_task(dl_se->rq->curr) || dl_entity_preempt(dl_se, &rq->curr->dl))
		resched_curr(dl_se->rq);
}

void dl_server_stop(struct sched_dl_entity *dl_se)
{
	if (!dl_se->dl_runtime)
		return;

	dequeue_dl_entity(dl_se, DEQUEUE_SLEEP);
	hrtimer_try_to_cancel(&dl_se->dl_timer);
	dl_se->dl_defer_armed = 0;
	dl_se->dl_throttled = 0;
}

void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq,
		    dl_server_has_tasks_f has_tasks,
		    dl_server_pick_f pick_task)
{
	dl_se->rq = rq;
	dl_se->server_has_tasks = has_tasks;
	dl_se->server_pick_task = pick_task;
}

void __dl_server_attach_root(struct sched_dl_entity *dl_se, struct rq *rq)
{
	u64 new_bw = dl_se->dl_bw;
	int cpu = cpu_of(rq);
	struct dl_bw *dl_b;

	dl_b = dl_bw_of(cpu_of(rq));
	guard(raw_spinlock)(&dl_b->lock);

	if (!dl_bw_cpus(cpu))
		return;

	__dl_add(dl_b, new_bw, dl_bw_cpus(cpu));
}

int dl_server_apply_params(struct sched_dl_entity *dl_se, u64 runtime, u64 period, bool init)
{
	u64 old_bw = init ? 0 : to_ratio(dl_se->dl_period, dl_se->dl_runtime);
	u64 new_bw = to_ratio(period, runtime);
	struct rq *rq = dl_se->rq;
	int cpu = cpu_of(rq);
	struct dl_bw *dl_b;
	unsigned long cap;
	int retval = 0;
	int cpus;

	dl_b = dl_bw_of(cpu);
	guard(raw_spinlock)(&dl_b->lock);

	cpus = dl_bw_cpus(cpu);
	cap = dl_bw_capacity(cpu);

	if (__dl_overflow(dl_b, cap, old_bw, new_bw))
		return -EBUSY;

	if (init) {
		__add_rq_bw(new_bw, &rq->dl);
		__dl_add(dl_b, new_bw, cpus);
	} else {
		__dl_sub(dl_b, dl_se->dl_bw, cpus);
		__dl_add(dl_b, new_bw, cpus);

		dl_rq_change_utilization(rq, dl_se, new_bw);
	}

	dl_se->dl_runtime = runtime;
	dl_se->dl_deadline = period;
	dl_se->dl_period = period;

	dl_se->runtime = 0;
	dl_se->deadline = 0;

	dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
	dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);

	return retval;
}

/*
 * Update the current task's runtime statistics (provided it is still
 * a -deadline task and has not been removed from the dl_rq).
 */
static void update_curr_dl(struct rq *rq)
{
	struct task_struct *donor = rq->donor;
	struct sched_dl_entity *dl_se = &donor->dl;
	s64 delta_exec;

	if (!dl_task(donor) || !on_dl_rq(dl_se))
		return;

	/*
	 * Consumed budget is computed considering the time as
	 * observed by schedulable tasks (excluding time spent
	 * in hardirq context, etc.). Deadlines are instead
	 * computed using hard walltime. This seems to be the more
	 * natural solution, but the full ramifications of this
	 * approach need further study.
	 */
	delta_exec = update_curr_common(rq);
	update_curr_dl_se(rq, dl_se, delta_exec);
}

static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
{
	struct sched_dl_entity *dl_se = container_of(timer,
						     struct sched_dl_entity,
						     inactive_timer);
	struct task_struct *p = NULL;
	struct rq_flags rf;
	struct rq *rq;

	if (!dl_server(dl_se)) {
		p = dl_task_of(dl_se);
		rq = task_rq_lock(p, &rf);
	} else {
		rq = dl_se->rq;
		rq_lock(rq, &rf);
	}

	sched_clock_tick();
	update_rq_clock(rq);

	if (dl_server(dl_se))
		goto no_task;

	if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
		struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

		if (READ_ONCE(p->__state) == TASK_DEAD && dl_se->dl_non_contending) {
			sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
			sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
			dl_se->dl_non_contending = 0;
		}

		raw_spin_lock(&dl_b->lock);
		__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
		raw_spin_unlock(&dl_b->lock);
		__dl_clear_params(dl_se);

		goto unlock;
	}

no_task:
	if (dl_se->dl_non_contending == 0)
		goto unlock;

	sub_running_bw(dl_se, &rq->dl);
	dl_se->dl_non_contending = 0;
unlock:

	if (!dl_server(dl_se)) {
		task_rq_unlock(rq, p, &rf);
		put_task_struct(p);
	} else {
		rq_unlock(rq, &rf);
	}

	return HRTIMER_NORESTART;
}

static void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
{
	struct hrtimer *timer = &dl_se->inactive_timer;

	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	timer->function = inactive_task_timer;
}

#define __node_2_dle(node) \
	rb_entry((node), struct sched_dl_entity, rb_node)

#ifdef CONFIG_SMP

static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
	struct rq *rq = rq_of_dl_rq(dl_rq);

	if (dl_rq->earliest_dl.curr == 0 ||
	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
		if (dl_rq->earliest_dl.curr == 0)
			cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_HIGHER);
		dl_rq->earliest_dl.curr = deadline;
		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
	}
}

static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
	struct rq *rq = rq_of_dl_rq(dl_rq);

	/*
	 * Since we may have removed our earliest (and/or next earliest)
	 * task we must recompute them.
1849 */ 1850 if (!dl_rq->dl_nr_running) { 1851 dl_rq->earliest_dl.curr = 0; 1852 dl_rq->earliest_dl.next = 0; 1853 cpudl_clear(&rq->rd->cpudl, rq->cpu); 1854 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr); 1855 } else { 1856 struct rb_node *leftmost = rb_first_cached(&dl_rq->root); 1857 struct sched_dl_entity *entry = __node_2_dle(leftmost); 1858 1859 dl_rq->earliest_dl.curr = entry->deadline; 1860 cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline); 1861 } 1862 } 1863 1864 #else 1865 1866 static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {} 1867 static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {} 1868 1869 #endif /* CONFIG_SMP */ 1870 1871 static inline 1872 void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 1873 { 1874 u64 deadline = dl_se->deadline; 1875 1876 dl_rq->dl_nr_running++; 1877 add_nr_running(rq_of_dl_rq(dl_rq), 1); 1878 1879 inc_dl_deadline(dl_rq, deadline); 1880 } 1881 1882 static inline 1883 void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 1884 { 1885 WARN_ON(!dl_rq->dl_nr_running); 1886 dl_rq->dl_nr_running--; 1887 sub_nr_running(rq_of_dl_rq(dl_rq), 1); 1888 1889 dec_dl_deadline(dl_rq, dl_se->deadline); 1890 } 1891 1892 static inline bool __dl_less(struct rb_node *a, const struct rb_node *b) 1893 { 1894 return dl_time_before(__node_2_dle(a)->deadline, __node_2_dle(b)->deadline); 1895 } 1896 1897 static __always_inline struct sched_statistics * 1898 __schedstats_from_dl_se(struct sched_dl_entity *dl_se) 1899 { 1900 if (!schedstat_enabled()) 1901 return NULL; 1902 1903 if (dl_server(dl_se)) 1904 return NULL; 1905 1906 return &dl_task_of(dl_se)->stats; 1907 } 1908 1909 static inline void 1910 update_stats_wait_start_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se) 1911 { 1912 struct sched_statistics *stats = __schedstats_from_dl_se(dl_se); 1913 if (stats) 1914 __update_stats_wait_start(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats); 1915 } 1916 1917 static inline void 1918 update_stats_wait_end_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se) 1919 { 1920 struct sched_statistics *stats = __schedstats_from_dl_se(dl_se); 1921 if (stats) 1922 __update_stats_wait_end(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats); 1923 } 1924 1925 static inline void 1926 update_stats_enqueue_sleeper_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se) 1927 { 1928 struct sched_statistics *stats = __schedstats_from_dl_se(dl_se); 1929 if (stats) 1930 __update_stats_enqueue_sleeper(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats); 1931 } 1932 1933 static inline void 1934 update_stats_enqueue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se, 1935 int flags) 1936 { 1937 if (!schedstat_enabled()) 1938 return; 1939 1940 if (flags & ENQUEUE_WAKEUP) 1941 update_stats_enqueue_sleeper_dl(dl_rq, dl_se); 1942 } 1943 1944 static inline void 1945 update_stats_dequeue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se, 1946 int flags) 1947 { 1948 struct task_struct *p = dl_task_of(dl_se); 1949 1950 if (!schedstat_enabled()) 1951 return; 1952 1953 if ((flags & DEQUEUE_SLEEP)) { 1954 unsigned int state; 1955 1956 state = READ_ONCE(p->__state); 1957 if (state & TASK_INTERRUPTIBLE) 1958 __schedstat_set(p->stats.sleep_start, 1959 rq_clock(rq_of_dl_rq(dl_rq))); 1960 1961 if (state & TASK_UNINTERRUPTIBLE) 1962 __schedstat_set(p->stats.block_start, 1963 rq_clock(rq_of_dl_rq(dl_rq))); 1964 } 1965 } 1966 1967 static void __enqueue_dl_entity(struct sched_dl_entity *dl_se) 1968 { 1969 struct dl_rq *dl_rq = 
dl_rq_of_se(dl_se); 1970 1971 WARN_ON_ONCE(!RB_EMPTY_NODE(&dl_se->rb_node)); 1972 1973 rb_add_cached(&dl_se->rb_node, &dl_rq->root, __dl_less); 1974 1975 inc_dl_tasks(dl_se, dl_rq); 1976 } 1977 1978 static void __dequeue_dl_entity(struct sched_dl_entity *dl_se) 1979 { 1980 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); 1981 1982 if (RB_EMPTY_NODE(&dl_se->rb_node)) 1983 return; 1984 1985 rb_erase_cached(&dl_se->rb_node, &dl_rq->root); 1986 1987 RB_CLEAR_NODE(&dl_se->rb_node); 1988 1989 dec_dl_tasks(dl_se, dl_rq); 1990 } 1991 1992 static void 1993 enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags) 1994 { 1995 WARN_ON_ONCE(on_dl_rq(dl_se)); 1996 1997 update_stats_enqueue_dl(dl_rq_of_se(dl_se), dl_se, flags); 1998 1999 /* 2000 * Check if a constrained deadline task was activated 2001 * after the deadline but before the next period. 2002 * If that is the case, the task will be throttled and 2003 * the replenishment timer will be set to the next period. 2004 */ 2005 if (!dl_se->dl_throttled && !dl_is_implicit(dl_se)) 2006 dl_check_constrained_dl(dl_se); 2007 2008 if (flags & (ENQUEUE_RESTORE|ENQUEUE_MIGRATING)) { 2009 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); 2010 2011 add_rq_bw(dl_se, dl_rq); 2012 add_running_bw(dl_se, dl_rq); 2013 } 2014 2015 /* 2016 * If p is throttled, we do not enqueue it. In fact, if it exhausted 2017 * its budget it needs a replenishment and, since it now is on 2018 * its rq, the bandwidth timer callback (which clearly has not 2019 * run yet) will take care of this. 2020 * However, the active utilization does not depend on the fact 2021 * that the task is on the runqueue or not (but depends on the 2022 * task's state - in GRUB parlance, "inactive" vs "active contending"). 2023 * In other words, even if a task is throttled its utilization must 2024 * be counted in the active utilization; hence, we need to call 2025 * add_running_bw(). 2026 */ 2027 if (!dl_se->dl_defer && dl_se->dl_throttled && !(flags & ENQUEUE_REPLENISH)) { 2028 if (flags & ENQUEUE_WAKEUP) 2029 task_contending(dl_se, flags); 2030 2031 return; 2032 } 2033 2034 /* 2035 * If this is a wakeup or a new instance, the scheduling 2036 * parameters of the task might need updating. Otherwise, 2037 * we want a replenishment of its runtime. 2038 */ 2039 if (flags & ENQUEUE_WAKEUP) { 2040 task_contending(dl_se, flags); 2041 update_dl_entity(dl_se); 2042 } else if (flags & ENQUEUE_REPLENISH) { 2043 replenish_dl_entity(dl_se); 2044 } else if ((flags & ENQUEUE_RESTORE) && 2045 !is_dl_boosted(dl_se) && 2046 dl_time_before(dl_se->deadline, rq_clock(rq_of_dl_se(dl_se)))) { 2047 setup_new_dl_entity(dl_se); 2048 } 2049 2050 /* 2051 * If the reservation is still throttled, e.g., it got replenished but is a 2052 * deferred task and still got to wait, don't enqueue. 2053 */ 2054 if (dl_se->dl_throttled && start_dl_timer(dl_se)) 2055 return; 2056 2057 /* 2058 * We're about to enqueue, make sure we're not ->dl_throttled! 2059 * In case the timer was not started, say because the defer time 2060 * has passed, mark as not throttled and mark unarmed. 2061 * Also cancel earlier timers, since letting those run is pointless. 
2062 */ 2063 if (dl_se->dl_throttled) { 2064 hrtimer_try_to_cancel(&dl_se->dl_timer); 2065 dl_se->dl_defer_armed = 0; 2066 dl_se->dl_throttled = 0; 2067 } 2068 2069 __enqueue_dl_entity(dl_se); 2070 } 2071 2072 static void dequeue_dl_entity(struct sched_dl_entity *dl_se, int flags) 2073 { 2074 __dequeue_dl_entity(dl_se); 2075 2076 if (flags & (DEQUEUE_SAVE|DEQUEUE_MIGRATING)) { 2077 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); 2078 2079 sub_running_bw(dl_se, dl_rq); 2080 sub_rq_bw(dl_se, dl_rq); 2081 } 2082 2083 /* 2084 * This check allows to start the inactive timer (or to immediately 2085 * decrease the active utilization, if needed) in two cases: 2086 * when the task blocks and when it is terminating 2087 * (p->state == TASK_DEAD). We can handle the two cases in the same 2088 * way, because from GRUB's point of view the same thing is happening 2089 * (the task moves from "active contending" to "active non contending" 2090 * or "inactive") 2091 */ 2092 if (flags & DEQUEUE_SLEEP) 2093 task_non_contending(dl_se); 2094 } 2095 2096 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) 2097 { 2098 if (is_dl_boosted(&p->dl)) { 2099 /* 2100 * Because of delays in the detection of the overrun of a 2101 * thread's runtime, it might be the case that a thread 2102 * goes to sleep in a rt mutex with negative runtime. As 2103 * a consequence, the thread will be throttled. 2104 * 2105 * While waiting for the mutex, this thread can also be 2106 * boosted via PI, resulting in a thread that is throttled 2107 * and boosted at the same time. 2108 * 2109 * In this case, the boost overrides the throttle. 2110 */ 2111 if (p->dl.dl_throttled) { 2112 /* 2113 * The replenish timer needs to be canceled. No 2114 * problem if it fires concurrently: boosted threads 2115 * are ignored in dl_task_timer(). 2116 * 2117 * If the timer callback was running (hrtimer_try_to_cancel == -1), 2118 * it will eventually call put_task_struct(). 2119 */ 2120 if (hrtimer_try_to_cancel(&p->dl.dl_timer) == 1 && 2121 !dl_server(&p->dl)) 2122 put_task_struct(p); 2123 p->dl.dl_throttled = 0; 2124 } 2125 } else if (!dl_prio(p->normal_prio)) { 2126 /* 2127 * Special case in which we have a !SCHED_DEADLINE task that is going 2128 * to be deboosted, but exceeds its runtime while doing so. No point in 2129 * replenishing it, as it's going to return back to its original 2130 * scheduling class after this. If it has been throttled, we need to 2131 * clear the flag, otherwise the task may wake up as throttled after 2132 * being boosted again with no means to replenish the runtime and clear 2133 * the throttle. 
2134 */ 2135 p->dl.dl_throttled = 0; 2136 if (!(flags & ENQUEUE_REPLENISH)) 2137 printk_deferred_once("sched: DL de-boosted task PID %d: REPLENISH flag missing\n", 2138 task_pid_nr(p)); 2139 2140 return; 2141 } 2142 2143 check_schedstat_required(); 2144 update_stats_wait_start_dl(dl_rq_of_se(&p->dl), &p->dl); 2145 2146 if (p->on_rq == TASK_ON_RQ_MIGRATING) 2147 flags |= ENQUEUE_MIGRATING; 2148 2149 enqueue_dl_entity(&p->dl, flags); 2150 2151 if (dl_server(&p->dl)) 2152 return; 2153 2154 if (!task_current(rq, p) && !p->dl.dl_throttled && p->nr_cpus_allowed > 1) 2155 enqueue_pushable_dl_task(rq, p); 2156 } 2157 2158 static bool dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) 2159 { 2160 update_curr_dl(rq); 2161 2162 if (p->on_rq == TASK_ON_RQ_MIGRATING) 2163 flags |= DEQUEUE_MIGRATING; 2164 2165 dequeue_dl_entity(&p->dl, flags); 2166 if (!p->dl.dl_throttled && !dl_server(&p->dl)) 2167 dequeue_pushable_dl_task(rq, p); 2168 2169 return true; 2170 } 2171 2172 /* 2173 * Yield task semantic for -deadline tasks is: 2174 * 2175 * get off from the CPU until our next instance, with 2176 * a new runtime. This is of little use now, since we 2177 * don't have a bandwidth reclaiming mechanism. Anyway, 2178 * bandwidth reclaiming is planned for the future, and 2179 * yield_task_dl will indicate that some spare budget 2180 * is available for other task instances to use it. 2181 */ 2182 static void yield_task_dl(struct rq *rq) 2183 { 2184 /* 2185 * We make the task go to sleep until its current deadline by 2186 * forcing its runtime to zero. This way, update_curr_dl() stops 2187 * it and the bandwidth timer will wake it up and will give it 2188 * new scheduling parameters (thanks to dl_yielded=1). 2189 */ 2190 rq->curr->dl.dl_yielded = 1; 2191 2192 update_rq_clock(rq); 2193 update_curr_dl(rq); 2194 /* 2195 * Tell update_rq_clock() that we've just updated, 2196 * so we don't do microscopic update in schedule() 2197 * and double the fastpath cost. 2198 */ 2199 rq_clock_skip_update(rq); 2200 } 2201 2202 #ifdef CONFIG_SMP 2203 2204 static inline bool dl_task_is_earliest_deadline(struct task_struct *p, 2205 struct rq *rq) 2206 { 2207 return (!rq->dl.dl_nr_running || 2208 dl_time_before(p->dl.deadline, 2209 rq->dl.earliest_dl.curr)); 2210 } 2211 2212 static int find_later_rq(struct task_struct *task); 2213 2214 static int 2215 select_task_rq_dl(struct task_struct *p, int cpu, int flags) 2216 { 2217 struct task_struct *curr, *donor; 2218 bool select_rq; 2219 struct rq *rq; 2220 2221 if (!(flags & WF_TTWU)) 2222 goto out; 2223 2224 rq = cpu_rq(cpu); 2225 2226 rcu_read_lock(); 2227 curr = READ_ONCE(rq->curr); /* unlocked access */ 2228 donor = READ_ONCE(rq->donor); 2229 2230 /* 2231 * If we are dealing with a -deadline task, we must 2232 * decide where to wake it up. 2233 * If it has a later deadline and the current task 2234 * on this rq can't move (provided the waking task 2235 * can!) we prefer to send it somewhere else. On the 2236 * other hand, if it has a shorter deadline, we 2237 * try to make it stay here, it might be important. 2238 */ 2239 select_rq = unlikely(dl_task(donor)) && 2240 (curr->nr_cpus_allowed < 2 || 2241 !dl_entity_preempt(&p->dl, &donor->dl)) && 2242 p->nr_cpus_allowed > 1; 2243 2244 /* 2245 * Take the capacity of the CPU into account to 2246 * ensure it fits the requirement of the task. 
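 *
 * Rough sketch of the criterion (illustrative, not from the original
 * source): a task fits a CPU when its density does not exceed the CPU's
 * normalized capacity, i.e. approximately
 *
 *	dl_runtime / dl_deadline  <=  arch_scale_cpu_capacity(cpu) / 1024
 *
 * e.g. runtime = 4 ms with deadline = 10 ms needs ~40% of a full-capacity
 * CPU: it fits a big core (capacity 1024) but not a little core with
 * capacity 256.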
2247 */ 2248 if (sched_asym_cpucap_active()) 2249 select_rq |= !dl_task_fits_capacity(p, cpu); 2250 2251 if (select_rq) { 2252 int target = find_later_rq(p); 2253 2254 if (target != -1 && 2255 dl_task_is_earliest_deadline(p, cpu_rq(target))) 2256 cpu = target; 2257 } 2258 rcu_read_unlock(); 2259 2260 out: 2261 return cpu; 2262 } 2263 2264 static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused) 2265 { 2266 struct rq_flags rf; 2267 struct rq *rq; 2268 2269 if (READ_ONCE(p->__state) != TASK_WAKING) 2270 return; 2271 2272 rq = task_rq(p); 2273 /* 2274 * Since p->state == TASK_WAKING, set_task_cpu() has been called 2275 * from try_to_wake_up(). Hence, p->pi_lock is locked, but 2276 * rq->lock is not... So, lock it 2277 */ 2278 rq_lock(rq, &rf); 2279 if (p->dl.dl_non_contending) { 2280 update_rq_clock(rq); 2281 sub_running_bw(&p->dl, &rq->dl); 2282 p->dl.dl_non_contending = 0; 2283 /* 2284 * If the timer handler is currently running and the 2285 * timer cannot be canceled, inactive_task_timer() 2286 * will see that dl_not_contending is not set, and 2287 * will not touch the rq's active utilization, 2288 * so we are still safe. 2289 */ 2290 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1) 2291 put_task_struct(p); 2292 } 2293 sub_rq_bw(&p->dl, &rq->dl); 2294 rq_unlock(rq, &rf); 2295 } 2296 2297 static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p) 2298 { 2299 /* 2300 * Current can't be migrated, useless to reschedule, 2301 * let's hope p can move out. 2302 */ 2303 if (rq->curr->nr_cpus_allowed == 1 || 2304 !cpudl_find(&rq->rd->cpudl, rq->donor, NULL)) 2305 return; 2306 2307 /* 2308 * p is migratable, so let's not schedule it and 2309 * see if it is pushed or pulled somewhere else. 2310 */ 2311 if (p->nr_cpus_allowed != 1 && 2312 cpudl_find(&rq->rd->cpudl, p, NULL)) 2313 return; 2314 2315 resched_curr(rq); 2316 } 2317 2318 static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf) 2319 { 2320 if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) { 2321 /* 2322 * This is OK, because current is on_cpu, which avoids it being 2323 * picked for load-balance and preemption/IRQs are still 2324 * disabled avoiding further scheduler activity on it and we've 2325 * not yet started the picking loop. 2326 */ 2327 rq_unpin_lock(rq, rf); 2328 pull_dl_task(rq); 2329 rq_repin_lock(rq, rf); 2330 } 2331 2332 return sched_stop_runnable(rq) || sched_dl_runnable(rq); 2333 } 2334 #endif /* CONFIG_SMP */ 2335 2336 /* 2337 * Only called when both the current and waking task are -deadline 2338 * tasks. 2339 */ 2340 static void wakeup_preempt_dl(struct rq *rq, struct task_struct *p, 2341 int flags) 2342 { 2343 if (dl_entity_preempt(&p->dl, &rq->donor->dl)) { 2344 resched_curr(rq); 2345 return; 2346 } 2347 2348 #ifdef CONFIG_SMP 2349 /* 2350 * In the unlikely case current and p have the same deadline 2351 * let us try to decide what's the best thing to do... 
2352 */ 2353 if ((p->dl.deadline == rq->donor->dl.deadline) && 2354 !test_tsk_need_resched(rq->curr)) 2355 check_preempt_equal_dl(rq, p); 2356 #endif /* CONFIG_SMP */ 2357 } 2358 2359 #ifdef CONFIG_SCHED_HRTICK 2360 static void start_hrtick_dl(struct rq *rq, struct sched_dl_entity *dl_se) 2361 { 2362 hrtick_start(rq, dl_se->runtime); 2363 } 2364 #else /* !CONFIG_SCHED_HRTICK */ 2365 static void start_hrtick_dl(struct rq *rq, struct sched_dl_entity *dl_se) 2366 { 2367 } 2368 #endif 2369 2370 static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first) 2371 { 2372 struct sched_dl_entity *dl_se = &p->dl; 2373 struct dl_rq *dl_rq = &rq->dl; 2374 2375 p->se.exec_start = rq_clock_task(rq); 2376 if (on_dl_rq(&p->dl)) 2377 update_stats_wait_end_dl(dl_rq, dl_se); 2378 2379 /* You can't push away the running task */ 2380 dequeue_pushable_dl_task(rq, p); 2381 2382 if (!first) 2383 return; 2384 2385 if (rq->donor->sched_class != &dl_sched_class) 2386 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0); 2387 2388 deadline_queue_push_tasks(rq); 2389 2390 if (hrtick_enabled_dl(rq)) 2391 start_hrtick_dl(rq, &p->dl); 2392 } 2393 2394 static struct sched_dl_entity *pick_next_dl_entity(struct dl_rq *dl_rq) 2395 { 2396 struct rb_node *left = rb_first_cached(&dl_rq->root); 2397 2398 if (!left) 2399 return NULL; 2400 2401 return __node_2_dle(left); 2402 } 2403 2404 /* 2405 * __pick_next_task_dl - Helper to pick the next -deadline task to run. 2406 * @rq: The runqueue to pick the next task from. 2407 */ 2408 static struct task_struct *__pick_task_dl(struct rq *rq) 2409 { 2410 struct sched_dl_entity *dl_se; 2411 struct dl_rq *dl_rq = &rq->dl; 2412 struct task_struct *p; 2413 2414 again: 2415 if (!sched_dl_runnable(rq)) 2416 return NULL; 2417 2418 dl_se = pick_next_dl_entity(dl_rq); 2419 WARN_ON_ONCE(!dl_se); 2420 2421 if (dl_server(dl_se)) { 2422 p = dl_se->server_pick_task(dl_se); 2423 if (!p) { 2424 dl_se->dl_yielded = 1; 2425 update_curr_dl_se(rq, dl_se, 0); 2426 goto again; 2427 } 2428 rq->dl_server = dl_se; 2429 } else { 2430 p = dl_task_of(dl_se); 2431 } 2432 2433 return p; 2434 } 2435 2436 static struct task_struct *pick_task_dl(struct rq *rq) 2437 { 2438 return __pick_task_dl(rq); 2439 } 2440 2441 static void put_prev_task_dl(struct rq *rq, struct task_struct *p, struct task_struct *next) 2442 { 2443 struct sched_dl_entity *dl_se = &p->dl; 2444 struct dl_rq *dl_rq = &rq->dl; 2445 2446 if (on_dl_rq(&p->dl)) 2447 update_stats_wait_start_dl(dl_rq, dl_se); 2448 2449 update_curr_dl(rq); 2450 2451 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1); 2452 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1) 2453 enqueue_pushable_dl_task(rq, p); 2454 } 2455 2456 /* 2457 * scheduler tick hitting a task of our scheduling class. 2458 * 2459 * NOTE: This function can be called remotely by the tick offload that 2460 * goes along full dynticks. Therefore no local assumption can be made 2461 * and everything must be accessed through the @rq and @curr passed in 2462 * parameters. 2463 */ 2464 static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued) 2465 { 2466 update_curr_dl(rq); 2467 2468 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1); 2469 /* 2470 * Even when we have runtime, update_curr_dl() might have resulted in us 2471 * not being the leftmost task anymore. In that case NEED_RESCHED will 2472 * be set and schedule() will start a new hrtick for the next task. 
2473 */ 2474 if (hrtick_enabled_dl(rq) && queued && p->dl.runtime > 0 && 2475 is_leftmost(&p->dl, &rq->dl)) 2476 start_hrtick_dl(rq, &p->dl); 2477 } 2478 2479 static void task_fork_dl(struct task_struct *p) 2480 { 2481 /* 2482 * SCHED_DEADLINE tasks cannot fork and this is achieved through 2483 * sched_fork() 2484 */ 2485 } 2486 2487 #ifdef CONFIG_SMP 2488 2489 /* Only try algorithms three times */ 2490 #define DL_MAX_TRIES 3 2491 2492 /* 2493 * Return the earliest pushable rq's task, which is suitable to be executed 2494 * on the CPU, NULL otherwise: 2495 */ 2496 static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu) 2497 { 2498 struct task_struct *p = NULL; 2499 struct rb_node *next_node; 2500 2501 if (!has_pushable_dl_tasks(rq)) 2502 return NULL; 2503 2504 next_node = rb_first_cached(&rq->dl.pushable_dl_tasks_root); 2505 2506 next_node: 2507 if (next_node) { 2508 p = __node_2_pdl(next_node); 2509 2510 if (task_is_pushable(rq, p, cpu)) 2511 return p; 2512 2513 next_node = rb_next(next_node); 2514 goto next_node; 2515 } 2516 2517 return NULL; 2518 } 2519 2520 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl); 2521 2522 static int find_later_rq(struct task_struct *task) 2523 { 2524 struct sched_domain *sd; 2525 struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl); 2526 int this_cpu = smp_processor_id(); 2527 int cpu = task_cpu(task); 2528 2529 /* Make sure the mask is initialized first */ 2530 if (unlikely(!later_mask)) 2531 return -1; 2532 2533 if (task->nr_cpus_allowed == 1) 2534 return -1; 2535 2536 /* 2537 * We have to consider system topology and task affinity 2538 * first, then we can look for a suitable CPU. 2539 */ 2540 if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask)) 2541 return -1; 2542 2543 /* 2544 * If we are here, some targets have been found, including 2545 * the most suitable which is, among the runqueues where the 2546 * current tasks have later deadlines than the task's one, the 2547 * rq with the latest possible one. 2548 * 2549 * Now we check how well this matches with task's 2550 * affinity and system topology. 2551 * 2552 * The last CPU where the task run is our first 2553 * guess, since it is most likely cache-hot there. 2554 */ 2555 if (cpumask_test_cpu(cpu, later_mask)) 2556 return cpu; 2557 /* 2558 * Check if this_cpu is to be skipped (i.e., it is 2559 * not in the mask) or not. 2560 */ 2561 if (!cpumask_test_cpu(this_cpu, later_mask)) 2562 this_cpu = -1; 2563 2564 rcu_read_lock(); 2565 for_each_domain(cpu, sd) { 2566 if (sd->flags & SD_WAKE_AFFINE) { 2567 int best_cpu; 2568 2569 /* 2570 * If possible, preempting this_cpu is 2571 * cheaper than migrating. 2572 */ 2573 if (this_cpu != -1 && 2574 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) { 2575 rcu_read_unlock(); 2576 return this_cpu; 2577 } 2578 2579 best_cpu = cpumask_any_and_distribute(later_mask, 2580 sched_domain_span(sd)); 2581 /* 2582 * Last chance: if a CPU being in both later_mask 2583 * and current sd span is valid, that becomes our 2584 * choice. Of course, the latest possible CPU is 2585 * already under consideration through later_mask. 2586 */ 2587 if (best_cpu < nr_cpu_ids) { 2588 rcu_read_unlock(); 2589 return best_cpu; 2590 } 2591 } 2592 } 2593 rcu_read_unlock(); 2594 2595 /* 2596 * At this point, all our guesses failed, we just return 2597 * 'something', and let the caller sort the things out. 
2598 */ 2599 if (this_cpu != -1) 2600 return this_cpu; 2601 2602 cpu = cpumask_any_distribute(later_mask); 2603 if (cpu < nr_cpu_ids) 2604 return cpu; 2605 2606 return -1; 2607 } 2608 2609 /* Locks the rq it finds */ 2610 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq) 2611 { 2612 struct rq *later_rq = NULL; 2613 int tries; 2614 int cpu; 2615 2616 for (tries = 0; tries < DL_MAX_TRIES; tries++) { 2617 cpu = find_later_rq(task); 2618 2619 if ((cpu == -1) || (cpu == rq->cpu)) 2620 break; 2621 2622 later_rq = cpu_rq(cpu); 2623 2624 if (!dl_task_is_earliest_deadline(task, later_rq)) { 2625 /* 2626 * Target rq has tasks of equal or earlier deadline, 2627 * retrying does not release any lock and is unlikely 2628 * to yield a different result. 2629 */ 2630 later_rq = NULL; 2631 break; 2632 } 2633 2634 /* Retry if something changed. */ 2635 if (double_lock_balance(rq, later_rq)) { 2636 if (unlikely(task_rq(task) != rq || 2637 !cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) || 2638 task_on_cpu(rq, task) || 2639 !dl_task(task) || 2640 is_migration_disabled(task) || 2641 !task_on_rq_queued(task))) { 2642 double_unlock_balance(rq, later_rq); 2643 later_rq = NULL; 2644 break; 2645 } 2646 } 2647 2648 /* 2649 * If the rq we found has no -deadline task, or 2650 * its earliest one has a later deadline than our 2651 * task, the rq is a good one. 2652 */ 2653 if (dl_task_is_earliest_deadline(task, later_rq)) 2654 break; 2655 2656 /* Otherwise we try again. */ 2657 double_unlock_balance(rq, later_rq); 2658 later_rq = NULL; 2659 } 2660 2661 return later_rq; 2662 } 2663 2664 static struct task_struct *pick_next_pushable_dl_task(struct rq *rq) 2665 { 2666 struct task_struct *p; 2667 2668 if (!has_pushable_dl_tasks(rq)) 2669 return NULL; 2670 2671 p = __node_2_pdl(rb_first_cached(&rq->dl.pushable_dl_tasks_root)); 2672 2673 WARN_ON_ONCE(rq->cpu != task_cpu(p)); 2674 WARN_ON_ONCE(task_current(rq, p)); 2675 WARN_ON_ONCE(p->nr_cpus_allowed <= 1); 2676 2677 WARN_ON_ONCE(!task_on_rq_queued(p)); 2678 WARN_ON_ONCE(!dl_task(p)); 2679 2680 return p; 2681 } 2682 2683 /* 2684 * See if the non running -deadline tasks on this rq 2685 * can be sent to some other CPU where they can preempt 2686 * and start executing. 2687 */ 2688 static int push_dl_task(struct rq *rq) 2689 { 2690 struct task_struct *next_task; 2691 struct rq *later_rq; 2692 int ret = 0; 2693 2694 next_task = pick_next_pushable_dl_task(rq); 2695 if (!next_task) 2696 return 0; 2697 2698 retry: 2699 /* 2700 * If next_task preempts rq->curr, and rq->curr 2701 * can move away, it makes sense to just reschedule 2702 * without going further in pushing next_task. 2703 */ 2704 if (dl_task(rq->donor) && 2705 dl_time_before(next_task->dl.deadline, rq->donor->dl.deadline) && 2706 rq->curr->nr_cpus_allowed > 1) { 2707 resched_curr(rq); 2708 return 0; 2709 } 2710 2711 if (is_migration_disabled(next_task)) 2712 return 0; 2713 2714 if (WARN_ON(next_task == rq->curr)) 2715 return 0; 2716 2717 /* We might release rq lock */ 2718 get_task_struct(next_task); 2719 2720 /* Will lock the rq it'll find */ 2721 later_rq = find_lock_later_rq(next_task, rq); 2722 if (!later_rq) { 2723 struct task_struct *task; 2724 2725 /* 2726 * We must check all this again, since 2727 * find_lock_later_rq releases rq->lock and it is 2728 * then possible that next_task has migrated. 2729 */ 2730 task = pick_next_pushable_dl_task(rq); 2731 if (task == next_task) { 2732 /* 2733 * The task is still there. We don't try 2734 * again, some other CPU will pull it when ready. 
2735 */ 2736 goto out; 2737 } 2738 2739 if (!task) 2740 /* No more tasks */ 2741 goto out; 2742 2743 put_task_struct(next_task); 2744 next_task = task; 2745 goto retry; 2746 } 2747 2748 move_queued_task_locked(rq, later_rq, next_task); 2749 ret = 1; 2750 2751 resched_curr(later_rq); 2752 2753 double_unlock_balance(rq, later_rq); 2754 2755 out: 2756 put_task_struct(next_task); 2757 2758 return ret; 2759 } 2760 2761 static void push_dl_tasks(struct rq *rq) 2762 { 2763 /* push_dl_task() will return true if it moved a -deadline task */ 2764 while (push_dl_task(rq)) 2765 ; 2766 } 2767 2768 static void pull_dl_task(struct rq *this_rq) 2769 { 2770 int this_cpu = this_rq->cpu, cpu; 2771 struct task_struct *p, *push_task; 2772 bool resched = false; 2773 struct rq *src_rq; 2774 u64 dmin = LONG_MAX; 2775 2776 if (likely(!dl_overloaded(this_rq))) 2777 return; 2778 2779 /* 2780 * Match the barrier from dl_set_overloaded; this guarantees that if we 2781 * see overloaded we must also see the dlo_mask bit. 2782 */ 2783 smp_rmb(); 2784 2785 for_each_cpu(cpu, this_rq->rd->dlo_mask) { 2786 if (this_cpu == cpu) 2787 continue; 2788 2789 src_rq = cpu_rq(cpu); 2790 2791 /* 2792 * It looks racy, and it is! However, as in sched_rt.c, 2793 * we are fine with this. 2794 */ 2795 if (this_rq->dl.dl_nr_running && 2796 dl_time_before(this_rq->dl.earliest_dl.curr, 2797 src_rq->dl.earliest_dl.next)) 2798 continue; 2799 2800 /* Might drop this_rq->lock */ 2801 push_task = NULL; 2802 double_lock_balance(this_rq, src_rq); 2803 2804 /* 2805 * If there are no more pullable tasks on the 2806 * rq, we're done with it. 2807 */ 2808 if (src_rq->dl.dl_nr_running <= 1) 2809 goto skip; 2810 2811 p = pick_earliest_pushable_dl_task(src_rq, this_cpu); 2812 2813 /* 2814 * We found a task to be pulled if: 2815 * - it preempts our current (if there's one), 2816 * - it will preempt the last one we pulled (if any). 2817 */ 2818 if (p && dl_time_before(p->dl.deadline, dmin) && 2819 dl_task_is_earliest_deadline(p, this_rq)) { 2820 WARN_ON(p == src_rq->curr); 2821 WARN_ON(!task_on_rq_queued(p)); 2822 2823 /* 2824 * Then we pull iff p has actually an earlier 2825 * deadline than the current task of its runqueue. 2826 */ 2827 if (dl_time_before(p->dl.deadline, 2828 src_rq->donor->dl.deadline)) 2829 goto skip; 2830 2831 if (is_migration_disabled(p)) { 2832 push_task = get_push_task(src_rq); 2833 } else { 2834 move_queued_task_locked(src_rq, this_rq, p); 2835 dmin = p->dl.deadline; 2836 resched = true; 2837 } 2838 2839 /* Is there any other task even earlier? */ 2840 } 2841 skip: 2842 double_unlock_balance(this_rq, src_rq); 2843 2844 if (push_task) { 2845 preempt_disable(); 2846 raw_spin_rq_unlock(this_rq); 2847 stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop, 2848 push_task, &src_rq->push_work); 2849 preempt_enable(); 2850 raw_spin_rq_lock(this_rq); 2851 } 2852 } 2853 2854 if (resched) 2855 resched_curr(this_rq); 2856 } 2857 2858 /* 2859 * Since the task is not running and a reschedule is not going to happen 2860 * anytime soon on its runqueue, we try pushing it away now. 
2861 */ 2862 static void task_woken_dl(struct rq *rq, struct task_struct *p) 2863 { 2864 if (!task_on_cpu(rq, p) && 2865 !test_tsk_need_resched(rq->curr) && 2866 p->nr_cpus_allowed > 1 && 2867 dl_task(rq->donor) && 2868 (rq->curr->nr_cpus_allowed < 2 || 2869 !dl_entity_preempt(&p->dl, &rq->donor->dl))) { 2870 push_dl_tasks(rq); 2871 } 2872 } 2873 2874 static void set_cpus_allowed_dl(struct task_struct *p, 2875 struct affinity_context *ctx) 2876 { 2877 struct root_domain *src_rd; 2878 struct rq *rq; 2879 2880 WARN_ON_ONCE(!dl_task(p)); 2881 2882 rq = task_rq(p); 2883 src_rd = rq->rd; 2884 /* 2885 * Migrating a SCHED_DEADLINE task between exclusive 2886 * cpusets (different root_domains) entails a bandwidth 2887 * update. We already made space for us in the destination 2888 * domain (see cpuset_can_attach()). 2889 */ 2890 if (!cpumask_intersects(src_rd->span, ctx->new_mask)) { 2891 struct dl_bw *src_dl_b; 2892 2893 src_dl_b = dl_bw_of(cpu_of(rq)); 2894 /* 2895 * We now free resources of the root_domain we are migrating 2896 * off. In the worst case, sched_setattr() may temporary fail 2897 * until we complete the update. 2898 */ 2899 raw_spin_lock(&src_dl_b->lock); 2900 __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); 2901 raw_spin_unlock(&src_dl_b->lock); 2902 } 2903 2904 set_cpus_allowed_common(p, ctx); 2905 } 2906 2907 /* Assumes rq->lock is held */ 2908 static void rq_online_dl(struct rq *rq) 2909 { 2910 if (rq->dl.overloaded) 2911 dl_set_overload(rq); 2912 2913 cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu); 2914 if (rq->dl.dl_nr_running > 0) 2915 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr); 2916 } 2917 2918 /* Assumes rq->lock is held */ 2919 static void rq_offline_dl(struct rq *rq) 2920 { 2921 if (rq->dl.overloaded) 2922 dl_clear_overload(rq); 2923 2924 cpudl_clear(&rq->rd->cpudl, rq->cpu); 2925 cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu); 2926 } 2927 2928 void __init init_sched_dl_class(void) 2929 { 2930 unsigned int i; 2931 2932 for_each_possible_cpu(i) 2933 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i), 2934 GFP_KERNEL, cpu_to_node(i)); 2935 } 2936 2937 void dl_add_task_root_domain(struct task_struct *p) 2938 { 2939 struct rq_flags rf; 2940 struct rq *rq; 2941 struct dl_bw *dl_b; 2942 2943 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); 2944 if (!dl_task(p)) { 2945 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags); 2946 return; 2947 } 2948 2949 rq = __task_rq_lock(p, &rf); 2950 2951 dl_b = &rq->rd->dl_bw; 2952 raw_spin_lock(&dl_b->lock); 2953 2954 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span)); 2955 2956 raw_spin_unlock(&dl_b->lock); 2957 2958 task_rq_unlock(rq, p, &rf); 2959 } 2960 2961 void dl_clear_root_domain(struct root_domain *rd) 2962 { 2963 unsigned long flags; 2964 2965 raw_spin_lock_irqsave(&rd->dl_bw.lock, flags); 2966 rd->dl_bw.total_bw = 0; 2967 raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags); 2968 } 2969 2970 #endif /* CONFIG_SMP */ 2971 2972 static void switched_from_dl(struct rq *rq, struct task_struct *p) 2973 { 2974 /* 2975 * task_non_contending() can start the "inactive timer" (if the 0-lag 2976 * time is in the future). If the task switches back to dl before 2977 * the "inactive timer" fires, it can continue to consume its current 2978 * runtime using its current deadline. If it stays outside of 2979 * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer() 2980 * will reset the task parameters. 
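 *
 * For reference, the 0-lag instant computed by task_non_contending() is
 * roughly (see that function for the exact fixed-point arithmetic):
 *
 *	t_0lag = deadline - runtime * dl_period / dl_runtime
 *
 * e.g. with dl_runtime = 10 ms, dl_period = 100 ms and 5 ms of budget
 * left, the 0-lag point sits 50 ms before the current deadline; if that
 * instant has already passed, the bandwidth is released immediately
 * instead of arming the inactive timer.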
2981 */ 2982 if (task_on_rq_queued(p) && p->dl.dl_runtime) 2983 task_non_contending(&p->dl); 2984 2985 /* 2986 * In case a task is setscheduled out from SCHED_DEADLINE we need to 2987 * keep track of that on its cpuset (for correct bandwidth tracking). 2988 */ 2989 dec_dl_tasks_cs(p); 2990 2991 if (!task_on_rq_queued(p)) { 2992 /* 2993 * Inactive timer is armed. However, p is leaving DEADLINE and 2994 * might migrate away from this rq while continuing to run on 2995 * some other class. We need to remove its contribution from 2996 * this rq running_bw now, or sub_rq_bw (below) will complain. 2997 */ 2998 if (p->dl.dl_non_contending) 2999 sub_running_bw(&p->dl, &rq->dl); 3000 sub_rq_bw(&p->dl, &rq->dl); 3001 } 3002 3003 /* 3004 * We cannot use inactive_task_timer() to invoke sub_running_bw() 3005 * at the 0-lag time, because the task could have been migrated 3006 * while SCHED_OTHER in the meanwhile. 3007 */ 3008 if (p->dl.dl_non_contending) 3009 p->dl.dl_non_contending = 0; 3010 3011 /* 3012 * Since this might be the only -deadline task on the rq, 3013 * this is the right place to try to pull some other one 3014 * from an overloaded CPU, if any. 3015 */ 3016 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running) 3017 return; 3018 3019 deadline_queue_pull_task(rq); 3020 } 3021 3022 /* 3023 * When switching to -deadline, we may overload the rq, then 3024 * we try to push someone off, if possible. 3025 */ 3026 static void switched_to_dl(struct rq *rq, struct task_struct *p) 3027 { 3028 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1) 3029 put_task_struct(p); 3030 3031 /* 3032 * In case a task is setscheduled to SCHED_DEADLINE we need to keep 3033 * track of that on its cpuset (for correct bandwidth tracking). 3034 */ 3035 inc_dl_tasks_cs(p); 3036 3037 /* If p is not queued we will update its parameters at next wakeup. */ 3038 if (!task_on_rq_queued(p)) { 3039 add_rq_bw(&p->dl, &rq->dl); 3040 3041 return; 3042 } 3043 3044 if (rq->donor != p) { 3045 #ifdef CONFIG_SMP 3046 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded) 3047 deadline_queue_push_tasks(rq); 3048 #endif 3049 if (dl_task(rq->donor)) 3050 wakeup_preempt_dl(rq, p, 0); 3051 else 3052 resched_curr(rq); 3053 } else { 3054 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0); 3055 } 3056 } 3057 3058 /* 3059 * If the scheduling parameters of a -deadline task changed, 3060 * a push or pull operation might be needed. 3061 */ 3062 static void prio_changed_dl(struct rq *rq, struct task_struct *p, 3063 int oldprio) 3064 { 3065 if (!task_on_rq_queued(p)) 3066 return; 3067 3068 #ifdef CONFIG_SMP 3069 /* 3070 * This might be too much, but unfortunately 3071 * we don't have the old deadline value, and 3072 * we can't argue if the task is increasing 3073 * or lowering its prio, so... 3074 */ 3075 if (!rq->dl.overloaded) 3076 deadline_queue_pull_task(rq); 3077 3078 if (task_current_donor(rq, p)) { 3079 /* 3080 * If we now have a earlier deadline task than p, 3081 * then reschedule, provided p is still on this 3082 * runqueue. 3083 */ 3084 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline)) 3085 resched_curr(rq); 3086 } else { 3087 /* 3088 * Current may not be deadline in case p was throttled but we 3089 * have just replenished it (e.g. rt_mutex_setprio()). 3090 * 3091 * Otherwise, if p was given an earlier deadline, reschedule. 
3092 	 */
3093 		if (!dl_task(rq->curr) ||
3094 		    dl_time_before(p->dl.deadline, rq->curr->dl.deadline))
3095 			resched_curr(rq);
3096 	}
3097 #else
3098 	/*
3099 	 * We don't know if p has an earlier or later deadline, so let's blindly
3100 	 * set a (maybe not needed) rescheduling point.
3101 	 */
3102 	resched_curr(rq);
3103 #endif
3104 }
3105 
3106 #ifdef CONFIG_SCHED_CORE
3107 static int task_is_throttled_dl(struct task_struct *p, int cpu)
3108 {
3109 	return p->dl.dl_throttled;
3110 }
3111 #endif
3112 
3113 DEFINE_SCHED_CLASS(dl) = {
3114 
3115 	.enqueue_task		= enqueue_task_dl,
3116 	.dequeue_task		= dequeue_task_dl,
3117 	.yield_task		= yield_task_dl,
3118 
3119 	.wakeup_preempt		= wakeup_preempt_dl,
3120 
3121 	.pick_task		= pick_task_dl,
3122 	.put_prev_task		= put_prev_task_dl,
3123 	.set_next_task		= set_next_task_dl,
3124 
3125 #ifdef CONFIG_SMP
3126 	.balance		= balance_dl,
3127 	.select_task_rq		= select_task_rq_dl,
3128 	.migrate_task_rq	= migrate_task_rq_dl,
3129 	.set_cpus_allowed	= set_cpus_allowed_dl,
3130 	.rq_online		= rq_online_dl,
3131 	.rq_offline		= rq_offline_dl,
3132 	.task_woken		= task_woken_dl,
3133 	.find_lock_rq		= find_lock_later_rq,
3134 #endif
3135 
3136 	.task_tick		= task_tick_dl,
3137 	.task_fork		= task_fork_dl,
3138 
3139 	.prio_changed		= prio_changed_dl,
3140 	.switched_from		= switched_from_dl,
3141 	.switched_to		= switched_to_dl,
3142 
3143 	.update_curr		= update_curr_dl,
3144 #ifdef CONFIG_SCHED_CORE
3145 	.task_is_throttled	= task_is_throttled_dl,
3146 #endif
3147 };
3148 
3149 /* Used for dl_bw check and update; serialized by sched_rt_handler()::mutex */
3150 static u64 dl_generation;
3151 
3152 int sched_dl_global_validate(void)
3153 {
3154 	u64 runtime = global_rt_runtime();
3155 	u64 period = global_rt_period();
3156 	u64 new_bw = to_ratio(period, runtime);
3157 	u64 gen = ++dl_generation;
3158 	struct dl_bw *dl_b;
3159 	int cpu, cpus, ret = 0;
3160 	unsigned long flags;
3161 
3162 	/*
3163 	 * Here we want to check that the new bandwidth is not being set to
3164 	 * a value smaller than the bandwidth currently allocated in any of
3165 	 * the root_domains.
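 *
 * Numeric sketch (illustrative): with the default sysctls
 * sched_rt_runtime_us = 950000 and sched_rt_period_us = 1000000,
 * new_bw = to_ratio(period, runtime) is roughly 95% of BW_UNIT
 * (1 << BW_SHIFT).  A root_domain spanning N CPUs is then rejected
 * with -EBUSY if
 *
 *	new_bw * N  <  dl_b->total_bw
 *
 * i.e. if the deadline bandwidth already admitted there would exceed
 * the new global limit.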
3166 */ 3167 for_each_possible_cpu(cpu) { 3168 rcu_read_lock_sched(); 3169 3170 if (dl_bw_visited(cpu, gen)) 3171 goto next; 3172 3173 dl_b = dl_bw_of(cpu); 3174 cpus = dl_bw_cpus(cpu); 3175 3176 raw_spin_lock_irqsave(&dl_b->lock, flags); 3177 if (new_bw * cpus < dl_b->total_bw) 3178 ret = -EBUSY; 3179 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 3180 3181 next: 3182 rcu_read_unlock_sched(); 3183 3184 if (ret) 3185 break; 3186 } 3187 3188 return ret; 3189 } 3190 3191 static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq) 3192 { 3193 if (global_rt_runtime() == RUNTIME_INF) { 3194 dl_rq->bw_ratio = 1 << RATIO_SHIFT; 3195 dl_rq->max_bw = dl_rq->extra_bw = 1 << BW_SHIFT; 3196 } else { 3197 dl_rq->bw_ratio = to_ratio(global_rt_runtime(), 3198 global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT); 3199 dl_rq->max_bw = dl_rq->extra_bw = 3200 to_ratio(global_rt_period(), global_rt_runtime()); 3201 } 3202 } 3203 3204 void sched_dl_do_global(void) 3205 { 3206 u64 new_bw = -1; 3207 u64 gen = ++dl_generation; 3208 struct dl_bw *dl_b; 3209 int cpu; 3210 unsigned long flags; 3211 3212 if (global_rt_runtime() != RUNTIME_INF) 3213 new_bw = to_ratio(global_rt_period(), global_rt_runtime()); 3214 3215 for_each_possible_cpu(cpu) { 3216 rcu_read_lock_sched(); 3217 3218 if (dl_bw_visited(cpu, gen)) { 3219 rcu_read_unlock_sched(); 3220 continue; 3221 } 3222 3223 dl_b = dl_bw_of(cpu); 3224 3225 raw_spin_lock_irqsave(&dl_b->lock, flags); 3226 dl_b->bw = new_bw; 3227 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 3228 3229 rcu_read_unlock_sched(); 3230 init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl); 3231 } 3232 } 3233 3234 /* 3235 * We must be sure that accepting a new task (or allowing changing the 3236 * parameters of an existing one) is consistent with the bandwidth 3237 * constraints. If yes, this function also accordingly updates the currently 3238 * allocated bandwidth to reflect the new situation. 3239 * 3240 * This function is called while holding p's rq->lock. 3241 */ 3242 int sched_dl_overflow(struct task_struct *p, int policy, 3243 const struct sched_attr *attr) 3244 { 3245 u64 period = attr->sched_period ?: attr->sched_deadline; 3246 u64 runtime = attr->sched_runtime; 3247 u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0; 3248 int cpus, err = -1, cpu = task_cpu(p); 3249 struct dl_bw *dl_b = dl_bw_of(cpu); 3250 unsigned long cap; 3251 3252 if (attr->sched_flags & SCHED_FLAG_SUGOV) 3253 return 0; 3254 3255 /* !deadline task may carry old deadline bandwidth */ 3256 if (new_bw == p->dl.dl_bw && task_has_dl_policy(p)) 3257 return 0; 3258 3259 /* 3260 * Either if a task, enters, leave, or stays -deadline but changes 3261 * its parameters, we may need to update accordingly the total 3262 * allocated bandwidth of the container. 3263 */ 3264 raw_spin_lock(&dl_b->lock); 3265 cpus = dl_bw_cpus(cpu); 3266 cap = dl_bw_capacity(cpu); 3267 3268 if (dl_policy(policy) && !task_has_dl_policy(p) && 3269 !__dl_overflow(dl_b, cap, 0, new_bw)) { 3270 if (hrtimer_active(&p->dl.inactive_timer)) 3271 __dl_sub(dl_b, p->dl.dl_bw, cpus); 3272 __dl_add(dl_b, new_bw, cpus); 3273 err = 0; 3274 } else if (dl_policy(policy) && task_has_dl_policy(p) && 3275 !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) { 3276 /* 3277 * XXX this is slightly incorrect: when the task 3278 * utilization decreases, we should delay the total 3279 * utilization change until the task's 0-lag point. 3280 * But this would require to set the task's "inactive 3281 * timer" when the task is not inactive. 
3282 */ 3283 __dl_sub(dl_b, p->dl.dl_bw, cpus); 3284 __dl_add(dl_b, new_bw, cpus); 3285 dl_change_utilization(p, new_bw); 3286 err = 0; 3287 } else if (!dl_policy(policy) && task_has_dl_policy(p)) { 3288 /* 3289 * Do not decrease the total deadline utilization here, 3290 * switched_from_dl() will take care to do it at the correct 3291 * (0-lag) time. 3292 */ 3293 err = 0; 3294 } 3295 raw_spin_unlock(&dl_b->lock); 3296 3297 return err; 3298 } 3299 3300 /* 3301 * This function initializes the sched_dl_entity of a newly becoming 3302 * SCHED_DEADLINE task. 3303 * 3304 * Only the static values are considered here, the actual runtime and the 3305 * absolute deadline will be properly calculated when the task is enqueued 3306 * for the first time with its new policy. 3307 */ 3308 void __setparam_dl(struct task_struct *p, const struct sched_attr *attr) 3309 { 3310 struct sched_dl_entity *dl_se = &p->dl; 3311 3312 dl_se->dl_runtime = attr->sched_runtime; 3313 dl_se->dl_deadline = attr->sched_deadline; 3314 dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline; 3315 dl_se->flags = attr->sched_flags & SCHED_DL_FLAGS; 3316 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime); 3317 dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime); 3318 } 3319 3320 void __getparam_dl(struct task_struct *p, struct sched_attr *attr) 3321 { 3322 struct sched_dl_entity *dl_se = &p->dl; 3323 3324 attr->sched_priority = p->rt_priority; 3325 attr->sched_runtime = dl_se->dl_runtime; 3326 attr->sched_deadline = dl_se->dl_deadline; 3327 attr->sched_period = dl_se->dl_period; 3328 attr->sched_flags &= ~SCHED_DL_FLAGS; 3329 attr->sched_flags |= dl_se->flags; 3330 } 3331 3332 /* 3333 * This function validates the new parameters of a -deadline task. 3334 * We ask for the deadline not being zero, and greater or equal 3335 * than the runtime, as well as the period of being zero or 3336 * greater than deadline. Furthermore, we have to be sure that 3337 * user parameters are above the internal resolution of 1us (we 3338 * check sched_runtime only since it is always the smaller one) and 3339 * below 2^63 ns (we have to check both sched_deadline and 3340 * sched_period, as the latter can be zero). 3341 */ 3342 bool __checkparam_dl(const struct sched_attr *attr) 3343 { 3344 u64 period, max, min; 3345 3346 /* special dl tasks don't actually use any parameter */ 3347 if (attr->sched_flags & SCHED_FLAG_SUGOV) 3348 return true; 3349 3350 /* deadline != 0 */ 3351 if (attr->sched_deadline == 0) 3352 return false; 3353 3354 /* 3355 * Since we truncate DL_SCALE bits, make sure we're at least 3356 * that big. 3357 */ 3358 if (attr->sched_runtime < (1ULL << DL_SCALE)) 3359 return false; 3360 3361 /* 3362 * Since we use the MSB for wrap-around and sign issues, make 3363 * sure it's not set (mind that period can be equal to zero). 3364 */ 3365 if (attr->sched_deadline & (1ULL << 63) || 3366 attr->sched_period & (1ULL << 63)) 3367 return false; 3368 3369 period = attr->sched_period; 3370 if (!period) 3371 period = attr->sched_deadline; 3372 3373 /* runtime <= deadline <= period (if period != 0) */ 3374 if (period < attr->sched_deadline || 3375 attr->sched_deadline < attr->sched_runtime) 3376 return false; 3377 3378 max = (u64)READ_ONCE(sysctl_sched_dl_period_max) * NSEC_PER_USEC; 3379 min = (u64)READ_ONCE(sysctl_sched_dl_period_min) * NSEC_PER_USEC; 3380 3381 if (period < min || period > max) 3382 return false; 3383 3384 return true; 3385 } 3386 3387 /* 3388 * This function clears the sched_dl_entity static params. 
3389 */ 3390 static void __dl_clear_params(struct sched_dl_entity *dl_se) 3391 { 3392 dl_se->dl_runtime = 0; 3393 dl_se->dl_deadline = 0; 3394 dl_se->dl_period = 0; 3395 dl_se->flags = 0; 3396 dl_se->dl_bw = 0; 3397 dl_se->dl_density = 0; 3398 3399 dl_se->dl_throttled = 0; 3400 dl_se->dl_yielded = 0; 3401 dl_se->dl_non_contending = 0; 3402 dl_se->dl_overrun = 0; 3403 dl_se->dl_server = 0; 3404 3405 #ifdef CONFIG_RT_MUTEXES 3406 dl_se->pi_se = dl_se; 3407 #endif 3408 } 3409 3410 void init_dl_entity(struct sched_dl_entity *dl_se) 3411 { 3412 RB_CLEAR_NODE(&dl_se->rb_node); 3413 init_dl_task_timer(dl_se); 3414 init_dl_inactive_task_timer(dl_se); 3415 __dl_clear_params(dl_se); 3416 } 3417 3418 bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr) 3419 { 3420 struct sched_dl_entity *dl_se = &p->dl; 3421 3422 if (dl_se->dl_runtime != attr->sched_runtime || 3423 dl_se->dl_deadline != attr->sched_deadline || 3424 dl_se->dl_period != attr->sched_period || 3425 dl_se->flags != (attr->sched_flags & SCHED_DL_FLAGS)) 3426 return true; 3427 3428 return false; 3429 } 3430 3431 #ifdef CONFIG_SMP 3432 int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, 3433 const struct cpumask *trial) 3434 { 3435 unsigned long flags, cap; 3436 struct dl_bw *cur_dl_b; 3437 int ret = 1; 3438 3439 rcu_read_lock_sched(); 3440 cur_dl_b = dl_bw_of(cpumask_any(cur)); 3441 cap = __dl_bw_capacity(trial); 3442 raw_spin_lock_irqsave(&cur_dl_b->lock, flags); 3443 if (__dl_overflow(cur_dl_b, cap, 0, 0)) 3444 ret = 0; 3445 raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags); 3446 rcu_read_unlock_sched(); 3447 3448 return ret; 3449 } 3450 3451 enum dl_bw_request { 3452 dl_bw_req_check_overflow = 0, 3453 dl_bw_req_alloc, 3454 dl_bw_req_free 3455 }; 3456 3457 static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw) 3458 { 3459 unsigned long flags; 3460 struct dl_bw *dl_b; 3461 bool overflow = 0; 3462 3463 rcu_read_lock_sched(); 3464 dl_b = dl_bw_of(cpu); 3465 raw_spin_lock_irqsave(&dl_b->lock, flags); 3466 3467 if (req == dl_bw_req_free) { 3468 __dl_sub(dl_b, dl_bw, dl_bw_cpus(cpu)); 3469 } else { 3470 unsigned long cap = dl_bw_capacity(cpu); 3471 3472 overflow = __dl_overflow(dl_b, cap, 0, dl_bw); 3473 3474 if (req == dl_bw_req_alloc && !overflow) { 3475 /* 3476 * We reserve space in the destination 3477 * root_domain, as we can't fail after this point. 3478 * We will free resources in the source root_domain 3479 * later on (see set_cpus_allowed_dl()). 3480 */ 3481 __dl_add(dl_b, dl_bw, dl_bw_cpus(cpu)); 3482 } 3483 } 3484 3485 raw_spin_unlock_irqrestore(&dl_b->lock, flags); 3486 rcu_read_unlock_sched(); 3487 3488 return overflow ? -EBUSY : 0; 3489 } 3490 3491 int dl_bw_check_overflow(int cpu) 3492 { 3493 return dl_bw_manage(dl_bw_req_check_overflow, cpu, 0); 3494 } 3495 3496 int dl_bw_alloc(int cpu, u64 dl_bw) 3497 { 3498 return dl_bw_manage(dl_bw_req_alloc, cpu, dl_bw); 3499 } 3500 3501 void dl_bw_free(int cpu, u64 dl_bw) 3502 { 3503 dl_bw_manage(dl_bw_req_free, cpu, dl_bw); 3504 } 3505 #endif 3506 3507 #ifdef CONFIG_SCHED_DEBUG 3508 void print_dl_stats(struct seq_file *m, int cpu) 3509 { 3510 print_dl_rq(m, cpu, &cpu_rq(cpu)->dl); 3511 } 3512 #endif /* CONFIG_SCHED_DEBUG */ 3513
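
/*
 * Usage sketch (illustrative only, not part of the kernel sources): how a
 * userspace thread could request SCHED_DEADLINE parameters that pass
 * __checkparam_dl() and the admission test in sched_dl_overflow().  The
 * raw syscall is used because glibc does not wrap sched_setattr(); the
 * struct layout follows include/uapi/linux/sched/types.h (trailing
 * utilization-clamp fields omitted, which the size field makes legal).
 *
 *	#define _GNU_SOURCE
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/types.h>
 *
 *	struct sched_attr {
 *		__u32 size;
 *		__u32 sched_policy;
 *		__u64 sched_flags;
 *		__s32 sched_nice;
 *		__u32 sched_priority;
 *		__u64 sched_runtime;
 *		__u64 sched_deadline;
 *		__u64 sched_period;
 *	};
 *
 *	int become_deadline_task(void)
 *	{
 *		struct sched_attr attr = {
 *			.size           = sizeof(attr),
 *			.sched_policy   = 6,                    // SCHED_DEADLINE
 *			.sched_runtime  =  10 * 1000 * 1000,    // 10 ms
 *			.sched_deadline =  30 * 1000 * 1000,    // 30 ms
 *			.sched_period   = 100 * 1000 * 1000,    // 100 ms
 *		};
 *
 *		// runtime <= deadline <= period, runtime >= 1 << DL_SCALE,
 *		// and period within sched_deadline_period_{min,max}_us.
 *		return syscall(SYS_sched_setattr, 0, &attr, 0);
 *	}
 */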