/*
 * Deadline Scheduling Class (SCHED_DEADLINE)
 *
 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
 *
 * Tasks that periodically execute their instances for less than their
 * runtime won't miss any of their deadlines.
 * Tasks that are not periodic or sporadic or that try to execute more
 * than their reserved bandwidth will be slowed down (and may potentially
 * miss some of their deadlines), and won't affect any other task.
 *
 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
 *                    Juri Lelli <juri.lelli@gmail.com>,
 *                    Michael Trimarchi <michael@amarulasolutions.com>,
 *                    Fabio Checconi <fchecconi@gmail.com>
 */
#include "sched.h"

#include <linux/slab.h>

struct dl_bandwidth def_dl_bandwidth;

static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
{
	return container_of(dl_se, struct task_struct, dl);
}

static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
	return container_of(dl_rq, struct rq, dl);
}

static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
{
	struct task_struct *p = dl_task_of(dl_se);
	struct rq *rq = task_rq(p);

	return &rq->dl;
}

static inline int on_dl_rq(struct sched_dl_entity *dl_se)
{
	return !RB_EMPTY_NODE(&dl_se->rb_node);
}

static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
{
	struct sched_dl_entity *dl_se = &p->dl;

	return dl_rq->rb_leftmost == &dl_se->rb_node;
}

void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
{
	raw_spin_lock_init(&dl_b->dl_runtime_lock);
	dl_b->dl_period = period;
	dl_b->dl_runtime = runtime;
}

void init_dl_bw(struct dl_bw *dl_b)
{
	raw_spin_lock_init(&dl_b->lock);
	raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
	if (global_rt_runtime() == RUNTIME_INF)
		dl_b->bw = -1;
	else
		dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
	raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
	dl_b->total_bw = 0;
}

void init_dl_rq(struct dl_rq *dl_rq)
{
	dl_rq->rb_root = RB_ROOT;

#ifdef CONFIG_SMP
	/* zero means no -deadline tasks */
	dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;

	dl_rq->dl_nr_migratory = 0;
	dl_rq->overloaded = 0;
	dl_rq->pushable_dl_tasks_root = RB_ROOT;
#else
	init_dl_bw(&dl_rq->dl_bw);
#endif
}

#ifdef CONFIG_SMP

static inline int dl_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->dlo_count);
}

static inline void dl_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
	/*
	 * Must be visible before the overload count is
	 * set (as in sched_rt.c).
	 *
	 * Matched by the barrier in pull_dl_task().
	 */
	smp_wmb();
	atomic_inc(&rq->rd->dlo_count);
}

static inline void dl_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	atomic_dec(&rq->rd->dlo_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
}

static void update_dl_migration(struct dl_rq *dl_rq)
{
	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
		if (!dl_rq->overloaded) {
			dl_set_overload(rq_of_dl_rq(dl_rq));
			dl_rq->overloaded = 1;
		}
	} else if (dl_rq->overloaded) {
		dl_clear_overload(rq_of_dl_rq(dl_rq));
		dl_rq->overloaded = 0;
	}
}

static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	struct task_struct *p = dl_task_of(dl_se);

	if (p->nr_cpus_allowed > 1)
		dl_rq->dl_nr_migratory++;

	update_dl_migration(dl_rq);
}

static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	struct task_struct *p = dl_task_of(dl_se);

	if (p->nr_cpus_allowed > 1)
		dl_rq->dl_nr_migratory--;

	update_dl_migration(dl_rq);
}

/*
 * The list of pushable -deadline tasks is not a plist, like in
 * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
 */
static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
	struct dl_rq *dl_rq = &rq->dl;
	struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node;
	struct rb_node *parent = NULL;
	struct task_struct *entry;
	int leftmost = 1;

	BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct task_struct,
				 pushable_dl_tasks);
		if (dl_entity_preempt(&p->dl, &entry->dl))
			link = &parent->rb_left;
		else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost) {
		dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;
		dl_rq->earliest_dl.next = p->dl.deadline;
	}

	rb_link_node(&p->pushable_dl_tasks, parent, link);
	rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
}

static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
	struct dl_rq *dl_rq = &rq->dl;

	if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
		return;

	if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) {
		struct rb_node *next_node;

		next_node = rb_next(&p->pushable_dl_tasks);
		dl_rq->pushable_dl_tasks_leftmost = next_node;
		if (next_node) {
			dl_rq->earliest_dl.next = rb_entry(next_node,
				struct task_struct, pushable_dl_tasks)->dl.deadline;
		}
	}

	rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
	RB_CLEAR_NODE(&p->pushable_dl_tasks);
}

static inline int has_pushable_dl_tasks(struct rq *rq)
{
	return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root);
}

static int push_dl_task(struct rq *rq);

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
	return dl_task(prev);
}

static DEFINE_PER_CPU(struct callback_head, dl_push_head);
static DEFINE_PER_CPU(struct callback_head, dl_pull_head);

static void push_dl_tasks(struct rq *);
static void pull_dl_task(struct rq *);

static inline void queue_push_tasks(struct rq *rq)
{
	if (!has_pushable_dl_tasks(rq))
		return;

	queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
}

static inline void queue_pull_task(struct rq *rq)
{
	queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
}

static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);

static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
{
	struct rq *later_rq = NULL;
	bool fallback = false;

	later_rq = find_lock_later_rq(p, rq);

	if (!later_rq) {
		int cpu;

		/*
		 * If we cannot preempt any rq, fall back to pick any
		 * online cpu.
		 */
		fallback = true;
		cpu = cpumask_any_and(cpu_active_mask, tsk_cpus_allowed(p));
		if (cpu >= nr_cpu_ids) {
			/*
			 * Failed to find any suitable cpu.
			 * The task will never come back!
			 */
			BUG_ON(dl_bandwidth_enabled());

			/*
			 * If admission control is disabled we
			 * try a little harder to let the task
			 * run.
			 */
			cpu = cpumask_any(cpu_active_mask);
		}
		later_rq = cpu_rq(cpu);
		double_lock_balance(rq, later_rq);
	}

	/*
	 * By now the task is replenished and enqueued; migrate it.
	 */
	deactivate_task(rq, p, 0);
	set_task_cpu(p, later_rq->cpu);
	activate_task(later_rq, p, 0);

	if (!fallback)
		resched_curr(later_rq);

	double_unlock_balance(later_rq, rq);

	return later_rq;
}

#else

static inline
void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline
void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
	return false;
}

static inline void pull_dl_task(struct rq *rq)
{
}

static inline void queue_push_tasks(struct rq *rq)
{
}

static inline void queue_pull_task(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
				  int flags);

/*
 * We are being explicitly informed that a new instance is starting,
 * and this means that:
 *  - the absolute deadline of the entity has to be placed at
 *    current time + relative deadline;
 *  - the runtime of the entity has to be set to the maximum value.
 *
 * The capability of specifying such an event is useful whenever a -deadline
 * entity wants to (try to!) synchronize its behaviour with the scheduler's
 * one, and to (try to!) reconcile itself with its own scheduling
 * parameters.
 */
static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
				       struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	WARN_ON(!dl_se->dl_new || dl_se->dl_throttled);

	/*
	 * We use the regular wall clock time to set deadlines in the
	 * future; in fact, we must consider execution overheads (time
	 * spent on hardirq context, etc.).
	 */
	dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
	dl_se->runtime = pi_se->dl_runtime;
	dl_se->dl_new = 0;
}

/*
 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 * possibility of an entity lasting more than what it declared, and thus
 * exhausting its runtime.
 *
 * Here we are interested in making runtime overrun possible, but we do
 * not want a misbehaving entity to affect the scheduling of all
 * other entities.
 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 * is used, in order to confine each entity within its own bandwidth.
 *
 * This function deals exactly with that, and ensures that when the runtime
 * of an entity is replenished, its deadline is also postponed. That ensures
 * the overrunning entity can't interfere with other entities in the system and
 * can't make them miss their deadlines. Reasons why this kind of overrun
 * could happen are, typically, that the entity voluntarily tried to overrun
 * its runtime, or that it just underestimated it during sched_setattr().
 */
static void replenish_dl_entity(struct sched_dl_entity *dl_se,
				struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	BUG_ON(pi_se->dl_runtime <= 0);

	/*
	 * This could be the case for a !-dl task that is boosted.
	 * Just go with full inherited parameters.
	 */
	if (dl_se->dl_deadline == 0) {
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}

	/*
	 * We keep moving the deadline away until we get some
	 * available runtime for the entity. This ensures correct
	 * handling of situations where the runtime overrun is
	 * arbitrarily large.
	 */
	while (dl_se->runtime <= 0) {
		dl_se->deadline += pi_se->dl_period;
		dl_se->runtime += pi_se->dl_runtime;
	}

	/*
	 * At this point, the deadline really should be "in
	 * the future" with respect to rq->clock. If it's
	 * not, we are, for some reason, lagging too much!
	 * Anyway, after having warned userspace about that,
	 * we still try to keep things running by
	 * resetting the deadline and the budget of the
	 * entity.
	 */
	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
		printk_deferred_once("sched: DL replenish lagged too much\n");
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}

	if (dl_se->dl_yielded)
		dl_se->dl_yielded = 0;
	if (dl_se->dl_throttled)
		dl_se->dl_throttled = 0;
}

/*
 * Here we check if --at time t-- an entity (which is probably being
 * [re]activated or, in general, enqueued) can use its remaining runtime
 * and its current deadline _without_ exceeding the bandwidth it is
 * assigned (function returns true if it can't). We are in fact applying
 * one of the CBS rules: when a task wakes up, if the residual runtime
 * over residual deadline fits within the allocated bandwidth, then we
 * can keep the current (absolute) deadline and residual budget without
 * disrupting the schedulability of the system. Otherwise, we should
 * refill the runtime and set the deadline a period in the future,
 * because keeping the current (absolute) deadline of the task would
 * result in breaking guarantees promised to other tasks (refer to
 * Documentation/scheduler/sched-deadline.txt for more information).
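 *
 * As a purely illustrative example (these numbers are not taken from the
 * code): a task with dl_runtime = 10ms and dl_period = 100ms that wakes up
 * with 5ms of residual runtime and 40ms left to its old absolute deadline
 * would need 5/40 = 12.5% of the CPU, more than its 10% reservation, so
 * the deadline must be pushed away and the runtime refilled; with 60ms
 * left instead (5/60 ~ 8.3% < 10%) the old deadline and budget can be kept.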
 *
 * This function returns true if:
 *
 *   runtime / (deadline - t) > dl_runtime / dl_period ,
 *
 * IOW we can't recycle current parameters.
 *
 * Notice that the bandwidth check is done against the period. For a
 * task with deadline equal to period this is the same as using
 * dl_deadline instead of dl_period in the equation above.
 */
static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
			       struct sched_dl_entity *pi_se, u64 t)
{
	u64 left, right;

	/*
	 * left and right are the two sides of the equation above,
	 * after a bit of shuffling to use multiplications instead
	 * of divisions.
	 *
	 * Note that none of the time values involved in the two
	 * multiplications are absolute: dl_deadline and dl_runtime
	 * are the relative deadline and the maximum runtime of each
	 * instance, runtime is the runtime left for the last instance
	 * and (deadline - t), since t is rq->clock, is the time left
	 * to the (absolute) deadline. Even if overflowing the u64 type
	 * is very unlikely to occur in both cases, here we scale down
	 * as we want to avoid that risk at all. Scaling down by 10
	 * means that we reduce granularity to 1us. We are fine with it,
	 * since this is only a true/false check and, anyway, thinking
	 * of anything below microseconds resolution is actually fiction
	 * (but still we want to give the user that illusion >;).
	 */
	left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
	right = ((dl_se->deadline - t) >> DL_SCALE) *
		(pi_se->dl_runtime >> DL_SCALE);

	return dl_time_before(right, left);
}

/*
 * When a -deadline entity is queued back on the runqueue, its runtime and
 * deadline might need updating.
 *
 * The policy here is that we update the deadline of the entity only if:
 *  - the current deadline is in the past,
 *  - using the remaining runtime with the current deadline would make
 *    the entity exceed its bandwidth.
 */
static void update_dl_entity(struct sched_dl_entity *dl_se,
			     struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	/*
	 * The arrival of a new instance needs special treatment, i.e.,
	 * the actual scheduling parameters have to be "renewed".
	 */
	if (dl_se->dl_new) {
		setup_new_dl_entity(dl_se, pi_se);
		return;
	}

	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
	    dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}
}

/*
 * If the entity depleted all its runtime, and if we want it to sleep
 * while waiting for some new execution time to become available, we
 * set the bandwidth enforcement timer to the replenishment instant
 * and try to activate it.
 *
 * Notice that it is important for the caller to know if the timer
 * actually started or not (i.e., the replenishment instant is in
 * the future or in the past).
 */
static int start_dl_timer(struct task_struct *p)
{
	struct sched_dl_entity *dl_se = &p->dl;
	struct hrtimer *timer = &dl_se->dl_timer;
	struct rq *rq = task_rq(p);
	ktime_t now, act;
	s64 delta;

	lockdep_assert_held(&rq->lock);

	/*
	 * We want the timer to fire at the deadline, but considering
	 * that it is actually coming from rq->clock and not from
	 * hrtimer's time base reading.
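	 *
	 * Purely illustrative numbers (not from the code): if rq_clock()
	 * currently reads 1000ms while the hrtimer base reads 1003ms, a
	 * deadline of 1010ms in rq->clock terms is programmed as
	 * 1010ms + (1003ms - 1000ms) = 1013ms on the timer base, which is
	 * exactly the delta correction computed below.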
	 */
	act = ns_to_ktime(dl_se->deadline);
	now = hrtimer_cb_get_time(timer);
	delta = ktime_to_ns(now) - rq_clock(rq);
	act = ktime_add_ns(act, delta);

	/*
	 * If the expiry time already passed, e.g., because the value
	 * chosen as the deadline is too small, don't even try to
	 * start the timer in the past!
	 */
	if (ktime_us_delta(act, now) < 0)
		return 0;

	/*
	 * !enqueued will guarantee another callback; even if one is already in
	 * progress. This ensures a balanced {get,put}_task_struct().
	 *
	 * The race against __run_timer() clearing the enqueued state is
	 * harmless because we're holding task_rq()->lock, therefore the timer
	 * expiring after we've done the check will wait on its task_rq_lock()
	 * and observe our state.
	 */
	if (!hrtimer_is_queued(timer)) {
		get_task_struct(p);
		hrtimer_start(timer, act, HRTIMER_MODE_ABS);
	}

	return 1;
}

/*
 * This is the bandwidth enforcement timer callback. If here, we know
 * a task is not on its dl_rq, since the fact that the timer was running
 * means the task is throttled and needs a runtime replenishment.
 *
 * However, what we actually do depends on whether the task is active
 * (it is on its rq) or has been removed from there by a call to
 * dequeue_task_dl(). In the former case we must issue the runtime
 * replenishment and add the task back to the dl_rq; in the latter, we just
 * do nothing but clear dl_throttled, so that runtime and deadline
 * updating (and the queueing back to dl_rq) will be done by the
 * next call to enqueue_task_dl().
 */
static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
{
	struct sched_dl_entity *dl_se = container_of(timer,
						     struct sched_dl_entity,
						     dl_timer);
	struct task_struct *p = dl_task_of(dl_se);
	unsigned long flags;
	struct rq *rq;

	rq = task_rq_lock(p, &flags);

	/*
	 * The task might have changed its scheduling policy to something
	 * different than SCHED_DEADLINE (through switched_from_dl()).
	 */
	if (!dl_task(p)) {
		__dl_clear_params(p);
		goto unlock;
	}

	/*
	 * This is possible if switched_from_dl() raced against a running
	 * callback that took the above !dl_task() path and we've since then
	 * switched back into SCHED_DEADLINE.
	 *
	 * There's nothing to do except drop our task reference.
	 */
	if (dl_se->dl_new)
		goto unlock;

	/*
	 * The task might have been boosted by someone else and might be in the
	 * boosting/deboosting path; it's not throttled.
	 */
	if (dl_se->dl_boosted)
		goto unlock;

	/*
	 * Spurious timer due to start_dl_timer() race; or we already received
	 * a replenishment from rt_mutex_setprio().
	 */
	if (!dl_se->dl_throttled)
		goto unlock;

	sched_clock_tick();
	update_rq_clock(rq);

	/*
	 * If the throttle happened during sched-out; like:
	 *
	 *   schedule()
	 *     deactivate_task()
	 *       dequeue_task_dl()
	 *         update_curr_dl()
	 *           start_dl_timer()
	 *         __dequeue_task_dl()
	 *     prev->on_rq = 0;
	 *
	 * We can be both throttled and !queued. Replenish the counter
	 * but do not enqueue -- wait for our wakeup to do that.
	 */
	if (!task_on_rq_queued(p)) {
		replenish_dl_entity(dl_se, dl_se);
		goto unlock;
	}

	enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
	if (dl_task(rq->curr))
		check_preempt_curr_dl(rq, p, 0);
	else
		resched_curr(rq);

#ifdef CONFIG_SMP
	/*
	 * Perform balancing operations here; after the replenishments. We
	 * cannot drop rq->lock before this, otherwise the assertion in
	 * start_dl_timer() about not missing updates is not true.
	 *
	 * If we find that the rq the task was on is no longer available, we
	 * need to select a new rq.
	 *
	 * XXX figure out if select_task_rq_dl() deals with offline cpus.
	 */
	if (unlikely(!rq->online))
		rq = dl_task_offline_migration(rq, p);

	/*
	 * Queueing this task back might have overloaded rq, check if we need
	 * to kick someone away.
	 */
	if (has_pushable_dl_tasks(rq)) {
		/*
		 * Nothing relies on rq->lock after this, so it's safe to drop
		 * rq->lock.
		 */
		lockdep_unpin_lock(&rq->lock);
		push_dl_task(rq);
		lockdep_pin_lock(&rq->lock);
	}
#endif

unlock:
	task_rq_unlock(rq, p, &flags);

	/*
	 * This can free the task_struct, including this hrtimer, do not touch
	 * anything related to that after this.
	 */
	put_task_struct(p);

	return HRTIMER_NORESTART;
}

void init_dl_task_timer(struct sched_dl_entity *dl_se)
{
	struct hrtimer *timer = &dl_se->dl_timer;

	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	timer->function = dl_task_timer;
}

static
int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
{
	return (dl_se->runtime <= 0);
}

extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);

/*
 * Update the current task's runtime statistics (provided it is still
 * a -deadline task and has not been removed from the dl_rq).
 */
static void update_curr_dl(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_dl_entity *dl_se = &curr->dl;
	u64 delta_exec;

	if (!dl_task(curr) || !on_dl_rq(dl_se))
		return;

	/*
	 * Consumed budget is computed considering the time as
	 * observed by schedulable tasks (excluding time spent
	 * in hardirq context, etc.). Deadlines are instead
	 * computed using hard walltime. This seems to be the more
	 * natural solution, but the full ramifications of this
	 * approach need further study.
	 */
	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
	if (unlikely((s64)delta_exec <= 0))
		return;

	schedstat_set(curr->se.statistics.exec_max,
		      max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq_clock_task(rq);
	cpuacct_charge(curr, delta_exec);

	sched_rt_avg_update(rq, delta_exec);

	dl_se->runtime -= dl_se->dl_yielded ? 0 : delta_exec;
	if (dl_runtime_exceeded(dl_se)) {
		dl_se->dl_throttled = 1;
		__dequeue_task_dl(rq, curr, 0);
		if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
			enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);

		if (!is_leftmost(curr, &rq->dl))
			resched_curr(rq);
	}

	/*
	 * Because -- for now -- we share the rt bandwidth, we need to
	 * account our runtime there too, otherwise actual rt tasks
	 * would be able to exceed the shared quota.
	 *
	 * Account to the root rt group for now.
	 *
	 * The solution we're working towards is having the RT groups scheduled
	 * using deadline servers -- however there's a few nasties to figure
	 * out before that can happen.
	 */
	if (rt_bandwidth_enabled()) {
		struct rt_rq *rt_rq = &rq->rt;

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We'll let actual RT tasks worry about the overflow here, we
		 * have our own CBS to keep us inline; only account when RT
		 * bandwidth is relevant.
		 */
		if (sched_rt_bandwidth_account(rt_rq))
			rt_rq->rt_time += delta_exec;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
}

#ifdef CONFIG_SMP

static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
	struct rq *rq = rq_of_dl_rq(dl_rq);

	if (dl_rq->earliest_dl.curr == 0 ||
	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
		dl_rq->earliest_dl.curr = deadline;
		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
	}
}

static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
	struct rq *rq = rq_of_dl_rq(dl_rq);

	/*
	 * Since we may have removed our earliest (and/or next earliest)
	 * task we must recompute them.
	 */
	if (!dl_rq->dl_nr_running) {
		dl_rq->earliest_dl.curr = 0;
		dl_rq->earliest_dl.next = 0;
		cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
	} else {
		struct rb_node *leftmost = dl_rq->rb_leftmost;
		struct sched_dl_entity *entry;

		entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
		dl_rq->earliest_dl.curr = entry->deadline;
		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
	}
}

#else

static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}

#endif /* CONFIG_SMP */

static inline
void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	int prio = dl_task_of(dl_se)->prio;
	u64 deadline = dl_se->deadline;

	WARN_ON(!dl_prio(prio));
	dl_rq->dl_nr_running++;
	add_nr_running(rq_of_dl_rq(dl_rq), 1);

	inc_dl_deadline(dl_rq, deadline);
	inc_dl_migration(dl_se, dl_rq);
}

static inline
void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	int prio = dl_task_of(dl_se)->prio;

	WARN_ON(!dl_prio(prio));
	WARN_ON(!dl_rq->dl_nr_running);
	dl_rq->dl_nr_running--;
	sub_nr_running(rq_of_dl_rq(dl_rq), 1);

	dec_dl_deadline(dl_rq, dl_se->deadline);
	dec_dl_migration(dl_se, dl_rq);
}

static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rb_node **link = &dl_rq->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct sched_dl_entity *entry;
	int leftmost = 1;

	BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_dl_entity, rb_node);
		if (dl_time_before(dl_se->deadline, entry->deadline))
			link = &parent->rb_left;
		else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost)
		dl_rq->rb_leftmost = &dl_se->rb_node;

	rb_link_node(&dl_se->rb_node, parent, link);
	rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root);

	inc_dl_tasks(dl_se, dl_rq);
}

static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

	if (RB_EMPTY_NODE(&dl_se->rb_node))
		return;

	if (dl_rq->rb_leftmost == &dl_se->rb_node) {
		struct rb_node *next_node;

		next_node = rb_next(&dl_se->rb_node);
		dl_rq->rb_leftmost = next_node;
	}

	rb_erase(&dl_se->rb_node, &dl_rq->rb_root);
	RB_CLEAR_NODE(&dl_se->rb_node);

	dec_dl_tasks(dl_se, dl_rq);
}

static void
enqueue_dl_entity(struct sched_dl_entity *dl_se,
		  struct sched_dl_entity *pi_se, int flags)
{
	BUG_ON(on_dl_rq(dl_se));

	/*
	 * If this is a wakeup or a new instance, the scheduling
	 * parameters of the task might need updating. Otherwise,
	 * we want a replenishment of its runtime.
	 */
	if (dl_se->dl_new || flags & ENQUEUE_WAKEUP)
		update_dl_entity(dl_se, pi_se);
	else if (flags & ENQUEUE_REPLENISH)
		replenish_dl_entity(dl_se, pi_se);

	__enqueue_dl_entity(dl_se);
}

static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
	__dequeue_dl_entity(dl_se);
}

static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	struct task_struct *pi_task = rt_mutex_get_top_task(p);
	struct sched_dl_entity *pi_se = &p->dl;

	/*
	 * Use the scheduling parameters of the top pi-waiter
	 * task if we have one and its (absolute) deadline is
	 * smaller than our one... OTW we keep our runtime and
	 * deadline.
	 */
	if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio)) {
		pi_se = &pi_task->dl;
	} else if (!dl_prio(p->normal_prio)) {
		/*
		 * Special case in which we have a !SCHED_DEADLINE task
		 * that is going to be deboosted, but exceeds its
		 * runtime while doing so. No point in replenishing
		 * it, as it's going to return to its original
		 * scheduling class after this.
		 */
		BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
		return;
	}

	/*
	 * If p is throttled, we do nothing. In fact, if it exhausted
	 * its budget it needs a replenishment and, since it now is on
	 * its rq, the bandwidth timer callback (which clearly has not
	 * run yet) will take care of this.
	 */
	if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH))
		return;

	enqueue_dl_entity(&p->dl, pi_se, flags);

	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
		enqueue_pushable_dl_task(rq, p);
}

static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	dequeue_dl_entity(&p->dl);
	dequeue_pushable_dl_task(rq, p);
}

static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	update_curr_dl(rq);
	__dequeue_task_dl(rq, p, flags);
}

/*
 * Yield task semantic for -deadline tasks is:
 *
 *   get off from the CPU until our next instance, with
 *   a new runtime. This is of little use now, since we
 *   don't have a bandwidth reclaiming mechanism. Anyway,
 *   bandwidth reclaiming is planned for the future, and
 *   yield_task_dl will indicate that some spare budget
 *   is available for other task instances to use it.
 */
static void yield_task_dl(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	/*
	 * We make the task go to sleep until its current deadline by
	 * forcing its runtime to zero. This way, update_curr_dl() stops
	 * it and the bandwidth timer will wake it up and will give it
	 * new scheduling parameters (thanks to dl_yielded=1).
	 */
	if (p->dl.runtime > 0) {
		rq->curr->dl.dl_yielded = 1;
		p->dl.runtime = 0;
	}
	update_rq_clock(rq);
	update_curr_dl(rq);
	/*
	 * Tell update_rq_clock() that we've just updated,
	 * so we don't do microscopic update in schedule()
	 * and double the fastpath cost.
	 */
	rq_clock_skip_update(rq, true);
}

#ifdef CONFIG_SMP

static int find_later_rq(struct task_struct *task);

static int
select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	struct task_struct *curr;
	struct rq *rq;

	if (sd_flag != SD_BALANCE_WAKE)
		goto out;

	rq = cpu_rq(cpu);

	rcu_read_lock();
	curr = READ_ONCE(rq->curr); /* unlocked access */

	/*
	 * If we are dealing with a -deadline task, we must
	 * decide where to wake it up.
	 * If it has a later deadline and the current task
	 * on this rq can't move (provided the waking task
	 * can!) we prefer to send it somewhere else. On the
	 * other hand, if it has a shorter deadline, we
	 * try to make it stay here, it might be important.
	 */
	if (unlikely(dl_task(curr)) &&
	    (curr->nr_cpus_allowed < 2 ||
	     !dl_entity_preempt(&p->dl, &curr->dl)) &&
	    (p->nr_cpus_allowed > 1)) {
		int target = find_later_rq(p);

		if (target != -1 &&
		    (dl_time_before(p->dl.deadline,
				    cpu_rq(target)->dl.earliest_dl.curr) ||
		     (cpu_rq(target)->dl.dl_nr_running == 0)))
			cpu = target;
	}
	rcu_read_unlock();

out:
	return cpu;
}

static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
{
	/*
	 * Current can't be migrated, useless to reschedule,
	 * let's hope p can move out.
	 */
	if (rq->curr->nr_cpus_allowed == 1 ||
	    cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
		return;

	/*
	 * p is migratable, so let's not schedule it and
	 * see if it is pushed or pulled somewhere else.
	 */
	if (p->nr_cpus_allowed != 1 &&
	    cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
		return;

	resched_curr(rq);
}

#endif /* CONFIG_SMP */

/*
 * Only called when both the current and waking task are -deadline
 * tasks.
 */
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
				  int flags)
{
	if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
		resched_curr(rq);
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * In the unlikely case current and p have the same deadline
	 * let us try to decide what's the best thing to do...
	 */
	if ((p->dl.deadline == rq->curr->dl.deadline) &&
	    !test_tsk_need_resched(rq->curr))
		check_preempt_equal_dl(rq, p);
#endif /* CONFIG_SMP */
}

#ifdef CONFIG_SCHED_HRTICK
static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
{
	hrtick_start(rq, p->dl.runtime);
}
#else /* !CONFIG_SCHED_HRTICK */
static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
{
}
#endif

static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
						   struct dl_rq *dl_rq)
{
	struct rb_node *left = dl_rq->rb_leftmost;

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_dl_entity, rb_node);
}

struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
{
	struct sched_dl_entity *dl_se;
	struct task_struct *p;
	struct dl_rq *dl_rq;

	dl_rq = &rq->dl;

	if (need_pull_dl_task(rq, prev)) {
		/*
		 * This is OK, because current is on_cpu, which avoids it being
		 * picked for load-balance and preemption/IRQs are still
		 * disabled avoiding further scheduler activity on it and we're
		 * being very careful to re-start the picking loop.
		 */
		lockdep_unpin_lock(&rq->lock);
		pull_dl_task(rq);
		lockdep_pin_lock(&rq->lock);
		/*
		 * pull_dl_task() can drop (and re-acquire) rq->lock; this
		 * means a stop task can slip in, in which case we need to
		 * re-start task selection.
		 */
		if (rq->stop && task_on_rq_queued(rq->stop))
			return RETRY_TASK;
	}

	/*
	 * When prev is DL, we may throttle it in put_prev_task().
	 * So, we update time before we check for dl_nr_running.
	 */
	if (prev->sched_class == &dl_sched_class)
		update_curr_dl(rq);

	if (unlikely(!dl_rq->dl_nr_running))
		return NULL;

	put_prev_task(rq, prev);

	dl_se = pick_next_dl_entity(rq, dl_rq);
	BUG_ON(!dl_se);

	p = dl_task_of(dl_se);
	p->se.exec_start = rq_clock_task(rq);

	/* Running task will never be pushed. */
	dequeue_pushable_dl_task(rq, p);

	if (hrtick_enabled(rq))
		start_hrtick_dl(rq, p);

	queue_push_tasks(rq);

	return p;
}

static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
{
	update_curr_dl(rq);

	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
		enqueue_pushable_dl_task(rq, p);
}

static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
{
	update_curr_dl(rq);

	/*
	 * Even when we have runtime, update_curr_dl() might have resulted in us
	 * not being the leftmost task anymore. In that case NEED_RESCHED will
	 * be set and schedule() will start a new hrtick for the next task.
	 */
	if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
	    is_leftmost(p, &rq->dl))
		start_hrtick_dl(rq, p);
}

static void task_fork_dl(struct task_struct *p)
{
	/*
	 * SCHED_DEADLINE tasks cannot fork and this is achieved through
	 * sched_fork().
	 */
}

static void task_dead_dl(struct task_struct *p)
{
	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

	/*
	 * Since we are TASK_DEAD we won't slip out of the domain!
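	 *
	 * Illustrative note (assuming the usual to_ratio() scaling by 2^20
	 * used for dl_bw in this tree, and made-up numbers): a task with
	 * 10ms runtime over a 100ms period carries dl_bw ~ (10 << 20) / 100
	 * ~ 104857, and that is the amount given back to total_bw below.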
	 */
	raw_spin_lock_irq(&dl_b->lock);
	/* XXX we should retain the bw until 0-lag */
	dl_b->total_bw -= p->dl.dl_bw;
	raw_spin_unlock_irq(&dl_b->lock);
}

static void set_curr_task_dl(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq_clock_task(rq);

	/* You can't push away the running task */
	dequeue_pushable_dl_task(rq, p);
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define DL_MAX_TRIES 3

static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
		return 1;
	return 0;
}

/*
 * Return the earliest pushable rq's task, which is suitable to be executed
 * on the CPU, NULL otherwise:
 */
static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
{
	struct rb_node *next_node = rq->dl.pushable_dl_tasks_leftmost;
	struct task_struct *p = NULL;

	if (!has_pushable_dl_tasks(rq))
		return NULL;

next_node:
	if (next_node) {
		p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);

		if (pick_dl_task(rq, p, cpu))
			return p;

		next_node = rb_next(next_node);
		goto next_node;
	}

	return NULL;
}

static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);

static int find_later_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
	int this_cpu = smp_processor_id();
	int best_cpu, cpu = task_cpu(task);

	/* Make sure the mask is initialized first */
	if (unlikely(!later_mask))
		return -1;

	if (task->nr_cpus_allowed == 1)
		return -1;

	/*
	 * We have to consider system topology and task affinity
	 * first, then we can look for a suitable cpu.
	 */
	best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
			task, later_mask);
	if (best_cpu == -1)
		return -1;

	/*
	 * If we are here, some target has been found,
	 * the most suitable of which is cached in best_cpu.
	 * This is, among the runqueues where the current tasks
	 * have later deadlines than the task's one, the rq
	 * with the latest possible one.
	 *
	 * Now we check how well this matches with task's
	 * affinity and system topology.
	 *
	 * The last cpu where the task ran is our first
	 * guess, since it is most likely cache-hot there.
	 */
	if (cpumask_test_cpu(cpu, later_mask))
		return cpu;
	/*
	 * Check if this_cpu is to be skipped (i.e., it is
	 * not in the mask) or not.
	 */
	if (!cpumask_test_cpu(this_cpu, later_mask))
		this_cpu = -1;

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {

			/*
			 * If possible, preempting this_cpu is
			 * cheaper than migrating.
			 */
			if (this_cpu != -1 &&
			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
				rcu_read_unlock();
				return this_cpu;
			}

			/*
			 * Last chance: if best_cpu is valid and is
			 * in the mask, that becomes our choice.
			 */
			if (best_cpu < nr_cpu_ids &&
			    cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
				rcu_read_unlock();
				return best_cpu;
			}
		}
	}
	rcu_read_unlock();

	/*
	 * At this point, all our guesses failed, we just return
	 * 'something', and let the caller sort the things out.
	 */
	if (this_cpu != -1)
		return this_cpu;

	cpu = cpumask_any(later_mask);
	if (cpu < nr_cpu_ids)
		return cpu;

	return -1;
}

/* Locks the rq it finds */
static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *later_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < DL_MAX_TRIES; tries++) {
		cpu = find_later_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		later_rq = cpu_rq(cpu);

		if (later_rq->dl.dl_nr_running &&
		    !dl_time_before(task->dl.deadline,
					later_rq->dl.earliest_dl.curr)) {
			/*
			 * Target rq has tasks of equal or earlier deadline,
			 * retrying does not release any lock and is unlikely
			 * to yield a different result.
			 */
			later_rq = NULL;
			break;
		}

		/* Retry if something changed. */
		if (double_lock_balance(rq, later_rq)) {
			if (unlikely(task_rq(task) != rq ||
				     !cpumask_test_cpu(later_rq->cpu,
						       &task->cpus_allowed) ||
				     task_running(rq, task) ||
				     !task_on_rq_queued(task))) {
				double_unlock_balance(rq, later_rq);
				later_rq = NULL;
				break;
			}
		}

		/*
		 * If the rq we found has no -deadline task, or
		 * its earliest one has a later deadline than our
		 * task, the rq is a good one.
		 */
		if (!later_rq->dl.dl_nr_running ||
		    dl_time_before(task->dl.deadline,
				   later_rq->dl.earliest_dl.curr))
			break;

		/* Otherwise we try again. */
		double_unlock_balance(rq, later_rq);
		later_rq = NULL;
	}

	return later_rq;
}

static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
{
	struct task_struct *p;

	if (!has_pushable_dl_tasks(rq))
		return NULL;

	p = rb_entry(rq->dl.pushable_dl_tasks_leftmost,
		     struct task_struct, pushable_dl_tasks);

	BUG_ON(rq->cpu != task_cpu(p));
	BUG_ON(task_current(rq, p));
	BUG_ON(p->nr_cpus_allowed <= 1);

	BUG_ON(!task_on_rq_queued(p));
	BUG_ON(!dl_task(p));

	return p;
}

/*
 * See if the non running -deadline tasks on this rq
 * can be sent to some other CPU where they can preempt
 * and start executing.
 */
static int push_dl_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *later_rq;
	int ret = 0;

	if (!rq->dl.overloaded)
		return 0;

	next_task = pick_next_pushable_dl_task(rq);
	if (!next_task)
		return 0;

retry:
	if (unlikely(next_task == rq->curr)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * If next_task preempts rq->curr, and rq->curr
	 * can move away, it makes sense to just reschedule
	 * without going further in pushing next_task.
	 */
	if (dl_task(rq->curr) &&
	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
	    rq->curr->nr_cpus_allowed > 1) {
		resched_curr(rq);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* Will lock the rq it'll find */
	later_rq = find_lock_later_rq(next_task, rq);
	if (!later_rq) {
		struct task_struct *task;

		/*
		 * We must check all this again, since
		 * find_lock_later_rq releases rq->lock and it is
		 * then possible that next_task has migrated.
		 */
		task = pick_next_pushable_dl_task(rq);
		if (task_cpu(next_task) == rq->cpu && task == next_task) {
			/*
			 * The task is still there. We don't try
			 * again, some other cpu will pull it when ready.
			 */
			goto out;
		}

		if (!task)
			/* No more tasks */
			goto out;

		put_task_struct(next_task);
		next_task = task;
		goto retry;
	}

	deactivate_task(rq, next_task, 0);
	set_task_cpu(next_task, later_rq->cpu);
	activate_task(later_rq, next_task, 0);
	ret = 1;

	resched_curr(later_rq);

	double_unlock_balance(rq, later_rq);

out:
	put_task_struct(next_task);

	return ret;
}

static void push_dl_tasks(struct rq *rq)
{
	/* push_dl_task() will return true if it moved a -deadline task */
	while (push_dl_task(rq))
		;
}

static void pull_dl_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, cpu;
	struct task_struct *p;
	bool resched = false;
	struct rq *src_rq;
	u64 dmin = LONG_MAX;

	if (likely(!dl_overloaded(this_rq)))
		return;

	/*
	 * Match the barrier from dl_set_overload(); this guarantees that if we
	 * see overloaded we must also see the dlo_mask bit.
	 */
	smp_rmb();

	for_each_cpu(cpu, this_rq->rd->dlo_mask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);

		/*
		 * It looks racy, and it is! However, as in sched_rt.c,
		 * we are fine with this.
		 */
		if (this_rq->dl.dl_nr_running &&
		    dl_time_before(this_rq->dl.earliest_dl.curr,
				   src_rq->dl.earliest_dl.next))
			continue;

		/* Might drop this_rq->lock */
		double_lock_balance(this_rq, src_rq);

		/*
		 * If there are no more pullable tasks on the
		 * rq, we're done with it.
		 */
		if (src_rq->dl.dl_nr_running <= 1)
			goto skip;

		p = pick_earliest_pushable_dl_task(src_rq, this_cpu);

		/*
		 * We found a task to be pulled if:
		 *  - it preempts our current (if there's one),
		 *  - it will preempt the last one we pulled (if any).
		 */
		if (p && dl_time_before(p->dl.deadline, dmin) &&
		    (!this_rq->dl.dl_nr_running ||
		     dl_time_before(p->dl.deadline,
				    this_rq->dl.earliest_dl.curr))) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!task_on_rq_queued(p));

			/*
			 * Then we pull iff p has actually an earlier
			 * deadline than the current task of its runqueue.
			 */
			if (dl_time_before(p->dl.deadline,
					   src_rq->curr->dl.deadline))
				goto skip;

			resched = true;

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			dmin = p->dl.deadline;

			/* Is there any other task even earlier? */
		}
skip:
		double_unlock_balance(this_rq, src_rq);
	}

	if (resched)
		resched_curr(this_rq);
}

/*
 * Since the task is not running and a reschedule is not going to happen
 * anytime soon on its runqueue, we try pushing it away now.
 */
static void task_woken_dl(struct rq *rq, struct task_struct *p)
{
	if (!task_running(rq, p) &&
	    !test_tsk_need_resched(rq->curr) &&
	    p->nr_cpus_allowed > 1 &&
	    dl_task(rq->curr) &&
	    (rq->curr->nr_cpus_allowed < 2 ||
	     !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
		push_dl_tasks(rq);
	}
}

static void set_cpus_allowed_dl(struct task_struct *p,
				const struct cpumask *new_mask)
{
	struct root_domain *src_rd;
	struct rq *rq;

	BUG_ON(!dl_task(p));

	rq = task_rq(p);
	src_rd = rq->rd;
	/*
	 * Migrating a SCHED_DEADLINE task between exclusive
	 * cpusets (different root_domains) entails a bandwidth
	 * update. We already made space for us in the destination
	 * domain (see cpuset_can_attach()).
	 */
	if (!cpumask_intersects(src_rd->span, new_mask)) {
		struct dl_bw *src_dl_b;

		src_dl_b = dl_bw_of(cpu_of(rq));
		/*
		 * We now free resources of the root_domain we are migrating
		 * off. In the worst case, sched_setattr() may temporarily fail
		 * until we complete the update.
		 */
		raw_spin_lock(&src_dl_b->lock);
		__dl_clear(src_dl_b, p->dl.dl_bw);
		raw_spin_unlock(&src_dl_b->lock);
	}

	set_cpus_allowed_common(p, new_mask);
}

/* Assumes rq->lock is held */
static void rq_online_dl(struct rq *rq)
{
	if (rq->dl.overloaded)
		dl_set_overload(rq);

	cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
	if (rq->dl.dl_nr_running > 0)
		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
}

/* Assumes rq->lock is held */
static void rq_offline_dl(struct rq *rq)
{
	if (rq->dl.overloaded)
		dl_clear_overload(rq);

	cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
	cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
}

void __init init_sched_dl_class(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
					GFP_KERNEL, cpu_to_node(i));
}

#endif /* CONFIG_SMP */

static void switched_from_dl(struct rq *rq, struct task_struct *p)
{
	/*
	 * Start the deadline timer; if we switch back to dl before this we'll
	 * continue consuming our current CBS slice. If we stay outside of
	 * SCHED_DEADLINE until the deadline passes, the timer will reset the
	 * task.
	 */
	if (!start_dl_timer(p))
		__dl_clear_params(p);

	/*
	 * Since this might be the only -deadline task on the rq,
	 * this is the right place to try to pull some other one
	 * from an overloaded cpu, if any.
	 */
	if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
		return;

	queue_pull_task(rq);
}

/*
 * When switching to -deadline, we may overload the rq, then
 * we try to push someone off, if possible.
 */
static void switched_to_dl(struct rq *rq, struct task_struct *p)
{
	if (task_on_rq_queued(p) && rq->curr != p) {
#ifdef CONFIG_SMP
		if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
			queue_push_tasks(rq);
#else
		if (dl_task(rq->curr))
			check_preempt_curr_dl(rq, p, 0);
		else
			resched_curr(rq);
#endif
	}
}

/*
 * If the scheduling parameters of a -deadline task changed,
 * a push or pull operation might be needed.
 */
static void prio_changed_dl(struct rq *rq, struct task_struct *p,
			    int oldprio)
{
	if (task_on_rq_queued(p) || rq->curr == p) {
#ifdef CONFIG_SMP
		/*
		 * This might be too much, but unfortunately
		 * we don't have the old deadline value, and
		 * we can't argue if the task is increasing
		 * or lowering its prio, so...
		 */
		if (!rq->dl.overloaded)
			queue_pull_task(rq);

		/*
		 * If we now have an earlier deadline task than p,
		 * then reschedule, provided p is still on this
		 * runqueue.
		 */
		if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
			resched_curr(rq);
#else
		/*
		 * Again, we don't know if p has an earlier
		 * or later deadline, so let's blindly set a
		 * (maybe not needed) rescheduling point.
		 */
		resched_curr(rq);
#endif /* CONFIG_SMP */
	} else
		switched_to_dl(rq, p);
}

const struct sched_class dl_sched_class = {
	.next			= &rt_sched_class,
	.enqueue_task		= enqueue_task_dl,
	.dequeue_task		= dequeue_task_dl,
	.yield_task		= yield_task_dl,

	.check_preempt_curr	= check_preempt_curr_dl,

	.pick_next_task		= pick_next_task_dl,
	.put_prev_task		= put_prev_task_dl,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_dl,
	.set_cpus_allowed	= set_cpus_allowed_dl,
	.rq_online		= rq_online_dl,
	.rq_offline		= rq_offline_dl,
	.task_woken		= task_woken_dl,
#endif

	.set_curr_task		= set_curr_task_dl,
	.task_tick		= task_tick_dl,
	.task_fork		= task_fork_dl,
	.task_dead		= task_dead_dl,

	.prio_changed		= prio_changed_dl,
	.switched_from		= switched_from_dl,
	.switched_to		= switched_to_dl,

	.update_curr		= update_curr_dl,
};

#ifdef CONFIG_SCHED_DEBUG
extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);

void print_dl_stats(struct seq_file *m, int cpu)
{
	print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
}
#endif /* CONFIG_SCHED_DEBUG */
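
/*
 * Purely illustrative userspace sketch (not part of this file; the snippet
 * and its values are examples only, see
 * Documentation/scheduler/sched-deadline.txt): a task would typically ask
 * for the reservation this class enforces via the sched_setattr() syscall,
 * e.g. a 10ms budget every 100ms with a 30ms relative deadline:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	= 10 * 1000 * 1000,
 *		.sched_deadline	= 30 * 1000 * 1000,
 *		.sched_period	= 100 * 1000 * 1000,
 *	};
 *
 *	if (syscall(__NR_sched_setattr, 0, &attr, 0))
 *		perror("sched_setattr");
 */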