/*
 * Deadline Scheduling Class (SCHED_DEADLINE)
 *
 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
 *
 * Tasks that periodically execute their instances for less than their
 * runtime won't miss any of their deadlines.
 * Tasks that are not periodic or sporadic, or that try to execute more
 * than their reserved bandwidth, will be slowed down (and may potentially
 * miss some of their deadlines) without affecting any other task.
 *
 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
 *                    Juri Lelli <juri.lelli@gmail.com>,
 *                    Michael Trimarchi <michael@amarulasolutions.com>,
 *                    Fabio Checconi <fchecconi@gmail.com>
 */
#include "sched.h"

#include <linux/slab.h>
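/*
 * Illustrative sketch, not part of this file's build: a userspace task
 * typically sets up the reservation handled below through the
 * sched_setattr() syscall, passing runtime, (relative) deadline and
 * period in nanoseconds. The numbers are hypothetical and only show how
 * the three parameters relate:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * 1000 * 1000,	(10ms worst-case budget)
 *		.sched_deadline	=  30 * 1000 * 1000,	(deadline 30ms after each activation)
 *		.sched_period	= 100 * 1000 * 1000,	(a new instance every 100ms)
 *	};
 *	sched_setattr(0, &attr, 0);
 *
 * Such a task reserves 10ms every 100ms, i.e. 10% of a CPU; as long as
 * each instance really completes within its 10ms budget, the CBS logic
 * below guarantees that no deadline is missed.
 */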

struct dl_bandwidth def_dl_bandwidth;

static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
{
	return container_of(dl_se, struct task_struct, dl);
}

static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
	return container_of(dl_rq, struct rq, dl);
}

static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
{
	struct task_struct *p = dl_task_of(dl_se);
	struct rq *rq = task_rq(p);

	return &rq->dl;
}

static inline int on_dl_rq(struct sched_dl_entity *dl_se)
{
	return !RB_EMPTY_NODE(&dl_se->rb_node);
}

static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
{
	struct sched_dl_entity *dl_se = &p->dl;

	return dl_rq->rb_leftmost == &dl_se->rb_node;
}

void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
{
	raw_spin_lock_init(&dl_b->dl_runtime_lock);
	dl_b->dl_period = period;
	dl_b->dl_runtime = runtime;
}

extern unsigned long to_ratio(u64 period, u64 runtime);

void init_dl_bw(struct dl_bw *dl_b)
{
	raw_spin_lock_init(&dl_b->lock);
	raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
	if (global_rt_runtime() == RUNTIME_INF)
		dl_b->bw = -1;
	else
		dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
	raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
	dl_b->total_bw = 0;
}

void init_dl_rq(struct dl_rq *dl_rq, struct rq *rq)
{
	dl_rq->rb_root = RB_ROOT;

#ifdef CONFIG_SMP
	/* zero means no -deadline tasks */
	dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;

	dl_rq->dl_nr_migratory = 0;
	dl_rq->overloaded = 0;
	dl_rq->pushable_dl_tasks_root = RB_ROOT;
#else
	init_dl_bw(&dl_rq->dl_bw);
#endif
}

#ifdef CONFIG_SMP

static inline int dl_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->dlo_count);
}

static inline void dl_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
	/*
	 * Must be visible before the overload count is
	 * set (as in sched_rt.c).
	 *
	 * Matched by the barrier in pull_dl_task().
	 */
	smp_wmb();
	atomic_inc(&rq->rd->dlo_count);
}

static inline void dl_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	atomic_dec(&rq->rd->dlo_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
}

static void update_dl_migration(struct dl_rq *dl_rq)
{
	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
		if (!dl_rq->overloaded) {
			dl_set_overload(rq_of_dl_rq(dl_rq));
			dl_rq->overloaded = 1;
		}
	} else if (dl_rq->overloaded) {
		dl_clear_overload(rq_of_dl_rq(dl_rq));
		dl_rq->overloaded = 0;
	}
}

static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	struct task_struct *p = dl_task_of(dl_se);

	if (p->nr_cpus_allowed > 1)
		dl_rq->dl_nr_migratory++;

	update_dl_migration(dl_rq);
}

static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	struct task_struct *p = dl_task_of(dl_se);

	if (p->nr_cpus_allowed > 1)
		dl_rq->dl_nr_migratory--;

	update_dl_migration(dl_rq);
}

/*
 * The list of pushable -deadline tasks is not a plist, like in
 * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
 */
static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
	struct dl_rq *dl_rq = &rq->dl;
	struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node;
	struct rb_node *parent = NULL;
	struct task_struct *entry;
	int leftmost = 1;

	BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct task_struct,
				 pushable_dl_tasks);
		if (dl_entity_preempt(&p->dl, &entry->dl))
			link = &parent->rb_left;
		else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost)
		dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;

	rb_link_node(&p->pushable_dl_tasks, parent, link);
	rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
}

static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
	struct dl_rq *dl_rq = &rq->dl;

	if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
		return;

	if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) {
		struct rb_node *next_node;

		next_node = rb_next(&p->pushable_dl_tasks);
		dl_rq->pushable_dl_tasks_leftmost = next_node;
	}

	rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
	RB_CLEAR_NODE(&p->pushable_dl_tasks);
}

static inline int has_pushable_dl_tasks(struct rq *rq)
{
	return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root);
}

static int push_dl_task(struct rq *rq);

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
	return dl_task(prev);
}

static inline void set_post_schedule(struct rq *rq)
{
	rq->post_schedule = has_pushable_dl_tasks(rq);
}

#else

static inline
void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline
void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
	return false;
}

static inline int pull_dl_task(struct rq *rq)
{
	return 0;
}

static inline void set_post_schedule(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
				  int flags);

/*
 * We are being explicitly informed that a new instance is starting,
 * and this means that:
 *  - the absolute deadline of the entity has to be placed at
 *    current time + relative deadline;
 *  - the runtime of the entity has to be set to the maximum value.
 *
 * The capability of specifying such an event is useful whenever a -deadline
 * entity wants to (try to!) synchronize its behaviour with the scheduler's
 * one, and to (try to!) reconcile itself with its own scheduling
 * parameters.
 */
static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
				       struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	WARN_ON(!dl_se->dl_new || dl_se->dl_throttled);

	/*
	 * We use the regular wall clock time to set deadlines in the
	 * future; in fact, we must consider execution overheads (time
	 * spent on hardirq context, etc.).
	 */
	dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
	dl_se->runtime = pi_se->dl_runtime;
	dl_se->dl_new = 0;
}

/*
 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 * possibility of an entity lasting more than what it declared, and thus
 * exhausting its runtime.
 *
 * Here we are interested in making runtime overrun possible, but we do
 * not want an entity which is misbehaving to affect the scheduling of all
 * other entities.
 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 * is used, in order to confine each entity within its own bandwidth.
 *
 * This function deals exactly with that, and ensures that when the runtime
 * of an entity is replenished, its deadline is also postponed. That ensures
 * the overrunning entity can't interfere with other entities in the system
 * and can't make them miss their deadlines. Reasons why this kind of overrun
 * could happen are, typically, an entity voluntarily trying to exceed its
 * runtime, or one that underestimated it when setting its parameters
 * (through sched_setattr()).
 */
static void replenish_dl_entity(struct sched_dl_entity *dl_se,
				struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	BUG_ON(pi_se->dl_runtime <= 0);

	/*
	 * This could be the case for a !-dl task that is boosted.
	 * Just go with full inherited parameters.
	 */
	if (dl_se->dl_deadline == 0) {
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}

	/*
	 * We keep moving the deadline away until we get some
	 * available runtime for the entity. This ensures correct
	 * handling of situations where the runtime overrun is
	 * arbitrarily large.
	 */
	while (dl_se->runtime <= 0) {
		dl_se->deadline += pi_se->dl_period;
		dl_se->runtime += pi_se->dl_runtime;
	}

	/*
	 * At this point, the deadline really should be "in
	 * the future" with respect to rq->clock. If it's
	 * not, we are, for some reason, lagging too much!
	 * Anyway, after having warned userspace about that,
	 * we still try to keep things running by
	 * resetting the deadline and the budget of the
	 * entity.
	 */
	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
		static bool lag_once = false;

		if (!lag_once) {
			lag_once = true;
			printk_sched("sched: DL replenish lagged too much\n");
		}
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}
}

/*
 * Here we check if --at time t-- an entity (which is probably being
 * [re]activated or, in general, enqueued) can use its remaining runtime
 * and its current deadline _without_ exceeding the bandwidth it is
 * assigned (function returns true if it can't). We are in fact applying
 * one of the CBS rules: when a task wakes up, if the residual runtime
 * over residual deadline fits within the allocated bandwidth, then we
 * can keep the current (absolute) deadline and residual budget without
 * disrupting the schedulability of the system. Otherwise, we should
 * refill the runtime and set the deadline a period in the future,
 * because keeping the current (absolute) deadline of the task would
 * result in breaking guarantees promised to other tasks (refer to
 * Documentation/scheduler/sched-deadline.txt for more information).
 *
 * This function returns true if:
 *
 *   runtime / (deadline - t) > dl_runtime / dl_period ,
 *
 * IOW we can't recycle current parameters.
 *
 * Notice that the bandwidth check is done against the period. For
 * tasks with deadline equal to period this is the same as using
 * dl_deadline instead of dl_period in the equation above.
 */
static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
			       struct sched_dl_entity *pi_se, u64 t)
{
	u64 left, right;

	/*
	 * left and right are the two sides of the equation above,
	 * after a bit of shuffling to use multiplications instead
	 * of divisions.
	 *
	 * Note that none of the time values involved in the two
	 * multiplications are absolute: dl_deadline and dl_runtime
	 * are the relative deadline and the maximum runtime of each
	 * instance, runtime is the runtime left for the last instance
	 * and (deadline - t), since t is rq->clock, is the time left
	 * to the (absolute) deadline. Even if overflowing the u64 type
	 * is very unlikely to occur in both cases, here we scale down
	 * as we want to avoid that risk at all. Scaling down by 10
	 * means that we reduce granularity to 1us. We are fine with it,
	 * since this is only a true/false check and, anyway, thinking
	 * of anything below microseconds resolution is actually fiction
	 * (but still we want to give the user that illusion >;).
	 */
	left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
	right = ((dl_se->deadline - t) >> DL_SCALE) *
		(pi_se->dl_runtime >> DL_SCALE);

	return dl_time_before(right, left);
}

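/*
 * A worked example of the check above, with hypothetical numbers and for
 * illustration only: take a task with dl_runtime = 10ms and
 * dl_period = 100ms, i.e. a reserved bandwidth of 10%. If it wakes up with
 * 4ms of residual runtime and 10ms left to its old absolute deadline,
 * keeping the old parameters would mean running at 4ms / 10ms = 40% of a
 * CPU until that deadline, well above the 10% it was admitted with: the
 * function returns true and update_dl_entity() hands out a fresh deadline
 * (now + dl_deadline) and a full runtime. If instead only 0.5ms of runtime
 * were left, 0.5ms / 10ms = 5% <= 10%, so deadline and residual budget can
 * be kept as they are.
 *
 * The DL_SCALE shifts above only trade nanosecond for (roughly)
 * microsecond granularity, so that the two multiplications cannot
 * overflow 64 bits.
 */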

/*
 * When a -deadline entity is queued back on the runqueue, its runtime and
 * deadline might need updating.
 *
 * The policy here is that we update the deadline of the entity only if:
 *  - the current deadline is in the past,
 *  - using the remaining runtime with the current deadline would make
 *    the entity exceed its bandwidth.
 */
static void update_dl_entity(struct sched_dl_entity *dl_se,
			     struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	/*
	 * The arrival of a new instance needs special treatment, i.e.,
	 * the actual scheduling parameters have to be "renewed".
	 */
	if (dl_se->dl_new) {
		setup_new_dl_entity(dl_se, pi_se);
		return;
	}

	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
	    dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}
}

/*
 * If the entity depleted all its runtime, and if we want it to sleep
 * while waiting for some new execution time to become available, we
 * set the bandwidth enforcement timer to the replenishment instant
 * and try to activate it.
 *
 * Notice that it is important for the caller to know if the timer
 * actually started or not (i.e., the replenishment instant is in
 * the future or in the past).
 */
static int start_dl_timer(struct sched_dl_entity *dl_se, bool boosted)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);
	ktime_t now, act;
	ktime_t soft, hard;
	unsigned long range;
	s64 delta;

	if (boosted)
		return 0;
	/*
	 * We want the timer to fire at the deadline, but considering
	 * that it is actually coming from rq->clock and not from
	 * hrtimer's time base reading.
	 */
	act = ns_to_ktime(dl_se->deadline);
	now = hrtimer_cb_get_time(&dl_se->dl_timer);
	delta = ktime_to_ns(now) - rq_clock(rq);
	act = ktime_add_ns(act, delta);

	/*
	 * If the expiry time already passed, e.g., because the value
	 * chosen as the deadline is too small, don't even try to
	 * start the timer in the past!
	 */
	if (ktime_us_delta(act, now) < 0)
		return 0;

	hrtimer_set_expires(&dl_se->dl_timer, act);

	soft = hrtimer_get_softexpires(&dl_se->dl_timer);
	hard = hrtimer_get_expires(&dl_se->dl_timer);
	range = ktime_to_ns(ktime_sub(hard, soft));
	__hrtimer_start_range_ns(&dl_se->dl_timer, soft,
				 range, HRTIMER_MODE_ABS, 0);

	return hrtimer_active(&dl_se->dl_timer);
}

/*
 * This is the bandwidth enforcement timer callback. If here, we know
 * a task is not on its dl_rq, since the fact that the timer was running
 * means the task is throttled and needs a runtime replenishment.
 *
 * However, what we actually do depends on whether the task is active
 * (it is on its rq) or has been removed from there by a call to
 * dequeue_task_dl(). In the former case we must issue the runtime
 * replenishment and add the task back to the dl_rq; in the latter, we just
 * do nothing but clearing dl_throttled, so that runtime and deadline
 * updating (and the queueing back to dl_rq) will be done by the
 * next call to enqueue_task_dl().
 */
static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
{
	struct sched_dl_entity *dl_se = container_of(timer,
						     struct sched_dl_entity,
						     dl_timer);
	struct task_struct *p = dl_task_of(dl_se);
	struct rq *rq = task_rq(p);
	raw_spin_lock(&rq->lock);

	/*
	 * We need to take care of possible races here. In fact, the
	 * task might have changed its scheduling policy to something
	 * different from SCHED_DEADLINE or changed its reservation
	 * parameters (through sched_setattr()).
	 */
	if (!dl_task(p) || dl_se->dl_new)
		goto unlock;

	sched_clock_tick();
	update_rq_clock(rq);
	dl_se->dl_throttled = 0;
	dl_se->dl_yielded = 0;
	if (p->on_rq) {
		enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
		if (task_has_dl_policy(rq->curr))
			check_preempt_curr_dl(rq, p, 0);
		else
			resched_task(rq->curr);
#ifdef CONFIG_SMP
		/*
		 * Queueing this task back might have overloaded rq,
		 * check if we need to kick someone away.
		 */
		if (has_pushable_dl_tasks(rq))
			push_dl_task(rq);
#endif
	}
unlock:
	raw_spin_unlock(&rq->lock);

	return HRTIMER_NORESTART;
}

void init_dl_task_timer(struct sched_dl_entity *dl_se)
{
	struct hrtimer *timer = &dl_se->dl_timer;

	if (hrtimer_active(timer)) {
		hrtimer_try_to_cancel(timer);
		return;
	}

	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	timer->function = dl_task_timer;
}

static
int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
{
	int dmiss = dl_time_before(dl_se->deadline, rq_clock(rq));
	int rorun = dl_se->runtime <= 0;

	if (!rorun && !dmiss)
		return 0;

	/*
	 * If we are beyond our current deadline and we are still
	 * executing, then we have already used some of the runtime of
	 * the next instance. Thus, if we do not account for that, we are
	 * stealing bandwidth from the system at each deadline miss!
	 */
	if (dmiss) {
		dl_se->runtime = rorun ? dl_se->runtime : 0;
		dl_se->runtime -= rq_clock(rq) - dl_se->deadline;
	}

	return 1;
}

extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);

/*
 * Update the current task's runtime statistics (provided it is still
 * a -deadline task and has not been removed from the dl_rq).
 */
static void update_curr_dl(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_dl_entity *dl_se = &curr->dl;
	u64 delta_exec;

	if (!dl_task(curr) || !on_dl_rq(dl_se))
		return;

	/*
	 * Consumed budget is computed considering the time as
	 * observed by schedulable tasks (excluding time spent
	 * in hardirq context, etc.). Deadlines are instead
	 * computed using hard walltime. This seems to be the more
	 * natural solution, but the full ramifications of this
	 * approach need further study.
	 */
	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
	if (unlikely((s64)delta_exec <= 0))
		return;

	schedstat_set(curr->se.statistics.exec_max,
		      max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq_clock_task(rq);
	cpuacct_charge(curr, delta_exec);

	sched_rt_avg_update(rq, delta_exec);

	dl_se->runtime -= delta_exec;
	if (dl_runtime_exceeded(rq, dl_se)) {
		__dequeue_task_dl(rq, curr, 0);
		if (likely(start_dl_timer(dl_se, curr->dl.dl_boosted)))
			dl_se->dl_throttled = 1;
		else
			enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);

		if (!is_leftmost(curr, &rq->dl))
			resched_task(curr);
	}

	/*
	 * Because -- for now -- we share the rt bandwidth, we need to
	 * account our runtime there too, otherwise actual rt tasks
	 * would be able to exceed the shared quota.
	 *
	 * Account to the root rt group for now.
	 *
	 * The solution we're working towards is having the RT groups scheduled
	 * using deadline servers -- however there's a few nasties to figure
	 * out before that can happen.
	 */
	if (rt_bandwidth_enabled()) {
		struct rt_rq *rt_rq = &rq->rt;

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We'll let actual RT tasks worry about the overflow here, we
		 * have our own CBS to keep us inline; only account when RT
		 * bandwidth is relevant.
		 */
		if (sched_rt_bandwidth_account(rt_rq))
			rt_rq->rt_time += delta_exec;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
}

#ifdef CONFIG_SMP

static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu);

static inline u64 next_deadline(struct rq *rq)
{
	struct task_struct *next = pick_next_earliest_dl_task(rq, rq->cpu);

	if (next && dl_prio(next->prio))
		return next->dl.deadline;
	else
		return 0;
}

static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
	struct rq *rq = rq_of_dl_rq(dl_rq);

	if (dl_rq->earliest_dl.curr == 0 ||
	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
		/*
		 * If the dl_rq had no -deadline tasks, or if the new task
		 * has a shorter deadline than the current one on dl_rq, we
		 * know that the previous earliest becomes our next earliest,
		 * as the new task becomes the earliest itself.
		 */
		dl_rq->earliest_dl.next = dl_rq->earliest_dl.curr;
		dl_rq->earliest_dl.curr = deadline;
		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
	} else if (dl_rq->earliest_dl.next == 0 ||
		   dl_time_before(deadline, dl_rq->earliest_dl.next)) {
		/*
		 * On the other hand, if the new -deadline task has a
		 * later deadline than the earliest one on dl_rq, but
		 * it is earlier than the next (if any), we must
		 * recompute the next-earliest.
		 */
		dl_rq->earliest_dl.next = next_deadline(rq);
	}
}

static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
	struct rq *rq = rq_of_dl_rq(dl_rq);

	/*
	 * Since we may have removed our earliest (and/or next earliest)
	 * task we must recompute them.
	 */
	if (!dl_rq->dl_nr_running) {
		dl_rq->earliest_dl.curr = 0;
		dl_rq->earliest_dl.next = 0;
		cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
	} else {
		struct rb_node *leftmost = dl_rq->rb_leftmost;
		struct sched_dl_entity *entry;

		entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
		dl_rq->earliest_dl.curr = entry->deadline;
		dl_rq->earliest_dl.next = next_deadline(rq);
		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
	}
}

#else

static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}

#endif /* CONFIG_SMP */

static inline
void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	int prio = dl_task_of(dl_se)->prio;
	u64 deadline = dl_se->deadline;

	WARN_ON(!dl_prio(prio));
	dl_rq->dl_nr_running++;
	inc_nr_running(rq_of_dl_rq(dl_rq));

	inc_dl_deadline(dl_rq, deadline);
	inc_dl_migration(dl_se, dl_rq);
}

static inline
void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	int prio = dl_task_of(dl_se)->prio;

	WARN_ON(!dl_prio(prio));
	WARN_ON(!dl_rq->dl_nr_running);
	dl_rq->dl_nr_running--;
	dec_nr_running(rq_of_dl_rq(dl_rq));

	dec_dl_deadline(dl_rq, dl_se->deadline);
	dec_dl_migration(dl_se, dl_rq);
}

static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rb_node **link = &dl_rq->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct sched_dl_entity *entry;
	int leftmost = 1;

	BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_dl_entity, rb_node);
		if (dl_time_before(dl_se->deadline, entry->deadline))
			link = &parent->rb_left;
		else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost)
		dl_rq->rb_leftmost = &dl_se->rb_node;

	rb_link_node(&dl_se->rb_node, parent, link);
	rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root);

	inc_dl_tasks(dl_se, dl_rq);
}

static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

	if (RB_EMPTY_NODE(&dl_se->rb_node))
		return;

	if (dl_rq->rb_leftmost == &dl_se->rb_node) {
		struct rb_node *next_node;

		next_node = rb_next(&dl_se->rb_node);
		dl_rq->rb_leftmost = next_node;
	}

	rb_erase(&dl_se->rb_node, &dl_rq->rb_root);
	RB_CLEAR_NODE(&dl_se->rb_node);

	dec_dl_tasks(dl_se, dl_rq);
}

static void
enqueue_dl_entity(struct sched_dl_entity *dl_se,
		  struct sched_dl_entity *pi_se, int flags)
{
	BUG_ON(on_dl_rq(dl_se));

	/*
	 * If this is a wakeup or a new instance, the scheduling
	 * parameters of the task might need updating. Otherwise,
	 * we want a replenishment of its runtime.
	 */
	if (!dl_se->dl_new && flags & ENQUEUE_REPLENISH)
		replenish_dl_entity(dl_se, pi_se);
	else
		update_dl_entity(dl_se, pi_se);

	__enqueue_dl_entity(dl_se);
}

static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
	__dequeue_dl_entity(dl_se);
}

static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	struct task_struct *pi_task = rt_mutex_get_top_task(p);
	struct sched_dl_entity *pi_se = &p->dl;

	/*
	 * Use the scheduling parameters of the top pi-waiter
	 * task if we have one and its (relative) deadline is
	 * smaller than our one... OTW we keep our runtime and
	 * deadline.
	 */
	if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio))
		pi_se = &pi_task->dl;

	/*
	 * If p is throttled, we do nothing. In fact, if it exhausted
	 * its budget it needs a replenishment and, since it now is on
	 * its rq, the bandwidth timer callback (which clearly has not
	 * run yet) will take care of this.
	 */
	if (p->dl.dl_throttled)
		return;

	enqueue_dl_entity(&p->dl, pi_se, flags);

	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
		enqueue_pushable_dl_task(rq, p);
}

static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	dequeue_dl_entity(&p->dl);
	dequeue_pushable_dl_task(rq, p);
}

static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	update_curr_dl(rq);
	__dequeue_task_dl(rq, p, flags);
}

/*
 * Yield task semantic for -deadline tasks is:
 *
 *   get off from the CPU until our next instance, with
 *   a new runtime. This is of little use now, since we
 *   don't have a bandwidth reclaiming mechanism. Anyway,
 *   bandwidth reclaiming is planned for the future, and
 *   yield_task_dl will indicate that some spare budget
 *   is available for other task instances to use.
 */
static void yield_task_dl(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	/*
	 * We make the task go to sleep until its current deadline by
	 * forcing its runtime to zero. This way, update_curr_dl() stops
	 * it and the bandwidth timer will wake it up and will give it
	 * new scheduling parameters (thanks to dl_yielded=1).
	 */
	if (p->dl.runtime > 0) {
		rq->curr->dl.dl_yielded = 1;
		p->dl.runtime = 0;
	}
	update_curr_dl(rq);
}

#ifdef CONFIG_SMP

static int find_later_rq(struct task_struct *task);

static int
select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	struct task_struct *curr;
	struct rq *rq;

	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
		goto out;

	rq = cpu_rq(cpu);

	rcu_read_lock();
	curr = ACCESS_ONCE(rq->curr); /* unlocked access */

	/*
	 * If we are dealing with a -deadline task, we must
	 * decide where to wake it up.
	 * If it has a later deadline and the current task
	 * on this rq can't move (provided the waking task
	 * can!) we prefer to send it somewhere else. On the
	 * other hand, if it has a shorter deadline, we
	 * try to make it stay here, it might be important.
	 */
	if (unlikely(dl_task(curr)) &&
	    (curr->nr_cpus_allowed < 2 ||
	     !dl_entity_preempt(&p->dl, &curr->dl)) &&
	    (p->nr_cpus_allowed > 1)) {
		int target = find_later_rq(p);

		if (target != -1)
			cpu = target;
	}
	rcu_read_unlock();

out:
	return cpu;
}

static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
{
	/*
	 * Current can't be migrated, useless to reschedule,
	 * let's hope p can move out.
	 */
	if (rq->curr->nr_cpus_allowed == 1 ||
	    cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
		return;

	/*
	 * p is migratable, so let's not schedule it and
	 * see if it is pushed or pulled somewhere else.
	 */
	if (p->nr_cpus_allowed != 1 &&
	    cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
		return;

	resched_task(rq->curr);
}

static int pull_dl_task(struct rq *this_rq);

#endif /* CONFIG_SMP */

/*
 * Only called when both the current and waking task are -deadline
 * tasks.
 */
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
				  int flags)
{
	if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
		resched_task(rq->curr);
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * In the unlikely case current and p have the same deadline
	 * let us try to decide what's the best thing to do...
	 */
	if ((p->dl.deadline == rq->curr->dl.deadline) &&
	    !test_tsk_need_resched(rq->curr))
		check_preempt_equal_dl(rq, p);
#endif /* CONFIG_SMP */
}

#ifdef CONFIG_SCHED_HRTICK
static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
{
	s64 delta = p->dl.dl_runtime - p->dl.runtime;

	if (delta > 10000)
		hrtick_start(rq, p->dl.runtime);
}
#endif

static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
						   struct dl_rq *dl_rq)
{
	struct rb_node *left = dl_rq->rb_leftmost;

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_dl_entity, rb_node);
}

struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
{
	struct sched_dl_entity *dl_se;
	struct task_struct *p;
	struct dl_rq *dl_rq;

	dl_rq = &rq->dl;

	if (need_pull_dl_task(rq, prev)) {
		pull_dl_task(rq);
		/*
		 * pull_dl_task() can drop (and re-acquire) rq->lock; this
		 * means a stop task can slip in, in which case we need to
		 * re-start task selection.
		 */
		if (rq->stop && rq->stop->on_rq)
			return RETRY_TASK;
	}

	/*
	 * When prev is DL, we may throttle it in put_prev_task().
	 * So, we update time before we check for dl_nr_running.
	 */
	if (prev->sched_class == &dl_sched_class)
		update_curr_dl(rq);

	if (unlikely(!dl_rq->dl_nr_running))
		return NULL;

	put_prev_task(rq, prev);

	dl_se = pick_next_dl_entity(rq, dl_rq);
	BUG_ON(!dl_se);

	p = dl_task_of(dl_se);
	p->se.exec_start = rq_clock_task(rq);

	/* Running task will never be pushed. */
	dequeue_pushable_dl_task(rq, p);

#ifdef CONFIG_SCHED_HRTICK
	if (hrtick_enabled(rq))
		start_hrtick_dl(rq, p);
#endif

	set_post_schedule(rq);

	return p;
}

static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
{
	update_curr_dl(rq);

	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
		enqueue_pushable_dl_task(rq, p);
}

static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
{
	update_curr_dl(rq);

#ifdef CONFIG_SCHED_HRTICK
	if (hrtick_enabled(rq) && queued && p->dl.runtime > 0)
		start_hrtick_dl(rq, p);
#endif
}

static void task_fork_dl(struct task_struct *p)
{
	/*
	 * SCHED_DEADLINE tasks cannot fork and this is achieved through
	 * sched_fork().
	 */
}

static void task_dead_dl(struct task_struct *p)
{
	struct hrtimer *timer = &p->dl.dl_timer;
	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

	/*
	 * Since we are TASK_DEAD we won't slip out of the domain!
	 */
	raw_spin_lock_irq(&dl_b->lock);
	dl_b->total_bw -= p->dl.dl_bw;
	raw_spin_unlock_irq(&dl_b->lock);

	hrtimer_cancel(timer);
}

static void set_curr_task_dl(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq_clock_task(rq);

	/* You can't push away the running task */
	dequeue_pushable_dl_task(rq, p);
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define DL_MAX_TRIES 3

static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
	    (p->nr_cpus_allowed > 1))
		return 1;

	return 0;
}

/* Returns the second earliest -deadline task, NULL otherwise */
static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu)
{
	struct rb_node *next_node = rq->dl.rb_leftmost;
	struct sched_dl_entity *dl_se;
	struct task_struct *p = NULL;

next_node:
	next_node = rb_next(next_node);
	if (next_node) {
		dl_se = rb_entry(next_node, struct sched_dl_entity, rb_node);
		p = dl_task_of(dl_se);

		if (pick_dl_task(rq, p, cpu))
			return p;

		goto next_node;
	}

	return NULL;
}

static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);

static int find_later_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	struct cpumask *later_mask = __get_cpu_var(local_cpu_mask_dl);
	int this_cpu = smp_processor_id();
	int best_cpu, cpu = task_cpu(task);

	/* Make sure the mask is initialized first */
	if (unlikely(!later_mask))
		return -1;

	if (task->nr_cpus_allowed == 1)
		return -1;

	best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
			      task, later_mask);
	if (best_cpu == -1)
		return -1;

	/*
	 * If we are here, some target has been found,
	 * the most suitable of which is cached in best_cpu.
	 * This is, among the runqueues where the current tasks
	 * have later deadlines than the task's one, the rq
	 * with the latest possible one.
	 *
	 * Now we check how well this matches with task's
	 * affinity and system topology.
	 *
	 * The last cpu where the task ran is our first
	 * guess, since it is most likely cache-hot there.
	 */
	if (cpumask_test_cpu(cpu, later_mask))
		return cpu;
	/*
	 * Check if this_cpu is to be skipped (i.e., it is
	 * not in the mask) or not.
	 */
	if (!cpumask_test_cpu(this_cpu, later_mask))
		this_cpu = -1;

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {

			/*
			 * If possible, preempting this_cpu is
			 * cheaper than migrating.
			 */
			if (this_cpu != -1 &&
			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
				rcu_read_unlock();
				return this_cpu;
			}

			/*
			 * Last chance: if best_cpu is valid and is
			 * in the mask, that becomes our choice.
			 */
			if (best_cpu < nr_cpu_ids &&
			    cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
				rcu_read_unlock();
				return best_cpu;
			}
		}
	}
	rcu_read_unlock();

	/*
	 * At this point, all our guesses failed, we just return
	 * 'something', and let the caller sort the things out.
	 */
	if (this_cpu != -1)
		return this_cpu;

	cpu = cpumask_any(later_mask);
	if (cpu < nr_cpu_ids)
		return cpu;

	return -1;
}

/* Locks the rq it finds */
static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *later_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < DL_MAX_TRIES; tries++) {
		cpu = find_later_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		later_rq = cpu_rq(cpu);

		/* Retry if something changed. */
		if (double_lock_balance(rq, later_rq)) {
			if (unlikely(task_rq(task) != rq ||
				     !cpumask_test_cpu(later_rq->cpu,
						       &task->cpus_allowed) ||
				     task_running(rq, task) || !task->on_rq)) {
				double_unlock_balance(rq, later_rq);
				later_rq = NULL;
				break;
			}
		}

		/*
		 * If the rq we found has no -deadline task, or
		 * its earliest one has a later deadline than our
		 * task, the rq is a good one.
		 */
		if (!later_rq->dl.dl_nr_running ||
		    dl_time_before(task->dl.deadline,
				   later_rq->dl.earliest_dl.curr))
			break;

		/* Otherwise we try again. */
		double_unlock_balance(rq, later_rq);
		later_rq = NULL;
	}

	return later_rq;
}

static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
{
	struct task_struct *p;

	if (!has_pushable_dl_tasks(rq))
		return NULL;

	p = rb_entry(rq->dl.pushable_dl_tasks_leftmost,
		     struct task_struct, pushable_dl_tasks);

	BUG_ON(rq->cpu != task_cpu(p));
	BUG_ON(task_current(rq, p));
	BUG_ON(p->nr_cpus_allowed <= 1);

	BUG_ON(!p->on_rq);
	BUG_ON(!dl_task(p));

	return p;
}

/*
 * See if the non running -deadline tasks on this rq
 * can be sent to some other CPU where they can preempt
 * and start executing.
 */
static int push_dl_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *later_rq;

	if (!rq->dl.overloaded)
		return 0;

	next_task = pick_next_pushable_dl_task(rq);
	if (!next_task)
		return 0;

retry:
	if (unlikely(next_task == rq->curr)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * If next_task preempts rq->curr, and rq->curr
	 * can move away, it makes sense to just reschedule
	 * without going further in pushing next_task.
	 */
	if (dl_task(rq->curr) &&
	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
	    rq->curr->nr_cpus_allowed > 1) {
		resched_task(rq->curr);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* Will lock the rq it'll find */
	later_rq = find_lock_later_rq(next_task, rq);
	if (!later_rq) {
		struct task_struct *task;

		/*
		 * We must check all this again, since
		 * find_lock_later_rq releases rq->lock and it is
		 * then possible that next_task has migrated.
		 */
		task = pick_next_pushable_dl_task(rq);
		if (task_cpu(next_task) == rq->cpu && task == next_task) {
			/*
			 * The task is still there. We don't try
			 * again, some other cpu will pull it when ready.
			 */
			dequeue_pushable_dl_task(rq, next_task);
			goto out;
		}

		if (!task)
			/* No more tasks */
			goto out;

		put_task_struct(next_task);
		next_task = task;
		goto retry;
	}

	deactivate_task(rq, next_task, 0);
	set_task_cpu(next_task, later_rq->cpu);
	activate_task(later_rq, next_task, 0);

	resched_task(later_rq->curr);

	double_unlock_balance(rq, later_rq);

out:
	put_task_struct(next_task);

	return 1;
}

static void push_dl_tasks(struct rq *rq)
{
	/* Terminates as it moves a -deadline task */
	while (push_dl_task(rq))
		;
}

static int pull_dl_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, ret = 0, cpu;
	struct task_struct *p;
	struct rq *src_rq;
	u64 dmin = LONG_MAX;

	if (likely(!dl_overloaded(this_rq)))
		return 0;

	/*
	 * Match the barrier from dl_set_overload(); this guarantees that if we
	 * see overloaded we must also see the dlo_mask bit.
	 */
	smp_rmb();

	for_each_cpu(cpu, this_rq->rd->dlo_mask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);

		/*
		 * It looks racy, and it is! However, as in sched_rt.c,
		 * we are fine with this.
		 */
		if (this_rq->dl.dl_nr_running &&
		    dl_time_before(this_rq->dl.earliest_dl.curr,
				   src_rq->dl.earliest_dl.next))
			continue;

		/* Might drop this_rq->lock */
		double_lock_balance(this_rq, src_rq);

		/*
		 * If there are no more pullable tasks on the
		 * rq, we're done with it.
		 */
		if (src_rq->dl.dl_nr_running <= 1)
			goto skip;

		p = pick_next_earliest_dl_task(src_rq, this_cpu);

		/*
		 * We found a task to be pulled if:
		 *  - it preempts our current (if there's one),
		 *  - it will preempt the last one we pulled (if any).
		 */
		if (p && dl_time_before(p->dl.deadline, dmin) &&
		    (!this_rq->dl.dl_nr_running ||
		     dl_time_before(p->dl.deadline,
				    this_rq->dl.earliest_dl.curr))) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!p->on_rq);

			/*
			 * Then we pull iff p has actually an earlier
			 * deadline than the current task of its runqueue.
			 */
			if (dl_time_before(p->dl.deadline,
					   src_rq->curr->dl.deadline))
				goto skip;

			ret = 1;

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			dmin = p->dl.deadline;

			/* Is there any other task even earlier? */
		}
skip:
		double_unlock_balance(this_rq, src_rq);
	}

	return ret;
}

static void post_schedule_dl(struct rq *rq)
{
	push_dl_tasks(rq);
}

/*
 * Since the task is not running and a reschedule is not going to happen
 * anytime soon on its runqueue, we try pushing it away now.
 */
static void task_woken_dl(struct rq *rq, struct task_struct *p)
{
	if (!task_running(rq, p) &&
	    !test_tsk_need_resched(rq->curr) &&
	    has_pushable_dl_tasks(rq) &&
	    p->nr_cpus_allowed > 1 &&
	    dl_task(rq->curr) &&
	    (rq->curr->nr_cpus_allowed < 2 ||
	     dl_entity_preempt(&rq->curr->dl, &p->dl))) {
		push_dl_tasks(rq);
	}
}

static void set_cpus_allowed_dl(struct task_struct *p,
				const struct cpumask *new_mask)
{
	struct rq *rq;
	int weight;

	BUG_ON(!dl_task(p));

	/*
	 * Update only if the task is actually running (i.e.,
	 * it is on the rq AND it is not throttled).
	 */
	if (!on_dl_rq(&p->dl))
		return;

	weight = cpumask_weight(new_mask);

	/*
	 * Only update if the task changes its state from whether it
	 * can migrate or not.
	 */
	if ((p->nr_cpus_allowed > 1) == (weight > 1))
		return;

	rq = task_rq(p);

	/*
	 * The task used to be able to migrate OR it can now migrate.
	 */
	if (weight <= 1) {
		if (!task_current(rq, p))
			dequeue_pushable_dl_task(rq, p);
		BUG_ON(!rq->dl.dl_nr_migratory);
		rq->dl.dl_nr_migratory--;
	} else {
		if (!task_current(rq, p))
			enqueue_pushable_dl_task(rq, p);
		rq->dl.dl_nr_migratory++;
	}

	update_dl_migration(&rq->dl);
}

/* Assumes rq->lock is held */
static void rq_online_dl(struct rq *rq)
{
	if (rq->dl.overloaded)
		dl_set_overload(rq);

	if (rq->dl.dl_nr_running > 0)
		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
}

/* Assumes rq->lock is held */
static void rq_offline_dl(struct rq *rq)
{
	if (rq->dl.overloaded)
		dl_clear_overload(rq);

	cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
}

void init_sched_dl_class(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
					GFP_KERNEL, cpu_to_node(i));
}

#endif /* CONFIG_SMP */

static void switched_from_dl(struct rq *rq, struct task_struct *p)
{
	if (hrtimer_active(&p->dl.dl_timer) && !dl_policy(p->policy))
		hrtimer_try_to_cancel(&p->dl.dl_timer);

#ifdef CONFIG_SMP
	/*
	 * Since this might be the only -deadline task on the rq,
	 * this is the right place to try to pull some other one
	 * from an overloaded cpu, if any.
	 */
	if (!rq->dl.dl_nr_running)
		pull_dl_task(rq);
#endif
}

/*
 * When switching to -deadline, we may overload the rq, then
 * we try to push someone off, if possible.
 */
static void switched_to_dl(struct rq *rq, struct task_struct *p)
{
	int check_resched = 1;

	/*
	 * If p is throttled, don't consider the possibility
	 * of preempting rq->curr, the check will be done right
	 * after its runtime will get replenished.
	 */
	if (unlikely(p->dl.dl_throttled))
		return;

	if (p->on_rq && rq->curr != p) {
#ifdef CONFIG_SMP
		if (rq->dl.overloaded && push_dl_task(rq) && rq != task_rq(p))
			/* Only reschedule if pushing failed */
			check_resched = 0;
#endif /* CONFIG_SMP */
		if (check_resched && task_has_dl_policy(rq->curr))
			check_preempt_curr_dl(rq, p, 0);
	}
}

/*
 * If the scheduling parameters of a -deadline task changed,
 * a push or pull operation might be needed.
 */
static void prio_changed_dl(struct rq *rq, struct task_struct *p,
			    int oldprio)
{
	if (p->on_rq || rq->curr == p) {
#ifdef CONFIG_SMP
		/*
		 * This might be too much, but unfortunately
		 * we don't have the old deadline value, and
		 * we can't argue if the task is increasing
		 * or lowering its prio, so...
		 */
		if (!rq->dl.overloaded)
			pull_dl_task(rq);

		/*
		 * If we now have an earlier deadline task than p,
		 * then reschedule, provided p is still on this
		 * runqueue.
		 */
		if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline) &&
		    rq->curr == p)
			resched_task(p);
#else
		/*
		 * Again, we don't know if p has an earlier
		 * or later deadline, so let's blindly set a
		 * (maybe not needed) rescheduling point.
		 */
		resched_task(p);
#endif /* CONFIG_SMP */
	} else
		switched_to_dl(rq, p);
}

const struct sched_class dl_sched_class = {
	.next			= &rt_sched_class,
	.enqueue_task		= enqueue_task_dl,
	.dequeue_task		= dequeue_task_dl,
	.yield_task		= yield_task_dl,

	.check_preempt_curr	= check_preempt_curr_dl,

	.pick_next_task		= pick_next_task_dl,
	.put_prev_task		= put_prev_task_dl,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_dl,
	.set_cpus_allowed	= set_cpus_allowed_dl,
	.rq_online		= rq_online_dl,
	.rq_offline		= rq_offline_dl,
	.post_schedule		= post_schedule_dl,
	.task_woken		= task_woken_dl,
#endif

	.set_curr_task		= set_curr_task_dl,
	.task_tick		= task_tick_dl,
	.task_fork		= task_fork_dl,
	.task_dead		= task_dead_dl,

	.prio_changed		= prio_changed_dl,
	.switched_from		= switched_from_dl,
	.switched_to		= switched_to_dl,
};