// SPDX-License-Identifier: GPL-2.0
/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#include "sched.h"
#include "pelt.h"

int sched_rr_timeslice = RR_TIMESLICE;
/* More than 4 hours if BW_SHIFT equals 20. */
static const u64 max_rt_runtime = MAX_BW;

/*
 * period over which we measure -rt task CPU usage in us.
 * default: 1s
 */
int sysctl_sched_rt_period = 1000000;

/*
 * part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;

#ifdef CONFIG_SYSCTL
static int sysctl_sched_rr_timeslice = (MSEC_PER_SEC * RR_TIMESLICE) / HZ;
static int sched_rt_handler(const struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos);
static int sched_rr_handler(const struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos);
static const struct ctl_table sched_rt_sysctls[] = {
	{
		.procname = "sched_rt_period_us",
		.data = &sysctl_sched_rt_period,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = sched_rt_handler,
		.extra1 = SYSCTL_ONE,
		.extra2 = SYSCTL_INT_MAX,
	},
	{
		.procname = "sched_rt_runtime_us",
		.data = &sysctl_sched_rt_runtime,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = sched_rt_handler,
		.extra1 = SYSCTL_NEG_ONE,
		.extra2 = (void *)&sysctl_sched_rt_period,
	},
	{
		.procname = "sched_rr_timeslice_ms",
		.data = &sysctl_sched_rr_timeslice,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = sched_rr_handler,
	},
};

static int __init sched_rt_sysctl_init(void)
{
	register_sysctl_init("kernel", sched_rt_sysctls);
	return 0;
}
late_initcall(sched_rt_sysctl_init);
#endif /* CONFIG_SYSCTL */

void init_rt_rq(struct rt_rq *rt_rq)
{
	struct rt_prio_array *array;
	int i;

	array = &rt_rq->active;
	for (i = 0; i < MAX_RT_PRIO; i++) {
		INIT_LIST_HEAD(array->queue + i);
		__clear_bit(i, array->bitmap);
	}
	/* delimiter for bitsearch: */
	__set_bit(MAX_RT_PRIO, array->bitmap);

	rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
	rt_rq->highest_prio.next = MAX_RT_PRIO-1;
	rt_rq->overloaded = 0;
	plist_head_init(&rt_rq->pushable_tasks);
	/* We start in dequeued state, because no RT tasks are queued */
	rt_rq->rt_queued = 0;

#ifdef CONFIG_RT_GROUP_SCHED
	rt_rq->rt_time = 0;
	rt_rq->rt_throttled = 0;
	rt_rq->rt_runtime = 0;
	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
	rt_rq->tg = &root_task_group;
#endif
}

#ifdef CONFIG_RT_GROUP_SCHED

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
	struct rt_bandwidth *rt_b =
		container_of(timer, struct rt_bandwidth, rt_period_timer);
	int idle = 0;
	int overrun;

	raw_spin_lock(&rt_b->rt_runtime_lock);
	for (;;) {
		overrun = hrtimer_forward_now(timer, rt_b->rt_period);
		if (!overrun)
			break;

		raw_spin_unlock(&rt_b->rt_runtime_lock);
		idle = do_sched_rt_period_timer(rt_b, overrun);
		raw_spin_lock(&rt_b->rt_runtime_lock);
	}
	if (idle)
		rt_b->rt_period_active = 0;
	raw_spin_unlock(&rt_b->rt_runtime_lock);

	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

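/*
 * Set up the bandwidth control for an RT group: record period and runtime
 * and initialize the replenishment hrtimer (armed lazily from
 * start_rt_bandwidth()).
 */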
void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
	rt_b->rt_period = ns_to_ktime(period);
	rt_b->rt_runtime = runtime;

	raw_spin_lock_init(&rt_b->rt_runtime_lock);

	hrtimer_setup(&rt_b->rt_period_timer, sched_rt_period_timer, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL_HARD);
}

static inline void do_start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	raw_spin_lock(&rt_b->rt_runtime_lock);
	if (!rt_b->rt_period_active) {
		rt_b->rt_period_active = 1;
		/*
		 * SCHED_DEADLINE updates the bandwidth, as a runaway
		 * RT task with a DL task could hog a CPU. But DL does
		 * not reset the period. If a deadline task was running
		 * without an RT task running, it can cause RT tasks to
		 * throttle when they start up. Kick the timer right away
		 * to update the period.
		 */
		hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
		hrtimer_start_expires(&rt_b->rt_period_timer,
				      HRTIMER_MODE_ABS_PINNED_HARD);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}

static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return;

	do_start_rt_bandwidth(rt_b);
}

static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	hrtimer_cancel(&rt_b->rt_period_timer);
}

#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
	WARN_ON_ONCE(!rt_entity_is_task(rt_se));

	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	/* Cannot fold with non-CONFIG_RT_GROUP_SCHED version, layout */
	WARN_ON(!rt_group_sched_enabled() && rt_rq->tg != &root_task_group);
	return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	WARN_ON(!rt_group_sched_enabled() && rt_se->rt_rq->tg != &root_task_group);
	return rt_se->rt_rq;
}

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_se->rt_rq;

	WARN_ON(!rt_group_sched_enabled() && rt_rq->tg != &root_task_group);
	return rt_rq->rq;
}

void unregister_rt_sched_group(struct task_group *tg)
{
	if (!rt_group_sched_enabled())
		return;

	if (tg->rt_se)
		destroy_rt_bandwidth(&tg->rt_bandwidth);
}

void free_rt_sched_group(struct task_group *tg)
{
	int i;

	if (!rt_group_sched_enabled())
		return;

	for_each_possible_cpu(i) {
		if (tg->rt_rq)
			kfree(tg->rt_rq[i]);
		if (tg->rt_se)
			kfree(tg->rt_se[i]);
	}

	kfree(tg->rt_rq);
	kfree(tg->rt_se);
}

void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		      struct sched_rt_entity *rt_se, int cpu,
		      struct sched_rt_entity *parent)
{
	struct rq *rq = cpu_rq(cpu);

	rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
	rt_rq->rt_nr_boosted = 0;
	rt_rq->rq = rq;
	rt_rq->tg = tg;

	tg->rt_rq[cpu] = rt_rq;
	tg->rt_se[cpu] = rt_se;

	if (!rt_se)
		return;

	if (!parent)
		rt_se->rt_rq = &rq->rt;
	else
		rt_se->rt_rq = parent->my_q;

	rt_se->my_q = rt_rq;
	rt_se->parent = parent;
	INIT_LIST_HEAD(&rt_se->run_list);
}

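/*
 * Allocate the per-CPU rt_rq and sched_rt_entity arrays for @tg and link
 * each entry into @parent's hierarchy. Returns 1 on success, 0 on failure.
 */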
int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	struct rt_rq *rt_rq;
	struct sched_rt_entity *rt_se;
	int i;

	if (!rt_group_sched_enabled())
		return 1;

	tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
	if (!tg->rt_rq)
		goto err;
	tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL);
	if (!tg->rt_se)
		goto err;

	init_rt_bandwidth(&tg->rt_bandwidth, ktime_to_ns(global_rt_period()), 0);

	for_each_possible_cpu(i) {
		rt_rq = kzalloc_node(sizeof(struct rt_rq),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_rq)
			goto err;

		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_se)
			goto err_free_rq;

		init_rt_rq(rt_rq);
		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
	}

	return 1;

err_free_rq:
	kfree(rt_rq);
err:
	return 0;
}

#else /* !CONFIG_RT_GROUP_SCHED: */

#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return container_of(rt_rq, struct rq, rt);
}

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
	struct task_struct *p = rt_task_of(rt_se);

	return task_rq(p);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	struct rq *rq = rq_of_rt_se(rt_se);

	return &rq->rt;
}

void unregister_rt_sched_group(struct task_group *tg) { }

void free_rt_sched_group(struct task_group *tg) { }

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	return 1;
}
#endif /* !CONFIG_RT_GROUP_SCHED */

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
	/* Try to pull RT tasks here if we lower this rq's prio */
	return rq->online && rq->rt.highest_prio.curr > prev->prio;
}

static inline int rt_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 *
	 * Matched by the barrier in pull_rt_task().
	 */
	smp_wmb();
	atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	/* the order here really doesn't matter */
	atomic_dec(&rq->rd->rto_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}

static inline int has_pushable_tasks(struct rq *rq)
{
	return !plist_head_empty(&rq->rt.pushable_tasks);
}

static DEFINE_PER_CPU(struct balance_callback, rt_push_head);
static DEFINE_PER_CPU(struct balance_callback, rt_pull_head);

static void push_rt_tasks(struct rq *);
static void pull_rt_task(struct rq *);

static inline void rt_queue_push_tasks(struct rq *rq)
{
	if (!has_pushable_tasks(rq))
		return;

	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
}

static inline void rt_queue_pull_task(struct rq *rq)
{
	queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
}

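/*
 * Track @p on this rq's pushable-tasks plist (ordered by priority), keep
 * highest_prio.next up to date and mark the rq as RT overloaded.
 */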
static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
	plist_node_init(&p->pushable_tasks, p->prio);
	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the highest prio pushable task */
	if (p->prio < rq->rt.highest_prio.next)
		rq->rt.highest_prio.next = p->prio;

	if (!rq->rt.overloaded) {
		rt_set_overload(rq);
		rq->rt.overloaded = 1;
	}
}

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the new highest prio pushable task */
	if (has_pushable_tasks(rq)) {
		p = plist_first_entry(&rq->rt.pushable_tasks,
				      struct task_struct, pushable_tasks);
		rq->rt.highest_prio.next = p->prio;
	} else {
		rq->rt.highest_prio.next = MAX_RT_PRIO-1;

		if (rq->rt.overloaded) {
			rt_clear_overload(rq);
			rq->rt.overloaded = 0;
		}
	}
}

static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
static void dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count);

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->on_rq;
}

#ifdef CONFIG_UCLAMP_TASK
/*
 * Verify the fitness of task @p to run on @cpu taking into account the uclamp
 * settings.
 *
 * This check is only important for heterogeneous systems where the uclamp_min
 * value is higher than the capacity of a @cpu. For non-heterogeneous systems
 * this function will always return true.
 *
 * The function will return true if the capacity of the @cpu is >= the
 * uclamp_min and false otherwise.
 *
 * Note that uclamp_min will be clamped to uclamp_max if uclamp_min
 * > uclamp_max.
 */
static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
{
	unsigned int min_cap;
	unsigned int max_cap;
	unsigned int cpu_cap;

	/* Only heterogeneous systems can benefit from this check */
	if (!sched_asym_cpucap_active())
		return true;

	min_cap = uclamp_eff_value(p, UCLAMP_MIN);
	max_cap = uclamp_eff_value(p, UCLAMP_MAX);

	cpu_cap = arch_scale_cpu_capacity(cpu);

	return cpu_cap >= min(min_cap, max_cap);
}
#else /* !CONFIG_UCLAMP_TASK: */
static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
{
	return true;
}
#endif /* !CONFIG_UCLAMP_TASK */

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

typedef struct task_group *rt_rq_iter_t;

static inline struct task_group *next_task_group(struct task_group *tg)
{
	if (!rt_group_sched_enabled()) {
		WARN_ON(tg != &root_task_group);
		return NULL;
	}

	do {
		tg = list_entry_rcu(tg->list.next,
			typeof(struct task_group), list);
	} while (&tg->list != &task_groups && task_group_is_autogroup(tg));

	if (&tg->list == &task_groups)
		tg = NULL;

	return tg;
}

#define for_each_rt_rq(rt_rq, iter, rq)					\
	for (iter = &root_task_group;					\
	     iter && (rt_rq = iter->rt_rq[cpu_of(rq)]);			\
	     iter = next_task_group(iter))

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);

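/*
 * (Re)enqueue the entity representing @rt_rq in its parent, e.g. after the
 * group has been unthrottled, and reschedule if it now carries the highest
 * priority on this rq.
 */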
static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct task_struct *donor = rq_of_rt_rq(rt_rq)->donor;
	struct rq *rq = rq_of_rt_rq(rt_rq);
	struct sched_rt_entity *rt_se;

	int cpu = cpu_of(rq);

	rt_se = rt_rq->tg->rt_se[cpu];

	if (rt_rq->rt_nr_running) {
		if (!rt_se)
			enqueue_top_rt_rq(rt_rq);
		else if (!on_rt_rq(rt_se))
			enqueue_rt_entity(rt_se, 0);

		if (rt_rq->highest_prio.curr < donor->prio)
			resched_curr(rq);
	}
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se;
	int cpu = cpu_of(rq_of_rt_rq(rt_rq));

	rt_se = rt_rq->tg->rt_se[cpu];

	if (!rt_se) {
		dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
		/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
		cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
	} else if (on_rt_rq(rt_se))
		dequeue_rt_entity(rt_se, 0);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = group_rt_rq(rt_se);
	struct task_struct *p;

	if (rt_rq)
		return !!rt_rq->rt_nr_boosted;

	p = rt_task_of(rt_se);
	return p->prio != p->normal_prio;
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
	return this_rq()->rd->span;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &rt_rq->tg->rt_bandwidth;
}

bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

	return (hrtimer_active(&rt_b->rt_period_timer) ||
		rt_rq->rt_time < rt_b->rt_runtime);
}

/*
 * We ran out of runtime, see if we can borrow some from our neighbours.
 */
static void do_balance_runtime(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
	struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
	int i, weight;
	u64 rt_period;

	weight = cpumask_weight(rd->span);

	raw_spin_lock(&rt_b->rt_runtime_lock);
	rt_period = ktime_to_ns(rt_b->rt_period);
	for_each_cpu(i, rd->span) {
		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
		s64 diff;

		if (iter == rt_rq)
			continue;

		raw_spin_lock(&iter->rt_runtime_lock);
		/*
		 * Either all rqs have inf runtime and there's nothing to steal
		 * or __disable_runtime() below sets a specific rq to inf to
		 * indicate it's been disabled and disallow stealing.
		 */
		if (iter->rt_runtime == RUNTIME_INF)
			goto next;

		/*
		 * From runqueues with spare time, take 1/n part of their
		 * spare time, but no more than our period.
		 */
		diff = iter->rt_runtime - iter->rt_time;
		if (diff > 0) {
			diff = div_u64((u64)diff, weight);
			if (rt_rq->rt_runtime + diff > rt_period)
				diff = rt_period - rt_rq->rt_runtime;
			iter->rt_runtime -= diff;
			rt_rq->rt_runtime += diff;
			if (rt_rq->rt_runtime == rt_period) {
				raw_spin_unlock(&iter->rt_runtime_lock);
				break;
			}
		}
next:
		raw_spin_unlock(&iter->rt_runtime_lock);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}

/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
static void __disable_runtime(struct rq *rq)
{
	struct root_domain *rd = rq->rd;
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
		s64 want;
		int i;

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * Either we're all inf and nobody needs to borrow, or we're
		 * already disabled and thus have nothing to do, or we have
		 * exactly the right amount of runtime to take out.
		 */
		if (rt_rq->rt_runtime == RUNTIME_INF ||
		    rt_rq->rt_runtime == rt_b->rt_runtime)
			goto balanced;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);

		/*
		 * Calculate the difference between what we started out with
		 * and what we currently have, that's the amount of runtime
		 * we lent and now have to reclaim.
		 */
		want = rt_b->rt_runtime - rt_rq->rt_runtime;

		/*
		 * Greedy reclaim, take back as much as we can.
		 */
		for_each_cpu(i, rd->span) {
			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
			s64 diff;

			/*
			 * Can't reclaim from ourselves or disabled runqueues.
			 */
			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
				continue;

			raw_spin_lock(&iter->rt_runtime_lock);
			if (want > 0) {
				diff = min_t(s64, iter->rt_runtime, want);
				iter->rt_runtime -= diff;
				want -= diff;
			} else {
				iter->rt_runtime -= want;
				want -= want;
			}
			raw_spin_unlock(&iter->rt_runtime_lock);

			if (!want)
				break;
		}

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We cannot be left wanting - that would mean some runtime
		 * leaked out of the system.
		 */
		WARN_ON_ONCE(want);
balanced:
		/*
		 * Disable all the borrow logic by pretending we have inf
		 * runtime - in which case borrowing doesn't make sense.
		 */
		rt_rq->rt_runtime = RUNTIME_INF;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);

		/* Make rt_rq available for pick_next_task() */
		sched_rt_rq_enqueue(rt_rq);
	}
}

static void __enable_runtime(struct rq *rq)
{
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	/*
	 * Reset each runqueue's bandwidth settings
	 */
	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_b->rt_runtime;
		rt_rq->rt_time = 0;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
	}
}

static void balance_runtime(struct rt_rq *rt_rq)
{
	if (!sched_feat(RT_RUNTIME_SHARE))
		return;

	if (rt_rq->rt_time > rt_rq->rt_runtime) {
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		do_balance_runtime(rt_rq);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
	}
}

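/*
 * Bandwidth period expired: refill the runtime of every rt_rq covered by
 * @rt_b, unthrottle queues that have runtime left again, and return
 * whether the timer may go idle.
 */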
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
	int i, idle = 1, throttled = 0;
	const struct cpumask *span;

	span = sched_rt_period_mask();

	/*
	 * FIXME: isolated CPUs should really leave the root task group,
	 * whether they are isolcpus or were isolated via cpusets, lest
	 * the timer run on a CPU which does not service all runqueues,
	 * potentially leaving other CPUs indefinitely throttled. If
	 * isolation is really required, the user will turn the throttle
	 * off to kill the perturbations it causes anyway. Meanwhile,
	 * this maintains functionality for boot and/or troubleshooting.
	 */
	if (rt_b == &root_task_group.rt_bandwidth)
		span = cpu_online_mask;

	for_each_cpu(i, span) {
		int enqueue = 0;
		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
		struct rq *rq = rq_of_rt_rq(rt_rq);
		struct rq_flags rf;
		int skip;

		/*
		 * When span == cpu_online_mask, taking each rq->lock
		 * can be time-consuming. Try to avoid it when possible.
		 */
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
			rt_rq->rt_runtime = rt_b->rt_runtime;
		skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		if (skip)
			continue;

		rq_lock(rq, &rf);
		update_rq_clock(rq);

		if (rt_rq->rt_time) {
			u64 runtime;

			raw_spin_lock(&rt_rq->rt_runtime_lock);
			if (rt_rq->rt_throttled)
				balance_runtime(rt_rq);
			runtime = rt_rq->rt_runtime;
			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
				rt_rq->rt_throttled = 0;
				enqueue = 1;

				/*
				 * When we're idle and a woken (rt) task is
				 * throttled wakeup_preempt() will set
				 * skip_update and the time between the wakeup
				 * and this unthrottle will get accounted as
				 * 'runtime'.
				 */
				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
					rq_clock_cancel_skipupdate(rq);
			}
			if (rt_rq->rt_time || rt_rq->rt_nr_running)
				idle = 0;
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		} else if (rt_rq->rt_nr_running) {
			idle = 0;
			if (!rt_rq_throttled(rt_rq))
				enqueue = 1;
		}
		if (rt_rq->rt_throttled)
			throttled = 1;

		if (enqueue)
			sched_rt_rq_enqueue(rt_rq);
		rq_unlock(rq, &rf);
	}

	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
		return 1;

	return idle;
}

static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
	u64 runtime = sched_rt_runtime(rt_rq);

	if (rt_rq->rt_throttled)
		return rt_rq_throttled(rt_rq);

	if (runtime >= sched_rt_period(rt_rq))
		return 0;

	balance_runtime(rt_rq);
	runtime = sched_rt_runtime(rt_rq);
	if (runtime == RUNTIME_INF)
		return 0;

	if (rt_rq->rt_time > runtime) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		/*
		 * Don't actually throttle groups that have no runtime assigned
		 * but accrue some time due to boosting.
		 */
		if (likely(rt_b->rt_runtime)) {
			rt_rq->rt_throttled = 1;
			printk_deferred_once("sched: RT throttling activated\n");
		} else {
			/*
			 * In case we did anyway, make it go away,
			 * replenishment is a joke, since it will replenish us
			 * with exactly 0 ns.
			 */
			rt_rq->rt_time = 0;
		}

		if (rt_rq_throttled(rt_rq)) {
			sched_rt_rq_dequeue(rt_rq);
			return 1;
		}
	}

	return 0;
}

#else /* !CONFIG_RT_GROUP_SCHED: */

typedef struct rt_rq *rt_rq_iter_t;

#define for_each_rt_rq(rt_rq, iter, rq) \
	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	if (!rt_rq->rt_nr_running)
		return;

	enqueue_top_rt_rq(rt_rq);
	resched_curr(rq);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return false;
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return &cpu_rq(cpu)->rt;
}

static void __enable_runtime(struct rq *rq) { }
static void __disable_runtime(struct rq *rq) { }

#endif /* !CONFIG_RT_GROUP_SCHED */

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
	struct rt_rq *rt_rq = group_rt_rq(rt_se);

	if (rt_rq)
		return rt_rq->highest_prio.curr;
#endif

	return rt_task_of(rt_se)->prio;
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *donor = rq->donor;
	s64 delta_exec;

	if (donor->sched_class != &rt_sched_class)
		return;

	delta_exec = update_curr_common(rq);
	if (unlikely(delta_exec <= 0))
		return;

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity *rt_se = &donor->rt;

	if (!rt_bandwidth_enabled())
		return;

	for_each_sched_rt_entity(rt_se) {
		struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
		int exceeded;

		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
			raw_spin_lock(&rt_rq->rt_runtime_lock);
			rt_rq->rt_time += delta_exec;
			exceeded = sched_rt_runtime_exceeded(rt_rq);
			if (exceeded)
				resched_curr(rq);
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
			if (exceeded)
				do_start_rt_bandwidth(sched_rt_bandwidth(rt_rq));
		}
	}
#endif /* CONFIG_RT_GROUP_SCHED */
}

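/*
 * dequeue_top_rt_rq() and enqueue_top_rt_rq() take the root rt_rq's tasks
 * out of / back into this rq's nr_running accounting, e.g. when the whole
 * RT class gets throttled or unthrottled.
 */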
static void
dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	BUG_ON(&rq->rt != rt_rq);

	if (!rt_rq->rt_queued)
		return;

	BUG_ON(!rq->nr_running);

	sub_nr_running(rq, count);
	rt_rq->rt_queued = 0;
}

static void
enqueue_top_rt_rq(struct rt_rq *rt_rq)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	BUG_ON(&rq->rt != rt_rq);

	if (rt_rq->rt_queued)
		return;

	if (rt_rq_throttled(rt_rq))
		return;

	if (rt_rq->rt_nr_running) {
		add_nr_running(rq, rt_rq->rt_nr_running);
		rt_rq->rt_queued = 1;
	}

	/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
	cpufreq_update_util(rq, 0);
}

static void
inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	/*
	 * Change rq's cpupri only if rt_rq is the top queue.
	 */
	if (IS_ENABLED(CONFIG_RT_GROUP_SCHED) && &rq->rt != rt_rq)
		return;

	if (rq->online && prio < prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
}

static void
dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	/*
	 * Change rq's cpupri only if rt_rq is the top queue.
	 */
	if (IS_ENABLED(CONFIG_RT_GROUP_SCHED) && &rq->rt != rt_rq)
		return;

	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
}

static void
inc_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (prio < prev_prio)
		rt_rq->highest_prio.curr = prio;

	inc_rt_prio_smp(rt_rq, prio, prev_prio);
}

static void
dec_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (rt_rq->rt_nr_running) {

		WARN_ON(prio < prev_prio);

		/*
		 * This may have been our highest task, and therefore
		 * we may have some re-computation to do
		 */
		if (prio == prev_prio) {
			struct rt_prio_array *array = &rt_rq->active;

			rt_rq->highest_prio.curr =
				sched_find_first_bit(array->bitmap);
		}

	} else {
		rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
	}

	dec_rt_prio_smp(rt_rq, prio, prev_prio);
}

#ifdef CONFIG_RT_GROUP_SCHED

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted++;

	start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
}

static void
dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted--;

	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
}

#else /* !CONFIG_RT_GROUP_SCHED: */

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline
void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}

#endif /* !CONFIG_RT_GROUP_SCHED */

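/*
 * Number of runnable tasks an entity represents: the whole group's count
 * for a group entity, or 1 for a plain task.
 */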
static inline
unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
{
	struct rt_rq *group_rq = group_rt_rq(rt_se);

	if (group_rq)
		return group_rq->rt_nr_running;
	else
		return 1;
}

static inline
unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
{
	struct rt_rq *group_rq = group_rt_rq(rt_se);
	struct task_struct *tsk;

	if (group_rq)
		return group_rq->rr_nr_running;

	tsk = rt_task_of(rt_se);

	return (tsk->policy == SCHED_RR) ? 1 : 0;
}

static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	int prio = rt_se_prio(rt_se);

	WARN_ON(!rt_prio(prio));
	rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
	rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);

	inc_rt_prio(rt_rq, prio);
	inc_rt_group(rt_se, rt_rq);
}

static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
	WARN_ON(!rt_rq->rt_nr_running);
	rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
	rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);

	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
	dec_rt_group(rt_se, rt_rq);
}

/*
 * Change rt_se->run_list location unless SAVE && !MOVE
 *
 * assumes ENQUEUE/DEQUEUE flags match
 */
static inline bool move_entity(unsigned int flags)
{
	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
		return false;

	return true;
}

static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
{
	list_del_init(&rt_se->run_list);

	if (list_empty(array->queue + rt_se_prio(rt_se)))
		__clear_bit(rt_se_prio(rt_se), array->bitmap);

	rt_se->on_list = 0;
}

static inline struct sched_statistics *
__schedstats_from_rt_se(struct sched_rt_entity *rt_se)
{
	/* schedstats is not supported for rt group. */
	if (!rt_entity_is_task(rt_se))
		return NULL;

	return &rt_task_of(rt_se)->stats;
}

static inline void
update_stats_wait_start_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
{
	struct sched_statistics *stats;
	struct task_struct *p = NULL;

	if (!schedstat_enabled())
		return;

	if (rt_entity_is_task(rt_se))
		p = rt_task_of(rt_se);

	stats = __schedstats_from_rt_se(rt_se);
	if (!stats)
		return;

	__update_stats_wait_start(rq_of_rt_rq(rt_rq), p, stats);
}

static inline void
update_stats_enqueue_sleeper_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
{
	struct sched_statistics *stats;
	struct task_struct *p = NULL;

	if (!schedstat_enabled())
		return;

	if (rt_entity_is_task(rt_se))
		p = rt_task_of(rt_se);

	stats = __schedstats_from_rt_se(rt_se);
	if (!stats)
		return;

	__update_stats_enqueue_sleeper(rq_of_rt_rq(rt_rq), p, stats);
}

static inline void
update_stats_enqueue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se,
			int flags)
{
	if (!schedstat_enabled())
		return;

	if (flags & ENQUEUE_WAKEUP)
		update_stats_enqueue_sleeper_rt(rt_rq, rt_se);
}

static inline void
update_stats_wait_end_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
{
	struct sched_statistics *stats;
	struct task_struct *p = NULL;

	if (!schedstat_enabled())
		return;

	if (rt_entity_is_task(rt_se))
		p = rt_task_of(rt_se);

	stats = __schedstats_from_rt_se(rt_se);
	if (!stats)
		return;

	__update_stats_wait_end(rq_of_rt_rq(rt_rq), p, stats);
}

static inline void
update_stats_dequeue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se,
			int flags)
{
	struct task_struct *p = NULL;

	if (!schedstat_enabled())
		return;

	if (rt_entity_is_task(rt_se))
		p = rt_task_of(rt_se);

	if ((flags & DEQUEUE_SLEEP) && p) {
		unsigned int state;

		state = READ_ONCE(p->__state);
		if (state & TASK_INTERRUPTIBLE)
			__schedstat_set(p->stats.sleep_start,
					rq_clock(rq_of_rt_rq(rt_rq)));

		if (state & TASK_UNINTERRUPTIBLE)
			__schedstat_set(p->stats.block_start,
					rq_clock(rq_of_rt_rq(rt_rq)));
	}
}

static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;
	struct rt_rq *group_rq = group_rt_rq(rt_se);
	struct list_head *queue = array->queue + rt_se_prio(rt_se);

	/*
	 * Don't enqueue the group if it's throttled, or when empty.
	 * The latter is a consequence of the former when a child group
	 * gets throttled and the current group doesn't have any other
	 * active members.
	 */
	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
		if (rt_se->on_list)
			__delist_rt_entity(rt_se, array);
		return;
	}

	if (move_entity(flags)) {
		WARN_ON_ONCE(rt_se->on_list);
		if (flags & ENQUEUE_HEAD)
			list_add(&rt_se->run_list, queue);
		else
			list_add_tail(&rt_se->run_list, queue);

		__set_bit(rt_se_prio(rt_se), array->bitmap);
		rt_se->on_list = 1;
	}
	rt_se->on_rq = 1;

	inc_rt_tasks(rt_se, rt_rq);
}

static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;

	if (move_entity(flags)) {
		WARN_ON_ONCE(!rt_se->on_list);
		__delist_rt_entity(rt_se, array);
	}
	rt_se->on_rq = 0;

	dec_rt_tasks(rt_se, rt_rq);
}

/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top-down.
 */
static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
{
	struct sched_rt_entity *back = NULL;
	unsigned int rt_nr_running;

	for_each_sched_rt_entity(rt_se) {
		rt_se->back = back;
		back = rt_se;
	}

	rt_nr_running = rt_rq_of_se(back)->rt_nr_running;

	for (rt_se = back; rt_se; rt_se = rt_se->back) {
		if (on_rt_rq(rt_se))
			__dequeue_rt_entity(rt_se, flags);
	}

	dequeue_top_rt_rq(rt_rq_of_se(back), rt_nr_running);
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
	struct rq *rq = rq_of_rt_se(rt_se);

	update_stats_enqueue_rt(rt_rq_of_se(rt_se), rt_se, flags);

	dequeue_rt_stack(rt_se, flags);
	for_each_sched_rt_entity(rt_se)
		__enqueue_rt_entity(rt_se, flags);
	enqueue_top_rt_rq(&rq->rt);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
	struct rq *rq = rq_of_rt_se(rt_se);

	update_stats_dequeue_rt(rt_rq_of_se(rt_se), rt_se, flags);

	dequeue_rt_stack(rt_se, flags);

	for_each_sched_rt_entity(rt_se) {
		struct rt_rq *rt_rq = group_rt_rq(rt_se);

		if (rt_rq && rt_rq->rt_nr_running)
			__enqueue_rt_entity(rt_se, flags);
	}
	enqueue_top_rt_rq(&rq->rt);
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void
enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
	struct sched_rt_entity *rt_se = &p->rt;

	if (flags & ENQUEUE_WAKEUP)
		rt_se->timeout = 0;

	check_schedstat_required();
	update_stats_wait_start_rt(rt_rq_of_se(rt_se), rt_se);

	enqueue_rt_entity(rt_se, flags);

	if (task_is_blocked(p))
		return;

	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);
}

static bool dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
	struct sched_rt_entity *rt_se = &p->rt;

	update_curr_rt(rq);
	dequeue_rt_entity(rt_se, flags);

	dequeue_pushable_task(rq, p);

	return true;
}

/*
 * Put task to the head or the end of the run list without the overhead of
 * dequeue followed by enqueue.
 */
static void
requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
{
	if (on_rt_rq(rt_se)) {
		struct rt_prio_array *array = &rt_rq->active;
		struct list_head *queue = array->queue + rt_se_prio(rt_se);

		if (head)
			list_move(&rt_se->run_list, queue);
		else
			list_move_tail(&rt_se->run_list, queue);
	}
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
{
	struct sched_rt_entity *rt_se = &p->rt;
	struct rt_rq *rt_rq;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);
		requeue_rt_entity(rt_rq, rt_se, head);
	}
}

static void yield_task_rt(struct rq *rq)
{
	requeue_task_rt(rq, rq->donor, 0);
}

static int find_lowest_rq(struct task_struct *task);

static int
select_task_rq_rt(struct task_struct *p, int cpu, int flags)
{
	struct task_struct *curr, *donor;
	struct rq *rq;
	bool test;

	/* For anything but wake ups, just return the task_cpu */
	if (!(flags & (WF_TTWU | WF_FORK)))
		goto out;

	rq = cpu_rq(cpu);

	rcu_read_lock();
	curr = READ_ONCE(rq->curr); /* unlocked access */
	donor = READ_ONCE(rq->donor);

	/*
	 * If the current task on @p's runqueue is an RT task, then
	 * try to see if we can wake this RT task up on another
	 * runqueue. Otherwise simply start this RT task
	 * on its current runqueue.
	 *
	 * We want to avoid overloading runqueues. If the woken
	 * task is a higher priority, then it will stay on this CPU
	 * and the lower prio task should be moved to another CPU.
	 * Even though this will probably make the lower prio task
	 * lose its cache, we do not want to bounce a higher task
	 * around just because it gave up its CPU, perhaps for a
	 * lock?
	 *
	 * For equal prio tasks, we just let the scheduler sort it out.
	 *
	 * Otherwise, just let it ride on the affine RQ and the
	 * post-schedule router will push the preempted task away.
	 *
	 * This test is optimistic, if we get it wrong the load-balancer
	 * will have to sort it out.
	 *
	 * We take into account the capacity of the CPU to ensure it fits the
	 * requirement of the task - which is only important on heterogeneous
	 * systems like big.LITTLE.
	 */
	test = curr &&
	       unlikely(rt_task(donor)) &&
	       (curr->nr_cpus_allowed < 2 || donor->prio <= p->prio);

	if (test || !rt_task_fits_capacity(p, cpu)) {
		int target = find_lowest_rq(p);

		/*
		 * Bail out if we were forcing a migration to find a better
		 * fitting CPU but our search failed.
		 */
		if (!test && target != -1 && !rt_task_fits_capacity(p, target))
			goto out_unlock;

		/*
		 * Don't bother moving it if the destination CPU is
		 * not running a lower priority task.
		 */
		if (target != -1 &&
		    p->prio < cpu_rq(target)->rt.highest_prio.curr)
			cpu = target;
	}

out_unlock:
	rcu_read_unlock();

out:
	return cpu;
}

static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
{
	if (rq->curr->nr_cpus_allowed == 1 ||
	    !cpupri_find(&rq->rd->cpupri, rq->donor, NULL))
		return;

	/*
	 * p is migratable, so let's not schedule it and
	 * see if it is pushed or pulled somewhere else.
	 */
	if (p->nr_cpus_allowed != 1 &&
	    cpupri_find(&rq->rd->cpupri, p, NULL))
		return;

	/*
	 * There appear to be other CPUs that can accept
	 * the current task but none can run 'p', so let's reschedule
	 * to try and push the current task away:
	 */
	requeue_task_rt(rq, p, 1);
	resched_curr(rq);
}

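/*
 * sched_class::balance() for RT: before picking, try to pull RT tasks from
 * other CPUs if this rq is about to drop to a lower-priority task than what
 * they have queued.
 */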
static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
{
	if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
		/*
		 * This is OK, because current is on_cpu, which avoids it being
		 * picked for load-balance and preemption/IRQs are still
		 * disabled avoiding further scheduler activity on it and we've
		 * not yet started the picking loop.
		 */
		rq_unpin_lock(rq, rf);
		pull_rt_task(rq);
		rq_repin_lock(rq, rf);
	}

	return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq);
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void wakeup_preempt_rt(struct rq *rq, struct task_struct *p, int flags)
{
	struct task_struct *donor = rq->donor;

	/*
	 * XXX If we're preempted by DL, queue a push?
	 */
	if (p->sched_class != &rt_sched_class)
		return;

	if (p->prio < donor->prio) {
		resched_curr(rq);
		return;
	}

	/*
	 * If:
	 *
	 * - the newly woken task is of equal priority to the current task
	 * - the newly woken task is non-migratable while current is migratable
	 * - current will be preempted on the next reschedule
	 *
	 * we should check to see if current can readily move to a different
	 * cpu. If so, we will reschedule to allow the push logic to try
	 * to move current somewhere else, making room for our non-migratable
	 * task.
	 */
	if (p->prio == donor->prio && !test_tsk_need_resched(rq->curr))
		check_preempt_equal_prio(rq, p);
}

static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
{
	struct sched_rt_entity *rt_se = &p->rt;
	struct rt_rq *rt_rq = &rq->rt;

	p->se.exec_start = rq_clock_task(rq);
	if (on_rt_rq(&p->rt))
		update_stats_wait_end_rt(rt_rq, rt_se);

	/* The running task is never eligible for pushing */
	dequeue_pushable_task(rq, p);

	if (!first)
		return;

	/*
	 * If prev task was rt, put_prev_task() has already updated the
	 * utilization. We only care about the case where we start to
	 * schedule a rt task.
	 */
	if (rq->donor->sched_class != &rt_sched_class)
		update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);

	rt_queue_push_tasks(rq);
}

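/*
 * Pick the first entity queued at the highest set priority in @rt_rq's
 * priority array.
 */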
static struct sched_rt_entity *pick_next_rt_entity(struct rt_rq *rt_rq)
{
	struct rt_prio_array *array = &rt_rq->active;
	struct sched_rt_entity *next = NULL;
	struct list_head *queue;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	BUG_ON(idx >= MAX_RT_PRIO);

	queue = array->queue + idx;
	if (WARN_ON_ONCE(list_empty(queue)))
		return NULL;
	next = list_entry(queue->next, struct sched_rt_entity, run_list);

	return next;
}

static struct task_struct *_pick_next_task_rt(struct rq *rq)
{
	struct sched_rt_entity *rt_se;
	struct rt_rq *rt_rq = &rq->rt;

	do {
		rt_se = pick_next_rt_entity(rt_rq);
		if (unlikely(!rt_se))
			return NULL;
		rt_rq = group_rt_rq(rt_se);
	} while (rt_rq);

	return rt_task_of(rt_se);
}

static struct task_struct *pick_task_rt(struct rq *rq, struct rq_flags *rf)
{
	struct task_struct *p;

	if (!sched_rt_runnable(rq))
		return NULL;

	p = _pick_next_task_rt(rq);

	return p;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p, struct task_struct *next)
{
	struct sched_rt_entity *rt_se = &p->rt;
	struct rt_rq *rt_rq = &rq->rt;

	if (on_rt_rq(&p->rt))
		update_stats_wait_start_rt(rt_rq, rt_se);

	update_curr_rt(rq);

	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);

	if (task_is_blocked(p))
		return;
	/*
	 * The previous task needs to be made eligible for pushing
	 * if it is still active
	 */
	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);
}

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

/*
 * Return the highest pushable rq's task, which is suitable to be executed
 * on the CPU, NULL otherwise
 */
static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
{
	struct plist_head *head = &rq->rt.pushable_tasks;
	struct task_struct *p;

	if (!has_pushable_tasks(rq))
		return NULL;

	plist_for_each_entry(p, head, pushable_tasks) {
		if (task_is_pushable(rq, p, cpu))
			return p;
	}

	return NULL;
}

static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);

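/*
 * Find a CPU whose runqueue runs at the lowest RT priority (and, on asym
 * capacity systems, can fit @task), preferring cache-hot and topologically
 * close CPUs. Returns -1 if no suitable CPU is found.
 */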
static int find_lowest_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
	int this_cpu = smp_processor_id();
	int cpu = task_cpu(task);
	int ret;

	/* Make sure the mask is initialized first */
	if (unlikely(!lowest_mask))
		return -1;

	if (task->nr_cpus_allowed == 1)
		return -1; /* No other targets possible */

	/*
	 * If we're on asym system ensure we consider the different capacities
	 * of the CPUs when searching for the lowest_mask.
	 */
	if (sched_asym_cpucap_active()) {

		ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,
					  task, lowest_mask,
					  rt_task_fits_capacity);
	} else {

		ret = cpupri_find(&task_rq(task)->rd->cpupri,
				  task, lowest_mask);
	}

	if (!ret)
		return -1; /* No targets found */

	/*
	 * At this point we have built a mask of CPUs representing the
	 * lowest priority tasks in the system. Now we want to elect
	 * the best one based on our affinity and topology.
	 *
	 * We prioritize the last CPU that the task executed on since
	 * it is most likely cache-hot in that location.
	 */
	if (cpumask_test_cpu(cpu, lowest_mask))
		return cpu;

	/*
	 * Otherwise, we consult the sched_domains span maps to figure
	 * out which CPU is logically closest to our hot cache data.
	 */
	if (!cpumask_test_cpu(this_cpu, lowest_mask))
		this_cpu = -1; /* Skip this_cpu opt if not among lowest */

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {
			int best_cpu;

			/*
			 * "this_cpu" is cheaper to preempt than a
			 * remote processor.
			 */
			if (this_cpu != -1 &&
			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
				rcu_read_unlock();
				return this_cpu;
			}

			best_cpu = cpumask_any_and_distribute(lowest_mask,
							      sched_domain_span(sd));
			if (best_cpu < nr_cpu_ids) {
				rcu_read_unlock();
				return best_cpu;
			}
		}
	}
	rcu_read_unlock();

	/*
	 * And finally, if there were no matches within the domains
	 * just give the caller *something* to work with from the compatible
	 * locations.
	 */
	if (this_cpu != -1)
		return this_cpu;

	cpu = cpumask_any_distribute(lowest_mask);
	if (cpu < nr_cpu_ids)
		return cpu;

	return -1;
}

static struct task_struct *pick_next_pushable_task(struct rq *rq)
{
	struct task_struct *p;

	if (!has_pushable_tasks(rq))
		return NULL;

	p = plist_first_entry(&rq->rt.pushable_tasks,
			      struct task_struct, pushable_tasks);

	BUG_ON(rq->cpu != task_cpu(p));
	BUG_ON(task_current(rq, p));
	BUG_ON(task_current_donor(rq, p));
	BUG_ON(p->nr_cpus_allowed <= 1);

	BUG_ON(!task_on_rq_queued(p));
	BUG_ON(!rt_task(p));

	return p;
}

/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *lowest_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
		cpu = find_lowest_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		lowest_rq = cpu_rq(cpu);

		if (lowest_rq->rt.highest_prio.curr <= task->prio) {
			/*
			 * Target rq has tasks of equal or higher priority,
			 * retrying does not release any lock and is unlikely
			 * to yield a different result.
			 */
			lowest_rq = NULL;
			break;
		}

		/* if the prio of this runqueue changed, try again */
		if (double_lock_balance(rq, lowest_rq)) {
			/*
			 * We had to unlock the run queue. In the meantime,
			 * the task could have migrated already or had its
			 * affinity changed, therefore check if the task is
			 * still at the head of the pushable tasks list.
			 * It is possible the task was scheduled, set
			 * "migrate_disabled" and then got preempted, so we must
			 * check the task migration disable flag here too.
			 */
			if (unlikely(is_migration_disabled(task) ||
				     !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) ||
				     task != pick_next_pushable_task(rq))) {

				double_unlock_balance(rq, lowest_rq);
				lowest_rq = NULL;
				break;
			}
		}

		/* If this rq is still suitable use it. */
		if (lowest_rq->rt.highest_prio.curr > task->prio)
			break;

		/* try again */
		double_unlock_balance(rq, lowest_rq);
		lowest_rq = NULL;
	}

	return lowest_rq;
}

/*
 * If the current CPU has more than one RT task, see if the non
 * running task can migrate over to a CPU that is running a task
 * of lesser priority.
 */
static int push_rt_task(struct rq *rq, bool pull)
{
	struct task_struct *next_task;
	struct rq *lowest_rq;
	int ret = 0;

	if (!rq->rt.overloaded)
		return 0;

	next_task = pick_next_pushable_task(rq);
	if (!next_task)
		return 0;

retry:
	/*
	 * It's possible that the next_task slipped in with a
	 * higher priority than current. If that's the case
	 * just reschedule current.
	 */
	if (unlikely(next_task->prio < rq->donor->prio)) {
		resched_curr(rq);
		return 0;
	}

	if (is_migration_disabled(next_task)) {
		struct task_struct *push_task = NULL;
		int cpu;

		if (!pull || rq->push_busy)
			return 0;

		/*
		 * Invoking find_lowest_rq() on anything but an RT task doesn't
		 * make sense. Per the above priority check, curr has to
		 * be of higher priority than next_task, so no need to
		 * reschedule when bailing out.
		 *
		 * Note that the stoppers are masqueraded as SCHED_FIFO
		 * (cf. sched_set_stop_task()), so we can't rely on rt_task().
		 */
		if (rq->donor->sched_class != &rt_sched_class)
			return 0;

		cpu = find_lowest_rq(rq->curr);
		if (cpu == -1 || cpu == rq->cpu)
			return 0;

		/*
		 * Given we found a CPU with lower priority than @next_task,
		 * therefore it should be running. However we cannot migrate it
		 * to this other CPU, instead attempt to push the current
		 * running task on this CPU away.
		 */
		push_task = get_push_task(rq);
		if (push_task) {
			preempt_disable();
			raw_spin_rq_unlock(rq);
			stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
					    push_task, &rq->push_work);
			preempt_enable();
			raw_spin_rq_lock(rq);
		}

		return 0;
	}

	if (WARN_ON(next_task == rq->curr))
		return 0;

	/* We might release rq lock */
	get_task_struct(next_task);

	/* find_lock_lowest_rq locks the rq if found */
	lowest_rq = find_lock_lowest_rq(next_task, rq);
	if (!lowest_rq) {
		struct task_struct *task;
		/*
		 * find_lock_lowest_rq releases rq->lock
		 * so it is possible that next_task has migrated.
		 *
		 * We need to make sure that the task is still on the same
		 * run-queue and is also still the next task eligible for
		 * pushing.
		 */
		task = pick_next_pushable_task(rq);
		if (task == next_task) {
			/*
			 * The task hasn't migrated, and is still the next
			 * eligible task, but we failed to find a run-queue
			 * to push it to. Do not retry in this case, since
			 * other CPUs will pull from us when ready.
			 */
			goto out;
		}
2030 */ 2031 goto out; 2032 } 2033 2034 if (!task) 2035 /* No more tasks, just exit */ 2036 goto out; 2037 2038 /* 2039 * Something has shifted, try again. 2040 */ 2041 put_task_struct(next_task); 2042 next_task = task; 2043 goto retry; 2044 } 2045 2046 move_queued_task_locked(rq, lowest_rq, next_task); 2047 resched_curr(lowest_rq); 2048 ret = 1; 2049 2050 double_unlock_balance(rq, lowest_rq); 2051 out: 2052 put_task_struct(next_task); 2053 2054 return ret; 2055 } 2056 2057 static void push_rt_tasks(struct rq *rq) 2058 { 2059 /* push_rt_task will return true if it moved an RT */ 2060 while (push_rt_task(rq, false)) 2061 ; 2062 } 2063 2064 #ifdef HAVE_RT_PUSH_IPI 2065 2066 /* 2067 * When a high priority task schedules out from a CPU and a lower priority 2068 * task is scheduled in, a check is made to see if there's any RT tasks 2069 * on other CPUs that are waiting to run because a higher priority RT task 2070 * is currently running on its CPU. In this case, the CPU with multiple RT 2071 * tasks queued on it (overloaded) needs to be notified that a CPU has opened 2072 * up that may be able to run one of its non-running queued RT tasks. 2073 * 2074 * All CPUs with overloaded RT tasks need to be notified as there is currently 2075 * no way to know which of these CPUs have the highest priority task waiting 2076 * to run. Instead of trying to take a spinlock on each of these CPUs, 2077 * which has shown to cause large latency when done on machines with many 2078 * CPUs, sending an IPI to the CPUs to have them push off the overloaded 2079 * RT tasks waiting to run. 2080 * 2081 * Just sending an IPI to each of the CPUs is also an issue, as on large 2082 * count CPU machines, this can cause an IPI storm on a CPU, especially 2083 * if its the only CPU with multiple RT tasks queued, and a large number 2084 * of CPUs scheduling a lower priority task at the same time. 2085 * 2086 * Each root domain has its own IRQ work function that can iterate over 2087 * all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT 2088 * task must be checked if there's one or many CPUs that are lowering 2089 * their priority, there's a single IRQ work iterator that will try to 2090 * push off RT tasks that are waiting to run. 2091 * 2092 * When a CPU schedules a lower priority task, it will kick off the 2093 * IRQ work iterator that will jump to each CPU with overloaded RT tasks. 2094 * As it only takes the first CPU that schedules a lower priority task 2095 * to start the process, the rto_start variable is incremented and if 2096 * the atomic result is one, then that CPU will try to take the rto_lock. 2097 * This prevents high contention on the lock as the process handles all 2098 * CPUs scheduling lower priority tasks. 2099 * 2100 * All CPUs that are scheduling a lower priority task will increment the 2101 * rt_loop_next variable. This will make sure that the IRQ work iterator 2102 * checks all RT overloaded CPUs whenever a CPU schedules a new lower 2103 * priority task, even if the iterator is in the middle of a scan. Incrementing 2104 * the rt_loop_next will cause the iterator to perform another scan. 2105 * 2106 */ 2107 static int rto_next_cpu(struct root_domain *rd) 2108 { 2109 int this_cpu = smp_processor_id(); 2110 int next; 2111 int cpu; 2112 2113 /* 2114 * When starting the IPI RT pushing, the rto_cpu is set to -1, 2115 * rt_next_cpu() will simply return the first CPU found in 2116 * the rto_mask. 
static int rto_next_cpu(struct root_domain *rd)
{
        int this_cpu = smp_processor_id();
        int next;
        int cpu;

        /*
         * When starting the IPI RT pushing, the rto_cpu is set to -1,
         * rto_next_cpu() will simply return the first CPU found in
         * the rto_mask.
         *
         * If rto_next_cpu() is called with rto_cpu set to a valid CPU, it
         * will return the next CPU found in the rto_mask.
         *
         * If there are no more CPUs left in the rto_mask, then a check is made
         * against rto_loop and rto_loop_next. rto_loop is only updated with
         * the rto_lock held, but any CPU may increment the rto_loop_next
         * without any locking.
         */
        for (;;) {

                /* When rto_cpu is -1 this acts like cpumask_first() */
                cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);

                rd->rto_cpu = cpu;

                /* Do not send IPI to self */
                if (cpu == this_cpu)
                        continue;

                if (cpu < nr_cpu_ids)
                        return cpu;

                rd->rto_cpu = -1;

                /*
                 * ACQUIRE ensures we see the @rto_mask changes
                 * made prior to the @next value observed.
                 *
                 * Matches WMB in rt_set_overload().
                 */
                next = atomic_read_acquire(&rd->rto_loop_next);

                if (rd->rto_loop == next)
                        break;

                rd->rto_loop = next;
        }

        return -1;
}
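
/*
 * rto_start_trylock()/rto_start_unlock() implement the "only one CPU
 * starts the loop" gate described above: only the caller that flips
 * rto_loop_start from 0 to 1 gets true back and may kick off the IPI
 * chain; everybody else backs off and relies on their rto_loop_next
 * increment to extend the scan that is already in progress.
 */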
static inline bool rto_start_trylock(atomic_t *v)
{
        return !atomic_cmpxchg_acquire(v, 0, 1);
}

static inline void rto_start_unlock(atomic_t *v)
{
        atomic_set_release(v, 0);
}

static void tell_cpu_to_push(struct rq *rq)
{
        int cpu = -1;

        /* Keep the loop going if the IPI is currently active */
        atomic_inc(&rq->rd->rto_loop_next);

        /* Only one CPU can initiate a loop at a time */
        if (!rto_start_trylock(&rq->rd->rto_loop_start))
                return;

        raw_spin_lock(&rq->rd->rto_lock);

        /*
         * The rto_cpu is updated under the lock, if it has a valid CPU
         * then the IPI is still running and will continue due to the
         * update to loop_next, and nothing needs to be done here.
         * Otherwise it is finishing up and an IPI needs to be sent.
         */
        if (rq->rd->rto_cpu < 0)
                cpu = rto_next_cpu(rq->rd);

        raw_spin_unlock(&rq->rd->rto_lock);

        rto_start_unlock(&rq->rd->rto_loop_start);

        if (cpu >= 0) {
                /* Make sure the rd does not get freed while pushing */
                sched_get_rd(rq->rd);
                irq_work_queue_on(&rq->rd->rto_push_work, cpu);
        }
}

/* Called from hardirq context */
void rto_push_irq_work_func(struct irq_work *work)
{
        struct root_domain *rd =
                container_of(work, struct root_domain, rto_push_work);
        struct rq *rq;
        int cpu;

        rq = this_rq();

        /*
         * We do not need to grab the lock to check for has_pushable_tasks.
         * When it gets updated, a check is made if a push is possible.
         */
        if (has_pushable_tasks(rq)) {
                raw_spin_rq_lock(rq);
                while (push_rt_task(rq, true))
                        ;
                raw_spin_rq_unlock(rq);
        }

        raw_spin_lock(&rd->rto_lock);

        /* Pass the IPI to the next rt overloaded queue */
        cpu = rto_next_cpu(rd);

        raw_spin_unlock(&rd->rto_lock);

        if (cpu < 0) {
                sched_put_rd(rd);
                return;
        }

        /* Try the next RT overloaded CPU */
        irq_work_queue_on(&rd->rto_push_work, cpu);
}
#endif /* HAVE_RT_PUSH_IPI */

static void pull_rt_task(struct rq *this_rq)
{
        int this_cpu = this_rq->cpu, cpu;
        bool resched = false;
        struct task_struct *p, *push_task;
        struct rq *src_rq;
        int rt_overload_count = rt_overloaded(this_rq);

        if (likely(!rt_overload_count))
                return;

        /*
         * Match the barrier from rt_set_overload(); this guarantees that if we
         * see overloaded we must also see the rto_mask bit.
         */
        smp_rmb();

        /* If we are the only overloaded CPU do nothing */
        if (rt_overload_count == 1 &&
            cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
                return;

#ifdef HAVE_RT_PUSH_IPI
        if (sched_feat(RT_PUSH_IPI)) {
                tell_cpu_to_push(this_rq);
                return;
        }
#endif

        for_each_cpu(cpu, this_rq->rd->rto_mask) {
                if (this_cpu == cpu)
                        continue;

                src_rq = cpu_rq(cpu);

                /*
                 * Don't bother taking the src_rq->lock if the next highest
                 * task is known to be lower-priority than our current task.
                 * This may look racy, but if this value is about to go
                 * logically higher, the src_rq will push this task away.
                 * And if it's going logically lower, we do not care.
                 */
                if (src_rq->rt.highest_prio.next >=
                    this_rq->rt.highest_prio.curr)
                        continue;

                /*
                 * We can potentially drop this_rq's lock in
                 * double_lock_balance, and another CPU could
                 * alter this_rq.
                 */
                push_task = NULL;
                double_lock_balance(this_rq, src_rq);

                /*
                 * We can only pull a task that is pushable on its rq,
                 * and no others.
                 */
                p = pick_highest_pushable_task(src_rq, this_cpu);

                /*
                 * Do we have an RT task that preempts
                 * the to-be-scheduled task?
                 */
                if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
                        WARN_ON(p == src_rq->curr);
                        WARN_ON(!task_on_rq_queued(p));

                        /*
                         * There's a chance that p is higher in priority
                         * than what's currently running on its CPU.
                         * This is just that p is waking up and hasn't
                         * had a chance to schedule. We only pull
                         * p if it is lower in priority than the
                         * current task on the run queue.
                         */
                        if (p->prio < src_rq->donor->prio)
                                goto skip;

                        if (is_migration_disabled(p)) {
                                push_task = get_push_task(src_rq);
                        } else {
                                move_queued_task_locked(src_rq, this_rq, p);
                                resched = true;
                        }
                        /*
                         * We continue with the search, just in
                         * case there's an even higher prio task
                         * in another runqueue. (low likelihood
                         * but possible)
                         */
                }
skip:
                double_unlock_balance(this_rq, src_rq);

                if (push_task) {
                        preempt_disable();
                        raw_spin_rq_unlock(this_rq);
                        stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
                                            push_task, &src_rq->push_work);
                        preempt_enable();
                        raw_spin_rq_lock(this_rq);
                }
        }

        if (resched)
                resched_curr(this_rq);
}
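
/*
 * Worked example for the pull path above (editor's illustration; CPU and
 * priority numbers are made up, and a lower ->prio value means a higher
 * priority): this_rq is about to run a task with highest_prio.curr == 40.
 * A source rq advertising highest_prio.next == 50 is skipped without even
 * taking its lock, since its waiting task could not preempt us anyway.
 * One advertising highest_prio.next == 10 is locked and its highest
 * pushable task is pulled over, unless that task already outranks the
 * source CPU's own donor (it is merely waking up there) or is migration
 * disabled, in which case the source CPU's stopper is kicked via
 * push_cpu_stop() to sort it out locally instead.
 */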
/*
 * If we are not running and we are not going to reschedule soon, we should
 * try to push tasks away now.
 */
static void task_woken_rt(struct rq *rq, struct task_struct *p)
{
        bool need_to_push = !task_on_cpu(rq, p) &&
                            !test_tsk_need_resched(rq->curr) &&
                            p->nr_cpus_allowed > 1 &&
                            (dl_task(rq->donor) || rt_task(rq->donor)) &&
                            (rq->curr->nr_cpus_allowed < 2 ||
                             rq->donor->prio <= p->prio);

        if (need_to_push)
                push_rt_tasks(rq);
}

/* Assumes rq->lock is held */
static void rq_online_rt(struct rq *rq)
{
        if (rq->rt.overloaded)
                rt_set_overload(rq);

        __enable_runtime(rq);

        cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
}

/* Assumes rq->lock is held */
static void rq_offline_rt(struct rq *rq)
{
        if (rq->rt.overloaded)
                rt_clear_overload(rq);

        __disable_runtime(rq);

        cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
}

/*
 * When switching away from the RT queue, we bring ourselves to a position
 * where we might want to pull RT tasks from other runqueues.
 */
static void switched_from_rt(struct rq *rq, struct task_struct *p)
{
        /*
         * If there are other RT tasks then we will reschedule
         * and the scheduling of the other RT tasks will handle
         * the balancing. But if we are the last RT task
         * we may need to handle the pulling of RT tasks
         * now.
         */
        if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
                return;

        rt_queue_pull_task(rq);
}

void __init init_sched_rt_class(void)
{
        unsigned int i;

        for_each_possible_cpu(i) {
                zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
                                        GFP_KERNEL, cpu_to_node(i));
        }
}

/*
 * When switching a task to RT, we may overload the runqueue
 * with RT tasks. In this case we try to push them off to
 * other runqueues.
 */
static void switched_to_rt(struct rq *rq, struct task_struct *p)
{
        /*
         * If we are running, update the avg_rt tracking, as the running time
         * will from now on be accounted into the latter.
         */
        if (task_current(rq, p)) {
                update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
                return;
        }

        /*
         * If we are not running we may need to preempt the current
         * running task. If that current running task is also an RT task
         * then see if we can move to another run queue.
         */
        if (task_on_rq_queued(p)) {
                if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
                        rt_queue_push_tasks(rq);
                if (p->prio < rq->donor->prio && cpu_online(cpu_of(rq)))
                        resched_curr(rq);
        }
}
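
/*
 * Reminder for the comparisons below (editor's note): a numerically lower
 * ->prio value means a higher priority. So "oldprio < p->prio" in
 * prio_changed_rt() is a priority *drop* of the running task, which is
 * when this CPU may want to pull stronger RT tasks from elsewhere, while
 * "p->prio < rq->donor->prio" means the changed task now outranks whatever
 * is running here and a reschedule is due.
 */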
/*
 * Priority of the task has changed. This may cause
 * us to initiate a push or pull.
 */
static void
prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
{
        if (!task_on_rq_queued(p))
                return;

        if (p->prio == oldprio)
                return;

        if (task_current_donor(rq, p)) {
                /*
                 * If our priority decreases while running, we
                 * may need to pull tasks to this runqueue.
                 */
                if (oldprio < p->prio)
                        rt_queue_pull_task(rq);

                /*
                 * If there's a higher priority task waiting to run
                 * then reschedule.
                 */
                if (p->prio > rq->rt.highest_prio.curr)
                        resched_curr(rq);
        } else {
                /*
                 * This task is not running, but if its priority is
                 * higher than that of the current running task,
                 * then reschedule.
                 */
                if (p->prio < rq->donor->prio)
                        resched_curr(rq);
        }
}

#ifdef CONFIG_POSIX_TIMERS
static void watchdog(struct rq *rq, struct task_struct *p)
{
        unsigned long soft, hard;

        /* max may change after cur was read, this will be fixed next tick */
        soft = task_rlimit(p, RLIMIT_RTTIME);
        hard = task_rlimit_max(p, RLIMIT_RTTIME);

        if (soft != RLIM_INFINITY) {
                unsigned long next;

                if (p->rt.watchdog_stamp != jiffies) {
                        p->rt.timeout++;
                        p->rt.watchdog_stamp = jiffies;
                }

                next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
                if (p->rt.timeout > next) {
                        posix_cputimers_rt_watchdog(&p->posix_cputimers,
                                                    p->se.sum_exec_runtime);
                }
        }
}
#else /* !CONFIG_POSIX_TIMERS: */
static inline void watchdog(struct rq *rq, struct task_struct *p) { }
#endif /* !CONFIG_POSIX_TIMERS */
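
/*
 * Example of the conversion in watchdog() above (editor's sketch, assuming
 * HZ == 1000 so that one tick spans USEC_PER_SEC/HZ == 1000 us): an
 * RLIMIT_RTTIME soft limit of 500000 us yields
 * next == DIV_ROUND_UP(500000, 1000) == 500, so the posix-cputimers RT
 * watchdog fires once p->rt.timeout has accumulated more than 500 ticks.
 */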
/*
 * scheduler tick hitting a task of our scheduling class.
 *
 * NOTE: This function can be called remotely by the tick offload that
 * goes along full dynticks. Therefore no local assumption can be made
 * and everything must be accessed through the @rq and @curr passed in
 * parameters.
 */
static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
{
        struct sched_rt_entity *rt_se = &p->rt;

        update_curr_rt(rq);
        update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);

        watchdog(rq, p);

        /*
         * RR tasks need a special form of time-slice management.
         * FIFO tasks have no timeslices.
         */
        if (p->policy != SCHED_RR)
                return;

        if (--p->rt.time_slice)
                return;

        p->rt.time_slice = sched_rr_timeslice;

        /*
         * Requeue to the end of queue if we (and all of our ancestors) are not
         * the only element on the queue.
         */
        for_each_sched_rt_entity(rt_se) {
                if (rt_se->run_list.prev != rt_se->run_list.next) {
                        requeue_task_rt(rq, p, 0);
                        resched_curr(rq);
                        return;
                }
        }
}

static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
{
        /*
         * Time slice is 0 for SCHED_FIFO tasks
         */
        if (task->policy == SCHED_RR)
                return sched_rr_timeslice;
        else
                return 0;
}

#ifdef CONFIG_SCHED_CORE
static int task_is_throttled_rt(struct task_struct *p, int cpu)
{
        struct rt_rq *rt_rq;

#ifdef CONFIG_RT_GROUP_SCHED // XXX maybe add task_rt_rq(), see also sched_rt_period_rt_rq
        rt_rq = task_group(p)->rt_rq[cpu];
        WARN_ON(!rt_group_sched_enabled() && rt_rq->tg != &root_task_group);
#else
        rt_rq = &cpu_rq(cpu)->rt;
#endif

        return rt_rq_throttled(rt_rq);
}
#endif /* CONFIG_SCHED_CORE */

DEFINE_SCHED_CLASS(rt) = {
        .enqueue_task           = enqueue_task_rt,
        .dequeue_task           = dequeue_task_rt,
        .yield_task             = yield_task_rt,

        .wakeup_preempt         = wakeup_preempt_rt,

        .pick_task              = pick_task_rt,
        .put_prev_task          = put_prev_task_rt,
        .set_next_task          = set_next_task_rt,

        .balance                = balance_rt,
        .select_task_rq         = select_task_rq_rt,
        .set_cpus_allowed       = set_cpus_allowed_common,
        .rq_online              = rq_online_rt,
        .rq_offline             = rq_offline_rt,
        .task_woken             = task_woken_rt,
        .switched_from          = switched_from_rt,
        .find_lock_rq           = find_lock_lowest_rq,

        .task_tick              = task_tick_rt,

        .get_rr_interval        = get_rr_interval_rt,

        .switched_to            = switched_to_rt,
        .prio_changed           = prio_changed_rt,

        .update_curr            = update_curr_rt,

#ifdef CONFIG_SCHED_CORE
        .task_is_throttled      = task_is_throttled_rt,
#endif

#ifdef CONFIG_UCLAMP_TASK
        .uclamp_enabled         = 1,
#endif
};

#ifdef CONFIG_RT_GROUP_SCHED
/*
 * Ensure that the real time constraints are schedulable.
 */
static DEFINE_MUTEX(rt_constraints_mutex);

static inline int tg_has_rt_tasks(struct task_group *tg)
{
        struct task_struct *task;
        struct css_task_iter it;
        int ret = 0;

        /*
         * Autogroups do not have RT tasks; see autogroup_create().
         */
        if (task_group_is_autogroup(tg))
                return 0;

        css_task_iter_start(&tg->css, 0, &it);
        while (!ret && (task = css_task_iter_next(&it)))
                ret |= rt_task(task);
        css_task_iter_end(&it);

        return ret;
}

struct rt_schedulable_data {
        struct task_group *tg;
        u64 rt_period;
        u64 rt_runtime;
};

static int tg_rt_schedulable(struct task_group *tg, void *data)
{
        struct rt_schedulable_data *d = data;
        struct task_group *child;
        unsigned long total, sum = 0;
        u64 period, runtime;

        period = ktime_to_ns(tg->rt_bandwidth.rt_period);
        runtime = tg->rt_bandwidth.rt_runtime;

        if (tg == d->tg) {
                period = d->rt_period;
                runtime = d->rt_runtime;
        }

        /*
         * Cannot have more runtime than the period.
         */
        if (runtime > period && runtime != RUNTIME_INF)
                return -EINVAL;

        /*
         * Ensure we don't starve existing RT tasks if runtime turns zero.
         */
        if (rt_bandwidth_enabled() && !runtime &&
            tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg))
                return -EBUSY;

        if (WARN_ON(!rt_group_sched_enabled() && tg != &root_task_group))
                return -EBUSY;

        total = to_ratio(period, runtime);

        /*
         * Nobody can have more than the global setting allows.
         */
        if (total > to_ratio(global_rt_period(), global_rt_runtime()))
                return -EINVAL;

        /*
         * The sum of our children's runtime should not exceed our own.
         */
        list_for_each_entry_rcu(child, &tg->children, siblings) {
                period = ktime_to_ns(child->rt_bandwidth.rt_period);
                runtime = child->rt_bandwidth.rt_runtime;

                if (child == d->tg) {
                        period = d->rt_period;
                        runtime = d->rt_runtime;
                }

                sum += to_ratio(period, runtime);
        }

        if (sum > total)
                return -EINVAL;

        return 0;
}
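
/*
 * Worked example for the checks above (editor's illustration; the numbers
 * are made up): with the default global limits of 950000 us runtime per
 * 1000000 us period, a group requesting 600000/1000000 passes the global
 * cap. If that group then has two children configured for 400000/1000000
 * each, their summed ratios exceed the parent's and the update is rejected
 * with -EINVAL, whereas 300000/1000000 each would still fit.
 */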
static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
{
        int ret;

        struct rt_schedulable_data data = {
                .tg = tg,
                .rt_period = period,
                .rt_runtime = runtime,
        };

        rcu_read_lock();
        ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
        rcu_read_unlock();

        return ret;
}

static int tg_set_rt_bandwidth(struct task_group *tg,
                               u64 rt_period, u64 rt_runtime)
{
        int i, err = 0;

        /*
         * Disallowing the root group RT runtime is BAD; it would disallow the
         * kernel from creating (and/or operating) RT threads.
         */
        if (tg == &root_task_group && rt_runtime == 0)
                return -EINVAL;

        /* No period doesn't make any sense. */
        if (rt_period == 0)
                return -EINVAL;

        /*
         * Bound the quota to defend against overflow during the bandwidth
         * shift.
         */
        if (rt_runtime != RUNTIME_INF && rt_runtime > max_rt_runtime)
                return -EINVAL;

        mutex_lock(&rt_constraints_mutex);
        err = __rt_schedulable(tg, rt_period, rt_runtime);
        if (err)
                goto unlock;

        raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
        tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
        tg->rt_bandwidth.rt_runtime = rt_runtime;

        for_each_possible_cpu(i) {
                struct rt_rq *rt_rq = tg->rt_rq[i];

                raw_spin_lock(&rt_rq->rt_runtime_lock);
                rt_rq->rt_runtime = rt_runtime;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
        }
        raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
unlock:
        mutex_unlock(&rt_constraints_mutex);

        return err;
}

int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
{
        u64 rt_runtime, rt_period;

        rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
        rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
        if (rt_runtime_us < 0)
                rt_runtime = RUNTIME_INF;
        else if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC)
                return -EINVAL;

        return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
}

long sched_group_rt_runtime(struct task_group *tg)
{
        u64 rt_runtime_us;

        if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
                return -1;

        rt_runtime_us = tg->rt_bandwidth.rt_runtime;
        do_div(rt_runtime_us, NSEC_PER_USEC);
        return rt_runtime_us;
}

int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
{
        u64 rt_runtime, rt_period;

        if (rt_period_us > U64_MAX / NSEC_PER_USEC)
                return -EINVAL;

        rt_period = rt_period_us * NSEC_PER_USEC;
        rt_runtime = tg->rt_bandwidth.rt_runtime;

        return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
}

long sched_group_rt_period(struct task_group *tg)
{
        u64 rt_period_us;

        rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
        do_div(rt_period_us, NSEC_PER_USEC);
        return rt_period_us;
}

#ifdef CONFIG_SYSCTL
static int sched_rt_global_constraints(void)
{
        int ret = 0;

        mutex_lock(&rt_constraints_mutex);
        ret = __rt_schedulable(NULL, 0, 0);
        mutex_unlock(&rt_constraints_mutex);

        return ret;
}
#endif /* CONFIG_SYSCTL */

int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
{
        /* Don't accept real-time tasks when there is no way for them to run */
        if (rt_group_sched_enabled() && rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
                return 0;

        return 1;
}

#else /* !CONFIG_RT_GROUP_SCHED: */

#ifdef CONFIG_SYSCTL
static int sched_rt_global_constraints(void)
{
        return 0;
}
#endif /* CONFIG_SYSCTL */
#endif /* !CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SYSCTL
static int sched_rt_global_validate(void)
{
        if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
            ((sysctl_sched_rt_runtime > sysctl_sched_rt_period) ||
             ((u64)sysctl_sched_rt_runtime *
              NSEC_PER_USEC > max_rt_runtime)))
                return -EINVAL;

        return 0;
}
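
/*
 * Example for the validation above (editor's sketch): the defaults,
 * sysctl_sched_rt_runtime == 950000 and sysctl_sched_rt_period == 1000000,
 * pass. Writing 1100000 to sched_rt_runtime_us would exceed the period and
 * be rejected with -EINVAL, while writing -1 (RUNTIME_INF) removes the RT
 * throttling limit altogether.
 */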
static void sched_rt_do_global(void)
{
}

static int sched_rt_handler(const struct ctl_table *table, int write, void *buffer,
                size_t *lenp, loff_t *ppos)
{
        int old_period, old_runtime;
        static DEFINE_MUTEX(mutex);
        int ret;

        mutex_lock(&mutex);
        sched_domains_mutex_lock();
        old_period = sysctl_sched_rt_period;
        old_runtime = sysctl_sched_rt_runtime;

        ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

        if (!ret && write) {
                ret = sched_rt_global_validate();
                if (ret)
                        goto undo;

                ret = sched_dl_global_validate();
                if (ret)
                        goto undo;

                ret = sched_rt_global_constraints();
                if (ret)
                        goto undo;

                sched_rt_do_global();
                sched_dl_do_global();
        }
        if (0) {
undo:
                sysctl_sched_rt_period = old_period;
                sysctl_sched_rt_runtime = old_runtime;
        }
        sched_domains_mutex_unlock();
        mutex_unlock(&mutex);

        /*
         * After changing the maximum available bandwidth for DEADLINE, we need
         * to recompute the per-root-domain and per-CPU variables accordingly.
         */
        rebuild_sched_domains();

        return ret;
}

static int sched_rr_handler(const struct ctl_table *table, int write, void *buffer,
                size_t *lenp, loff_t *ppos)
{
        int ret;
        static DEFINE_MUTEX(mutex);

        mutex_lock(&mutex);
        ret = proc_dointvec(table, write, buffer, lenp, ppos);
        /*
         * Make sure that internally we keep jiffies.
         * Also, writing zero resets the time-slice to default:
         */
        if (!ret && write) {
                sched_rr_timeslice =
                        sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
                        msecs_to_jiffies(sysctl_sched_rr_timeslice);

                if (sysctl_sched_rr_timeslice <= 0)
                        sysctl_sched_rr_timeslice = jiffies_to_msecs(RR_TIMESLICE);
        }
        mutex_unlock(&mutex);

        return ret;
}
#endif /* CONFIG_SYSCTL */

void print_rt_stats(struct seq_file *m, int cpu)
{
        rt_rq_iter_t iter;
        struct rt_rq *rt_rq;

        rcu_read_lock();
        for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
                print_rt_rq(m, cpu, rt_rq);
        rcu_read_unlock();
}