// SPDX-License-Identifier: GPL-2.0-only
/*
 *  kernel/sched/core.c
 *
 *  Core kernel scheduler code and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 */
#include "sched.h"

#include <linux/nospec.h>

#include <linux/kcov.h>
#include <linux/scs.h>

#include <asm/switch_to.h>
#include <asm/tlb.h>

#include "../workqueue_internal.h"
#include "../../fs/io-wq.h"
#include "../smpboot.h"

#include "pelt.h"
#include "smp.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>

/*
 * Export tracepoints that act as a bare tracehook (ie: have no trace event
 * associated with them) to allow external modules to probe them.
 */
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);

DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL)
/*
 * Debugging: various feature bits
 *
 * If SCHED_DEBUG is disabled, each compilation unit has its own copy of
 * sysctl_sched_features, defined in sched.h, to allow constants propagation
 * at compile time and compiler optimization based on features default.
 */
#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |
const_debug unsigned int sysctl_sched_features =
#include "features.h"
	0;
#undef SCHED_FEAT
#endif

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = 32;

/*
 * period over which we measure -rt task CPU usage in us.
 * default: 1s
 */
unsigned int sysctl_sched_rt_period = 1000000;

__read_mostly int scheduler_running;

/*
 * part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;

/*
 * __task_rq_lock - lock the rq @p resides on.
 */
struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(rq->lock)
{
	struct rq *rq;

	lockdep_assert_held(&p->pi_lock);

	for (;;) {
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rq_pin_lock(rq, rf);
			return rq;
		}
		raw_spin_unlock(&rq->lock);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}

/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 */
struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(p->pi_lock)
	__acquires(rq->lock)
{
	struct rq *rq;

	for (;;) {
		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		/*
		 *	move_queued_task()		task_rq_lock()
		 *
		 *	ACQUIRE (rq->lock)
		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
		 *	[S] ->cpu = new_cpu		[L] task_rq()
		 *					[L] ->on_rq
		 *	RELEASE (rq->lock)
		 *
		 * If we observe the old CPU in task_rq_lock(), the acquire of
		 * the old rq->lock will fully serialize against the stores.
		 *
		 * If we observe the new CPU in task_rq_lock(), the address
		 * dependency headed by '[L] rq = task_rq()' and the acquire
		 * will pair with the WMB to ensure we then also see migrating.
		 */
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rq_pin_lock(rq, rf);
			return rq;
		}
		raw_spin_unlock(&rq->lock);
		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}
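
/*
 * Illustrative usage sketch only, not a definitive pattern: callers usually
 * pair task_rq_lock() with task_rq_unlock() (from kernel/sched/sched.h) so
 * that @p cannot migrate while its scheduling state is inspected:
 *
 *	struct rq_flags rf;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &rf);
 *	... @p cannot change rq or its MIGRATING state here ...
 *	task_rq_unlock(rq, p, &rf);
 */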

/*
 * RQ-clock updating methods:
 */

static void update_rq_clock_task(struct rq *rq, s64 delta)
{
	/*
	 * In theory, the compiler should just see 0 here, and optimize out
	 * the call to sched_rt_avg_update. But I don't trust it...
	 */
	s64 __maybe_unused steal = 0, irq_delta = 0;

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

	/*
	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
	 * this case when a previous update_rq_clock() happened inside a
	 * {soft,}irq region.
	 *
	 * When this happens, we stop ->clock_task and only update the
	 * prev_irq_time stamp to account for the part that fit, so that a next
	 * update will consume the rest. This ensures ->clock_task is
	 * monotonic.
	 *
	 * It does however cause some slight mis-attribution of {soft,}irq
	 * time, a more accurate solution would be to update the irq_time using
	 * the current rq->clock timestamp, except that would require using
	 * atomic ops.
	 */
	if (irq_delta > delta)
		irq_delta = delta;

	rq->prev_irq_time += irq_delta;
	delta -= irq_delta;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	if (static_key_false((&paravirt_steal_rq_enabled))) {
		steal = paravirt_steal_clock(cpu_of(rq));
		steal -= rq->prev_steal_time_rq;

		if (unlikely(steal > delta))
			steal = delta;

		rq->prev_steal_time_rq += steal;
		delta -= steal;
	}
#endif

	rq->clock_task += delta;

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
		update_irq_load_avg(rq, irq_delta + steal);
#endif
	update_rq_clock_pelt(rq, delta);
}

void update_rq_clock(struct rq *rq)
{
	s64 delta;

	lockdep_assert_held(&rq->lock);

	if (rq->clock_update_flags & RQCF_ACT_SKIP)
		return;

#ifdef CONFIG_SCHED_DEBUG
	if (sched_feat(WARN_DOUBLE_CLOCK))
		SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
	rq->clock_update_flags |= RQCF_UPDATED;
#endif

	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
	if (delta < 0)
		return;
	rq->clock += delta;
	update_rq_clock_task(rq, delta);
}

static inline void
rq_csd_init(struct rq *rq, call_single_data_t *csd, smp_call_func_t func)
{
	csd->flags = 0;
	csd->func = func;
	csd->info = rq;
}

#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 */

static void hrtick_clear(struct rq *rq)
{
	if (hrtimer_active(&rq->hrtick_timer))
		hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
	struct rq_flags rf;

	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

	rq_lock(rq, &rf);
	update_rq_clock(rq);
	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
	rq_unlock(rq, &rf);

	return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP

static void __hrtick_restart(struct rq *rq)
{
	struct hrtimer *timer = &rq->hrtick_timer;

	hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED_HARD);
}

/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
	struct rq *rq = arg;
	struct rq_flags rf;

	rq_lock(rq, &rf);
	__hrtick_restart(rq);
	rq_unlock(rq, &rf);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time;
	s64 delta;

	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense and can cause timer DoS.
	 */
	delta = max_t(s64, delay, 10000LL);
	time = ktime_add_ns(timer->base->get_time(), delta);

	hrtimer_set_expires(timer, time);

	if (rq == this_rq())
		__hrtick_restart(rq);
	else
		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
}

#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense. Rely on vruntime for fairness.
	 */
	delay = max_t(u64, delay, 10000LL);
	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
		      HRTIMER_MODE_REL_PINNED_HARD);
}

#endif /* CONFIG_SMP */

static void hrtick_rq_init(struct rq *rq)
{
#ifdef CONFIG_SMP
	rq_csd_init(rq, &rq->hrtick_csd, __hrtick_start);
#endif
	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	rq->hrtick_timer.function = hrtick;
}
#else	/* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void hrtick_rq_init(struct rq *rq)
{
}
#endif	/* CONFIG_SCHED_HRTICK */

/*
 * cmpxchg based fetch_or, macro so it works for different integer types
 */
#define fetch_or(ptr, mask)						\
	({								\
		typeof(ptr) _ptr = (ptr);				\
		typeof(mask) _mask = (mask);				\
		typeof(*_ptr) _old, _val = *_ptr;			\
									\
		for (;;) {						\
			_old = cmpxchg(_ptr, _val, _val | _mask);	\
			if (_old == _val)				\
				break;					\
			_val = _old;					\
		}							\
		_old;							\
	})

#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
/*
 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
 * this avoids any races wrt polling state changes and thereby avoids
 * spurious IPIs.
 */
static bool set_nr_and_not_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
}

/*
 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
 *
 * If this returns true, then the idle task promises to call
 * sched_ttwu_pending() and reschedule soon.
 */
static bool set_nr_if_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	typeof(ti->flags) old, val = READ_ONCE(ti->flags);

	for (;;) {
		if (!(val & _TIF_POLLING_NRFLAG))
			return false;
		if (val & _TIF_NEED_RESCHED)
			return true;
		old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
		if (old == val)
			break;
		val = old;
	}
	return true;
}

#else
static bool set_nr_and_not_polling(struct task_struct *p)
{
	set_tsk_need_resched(p);
	return true;
}

#ifdef CONFIG_SMP
static bool set_nr_if_polling(struct task_struct *p)
{
	return false;
}
#endif
#endif

static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	struct wake_q_node *node = &task->wake_q;

	/*
	 * Atomically grab the task, if ->wake_q is !nil already it means
	 * it's already queued (either by us or someone else) and will get the
	 * wakeup due to that.
	 *
	 * In order to ensure that a pending wakeup will observe our pending
	 * state, even in the failed case, an explicit smp_mb() must be used.
	 */
	smp_mb__before_atomic();
	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
		return false;

	/*
	 * The head is context local, there can be no concurrency.
	 */
	*head->lastp = node;
	head->lastp = &node->next;
	return true;
}

/**
 * wake_q_add() - queue a wakeup for 'later' waking.
 * @head: the wake_q_head to add @task to
 * @task: the task to queue for 'later' wakeup
 *
 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 * instantly.
 *
 * This function must be used as-if it were wake_up_process(); IOW the task
 * must be ready to be woken at this location.
 */
void wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	if (__wake_q_add(head, task))
		get_task_struct(task);
}

/**
 * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
 * @head: the wake_q_head to add @task to
 * @task: the task to queue for 'later' wakeup
 *
 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 * instantly.
 *
 * This function must be used as-if it were wake_up_process(); IOW the task
 * must be ready to be woken at this location.
 *
 * This function is essentially a task-safe equivalent to wake_q_add(). Callers
 * that already hold a reference to @task can call the 'safe' version and trust
 * wake_q to do the right thing depending whether or not the @task is already
 * queued for wakeup.
 */
void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
{
	if (!__wake_q_add(head, task))
		put_task_struct(task);
}

void wake_up_q(struct wake_q_head *head)
{
	struct wake_q_node *node = head->first;

	while (node != WAKE_Q_TAIL) {
		struct task_struct *task;

		task = container_of(node, struct task_struct, wake_q);
		BUG_ON(!task);
		/* Task can safely be re-inserted now: */
		node = node->next;
		task->wake_q.next = NULL;

		/*
		 * wake_up_process() executes a full barrier, which pairs with
		 * the queueing in wake_q_add() so as not to miss wakeups.
		 */
		wake_up_process(task);
		put_task_struct(task);
	}
}
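
/*
 * Illustrative usage sketch only (not part of this file's API definitions):
 * wake_q lets a caller collect wakeups while holding a lock and issue them
 * once the lock has been dropped, e.g.
 *
 *	DEFINE_WAKE_Q(wake_q);
 *
 *	spin_lock(&some_lock);
 *	...
 *	wake_q_add(&wake_q, task);	// grabs a task reference
 *	...
 *	spin_unlock(&some_lock);
 *
 *	wake_up_q(&wake_q);		// wakes the tasks, drops the references
 */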

/*
 * resched_curr - mark rq's current task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
void resched_curr(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	int cpu;

	lockdep_assert_held(&rq->lock);

	if (test_tsk_need_resched(curr))
		return;

	cpu = cpu_of(rq);

	if (cpu == smp_processor_id()) {
		set_tsk_need_resched(curr);
		set_preempt_need_resched();
		return;
	}

	if (set_nr_and_not_polling(curr))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

void resched_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (cpu_online(cpu) || cpu == smp_processor_id())
		resched_curr(rq);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

#ifdef CONFIG_SMP
#ifdef CONFIG_NO_HZ_COMMON
/*
 * In the semi idle case, use the nearest busy CPU for migrating timers
 * from an idle CPU. This is good for power-savings.
 *
 * We don't do similar optimization for completely idle system, as
 * selecting an idle CPU will add more delays to the timers than intended
 * (as that CPU's timer base may not be up to date wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
	int i, cpu = smp_processor_id(), default_cpu = -1;
	struct sched_domain *sd;

	if (housekeeping_cpu(cpu, HK_FLAG_TIMER)) {
		if (!idle_cpu(cpu))
			return cpu;
		default_cpu = cpu;
	}

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		for_each_cpu_and(i, sched_domain_span(sd),
			housekeeping_cpumask(HK_FLAG_TIMER)) {
			if (cpu == i)
				continue;

			if (!idle_cpu(i)) {
				cpu = i;
				goto unlock;
			}
		}
	}

	if (default_cpu == -1)
		default_cpu = housekeeping_any_cpu(HK_FLAG_TIMER);
	cpu = default_cpu;
unlock:
	rcu_read_unlock();
	return cpu;
}

/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
static void wake_up_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (cpu == smp_processor_id())
		return;

	if (set_nr_and_not_polling(rq->idle))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

static bool wake_up_full_nohz_cpu(int cpu)
{
	/*
	 * We just need the target to call irq_exit() and re-evaluate
	 * the next tick. The nohz full kick at least implies that.
	 * If needed we can still optimize that later with an
	 * empty IRQ.
	 */
	if (cpu_is_offline(cpu))
		return true;	/* Don't try to wake offline CPUs. */
	if (tick_nohz_full_cpu(cpu)) {
		if (cpu != smp_processor_id() ||
		    tick_nohz_tick_stopped())
			tick_nohz_full_kick_cpu(cpu);
		return true;
	}

	return false;
}

/*
 * Wake up the specified CPU. If the CPU is going offline, it is the
 * caller's responsibility to deal with the lost wakeup, for example,
 * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
 */
void wake_up_nohz_cpu(int cpu)
{
	if (!wake_up_full_nohz_cpu(cpu))
		wake_up_idle_cpu(cpu);
}

static void nohz_csd_func(void *info)
{
	struct rq *rq = info;
	int cpu = cpu_of(rq);
	unsigned int flags;

	/*
	 * Release the rq::nohz_csd.
	 */
	flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(cpu));
	WARN_ON(!(flags & NOHZ_KICK_MASK));

	rq->idle_balance = idle_cpu(cpu);
	if (rq->idle_balance && !need_resched()) {
		rq->nohz_idle_balance = flags;
		raise_softirq_irqoff(SCHED_SOFTIRQ);
	}
}

#endif /* CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_NO_HZ_FULL
bool sched_can_stop_tick(struct rq *rq)
{
	int fifo_nr_running;

	/* Deadline tasks, even if single, need the tick */
	if (rq->dl.dl_nr_running)
		return false;

	/*
	 * If there is more than one RR task, we need the tick to effect the
	 * actual RR behaviour.
	 */
	if (rq->rt.rr_nr_running) {
		if (rq->rt.rr_nr_running == 1)
			return true;
		else
			return false;
	}

	/*
	 * If there are no RR tasks, but FIFO tasks, we can skip the tick, no
	 * forced preemption between FIFO tasks.
	 */
	fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
	if (fifo_nr_running)
		return true;

	/*
	 * If there are no DL, RR/FIFO tasks, there must only be CFS tasks left;
	 * if there's more than one we need the tick for involuntary
	 * preemption.
	 */
	if (rq->nr_running > 1)
		return false;

	return true;
}
#endif /* CONFIG_NO_HZ_FULL */
#endif /* CONFIG_SMP */

#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
/*
 * Iterate task_group tree rooted at *from, calling @down when first entering a
 * node and @up when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
int walk_tg_tree_from(struct task_group *from,
		      tg_visitor down, tg_visitor up, void *data)
{
	struct task_group *parent, *child;
	int ret;

	parent = from;

down:
	ret = (*down)(parent, data);
	if (ret)
		goto out;
	list_for_each_entry_rcu(child, &parent->children, siblings) {
		parent = child;
		goto down;

up:
		continue;
	}
	ret = (*up)(parent, data);
	if (ret || parent == from)
		goto out;

	child = parent;
	parent = parent->parent;
	if (parent)
		goto up;
out:
	return ret;
}

int tg_nop(struct task_group *tg, void *data)
{
	return 0;
}
#endif

static void set_load_weight(struct task_struct *p, bool update_load)
{
	int prio = p->static_prio - MAX_RT_PRIO;
	struct load_weight *load = &p->se.load;

	/*
	 * SCHED_IDLE tasks get minimal weight:
	 */
	if (task_has_idle_policy(p)) {
		load->weight = scale_load(WEIGHT_IDLEPRIO);
		load->inv_weight = WMULT_IDLEPRIO;
		return;
	}

	/*
	 * SCHED_OTHER tasks have to update their load when changing their
	 * weight
	 */
	if (update_load && p->sched_class == &fair_sched_class) {
		reweight_task(p, prio);
	} else {
		load->weight = scale_load(sched_prio_to_weight[prio]);
		load->inv_weight = sched_prio_to_wmult[prio];
	}
}

#ifdef CONFIG_UCLAMP_TASK
/*
 * Serializes updates of utilization clamp values
 *
 * The (slow-path) user-space triggers utilization clamp value updates which
 * can require updates on (fast-path) scheduler's data structures used to
 * support enqueue/dequeue operations.
 * While the per-CPU rq lock protects fast-path update operations, user-space
 * requests are serialized using a mutex to reduce the risk of conflicting
 * updates or API abuses.
 */
static DEFINE_MUTEX(uclamp_mutex);

/* Max allowed minimum utilization */
unsigned int sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;

/* Max allowed maximum utilization */
unsigned int sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;

/* All clamps are required to be less or equal than these values */
static struct uclamp_se uclamp_default[UCLAMP_CNT];

/* Integer rounded range for each bucket */
#define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)

#define for_each_clamp_id(clamp_id) \
	for ((clamp_id) = 0; (clamp_id) < UCLAMP_CNT; (clamp_id)++)

static inline unsigned int uclamp_bucket_id(unsigned int clamp_value)
{
	return clamp_value / UCLAMP_BUCKET_DELTA;
}

static inline unsigned int uclamp_bucket_base_value(unsigned int clamp_value)
{
	return UCLAMP_BUCKET_DELTA * uclamp_bucket_id(clamp_value);
}

static inline unsigned int uclamp_none(enum uclamp_id clamp_id)
{
	if (clamp_id == UCLAMP_MIN)
		return 0;
	return SCHED_CAPACITY_SCALE;
}

static inline void uclamp_se_set(struct uclamp_se *uc_se,
				 unsigned int value, bool user_defined)
{
	uc_se->value = value;
	uc_se->bucket_id = uclamp_bucket_id(value);
	uc_se->user_defined = user_defined;
}
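
/*
 * Worked example, for illustration only: with SCHED_CAPACITY_SCALE == 1024
 * and the default CONFIG_UCLAMP_BUCKETS_COUNT of 5, UCLAMP_BUCKET_DELTA is
 * DIV_ROUND_CLOSEST(1024, 5) == 205. A requested clamp value of 300 then
 * maps to bucket_id 300 / 205 == 1, whose base value is 1 * 205 == 205,
 * while the maximum value 1024 lands in the top bucket, 1024 / 205 == 4.
 */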

static inline unsigned int
uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
		  unsigned int clamp_value)
{
	/*
	 * Avoid blocked utilization pushing up the frequency when we go
	 * idle (which drops the max-clamp) by retaining the last known
	 * max-clamp.
	 */
	if (clamp_id == UCLAMP_MAX) {
		rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
		return clamp_value;
	}

	return uclamp_none(UCLAMP_MIN);
}

static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
				     unsigned int clamp_value)
{
	/* Reset max-clamp retention only on idle exit */
	if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
		return;

	WRITE_ONCE(rq->uclamp[clamp_id].value, clamp_value);
}

static inline
unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
				 unsigned int clamp_value)
{
	struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
	int bucket_id = UCLAMP_BUCKETS - 1;

	/*
	 * Since both min and max clamps are max aggregated, find the
	 * top most bucket with tasks in.
	 */
	for ( ; bucket_id >= 0; bucket_id--) {
		if (!bucket[bucket_id].tasks)
			continue;
		return bucket[bucket_id].value;
	}

	/* No tasks -- default clamp values */
	return uclamp_idle_value(rq, clamp_id, clamp_value);
}

static inline struct uclamp_se
uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
{
	struct uclamp_se uc_req = p->uclamp_req[clamp_id];
#ifdef CONFIG_UCLAMP_TASK_GROUP
	struct uclamp_se uc_max;

	/*
	 * Tasks in autogroups or root task group will be
	 * restricted by system defaults.
	 */
	if (task_group_is_autogroup(task_group(p)))
		return uc_req;
	if (task_group(p) == &root_task_group)
		return uc_req;

	uc_max = task_group(p)->uclamp[clamp_id];
	if (uc_req.value > uc_max.value || !uc_req.user_defined)
		return uc_max;
#endif

	return uc_req;
}

/*
 * The effective clamp bucket index of a task depends on, by increasing
 * priority:
 * - the task specific clamp value, when explicitly requested from userspace
 * - the task group effective clamp value, for tasks not either in the root
 *   group or in an autogroup
 * - the system default clamp value, defined by the sysadmin
 */
static inline struct uclamp_se
uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
{
	struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id);
	struct uclamp_se uc_max = uclamp_default[clamp_id];

	/* System default restrictions always apply */
	if (unlikely(uc_req.value > uc_max.value))
		return uc_max;

	return uc_req;
}

unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
{
	struct uclamp_se uc_eff;

	/* Task currently refcounted: use back-annotated (effective) value */
	if (p->uclamp[clamp_id].active)
		return (unsigned long)p->uclamp[clamp_id].value;

	uc_eff = uclamp_eff_get(p, clamp_id);

	return (unsigned long)uc_eff.value;
}
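
/*
 * Illustration only: if a task explicitly requests util_min == 300 while its
 * (non-root, non-autogroup) task group's effective util_min is 200,
 * uclamp_tg_restrict() clips the request to 200; and if the sysadmin had
 * lowered the corresponding system default below that (e.g. to 100),
 * uclamp_eff_get() would clip it further to 100.
 */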

/*
 * When a task is enqueued on a rq, the clamp bucket currently defined by the
 * task's uclamp::bucket_id is refcounted on that rq. This also immediately
 * updates the rq's clamp value if required.
 *
 * Tasks can have a task-specific value requested from user-space, so we track
 * within each bucket the maximum value for tasks refcounted in it.
 * This "local max aggregation" allows to track the exact "requested" value
 * for each bucket when all its RUNNABLE tasks require the same clamp.
 */
static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
				    enum uclamp_id clamp_id)
{
	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
	struct uclamp_bucket *bucket;

	lockdep_assert_held(&rq->lock);

	/* Update task effective clamp */
	p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);

	bucket = &uc_rq->bucket[uc_se->bucket_id];
	bucket->tasks++;
	uc_se->active = true;

	uclamp_idle_reset(rq, clamp_id, uc_se->value);

	/*
	 * Local max aggregation: rq buckets always track the max
	 * "requested" clamp value of its RUNNABLE tasks.
	 */
	if (bucket->tasks == 1 || uc_se->value > bucket->value)
		bucket->value = uc_se->value;

	if (uc_se->value > READ_ONCE(uc_rq->value))
		WRITE_ONCE(uc_rq->value, uc_se->value);
}

/*
 * When a task is dequeued from a rq, the clamp bucket refcounted by the task
 * is released. If this is the last task reference counting the rq's max
 * active clamp value, then the rq's clamp value is updated.
 *
 * Both refcounted tasks and rq's cached clamp values are expected to be
 * always valid. If it's detected they are not, as defensive programming,
 * enforce the expected state and warn.
 */
static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
				    enum uclamp_id clamp_id)
{
	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
	struct uclamp_bucket *bucket;
	unsigned int bkt_clamp;
	unsigned int rq_clamp;

	lockdep_assert_held(&rq->lock);

	bucket = &uc_rq->bucket[uc_se->bucket_id];
	SCHED_WARN_ON(!bucket->tasks);
	if (likely(bucket->tasks))
		bucket->tasks--;
	uc_se->active = false;

	/*
	 * Keep "local max aggregation" simple and accept to (possibly)
	 * overboost some RUNNABLE tasks in the same bucket.
	 * The rq clamp bucket value is reset to its base value whenever
	 * there are no more RUNNABLE tasks refcounting it.
	 */
	if (likely(bucket->tasks))
		return;

	rq_clamp = READ_ONCE(uc_rq->value);
	/*
	 * Defensive programming: this should never happen. If it happens,
	 * e.g. due to future modification, warn and fixup the expected value.
	 */
	SCHED_WARN_ON(bucket->value > rq_clamp);
	if (bucket->value >= rq_clamp) {
		bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
		WRITE_ONCE(uc_rq->value, bkt_clamp);
	}
}

static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
{
	enum uclamp_id clamp_id;

	if (unlikely(!p->sched_class->uclamp_enabled))
		return;

	for_each_clamp_id(clamp_id)
		uclamp_rq_inc_id(rq, p, clamp_id);

	/* Reset clamp idle holding when there is one RUNNABLE task */
	if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
		rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
}

static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
{
	enum uclamp_id clamp_id;

	if (unlikely(!p->sched_class->uclamp_enabled))
		return;

	for_each_clamp_id(clamp_id)
		uclamp_rq_dec_id(rq, p, clamp_id);
}

static inline void
uclamp_update_active(struct task_struct *p, enum uclamp_id clamp_id)
{
	struct rq_flags rf;
	struct rq *rq;

	/*
	 * Lock the task and the rq where the task is (or was) queued.
	 *
	 * We might lock the (previous) rq of a !RUNNABLE task, but that's the
	 * price to pay to safely serialize util_{min,max} updates with
	 * enqueues, dequeues and migration operations.
	 * This is the same locking schema used by __set_cpus_allowed_ptr().
	 */
	rq = task_rq_lock(p, &rf);

	/*
	 * Setting the clamp bucket is serialized by task_rq_lock().
	 * If the task is not yet RUNNABLE and its task_struct is not
	 * affecting a valid clamp bucket, the next time it's enqueued,
	 * it will already see the updated clamp bucket value.
	 */
	if (p->uclamp[clamp_id].active) {
		uclamp_rq_dec_id(rq, p, clamp_id);
		uclamp_rq_inc_id(rq, p, clamp_id);
	}

	task_rq_unlock(rq, p, &rf);
}

#ifdef CONFIG_UCLAMP_TASK_GROUP
static inline void
uclamp_update_active_tasks(struct cgroup_subsys_state *css,
			   unsigned int clamps)
{
	enum uclamp_id clamp_id;
	struct css_task_iter it;
	struct task_struct *p;

	css_task_iter_start(css, 0, &it);
	while ((p = css_task_iter_next(&it))) {
		for_each_clamp_id(clamp_id) {
			if ((0x1 << clamp_id) & clamps)
				uclamp_update_active(p, clamp_id);
		}
	}
	css_task_iter_end(&it);
}

static void cpu_util_update_eff(struct cgroup_subsys_state *css);
static void uclamp_update_root_tg(void)
{
	struct task_group *tg = &root_task_group;

	uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN],
		      sysctl_sched_uclamp_util_min, false);
	uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
		      sysctl_sched_uclamp_util_max, false);

	rcu_read_lock();
	cpu_util_update_eff(&root_task_group.css);
	rcu_read_unlock();
}
#else
static void uclamp_update_root_tg(void) { }
#endif

int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{
	bool update_root_tg = false;
	int old_min, old_max;
	int result;

	mutex_lock(&uclamp_mutex);
	old_min = sysctl_sched_uclamp_util_min;
	old_max = sysctl_sched_uclamp_util_max;

	result = proc_dointvec(table, write, buffer, lenp, ppos);
	if (result)
		goto undo;
	if (!write)
		goto done;

	if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
	    sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE) {
		result = -EINVAL;
		goto undo;
	}

	if (old_min != sysctl_sched_uclamp_util_min) {
		uclamp_se_set(&uclamp_default[UCLAMP_MIN],
			      sysctl_sched_uclamp_util_min, false);
		update_root_tg = true;
	}
	if (old_max != sysctl_sched_uclamp_util_max) {
		uclamp_se_set(&uclamp_default[UCLAMP_MAX],
			      sysctl_sched_uclamp_util_max, false);
		update_root_tg = true;
	}

	if (update_root_tg)
		uclamp_update_root_tg();

	/*
	 * We update all RUNNABLE tasks only when task groups are in use.
	 * Otherwise, keep it simple and do just a lazy update at each next
	 * task enqueue time.
	 */

	goto done;

undo:
	sysctl_sched_uclamp_util_min = old_min;
	sysctl_sched_uclamp_util_max = old_max;
done:
	mutex_unlock(&uclamp_mutex);

	return result;
}

static int uclamp_validate(struct task_struct *p,
			   const struct sched_attr *attr)
{
	unsigned int lower_bound = p->uclamp_req[UCLAMP_MIN].value;
	unsigned int upper_bound = p->uclamp_req[UCLAMP_MAX].value;

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN)
		lower_bound = attr->sched_util_min;
	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX)
		upper_bound = attr->sched_util_max;

	if (lower_bound > upper_bound)
		return -EINVAL;
	if (upper_bound > SCHED_CAPACITY_SCALE)
		return -EINVAL;

	return 0;
}

static void __setscheduler_uclamp(struct task_struct *p,
				  const struct sched_attr *attr)
{
	enum uclamp_id clamp_id;

	/*
	 * On scheduling class change, reset to default clamps for tasks
	 * without a task-specific value.
	 */
	for_each_clamp_id(clamp_id) {
		struct uclamp_se *uc_se = &p->uclamp_req[clamp_id];
		unsigned int clamp_value = uclamp_none(clamp_id);

		/* Keep using defined clamps across class changes */
		if (uc_se->user_defined)
			continue;

		/* By default, RT tasks always get 100% boost */
		if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
			clamp_value = uclamp_none(UCLAMP_MAX);

		uclamp_se_set(uc_se, clamp_value, false);
	}

	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)))
		return;

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
		uclamp_se_set(&p->uclamp_req[UCLAMP_MIN],
			      attr->sched_util_min, true);
	}

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {
		uclamp_se_set(&p->uclamp_req[UCLAMP_MAX],
			      attr->sched_util_max, true);
	}
}

static void uclamp_fork(struct task_struct *p)
{
	enum uclamp_id clamp_id;

	for_each_clamp_id(clamp_id)
		p->uclamp[clamp_id].active = false;

	if (likely(!p->sched_reset_on_fork))
		return;

	for_each_clamp_id(clamp_id) {
		uclamp_se_set(&p->uclamp_req[clamp_id],
			      uclamp_none(clamp_id), false);
	}
}

static void __init init_uclamp(void)
{
	struct uclamp_se uc_max = {};
	enum uclamp_id clamp_id;
	int cpu;

	mutex_init(&uclamp_mutex);

	for_each_possible_cpu(cpu) {
		memset(&cpu_rq(cpu)->uclamp, 0,
				sizeof(struct uclamp_rq)*UCLAMP_CNT);
		cpu_rq(cpu)->uclamp_flags = 0;
	}

	for_each_clamp_id(clamp_id) {
		uclamp_se_set(&init_task.uclamp_req[clamp_id],
			      uclamp_none(clamp_id), false);
	}

	/* System defaults allow max clamp values for both indexes */
	uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false);
	for_each_clamp_id(clamp_id) {
		uclamp_default[clamp_id] = uc_max;
#ifdef CONFIG_UCLAMP_TASK_GROUP
		root_task_group.uclamp_req[clamp_id] = uc_max;
		root_task_group.uclamp[clamp_id] = uc_max;
#endif
	}
}

#else /* CONFIG_UCLAMP_TASK */
static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { }
static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
static inline int uclamp_validate(struct task_struct *p,
				  const struct sched_attr *attr)
{
	return -EOPNOTSUPP;
}
static void __setscheduler_uclamp(struct task_struct *p,
				  const struct sched_attr *attr) { }
static inline void uclamp_fork(struct task_struct *p) { }
static inline void init_uclamp(void) { }
#endif /* CONFIG_UCLAMP_TASK */
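
/*
 * Illustrative sketch only: user space requests the per-task clamps handled
 * by uclamp_validate() and __setscheduler_uclamp() above through
 * sched_setattr(2), roughly as follows (the values are arbitrary):
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_NORMAL,
 *		.sched_flags	= SCHED_FLAG_UTIL_CLAMP_MIN |
 *				  SCHED_FLAG_UTIL_CLAMP_MAX,
 *		.sched_util_min	= 128,
 *		.sched_util_max	= 512,
 *	};
 *
 *	syscall(__NR_sched_setattr, pid, &attr, 0);
 */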

static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (!(flags & ENQUEUE_NOCLOCK))
		update_rq_clock(rq);

	if (!(flags & ENQUEUE_RESTORE)) {
		sched_info_queued(rq, p);
		psi_enqueue(p, flags & ENQUEUE_WAKEUP);
	}

	uclamp_rq_inc(rq, p);
	p->sched_class->enqueue_task(rq, p, flags);
}

static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (!(flags & DEQUEUE_NOCLOCK))
		update_rq_clock(rq);

	if (!(flags & DEQUEUE_SAVE)) {
		sched_info_dequeued(rq, p);
		psi_dequeue(p, flags & DEQUEUE_SLEEP);
	}

	uclamp_rq_dec(rq, p);
	p->sched_class->dequeue_task(rq, p, flags);
}

void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible--;

	enqueue_task(rq, p, flags);

	p->on_rq = TASK_ON_RQ_QUEUED;
}

void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
	p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING;

	if (task_contributes_to_load(p))
		rq->nr_uninterruptible++;

	dequeue_task(rq, p, flags);
}

/*
 * __normal_prio - return the priority that is based on the static prio
 */
static inline int __normal_prio(struct task_struct *p)
{
	return p->static_prio;
}

/*
 * Calculate the expected normal priority: i.e. priority
 * without taking RT-inheritance into account. Might be
 * boosted by interactivity modifiers. Changes upon fork,
 * setprio syscalls, and whenever the interactivity
 * estimator recalculates.
 */
static inline int normal_prio(struct task_struct *p)
{
	int prio;

	if (task_has_dl_policy(p))
		prio = MAX_DL_PRIO-1;
	else if (task_has_rt_policy(p))
		prio = MAX_RT_PRIO-1 - p->rt_priority;
	else
		prio = __normal_prio(p);
	return prio;
}

/*
 * Calculate the current priority, i.e. the priority
 * taken into account by the scheduler. This value might
 * be boosted by RT tasks, or might be boosted by
 * interactivity modifiers. Will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
 */
static int effective_prio(struct task_struct *p)
{
	p->normal_prio = normal_prio(p);
	/*
	 * If we are RT tasks or we were boosted to RT priority,
	 * keep the priority unchanged. Otherwise, update priority
	 * to the normal priority:
	 */
	if (!rt_prio(p->prio))
		return p->normal_prio;
	return p->prio;
}
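
/*
 * Worked example, for illustration only (assuming the usual MAX_RT_PRIO of
 * 100 and MAX_DL_PRIO of 0): a SCHED_FIFO task with rt_priority 10 gets
 * normal_prio = 100 - 1 - 10 = 89, a SCHED_NORMAL task at nice 0 keeps its
 * static_prio of 120, and a SCHED_DEADLINE task gets -1, which sorts above
 * all RT priorities (a lower prio value means a higher priority).
 */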

/**
 * task_curr - is this task currently executing on a CPU?
 * @p: the task in question.
 *
 * Return: 1 if the task is currently executing. 0 otherwise.
 */
inline int task_curr(const struct task_struct *p)
{
	return cpu_curr(task_cpu(p)) == p;
}

/*
 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
 * use the balance_callback list if you want balancing.
 *
 * this means any call to check_class_changed() must be followed by a call to
 * balance_callback().
 */
static inline void check_class_changed(struct rq *rq, struct task_struct *p,
				       const struct sched_class *prev_class,
				       int oldprio)
{
	if (prev_class != p->sched_class) {
		if (prev_class->switched_from)
			prev_class->switched_from(rq, p);

		p->sched_class->switched_to(rq, p);
	} else if (oldprio != p->prio || dl_task(p))
		p->sched_class->prio_changed(rq, p, oldprio);
}

void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
	const struct sched_class *class;

	if (p->sched_class == rq->curr->sched_class) {
		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
	} else {
		for_each_class(class) {
			if (class == rq->curr->sched_class)
				break;
			if (class == p->sched_class) {
				resched_curr(rq);
				break;
			}
		}
	}

	/*
	 * A queue event has occurred, and we're going to schedule. In
	 * this case, we can save a useless back-to-back clock update.
	 */
	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
		rq_clock_skip_update(rq);
}

#ifdef CONFIG_SMP

/*
 * Per-CPU kthreads are allowed to run on !active && online CPUs, see
 * __set_cpus_allowed_ptr() and select_fallback_rq().
 */
static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
{
	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
		return false;

	if (is_per_cpu_kthread(p))
		return cpu_online(cpu);

	return cpu_active(cpu);
}

/*
 * This is how migration works:
 *
 * 1) we invoke migration_cpu_stop() on the target CPU using
 *    stop_one_cpu().
 * 2) stopper starts to run (implicitly forcing the migrated thread
 *    off the CPU)
 * 3) it checks whether the migrated task is still in the wrong runqueue.
 * 4) if it's in the wrong runqueue then the migration thread removes
 *    it and puts it into the right queue.
 * 5) stopper completes and stop_one_cpu() returns and the migration
 *    is done.
 */

/*
 * move_queued_task - move a queued task to new rq.
 *
 * Returns (locked) new rq. Old rq's lock is released.
 */
static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
				   struct task_struct *p, int new_cpu)
{
	lockdep_assert_held(&rq->lock);

	WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
	dequeue_task(rq, p, DEQUEUE_NOCLOCK);
	set_task_cpu(p, new_cpu);
	rq_unlock(rq, rf);

	rq = cpu_rq(new_cpu);

	rq_lock(rq, rf);
	BUG_ON(task_cpu(p) != new_cpu);
	enqueue_task(rq, p, 0);
	p->on_rq = TASK_ON_RQ_QUEUED;
	check_preempt_curr(rq, p, 0);

	return rq;
}

struct migration_arg {
	struct task_struct *task;
	int dest_cpu;
};

/*
 * Move (not current) task off this CPU, onto the destination CPU. We're doing
 * this because either it can't run here any more (set_cpus_allowed()
 * away from this CPU, or CPU going down), or because we're
 * attempting to rebalance this task on exec (sched_exec).
 *
 * So we race with normal scheduler movements, but that's OK, as long
 * as the task is no longer on this CPU.
 */
static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
				 struct task_struct *p, int dest_cpu)
{
	/* Affinity changed (again). */
	if (!is_cpu_allowed(p, dest_cpu))
		return rq;

	update_rq_clock(rq);
	rq = move_queued_task(rq, rf, p, dest_cpu);

	return rq;
}

/*
 * migration_cpu_stop - this will be executed by a highprio stopper thread
 * and performs thread migration by bumping thread off CPU then
 * 'pushing' onto another runqueue.
 */
static int migration_cpu_stop(void *data)
{
	struct migration_arg *arg = data;
	struct task_struct *p = arg->task;
	struct rq *rq = this_rq();
	struct rq_flags rf;

	/*
	 * The original target CPU might have gone down and we might
	 * be on another CPU but it doesn't matter.
	 */
	local_irq_disable();
	/*
	 * We need to explicitly wake pending tasks before running
	 * __migrate_task() such that we will not miss enforcing cpus_ptr
	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
	 */
	flush_smp_call_function_from_idle();

	raw_spin_lock(&p->pi_lock);
	rq_lock(rq, &rf);
	/*
	 * If task_rq(p) != rq, it cannot be migrated here, because we're
	 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
	 * we're holding p->pi_lock.
	 */
	if (task_rq(p) == rq) {
		if (task_on_rq_queued(p))
			rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
		else
			p->wake_cpu = arg->dest_cpu;
	}
	rq_unlock(rq, &rf);
	raw_spin_unlock(&p->pi_lock);

	local_irq_enable();
	return 0;
}

/*
 * sched_class::set_cpus_allowed must do the below, but is not required to
 * actually call this function.
 */
void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
{
	cpumask_copy(&p->cpus_mask, new_mask);
	p->nr_cpus_allowed = cpumask_weight(new_mask);
}

void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
	struct rq *rq = task_rq(p);
	bool queued, running;

	lockdep_assert_held(&p->pi_lock);

	queued = task_on_rq_queued(p);
	running = task_current(rq, p);

	if (queued) {
		/*
		 * Because __kthread_bind() calls this on blocked tasks without
		 * holding rq->lock.
		 */
		lockdep_assert_held(&rq->lock);
		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
	}
	if (running)
		put_prev_task(rq, p);

	p->sched_class->set_cpus_allowed(p, new_mask);

	if (queued)
		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
	if (running)
		set_next_task(rq, p);
}

/*
 * Change a given task's CPU affinity. Migrate the thread to a
 * proper CPU and schedule it away if the CPU it's executing on
 * is removed from the allowed bitmask.
 *
 * NOTE: the caller must have a valid reference to the task, the
 * task must not exit() & deallocate itself prematurely. The
 * call is not atomic; no spinlocks may be held.
 */
static int __set_cpus_allowed_ptr(struct task_struct *p,
				  const struct cpumask *new_mask, bool check)
{
	const struct cpumask *cpu_valid_mask = cpu_active_mask;
	unsigned int dest_cpu;
	struct rq_flags rf;
	struct rq *rq;
	int ret = 0;

	rq = task_rq_lock(p, &rf);
	update_rq_clock(rq);

	if (p->flags & PF_KTHREAD) {
		/*
		 * Kernel threads are allowed on online && !active CPUs
		 */
		cpu_valid_mask = cpu_online_mask;
	}

	/*
	 * Must re-check here, to close a race against __kthread_bind(),
	 * sched_setaffinity() is not guaranteed to observe the flag.
	 */
	if (check && (p->flags & PF_NO_SETAFFINITY)) {
		ret = -EINVAL;
		goto out;
	}

	if (cpumask_equal(p->cpus_ptr, new_mask))
		goto out;

	/*
	 * Picking a ~random cpu helps in cases where we are changing affinity
	 * for groups of tasks (ie. cpuset), so that load balancing is not
	 * immediately required to distribute the tasks within their new mask.
	 */
	dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, new_mask);
	if (dest_cpu >= nr_cpu_ids) {
		ret = -EINVAL;
		goto out;
	}

	do_set_cpus_allowed(p, new_mask);

	if (p->flags & PF_KTHREAD) {
		/*
		 * For kernel threads that do indeed end up on online &&
		 * !active we want to ensure they are strict per-CPU threads.
		 */
		WARN_ON(cpumask_intersects(new_mask, cpu_online_mask) &&
			!cpumask_intersects(new_mask, cpu_active_mask) &&
			p->nr_cpus_allowed != 1);
	}

	/* Can the task run on the task's current CPU? If so, we're done */
	if (cpumask_test_cpu(task_cpu(p), new_mask))
		goto out;

	if (task_running(rq, p) || p->state == TASK_WAKING) {
		struct migration_arg arg = { p, dest_cpu };
		/* Need help from migration thread: drop lock and wait. */
		task_rq_unlock(rq, p, &rf);
		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
		return 0;
	} else if (task_on_rq_queued(p)) {
		/*
		 * OK, since we're going to drop the lock immediately
		 * afterwards anyway.
		 */
		rq = move_queued_task(rq, &rf, p, dest_cpu);
	}
out:
	task_rq_unlock(rq, p, &rf);

	return ret;
}

int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
	return __set_cpus_allowed_ptr(p, new_mask, false);
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);

void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
#ifdef CONFIG_SCHED_DEBUG
	/*
	 * We should never call set_task_cpu() on a blocked task,
	 * ttwu() will sort out the placement.
	 */
	WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
			!p->on_rq);

	/*
	 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
	 * because schedstat_wait_{start,end} rebase migrating task's wait_start
	 * time relying on p->on_rq.
	 */
	WARN_ON_ONCE(p->state == TASK_RUNNING &&
		     p->sched_class == &fair_sched_class &&
		     (p->on_rq && !task_on_rq_migrating(p)));

#ifdef CONFIG_LOCKDEP
	/*
	 * The caller should hold either p->pi_lock or rq->lock, when changing
	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
	 *
	 * sched_move_task() holds both and thus holding either pins the cgroup,
	 * see task_group().
	 *
	 * Furthermore, all task_rq users should acquire both locks, see
	 * task_rq_lock().
	 */
	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
				      lockdep_is_held(&task_rq(p)->lock)));
#endif
	/*
	 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
	 */
	WARN_ON_ONCE(!cpu_online(new_cpu));
#endif

	trace_sched_migrate_task(p, new_cpu);

	if (task_cpu(p) != new_cpu) {
		if (p->sched_class->migrate_task_rq)
			p->sched_class->migrate_task_rq(p, new_cpu);
		p->se.nr_migrations++;
		rseq_migrate(p);
		perf_event_task_migrate(p);
	}

	__set_task_cpu(p, new_cpu);
}

#ifdef CONFIG_NUMA_BALANCING
static void __migrate_swap_task(struct task_struct *p, int cpu)
{
	if (task_on_rq_queued(p)) {
		struct rq *src_rq, *dst_rq;
		struct rq_flags srf, drf;

		src_rq = task_rq(p);
		dst_rq = cpu_rq(cpu);

		rq_pin_lock(src_rq, &srf);
		rq_pin_lock(dst_rq, &drf);

		deactivate_task(src_rq, p, 0);
		set_task_cpu(p, cpu);
		activate_task(dst_rq, p, 0);
		check_preempt_curr(dst_rq, p, 0);

		rq_unpin_lock(dst_rq, &drf);
		rq_unpin_lock(src_rq, &srf);

	} else {
		/*
		 * Task isn't running anymore; make it appear like we migrated
		 * it before it went to sleep. This means on wakeup we make the
		 * previous CPU our target instead of where it really is.
		 */
		p->wake_cpu = cpu;
	}
}

struct migration_swap_arg {
	struct task_struct *src_task, *dst_task;
	int src_cpu, dst_cpu;
};

static int migrate_swap_stop(void *data)
{
	struct migration_swap_arg *arg = data;
	struct rq *src_rq, *dst_rq;
	int ret = -EAGAIN;

	if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
		return -EAGAIN;

	src_rq = cpu_rq(arg->src_cpu);
	dst_rq = cpu_rq(arg->dst_cpu);

	double_raw_lock(&arg->src_task->pi_lock,
			&arg->dst_task->pi_lock);
	double_rq_lock(src_rq, dst_rq);

	if (task_cpu(arg->dst_task) != arg->dst_cpu)
		goto unlock;

	if (task_cpu(arg->src_task) != arg->src_cpu)
		goto unlock;

	if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr))
		goto unlock;

	if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr))
		goto unlock;

	__migrate_swap_task(arg->src_task, arg->dst_cpu);
	__migrate_swap_task(arg->dst_task, arg->src_cpu);

	ret = 0;

unlock:
	double_rq_unlock(src_rq, dst_rq);
	raw_spin_unlock(&arg->dst_task->pi_lock);
	raw_spin_unlock(&arg->src_task->pi_lock);

	return ret;
}

/*
 * Cross migrate two tasks
 */
int migrate_swap(struct task_struct *cur, struct task_struct *p,
		 int target_cpu, int curr_cpu)
{
	struct migration_swap_arg arg;
	int ret = -EINVAL;

	arg = (struct migration_swap_arg){
		.src_task = cur,
		.src_cpu = curr_cpu,
		.dst_task = p,
		.dst_cpu = target_cpu,
	};

	if (arg.src_cpu == arg.dst_cpu)
		goto out;

	/*
	 * These three tests are all lockless; this is OK since all of them
	 * will be re-checked with proper locks held further down the line.
	 */
	if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
		goto out;

	if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr))
		goto out;

	if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr))
		goto out;

	trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
	ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);

out:
	return ret;
}
#endif /* CONFIG_NUMA_BALANCING */

/*
 * wait_task_inactive - wait for a thread to unschedule.
 *
 * If @match_state is nonzero, it's the @p->state value just checked and
 * not expected to change. If it changes, i.e. @p might have woken up,
 * then return zero. When we succeed in waiting for @p to be off its CPU,
 * we return a positive number (its total switch count). If a second call
 * a short while later returns the same number, the caller can be sure that
 * @p has remained unscheduled the whole time.
 *
 * The caller must ensure that the task *will* unschedule sometime soon,
 * else this function might spin for a *long* time. This function can't
 * be called with interrupts off, or it may introduce deadlock with
 * smp_call_function() if an IPI is sent by the same process we are
 * waiting to become inactive.
 */
unsigned long wait_task_inactive(struct task_struct *p, long match_state)
{
	int running, queued;
	struct rq_flags rf;
	unsigned long ncsw;
	struct rq *rq;

	for (;;) {
		/*
		 * We do the initial early heuristics without holding
		 * any task-queue locks at all. We'll only try to get
		 * the runqueue lock when things look like they will
		 * work out!
		 */
		rq = task_rq(p);

		/*
		 * If the task is actively running on another CPU
		 * still, just relax and busy-wait without holding
		 * any locks.
		 *
		 * NOTE! Since we don't hold any locks, it's not
		 * even sure that "rq" stays as the right runqueue!
		 * But we don't care, since "task_running()" will
		 * return false if the runqueue has changed and p
		 * is actually now running somewhere else!
		 */
		while (task_running(rq, p)) {
			if (match_state && unlikely(p->state != match_state))
				return 0;
			cpu_relax();
		}

		/*
		 * Ok, time to look more closely! We need the rq
		 * lock now, to be *sure*. If we're wrong, we'll
		 * just go back and repeat.
		 */
		rq = task_rq_lock(p, &rf);
		trace_sched_wait_task(p);
		running = task_running(rq, p);
		queued = task_on_rq_queued(p);
		ncsw = 0;
		if (!match_state || p->state == match_state)
			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
		task_rq_unlock(rq, p, &rf);

		/*
		 * If it changed from the expected state, bail out now.
		 */
		if (unlikely(!ncsw))
			break;

		/*
		 * Was it really running after all now that we
		 * checked with the proper locks actually held?
		 *
		 * Oops. Go back and try again..
		 */
		if (unlikely(running)) {
			cpu_relax();
			continue;
		}

		/*
		 * It's not enough that it's not actively running,
		 * it must be off the runqueue _entirely_, and not
		 * preempted!
		 *
		 * So if it was still runnable (but just not actively
		 * running right now), it's preempted, and we should
		 * yield - it could be a while.
		 */
		if (unlikely(queued)) {
			ktime_t to = NSEC_PER_SEC / HZ;

			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_hrtimeout(&to, HRTIMER_MODE_REL);
			continue;
		}

		/*
		 * Ahh, all good. It wasn't running, and it wasn't
		 * runnable, which means that it will never become
		 * running in the future either. We're all done!
		 */
		break;
	}

	return ncsw;
}

/***
 * kick_process - kick a running thread to enter/exit the kernel
 * @p: the to-be-kicked thread
 *
 * Cause a process which is running on another CPU to enter
 * kernel-mode, without any delay. (to get signals handled.)
 *
 * NOTE: this function doesn't have to take the runqueue lock,
 * because all it wants to ensure is that the remote task enters
 * the kernel. If the IPI races and the task has been migrated
 * to another CPU then no harm is done and the purpose has been
 * achieved as well.
 */
void kick_process(struct task_struct *p)
{
	int cpu;

	preempt_disable();
	cpu = task_cpu(p);
	if ((cpu != smp_processor_id()) && task_curr(p))
		smp_send_reschedule(cpu);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kick_process);

/*
 * ->cpus_ptr is protected by both rq->lock and p->pi_lock
 *
 * A few notes on cpu_active vs cpu_online:
 *
 *  - cpu_active must be a subset of cpu_online
 *
 *  - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
 *    see __set_cpus_allowed_ptr(). At this point the newly online
 *    CPU isn't yet part of the sched domains, and balancing will not
 *    see it.
 *
 *  - on CPU-down we clear cpu_active() to mask the sched domains and
 *    keep the load balancer from placing new tasks on the to-be-removed
 *    CPU. Existing tasks will remain running there and will be taken
 *    off.
 *
 * This means that fallback selection must not select !active CPUs.
 * And can assume that any active CPU must be online. Conversely
 * select_task_rq() below may allow selection of !active CPUs in order
 * to satisfy the above rules.
 */
static int select_fallback_rq(int cpu, struct task_struct *p)
{
	int nid = cpu_to_node(cpu);
	const struct cpumask *nodemask = NULL;
	enum { cpuset, possible, fail } state = cpuset;
	int dest_cpu;

	/*
	 * If the node that the CPU is on has been offlined, cpu_to_node()
	 * will return -1. There is no CPU on the node, and we should
	 * select a CPU on another node.
	 */
	if (nid != -1) {
		nodemask = cpumask_of_node(nid);

		/* Look for allowed, online CPU in same node. */
		for_each_cpu(dest_cpu, nodemask) {
			if (!cpu_active(dest_cpu))
				continue;
			if (cpumask_test_cpu(dest_cpu, p->cpus_ptr))
				return dest_cpu;
		}
	}

	for (;;) {
		/* Any allowed, online CPU? */
		for_each_cpu(dest_cpu, p->cpus_ptr) {
			if (!is_cpu_allowed(p, dest_cpu))
				continue;

			goto out;
		}

		/* No more Mr. Nice Guy. */
*/ 2053 switch (state) { 2054 case cpuset: 2055 if (IS_ENABLED(CONFIG_CPUSETS)) { 2056 cpuset_cpus_allowed_fallback(p); 2057 state = possible; 2058 break; 2059 } 2060 /* Fall-through */ 2061 case possible: 2062 do_set_cpus_allowed(p, cpu_possible_mask); 2063 state = fail; 2064 break; 2065 2066 case fail: 2067 BUG(); 2068 break; 2069 } 2070 } 2071 2072 out: 2073 if (state != cpuset) { 2074 /* 2075 * Don't tell them about moving exiting tasks or 2076 * kernel threads (both mm NULL), since they never 2077 * leave kernel. 2078 */ 2079 if (p->mm && printk_ratelimit()) { 2080 printk_deferred("process %d (%s) no longer affine to cpu%d\n", 2081 task_pid_nr(p), p->comm, cpu); 2082 } 2083 } 2084 2085 return dest_cpu; 2086 } 2087 2088 /* 2089 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable. 2090 */ 2091 static inline 2092 int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) 2093 { 2094 lockdep_assert_held(&p->pi_lock); 2095 2096 if (p->nr_cpus_allowed > 1) 2097 cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags); 2098 else 2099 cpu = cpumask_any(p->cpus_ptr); 2100 2101 /* 2102 * In order not to call set_task_cpu() on a blocking task we need 2103 * to rely on ttwu() to place the task on a valid ->cpus_ptr 2104 * CPU. 2105 * 2106 * Since this is common to all placement strategies, this lives here. 2107 * 2108 * [ this allows ->select_task() to simply return task_cpu(p) and 2109 * not worry about this generic constraint ] 2110 */ 2111 if (unlikely(!is_cpu_allowed(p, cpu))) 2112 cpu = select_fallback_rq(task_cpu(p), p); 2113 2114 return cpu; 2115 } 2116 2117 void sched_set_stop_task(int cpu, struct task_struct *stop) 2118 { 2119 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; 2120 struct task_struct *old_stop = cpu_rq(cpu)->stop; 2121 2122 if (stop) { 2123 /* 2124 * Make it appear like a SCHED_FIFO task, its something 2125 * userspace knows about and won't get confused about. 2126 * 2127 * Also, it will make PI more or less work without too 2128 * much confusion -- but then, stop work should not 2129 * rely on PI working anyway. 2130 */ 2131 sched_setscheduler_nocheck(stop, SCHED_FIFO, ¶m); 2132 2133 stop->sched_class = &stop_sched_class; 2134 } 2135 2136 cpu_rq(cpu)->stop = stop; 2137 2138 if (old_stop) { 2139 /* 2140 * Reset it back to a normal scheduling class so that 2141 * it can die in pieces. 
2142 */ 2143 old_stop->sched_class = &rt_sched_class; 2144 } 2145 } 2146 2147 #else 2148 2149 static inline int __set_cpus_allowed_ptr(struct task_struct *p, 2150 const struct cpumask *new_mask, bool check) 2151 { 2152 return set_cpus_allowed_ptr(p, new_mask); 2153 } 2154 2155 #endif /* CONFIG_SMP */ 2156 2157 static void 2158 ttwu_stat(struct task_struct *p, int cpu, int wake_flags) 2159 { 2160 struct rq *rq; 2161 2162 if (!schedstat_enabled()) 2163 return; 2164 2165 rq = this_rq(); 2166 2167 #ifdef CONFIG_SMP 2168 if (cpu == rq->cpu) { 2169 __schedstat_inc(rq->ttwu_local); 2170 __schedstat_inc(p->se.statistics.nr_wakeups_local); 2171 } else { 2172 struct sched_domain *sd; 2173 2174 __schedstat_inc(p->se.statistics.nr_wakeups_remote); 2175 rcu_read_lock(); 2176 for_each_domain(rq->cpu, sd) { 2177 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { 2178 __schedstat_inc(sd->ttwu_wake_remote); 2179 break; 2180 } 2181 } 2182 rcu_read_unlock(); 2183 } 2184 2185 if (wake_flags & WF_MIGRATED) 2186 __schedstat_inc(p->se.statistics.nr_wakeups_migrate); 2187 #endif /* CONFIG_SMP */ 2188 2189 __schedstat_inc(rq->ttwu_count); 2190 __schedstat_inc(p->se.statistics.nr_wakeups); 2191 2192 if (wake_flags & WF_SYNC) 2193 __schedstat_inc(p->se.statistics.nr_wakeups_sync); 2194 } 2195 2196 /* 2197 * Mark the task runnable and perform wakeup-preemption. 2198 */ 2199 static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags, 2200 struct rq_flags *rf) 2201 { 2202 check_preempt_curr(rq, p, wake_flags); 2203 p->state = TASK_RUNNING; 2204 trace_sched_wakeup(p); 2205 2206 #ifdef CONFIG_SMP 2207 if (p->sched_class->task_woken) { 2208 /* 2209 * Our task @p is fully woken up and running; so its safe to 2210 * drop the rq->lock, hereafter rq is only used for statistics. 2211 */ 2212 rq_unpin_lock(rq, rf); 2213 p->sched_class->task_woken(rq, p); 2214 rq_repin_lock(rq, rf); 2215 } 2216 2217 if (rq->idle_stamp) { 2218 u64 delta = rq_clock(rq) - rq->idle_stamp; 2219 u64 max = 2*rq->max_idle_balance_cost; 2220 2221 update_avg(&rq->avg_idle, delta); 2222 2223 if (rq->avg_idle > max) 2224 rq->avg_idle = max; 2225 2226 rq->idle_stamp = 0; 2227 } 2228 #endif 2229 } 2230 2231 static void 2232 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, 2233 struct rq_flags *rf) 2234 { 2235 int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK; 2236 2237 lockdep_assert_held(&rq->lock); 2238 2239 #ifdef CONFIG_SMP 2240 if (p->sched_contributes_to_load) 2241 rq->nr_uninterruptible--; 2242 2243 if (wake_flags & WF_MIGRATED) 2244 en_flags |= ENQUEUE_MIGRATED; 2245 #endif 2246 2247 activate_task(rq, p, en_flags); 2248 ttwu_do_wakeup(rq, p, wake_flags, rf); 2249 } 2250 2251 /* 2252 * Called in case the task @p isn't fully descheduled from its runqueue, 2253 * in this case we must do a remote wakeup. Its a 'light' wakeup though, 2254 * since all we need to do is flip p->state to TASK_RUNNING, since 2255 * the task is still ->on_rq. 
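 *
 * Illustrative interleaving (a sketch of one way to get here):
 *
 *	CPU0 (@p)				CPU1 (waker)
 *	set_current_state(TASK_INTERRUPTIBLE)
 *	<preempted before completing
 *	 schedule(), so still queued>		try_to_wake_up(p)
 *						  sees p->on_rq != 0
 *						  ttwu_remote(p)
 *						    p->state = TASK_RUNNING
 *	later runs schedule(), which sees
 *	TASK_RUNNING and keeps @p queued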
2256 */ 2257 static int ttwu_remote(struct task_struct *p, int wake_flags) 2258 { 2259 struct rq_flags rf; 2260 struct rq *rq; 2261 int ret = 0; 2262 2263 rq = __task_rq_lock(p, &rf); 2264 if (task_on_rq_queued(p)) { 2265 /* check_preempt_curr() may use rq clock */ 2266 update_rq_clock(rq); 2267 ttwu_do_wakeup(rq, p, wake_flags, &rf); 2268 ret = 1; 2269 } 2270 __task_rq_unlock(rq, &rf); 2271 2272 return ret; 2273 } 2274 2275 #ifdef CONFIG_SMP 2276 void sched_ttwu_pending(void *arg) 2277 { 2278 struct llist_node *llist = arg; 2279 struct rq *rq = this_rq(); 2280 struct task_struct *p, *t; 2281 struct rq_flags rf; 2282 2283 if (!llist) 2284 return; 2285 2286 /* 2287 * rq::ttwu_pending racy indication of out-standing wakeups. 2288 * Races such that false-negatives are possible, since they 2289 * are shorter lived that false-positives would be. 2290 */ 2291 WRITE_ONCE(rq->ttwu_pending, 0); 2292 2293 rq_lock_irqsave(rq, &rf); 2294 update_rq_clock(rq); 2295 2296 llist_for_each_entry_safe(p, t, llist, wake_entry) 2297 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf); 2298 2299 rq_unlock_irqrestore(rq, &rf); 2300 } 2301 2302 void send_call_function_single_ipi(int cpu) 2303 { 2304 struct rq *rq = cpu_rq(cpu); 2305 2306 if (!set_nr_if_polling(rq->idle)) 2307 arch_send_call_function_single_ipi(cpu); 2308 else 2309 trace_sched_wake_idle_without_ipi(cpu); 2310 } 2311 2312 /* 2313 * Queue a task on the target CPUs wake_list and wake the CPU via IPI if 2314 * necessary. The wakee CPU on receipt of the IPI will queue the task 2315 * via sched_ttwu_wakeup() for activation so the wakee incurs the cost 2316 * of the wakeup instead of the waker. 2317 */ 2318 static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) 2319 { 2320 struct rq *rq = cpu_rq(cpu); 2321 2322 p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED); 2323 2324 WRITE_ONCE(rq->ttwu_pending, 1); 2325 __smp_call_single_queue(cpu, &p->wake_entry); 2326 } 2327 2328 void wake_up_if_idle(int cpu) 2329 { 2330 struct rq *rq = cpu_rq(cpu); 2331 struct rq_flags rf; 2332 2333 rcu_read_lock(); 2334 2335 if (!is_idle_task(rcu_dereference(rq->curr))) 2336 goto out; 2337 2338 if (set_nr_if_polling(rq->idle)) { 2339 trace_sched_wake_idle_without_ipi(cpu); 2340 } else { 2341 rq_lock_irqsave(rq, &rf); 2342 if (is_idle_task(rq->curr)) 2343 smp_send_reschedule(cpu); 2344 /* Else CPU is not idle, do nothing here: */ 2345 rq_unlock_irqrestore(rq, &rf); 2346 } 2347 2348 out: 2349 rcu_read_unlock(); 2350 } 2351 2352 bool cpus_share_cache(int this_cpu, int that_cpu) 2353 { 2354 return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu); 2355 } 2356 2357 static inline bool ttwu_queue_cond(int cpu, int wake_flags) 2358 { 2359 /* 2360 * If the CPU does not share cache, then queue the task on the 2361 * remote rqs wakelist to avoid accessing remote data. 2362 */ 2363 if (!cpus_share_cache(smp_processor_id(), cpu)) 2364 return true; 2365 2366 /* 2367 * If the task is descheduling and the only running task on the 2368 * CPU then use the wakelist to offload the task activation to 2369 * the soon-to-be-idle CPU as the current CPU is likely busy. 2370 * nr_running is checked to avoid unnecessary task stacking. 
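 *
 * Concretely (sketch): @p is still scheduling out on a remote CPU and is
 * that CPU's only runnable task.  Instead of the waker spinning on
 * p->on_cpu, queue @p on that CPU's wakelist and send at most one IPI;
 * the soon-to-be-idle CPU activates @p itself once its context switch
 * completes, and the waker carries on immediately.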
2371 */ 2372 if ((wake_flags & WF_ON_RQ) && cpu_rq(cpu)->nr_running <= 1) 2373 return true; 2374 2375 return false; 2376 } 2377 2378 static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) 2379 { 2380 if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(cpu, wake_flags)) { 2381 sched_clock_cpu(cpu); /* Sync clocks across CPUs */ 2382 __ttwu_queue_wakelist(p, cpu, wake_flags); 2383 return true; 2384 } 2385 2386 return false; 2387 } 2388 #endif /* CONFIG_SMP */ 2389 2390 static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags) 2391 { 2392 struct rq *rq = cpu_rq(cpu); 2393 struct rq_flags rf; 2394 2395 #if defined(CONFIG_SMP) 2396 if (ttwu_queue_wakelist(p, cpu, wake_flags)) 2397 return; 2398 #endif 2399 2400 rq_lock(rq, &rf); 2401 update_rq_clock(rq); 2402 ttwu_do_activate(rq, p, wake_flags, &rf); 2403 rq_unlock(rq, &rf); 2404 } 2405 2406 /* 2407 * Notes on Program-Order guarantees on SMP systems. 2408 * 2409 * MIGRATION 2410 * 2411 * The basic program-order guarantee on SMP systems is that when a task [t] 2412 * migrates, all its activity on its old CPU [c0] happens-before any subsequent 2413 * execution on its new CPU [c1]. 2414 * 2415 * For migration (of runnable tasks) this is provided by the following means: 2416 * 2417 * A) UNLOCK of the rq(c0)->lock scheduling out task t 2418 * B) migration for t is required to synchronize *both* rq(c0)->lock and 2419 * rq(c1)->lock (if not at the same time, then in that order). 2420 * C) LOCK of the rq(c1)->lock scheduling in task 2421 * 2422 * Release/acquire chaining guarantees that B happens after A and C after B. 2423 * Note: the CPU doing B need not be c0 or c1 2424 * 2425 * Example: 2426 * 2427 * CPU0 CPU1 CPU2 2428 * 2429 * LOCK rq(0)->lock 2430 * sched-out X 2431 * sched-in Y 2432 * UNLOCK rq(0)->lock 2433 * 2434 * LOCK rq(0)->lock // orders against CPU0 2435 * dequeue X 2436 * UNLOCK rq(0)->lock 2437 * 2438 * LOCK rq(1)->lock 2439 * enqueue X 2440 * UNLOCK rq(1)->lock 2441 * 2442 * LOCK rq(1)->lock // orders against CPU2 2443 * sched-out Z 2444 * sched-in X 2445 * UNLOCK rq(1)->lock 2446 * 2447 * 2448 * BLOCKING -- aka. SLEEP + WAKEUP 2449 * 2450 * For blocking we (obviously) need to provide the same guarantee as for 2451 * migration. However the means are completely different as there is no lock 2452 * chain to provide order. Instead we do: 2453 * 2454 * 1) smp_store_release(X->on_cpu, 0) 2455 * 2) smp_cond_load_acquire(!X->on_cpu) 2456 * 2457 * Example: 2458 * 2459 * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule) 2460 * 2461 * LOCK rq(0)->lock LOCK X->pi_lock 2462 * dequeue X 2463 * sched-out X 2464 * smp_store_release(X->on_cpu, 0); 2465 * 2466 * smp_cond_load_acquire(&X->on_cpu, !VAL); 2467 * X->state = WAKING 2468 * set_task_cpu(X,2) 2469 * 2470 * LOCK rq(2)->lock 2471 * enqueue X 2472 * X->state = RUNNING 2473 * UNLOCK rq(2)->lock 2474 * 2475 * LOCK rq(2)->lock // orders against CPU1 2476 * sched-out Z 2477 * sched-in X 2478 * UNLOCK rq(2)->lock 2479 * 2480 * UNLOCK X->pi_lock 2481 * UNLOCK rq(0)->lock 2482 * 2483 * 2484 * However, for wakeups there is a second guarantee we must provide, namely we 2485 * must ensure that CONDITION=1 done by the caller can not be reordered with 2486 * accesses to the task state; see try_to_wake_up() and set_current_state(). 
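 *
 * The canonical pattern these rules protect (illustrative sketch):
 *
 *	waiter:
 *		for (;;) {
 *			set_current_state(TASK_UNINTERRUPTIBLE);
 *			if (CONDITION)
 *				break;
 *			schedule();
 *		}
 *		__set_current_state(TASK_RUNNING);
 *
 *	waker:
 *		CONDITION = 1;
 *		wake_up_process(p);
 *
 * set_current_state() orders the ->state store against the subsequent
 * CONDITION load, and try_to_wake_up() orders the waker's CONDITION
 * store against its ->state load (smp_mb__after_spinlock()), so either
 * the waiter observes CONDITION=1 and skips schedule(), or the waker
 * observes the sleeping ->state and wakes it; the wakeup cannot be lost.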
2487 */ 2488 2489 /** 2490 * try_to_wake_up - wake up a thread 2491 * @p: the thread to be awakened 2492 * @state: the mask of task states that can be woken 2493 * @wake_flags: wake modifier flags (WF_*) 2494 * 2495 * If (@state & @p->state) @p->state = TASK_RUNNING. 2496 * 2497 * If the task was not queued/runnable, also place it back on a runqueue. 2498 * 2499 * Atomic against schedule() which would dequeue a task, also see 2500 * set_current_state(). 2501 * 2502 * This function executes a full memory barrier before accessing the task 2503 * state; see set_current_state(). 2504 * 2505 * Return: %true if @p->state changes (an actual wakeup was done), 2506 * %false otherwise. 2507 */ 2508 static int 2509 try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) 2510 { 2511 unsigned long flags; 2512 int cpu, success = 0; 2513 2514 preempt_disable(); 2515 if (p == current) { 2516 /* 2517 * We're waking current, this means 'p->on_rq' and 'task_cpu(p) 2518 * == smp_processor_id()'. Together this means we can special 2519 * case the whole 'p->on_rq && ttwu_remote()' case below 2520 * without taking any locks. 2521 * 2522 * In particular: 2523 * - we rely on Program-Order guarantees for all the ordering, 2524 * - we're serialized against set_special_state() by virtue of 2525 * it disabling IRQs (this allows not taking ->pi_lock). 2526 */ 2527 if (!(p->state & state)) 2528 goto out; 2529 2530 success = 1; 2531 cpu = task_cpu(p); 2532 trace_sched_waking(p); 2533 p->state = TASK_RUNNING; 2534 trace_sched_wakeup(p); 2535 goto out; 2536 } 2537 2538 /* 2539 * If we are going to wake up a thread waiting for CONDITION we 2540 * need to ensure that CONDITION=1 done by the caller can not be 2541 * reordered with p->state check below. This pairs with mb() in 2542 * set_current_state() the waiting thread does. 2543 */ 2544 raw_spin_lock_irqsave(&p->pi_lock, flags); 2545 smp_mb__after_spinlock(); 2546 if (!(p->state & state)) 2547 goto unlock; 2548 2549 trace_sched_waking(p); 2550 2551 /* We're going to change ->state: */ 2552 success = 1; 2553 cpu = task_cpu(p); 2554 2555 /* 2556 * Ensure we load p->on_rq _after_ p->state, otherwise it would 2557 * be possible to, falsely, observe p->on_rq == 0 and get stuck 2558 * in smp_cond_load_acquire() below. 2559 * 2560 * sched_ttwu_pending() try_to_wake_up() 2561 * STORE p->on_rq = 1 LOAD p->state 2562 * UNLOCK rq->lock 2563 * 2564 * __schedule() (switch to task 'p') 2565 * LOCK rq->lock smp_rmb(); 2566 * smp_mb__after_spinlock(); 2567 * UNLOCK rq->lock 2568 * 2569 * [task p] 2570 * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq 2571 * 2572 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in 2573 * __schedule(). See the comment for smp_mb__after_spinlock(). 2574 * 2575 * A similar smb_rmb() lives in try_invoke_on_locked_down_task(). 2576 */ 2577 smp_rmb(); 2578 if (p->on_rq && ttwu_remote(p, wake_flags)) 2579 goto unlock; 2580 2581 if (p->in_iowait) { 2582 delayacct_blkio_end(p); 2583 atomic_dec(&task_rq(p)->nr_iowait); 2584 } 2585 2586 #ifdef CONFIG_SMP 2587 p->sched_contributes_to_load = !!task_contributes_to_load(p); 2588 p->state = TASK_WAKING; 2589 2590 /* 2591 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be 2592 * possible to, falsely, observe p->on_cpu == 0. 2593 * 2594 * One must be running (->on_cpu == 1) in order to remove oneself 2595 * from the runqueue. 
2596 * 2597 * __schedule() (switch to task 'p') try_to_wake_up() 2598 * STORE p->on_cpu = 1 LOAD p->on_rq 2599 * UNLOCK rq->lock 2600 * 2601 * __schedule() (put 'p' to sleep) 2602 * LOCK rq->lock smp_rmb(); 2603 * smp_mb__after_spinlock(); 2604 * STORE p->on_rq = 0 LOAD p->on_cpu 2605 * 2606 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in 2607 * __schedule(). See the comment for smp_mb__after_spinlock(). 2608 */ 2609 smp_rmb(); 2610 2611 /* 2612 * If the owning (remote) CPU is still in the middle of schedule() with 2613 * this task as prev, considering queueing p on the remote CPUs wake_list 2614 * which potentially sends an IPI instead of spinning on p->on_cpu to 2615 * let the waker make forward progress. This is safe because IRQs are 2616 * disabled and the IPI will deliver after on_cpu is cleared. 2617 */ 2618 if (READ_ONCE(p->on_cpu) && ttwu_queue_wakelist(p, cpu, wake_flags | WF_ON_RQ)) 2619 goto unlock; 2620 2621 /* 2622 * If the owning (remote) CPU is still in the middle of schedule() with 2623 * this task as prev, wait until its done referencing the task. 2624 * 2625 * Pairs with the smp_store_release() in finish_task(). 2626 * 2627 * This ensures that tasks getting woken will be fully ordered against 2628 * their previous state and preserve Program Order. 2629 */ 2630 smp_cond_load_acquire(&p->on_cpu, !VAL); 2631 2632 cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags); 2633 if (task_cpu(p) != cpu) { 2634 wake_flags |= WF_MIGRATED; 2635 psi_ttwu_dequeue(p); 2636 set_task_cpu(p, cpu); 2637 } 2638 #endif /* CONFIG_SMP */ 2639 2640 ttwu_queue(p, cpu, wake_flags); 2641 unlock: 2642 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 2643 out: 2644 if (success) 2645 ttwu_stat(p, cpu, wake_flags); 2646 preempt_enable(); 2647 2648 return success; 2649 } 2650 2651 /** 2652 * try_invoke_on_locked_down_task - Invoke a function on task in fixed state 2653 * @p: Process for which the function is to be invoked. 2654 * @func: Function to invoke. 2655 * @arg: Argument to function. 2656 * 2657 * If the specified task can be quickly locked into a definite state 2658 * (either sleeping or on a given runqueue), arrange to keep it in that 2659 * state while invoking @func(@arg). This function can use ->on_rq and 2660 * task_curr() to work out what the state is, if required. Given that 2661 * @func can be invoked with a runqueue lock held, it had better be quite 2662 * lightweight. 2663 * 2664 * Returns: 2665 * @false if the task slipped out from under the locks. 2666 * @true if the task was locked onto a runqueue or is sleeping. 2667 * However, @func can override this by returning @false. 2668 */ 2669 bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct task_struct *t, void *arg), void *arg) 2670 { 2671 bool ret = false; 2672 struct rq_flags rf; 2673 struct rq *rq; 2674 2675 lockdep_assert_irqs_enabled(); 2676 raw_spin_lock_irq(&p->pi_lock); 2677 if (p->on_rq) { 2678 rq = __task_rq_lock(p, &rf); 2679 if (task_rq(p) == rq) 2680 ret = func(p, arg); 2681 rq_unlock(rq, &rf); 2682 } else { 2683 switch (p->state) { 2684 case TASK_RUNNING: 2685 case TASK_WAKING: 2686 break; 2687 default: 2688 smp_rmb(); // See smp_rmb() comment in try_to_wake_up(). 2689 if (!p->on_rq) 2690 ret = func(p, arg); 2691 } 2692 } 2693 raw_spin_unlock_irq(&p->pi_lock); 2694 return ret; 2695 } 2696 2697 /** 2698 * wake_up_process - Wake up a specific process 2699 * @p: The process to be woken up. 
2700 * 2701 * Attempt to wake up the nominated process and move it to the set of runnable 2702 * processes. 2703 * 2704 * Return: 1 if the process was woken up, 0 if it was already running. 2705 * 2706 * This function executes a full memory barrier before accessing the task state. 2707 */ 2708 int wake_up_process(struct task_struct *p) 2709 { 2710 return try_to_wake_up(p, TASK_NORMAL, 0); 2711 } 2712 EXPORT_SYMBOL(wake_up_process); 2713 2714 int wake_up_state(struct task_struct *p, unsigned int state) 2715 { 2716 return try_to_wake_up(p, state, 0); 2717 } 2718 2719 /* 2720 * Perform scheduler related setup for a newly forked process p. 2721 * p is forked by current. 2722 * 2723 * __sched_fork() is basic setup used by init_idle() too: 2724 */ 2725 static void __sched_fork(unsigned long clone_flags, struct task_struct *p) 2726 { 2727 p->on_rq = 0; 2728 2729 p->se.on_rq = 0; 2730 p->se.exec_start = 0; 2731 p->se.sum_exec_runtime = 0; 2732 p->se.prev_sum_exec_runtime = 0; 2733 p->se.nr_migrations = 0; 2734 p->se.vruntime = 0; 2735 INIT_LIST_HEAD(&p->se.group_node); 2736 2737 #ifdef CONFIG_FAIR_GROUP_SCHED 2738 p->se.cfs_rq = NULL; 2739 #endif 2740 2741 #ifdef CONFIG_SCHEDSTATS 2742 /* Even if schedstat is disabled, there should not be garbage */ 2743 memset(&p->se.statistics, 0, sizeof(p->se.statistics)); 2744 #endif 2745 2746 RB_CLEAR_NODE(&p->dl.rb_node); 2747 init_dl_task_timer(&p->dl); 2748 init_dl_inactive_task_timer(&p->dl); 2749 __dl_clear_params(p); 2750 2751 INIT_LIST_HEAD(&p->rt.run_list); 2752 p->rt.timeout = 0; 2753 p->rt.time_slice = sched_rr_timeslice; 2754 p->rt.on_rq = 0; 2755 p->rt.on_list = 0; 2756 2757 #ifdef CONFIG_PREEMPT_NOTIFIERS 2758 INIT_HLIST_HEAD(&p->preempt_notifiers); 2759 #endif 2760 2761 #ifdef CONFIG_COMPACTION 2762 p->capture_control = NULL; 2763 #endif 2764 init_numa_balancing(clone_flags, p); 2765 #ifdef CONFIG_SMP 2766 p->wake_entry_type = CSD_TYPE_TTWU; 2767 #endif 2768 } 2769 2770 DEFINE_STATIC_KEY_FALSE(sched_numa_balancing); 2771 2772 #ifdef CONFIG_NUMA_BALANCING 2773 2774 void set_numabalancing_state(bool enabled) 2775 { 2776 if (enabled) 2777 static_branch_enable(&sched_numa_balancing); 2778 else 2779 static_branch_disable(&sched_numa_balancing); 2780 } 2781 2782 #ifdef CONFIG_PROC_SYSCTL 2783 int sysctl_numa_balancing(struct ctl_table *table, int write, 2784 void *buffer, size_t *lenp, loff_t *ppos) 2785 { 2786 struct ctl_table t; 2787 int err; 2788 int state = static_branch_likely(&sched_numa_balancing); 2789 2790 if (write && !capable(CAP_SYS_ADMIN)) 2791 return -EPERM; 2792 2793 t = *table; 2794 t.data = &state; 2795 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); 2796 if (err < 0) 2797 return err; 2798 if (write) 2799 set_numabalancing_state(state); 2800 return err; 2801 } 2802 #endif 2803 #endif 2804 2805 #ifdef CONFIG_SCHEDSTATS 2806 2807 DEFINE_STATIC_KEY_FALSE(sched_schedstats); 2808 static bool __initdata __sched_schedstats = false; 2809 2810 static void set_schedstats(bool enabled) 2811 { 2812 if (enabled) 2813 static_branch_enable(&sched_schedstats); 2814 else 2815 static_branch_disable(&sched_schedstats); 2816 } 2817 2818 void force_schedstat_enabled(void) 2819 { 2820 if (!schedstat_enabled()) { 2821 pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n"); 2822 static_branch_enable(&sched_schedstats); 2823 } 2824 } 2825 2826 static int __init setup_schedstats(char *str) 2827 { 2828 int ret = 0; 2829 if (!str) 2830 goto out; 2831 2832 /* 2833 * This code is called before jump labels have been set 
up, so we can't 2834 * change the static branch directly just yet. Instead set a temporary 2835 * variable so init_schedstats() can do it later. 2836 */ 2837 if (!strcmp(str, "enable")) { 2838 __sched_schedstats = true; 2839 ret = 1; 2840 } else if (!strcmp(str, "disable")) { 2841 __sched_schedstats = false; 2842 ret = 1; 2843 } 2844 out: 2845 if (!ret) 2846 pr_warn("Unable to parse schedstats=\n"); 2847 2848 return ret; 2849 } 2850 __setup("schedstats=", setup_schedstats); 2851 2852 static void __init init_schedstats(void) 2853 { 2854 set_schedstats(__sched_schedstats); 2855 } 2856 2857 #ifdef CONFIG_PROC_SYSCTL 2858 int sysctl_schedstats(struct ctl_table *table, int write, void *buffer, 2859 size_t *lenp, loff_t *ppos) 2860 { 2861 struct ctl_table t; 2862 int err; 2863 int state = static_branch_likely(&sched_schedstats); 2864 2865 if (write && !capable(CAP_SYS_ADMIN)) 2866 return -EPERM; 2867 2868 t = *table; 2869 t.data = &state; 2870 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); 2871 if (err < 0) 2872 return err; 2873 if (write) 2874 set_schedstats(state); 2875 return err; 2876 } 2877 #endif /* CONFIG_PROC_SYSCTL */ 2878 #else /* !CONFIG_SCHEDSTATS */ 2879 static inline void init_schedstats(void) {} 2880 #endif /* CONFIG_SCHEDSTATS */ 2881 2882 /* 2883 * fork()/clone()-time setup: 2884 */ 2885 int sched_fork(unsigned long clone_flags, struct task_struct *p) 2886 { 2887 unsigned long flags; 2888 2889 __sched_fork(clone_flags, p); 2890 /* 2891 * We mark the process as NEW here. This guarantees that 2892 * nobody will actually run it, and a signal or other external 2893 * event cannot wake it up and insert it on the runqueue either. 2894 */ 2895 p->state = TASK_NEW; 2896 2897 /* 2898 * Make sure we do not leak PI boosting priority to the child. 2899 */ 2900 p->prio = current->normal_prio; 2901 2902 uclamp_fork(p); 2903 2904 /* 2905 * Revert to default priority/policy on fork if requested. 2906 */ 2907 if (unlikely(p->sched_reset_on_fork)) { 2908 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { 2909 p->policy = SCHED_NORMAL; 2910 p->static_prio = NICE_TO_PRIO(0); 2911 p->rt_priority = 0; 2912 } else if (PRIO_TO_NICE(p->static_prio) < 0) 2913 p->static_prio = NICE_TO_PRIO(0); 2914 2915 p->prio = p->normal_prio = __normal_prio(p); 2916 set_load_weight(p, false); 2917 2918 /* 2919 * We don't need the reset flag anymore after the fork. It has 2920 * fulfilled its duty: 2921 */ 2922 p->sched_reset_on_fork = 0; 2923 } 2924 2925 if (dl_prio(p->prio)) 2926 return -EAGAIN; 2927 else if (rt_prio(p->prio)) 2928 p->sched_class = &rt_sched_class; 2929 else 2930 p->sched_class = &fair_sched_class; 2931 2932 init_entity_runnable_average(&p->se); 2933 2934 /* 2935 * The child is not yet in the pid-hash so no cgroup attach races, 2936 * and the cgroup is pinned to this child due to cgroup_fork() 2937 * is ran before sched_fork(). 2938 * 2939 * Silence PROVE_RCU. 2940 */ 2941 raw_spin_lock_irqsave(&p->pi_lock, flags); 2942 /* 2943 * We're setting the CPU for the first time, we don't migrate, 2944 * so use __set_task_cpu(). 
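 *
 * (Illustrative contrast: set_task_cpu() would additionally run the
 *  class ->migrate_task_rq() hook, bump migration statistics and fire
 *  the migration tracepoint, plus assert that the task isn't currently
 *  running; none of that applies to a task that has never been on a
 *  runqueue, so __set_task_cpu() just records the CPU.)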
2945 */ 2946 __set_task_cpu(p, smp_processor_id()); 2947 if (p->sched_class->task_fork) 2948 p->sched_class->task_fork(p); 2949 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 2950 2951 #ifdef CONFIG_SCHED_INFO 2952 if (likely(sched_info_on())) 2953 memset(&p->sched_info, 0, sizeof(p->sched_info)); 2954 #endif 2955 #if defined(CONFIG_SMP) 2956 p->on_cpu = 0; 2957 #endif 2958 init_task_preempt_count(p); 2959 #ifdef CONFIG_SMP 2960 plist_node_init(&p->pushable_tasks, MAX_PRIO); 2961 RB_CLEAR_NODE(&p->pushable_dl_tasks); 2962 #endif 2963 return 0; 2964 } 2965 2966 unsigned long to_ratio(u64 period, u64 runtime) 2967 { 2968 if (runtime == RUNTIME_INF) 2969 return BW_UNIT; 2970 2971 /* 2972 * Doing this here saves a lot of checks in all 2973 * the calling paths, and returning zero seems 2974 * safe for them anyway. 2975 */ 2976 if (period == 0) 2977 return 0; 2978 2979 return div64_u64(runtime << BW_SHIFT, period); 2980 } 2981 2982 /* 2983 * wake_up_new_task - wake up a newly created task for the first time. 2984 * 2985 * This function will do some initial scheduler statistics housekeeping 2986 * that must be done for every newly created context, then puts the task 2987 * on the runqueue and wakes it. 2988 */ 2989 void wake_up_new_task(struct task_struct *p) 2990 { 2991 struct rq_flags rf; 2992 struct rq *rq; 2993 2994 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); 2995 p->state = TASK_RUNNING; 2996 #ifdef CONFIG_SMP 2997 /* 2998 * Fork balancing, do it here and not earlier because: 2999 * - cpus_ptr can change in the fork path 3000 * - any previously selected CPU might disappear through hotplug 3001 * 3002 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq, 3003 * as we're not fully set-up yet. 3004 */ 3005 p->recent_used_cpu = task_cpu(p); 3006 __set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0)); 3007 #endif 3008 rq = __task_rq_lock(p, &rf); 3009 update_rq_clock(rq); 3010 post_init_entity_util_avg(p); 3011 3012 activate_task(rq, p, ENQUEUE_NOCLOCK); 3013 trace_sched_wakeup_new(p); 3014 check_preempt_curr(rq, p, WF_FORK); 3015 #ifdef CONFIG_SMP 3016 if (p->sched_class->task_woken) { 3017 /* 3018 * Nothing relies on rq->lock after this, so its fine to 3019 * drop it. 3020 */ 3021 rq_unpin_lock(rq, &rf); 3022 p->sched_class->task_woken(rq, p); 3023 rq_repin_lock(rq, &rf); 3024 } 3025 #endif 3026 task_rq_unlock(rq, p, &rf); 3027 } 3028 3029 #ifdef CONFIG_PREEMPT_NOTIFIERS 3030 3031 static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key); 3032 3033 void preempt_notifier_inc(void) 3034 { 3035 static_branch_inc(&preempt_notifier_key); 3036 } 3037 EXPORT_SYMBOL_GPL(preempt_notifier_inc); 3038 3039 void preempt_notifier_dec(void) 3040 { 3041 static_branch_dec(&preempt_notifier_key); 3042 } 3043 EXPORT_SYMBOL_GPL(preempt_notifier_dec); 3044 3045 /** 3046 * preempt_notifier_register - tell me when current is being preempted & rescheduled 3047 * @notifier: notifier struct to register 3048 */ 3049 void preempt_notifier_register(struct preempt_notifier *notifier) 3050 { 3051 if (!static_branch_unlikely(&preempt_notifier_key)) 3052 WARN(1, "registering preempt_notifier while notifiers disabled\n"); 3053 3054 hlist_add_head(¬ifier->link, ¤t->preempt_notifiers); 3055 } 3056 EXPORT_SYMBOL_GPL(preempt_notifier_register); 3057 3058 /** 3059 * preempt_notifier_unregister - no longer interested in preemption notifications 3060 * @notifier: notifier struct to unregister 3061 * 3062 * This is *not* safe to call from within a preemption notifier. 
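 *
 * Typical usage (illustrative sketch; the my_* names are made up, only
 * the preempt_notifier_*() and struct preempt_ops API is real):
 *
 *	static void my_sched_in(struct preempt_notifier *pn, int cpu) { ... }
 *	static void my_sched_out(struct preempt_notifier *pn,
 *				 struct task_struct *next) { ... }
 *
 *	static struct preempt_ops my_ops = {
 *		.sched_in	= my_sched_in,
 *		.sched_out	= my_sched_out,
 *	};
 *	static struct preempt_notifier my_pn;
 *
 *	preempt_notifier_inc();			// enable the static key
 *	preempt_notifier_init(&my_pn, &my_ops);
 *	preempt_notifier_register(&my_pn);	// registers for 'current' only
 *	...
 *	preempt_notifier_unregister(&my_pn);
 *	preempt_notifier_dec();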
3063 */ 3064 void preempt_notifier_unregister(struct preempt_notifier *notifier) 3065 { 3066 hlist_del(¬ifier->link); 3067 } 3068 EXPORT_SYMBOL_GPL(preempt_notifier_unregister); 3069 3070 static void __fire_sched_in_preempt_notifiers(struct task_struct *curr) 3071 { 3072 struct preempt_notifier *notifier; 3073 3074 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) 3075 notifier->ops->sched_in(notifier, raw_smp_processor_id()); 3076 } 3077 3078 static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) 3079 { 3080 if (static_branch_unlikely(&preempt_notifier_key)) 3081 __fire_sched_in_preempt_notifiers(curr); 3082 } 3083 3084 static void 3085 __fire_sched_out_preempt_notifiers(struct task_struct *curr, 3086 struct task_struct *next) 3087 { 3088 struct preempt_notifier *notifier; 3089 3090 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) 3091 notifier->ops->sched_out(notifier, next); 3092 } 3093 3094 static __always_inline void 3095 fire_sched_out_preempt_notifiers(struct task_struct *curr, 3096 struct task_struct *next) 3097 { 3098 if (static_branch_unlikely(&preempt_notifier_key)) 3099 __fire_sched_out_preempt_notifiers(curr, next); 3100 } 3101 3102 #else /* !CONFIG_PREEMPT_NOTIFIERS */ 3103 3104 static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) 3105 { 3106 } 3107 3108 static inline void 3109 fire_sched_out_preempt_notifiers(struct task_struct *curr, 3110 struct task_struct *next) 3111 { 3112 } 3113 3114 #endif /* CONFIG_PREEMPT_NOTIFIERS */ 3115 3116 static inline void prepare_task(struct task_struct *next) 3117 { 3118 #ifdef CONFIG_SMP 3119 /* 3120 * Claim the task as running, we do this before switching to it 3121 * such that any running task will have this set. 3122 */ 3123 next->on_cpu = 1; 3124 #endif 3125 } 3126 3127 static inline void finish_task(struct task_struct *prev) 3128 { 3129 #ifdef CONFIG_SMP 3130 /* 3131 * After ->on_cpu is cleared, the task can be moved to a different CPU. 3132 * We must ensure this doesn't happen until the switch is completely 3133 * finished. 3134 * 3135 * In particular, the load of prev->state in finish_task_switch() must 3136 * happen before this. 3137 * 3138 * Pairs with the smp_cond_load_acquire() in try_to_wake_up(). 
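 *
 * I.e. (sketch):
 *
 *	__schedule() on CPU0			try_to_wake_up() on CPU1
 *	  loads prev->state
 *	  finish_task(prev):
 *	    smp_store_release(&prev->on_cpu, 0)
 *						smp_cond_load_acquire(&p->on_cpu, !VAL)
 *						everything CPU0 did with @p is now
 *						visible, so @p can safely be migrated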
3139 */ 3140 smp_store_release(&prev->on_cpu, 0); 3141 #endif 3142 } 3143 3144 static inline void 3145 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf) 3146 { 3147 /* 3148 * Since the runqueue lock will be released by the next 3149 * task (which is an invalid locking op but in the case 3150 * of the scheduler it's an obvious special-case), so we 3151 * do an early lockdep release here: 3152 */ 3153 rq_unpin_lock(rq, rf); 3154 spin_release(&rq->lock.dep_map, _THIS_IP_); 3155 #ifdef CONFIG_DEBUG_SPINLOCK 3156 /* this is a valid case when another task releases the spinlock */ 3157 rq->lock.owner = next; 3158 #endif 3159 } 3160 3161 static inline void finish_lock_switch(struct rq *rq) 3162 { 3163 /* 3164 * If we are tracking spinlock dependencies then we have to 3165 * fix up the runqueue lock - which gets 'carried over' from 3166 * prev into current: 3167 */ 3168 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); 3169 raw_spin_unlock_irq(&rq->lock); 3170 } 3171 3172 /* 3173 * NOP if the arch has not defined these: 3174 */ 3175 3176 #ifndef prepare_arch_switch 3177 # define prepare_arch_switch(next) do { } while (0) 3178 #endif 3179 3180 #ifndef finish_arch_post_lock_switch 3181 # define finish_arch_post_lock_switch() do { } while (0) 3182 #endif 3183 3184 /** 3185 * prepare_task_switch - prepare to switch tasks 3186 * @rq: the runqueue preparing to switch 3187 * @prev: the current task that is being switched out 3188 * @next: the task we are going to switch to. 3189 * 3190 * This is called with the rq lock held and interrupts off. It must 3191 * be paired with a subsequent finish_task_switch after the context 3192 * switch. 3193 * 3194 * prepare_task_switch sets up locking and calls architecture specific 3195 * hooks. 3196 */ 3197 static inline void 3198 prepare_task_switch(struct rq *rq, struct task_struct *prev, 3199 struct task_struct *next) 3200 { 3201 kcov_prepare_switch(prev); 3202 sched_info_switch(rq, prev, next); 3203 perf_event_task_sched_out(prev, next); 3204 rseq_preempt(prev); 3205 fire_sched_out_preempt_notifiers(prev, next); 3206 prepare_task(next); 3207 prepare_arch_switch(next); 3208 } 3209 3210 /** 3211 * finish_task_switch - clean up after a task-switch 3212 * @prev: the thread we just switched away from. 3213 * 3214 * finish_task_switch must be called after the context switch, paired 3215 * with a prepare_task_switch call before the context switch. 3216 * finish_task_switch will reconcile locking set up by prepare_task_switch, 3217 * and do any other architecture-specific cleanup actions. 3218 * 3219 * Note that we may have delayed dropping an mm in context_switch(). If 3220 * so, we finish that here outside of the runqueue lock. (Doing it 3221 * with the lock held can cause deadlocks; see schedule() for 3222 * details.) 3223 * 3224 * The context switch have flipped the stack from under us and restored the 3225 * local variables which were saved when this task called schedule() in the 3226 * past. prev == current is still correct but we need to recalculate this_rq 3227 * because prev may have moved to another CPU. 
3228 */ 3229 static struct rq *finish_task_switch(struct task_struct *prev) 3230 __releases(rq->lock) 3231 { 3232 struct rq *rq = this_rq(); 3233 struct mm_struct *mm = rq->prev_mm; 3234 long prev_state; 3235 3236 /* 3237 * The previous task will have left us with a preempt_count of 2 3238 * because it left us after: 3239 * 3240 * schedule() 3241 * preempt_disable(); // 1 3242 * __schedule() 3243 * raw_spin_lock_irq(&rq->lock) // 2 3244 * 3245 * Also, see FORK_PREEMPT_COUNT. 3246 */ 3247 if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET, 3248 "corrupted preempt_count: %s/%d/0x%x\n", 3249 current->comm, current->pid, preempt_count())) 3250 preempt_count_set(FORK_PREEMPT_COUNT); 3251 3252 rq->prev_mm = NULL; 3253 3254 /* 3255 * A task struct has one reference for the use as "current". 3256 * If a task dies, then it sets TASK_DEAD in tsk->state and calls 3257 * schedule one last time. The schedule call will never return, and 3258 * the scheduled task must drop that reference. 3259 * 3260 * We must observe prev->state before clearing prev->on_cpu (in 3261 * finish_task), otherwise a concurrent wakeup can get prev 3262 * running on another CPU and we could race with its RUNNING -> DEAD 3263 * transition, resulting in a double drop. 3264 */ 3265 prev_state = prev->state; 3266 vtime_task_switch(prev); 3267 perf_event_task_sched_in(prev, current); 3268 finish_task(prev); 3269 finish_lock_switch(rq); 3270 finish_arch_post_lock_switch(); 3271 kcov_finish_switch(current); 3272 3273 fire_sched_in_preempt_notifiers(current); 3274 /* 3275 * When switching through a kernel thread, the loop in 3276 * membarrier_{private,global}_expedited() may have observed that 3277 * kernel thread and not issued an IPI. It is therefore possible to 3278 * schedule between user->kernel->user threads without passing through 3279 * switch_mm(). Membarrier requires a barrier after storing to 3280 * rq->curr, before returning to userspace, so provide them here: 3281 * 3282 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly 3283 * provided by mmdrop(), 3284 * - a sync_core for SYNC_CORE. 3285 */ 3286 if (mm) { 3287 membarrier_mm_sync_core_before_usermode(mm); 3288 mmdrop(mm); 3289 } 3290 if (unlikely(prev_state == TASK_DEAD)) { 3291 if (prev->sched_class->task_dead) 3292 prev->sched_class->task_dead(prev); 3293 3294 /* 3295 * Remove function-return probe instances associated with this 3296 * task and put them back on the free list. 3297 */ 3298 kprobe_flush_task(prev); 3299 3300 /* Task is done with its stack.
*/ 3301 put_task_stack(prev); 3302 3303 put_task_struct_rcu_user(prev); 3304 } 3305 3306 tick_nohz_task_switch(); 3307 return rq; 3308 } 3309 3310 #ifdef CONFIG_SMP 3311 3312 /* rq->lock is NOT held, but preemption is disabled */ 3313 static void __balance_callback(struct rq *rq) 3314 { 3315 struct callback_head *head, *next; 3316 void (*func)(struct rq *rq); 3317 unsigned long flags; 3318 3319 raw_spin_lock_irqsave(&rq->lock, flags); 3320 head = rq->balance_callback; 3321 rq->balance_callback = NULL; 3322 while (head) { 3323 func = (void (*)(struct rq *))head->func; 3324 next = head->next; 3325 head->next = NULL; 3326 head = next; 3327 3328 func(rq); 3329 } 3330 raw_spin_unlock_irqrestore(&rq->lock, flags); 3331 } 3332 3333 static inline void balance_callback(struct rq *rq) 3334 { 3335 if (unlikely(rq->balance_callback)) 3336 __balance_callback(rq); 3337 } 3338 3339 #else 3340 3341 static inline void balance_callback(struct rq *rq) 3342 { 3343 } 3344 3345 #endif 3346 3347 /** 3348 * schedule_tail - first thing a freshly forked thread must call. 3349 * @prev: the thread we just switched away from. 3350 */ 3351 asmlinkage __visible void schedule_tail(struct task_struct *prev) 3352 __releases(rq->lock) 3353 { 3354 struct rq *rq; 3355 3356 /* 3357 * New tasks start with FORK_PREEMPT_COUNT, see there and 3358 * finish_task_switch() for details. 3359 * 3360 * finish_task_switch() will drop rq->lock() and lower preempt_count 3361 * and the preempt_enable() will end up enabling preemption (on 3362 * PREEMPT_COUNT kernels). 3363 */ 3364 3365 rq = finish_task_switch(prev); 3366 balance_callback(rq); 3367 preempt_enable(); 3368 3369 if (current->set_child_tid) 3370 put_user(task_pid_vnr(current), current->set_child_tid); 3371 3372 calculate_sigpending(); 3373 } 3374 3375 /* 3376 * context_switch - switch to the new MM and the new thread's register state. 3377 */ 3378 static __always_inline struct rq * 3379 context_switch(struct rq *rq, struct task_struct *prev, 3380 struct task_struct *next, struct rq_flags *rf) 3381 { 3382 prepare_task_switch(rq, prev, next); 3383 3384 /* 3385 * For paravirt, this is coupled with an exit in switch_to to 3386 * combine the page table reload and the switch backend into 3387 * one hypercall. 3388 */ 3389 arch_start_context_switch(prev); 3390 3391 /* 3392 * kernel -> kernel lazy + transfer active 3393 * user -> kernel lazy + mmgrab() active 3394 * 3395 * kernel -> user switch + mmdrop() active 3396 * user -> user switch 3397 */ 3398 if (!next->mm) { // to kernel 3399 enter_lazy_tlb(prev->active_mm, next); 3400 3401 next->active_mm = prev->active_mm; 3402 if (prev->mm) // from user 3403 mmgrab(prev->active_mm); 3404 else 3405 prev->active_mm = NULL; 3406 } else { // to user 3407 membarrier_switch_mm(rq, prev->active_mm, next->mm); 3408 /* 3409 * sys_membarrier() requires an smp_mb() between setting 3410 * rq->curr / membarrier_switch_mm() and returning to userspace. 3411 * 3412 * The below provides this either through switch_mm(), or in 3413 * case 'prev->active_mm == next->mm' through 3414 * finish_task_switch()'s mmdrop(). 3415 */ 3416 switch_mm_irqs_off(prev->active_mm, next->mm, next); 3417 3418 if (!prev->mm) { // from kernel 3419 /* will mmdrop() in finish_task_switch(). */ 3420 rq->prev_mm = prev->active_mm; 3421 prev->active_mm = NULL; 3422 } 3423 } 3424 3425 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP); 3426 3427 prepare_lock_switch(rq, next, rf); 3428 3429 /* Here we just switch the register state and the stack. 
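 *
 * (switch_to() is the arch-specific low-level switch: it saves @prev's
 *  callee-saved registers and stack pointer and restores @next's, so
 *  execution continues in @next, which then runs finish_task_switch()
 *  below on behalf of @prev.  The barrier() keeps the compiler from
 *  carrying pre-switch assumptions across the switch.  Illustrative
 *  summary only; the details live in <asm/switch_to.h>.)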
*/ 3430 switch_to(prev, next, prev); 3431 barrier(); 3432 3433 return finish_task_switch(prev); 3434 } 3435 3436 /* 3437 * nr_running and nr_context_switches: 3438 * 3439 * externally visible scheduler statistics: current number of runnable 3440 * threads, total number of context switches performed since bootup. 3441 */ 3442 unsigned long nr_running(void) 3443 { 3444 unsigned long i, sum = 0; 3445 3446 for_each_online_cpu(i) 3447 sum += cpu_rq(i)->nr_running; 3448 3449 return sum; 3450 } 3451 3452 /* 3453 * Check if only the current task is running on the CPU. 3454 * 3455 * Caution: this function does not check that the caller has disabled 3456 * preemption, thus the result might have a time-of-check-to-time-of-use 3457 * race. The caller is responsible to use it correctly, for example: 3458 * 3459 * - from a non-preemptible section (of course) 3460 * 3461 * - from a thread that is bound to a single CPU 3462 * 3463 * - in a loop with very short iterations (e.g. a polling loop) 3464 */ 3465 bool single_task_running(void) 3466 { 3467 return raw_rq()->nr_running == 1; 3468 } 3469 EXPORT_SYMBOL(single_task_running); 3470 3471 unsigned long long nr_context_switches(void) 3472 { 3473 int i; 3474 unsigned long long sum = 0; 3475 3476 for_each_possible_cpu(i) 3477 sum += cpu_rq(i)->nr_switches; 3478 3479 return sum; 3480 } 3481 3482 /* 3483 * Consumers of these two interfaces, like for example the cpuidle menu 3484 * governor, are using nonsensical data. Preferring shallow idle state selection 3485 * for a CPU that has IO-wait which might not even end up running the task when 3486 * it does become runnable. 3487 */ 3488 3489 unsigned long nr_iowait_cpu(int cpu) 3490 { 3491 return atomic_read(&cpu_rq(cpu)->nr_iowait); 3492 } 3493 3494 /* 3495 * IO-wait accounting, and how its mostly bollocks (on SMP). 3496 * 3497 * The idea behind IO-wait account is to account the idle time that we could 3498 * have spend running if it were not for IO. That is, if we were to improve the 3499 * storage performance, we'd have a proportional reduction in IO-wait time. 3500 * 3501 * This all works nicely on UP, where, when a task blocks on IO, we account 3502 * idle time as IO-wait, because if the storage were faster, it could've been 3503 * running and we'd not be idle. 3504 * 3505 * This has been extended to SMP, by doing the same for each CPU. This however 3506 * is broken. 3507 * 3508 * Imagine for instance the case where two tasks block on one CPU, only the one 3509 * CPU will have IO-wait accounted, while the other has regular idle. Even 3510 * though, if the storage were faster, both could've ran at the same time, 3511 * utilising both CPUs. 3512 * 3513 * This means, that when looking globally, the current IO-wait accounting on 3514 * SMP is a lower bound, by reason of under accounting. 3515 * 3516 * Worse, since the numbers are provided per CPU, they are sometimes 3517 * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly 3518 * associated with any one particular CPU, it can wake to another CPU than it 3519 * blocked on. This means the per CPU IO-wait number is meaningless. 3520 * 3521 * Task CPU affinities can make all that even more 'interesting'. 
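 *
 * Worked example (illustrative): on a 2-CPU box, tasks A and B both
 * block on IO from CPU0 while CPU1 has nothing to run.  The per-CPU
 * view charges CPU0's idle time to iowait and CPU1's to plain idle,
 * i.e. one CPU's worth of iowait.  Had the storage been instant, A and
 * B could have run concurrently, one per CPU, so up to two CPUs' worth
 * of compute was lost to IO; the reported number is the lower bound
 * mentioned above.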
3522 */ 3523 3524 unsigned long nr_iowait(void) 3525 { 3526 unsigned long i, sum = 0; 3527 3528 for_each_possible_cpu(i) 3529 sum += nr_iowait_cpu(i); 3530 3531 return sum; 3532 } 3533 3534 #ifdef CONFIG_SMP 3535 3536 /* 3537 * sched_exec - execve() is a valuable balancing opportunity, because at 3538 * this point the task has the smallest effective memory and cache footprint. 3539 */ 3540 void sched_exec(void) 3541 { 3542 struct task_struct *p = current; 3543 unsigned long flags; 3544 int dest_cpu; 3545 3546 raw_spin_lock_irqsave(&p->pi_lock, flags); 3547 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0); 3548 if (dest_cpu == smp_processor_id()) 3549 goto unlock; 3550 3551 if (likely(cpu_active(dest_cpu))) { 3552 struct migration_arg arg = { p, dest_cpu }; 3553 3554 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 3555 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); 3556 return; 3557 } 3558 unlock: 3559 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 3560 } 3561 3562 #endif 3563 3564 DEFINE_PER_CPU(struct kernel_stat, kstat); 3565 DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat); 3566 3567 EXPORT_PER_CPU_SYMBOL(kstat); 3568 EXPORT_PER_CPU_SYMBOL(kernel_cpustat); 3569 3570 /* 3571 * The function fair_sched_class.update_curr accesses the struct curr 3572 * and its field curr->exec_start; when called from task_sched_runtime(), 3573 * we observe a high rate of cache misses in practice. 3574 * Prefetching this data results in improved performance. 3575 */ 3576 static inline void prefetch_curr_exec_start(struct task_struct *p) 3577 { 3578 #ifdef CONFIG_FAIR_GROUP_SCHED 3579 struct sched_entity *curr = (&p->se)->cfs_rq->curr; 3580 #else 3581 struct sched_entity *curr = (&task_rq(p)->cfs)->curr; 3582 #endif 3583 prefetch(curr); 3584 prefetch(&curr->exec_start); 3585 } 3586 3587 /* 3588 * Return accounted runtime for the task. 3589 * In case the task is currently running, return the runtime plus current's 3590 * pending runtime that have not been accounted yet. 3591 */ 3592 unsigned long long task_sched_runtime(struct task_struct *p) 3593 { 3594 struct rq_flags rf; 3595 struct rq *rq; 3596 u64 ns; 3597 3598 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP) 3599 /* 3600 * 64-bit doesn't need locks to atomically read a 64-bit value. 3601 * So we have a optimization chance when the task's delta_exec is 0. 3602 * Reading ->on_cpu is racy, but this is ok. 3603 * 3604 * If we race with it leaving CPU, we'll take a lock. So we're correct. 3605 * If we race with it entering CPU, unaccounted time is 0. This is 3606 * indistinguishable from the read occurring a few cycles earlier. 3607 * If we see ->on_cpu without ->on_rq, the task is leaving, and has 3608 * been accounted, so we're correct here as well. 3609 */ 3610 if (!p->on_cpu || !task_on_rq_queued(p)) 3611 return p->se.sum_exec_runtime; 3612 #endif 3613 3614 rq = task_rq_lock(p, &rf); 3615 /* 3616 * Must be ->curr _and_ ->on_rq. If dequeued, we would 3617 * project cycles that may never be accounted to this 3618 * thread, breaking clock_gettime(). 
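 *
 * This is, for instance, what a thread CPU-time clock read from
 * userspace ends up sampling (illustrative sketch, error handling
 * omitted):
 *
 *	clockid_t cid;
 *	struct timespec ts;
 *
 *	pthread_getcpuclockid(thread, &cid);
 *	clock_gettime(cid, &ts);	// ~ task_sched_runtime() of 'thread'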
3619 */ 3620 if (task_current(rq, p) && task_on_rq_queued(p)) { 3621 prefetch_curr_exec_start(p); 3622 update_rq_clock(rq); 3623 p->sched_class->update_curr(rq); 3624 } 3625 ns = p->se.sum_exec_runtime; 3626 task_rq_unlock(rq, p, &rf); 3627 3628 return ns; 3629 } 3630 3631 DEFINE_PER_CPU(unsigned long, thermal_pressure); 3632 3633 void arch_set_thermal_pressure(struct cpumask *cpus, 3634 unsigned long th_pressure) 3635 { 3636 int cpu; 3637 3638 for_each_cpu(cpu, cpus) 3639 WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure); 3640 } 3641 3642 /* 3643 * This function gets called by the timer code, with HZ frequency. 3644 * We call it with interrupts disabled. 3645 */ 3646 void scheduler_tick(void) 3647 { 3648 int cpu = smp_processor_id(); 3649 struct rq *rq = cpu_rq(cpu); 3650 struct task_struct *curr = rq->curr; 3651 struct rq_flags rf; 3652 unsigned long thermal_pressure; 3653 3654 arch_scale_freq_tick(); 3655 sched_clock_tick(); 3656 3657 rq_lock(rq, &rf); 3658 3659 update_rq_clock(rq); 3660 thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq)); 3661 update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure); 3662 curr->sched_class->task_tick(rq, curr, 0); 3663 calc_global_load_tick(rq); 3664 psi_task_tick(rq); 3665 3666 rq_unlock(rq, &rf); 3667 3668 perf_event_task_tick(); 3669 3670 #ifdef CONFIG_SMP 3671 rq->idle_balance = idle_cpu(cpu); 3672 trigger_load_balance(rq); 3673 #endif 3674 } 3675 3676 #ifdef CONFIG_NO_HZ_FULL 3677 3678 struct tick_work { 3679 int cpu; 3680 atomic_t state; 3681 struct delayed_work work; 3682 }; 3683 /* Values for ->state, see diagram below. */ 3684 #define TICK_SCHED_REMOTE_OFFLINE 0 3685 #define TICK_SCHED_REMOTE_OFFLINING 1 3686 #define TICK_SCHED_REMOTE_RUNNING 2 3687 3688 /* 3689 * State diagram for ->state: 3690 * 3691 * 3692 * TICK_SCHED_REMOTE_OFFLINE 3693 * | ^ 3694 * | | 3695 * | | sched_tick_remote() 3696 * | | 3697 * | | 3698 * +--TICK_SCHED_REMOTE_OFFLINING 3699 * | ^ 3700 * | | 3701 * sched_tick_start() | | sched_tick_stop() 3702 * | | 3703 * V | 3704 * TICK_SCHED_REMOTE_RUNNING 3705 * 3706 * 3707 * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote() 3708 * and sched_tick_start() are happy to leave the state in RUNNING. 3709 */ 3710 3711 static struct tick_work __percpu *tick_work_cpu; 3712 3713 static void sched_tick_remote(struct work_struct *work) 3714 { 3715 struct delayed_work *dwork = to_delayed_work(work); 3716 struct tick_work *twork = container_of(dwork, struct tick_work, work); 3717 int cpu = twork->cpu; 3718 struct rq *rq = cpu_rq(cpu); 3719 struct task_struct *curr; 3720 struct rq_flags rf; 3721 u64 delta; 3722 int os; 3723 3724 /* 3725 * Handle the tick only if it appears the remote CPU is running in full 3726 * dynticks mode. The check is racy by nature, but missing a tick or 3727 * having one too much is no big deal because the scheduler tick updates 3728 * statistics and checks timeslices in a time-independent way, regardless 3729 * of when exactly it is running. 3730 */ 3731 if (!tick_nohz_tick_stopped_cpu(cpu)) 3732 goto out_requeue; 3733 3734 rq_lock_irq(rq, &rf); 3735 curr = rq->curr; 3736 if (cpu_is_offline(cpu)) 3737 goto out_unlock; 3738 3739 update_rq_clock(rq); 3740 3741 if (!is_idle_task(curr)) { 3742 /* 3743 * Make sure the next tick runs within a reasonable 3744 * amount of time. 
3745 */ 3746 delta = rq_clock_task(rq) - curr->se.exec_start; 3747 WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3); 3748 } 3749 curr->sched_class->task_tick(rq, curr, 0); 3750 3751 calc_load_nohz_remote(rq); 3752 out_unlock: 3753 rq_unlock_irq(rq, &rf); 3754 out_requeue: 3755 3756 /* 3757 * Run the remote tick once per second (1Hz). This arbitrary 3758 * frequency is large enough to avoid overload but short enough 3759 * to keep scheduler internal stats reasonably up to date. But 3760 * first update state to reflect hotplug activity if required. 3761 */ 3762 os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING); 3763 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE); 3764 if (os == TICK_SCHED_REMOTE_RUNNING) 3765 queue_delayed_work(system_unbound_wq, dwork, HZ); 3766 } 3767 3768 static void sched_tick_start(int cpu) 3769 { 3770 int os; 3771 struct tick_work *twork; 3772 3773 if (housekeeping_cpu(cpu, HK_FLAG_TICK)) 3774 return; 3775 3776 WARN_ON_ONCE(!tick_work_cpu); 3777 3778 twork = per_cpu_ptr(tick_work_cpu, cpu); 3779 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING); 3780 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING); 3781 if (os == TICK_SCHED_REMOTE_OFFLINE) { 3782 twork->cpu = cpu; 3783 INIT_DELAYED_WORK(&twork->work, sched_tick_remote); 3784 queue_delayed_work(system_unbound_wq, &twork->work, HZ); 3785 } 3786 } 3787 3788 #ifdef CONFIG_HOTPLUG_CPU 3789 static void sched_tick_stop(int cpu) 3790 { 3791 struct tick_work *twork; 3792 int os; 3793 3794 if (housekeeping_cpu(cpu, HK_FLAG_TICK)) 3795 return; 3796 3797 WARN_ON_ONCE(!tick_work_cpu); 3798 3799 twork = per_cpu_ptr(tick_work_cpu, cpu); 3800 /* There cannot be competing actions, but don't rely on stop-machine. */ 3801 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING); 3802 WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING); 3803 /* Don't cancel, as this would mess up the state machine. */ 3804 } 3805 #endif /* CONFIG_HOTPLUG_CPU */ 3806 3807 int __init sched_tick_offload_init(void) 3808 { 3809 tick_work_cpu = alloc_percpu(struct tick_work); 3810 BUG_ON(!tick_work_cpu); 3811 return 0; 3812 } 3813 3814 #else /* !CONFIG_NO_HZ_FULL */ 3815 static inline void sched_tick_start(int cpu) { } 3816 static inline void sched_tick_stop(int cpu) { } 3817 #endif 3818 3819 #if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \ 3820 defined(CONFIG_TRACE_PREEMPT_TOGGLE)) 3821 /* 3822 * If the value passed in is equal to the current preempt count 3823 * then we just disabled preemption. Start timing the latency. 3824 */ 3825 static inline void preempt_latency_start(int val) 3826 { 3827 if (preempt_count() == val) { 3828 unsigned long ip = get_lock_parent_ip(); 3829 #ifdef CONFIG_DEBUG_PREEMPT 3830 current->preempt_disable_ip = ip; 3831 #endif 3832 trace_preempt_off(CALLER_ADDR0, ip); 3833 } 3834 } 3835 3836 void preempt_count_add(int val) 3837 { 3838 #ifdef CONFIG_DEBUG_PREEMPT 3839 /* 3840 * Underflow? 3841 */ 3842 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) 3843 return; 3844 #endif 3845 __preempt_count_add(val); 3846 #ifdef CONFIG_DEBUG_PREEMPT 3847 /* 3848 * Spinlock count overflowing soon? 3849 */ 3850 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= 3851 PREEMPT_MASK - 10); 3852 #endif 3853 preempt_latency_start(val); 3854 } 3855 EXPORT_SYMBOL(preempt_count_add); 3856 NOKPROBE_SYMBOL(preempt_count_add); 3857 3858 /* 3859 * If the value passed in equals to the current preempt count 3860 * then we just enabled preemption. Stop timing the latency. 
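 *
 * Example, starting from preempt_count() == 0 (sketch):
 *
 *	preempt_disable();	// 0 -> 1, preempt_latency_start() traces
 *	  preempt_disable();	// 1 -> 2, nested: no tracing
 *	  preempt_enable();	// 2 -> 1, nested: no tracing
 *	preempt_enable();	// 1 -> 0, preempt_latency_stop() traces
 *
 * i.e. only the outermost disable/enable pair is timed.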
3861 */ 3862 static inline void preempt_latency_stop(int val) 3863 { 3864 if (preempt_count() == val) 3865 trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip()); 3866 } 3867 3868 void preempt_count_sub(int val) 3869 { 3870 #ifdef CONFIG_DEBUG_PREEMPT 3871 /* 3872 * Underflow? 3873 */ 3874 if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) 3875 return; 3876 /* 3877 * Is the spinlock portion underflowing? 3878 */ 3879 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) && 3880 !(preempt_count() & PREEMPT_MASK))) 3881 return; 3882 #endif 3883 3884 preempt_latency_stop(val); 3885 __preempt_count_sub(val); 3886 } 3887 EXPORT_SYMBOL(preempt_count_sub); 3888 NOKPROBE_SYMBOL(preempt_count_sub); 3889 3890 #else 3891 static inline void preempt_latency_start(int val) { } 3892 static inline void preempt_latency_stop(int val) { } 3893 #endif 3894 3895 static inline unsigned long get_preempt_disable_ip(struct task_struct *p) 3896 { 3897 #ifdef CONFIG_DEBUG_PREEMPT 3898 return p->preempt_disable_ip; 3899 #else 3900 return 0; 3901 #endif 3902 } 3903 3904 /* 3905 * Print scheduling while atomic bug: 3906 */ 3907 static noinline void __schedule_bug(struct task_struct *prev) 3908 { 3909 /* Save this before calling printk(), since that will clobber it */ 3910 unsigned long preempt_disable_ip = get_preempt_disable_ip(current); 3911 3912 if (oops_in_progress) 3913 return; 3914 3915 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", 3916 prev->comm, prev->pid, preempt_count()); 3917 3918 debug_show_held_locks(prev); 3919 print_modules(); 3920 if (irqs_disabled()) 3921 print_irqtrace_events(prev); 3922 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT) 3923 && in_atomic_preempt_off()) { 3924 pr_err("Preemption disabled at:"); 3925 print_ip_sym(KERN_ERR, preempt_disable_ip); 3926 } 3927 if (panic_on_warn) 3928 panic("scheduling while atomic\n"); 3929 3930 dump_stack(); 3931 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 3932 } 3933 3934 /* 3935 * Various schedule()-time debugging checks and statistics: 3936 */ 3937 static inline void schedule_debug(struct task_struct *prev, bool preempt) 3938 { 3939 #ifdef CONFIG_SCHED_STACK_END_CHECK 3940 if (task_stack_end_corrupted(prev)) 3941 panic("corrupted stack end detected inside scheduler\n"); 3942 3943 if (task_scs_end_corrupted(prev)) 3944 panic("corrupted shadow stack detected inside scheduler\n"); 3945 #endif 3946 3947 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 3948 if (!preempt && prev->state && prev->non_block_count) { 3949 printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n", 3950 prev->comm, prev->pid, prev->non_block_count); 3951 dump_stack(); 3952 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 3953 } 3954 #endif 3955 3956 if (unlikely(in_atomic_preempt_off())) { 3957 __schedule_bug(prev); 3958 preempt_count_set(PREEMPT_DISABLED); 3959 } 3960 rcu_sleep_check(); 3961 3962 profile_hit(SCHED_PROFILING, __builtin_return_address(0)); 3963 3964 schedstat_inc(this_rq()->sched_count); 3965 } 3966 3967 static void put_prev_task_balance(struct rq *rq, struct task_struct *prev, 3968 struct rq_flags *rf) 3969 { 3970 #ifdef CONFIG_SMP 3971 const struct sched_class *class; 3972 /* 3973 * We must do the balancing pass before put_prev_task(), such 3974 * that when we release the rq->lock the task is in the same 3975 * state as before we took rq->lock. 3976 * 3977 * We can terminate the balance pass as soon as we know there is 3978 * a runnable task of @class priority or higher. 
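 *
 * E.g. (sketch): with the class order stop > dl > rt > fair > idle and
 * an RT @prev, the range below visits rt_sched_class and then
 * fair_sched_class, stopping at the first ->balance() that reports (or
 * pulls) runnable work of its own priority or above.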
3979 */ 3980 for_class_range(class, prev->sched_class, &idle_sched_class) { 3981 if (class->balance(rq, prev, rf)) 3982 break; 3983 } 3984 #endif 3985 3986 put_prev_task(rq, prev); 3987 } 3988 3989 /* 3990 * Pick up the highest-prio task: 3991 */ 3992 static inline struct task_struct * 3993 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 3994 { 3995 const struct sched_class *class; 3996 struct task_struct *p; 3997 3998 /* 3999 * Optimization: we know that if all tasks are in the fair class we can 4000 * call that function directly, but only if the @prev task wasn't of a 4001 * higher scheduling class, because otherwise those lose the 4002 * opportunity to pull in more work from other CPUs. 4003 */ 4004 if (likely((prev->sched_class == &idle_sched_class || 4005 prev->sched_class == &fair_sched_class) && 4006 rq->nr_running == rq->cfs.h_nr_running)) { 4007 4008 p = pick_next_task_fair(rq, prev, rf); 4009 if (unlikely(p == RETRY_TASK)) 4010 goto restart; 4011 4012 /* Assumes fair_sched_class->next == idle_sched_class */ 4013 if (!p) { 4014 put_prev_task(rq, prev); 4015 p = pick_next_task_idle(rq); 4016 } 4017 4018 return p; 4019 } 4020 4021 restart: 4022 put_prev_task_balance(rq, prev, rf); 4023 4024 for_each_class(class) { 4025 p = class->pick_next_task(rq); 4026 if (p) 4027 return p; 4028 } 4029 4030 /* The idle class should always have a runnable task: */ 4031 BUG(); 4032 } 4033 4034 /* 4035 * __schedule() is the main scheduler function. 4036 * 4037 * The main means of driving the scheduler and thus entering this function are: 4038 * 4039 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc. 4040 * 4041 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return 4042 * paths. For example, see arch/x86/entry_64.S. 4043 * 4044 * To drive preemption between tasks, the scheduler sets the flag in timer 4045 * interrupt handler scheduler_tick(). 4046 * 4047 * 3. Wakeups don't really cause entry into schedule(). They add a 4048 * task to the run-queue and that's it. 4049 * 4050 * Now, if the new task added to the run-queue preempts the current 4051 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets 4052 * called on the nearest possible occasion: 4053 * 4054 * - If the kernel is preemptible (CONFIG_PREEMPTION=y): 4055 * 4056 * - in syscall or exception context, at the next outermost 4057 * preempt_enable(). (this might be as soon as the wake_up()'s 4058 * spin_unlock()!) 4059 * 4060 * - in IRQ context, return from interrupt-handler to 4061 * preemptible context 4062 * 4063 * - If the kernel is not preemptible (CONFIG_PREEMPTION is not set) 4064 * then at the next: 4065 * 4066 * - cond_resched() call 4067 * - explicit schedule() call 4068 * - return from syscall or exception to user-space 4069 * - return from interrupt-handler to user-space 4070 * 4071 * WARNING: must be called with preemption disabled!
4072 */ 4073 static void __sched notrace __schedule(bool preempt) 4074 { 4075 struct task_struct *prev, *next; 4076 unsigned long *switch_count; 4077 struct rq_flags rf; 4078 struct rq *rq; 4079 int cpu; 4080 4081 cpu = smp_processor_id(); 4082 rq = cpu_rq(cpu); 4083 prev = rq->curr; 4084 4085 schedule_debug(prev, preempt); 4086 4087 if (sched_feat(HRTICK)) 4088 hrtick_clear(rq); 4089 4090 local_irq_disable(); 4091 rcu_note_context_switch(preempt); 4092 4093 /* 4094 * Make sure that signal_pending_state()->signal_pending() below 4095 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) 4096 * done by the caller to avoid the race with signal_wake_up(). 4097 * 4098 * The membarrier system call requires a full memory barrier 4099 * after coming from user-space, before storing to rq->curr. 4100 */ 4101 rq_lock(rq, &rf); 4102 smp_mb__after_spinlock(); 4103 4104 /* Promote REQ to ACT */ 4105 rq->clock_update_flags <<= 1; 4106 update_rq_clock(rq); 4107 4108 switch_count = &prev->nivcsw; 4109 if (!preempt && prev->state) { 4110 if (signal_pending_state(prev->state, prev)) { 4111 prev->state = TASK_RUNNING; 4112 } else { 4113 deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK); 4114 4115 if (prev->in_iowait) { 4116 atomic_inc(&rq->nr_iowait); 4117 delayacct_blkio_start(); 4118 } 4119 } 4120 switch_count = &prev->nvcsw; 4121 } 4122 4123 next = pick_next_task(rq, prev, &rf); 4124 clear_tsk_need_resched(prev); 4125 clear_preempt_need_resched(); 4126 4127 if (likely(prev != next)) { 4128 rq->nr_switches++; 4129 /* 4130 * RCU users of rcu_dereference(rq->curr) may not see 4131 * changes to task_struct made by pick_next_task(). 4132 */ 4133 RCU_INIT_POINTER(rq->curr, next); 4134 /* 4135 * The membarrier system call requires each architecture 4136 * to have a full memory barrier after updating 4137 * rq->curr, before returning to user-space. 4138 * 4139 * Here are the schemes providing that barrier on the 4140 * various architectures: 4141 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC. 4142 * switch_mm() rely on membarrier_arch_switch_mm() on PowerPC. 4143 * - finish_lock_switch() for weakly-ordered 4144 * architectures where spin_unlock is a full barrier, 4145 * - switch_to() for arm64 (weakly-ordered, spin_unlock 4146 * is a RELEASE barrier), 4147 */ 4148 ++*switch_count; 4149 4150 psi_sched_switch(prev, next, !task_on_rq_queued(prev)); 4151 4152 trace_sched_switch(preempt, prev, next); 4153 4154 /* Also unlocks the rq: */ 4155 rq = context_switch(rq, prev, next, &rf); 4156 } else { 4157 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP); 4158 rq_unlock_irq(rq, &rf); 4159 } 4160 4161 balance_callback(rq); 4162 } 4163 4164 void __noreturn do_task_dead(void) 4165 { 4166 /* Causes final put_task_struct in finish_task_switch(): */ 4167 set_special_state(TASK_DEAD); 4168 4169 /* Tell freezer to ignore us: */ 4170 current->flags |= PF_NOFREEZE; 4171 4172 __schedule(false); 4173 BUG(); 4174 4175 /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */ 4176 for (;;) 4177 cpu_relax(); 4178 } 4179 4180 static inline void sched_submit_work(struct task_struct *tsk) 4181 { 4182 if (!tsk->state) 4183 return; 4184 4185 /* 4186 * If a worker went to sleep, notify and ask workqueue whether 4187 * it wants to wake up a task to maintain concurrency. 
4188 * As this function is called inside the schedule() context, 4189 * we disable preemption to avoid it calling schedule() again 4190 * in the possible wakeup of a kworker and because wq_worker_sleeping() 4191 * requires it. 4192 */ 4193 if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) { 4194 preempt_disable(); 4195 if (tsk->flags & PF_WQ_WORKER) 4196 wq_worker_sleeping(tsk); 4197 else 4198 io_wq_worker_sleeping(tsk); 4199 preempt_enable_no_resched(); 4200 } 4201 4202 if (tsk_is_pi_blocked(tsk)) 4203 return; 4204 4205 /* 4206 * If we are going to sleep and we have plugged IO queued, 4207 * make sure to submit it to avoid deadlocks. 4208 */ 4209 if (blk_needs_flush_plug(tsk)) 4210 blk_schedule_flush_plug(tsk); 4211 } 4212 4213 static void sched_update_worker(struct task_struct *tsk) 4214 { 4215 if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) { 4216 if (tsk->flags & PF_WQ_WORKER) 4217 wq_worker_running(tsk); 4218 else 4219 io_wq_worker_running(tsk); 4220 } 4221 } 4222 4223 asmlinkage __visible void __sched schedule(void) 4224 { 4225 struct task_struct *tsk = current; 4226 4227 sched_submit_work(tsk); 4228 do { 4229 preempt_disable(); 4230 __schedule(false); 4231 sched_preempt_enable_no_resched(); 4232 } while (need_resched()); 4233 sched_update_worker(tsk); 4234 } 4235 EXPORT_SYMBOL(schedule); 4236 4237 /* 4238 * synchronize_rcu_tasks() makes sure that no task is stuck in preempted 4239 * state (have scheduled out non-voluntarily) by making sure that all 4240 * tasks have either left the run queue or have gone into user space. 4241 * As idle tasks do not do either, they must not ever be preempted 4242 * (schedule out non-voluntarily). 4243 * 4244 * schedule_idle() is similar to schedule_preempt_disable() except that it 4245 * never enables preemption because it does not call sched_submit_work(). 4246 */ 4247 void __sched schedule_idle(void) 4248 { 4249 /* 4250 * As this skips calling sched_submit_work(), which the idle task does 4251 * regardless because that function is a nop when the task is in a 4252 * TASK_RUNNING state, make sure this isn't used someplace that the 4253 * current task can be in any other state. Note, idle is always in the 4254 * TASK_RUNNING state. 4255 */ 4256 WARN_ON_ONCE(current->state); 4257 do { 4258 __schedule(false); 4259 } while (need_resched()); 4260 } 4261 4262 #ifdef CONFIG_CONTEXT_TRACKING 4263 asmlinkage __visible void __sched schedule_user(void) 4264 { 4265 /* 4266 * If we come here after a random call to set_need_resched(), 4267 * or we have been woken up remotely but the IPI has not yet arrived, 4268 * we haven't yet exited the RCU idle mode. Do it here manually until 4269 * we find a better solution. 4270 * 4271 * NB: There are buggy callers of this function. Ideally we 4272 * should warn if prev_state != CONTEXT_USER, but that will trigger 4273 * too frequently to make sense yet. 4274 */ 4275 enum ctx_state prev_state = exception_enter(); 4276 schedule(); 4277 exception_exit(prev_state); 4278 } 4279 #endif 4280 4281 /** 4282 * schedule_preempt_disabled - called with preemption disabled 4283 * 4284 * Returns with preemption disabled. 
Note: preempt_count must be 1 4285 */ 4286 void __sched schedule_preempt_disabled(void) 4287 { 4288 sched_preempt_enable_no_resched(); 4289 schedule(); 4290 preempt_disable(); 4291 } 4292 4293 static void __sched notrace preempt_schedule_common(void) 4294 { 4295 do { 4296 /* 4297 * Because the function tracer can trace preempt_count_sub() 4298 * and it also uses preempt_enable/disable_notrace(), if 4299 * NEED_RESCHED is set, the preempt_enable_notrace() called 4300 * by the function tracer will call this function again and 4301 * cause infinite recursion. 4302 * 4303 * Preemption must be disabled here before the function 4304 * tracer can trace. Break up preempt_disable() into two 4305 * calls. One to disable preemption without fear of being 4306 * traced. The other to still record the preemption latency, 4307 * which can also be traced by the function tracer. 4308 */ 4309 preempt_disable_notrace(); 4310 preempt_latency_start(1); 4311 __schedule(true); 4312 preempt_latency_stop(1); 4313 preempt_enable_no_resched_notrace(); 4314 4315 /* 4316 * Check again in case we missed a preemption opportunity 4317 * between schedule and now. 4318 */ 4319 } while (need_resched()); 4320 } 4321 4322 #ifdef CONFIG_PREEMPTION 4323 /* 4324 * This is the entry point to schedule() from in-kernel preemption 4325 * off of preempt_enable. 4326 */ 4327 asmlinkage __visible void __sched notrace preempt_schedule(void) 4328 { 4329 /* 4330 * If there is a non-zero preempt_count or interrupts are disabled, 4331 * we do not want to preempt the current task. Just return.. 4332 */ 4333 if (likely(!preemptible())) 4334 return; 4335 4336 preempt_schedule_common(); 4337 } 4338 NOKPROBE_SYMBOL(preempt_schedule); 4339 EXPORT_SYMBOL(preempt_schedule); 4340 4341 /** 4342 * preempt_schedule_notrace - preempt_schedule called by tracing 4343 * 4344 * The tracing infrastructure uses preempt_enable_notrace to prevent 4345 * recursion and tracing preempt enabling caused by the tracing 4346 * infrastructure itself. But as tracing can happen in areas coming 4347 * from userspace or just about to enter userspace, a preempt enable 4348 * can occur before user_exit() is called. This will cause the scheduler 4349 * to be called when the system is still in usermode. 4350 * 4351 * To prevent this, the preempt_enable_notrace will use this function 4352 * instead of preempt_schedule() to exit user context if needed before 4353 * calling the scheduler. 4354 */ 4355 asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) 4356 { 4357 enum ctx_state prev_ctx; 4358 4359 if (likely(!preemptible())) 4360 return; 4361 4362 do { 4363 /* 4364 * Because the function tracer can trace preempt_count_sub() 4365 * and it also uses preempt_enable/disable_notrace(), if 4366 * NEED_RESCHED is set, the preempt_enable_notrace() called 4367 * by the function tracer will call this function again and 4368 * cause infinite recursion. 4369 * 4370 * Preemption must be disabled here before the function 4371 * tracer can trace. Break up preempt_disable() into two 4372 * calls. One to disable preemption without fear of being 4373 * traced. The other to still record the preemption latency, 4374 * which can also be traced by the function tracer. 4375 */ 4376 preempt_disable_notrace(); 4377 preempt_latency_start(1); 4378 /* 4379 * Needs preempt disabled in case user_exit() is traced 4380 * and the tracer calls preempt_enable_notrace() causing 4381 * an infinite recursion. 
4382 */ 4383 prev_ctx = exception_enter(); 4384 __schedule(true); 4385 exception_exit(prev_ctx); 4386 4387 preempt_latency_stop(1); 4388 preempt_enable_no_resched_notrace(); 4389 } while (need_resched()); 4390 } 4391 EXPORT_SYMBOL_GPL(preempt_schedule_notrace); 4392 4393 #endif /* CONFIG_PREEMPTION */ 4394 4395 /* 4396 * This is the entry point to schedule() from kernel preemption 4397 * off of irq context. 4398 * Note that this is called and returns with irqs disabled. This will 4399 * protect us against recursive calling from irq. 4400 */ 4401 asmlinkage __visible void __sched preempt_schedule_irq(void) 4402 { 4403 enum ctx_state prev_state; 4404 4405 /* Catch callers which need to be fixed */ 4406 BUG_ON(preempt_count() || !irqs_disabled()); 4407 4408 prev_state = exception_enter(); 4409 4410 do { 4411 preempt_disable(); 4412 local_irq_enable(); 4413 __schedule(true); 4414 local_irq_disable(); 4415 sched_preempt_enable_no_resched(); 4416 } while (need_resched()); 4417 4418 exception_exit(prev_state); 4419 } 4420 4421 int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags, 4422 void *key) 4423 { 4424 return try_to_wake_up(curr->private, mode, wake_flags); 4425 } 4426 EXPORT_SYMBOL(default_wake_function); 4427 4428 #ifdef CONFIG_RT_MUTEXES 4429 4430 static inline int __rt_effective_prio(struct task_struct *pi_task, int prio) 4431 { 4432 if (pi_task) 4433 prio = min(prio, pi_task->prio); 4434 4435 return prio; 4436 } 4437 4438 static inline int rt_effective_prio(struct task_struct *p, int prio) 4439 { 4440 struct task_struct *pi_task = rt_mutex_get_top_task(p); 4441 4442 return __rt_effective_prio(pi_task, prio); 4443 } 4444 4445 /* 4446 * rt_mutex_setprio - set the current priority of a task 4447 * @p: task to boost 4448 * @pi_task: donor task 4449 * 4450 * This function changes the 'effective' priority of a task. It does 4451 * not touch ->normal_prio like __setscheduler(). 4452 * 4453 * Used by the rt_mutex code to implement priority inheritance 4454 * logic. Call site only calls if the priority of the task changed. 4455 */ 4456 void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) 4457 { 4458 int prio, oldprio, queued, running, queue_flag = 4459 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; 4460 const struct sched_class *prev_class; 4461 struct rq_flags rf; 4462 struct rq *rq; 4463 4464 /* XXX used to be waiter->prio, not waiter->task->prio */ 4465 prio = __rt_effective_prio(pi_task, p->normal_prio); 4466 4467 /* 4468 * If nothing changed, bail early. 4469 */ 4470 if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio)) 4471 return; 4472 4473 rq = __task_rq_lock(p, &rf); 4474 update_rq_clock(rq); 4475 /* 4476 * Set under pi_lock && rq->lock, such that the value can be used under 4477 * either lock. 4478 * 4479 * Note that there are loads of tricks needed to make this pointer cache 4480 * work right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to 4481 * ensure a task is de-boosted (pi_task is set to NULL) before the 4482 * task is allowed to run again (and can exit). This ensures the pointer 4483 * points to a blocked task -- which guarantees the task is present. 4484 */ 4485 p->pi_top_task = pi_task; 4486 4487 /* 4488 * For FIFO/RR we only need to set prio, if that matches we're done. 4489 */ 4490 if (prio == p->prio && !dl_prio(prio)) 4491 goto out_unlock; 4492 4493 /* 4494 * Idle task boosting is a no-no in general.
There is one 4495 * exception, when PREEMPT_RT and NOHZ is active: 4496 * 4497 * The idle task calls get_next_timer_interrupt() and holds 4498 * the timer wheel base->lock on the CPU and another CPU wants 4499 * to access the timer (probably to cancel it). We can safely 4500 * ignore the boosting request, as the idle CPU runs this code 4501 * with interrupts disabled and will complete the lock 4502 * protected section without being interrupted. So there is no 4503 * real need to boost. 4504 */ 4505 if (unlikely(p == rq->idle)) { 4506 WARN_ON(p != rq->curr); 4507 WARN_ON(p->pi_blocked_on); 4508 goto out_unlock; 4509 } 4510 4511 trace_sched_pi_setprio(p, pi_task); 4512 oldprio = p->prio; 4513 4514 if (oldprio == prio) 4515 queue_flag &= ~DEQUEUE_MOVE; 4516 4517 prev_class = p->sched_class; 4518 queued = task_on_rq_queued(p); 4519 running = task_current(rq, p); 4520 if (queued) 4521 dequeue_task(rq, p, queue_flag); 4522 if (running) 4523 put_prev_task(rq, p); 4524 4525 /* 4526 * Boosting condition are: 4527 * 1. -rt task is running and holds mutex A 4528 * --> -dl task blocks on mutex A 4529 * 4530 * 2. -dl task is running and holds mutex A 4531 * --> -dl task blocks on mutex A and could preempt the 4532 * running task 4533 */ 4534 if (dl_prio(prio)) { 4535 if (!dl_prio(p->normal_prio) || 4536 (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) { 4537 p->dl.dl_boosted = 1; 4538 queue_flag |= ENQUEUE_REPLENISH; 4539 } else 4540 p->dl.dl_boosted = 0; 4541 p->sched_class = &dl_sched_class; 4542 } else if (rt_prio(prio)) { 4543 if (dl_prio(oldprio)) 4544 p->dl.dl_boosted = 0; 4545 if (oldprio < prio) 4546 queue_flag |= ENQUEUE_HEAD; 4547 p->sched_class = &rt_sched_class; 4548 } else { 4549 if (dl_prio(oldprio)) 4550 p->dl.dl_boosted = 0; 4551 if (rt_prio(oldprio)) 4552 p->rt.timeout = 0; 4553 p->sched_class = &fair_sched_class; 4554 } 4555 4556 p->prio = prio; 4557 4558 if (queued) 4559 enqueue_task(rq, p, queue_flag); 4560 if (running) 4561 set_next_task(rq, p); 4562 4563 check_class_changed(rq, p, prev_class, oldprio); 4564 out_unlock: 4565 /* Avoid rq from going away on us: */ 4566 preempt_disable(); 4567 __task_rq_unlock(rq, &rf); 4568 4569 balance_callback(rq); 4570 preempt_enable(); 4571 } 4572 #else 4573 static inline int rt_effective_prio(struct task_struct *p, int prio) 4574 { 4575 return prio; 4576 } 4577 #endif 4578 4579 void set_user_nice(struct task_struct *p, long nice) 4580 { 4581 bool queued, running; 4582 int old_prio; 4583 struct rq_flags rf; 4584 struct rq *rq; 4585 4586 if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE) 4587 return; 4588 /* 4589 * We have to be careful, if called from sys_setpriority(), 4590 * the task might be in the middle of scheduling on another CPU. 
4591 */ 4592 rq = task_rq_lock(p, &rf); 4593 update_rq_clock(rq); 4594 4595 /* 4596 * The RT priorities are set via sched_setscheduler(), but we still 4597 * allow the 'normal' nice value to be set - but as expected 4598 * it wont have any effect on scheduling until the task is 4599 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR: 4600 */ 4601 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { 4602 p->static_prio = NICE_TO_PRIO(nice); 4603 goto out_unlock; 4604 } 4605 queued = task_on_rq_queued(p); 4606 running = task_current(rq, p); 4607 if (queued) 4608 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); 4609 if (running) 4610 put_prev_task(rq, p); 4611 4612 p->static_prio = NICE_TO_PRIO(nice); 4613 set_load_weight(p, true); 4614 old_prio = p->prio; 4615 p->prio = effective_prio(p); 4616 4617 if (queued) 4618 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); 4619 if (running) 4620 set_next_task(rq, p); 4621 4622 /* 4623 * If the task increased its priority or is running and 4624 * lowered its priority, then reschedule its CPU: 4625 */ 4626 p->sched_class->prio_changed(rq, p, old_prio); 4627 4628 out_unlock: 4629 task_rq_unlock(rq, p, &rf); 4630 } 4631 EXPORT_SYMBOL(set_user_nice); 4632 4633 /* 4634 * can_nice - check if a task can reduce its nice value 4635 * @p: task 4636 * @nice: nice value 4637 */ 4638 int can_nice(const struct task_struct *p, const int nice) 4639 { 4640 /* Convert nice value [19,-20] to rlimit style value [1,40]: */ 4641 int nice_rlim = nice_to_rlimit(nice); 4642 4643 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) || 4644 capable(CAP_SYS_NICE)); 4645 } 4646 4647 #ifdef __ARCH_WANT_SYS_NICE 4648 4649 /* 4650 * sys_nice - change the priority of the current process. 4651 * @increment: priority increment 4652 * 4653 * sys_setpriority is a more generic, but much slower function that 4654 * does similar things. 4655 */ 4656 SYSCALL_DEFINE1(nice, int, increment) 4657 { 4658 long nice, retval; 4659 4660 /* 4661 * Setpriority might change our priority at the same moment. 4662 * We don't have to worry. Conceptually one call occurs first 4663 * and we have a single winner. 4664 */ 4665 increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH); 4666 nice = task_nice(current) + increment; 4667 4668 nice = clamp_val(nice, MIN_NICE, MAX_NICE); 4669 if (increment < 0 && !can_nice(current, nice)) 4670 return -EPERM; 4671 4672 retval = security_task_setnice(current, nice); 4673 if (retval) 4674 return retval; 4675 4676 set_user_nice(current, nice); 4677 return 0; 4678 } 4679 4680 #endif 4681 4682 /** 4683 * task_prio - return the priority value of a given task. 4684 * @p: the task in question. 4685 * 4686 * Return: The priority value as seen by users in /proc. 4687 * RT tasks are offset by -200. Normal tasks are centered 4688 * around 0, value goes from -16 to +15. 4689 */ 4690 int task_prio(const struct task_struct *p) 4691 { 4692 return p->prio - MAX_RT_PRIO; 4693 } 4694 4695 /** 4696 * idle_cpu - is a given CPU idle currently? 4697 * @cpu: the processor in question. 4698 * 4699 * Return: 1 if the CPU is currently idle. 0 otherwise. 4700 */ 4701 int idle_cpu(int cpu) 4702 { 4703 struct rq *rq = cpu_rq(cpu); 4704 4705 if (rq->curr != rq->idle) 4706 return 0; 4707 4708 if (rq->nr_running) 4709 return 0; 4710 4711 #ifdef CONFIG_SMP 4712 if (rq->ttwu_pending) 4713 return 0; 4714 #endif 4715 4716 return 1; 4717 } 4718 4719 /** 4720 * available_idle_cpu - is a given CPU idle for enqueuing work. 4721 * @cpu: the CPU in question. 4722 * 4723 * Return: 1 if the CPU is currently idle. 
0 otherwise. 4724 */ 4725 int available_idle_cpu(int cpu) 4726 { 4727 if (!idle_cpu(cpu)) 4728 return 0; 4729 4730 if (vcpu_is_preempted(cpu)) 4731 return 0; 4732 4733 return 1; 4734 } 4735 4736 /** 4737 * idle_task - return the idle task for a given CPU. 4738 * @cpu: the processor in question. 4739 * 4740 * Return: The idle task for the CPU @cpu. 4741 */ 4742 struct task_struct *idle_task(int cpu) 4743 { 4744 return cpu_rq(cpu)->idle; 4745 } 4746 4747 /** 4748 * find_process_by_pid - find a process with a matching PID value. 4749 * @pid: the pid in question. 4750 * 4751 * The task of @pid, if found. %NULL otherwise. 4752 */ 4753 static struct task_struct *find_process_by_pid(pid_t pid) 4754 { 4755 return pid ? find_task_by_vpid(pid) : current; 4756 } 4757 4758 /* 4759 * sched_setparam() passes in -1 for its policy, to let the functions 4760 * it calls know not to change it. 4761 */ 4762 #define SETPARAM_POLICY -1 4763 4764 static void __setscheduler_params(struct task_struct *p, 4765 const struct sched_attr *attr) 4766 { 4767 int policy = attr->sched_policy; 4768 4769 if (policy == SETPARAM_POLICY) 4770 policy = p->policy; 4771 4772 p->policy = policy; 4773 4774 if (dl_policy(policy)) 4775 __setparam_dl(p, attr); 4776 else if (fair_policy(policy)) 4777 p->static_prio = NICE_TO_PRIO(attr->sched_nice); 4778 4779 /* 4780 * __sched_setscheduler() ensures attr->sched_priority == 0 when 4781 * !rt_policy. Always setting this ensures that things like 4782 * getparam()/getattr() don't report silly values for !rt tasks. 4783 */ 4784 p->rt_priority = attr->sched_priority; 4785 p->normal_prio = normal_prio(p); 4786 set_load_weight(p, true); 4787 } 4788 4789 /* Actually do priority change: must hold pi & rq lock. */ 4790 static void __setscheduler(struct rq *rq, struct task_struct *p, 4791 const struct sched_attr *attr, bool keep_boost) 4792 { 4793 /* 4794 * If params can't change scheduling class changes aren't allowed 4795 * either. 4796 */ 4797 if (attr->sched_flags & SCHED_FLAG_KEEP_PARAMS) 4798 return; 4799 4800 __setscheduler_params(p, attr); 4801 4802 /* 4803 * Keep a potential priority boosting if called from 4804 * sched_setscheduler(). 4805 */ 4806 p->prio = normal_prio(p); 4807 if (keep_boost) 4808 p->prio = rt_effective_prio(p, p->prio); 4809 4810 if (dl_prio(p->prio)) 4811 p->sched_class = &dl_sched_class; 4812 else if (rt_prio(p->prio)) 4813 p->sched_class = &rt_sched_class; 4814 else 4815 p->sched_class = &fair_sched_class; 4816 } 4817 4818 /* 4819 * Check the target process has a UID that matches the current process's: 4820 */ 4821 static bool check_same_owner(struct task_struct *p) 4822 { 4823 const struct cred *cred = current_cred(), *pcred; 4824 bool match; 4825 4826 rcu_read_lock(); 4827 pcred = __task_cred(p); 4828 match = (uid_eq(cred->euid, pcred->euid) || 4829 uid_eq(cred->euid, pcred->uid)); 4830 rcu_read_unlock(); 4831 return match; 4832 } 4833 4834 static int __sched_setscheduler(struct task_struct *p, 4835 const struct sched_attr *attr, 4836 bool user, bool pi) 4837 { 4838 int newprio = dl_policy(attr->sched_policy) ? 
MAX_DL_PRIO - 1 : 4839 MAX_RT_PRIO - 1 - attr->sched_priority; 4840 int retval, oldprio, oldpolicy = -1, queued, running; 4841 int new_effective_prio, policy = attr->sched_policy; 4842 const struct sched_class *prev_class; 4843 struct rq_flags rf; 4844 int reset_on_fork; 4845 int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; 4846 struct rq *rq; 4847 4848 /* The pi code expects interrupts enabled */ 4849 BUG_ON(pi && in_interrupt()); 4850 recheck: 4851 /* Double check policy once rq lock held: */ 4852 if (policy < 0) { 4853 reset_on_fork = p->sched_reset_on_fork; 4854 policy = oldpolicy = p->policy; 4855 } else { 4856 reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK); 4857 4858 if (!valid_policy(policy)) 4859 return -EINVAL; 4860 } 4861 4862 if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV)) 4863 return -EINVAL; 4864 4865 /* 4866 * Valid priorities for SCHED_FIFO and SCHED_RR are 4867 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL, 4868 * SCHED_BATCH and SCHED_IDLE is 0. 4869 */ 4870 if ((p->mm && attr->sched_priority > MAX_USER_RT_PRIO-1) || 4871 (!p->mm && attr->sched_priority > MAX_RT_PRIO-1)) 4872 return -EINVAL; 4873 if ((dl_policy(policy) && !__checkparam_dl(attr)) || 4874 (rt_policy(policy) != (attr->sched_priority != 0))) 4875 return -EINVAL; 4876 4877 /* 4878 * Allow unprivileged RT tasks to decrease priority: 4879 */ 4880 if (user && !capable(CAP_SYS_NICE)) { 4881 if (fair_policy(policy)) { 4882 if (attr->sched_nice < task_nice(p) && 4883 !can_nice(p, attr->sched_nice)) 4884 return -EPERM; 4885 } 4886 4887 if (rt_policy(policy)) { 4888 unsigned long rlim_rtprio = 4889 task_rlimit(p, RLIMIT_RTPRIO); 4890 4891 /* Can't set/change the rt policy: */ 4892 if (policy != p->policy && !rlim_rtprio) 4893 return -EPERM; 4894 4895 /* Can't increase priority: */ 4896 if (attr->sched_priority > p->rt_priority && 4897 attr->sched_priority > rlim_rtprio) 4898 return -EPERM; 4899 } 4900 4901 /* 4902 * Can't set/change SCHED_DEADLINE policy at all for now 4903 * (safest behavior); in the future we would like to allow 4904 * unprivileged DL tasks to increase their relative deadline 4905 * or reduce their runtime (both ways reducing utilization) 4906 */ 4907 if (dl_policy(policy)) 4908 return -EPERM; 4909 4910 /* 4911 * Treat SCHED_IDLE as nice 20. Only allow a switch to 4912 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it. 4913 */ 4914 if (task_has_idle_policy(p) && !idle_policy(policy)) { 4915 if (!can_nice(p, task_nice(p))) 4916 return -EPERM; 4917 } 4918 4919 /* Can't change other user's priorities: */ 4920 if (!check_same_owner(p)) 4921 return -EPERM; 4922 4923 /* Normal users shall not reset the sched_reset_on_fork flag: */ 4924 if (p->sched_reset_on_fork && !reset_on_fork) 4925 return -EPERM; 4926 } 4927 4928 if (user) { 4929 if (attr->sched_flags & SCHED_FLAG_SUGOV) 4930 return -EINVAL; 4931 4932 retval = security_task_setscheduler(p); 4933 if (retval) 4934 return retval; 4935 } 4936 4937 /* Update task specific "requested" clamps */ 4938 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) { 4939 retval = uclamp_validate(p, attr); 4940 if (retval) 4941 return retval; 4942 } 4943 4944 if (pi) 4945 cpuset_read_lock(); 4946 4947 /* 4948 * Make sure no PI-waiters arrive (or leave) while we are 4949 * changing the priority of the task: 4950 * 4951 * To be able to change p->policy safely, the appropriate 4952 * runqueue lock must be held. 
4953 */ 4954 rq = task_rq_lock(p, &rf); 4955 update_rq_clock(rq); 4956 4957 /* 4958 * Changing the policy of the stop threads is a very bad idea: 4959 */ 4960 if (p == rq->stop) { 4961 retval = -EINVAL; 4962 goto unlock; 4963 } 4964 4965 /* 4966 * If not changing anything there's no need to proceed further, 4967 * but store a possible modification of reset_on_fork. 4968 */ 4969 if (unlikely(policy == p->policy)) { 4970 if (fair_policy(policy) && attr->sched_nice != task_nice(p)) 4971 goto change; 4972 if (rt_policy(policy) && attr->sched_priority != p->rt_priority) 4973 goto change; 4974 if (dl_policy(policy) && dl_param_changed(p, attr)) 4975 goto change; 4976 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) 4977 goto change; 4978 4979 p->sched_reset_on_fork = reset_on_fork; 4980 retval = 0; 4981 goto unlock; 4982 } 4983 change: 4984 4985 if (user) { 4986 #ifdef CONFIG_RT_GROUP_SCHED 4987 /* 4988 * Do not allow realtime tasks into groups that have no runtime 4989 * assigned. 4990 */ 4991 if (rt_bandwidth_enabled() && rt_policy(policy) && 4992 task_group(p)->rt_bandwidth.rt_runtime == 0 && 4993 !task_group_is_autogroup(task_group(p))) { 4994 retval = -EPERM; 4995 goto unlock; 4996 } 4997 #endif 4998 #ifdef CONFIG_SMP 4999 if (dl_bandwidth_enabled() && dl_policy(policy) && 5000 !(attr->sched_flags & SCHED_FLAG_SUGOV)) { 5001 cpumask_t *span = rq->rd->span; 5002 5003 /* 5004 * Don't allow tasks with an affinity mask smaller than 5005 * the entire root_domain to become SCHED_DEADLINE. We 5006 * will also fail if there's no bandwidth available. 5007 */ 5008 if (!cpumask_subset(span, p->cpus_ptr) || 5009 rq->rd->dl_bw.bw == 0) { 5010 retval = -EPERM; 5011 goto unlock; 5012 } 5013 } 5014 #endif 5015 } 5016 5017 /* Re-check policy now with rq lock held: */ 5018 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { 5019 policy = oldpolicy = -1; 5020 task_rq_unlock(rq, p, &rf); 5021 if (pi) 5022 cpuset_read_unlock(); 5023 goto recheck; 5024 } 5025 5026 /* 5027 * If setscheduling to SCHED_DEADLINE (or changing the parameters 5028 * of a SCHED_DEADLINE task) we need to check if enough bandwidth 5029 * is available. 5030 */ 5031 if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) { 5032 retval = -EBUSY; 5033 goto unlock; 5034 } 5035 5036 p->sched_reset_on_fork = reset_on_fork; 5037 oldprio = p->prio; 5038 5039 if (pi) { 5040 /* 5041 * Take priority boosted tasks into account. If the new 5042 * effective priority is unchanged, we just store the new 5043 * normal parameters and do not touch the scheduler class and 5044 * the runqueue. This will be done when the task deboosts 5045 * itself. 5046 */ 5047 new_effective_prio = rt_effective_prio(p, newprio); 5048 if (new_effective_prio == oldprio) 5049 queue_flags &= ~DEQUEUE_MOVE; 5050 } 5051 5052 queued = task_on_rq_queued(p); 5053 running = task_current(rq, p); 5054 if (queued) 5055 dequeue_task(rq, p, queue_flags); 5056 if (running) 5057 put_prev_task(rq, p); 5058 5059 prev_class = p->sched_class; 5060 5061 __setscheduler(rq, p, attr, pi); 5062 __setscheduler_uclamp(p, attr); 5063 5064 if (queued) { 5065 /* 5066 * We enqueue to tail when the priority of a task is 5067 * increased (user space view).
5068 */ 5069 if (oldprio < p->prio) 5070 queue_flags |= ENQUEUE_HEAD; 5071 5072 enqueue_task(rq, p, queue_flags); 5073 } 5074 if (running) 5075 set_next_task(rq, p); 5076 5077 check_class_changed(rq, p, prev_class, oldprio); 5078 5079 /* Avoid rq from going away on us: */ 5080 preempt_disable(); 5081 task_rq_unlock(rq, p, &rf); 5082 5083 if (pi) { 5084 cpuset_read_unlock(); 5085 rt_mutex_adjust_pi(p); 5086 } 5087 5088 /* Run balance callbacks after we've adjusted the PI chain: */ 5089 balance_callback(rq); 5090 preempt_enable(); 5091 5092 return 0; 5093 5094 unlock: 5095 task_rq_unlock(rq, p, &rf); 5096 if (pi) 5097 cpuset_read_unlock(); 5098 return retval; 5099 } 5100 5101 static int _sched_setscheduler(struct task_struct *p, int policy, 5102 const struct sched_param *param, bool check) 5103 { 5104 struct sched_attr attr = { 5105 .sched_policy = policy, 5106 .sched_priority = param->sched_priority, 5107 .sched_nice = PRIO_TO_NICE(p->static_prio), 5108 }; 5109 5110 /* Fixup the legacy SCHED_RESET_ON_FORK hack. */ 5111 if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) { 5112 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; 5113 policy &= ~SCHED_RESET_ON_FORK; 5114 attr.sched_policy = policy; 5115 } 5116 5117 return __sched_setscheduler(p, &attr, check, true); 5118 } 5119 /** 5120 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread. 5121 * @p: the task in question. 5122 * @policy: new policy. 5123 * @param: structure containing the new RT priority. 5124 * 5125 * Return: 0 on success. An error code otherwise. 5126 * 5127 * NOTE that the task may be already dead. 5128 */ 5129 int sched_setscheduler(struct task_struct *p, int policy, 5130 const struct sched_param *param) 5131 { 5132 return _sched_setscheduler(p, policy, param, true); 5133 } 5134 EXPORT_SYMBOL_GPL(sched_setscheduler); 5135 5136 int sched_setattr(struct task_struct *p, const struct sched_attr *attr) 5137 { 5138 return __sched_setscheduler(p, attr, true, true); 5139 } 5140 EXPORT_SYMBOL_GPL(sched_setattr); 5141 5142 int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr) 5143 { 5144 return __sched_setscheduler(p, attr, false, true); 5145 } 5146 5147 /** 5148 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace. 5149 * @p: the task in question. 5150 * @policy: new policy. 5151 * @param: structure containing the new RT priority. 5152 * 5153 * Just like sched_setscheduler, only don't bother checking if the 5154 * current context has permission. For example, this is needed in 5155 * stop_machine(): we create temporary high priority worker threads, 5156 * but our caller might not have that capability. 5157 * 5158 * Return: 0 on success. An error code otherwise. 
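 * * A minimal usage sketch, not taken from this file ('worker' is a hypothetical kthread the caller already holds a reference on; the policy and priority are illustrative only): * * struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; * * sched_setscheduler_nocheck(worker, SCHED_FIFO, &param); * * A non-zero return value would indicate the request was rejected.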
5159 */ 5160 int sched_setscheduler_nocheck(struct task_struct *p, int policy, 5161 const struct sched_param *param) 5162 { 5163 return _sched_setscheduler(p, policy, param, false); 5164 } 5165 EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck); 5166 5167 static int 5168 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) 5169 { 5170 struct sched_param lparam; 5171 struct task_struct *p; 5172 int retval; 5173 5174 if (!param || pid < 0) 5175 return -EINVAL; 5176 if (copy_from_user(&lparam, param, sizeof(struct sched_param))) 5177 return -EFAULT; 5178 5179 rcu_read_lock(); 5180 retval = -ESRCH; 5181 p = find_process_by_pid(pid); 5182 if (likely(p)) 5183 get_task_struct(p); 5184 rcu_read_unlock(); 5185 5186 if (likely(p)) { 5187 retval = sched_setscheduler(p, policy, &lparam); 5188 put_task_struct(p); 5189 } 5190 5191 return retval; 5192 } 5193 5194 /* 5195 * Mimics kernel/events/core.c perf_copy_attr(). 5196 */ 5197 static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr) 5198 { 5199 u32 size; 5200 int ret; 5201 5202 /* Zero the full structure, so that a short copy will be nice: */ 5203 memset(attr, 0, sizeof(*attr)); 5204 5205 ret = get_user(size, &uattr->size); 5206 if (ret) 5207 return ret; 5208 5209 /* ABI compatibility quirk: */ 5210 if (!size) 5211 size = SCHED_ATTR_SIZE_VER0; 5212 if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE) 5213 goto err_size; 5214 5215 ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size); 5216 if (ret) { 5217 if (ret == -E2BIG) 5218 goto err_size; 5219 return ret; 5220 } 5221 5222 if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) && 5223 size < SCHED_ATTR_SIZE_VER1) 5224 return -EINVAL; 5225 5226 /* 5227 * XXX: Do we want to be lenient like existing syscalls; or do we want 5228 * to be strict and return an error on out-of-bounds values? 5229 */ 5230 attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE); 5231 5232 return 0; 5233 5234 err_size: 5235 put_user(sizeof(*attr), &uattr->size); 5236 return -E2BIG; 5237 } 5238 5239 /** 5240 * sys_sched_setscheduler - set/change the scheduler policy and RT priority 5241 * @pid: the pid in question. 5242 * @policy: new policy. 5243 * @param: structure containing the new RT priority. 5244 * 5245 * Return: 0 on success. An error code otherwise. 5246 */ 5247 SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param) 5248 { 5249 if (policy < 0) 5250 return -EINVAL; 5251 5252 return do_sched_setscheduler(pid, policy, param); 5253 } 5254 5255 /** 5256 * sys_sched_setparam - set/change the RT priority of a thread 5257 * @pid: the pid in question. 5258 * @param: structure containing the new RT priority. 5259 * 5260 * Return: 0 on success. An error code otherwise. 5261 */ 5262 SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) 5263 { 5264 return do_sched_setscheduler(pid, SETPARAM_POLICY, param); 5265 } 5266 5267 /** 5268 * sys_sched_setattr - same as above, but with extended sched_attr 5269 * @pid: the pid in question. 5270 * @uattr: structure containing the extended parameters. 5271 * @flags: for future extension. 
5272 */ 5273 SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr, 5274 unsigned int, flags) 5275 { 5276 struct sched_attr attr; 5277 struct task_struct *p; 5278 int retval; 5279 5280 if (!uattr || pid < 0 || flags) 5281 return -EINVAL; 5282 5283 retval = sched_copy_attr(uattr, &attr); 5284 if (retval) 5285 return retval; 5286 5287 if ((int)attr.sched_policy < 0) 5288 return -EINVAL; 5289 if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY) 5290 attr.sched_policy = SETPARAM_POLICY; 5291 5292 rcu_read_lock(); 5293 retval = -ESRCH; 5294 p = find_process_by_pid(pid); 5295 if (likely(p)) 5296 get_task_struct(p); 5297 rcu_read_unlock(); 5298 5299 if (likely(p)) { 5300 retval = sched_setattr(p, &attr); 5301 put_task_struct(p); 5302 } 5303 5304 return retval; 5305 } 5306 5307 /** 5308 * sys_sched_getscheduler - get the policy (scheduling class) of a thread 5309 * @pid: the pid in question. 5310 * 5311 * Return: On success, the policy of the thread. Otherwise, a negative error 5312 * code. 5313 */ 5314 SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) 5315 { 5316 struct task_struct *p; 5317 int retval; 5318 5319 if (pid < 0) 5320 return -EINVAL; 5321 5322 retval = -ESRCH; 5323 rcu_read_lock(); 5324 p = find_process_by_pid(pid); 5325 if (p) { 5326 retval = security_task_getscheduler(p); 5327 if (!retval) 5328 retval = p->policy 5329 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0); 5330 } 5331 rcu_read_unlock(); 5332 return retval; 5333 } 5334 5335 /** 5336 * sys_sched_getparam - get the RT priority of a thread 5337 * @pid: the pid in question. 5338 * @param: structure containing the RT priority. 5339 * 5340 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error 5341 * code. 5342 */ 5343 SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) 5344 { 5345 struct sched_param lp = { .sched_priority = 0 }; 5346 struct task_struct *p; 5347 int retval; 5348 5349 if (!param || pid < 0) 5350 return -EINVAL; 5351 5352 rcu_read_lock(); 5353 p = find_process_by_pid(pid); 5354 retval = -ESRCH; 5355 if (!p) 5356 goto out_unlock; 5357 5358 retval = security_task_getscheduler(p); 5359 if (retval) 5360 goto out_unlock; 5361 5362 if (task_has_rt_policy(p)) 5363 lp.sched_priority = p->rt_priority; 5364 rcu_read_unlock(); 5365 5366 /* 5367 * This one might sleep, we cannot do it with a spinlock held ... 5368 */ 5369 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0; 5370 5371 return retval; 5372 5373 out_unlock: 5374 rcu_read_unlock(); 5375 return retval; 5376 } 5377 5378 /* 5379 * Copy the kernel size attribute structure (which might be larger 5380 * than what user-space knows about) to user-space. 5381 * 5382 * Note that all cases are valid: user-space buffer can be larger or 5383 * smaller than the kernel-space buffer. The usual case is that both 5384 * have the same size. 5385 */ 5386 static int 5387 sched_attr_copy_to_user(struct sched_attr __user *uattr, 5388 struct sched_attr *kattr, 5389 unsigned int usize) 5390 { 5391 unsigned int ksize = sizeof(*kattr); 5392 5393 if (!access_ok(uattr, usize)) 5394 return -EFAULT; 5395 5396 /* 5397 * sched_getattr() ABI forwards and backwards compatibility: 5398 * 5399 * If usize == ksize then we just copy everything to user-space and all is good. 5400 * 5401 * If usize < ksize then we only copy as much as user-space has space for, 5402 * this keeps ABI compatibility as well. We skip the rest. 
5403 * 5404 * If usize > ksize then user-space is using a newer version of the ABI, 5405 * which part the kernel doesn't know about. Just ignore it - tooling can 5406 * detect the kernel's knowledge of attributes from the attr->size value 5407 * which is set to ksize in this case. 5408 */ 5409 kattr->size = min(usize, ksize); 5410 5411 if (copy_to_user(uattr, kattr, kattr->size)) 5412 return -EFAULT; 5413 5414 return 0; 5415 } 5416 5417 /** 5418 * sys_sched_getattr - similar to sched_getparam, but with sched_attr 5419 * @pid: the pid in question. 5420 * @uattr: structure containing the extended parameters. 5421 * @usize: sizeof(attr) for fwd/bwd comp. 5422 * @flags: for future extension. 5423 */ 5424 SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, 5425 unsigned int, usize, unsigned int, flags) 5426 { 5427 struct sched_attr kattr = { }; 5428 struct task_struct *p; 5429 int retval; 5430 5431 if (!uattr || pid < 0 || usize > PAGE_SIZE || 5432 usize < SCHED_ATTR_SIZE_VER0 || flags) 5433 return -EINVAL; 5434 5435 rcu_read_lock(); 5436 p = find_process_by_pid(pid); 5437 retval = -ESRCH; 5438 if (!p) 5439 goto out_unlock; 5440 5441 retval = security_task_getscheduler(p); 5442 if (retval) 5443 goto out_unlock; 5444 5445 kattr.sched_policy = p->policy; 5446 if (p->sched_reset_on_fork) 5447 kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; 5448 if (task_has_dl_policy(p)) 5449 __getparam_dl(p, &kattr); 5450 else if (task_has_rt_policy(p)) 5451 kattr.sched_priority = p->rt_priority; 5452 else 5453 kattr.sched_nice = task_nice(p); 5454 5455 #ifdef CONFIG_UCLAMP_TASK 5456 kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value; 5457 kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value; 5458 #endif 5459 5460 rcu_read_unlock(); 5461 5462 return sched_attr_copy_to_user(uattr, &kattr, usize); 5463 5464 out_unlock: 5465 rcu_read_unlock(); 5466 return retval; 5467 } 5468 5469 long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) 5470 { 5471 cpumask_var_t cpus_allowed, new_mask; 5472 struct task_struct *p; 5473 int retval; 5474 5475 rcu_read_lock(); 5476 5477 p = find_process_by_pid(pid); 5478 if (!p) { 5479 rcu_read_unlock(); 5480 return -ESRCH; 5481 } 5482 5483 /* Prevent p going away */ 5484 get_task_struct(p); 5485 rcu_read_unlock(); 5486 5487 if (p->flags & PF_NO_SETAFFINITY) { 5488 retval = -EINVAL; 5489 goto out_put_task; 5490 } 5491 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { 5492 retval = -ENOMEM; 5493 goto out_put_task; 5494 } 5495 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { 5496 retval = -ENOMEM; 5497 goto out_free_cpus_allowed; 5498 } 5499 retval = -EPERM; 5500 if (!check_same_owner(p)) { 5501 rcu_read_lock(); 5502 if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) { 5503 rcu_read_unlock(); 5504 goto out_free_new_mask; 5505 } 5506 rcu_read_unlock(); 5507 } 5508 5509 retval = security_task_setscheduler(p); 5510 if (retval) 5511 goto out_free_new_mask; 5512 5513 5514 cpuset_cpus_allowed(p, cpus_allowed); 5515 cpumask_and(new_mask, in_mask, cpus_allowed); 5516 5517 /* 5518 * Since bandwidth control happens on root_domain basis, 5519 * if admission test is enabled, we only admit -deadline 5520 * tasks allowed to run on all the CPUs in the task's 5521 * root_domain. 
5522 */ 5523 #ifdef CONFIG_SMP 5524 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) { 5525 rcu_read_lock(); 5526 if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) { 5527 retval = -EBUSY; 5528 rcu_read_unlock(); 5529 goto out_free_new_mask; 5530 } 5531 rcu_read_unlock(); 5532 } 5533 #endif 5534 again: 5535 retval = __set_cpus_allowed_ptr(p, new_mask, true); 5536 5537 if (!retval) { 5538 cpuset_cpus_allowed(p, cpus_allowed); 5539 if (!cpumask_subset(new_mask, cpus_allowed)) { 5540 /* 5541 * We must have raced with a concurrent cpuset 5542 * update. Just reset the cpus_allowed to the 5543 * cpuset's cpus_allowed 5544 */ 5545 cpumask_copy(new_mask, cpus_allowed); 5546 goto again; 5547 } 5548 } 5549 out_free_new_mask: 5550 free_cpumask_var(new_mask); 5551 out_free_cpus_allowed: 5552 free_cpumask_var(cpus_allowed); 5553 out_put_task: 5554 put_task_struct(p); 5555 return retval; 5556 } 5557 5558 static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, 5559 struct cpumask *new_mask) 5560 { 5561 if (len < cpumask_size()) 5562 cpumask_clear(new_mask); 5563 else if (len > cpumask_size()) 5564 len = cpumask_size(); 5565 5566 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; 5567 } 5568 5569 /** 5570 * sys_sched_setaffinity - set the CPU affinity of a process 5571 * @pid: pid of the process 5572 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 5573 * @user_mask_ptr: user-space pointer to the new CPU mask 5574 * 5575 * Return: 0 on success. An error code otherwise. 5576 */ 5577 SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, 5578 unsigned long __user *, user_mask_ptr) 5579 { 5580 cpumask_var_t new_mask; 5581 int retval; 5582 5583 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) 5584 return -ENOMEM; 5585 5586 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); 5587 if (retval == 0) 5588 retval = sched_setaffinity(pid, new_mask); 5589 free_cpumask_var(new_mask); 5590 return retval; 5591 } 5592 5593 long sched_getaffinity(pid_t pid, struct cpumask *mask) 5594 { 5595 struct task_struct *p; 5596 unsigned long flags; 5597 int retval; 5598 5599 rcu_read_lock(); 5600 5601 retval = -ESRCH; 5602 p = find_process_by_pid(pid); 5603 if (!p) 5604 goto out_unlock; 5605 5606 retval = security_task_getscheduler(p); 5607 if (retval) 5608 goto out_unlock; 5609 5610 raw_spin_lock_irqsave(&p->pi_lock, flags); 5611 cpumask_and(mask, &p->cpus_mask, cpu_active_mask); 5612 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 5613 5614 out_unlock: 5615 rcu_read_unlock(); 5616 5617 return retval; 5618 } 5619 5620 /** 5621 * sys_sched_getaffinity - get the CPU affinity of a process 5622 * @pid: pid of the process 5623 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 5624 * @user_mask_ptr: user-space pointer to hold the current CPU mask 5625 * 5626 * Return: size of CPU mask copied to user_mask_ptr on success. An 5627 * error code otherwise. 
5628 */ 5629 SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, 5630 unsigned long __user *, user_mask_ptr) 5631 { 5632 int ret; 5633 cpumask_var_t mask; 5634 5635 if ((len * BITS_PER_BYTE) < nr_cpu_ids) 5636 return -EINVAL; 5637 if (len & (sizeof(unsigned long)-1)) 5638 return -EINVAL; 5639 5640 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) 5641 return -ENOMEM; 5642 5643 ret = sched_getaffinity(pid, mask); 5644 if (ret == 0) { 5645 unsigned int retlen = min(len, cpumask_size()); 5646 5647 if (copy_to_user(user_mask_ptr, mask, retlen)) 5648 ret = -EFAULT; 5649 else 5650 ret = retlen; 5651 } 5652 free_cpumask_var(mask); 5653 5654 return ret; 5655 } 5656 5657 /** 5658 * sys_sched_yield - yield the current processor to other threads. 5659 * 5660 * This function yields the current CPU to other tasks. If there are no 5661 * other threads running on this CPU then this function will return. 5662 * 5663 * Return: 0. 5664 */ 5665 static void do_sched_yield(void) 5666 { 5667 struct rq_flags rf; 5668 struct rq *rq; 5669 5670 rq = this_rq_lock_irq(&rf); 5671 5672 schedstat_inc(rq->yld_count); 5673 current->sched_class->yield_task(rq); 5674 5675 /* 5676 * Since we are going to call schedule() anyway, there's 5677 * no need to preempt or enable interrupts: 5678 */ 5679 preempt_disable(); 5680 rq_unlock(rq, &rf); 5681 sched_preempt_enable_no_resched(); 5682 5683 schedule(); 5684 } 5685 5686 SYSCALL_DEFINE0(sched_yield) 5687 { 5688 do_sched_yield(); 5689 return 0; 5690 } 5691 5692 #ifndef CONFIG_PREEMPTION 5693 int __sched _cond_resched(void) 5694 { 5695 if (should_resched(0)) { 5696 preempt_schedule_common(); 5697 return 1; 5698 } 5699 rcu_all_qs(); 5700 return 0; 5701 } 5702 EXPORT_SYMBOL(_cond_resched); 5703 #endif 5704 5705 /* 5706 * __cond_resched_lock() - if a reschedule is pending, drop the given lock, 5707 * call schedule, and on return reacquire the lock. 5708 * 5709 * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level 5710 * operations here to prevent schedule() from being called twice (once via 5711 * spin_unlock(), once by hand). 5712 */ 5713 int __cond_resched_lock(spinlock_t *lock) 5714 { 5715 int resched = should_resched(PREEMPT_LOCK_OFFSET); 5716 int ret = 0; 5717 5718 lockdep_assert_held(lock); 5719 5720 if (spin_needbreak(lock) || resched) { 5721 spin_unlock(lock); 5722 if (resched) 5723 preempt_schedule_common(); 5724 else 5725 cpu_relax(); 5726 ret = 1; 5727 spin_lock(lock); 5728 } 5729 return ret; 5730 } 5731 EXPORT_SYMBOL(__cond_resched_lock); 5732 5733 /** 5734 * yield - yield the current processor to other threads. 5735 * 5736 * Do not ever use this function, there's a 99% chance you're doing it wrong. 5737 * 5738 * The scheduler is at all times free to pick the calling task as the most 5739 * eligible task to run, if removing the yield() call from your code breaks 5740 * it, its already broken. 5741 * 5742 * Typical broken usage is: 5743 * 5744 * while (!event) 5745 * yield(); 5746 * 5747 * where one assumes that yield() will let 'the other' process run that will 5748 * make event true. If the current task is a SCHED_FIFO task that will never 5749 * happen. Never use yield() as a progress guarantee!! 5750 * 5751 * If you want to use yield() to wait for something, use wait_event(). 5752 * If you want to use yield() to be 'nice' for others, use cond_resched(). 5753 * If you still want to use yield(), do not! 
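 * * A minimal sketch of the preferred wait_event() pattern (names are illustrative, not from this file: a wait queue 'wq' whose producer sets 'event' and then calls wake_up(&wq)): * * wait_event(wq, event); * * which sleeps until the condition holds instead of spinning through the scheduler.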
5754 */ 5755 void __sched yield(void) 5756 { 5757 set_current_state(TASK_RUNNING); 5758 do_sched_yield(); 5759 } 5760 EXPORT_SYMBOL(yield); 5761 5762 /** 5763 * yield_to - yield the current processor to another thread in 5764 * your thread group, or accelerate that thread toward the 5765 * processor it's on. 5766 * @p: target task 5767 * @preempt: whether task preemption is allowed or not 5768 * 5769 * It's the caller's job to ensure that the target task struct 5770 * can't go away on us before we can do any checks. 5771 * 5772 * Return: 5773 * true (>0) if we indeed boosted the target task. 5774 * false (0) if we failed to boost the target. 5775 * -ESRCH if there's no task to yield to. 5776 */ 5777 int __sched yield_to(struct task_struct *p, bool preempt) 5778 { 5779 struct task_struct *curr = current; 5780 struct rq *rq, *p_rq; 5781 unsigned long flags; 5782 int yielded = 0; 5783 5784 local_irq_save(flags); 5785 rq = this_rq(); 5786 5787 again: 5788 p_rq = task_rq(p); 5789 /* 5790 * If we're the only runnable task on the rq and target rq also 5791 * has only one task, there's absolutely no point in yielding. 5792 */ 5793 if (rq->nr_running == 1 && p_rq->nr_running == 1) { 5794 yielded = -ESRCH; 5795 goto out_irq; 5796 } 5797 5798 double_rq_lock(rq, p_rq); 5799 if (task_rq(p) != p_rq) { 5800 double_rq_unlock(rq, p_rq); 5801 goto again; 5802 } 5803 5804 if (!curr->sched_class->yield_to_task) 5805 goto out_unlock; 5806 5807 if (curr->sched_class != p->sched_class) 5808 goto out_unlock; 5809 5810 if (task_running(p_rq, p) || p->state) 5811 goto out_unlock; 5812 5813 yielded = curr->sched_class->yield_to_task(rq, p, preempt); 5814 if (yielded) { 5815 schedstat_inc(rq->yld_count); 5816 /* 5817 * Make p's CPU reschedule; pick_next_entity takes care of 5818 * fairness. 5819 */ 5820 if (preempt && rq != p_rq) 5821 resched_curr(p_rq); 5822 } 5823 5824 out_unlock: 5825 double_rq_unlock(rq, p_rq); 5826 out_irq: 5827 local_irq_restore(flags); 5828 5829 if (yielded > 0) 5830 schedule(); 5831 5832 return yielded; 5833 } 5834 EXPORT_SYMBOL_GPL(yield_to); 5835 5836 int io_schedule_prepare(void) 5837 { 5838 int old_iowait = current->in_iowait; 5839 5840 current->in_iowait = 1; 5841 blk_schedule_flush_plug(current); 5842 5843 return old_iowait; 5844 } 5845 5846 void io_schedule_finish(int token) 5847 { 5848 current->in_iowait = token; 5849 } 5850 5851 /* 5852 * This task is about to go to sleep on IO. Increment rq->nr_iowait so 5853 * that process accounting knows that this is a task in IO wait state. 5854 */ 5855 long __sched io_schedule_timeout(long timeout) 5856 { 5857 int token; 5858 long ret; 5859 5860 token = io_schedule_prepare(); 5861 ret = schedule_timeout(timeout); 5862 io_schedule_finish(token); 5863 5864 return ret; 5865 } 5866 EXPORT_SYMBOL(io_schedule_timeout); 5867 5868 void __sched io_schedule(void) 5869 { 5870 int token; 5871 5872 token = io_schedule_prepare(); 5873 schedule(); 5874 io_schedule_finish(token); 5875 } 5876 EXPORT_SYMBOL(io_schedule); 5877 5878 /** 5879 * sys_sched_get_priority_max - return maximum RT priority. 5880 * @policy: scheduling class. 5881 * 5882 * Return: On success, this syscall returns the maximum 5883 * rt_priority that can be used by a given scheduling class. 5884 * On failure, a negative error code is returned. 
5885 */ 5886 SYSCALL_DEFINE1(sched_get_priority_max, int, policy) 5887 { 5888 int ret = -EINVAL; 5889 5890 switch (policy) { 5891 case SCHED_FIFO: 5892 case SCHED_RR: 5893 ret = MAX_USER_RT_PRIO-1; 5894 break; 5895 case SCHED_DEADLINE: 5896 case SCHED_NORMAL: 5897 case SCHED_BATCH: 5898 case SCHED_IDLE: 5899 ret = 0; 5900 break; 5901 } 5902 return ret; 5903 } 5904 5905 /** 5906 * sys_sched_get_priority_min - return minimum RT priority. 5907 * @policy: scheduling class. 5908 * 5909 * Return: On success, this syscall returns the minimum 5910 * rt_priority that can be used by a given scheduling class. 5911 * On failure, a negative error code is returned. 5912 */ 5913 SYSCALL_DEFINE1(sched_get_priority_min, int, policy) 5914 { 5915 int ret = -EINVAL; 5916 5917 switch (policy) { 5918 case SCHED_FIFO: 5919 case SCHED_RR: 5920 ret = 1; 5921 break; 5922 case SCHED_DEADLINE: 5923 case SCHED_NORMAL: 5924 case SCHED_BATCH: 5925 case SCHED_IDLE: 5926 ret = 0; 5927 } 5928 return ret; 5929 } 5930 5931 static int sched_rr_get_interval(pid_t pid, struct timespec64 *t) 5932 { 5933 struct task_struct *p; 5934 unsigned int time_slice; 5935 struct rq_flags rf; 5936 struct rq *rq; 5937 int retval; 5938 5939 if (pid < 0) 5940 return -EINVAL; 5941 5942 retval = -ESRCH; 5943 rcu_read_lock(); 5944 p = find_process_by_pid(pid); 5945 if (!p) 5946 goto out_unlock; 5947 5948 retval = security_task_getscheduler(p); 5949 if (retval) 5950 goto out_unlock; 5951 5952 rq = task_rq_lock(p, &rf); 5953 time_slice = 0; 5954 if (p->sched_class->get_rr_interval) 5955 time_slice = p->sched_class->get_rr_interval(rq, p); 5956 task_rq_unlock(rq, p, &rf); 5957 5958 rcu_read_unlock(); 5959 jiffies_to_timespec64(time_slice, t); 5960 return 0; 5961 5962 out_unlock: 5963 rcu_read_unlock(); 5964 return retval; 5965 } 5966 5967 /** 5968 * sys_sched_rr_get_interval - return the default timeslice of a process. 5969 * @pid: pid of the process. 5970 * @interval: userspace pointer to the timeslice value. 5971 * 5972 * this syscall writes the default timeslice value of a given process 5973 * into the user-space timespec buffer. A value of '0' means infinity. 5974 * 5975 * Return: On success, 0 and the timeslice is in @interval. Otherwise, 5976 * an error code. 
5977 */ 5978 SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, 5979 struct __kernel_timespec __user *, interval) 5980 { 5981 struct timespec64 t; 5982 int retval = sched_rr_get_interval(pid, &t); 5983 5984 if (retval == 0) 5985 retval = put_timespec64(&t, interval); 5986 5987 return retval; 5988 } 5989 5990 #ifdef CONFIG_COMPAT_32BIT_TIME 5991 SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid, 5992 struct old_timespec32 __user *, interval) 5993 { 5994 struct timespec64 t; 5995 int retval = sched_rr_get_interval(pid, &t); 5996 5997 if (retval == 0) 5998 retval = put_old_timespec32(&t, interval); 5999 return retval; 6000 } 6001 #endif 6002 6003 void sched_show_task(struct task_struct *p) 6004 { 6005 unsigned long free = 0; 6006 int ppid; 6007 6008 if (!try_get_task_stack(p)) 6009 return; 6010 6011 printk(KERN_INFO "%-15.15s %c", p->comm, task_state_to_char(p)); 6012 6013 if (p->state == TASK_RUNNING) 6014 printk(KERN_CONT " running task "); 6015 #ifdef CONFIG_DEBUG_STACK_USAGE 6016 free = stack_not_used(p); 6017 #endif 6018 ppid = 0; 6019 rcu_read_lock(); 6020 if (pid_alive(p)) 6021 ppid = task_pid_nr(rcu_dereference(p->real_parent)); 6022 rcu_read_unlock(); 6023 printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free, 6024 task_pid_nr(p), ppid, 6025 (unsigned long)task_thread_info(p)->flags); 6026 6027 print_worker_info(KERN_INFO, p); 6028 show_stack(p, NULL, KERN_INFO); 6029 put_task_stack(p); 6030 } 6031 EXPORT_SYMBOL_GPL(sched_show_task); 6032 6033 static inline bool 6034 state_filter_match(unsigned long state_filter, struct task_struct *p) 6035 { 6036 /* no filter, everything matches */ 6037 if (!state_filter) 6038 return true; 6039 6040 /* filter, but doesn't match */ 6041 if (!(p->state & state_filter)) 6042 return false; 6043 6044 /* 6045 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows 6046 * TASK_KILLABLE). 6047 */ 6048 if (state_filter == TASK_UNINTERRUPTIBLE && p->state == TASK_IDLE) 6049 return false; 6050 6051 return true; 6052 } 6053 6054 6055 void show_state_filter(unsigned long state_filter) 6056 { 6057 struct task_struct *g, *p; 6058 6059 #if BITS_PER_LONG == 32 6060 printk(KERN_INFO 6061 " task PC stack pid father\n"); 6062 #else 6063 printk(KERN_INFO 6064 " task PC stack pid father\n"); 6065 #endif 6066 rcu_read_lock(); 6067 for_each_process_thread(g, p) { 6068 /* 6069 * reset the NMI-timeout, listing all files on a slow 6070 * console might take a lot of time: 6071 * Also, reset softlockup watchdogs on all CPUs, because 6072 * another CPU might be blocked waiting for us to process 6073 * an IPI. 6074 */ 6075 touch_nmi_watchdog(); 6076 touch_all_softlockup_watchdogs(); 6077 if (state_filter_match(state_filter, p)) 6078 sched_show_task(p); 6079 } 6080 6081 #ifdef CONFIG_SCHED_DEBUG 6082 if (!state_filter) 6083 sysrq_sched_debug_show(); 6084 #endif 6085 rcu_read_unlock(); 6086 /* 6087 * Only show locks if all tasks are dumped: 6088 */ 6089 if (!state_filter) 6090 debug_show_all_locks(); 6091 } 6092 6093 /** 6094 * init_idle - set up an idle thread for a given CPU 6095 * @idle: task in question 6096 * @cpu: CPU the idle task belongs to 6097 * 6098 * NOTE: this function does not set the idle thread's NEED_RESCHED 6099 * flag, to make booting more robust. 
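 *
 * For reference, sched_init() later in this file uses it to turn the boot
 * thread into the boot CPU's idle task:
 *
 *	init_idle(current, smp_processor_id());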
6100 */ 6101 void init_idle(struct task_struct *idle, int cpu) 6102 { 6103 struct rq *rq = cpu_rq(cpu); 6104 unsigned long flags; 6105 6106 __sched_fork(0, idle); 6107 6108 raw_spin_lock_irqsave(&idle->pi_lock, flags); 6109 raw_spin_lock(&rq->lock); 6110 6111 idle->state = TASK_RUNNING; 6112 idle->se.exec_start = sched_clock(); 6113 idle->flags |= PF_IDLE; 6114 6115 scs_task_reset(idle); 6116 kasan_unpoison_task_stack(idle); 6117 6118 #ifdef CONFIG_SMP 6119 /* 6120 * Its possible that init_idle() gets called multiple times on a task, 6121 * in that case do_set_cpus_allowed() will not do the right thing. 6122 * 6123 * And since this is boot we can forgo the serialization. 6124 */ 6125 set_cpus_allowed_common(idle, cpumask_of(cpu)); 6126 #endif 6127 /* 6128 * We're having a chicken and egg problem, even though we are 6129 * holding rq->lock, the CPU isn't yet set to this CPU so the 6130 * lockdep check in task_group() will fail. 6131 * 6132 * Similar case to sched_fork(). / Alternatively we could 6133 * use task_rq_lock() here and obtain the other rq->lock. 6134 * 6135 * Silence PROVE_RCU 6136 */ 6137 rcu_read_lock(); 6138 __set_task_cpu(idle, cpu); 6139 rcu_read_unlock(); 6140 6141 rq->idle = idle; 6142 rcu_assign_pointer(rq->curr, idle); 6143 idle->on_rq = TASK_ON_RQ_QUEUED; 6144 #ifdef CONFIG_SMP 6145 idle->on_cpu = 1; 6146 #endif 6147 raw_spin_unlock(&rq->lock); 6148 raw_spin_unlock_irqrestore(&idle->pi_lock, flags); 6149 6150 /* Set the preempt count _outside_ the spinlocks! */ 6151 init_idle_preempt_count(idle, cpu); 6152 6153 /* 6154 * The idle tasks have their own, simple scheduling class: 6155 */ 6156 idle->sched_class = &idle_sched_class; 6157 ftrace_graph_init_idle_task(idle, cpu); 6158 vtime_init_idle(idle, cpu); 6159 #ifdef CONFIG_SMP 6160 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); 6161 #endif 6162 } 6163 6164 #ifdef CONFIG_SMP 6165 6166 int cpuset_cpumask_can_shrink(const struct cpumask *cur, 6167 const struct cpumask *trial) 6168 { 6169 int ret = 1; 6170 6171 if (!cpumask_weight(cur)) 6172 return ret; 6173 6174 ret = dl_cpuset_cpumask_can_shrink(cur, trial); 6175 6176 return ret; 6177 } 6178 6179 int task_can_attach(struct task_struct *p, 6180 const struct cpumask *cs_cpus_allowed) 6181 { 6182 int ret = 0; 6183 6184 /* 6185 * Kthreads which disallow setaffinity shouldn't be moved 6186 * to a new cpuset; we don't want to change their CPU 6187 * affinity and isolating such threads by their set of 6188 * allowed nodes is unnecessary. Thus, cpusets are not 6189 * applicable for such threads. This prevents checking for 6190 * success of set_cpus_allowed_ptr() on all attached tasks 6191 * before cpus_mask may be changed. 
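 *
 * (Per-CPU kthreads bound with kthread_bind() are the typical case: they
 * carry PF_NO_SETAFFINITY and are therefore rejected below with -EINVAL.)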
6192 */ 6193 if (p->flags & PF_NO_SETAFFINITY) { 6194 ret = -EINVAL; 6195 goto out; 6196 } 6197 6198 if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span, 6199 cs_cpus_allowed)) 6200 ret = dl_task_can_attach(p, cs_cpus_allowed); 6201 6202 out: 6203 return ret; 6204 } 6205 6206 bool sched_smp_initialized __read_mostly; 6207 6208 #ifdef CONFIG_NUMA_BALANCING 6209 /* Migrate current task p to target_cpu */ 6210 int migrate_task_to(struct task_struct *p, int target_cpu) 6211 { 6212 struct migration_arg arg = { p, target_cpu }; 6213 int curr_cpu = task_cpu(p); 6214 6215 if (curr_cpu == target_cpu) 6216 return 0; 6217 6218 if (!cpumask_test_cpu(target_cpu, p->cpus_ptr)) 6219 return -EINVAL; 6220 6221 /* TODO: This is not properly updating schedstats */ 6222 6223 trace_sched_move_numa(p, curr_cpu, target_cpu); 6224 return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg); 6225 } 6226 6227 /* 6228 * Requeue a task on a given node and accurately track the number of NUMA 6229 * tasks on the runqueues 6230 */ 6231 void sched_setnuma(struct task_struct *p, int nid) 6232 { 6233 bool queued, running; 6234 struct rq_flags rf; 6235 struct rq *rq; 6236 6237 rq = task_rq_lock(p, &rf); 6238 queued = task_on_rq_queued(p); 6239 running = task_current(rq, p); 6240 6241 if (queued) 6242 dequeue_task(rq, p, DEQUEUE_SAVE); 6243 if (running) 6244 put_prev_task(rq, p); 6245 6246 p->numa_preferred_nid = nid; 6247 6248 if (queued) 6249 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); 6250 if (running) 6251 set_next_task(rq, p); 6252 task_rq_unlock(rq, p, &rf); 6253 } 6254 #endif /* CONFIG_NUMA_BALANCING */ 6255 6256 #ifdef CONFIG_HOTPLUG_CPU 6257 /* 6258 * Ensure that the idle task is using init_mm right before its CPU goes 6259 * offline. 6260 */ 6261 void idle_task_exit(void) 6262 { 6263 struct mm_struct *mm = current->active_mm; 6264 6265 BUG_ON(cpu_online(smp_processor_id())); 6266 BUG_ON(current != this_rq()->idle); 6267 6268 if (mm != &init_mm) { 6269 switch_mm(mm, &init_mm, current); 6270 finish_arch_post_lock_switch(); 6271 } 6272 6273 /* finish_cpu(), as ran on the BP, will clean up the active_mm state */ 6274 } 6275 6276 /* 6277 * Since this CPU is going 'away' for a while, fold any nr_active delta 6278 * we might have. Assumes we're called after migrate_tasks() so that the 6279 * nr_active count is stable. We need to take the teardown thread which 6280 * is calling this into account, so we hand in adjust = 1 to the load 6281 * calculation. 6282 * 6283 * Also see the comment "Global load-average calculations". 6284 */ 6285 static void calc_load_migrate(struct rq *rq) 6286 { 6287 long delta = calc_load_fold_active(rq, 1); 6288 if (delta) 6289 atomic_long_add(delta, &calc_load_tasks); 6290 } 6291 6292 static struct task_struct *__pick_migrate_task(struct rq *rq) 6293 { 6294 const struct sched_class *class; 6295 struct task_struct *next; 6296 6297 for_each_class(class) { 6298 next = class->pick_next_task(rq); 6299 if (next) { 6300 next->sched_class->put_prev_task(rq, next); 6301 return next; 6302 } 6303 } 6304 6305 /* The idle class should always have a runnable task */ 6306 BUG(); 6307 } 6308 6309 /* 6310 * Migrate all tasks from the rq, sleeping tasks will be migrated by 6311 * try_to_wake_up()->select_task_rq(). 6312 * 6313 * Called with rq->lock held even though we're in stop_machine() and 6314 * there's no concurrency possible, we hold the required locks anyway 6315 * because of lock validation efforts.
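 *
 * (Within this file it is called from sched_cpu_dying(), which invokes it
 * during CPU hot-unplug with the rq lock held across the call.)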
6316 */ 6317 static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf) 6318 { 6319 struct rq *rq = dead_rq; 6320 struct task_struct *next, *stop = rq->stop; 6321 struct rq_flags orf = *rf; 6322 int dest_cpu; 6323 6324 /* 6325 * Fudge the rq selection such that the below task selection loop 6326 * doesn't get stuck on the currently eligible stop task. 6327 * 6328 * We're currently inside stop_machine() and the rq is either stuck 6329 * in the stop_machine_cpu_stop() loop, or we're executing this code, 6330 * either way we should never end up calling schedule() until we're 6331 * done here. 6332 */ 6333 rq->stop = NULL; 6334 6335 /* 6336 * put_prev_task() and pick_next_task() sched 6337 * class method both need to have an up-to-date 6338 * value of rq->clock[_task] 6339 */ 6340 update_rq_clock(rq); 6341 6342 for (;;) { 6343 /* 6344 * There's this thread running, bail when that's the only 6345 * remaining thread: 6346 */ 6347 if (rq->nr_running == 1) 6348 break; 6349 6350 next = __pick_migrate_task(rq); 6351 6352 /* 6353 * Rules for changing task_struct::cpus_mask are holding 6354 * both pi_lock and rq->lock, such that holding either 6355 * stabilizes the mask. 6356 * 6357 * Drop rq->lock is not quite as disastrous as it usually is 6358 * because !cpu_active at this point, which means load-balance 6359 * will not interfere. Also, stop-machine. 6360 */ 6361 rq_unlock(rq, rf); 6362 raw_spin_lock(&next->pi_lock); 6363 rq_relock(rq, rf); 6364 6365 /* 6366 * Since we're inside stop-machine, _nothing_ should have 6367 * changed the task, WARN if weird stuff happened, because in 6368 * that case the above rq->lock drop is a fail too. 6369 */ 6370 if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) { 6371 raw_spin_unlock(&next->pi_lock); 6372 continue; 6373 } 6374 6375 /* Find suitable destination for @next, with force if needed. */ 6376 dest_cpu = select_fallback_rq(dead_rq->cpu, next); 6377 rq = __migrate_task(rq, rf, next, dest_cpu); 6378 if (rq != dead_rq) { 6379 rq_unlock(rq, rf); 6380 rq = dead_rq; 6381 *rf = orf; 6382 rq_relock(rq, rf); 6383 } 6384 raw_spin_unlock(&next->pi_lock); 6385 } 6386 6387 rq->stop = stop; 6388 } 6389 #endif /* CONFIG_HOTPLUG_CPU */ 6390 6391 void set_rq_online(struct rq *rq) 6392 { 6393 if (!rq->online) { 6394 const struct sched_class *class; 6395 6396 cpumask_set_cpu(rq->cpu, rq->rd->online); 6397 rq->online = 1; 6398 6399 for_each_class(class) { 6400 if (class->rq_online) 6401 class->rq_online(rq); 6402 } 6403 } 6404 } 6405 6406 void set_rq_offline(struct rq *rq) 6407 { 6408 if (rq->online) { 6409 const struct sched_class *class; 6410 6411 for_each_class(class) { 6412 if (class->rq_offline) 6413 class->rq_offline(rq); 6414 } 6415 6416 cpumask_clear_cpu(rq->cpu, rq->rd->online); 6417 rq->online = 0; 6418 } 6419 } 6420 6421 /* 6422 * used to mark begin/end of suspend/resume: 6423 */ 6424 static int num_cpus_frozen; 6425 6426 /* 6427 * Update cpusets according to cpu_active mask. If cpusets are 6428 * disabled, cpuset_update_active_cpus() becomes a simple wrapper 6429 * around partition_sched_domains(). 6430 * 6431 * If we come here as part of a suspend/resume, don't touch cpusets because we 6432 * want to restore it back to its original state upon resume anyway. 6433 */ 6434 static void cpuset_cpu_active(void) 6435 { 6436 if (cpuhp_tasks_frozen) { 6437 /* 6438 * num_cpus_frozen tracks how many CPUs are involved in suspend 6439 * resume sequence. 
As long as this is not the last online 6440 * operation in the resume sequence, just build a single sched 6441 * domain, ignoring cpusets. 6442 */ 6443 partition_sched_domains(1, NULL, NULL); 6444 if (--num_cpus_frozen) 6445 return; 6446 /* 6447 * This is the last CPU online operation. So fall through and 6448 * restore the original sched domains by considering the 6449 * cpuset configurations. 6450 */ 6451 cpuset_force_rebuild(); 6452 } 6453 cpuset_update_active_cpus(); 6454 } 6455 6456 static int cpuset_cpu_inactive(unsigned int cpu) 6457 { 6458 if (!cpuhp_tasks_frozen) { 6459 if (dl_cpu_busy(cpu)) 6460 return -EBUSY; 6461 cpuset_update_active_cpus(); 6462 } else { 6463 num_cpus_frozen++; 6464 partition_sched_domains(1, NULL, NULL); 6465 } 6466 return 0; 6467 } 6468 6469 int sched_cpu_activate(unsigned int cpu) 6470 { 6471 struct rq *rq = cpu_rq(cpu); 6472 struct rq_flags rf; 6473 6474 #ifdef CONFIG_SCHED_SMT 6475 /* 6476 * When going up, increment the number of cores with SMT present. 6477 */ 6478 if (cpumask_weight(cpu_smt_mask(cpu)) == 2) 6479 static_branch_inc_cpuslocked(&sched_smt_present); 6480 #endif 6481 set_cpu_active(cpu, true); 6482 6483 if (sched_smp_initialized) { 6484 sched_domains_numa_masks_set(cpu); 6485 cpuset_cpu_active(); 6486 } 6487 6488 /* 6489 * Put the rq online, if not already. This happens: 6490 * 6491 * 1) In the early boot process, because we build the real domains 6492 * after all CPUs have been brought up. 6493 * 6494 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the 6495 * domains. 6496 */ 6497 rq_lock_irqsave(rq, &rf); 6498 if (rq->rd) { 6499 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 6500 set_rq_online(rq); 6501 } 6502 rq_unlock_irqrestore(rq, &rf); 6503 6504 return 0; 6505 } 6506 6507 int sched_cpu_deactivate(unsigned int cpu) 6508 { 6509 int ret; 6510 6511 set_cpu_active(cpu, false); 6512 /* 6513 * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU 6514 * users of this state to go away such that all new such users will 6515 * observe it. 6516 * 6517 * Do sync before park smpboot threads to take care the rcu boost case. 6518 */ 6519 synchronize_rcu(); 6520 6521 #ifdef CONFIG_SCHED_SMT 6522 /* 6523 * When going down, decrement the number of cores with SMT present. 
6524 */ 6525 if (cpumask_weight(cpu_smt_mask(cpu)) == 2) 6526 static_branch_dec_cpuslocked(&sched_smt_present); 6527 #endif 6528 6529 if (!sched_smp_initialized) 6530 return 0; 6531 6532 ret = cpuset_cpu_inactive(cpu); 6533 if (ret) { 6534 set_cpu_active(cpu, true); 6535 return ret; 6536 } 6537 sched_domains_numa_masks_clear(cpu); 6538 return 0; 6539 } 6540 6541 static void sched_rq_cpu_starting(unsigned int cpu) 6542 { 6543 struct rq *rq = cpu_rq(cpu); 6544 6545 rq->calc_load_update = calc_load_update; 6546 update_max_interval(); 6547 } 6548 6549 int sched_cpu_starting(unsigned int cpu) 6550 { 6551 sched_rq_cpu_starting(cpu); 6552 sched_tick_start(cpu); 6553 return 0; 6554 } 6555 6556 #ifdef CONFIG_HOTPLUG_CPU 6557 int sched_cpu_dying(unsigned int cpu) 6558 { 6559 struct rq *rq = cpu_rq(cpu); 6560 struct rq_flags rf; 6561 6562 /* Handle pending wakeups and then migrate everything off */ 6563 sched_tick_stop(cpu); 6564 6565 rq_lock_irqsave(rq, &rf); 6566 if (rq->rd) { 6567 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 6568 set_rq_offline(rq); 6569 } 6570 migrate_tasks(rq, &rf); 6571 BUG_ON(rq->nr_running != 1); 6572 rq_unlock_irqrestore(rq, &rf); 6573 6574 calc_load_migrate(rq); 6575 update_max_interval(); 6576 nohz_balance_exit_idle(rq); 6577 hrtick_clear(rq); 6578 return 0; 6579 } 6580 #endif 6581 6582 void __init sched_init_smp(void) 6583 { 6584 sched_init_numa(); 6585 6586 /* 6587 * There's no userspace yet to cause hotplug operations; hence all the 6588 * CPU masks are stable and all blatant races in the below code cannot 6589 * happen. 6590 */ 6591 mutex_lock(&sched_domains_mutex); 6592 sched_init_domains(cpu_active_mask); 6593 mutex_unlock(&sched_domains_mutex); 6594 6595 /* Move init over to a non-isolated CPU */ 6596 if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0) 6597 BUG(); 6598 sched_init_granularity(); 6599 6600 init_sched_rt_class(); 6601 init_sched_dl_class(); 6602 6603 sched_smp_initialized = true; 6604 } 6605 6606 static int __init migration_init(void) 6607 { 6608 sched_cpu_starting(smp_processor_id()); 6609 return 0; 6610 } 6611 early_initcall(migration_init); 6612 6613 #else 6614 void __init sched_init_smp(void) 6615 { 6616 sched_init_granularity(); 6617 } 6618 #endif /* CONFIG_SMP */ 6619 6620 int in_sched_functions(unsigned long addr) 6621 { 6622 return in_lock_functions(addr) || 6623 (addr >= (unsigned long)__sched_text_start 6624 && addr < (unsigned long)__sched_text_end); 6625 } 6626 6627 #ifdef CONFIG_CGROUP_SCHED 6628 /* 6629 * Default task group. 6630 * Every task in system belongs to this group at bootup. 
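 *
 * (sched_init() below wires up its per-CPU runqueues via
 * init_tg_cfs_entry() / init_tg_rt_entry().)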
6631 */ 6632 struct task_group root_task_group; 6633 LIST_HEAD(task_groups); 6634 6635 /* Cacheline aligned slab cache for task_group */ 6636 static struct kmem_cache *task_group_cache __read_mostly; 6637 #endif 6638 6639 DECLARE_PER_CPU(cpumask_var_t, load_balance_mask); 6640 DECLARE_PER_CPU(cpumask_var_t, select_idle_mask); 6641 6642 void __init sched_init(void) 6643 { 6644 unsigned long ptr = 0; 6645 int i; 6646 6647 wait_bit_init(); 6648 6649 #ifdef CONFIG_FAIR_GROUP_SCHED 6650 ptr += 2 * nr_cpu_ids * sizeof(void **); 6651 #endif 6652 #ifdef CONFIG_RT_GROUP_SCHED 6653 ptr += 2 * nr_cpu_ids * sizeof(void **); 6654 #endif 6655 if (ptr) { 6656 ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT); 6657 6658 #ifdef CONFIG_FAIR_GROUP_SCHED 6659 root_task_group.se = (struct sched_entity **)ptr; 6660 ptr += nr_cpu_ids * sizeof(void **); 6661 6662 root_task_group.cfs_rq = (struct cfs_rq **)ptr; 6663 ptr += nr_cpu_ids * sizeof(void **); 6664 6665 root_task_group.shares = ROOT_TASK_GROUP_LOAD; 6666 init_cfs_bandwidth(&root_task_group.cfs_bandwidth); 6667 #endif /* CONFIG_FAIR_GROUP_SCHED */ 6668 #ifdef CONFIG_RT_GROUP_SCHED 6669 root_task_group.rt_se = (struct sched_rt_entity **)ptr; 6670 ptr += nr_cpu_ids * sizeof(void **); 6671 6672 root_task_group.rt_rq = (struct rt_rq **)ptr; 6673 ptr += nr_cpu_ids * sizeof(void **); 6674 6675 #endif /* CONFIG_RT_GROUP_SCHED */ 6676 } 6677 #ifdef CONFIG_CPUMASK_OFFSTACK 6678 for_each_possible_cpu(i) { 6679 per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node( 6680 cpumask_size(), GFP_KERNEL, cpu_to_node(i)); 6681 per_cpu(select_idle_mask, i) = (cpumask_var_t)kzalloc_node( 6682 cpumask_size(), GFP_KERNEL, cpu_to_node(i)); 6683 } 6684 #endif /* CONFIG_CPUMASK_OFFSTACK */ 6685 6686 init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime()); 6687 init_dl_bandwidth(&def_dl_bandwidth, global_rt_period(), global_rt_runtime()); 6688 6689 #ifdef CONFIG_SMP 6690 init_defrootdomain(); 6691 #endif 6692 6693 #ifdef CONFIG_RT_GROUP_SCHED 6694 init_rt_bandwidth(&root_task_group.rt_bandwidth, 6695 global_rt_period(), global_rt_runtime()); 6696 #endif /* CONFIG_RT_GROUP_SCHED */ 6697 6698 #ifdef CONFIG_CGROUP_SCHED 6699 task_group_cache = KMEM_CACHE(task_group, 0); 6700 6701 list_add(&root_task_group.list, &task_groups); 6702 INIT_LIST_HEAD(&root_task_group.children); 6703 INIT_LIST_HEAD(&root_task_group.siblings); 6704 autogroup_init(&init_task); 6705 #endif /* CONFIG_CGROUP_SCHED */ 6706 6707 for_each_possible_cpu(i) { 6708 struct rq *rq; 6709 6710 rq = cpu_rq(i); 6711 raw_spin_lock_init(&rq->lock); 6712 rq->nr_running = 0; 6713 rq->calc_load_active = 0; 6714 rq->calc_load_update = jiffies + LOAD_FREQ; 6715 init_cfs_rq(&rq->cfs); 6716 init_rt_rq(&rq->rt); 6717 init_dl_rq(&rq->dl); 6718 #ifdef CONFIG_FAIR_GROUP_SCHED 6719 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); 6720 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; 6721 /* 6722 * How much CPU bandwidth does root_task_group get? 6723 * 6724 * In case of task-groups formed thr' the cgroup filesystem, it 6725 * gets 100% of the CPU resources in the system. This overall 6726 * system CPU resource is divided among the tasks of 6727 * root_task_group and its child task-groups in a fair manner, 6728 * based on each entity's (task or task-group's) weight 6729 * (se->load.weight). 
6730 * 6731 * In other words, if root_task_group has 10 tasks of weight 6732 * 1024) and two child groups A0 and A1 (of weight 1024 each), 6733 * then A0's share of the CPU resource is: 6734 * 6735 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33% 6736 * 6737 * We achieve this by letting root_task_group's tasks sit 6738 * directly in rq->cfs (i.e root_task_group->se[] = NULL). 6739 */ 6740 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL); 6741 #endif /* CONFIG_FAIR_GROUP_SCHED */ 6742 6743 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime; 6744 #ifdef CONFIG_RT_GROUP_SCHED 6745 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); 6746 #endif 6747 #ifdef CONFIG_SMP 6748 rq->sd = NULL; 6749 rq->rd = NULL; 6750 rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE; 6751 rq->balance_callback = NULL; 6752 rq->active_balance = 0; 6753 rq->next_balance = jiffies; 6754 rq->push_cpu = 0; 6755 rq->cpu = i; 6756 rq->online = 0; 6757 rq->idle_stamp = 0; 6758 rq->avg_idle = 2*sysctl_sched_migration_cost; 6759 rq->max_idle_balance_cost = sysctl_sched_migration_cost; 6760 6761 INIT_LIST_HEAD(&rq->cfs_tasks); 6762 6763 rq_attach_root(rq, &def_root_domain); 6764 #ifdef CONFIG_NO_HZ_COMMON 6765 rq->last_blocked_load_update_tick = jiffies; 6766 atomic_set(&rq->nohz_flags, 0); 6767 6768 rq_csd_init(rq, &rq->nohz_csd, nohz_csd_func); 6769 #endif 6770 #endif /* CONFIG_SMP */ 6771 hrtick_rq_init(rq); 6772 atomic_set(&rq->nr_iowait, 0); 6773 } 6774 6775 set_load_weight(&init_task, false); 6776 6777 /* 6778 * The boot idle thread does lazy MMU switching as well: 6779 */ 6780 mmgrab(&init_mm); 6781 enter_lazy_tlb(&init_mm, current); 6782 6783 /* 6784 * Make us the idle thread. Technically, schedule() should not be 6785 * called from this thread, however somewhere below it might be, 6786 * but because we are the idle thread, we just pick up running again 6787 * when this runqueue becomes "idle". 6788 */ 6789 init_idle(current, smp_processor_id()); 6790 6791 calc_load_update = jiffies + LOAD_FREQ; 6792 6793 #ifdef CONFIG_SMP 6794 idle_thread_set_boot_cpu(); 6795 #endif 6796 init_sched_fair_class(); 6797 6798 init_schedstats(); 6799 6800 psi_init(); 6801 6802 init_uclamp(); 6803 6804 scheduler_running = 1; 6805 } 6806 6807 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 6808 static inline int preempt_count_equals(int preempt_offset) 6809 { 6810 int nested = preempt_count() + rcu_preempt_depth(); 6811 6812 return (nested == preempt_offset); 6813 } 6814 6815 void __might_sleep(const char *file, int line, int preempt_offset) 6816 { 6817 /* 6818 * Blocking primitives will set (and therefore destroy) current->state, 6819 * since we will exit with TASK_RUNNING make sure we enter with it, 6820 * otherwise we will destroy state. 
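 *
 * A minimal sketch of the pattern being warned about (illustrative only):
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	kmalloc(sz, GFP_KERNEL);	// may block and reset ->state
 *	schedule();			// the intended sleep is now lost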
6821 */ 6822 WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change, 6823 "do not call blocking ops when !TASK_RUNNING; " 6824 "state=%lx set at [<%p>] %pS\n", 6825 current->state, 6826 (void *)current->task_state_change, 6827 (void *)current->task_state_change); 6828 6829 ___might_sleep(file, line, preempt_offset); 6830 } 6831 EXPORT_SYMBOL(__might_sleep); 6832 6833 void ___might_sleep(const char *file, int line, int preempt_offset) 6834 { 6835 /* Ratelimiting timestamp: */ 6836 static unsigned long prev_jiffy; 6837 6838 unsigned long preempt_disable_ip; 6839 6840 /* WARN_ON_ONCE() by default, no rate limit required: */ 6841 rcu_sleep_check(); 6842 6843 if ((preempt_count_equals(preempt_offset) && !irqs_disabled() && 6844 !is_idle_task(current) && !current->non_block_count) || 6845 system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING || 6846 oops_in_progress) 6847 return; 6848 6849 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 6850 return; 6851 prev_jiffy = jiffies; 6852 6853 /* Save this before calling printk(), since that will clobber it: */ 6854 preempt_disable_ip = get_preempt_disable_ip(current); 6855 6856 printk(KERN_ERR 6857 "BUG: sleeping function called from invalid context at %s:%d\n", 6858 file, line); 6859 printk(KERN_ERR 6860 "in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n", 6861 in_atomic(), irqs_disabled(), current->non_block_count, 6862 current->pid, current->comm); 6863 6864 if (task_stack_end_corrupted(current)) 6865 printk(KERN_EMERG "Thread overran stack, or stack corrupted\n"); 6866 6867 debug_show_held_locks(current); 6868 if (irqs_disabled()) 6869 print_irqtrace_events(current); 6870 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT) 6871 && !preempt_count_equals(preempt_offset)) { 6872 pr_err("Preemption disabled at:"); 6873 print_ip_sym(KERN_ERR, preempt_disable_ip); 6874 } 6875 dump_stack(); 6876 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 6877 } 6878 EXPORT_SYMBOL(___might_sleep); 6879 6880 void __cant_sleep(const char *file, int line, int preempt_offset) 6881 { 6882 static unsigned long prev_jiffy; 6883 6884 if (irqs_disabled()) 6885 return; 6886 6887 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT)) 6888 return; 6889 6890 if (preempt_count() > preempt_offset) 6891 return; 6892 6893 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 6894 return; 6895 prev_jiffy = jiffies; 6896 6897 printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line); 6898 printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", 6899 in_atomic(), irqs_disabled(), 6900 current->pid, current->comm); 6901 6902 debug_show_held_locks(current); 6903 dump_stack(); 6904 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 6905 } 6906 EXPORT_SYMBOL_GPL(__cant_sleep); 6907 #endif 6908 6909 #ifdef CONFIG_MAGIC_SYSRQ 6910 void normalize_rt_tasks(void) 6911 { 6912 struct task_struct *g, *p; 6913 struct sched_attr attr = { 6914 .sched_policy = SCHED_NORMAL, 6915 }; 6916 6917 read_lock(&tasklist_lock); 6918 for_each_process_thread(g, p) { 6919 /* 6920 * Only normalize user tasks: 6921 */ 6922 if (p->flags & PF_KTHREAD) 6923 continue; 6924 6925 p->se.exec_start = 0; 6926 schedstat_set(p->se.statistics.wait_start, 0); 6927 schedstat_set(p->se.statistics.sleep_start, 0); 6928 schedstat_set(p->se.statistics.block_start, 0); 6929 6930 if (!dl_task(p) && !rt_task(p)) { 6931 /* 6932 * Renice negative nice level userspace 6933 * tasks back to 0: 6934 */ 6935 if (task_nice(p) < 0) 6936 set_user_nice(p, 0); 6937 continue; 6938 } 6939 6940 
__sched_setscheduler(p, &attr, false, false); 6941 } 6942 read_unlock(&tasklist_lock); 6943 } 6944 6945 #endif /* CONFIG_MAGIC_SYSRQ */ 6946 6947 #if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) 6948 /* 6949 * These functions are only useful for the IA64 MCA handling, or kdb. 6950 * 6951 * They can only be called when the whole system has been 6952 * stopped - every CPU needs to be quiescent, and no scheduling 6953 * activity can take place. Using them for anything else would 6954 * be a serious bug, and as a result, they aren't even visible 6955 * under any other configuration. 6956 */ 6957 6958 /** 6959 * curr_task - return the current task for a given CPU. 6960 * @cpu: the processor in question. 6961 * 6962 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 6963 * 6964 * Return: The current task for @cpu. 6965 */ 6966 struct task_struct *curr_task(int cpu) 6967 { 6968 return cpu_curr(cpu); 6969 } 6970 6971 #endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */ 6972 6973 #ifdef CONFIG_IA64 6974 /** 6975 * ia64_set_curr_task - set the current task for a given CPU. 6976 * @cpu: the processor in question. 6977 * @p: the task pointer to set. 6978 * 6979 * Description: This function must only be used when non-maskable interrupts 6980 * are serviced on a separate stack. It allows the architecture to switch the 6981 * notion of the current task on a CPU in a non-blocking manner. This function 6982 * must be called with all CPU's synchronized, and interrupts disabled, the 6983 * caller must save the original value of the current task (see 6984 * curr_task() above) and restore that value before reenabling interrupts and 6985 * re-starting the system. 6986 * 6987 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 6988 */ 6989 void ia64_set_curr_task(int cpu, struct task_struct *p) 6990 { 6991 cpu_curr(cpu) = p; 6992 } 6993 6994 #endif 6995 6996 #ifdef CONFIG_CGROUP_SCHED 6997 /* task_group_lock serializes the addition/removal of task groups */ 6998 static DEFINE_SPINLOCK(task_group_lock); 6999 7000 static inline void alloc_uclamp_sched_group(struct task_group *tg, 7001 struct task_group *parent) 7002 { 7003 #ifdef CONFIG_UCLAMP_TASK_GROUP 7004 enum uclamp_id clamp_id; 7005 7006 for_each_clamp_id(clamp_id) { 7007 uclamp_se_set(&tg->uclamp_req[clamp_id], 7008 uclamp_none(clamp_id), false); 7009 tg->uclamp[clamp_id] = parent->uclamp[clamp_id]; 7010 } 7011 #endif 7012 } 7013 7014 static void sched_free_group(struct task_group *tg) 7015 { 7016 free_fair_sched_group(tg); 7017 free_rt_sched_group(tg); 7018 autogroup_free(tg); 7019 kmem_cache_free(task_group_cache, tg); 7020 } 7021 7022 /* allocate runqueue etc for a new task group */ 7023 struct task_group *sched_create_group(struct task_group *parent) 7024 { 7025 struct task_group *tg; 7026 7027 tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO); 7028 if (!tg) 7029 return ERR_PTR(-ENOMEM); 7030 7031 if (!alloc_fair_sched_group(tg, parent)) 7032 goto err; 7033 7034 if (!alloc_rt_sched_group(tg, parent)) 7035 goto err; 7036 7037 alloc_uclamp_sched_group(tg, parent); 7038 7039 return tg; 7040 7041 err: 7042 sched_free_group(tg); 7043 return ERR_PTR(-ENOMEM); 7044 } 7045 7046 void sched_online_group(struct task_group *tg, struct task_group *parent) 7047 { 7048 unsigned long flags; 7049 7050 spin_lock_irqsave(&task_group_lock, flags); 7051 list_add_rcu(&tg->list, &task_groups); 7052 7053 /* Root should already exist: */ 7054 WARN_ON(!parent); 7055 7056 tg->parent = parent; 7057 INIT_LIST_HEAD(&tg->children); 7058
list_add_rcu(&tg->siblings, &parent->children); 7059 spin_unlock_irqrestore(&task_group_lock, flags); 7060 7061 online_fair_sched_group(tg); 7062 } 7063 7064 /* rcu callback to free various structures associated with a task group */ 7065 static void sched_free_group_rcu(struct rcu_head *rhp) 7066 { 7067 /* Now it should be safe to free those cfs_rqs: */ 7068 sched_free_group(container_of(rhp, struct task_group, rcu)); 7069 } 7070 7071 void sched_destroy_group(struct task_group *tg) 7072 { 7073 /* Wait for possible concurrent references to cfs_rqs complete: */ 7074 call_rcu(&tg->rcu, sched_free_group_rcu); 7075 } 7076 7077 void sched_offline_group(struct task_group *tg) 7078 { 7079 unsigned long flags; 7080 7081 /* End participation in shares distribution: */ 7082 unregister_fair_sched_group(tg); 7083 7084 spin_lock_irqsave(&task_group_lock, flags); 7085 list_del_rcu(&tg->list); 7086 list_del_rcu(&tg->siblings); 7087 spin_unlock_irqrestore(&task_group_lock, flags); 7088 } 7089 7090 static void sched_change_group(struct task_struct *tsk, int type) 7091 { 7092 struct task_group *tg; 7093 7094 /* 7095 * All callers are synchronized by task_rq_lock(); we do not use RCU 7096 * which is pointless here. Thus, we pass "true" to task_css_check() 7097 * to prevent lockdep warnings. 7098 */ 7099 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true), 7100 struct task_group, css); 7101 tg = autogroup_task_group(tsk, tg); 7102 tsk->sched_task_group = tg; 7103 7104 #ifdef CONFIG_FAIR_GROUP_SCHED 7105 if (tsk->sched_class->task_change_group) 7106 tsk->sched_class->task_change_group(tsk, type); 7107 else 7108 #endif 7109 set_task_rq(tsk, task_cpu(tsk)); 7110 } 7111 7112 /* 7113 * Change task's runqueue when it moves between groups. 7114 * 7115 * The caller of this function should have put the task in its new group by 7116 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect 7117 * its new group. 7118 */ 7119 void sched_move_task(struct task_struct *tsk) 7120 { 7121 int queued, running, queue_flags = 7122 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; 7123 struct rq_flags rf; 7124 struct rq *rq; 7125 7126 rq = task_rq_lock(tsk, &rf); 7127 update_rq_clock(rq); 7128 7129 running = task_current(rq, tsk); 7130 queued = task_on_rq_queued(tsk); 7131 7132 if (queued) 7133 dequeue_task(rq, tsk, queue_flags); 7134 if (running) 7135 put_prev_task(rq, tsk); 7136 7137 sched_change_group(tsk, TASK_MOVE_GROUP); 7138 7139 if (queued) 7140 enqueue_task(rq, tsk, queue_flags); 7141 if (running) { 7142 set_next_task(rq, tsk); 7143 /* 7144 * After changing group, the running task may have joined a 7145 * throttled one but it's still the running task. Trigger a 7146 * resched to make sure that task can still run. 7147 */ 7148 resched_curr(rq); 7149 } 7150 7151 task_rq_unlock(rq, tsk, &rf); 7152 } 7153 7154 static inline struct task_group *css_tg(struct cgroup_subsys_state *css) 7155 { 7156 return css ? 
container_of(css, struct task_group, css) : NULL; 7157 } 7158 7159 static struct cgroup_subsys_state * 7160 cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 7161 { 7162 struct task_group *parent = css_tg(parent_css); 7163 struct task_group *tg; 7164 7165 if (!parent) { 7166 /* This is early initialization for the top cgroup */ 7167 return &root_task_group.css; 7168 } 7169 7170 tg = sched_create_group(parent); 7171 if (IS_ERR(tg)) 7172 return ERR_PTR(-ENOMEM); 7173 7174 return &tg->css; 7175 } 7176 7177 /* Expose task group only after completing cgroup initialization */ 7178 static int cpu_cgroup_css_online(struct cgroup_subsys_state *css) 7179 { 7180 struct task_group *tg = css_tg(css); 7181 struct task_group *parent = css_tg(css->parent); 7182 7183 if (parent) 7184 sched_online_group(tg, parent); 7185 7186 #ifdef CONFIG_UCLAMP_TASK_GROUP 7187 /* Propagate the effective uclamp value for the new group */ 7188 cpu_util_update_eff(css); 7189 #endif 7190 7191 return 0; 7192 } 7193 7194 static void cpu_cgroup_css_released(struct cgroup_subsys_state *css) 7195 { 7196 struct task_group *tg = css_tg(css); 7197 7198 sched_offline_group(tg); 7199 } 7200 7201 static void cpu_cgroup_css_free(struct cgroup_subsys_state *css) 7202 { 7203 struct task_group *tg = css_tg(css); 7204 7205 /* 7206 * Relies on the RCU grace period between css_released() and this. 7207 */ 7208 sched_free_group(tg); 7209 } 7210 7211 /* 7212 * This is called before wake_up_new_task(), therefore we really only 7213 * have to set its group bits, all the other stuff does not apply. 7214 */ 7215 static void cpu_cgroup_fork(struct task_struct *task) 7216 { 7217 struct rq_flags rf; 7218 struct rq *rq; 7219 7220 rq = task_rq_lock(task, &rf); 7221 7222 update_rq_clock(rq); 7223 sched_change_group(task, TASK_SET_GROUP); 7224 7225 task_rq_unlock(rq, task, &rf); 7226 } 7227 7228 static int cpu_cgroup_can_attach(struct cgroup_taskset *tset) 7229 { 7230 struct task_struct *task; 7231 struct cgroup_subsys_state *css; 7232 int ret = 0; 7233 7234 cgroup_taskset_for_each(task, css, tset) { 7235 #ifdef CONFIG_RT_GROUP_SCHED 7236 if (!sched_rt_can_attach(css_tg(css), task)) 7237 return -EINVAL; 7238 #endif 7239 /* 7240 * Serialize against wake_up_new_task() such that if its 7241 * running, we're sure to observe its full state. 7242 */ 7243 raw_spin_lock_irq(&task->pi_lock); 7244 /* 7245 * Avoid calling sched_move_task() before wake_up_new_task() 7246 * has happened. This would lead to problems with PELT, due to 7247 * move wanting to detach+attach while we're not attached yet. 7248 */ 7249 if (task->state == TASK_NEW) 7250 ret = -EINVAL; 7251 raw_spin_unlock_irq(&task->pi_lock); 7252 7253 if (ret) 7254 break; 7255 } 7256 return ret; 7257 } 7258 7259 static void cpu_cgroup_attach(struct cgroup_taskset *tset) 7260 { 7261 struct task_struct *task; 7262 struct cgroup_subsys_state *css; 7263 7264 cgroup_taskset_for_each(task, css, tset) 7265 sched_move_task(task); 7266 } 7267 7268 #ifdef CONFIG_UCLAMP_TASK_GROUP 7269 static void cpu_util_update_eff(struct cgroup_subsys_state *css) 7270 { 7271 struct cgroup_subsys_state *top_css = css; 7272 struct uclamp_se *uc_parent = NULL; 7273 struct uclamp_se *uc_se = NULL; 7274 unsigned int eff[UCLAMP_CNT]; 7275 enum uclamp_id clamp_id; 7276 unsigned int clamps; 7277 7278 css_for_each_descendant_pre(css, top_css) { 7279 uc_parent = css_tg(css)->parent 7280 ? 
css_tg(css)->parent->uclamp : NULL; 7281 7282 for_each_clamp_id(clamp_id) { 7283 /* Assume effective clamps matches requested clamps */ 7284 eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value; 7285 /* Cap effective clamps with parent's effective clamps */ 7286 if (uc_parent && 7287 eff[clamp_id] > uc_parent[clamp_id].value) { 7288 eff[clamp_id] = uc_parent[clamp_id].value; 7289 } 7290 } 7291 /* Ensure protection is always capped by limit */ 7292 eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]); 7293 7294 /* Propagate most restrictive effective clamps */ 7295 clamps = 0x0; 7296 uc_se = css_tg(css)->uclamp; 7297 for_each_clamp_id(clamp_id) { 7298 if (eff[clamp_id] == uc_se[clamp_id].value) 7299 continue; 7300 uc_se[clamp_id].value = eff[clamp_id]; 7301 uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]); 7302 clamps |= (0x1 << clamp_id); 7303 } 7304 if (!clamps) { 7305 css = css_rightmost_descendant(css); 7306 continue; 7307 } 7308 7309 /* Immediately update descendants RUNNABLE tasks */ 7310 uclamp_update_active_tasks(css, clamps); 7311 } 7312 } 7313 7314 /* 7315 * Integer 10^N with a given N exponent by casting to integer the literal "1eN" 7316 * C expression. Since there is no way to convert a macro argument (N) into a 7317 * character constant, use two levels of macros. 7318 */ 7319 #define _POW10(exp) ((unsigned int)1e##exp) 7320 #define POW10(exp) _POW10(exp) 7321 7322 struct uclamp_request { 7323 #define UCLAMP_PERCENT_SHIFT 2 7324 #define UCLAMP_PERCENT_SCALE (100 * POW10(UCLAMP_PERCENT_SHIFT)) 7325 s64 percent; 7326 u64 util; 7327 int ret; 7328 }; 7329 7330 static inline struct uclamp_request 7331 capacity_from_percent(char *buf) 7332 { 7333 struct uclamp_request req = { 7334 .percent = UCLAMP_PERCENT_SCALE, 7335 .util = SCHED_CAPACITY_SCALE, 7336 .ret = 0, 7337 }; 7338 7339 buf = strim(buf); 7340 if (strcmp(buf, "max")) { 7341 req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT, 7342 &req.percent); 7343 if (req.ret) 7344 return req; 7345 if ((u64)req.percent > UCLAMP_PERCENT_SCALE) { 7346 req.ret = -ERANGE; 7347 return req; 7348 } 7349 7350 req.util = req.percent << SCHED_CAPACITY_SHIFT; 7351 req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE); 7352 } 7353 7354 return req; 7355 } 7356 7357 static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf, 7358 size_t nbytes, loff_t off, 7359 enum uclamp_id clamp_id) 7360 { 7361 struct uclamp_request req; 7362 struct task_group *tg; 7363 7364 req = capacity_from_percent(buf); 7365 if (req.ret) 7366 return req.ret; 7367 7368 mutex_lock(&uclamp_mutex); 7369 rcu_read_lock(); 7370 7371 tg = css_tg(of_css(of)); 7372 if (tg->uclamp_req[clamp_id].value != req.util) 7373 uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false); 7374 7375 /* 7376 * Because of not recoverable conversion rounding we keep track of the 7377 * exact requested value 7378 */ 7379 tg->uclamp_pct[clamp_id] = req.percent; 7380 7381 /* Update effective clamps to track the most restrictive value */ 7382 cpu_util_update_eff(of_css(of)); 7383 7384 rcu_read_unlock(); 7385 mutex_unlock(&uclamp_mutex); 7386 7387 return nbytes; 7388 } 7389 7390 static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of, 7391 char *buf, size_t nbytes, 7392 loff_t off) 7393 { 7394 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MIN); 7395 } 7396 7397 static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of, 7398 char *buf, size_t nbytes, 7399 loff_t off) 7400 { 7401 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MAX); 7402 } 7403 
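/*
 * Worked example for the conversion above (illustrative only): writing
 * "12.5" to cpu.uclamp.min is parsed by capacity_from_percent() into
 * req.percent = 1250 (UCLAMP_PERCENT_SHIFT == 2), and then
 *
 *	req.util = DIV_ROUND_CLOSEST_ULL(1250 << SCHED_CAPACITY_SHIFT, 10000)
 *		 = DIV_ROUND_CLOSEST_ULL(1280000, 10000) = 128
 *
 * i.e. 12.5% of SCHED_CAPACITY_SCALE (1024). Writing "max" keeps the
 * default req.util = SCHED_CAPACITY_SCALE.
 */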
7404 static inline void cpu_uclamp_print(struct seq_file *sf, 7405 enum uclamp_id clamp_id) 7406 { 7407 struct task_group *tg; 7408 u64 util_clamp; 7409 u64 percent; 7410 u32 rem; 7411 7412 rcu_read_lock(); 7413 tg = css_tg(seq_css(sf)); 7414 util_clamp = tg->uclamp_req[clamp_id].value; 7415 rcu_read_unlock(); 7416 7417 if (util_clamp == SCHED_CAPACITY_SCALE) { 7418 seq_puts(sf, "max\n"); 7419 return; 7420 } 7421 7422 percent = tg->uclamp_pct[clamp_id]; 7423 percent = div_u64_rem(percent, POW10(UCLAMP_PERCENT_SHIFT), &rem); 7424 seq_printf(sf, "%llu.%0*u\n", percent, UCLAMP_PERCENT_SHIFT, rem); 7425 } 7426 7427 static int cpu_uclamp_min_show(struct seq_file *sf, void *v) 7428 { 7429 cpu_uclamp_print(sf, UCLAMP_MIN); 7430 return 0; 7431 } 7432 7433 static int cpu_uclamp_max_show(struct seq_file *sf, void *v) 7434 { 7435 cpu_uclamp_print(sf, UCLAMP_MAX); 7436 return 0; 7437 } 7438 #endif /* CONFIG_UCLAMP_TASK_GROUP */ 7439 7440 #ifdef CONFIG_FAIR_GROUP_SCHED 7441 static int cpu_shares_write_u64(struct cgroup_subsys_state *css, 7442 struct cftype *cftype, u64 shareval) 7443 { 7444 if (shareval > scale_load_down(ULONG_MAX)) 7445 shareval = MAX_SHARES; 7446 return sched_group_set_shares(css_tg(css), scale_load(shareval)); 7447 } 7448 7449 static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css, 7450 struct cftype *cft) 7451 { 7452 struct task_group *tg = css_tg(css); 7453 7454 return (u64) scale_load_down(tg->shares); 7455 } 7456 7457 #ifdef CONFIG_CFS_BANDWIDTH 7458 static DEFINE_MUTEX(cfs_constraints_mutex); 7459 7460 const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */ 7461 static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */ 7462 /* More than 203 days if BW_SHIFT equals 20. */ 7463 static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC; 7464 7465 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime); 7466 7467 static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) 7468 { 7469 int i, ret = 0, runtime_enabled, runtime_was_enabled; 7470 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 7471 7472 if (tg == &root_task_group) 7473 return -EINVAL; 7474 7475 /* 7476 * Ensure we have at least some amount of bandwidth every period. This is 7477 * to prevent reaching a state of large arrears when throttled via 7478 * entity_tick() resulting in prolonged exit starvation. 7479 */ 7480 if (quota < min_cfs_quota_period || period < min_cfs_quota_period) 7481 return -EINVAL; 7482 7483 /* 7484 * Likewise, bound things on the other side by preventing insane quota 7485 * periods. This also allows us to normalize in computing quota 7486 * feasibility. 7487 */ 7488 if (period > max_cfs_quota_period) 7489 return -EINVAL; 7490 7491 /* 7492 * Bound quota to defend quota against overflow during bandwidth shift. 7493 */ 7494 if (quota != RUNTIME_INF && quota > max_cfs_runtime) 7495 return -EINVAL; 7496 7497 /* 7498 * Prevent race between setting of cfs_rq->runtime_enabled and 7499 * unthrottle_offline_cfs_rqs().
7500 */ 7501 get_online_cpus(); 7502 mutex_lock(&cfs_constraints_mutex); 7503 ret = __cfs_schedulable(tg, period, quota); 7504 if (ret) 7505 goto out_unlock; 7506 7507 runtime_enabled = quota != RUNTIME_INF; 7508 runtime_was_enabled = cfs_b->quota != RUNTIME_INF; 7509 /* 7510 * If we need to toggle cfs_bandwidth_used, off->on must occur 7511 * before making related changes, and on->off must occur afterwards 7512 */ 7513 if (runtime_enabled && !runtime_was_enabled) 7514 cfs_bandwidth_usage_inc(); 7515 raw_spin_lock_irq(&cfs_b->lock); 7516 cfs_b->period = ns_to_ktime(period); 7517 cfs_b->quota = quota; 7518 7519 __refill_cfs_bandwidth_runtime(cfs_b); 7520 7521 /* Restart the period timer (if active) to handle new period expiry: */ 7522 if (runtime_enabled) 7523 start_cfs_bandwidth(cfs_b); 7524 7525 raw_spin_unlock_irq(&cfs_b->lock); 7526 7527 for_each_online_cpu(i) { 7528 struct cfs_rq *cfs_rq = tg->cfs_rq[i]; 7529 struct rq *rq = cfs_rq->rq; 7530 struct rq_flags rf; 7531 7532 rq_lock_irq(rq, &rf); 7533 cfs_rq->runtime_enabled = runtime_enabled; 7534 cfs_rq->runtime_remaining = 0; 7535 7536 if (cfs_rq->throttled) 7537 unthrottle_cfs_rq(cfs_rq); 7538 rq_unlock_irq(rq, &rf); 7539 } 7540 if (runtime_was_enabled && !runtime_enabled) 7541 cfs_bandwidth_usage_dec(); 7542 out_unlock: 7543 mutex_unlock(&cfs_constraints_mutex); 7544 put_online_cpus(); 7545 7546 return ret; 7547 } 7548 7549 static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) 7550 { 7551 u64 quota, period; 7552 7553 period = ktime_to_ns(tg->cfs_bandwidth.period); 7554 if (cfs_quota_us < 0) 7555 quota = RUNTIME_INF; 7556 else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC) 7557 quota = (u64)cfs_quota_us * NSEC_PER_USEC; 7558 else 7559 return -EINVAL; 7560 7561 return tg_set_cfs_bandwidth(tg, period, quota); 7562 } 7563 7564 static long tg_get_cfs_quota(struct task_group *tg) 7565 { 7566 u64 quota_us; 7567 7568 if (tg->cfs_bandwidth.quota == RUNTIME_INF) 7569 return -1; 7570 7571 quota_us = tg->cfs_bandwidth.quota; 7572 do_div(quota_us, NSEC_PER_USEC); 7573 7574 return quota_us; 7575 } 7576 7577 static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) 7578 { 7579 u64 quota, period; 7580 7581 if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC) 7582 return -EINVAL; 7583 7584 period = (u64)cfs_period_us * NSEC_PER_USEC; 7585 quota = tg->cfs_bandwidth.quota; 7586 7587 return tg_set_cfs_bandwidth(tg, period, quota); 7588 } 7589 7590 static long tg_get_cfs_period(struct task_group *tg) 7591 { 7592 u64 cfs_period_us; 7593 7594 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period); 7595 do_div(cfs_period_us, NSEC_PER_USEC); 7596 7597 return cfs_period_us; 7598 } 7599 7600 static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css, 7601 struct cftype *cft) 7602 { 7603 return tg_get_cfs_quota(css_tg(css)); 7604 } 7605 7606 static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css, 7607 struct cftype *cftype, s64 cfs_quota_us) 7608 { 7609 return tg_set_cfs_quota(css_tg(css), cfs_quota_us); 7610 } 7611 7612 static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css, 7613 struct cftype *cft) 7614 { 7615 return tg_get_cfs_period(css_tg(css)); 7616 } 7617 7618 static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css, 7619 struct cftype *cftype, u64 cfs_period_us) 7620 { 7621 return tg_set_cfs_period(css_tg(css), cfs_period_us); 7622 } 7623 7624 struct cfs_schedulable_data { 7625 struct task_group *tg; 7626 u64 period, quota; 7627 }; 7628 7629 /* 7630 * normalize group quota/period to 
be quota/max_period 7631 * note: units are usecs 7632 */ 7633 static u64 normalize_cfs_quota(struct task_group *tg, 7634 struct cfs_schedulable_data *d) 7635 { 7636 u64 quota, period; 7637 7638 if (tg == d->tg) { 7639 period = d->period; 7640 quota = d->quota; 7641 } else { 7642 period = tg_get_cfs_period(tg); 7643 quota = tg_get_cfs_quota(tg); 7644 } 7645 7646 /* note: these should typically be equivalent */ 7647 if (quota == RUNTIME_INF || quota == -1) 7648 return RUNTIME_INF; 7649 7650 return to_ratio(period, quota); 7651 } 7652 7653 static int tg_cfs_schedulable_down(struct task_group *tg, void *data) 7654 { 7655 struct cfs_schedulable_data *d = data; 7656 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 7657 s64 quota = 0, parent_quota = -1; 7658 7659 if (!tg->parent) { 7660 quota = RUNTIME_INF; 7661 } else { 7662 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth; 7663 7664 quota = normalize_cfs_quota(tg, d); 7665 parent_quota = parent_b->hierarchical_quota; 7666 7667 /* 7668 * Ensure max(child_quota) <= parent_quota. On cgroup2, 7669 * always take the min. On cgroup1, only inherit when no 7670 * limit is set: 7671 */ 7672 if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) { 7673 quota = min(quota, parent_quota); 7674 } else { 7675 if (quota == RUNTIME_INF) 7676 quota = parent_quota; 7677 else if (parent_quota != RUNTIME_INF && quota > parent_quota) 7678 return -EINVAL; 7679 } 7680 } 7681 cfs_b->hierarchical_quota = quota; 7682 7683 return 0; 7684 } 7685 7686 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota) 7687 { 7688 int ret; 7689 struct cfs_schedulable_data data = { 7690 .tg = tg, 7691 .period = period, 7692 .quota = quota, 7693 }; 7694 7695 if (quota != RUNTIME_INF) { 7696 do_div(data.period, NSEC_PER_USEC); 7697 do_div(data.quota, NSEC_PER_USEC); 7698 } 7699 7700 rcu_read_lock(); 7701 ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data); 7702 rcu_read_unlock(); 7703 7704 return ret; 7705 } 7706 7707 static int cpu_cfs_stat_show(struct seq_file *sf, void *v) 7708 { 7709 struct task_group *tg = css_tg(seq_css(sf)); 7710 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 7711 7712 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods); 7713 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled); 7714 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time); 7715 7716 if (schedstat_enabled() && tg != &root_task_group) { 7717 u64 ws = 0; 7718 int i; 7719 7720 for_each_possible_cpu(i) 7721 ws += schedstat_val(tg->se[i]->statistics.wait_sum); 7722 7723 seq_printf(sf, "wait_sum %llu\n", ws); 7724 } 7725 7726 return 0; 7727 } 7728 #endif /* CONFIG_CFS_BANDWIDTH */ 7729 #endif /* CONFIG_FAIR_GROUP_SCHED */ 7730 7731 #ifdef CONFIG_RT_GROUP_SCHED 7732 static int cpu_rt_runtime_write(struct cgroup_subsys_state *css, 7733 struct cftype *cft, s64 val) 7734 { 7735 return sched_group_set_rt_runtime(css_tg(css), val); 7736 } 7737 7738 static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css, 7739 struct cftype *cft) 7740 { 7741 return sched_group_rt_runtime(css_tg(css)); 7742 } 7743 7744 static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css, 7745 struct cftype *cftype, u64 rt_period_us) 7746 { 7747 return sched_group_set_rt_period(css_tg(css), rt_period_us); 7748 } 7749 7750 static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css, 7751 struct cftype *cft) 7752 { 7753 return sched_group_rt_period(css_tg(css)); 7754 } 7755 #endif /* CONFIG_RT_GROUP_SCHED */ 7756 7757 static struct cftype cpu_legacy_files[] = { 7758 #ifdef 
CONFIG_FAIR_GROUP_SCHED 7759 { 7760 .name = "shares", 7761 .read_u64 = cpu_shares_read_u64, 7762 .write_u64 = cpu_shares_write_u64, 7763 }, 7764 #endif 7765 #ifdef CONFIG_CFS_BANDWIDTH 7766 { 7767 .name = "cfs_quota_us", 7768 .read_s64 = cpu_cfs_quota_read_s64, 7769 .write_s64 = cpu_cfs_quota_write_s64, 7770 }, 7771 { 7772 .name = "cfs_period_us", 7773 .read_u64 = cpu_cfs_period_read_u64, 7774 .write_u64 = cpu_cfs_period_write_u64, 7775 }, 7776 { 7777 .name = "stat", 7778 .seq_show = cpu_cfs_stat_show, 7779 }, 7780 #endif 7781 #ifdef CONFIG_RT_GROUP_SCHED 7782 { 7783 .name = "rt_runtime_us", 7784 .read_s64 = cpu_rt_runtime_read, 7785 .write_s64 = cpu_rt_runtime_write, 7786 }, 7787 { 7788 .name = "rt_period_us", 7789 .read_u64 = cpu_rt_period_read_uint, 7790 .write_u64 = cpu_rt_period_write_uint, 7791 }, 7792 #endif 7793 #ifdef CONFIG_UCLAMP_TASK_GROUP 7794 { 7795 .name = "uclamp.min", 7796 .flags = CFTYPE_NOT_ON_ROOT, 7797 .seq_show = cpu_uclamp_min_show, 7798 .write = cpu_uclamp_min_write, 7799 }, 7800 { 7801 .name = "uclamp.max", 7802 .flags = CFTYPE_NOT_ON_ROOT, 7803 .seq_show = cpu_uclamp_max_show, 7804 .write = cpu_uclamp_max_write, 7805 }, 7806 #endif 7807 { } /* Terminate */ 7808 }; 7809 7810 static int cpu_extra_stat_show(struct seq_file *sf, 7811 struct cgroup_subsys_state *css) 7812 { 7813 #ifdef CONFIG_CFS_BANDWIDTH 7814 { 7815 struct task_group *tg = css_tg(css); 7816 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 7817 u64 throttled_usec; 7818 7819 throttled_usec = cfs_b->throttled_time; 7820 do_div(throttled_usec, NSEC_PER_USEC); 7821 7822 seq_printf(sf, "nr_periods %d\n" 7823 "nr_throttled %d\n" 7824 "throttled_usec %llu\n", 7825 cfs_b->nr_periods, cfs_b->nr_throttled, 7826 throttled_usec); 7827 } 7828 #endif 7829 return 0; 7830 } 7831 7832 #ifdef CONFIG_FAIR_GROUP_SCHED 7833 static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css, 7834 struct cftype *cft) 7835 { 7836 struct task_group *tg = css_tg(css); 7837 u64 weight = scale_load_down(tg->shares); 7838 7839 return DIV_ROUND_CLOSEST_ULL(weight * CGROUP_WEIGHT_DFL, 1024); 7840 } 7841 7842 static int cpu_weight_write_u64(struct cgroup_subsys_state *css, 7843 struct cftype *cft, u64 weight) 7844 { 7845 /* 7846 * cgroup weight knobs should use the common MIN, DFL and MAX 7847 * values which are 1, 100 and 10000 respectively. While it loses 7848 * a bit of range on both ends, it maps pretty well onto the shares 7849 * value used by scheduler and the round-trip conversions preserve 7850 * the original value over the entire range. 
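 *
 * For instance (illustrative only): the default weight 100 maps to
 * DIV_ROUND_CLOSEST_ULL(100 * 1024, 100) = 1024 shares, the minimum 1 maps
 * to 10 and the maximum 10000 maps to 102400; cpu_weight_read_u64() turns
 * those back into 100, 1 and 10000 respectively.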
7851 */ 7852 if (weight < CGROUP_WEIGHT_MIN || weight > CGROUP_WEIGHT_MAX) 7853 return -ERANGE; 7854 7855 weight = DIV_ROUND_CLOSEST_ULL(weight * 1024, CGROUP_WEIGHT_DFL); 7856 7857 return sched_group_set_shares(css_tg(css), scale_load(weight)); 7858 } 7859 7860 static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css, 7861 struct cftype *cft) 7862 { 7863 unsigned long weight = scale_load_down(css_tg(css)->shares); 7864 int last_delta = INT_MAX; 7865 int prio, delta; 7866 7867 /* find the closest nice value to the current weight */ 7868 for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) { 7869 delta = abs(sched_prio_to_weight[prio] - weight); 7870 if (delta >= last_delta) 7871 break; 7872 last_delta = delta; 7873 } 7874 7875 return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO); 7876 } 7877 7878 static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css, 7879 struct cftype *cft, s64 nice) 7880 { 7881 unsigned long weight; 7882 int idx; 7883 7884 if (nice < MIN_NICE || nice > MAX_NICE) 7885 return -ERANGE; 7886 7887 idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO; 7888 idx = array_index_nospec(idx, 40); 7889 weight = sched_prio_to_weight[idx]; 7890 7891 return sched_group_set_shares(css_tg(css), scale_load(weight)); 7892 } 7893 #endif 7894 7895 static void __maybe_unused cpu_period_quota_print(struct seq_file *sf, 7896 long period, long quota) 7897 { 7898 if (quota < 0) 7899 seq_puts(sf, "max"); 7900 else 7901 seq_printf(sf, "%ld", quota); 7902 7903 seq_printf(sf, " %ld\n", period); 7904 } 7905 7906 /* caller should put the current value in *@periodp before calling */ 7907 static int __maybe_unused cpu_period_quota_parse(char *buf, 7908 u64 *periodp, u64 *quotap) 7909 { 7910 char tok[21]; /* U64_MAX */ 7911 7912 if (sscanf(buf, "%20s %llu", tok, periodp) < 1) 7913 return -EINVAL; 7914 7915 *periodp *= NSEC_PER_USEC; 7916 7917 if (sscanf(tok, "%llu", quotap)) 7918 *quotap *= NSEC_PER_USEC; 7919 else if (!strcmp(tok, "max")) 7920 *quotap = RUNTIME_INF; 7921 else 7922 return -EINVAL; 7923 7924 return 0; 7925 } 7926 7927 #ifdef CONFIG_CFS_BANDWIDTH 7928 static int cpu_max_show(struct seq_file *sf, void *v) 7929 { 7930 struct task_group *tg = css_tg(seq_css(sf)); 7931 7932 cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg)); 7933 return 0; 7934 } 7935 7936 static ssize_t cpu_max_write(struct kernfs_open_file *of, 7937 char *buf, size_t nbytes, loff_t off) 7938 { 7939 struct task_group *tg = css_tg(of_css(of)); 7940 u64 period = tg_get_cfs_period(tg); 7941 u64 quota; 7942 int ret; 7943 7944 ret = cpu_period_quota_parse(buf, &period, "a); 7945 if (!ret) 7946 ret = tg_set_cfs_bandwidth(tg, period, quota); 7947 return ret ?: nbytes; 7948 } 7949 #endif 7950 7951 static struct cftype cpu_files[] = { 7952 #ifdef CONFIG_FAIR_GROUP_SCHED 7953 { 7954 .name = "weight", 7955 .flags = CFTYPE_NOT_ON_ROOT, 7956 .read_u64 = cpu_weight_read_u64, 7957 .write_u64 = cpu_weight_write_u64, 7958 }, 7959 { 7960 .name = "weight.nice", 7961 .flags = CFTYPE_NOT_ON_ROOT, 7962 .read_s64 = cpu_weight_nice_read_s64, 7963 .write_s64 = cpu_weight_nice_write_s64, 7964 }, 7965 #endif 7966 #ifdef CONFIG_CFS_BANDWIDTH 7967 { 7968 .name = "max", 7969 .flags = CFTYPE_NOT_ON_ROOT, 7970 .seq_show = cpu_max_show, 7971 .write = cpu_max_write, 7972 }, 7973 #endif 7974 #ifdef CONFIG_UCLAMP_TASK_GROUP 7975 { 7976 .name = "uclamp.min", 7977 .flags = CFTYPE_NOT_ON_ROOT, 7978 .seq_show = cpu_uclamp_min_show, 7979 .write = cpu_uclamp_min_write, 7980 }, 7981 { 7982 .name = "uclamp.max", 7983 .flags = 
CFTYPE_NOT_ON_ROOT, 7984 .seq_show = cpu_uclamp_max_show, 7985 .write = cpu_uclamp_max_write, 7986 }, 7987 #endif 7988 { } /* terminate */ 7989 }; 7990 7991 struct cgroup_subsys cpu_cgrp_subsys = { 7992 .css_alloc = cpu_cgroup_css_alloc, 7993 .css_online = cpu_cgroup_css_online, 7994 .css_released = cpu_cgroup_css_released, 7995 .css_free = cpu_cgroup_css_free, 7996 .css_extra_stat_show = cpu_extra_stat_show, 7997 .fork = cpu_cgroup_fork, 7998 .can_attach = cpu_cgroup_can_attach, 7999 .attach = cpu_cgroup_attach, 8000 .legacy_cftypes = cpu_legacy_files, 8001 .dfl_cftypes = cpu_files, 8002 .early_init = true, 8003 .threaded = true, 8004 }; 8005 8006 #endif /* CONFIG_CGROUP_SCHED */ 8007 8008 void dump_cpu_task(int cpu) 8009 { 8010 pr_info("Task dump for CPU %d:\n", cpu); 8011 sched_show_task(cpu_curr(cpu)); 8012 } 8013 8014 /* 8015 * Nice levels are multiplicative, with a gentle 10% change for every 8016 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to 8017 * nice 1, it will get ~10% less CPU time than another CPU-bound task 8018 * that remained on nice 0. 8019 * 8020 * The "10% effect" is relative and cumulative: from _any_ nice level, 8021 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level 8022 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25. 8023 * If a task goes up by ~10% and another task goes down by ~10% then 8024 * the relative distance between them is ~25%.) 8025 */ 8026 const int sched_prio_to_weight[40] = { 8027 /* -20 */ 88761, 71755, 56483, 46273, 36291, 8028 /* -15 */ 29154, 23254, 18705, 14949, 11916, 8029 /* -10 */ 9548, 7620, 6100, 4904, 3906, 8030 /* -5 */ 3121, 2501, 1991, 1586, 1277, 8031 /* 0 */ 1024, 820, 655, 526, 423, 8032 /* 5 */ 335, 272, 215, 172, 137, 8033 /* 10 */ 110, 87, 70, 56, 45, 8034 /* 15 */ 36, 29, 23, 18, 15, 8035 }; 8036 8037 /* 8038 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, precalculated. 8039 * 8040 * In cases where the weight does not change often, we can use the 8041 * precalculated inverse to speed up arithmetics by turning divisions 8042 * into multiplications: 8043 */ 8044 const u32 sched_prio_to_wmult[40] = { 8045 /* -20 */ 48388, 59856, 76040, 92818, 118348, 8046 /* -15 */ 147320, 184698, 229616, 287308, 360437, 8047 /* -10 */ 449829, 563644, 704093, 875809, 1099582, 8048 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326, 8049 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587, 8050 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126, 8051 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717, 8052 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153, 8053 }; 8054 8055 #undef CREATE_TRACE_POINTS 8056
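/*
 * Worked example for the tables above (illustrative only): a nice-0 task
 * (weight 1024) sharing a CPU with a nice-1 task (weight 820) gets
 * 1024 / (1024 + 820) ~= 55.5% of the CPU versus ~44.5%, i.e. roughly the
 * documented 10% step, and adjacent entries differ by the ~1.25 multiplier
 * (1024 / 820 ~= 1.25). Each sched_prio_to_wmult[] entry is 2^32 divided by
 * the sched_prio_to_weight[] entry at the same index, e.g. 4194304 = 2^32 / 1024.
 */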