// SPDX-License-Identifier: GPL-2.0-only
/*
 *  kernel/sched/core.c
 *
 *  Core kernel scheduler code and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 */
#include <linux/highmem.h>
#include <linux/hrtimer_api.h>
#include <linux/ktime_api.h>
#include <linux/sched/signal.h>
#include <linux/syscalls_api.h>
#include <linux/debug_locks.h>
#include <linux/prefetch.h>
#include <linux/capability.h>
#include <linux/pgtable_api.h>
#include <linux/wait_bit.h>
#include <linux/jiffies.h>
#include <linux/spinlock_api.h>
#include <linux/cpumask_api.h>
#include <linux/lockdep_api.h>
#include <linux/hardirq.h>
#include <linux/softirq.h>
#include <linux/refcount_api.h>
#include <linux/topology.h>
#include <linux/sched/clock.h>
#include <linux/sched/cond_resched.h>
#include <linux/sched/cputime.h>
#include <linux/sched/debug.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/init.h>
#include <linux/sched/isolation.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/mm.h>
#include <linux/sched/nohz.h>
#include <linux/sched/rseq_api.h>
#include <linux/sched/rt.h>

#include <linux/blkdev.h>
#include <linux/context_tracking.h>
#include <linux/cpuset.h>
#include <linux/delayacct.h>
#include <linux/init_task.h>
#include <linux/interrupt.h>
#include <linux/ioprio.h>
#include <linux/kallsyms.h>
#include <linux/kcov.h>
#include <linux/kprobes.h>
#include <linux/llist_api.h>
#include <linux/mmu_context.h>
#include <linux/mmzone.h>
#include <linux/mutex_api.h>
#include <linux/nmi.h>
#include <linux/nospec.h>
#include <linux/perf_event_api.h>
#include <linux/profile.h>
#include <linux/psi.h>
#include <linux/rcuwait_api.h>
#include <linux/rseq.h>
#include <linux/sched/wake_q.h>
#include <linux/scs.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/vtime.h>
#include <linux/wait_api.h>
#include <linux/workqueue_api.h>

#ifdef CONFIG_PREEMPT_DYNAMIC
# ifdef CONFIG_GENERIC_ENTRY
#  include <linux/entry-common.h>
# endif
#endif

#include <uapi/linux/sched/types.h>

#include <asm/irq_regs.h>
#include <asm/switch_to.h>
#include <asm/tlb.h>

#define CREATE_TRACE_POINTS
#include <linux/sched/rseq_api.h>
#include <trace/events/sched.h>
#include <trace/events/ipi.h>
#undef CREATE_TRACE_POINTS

#include "sched.h"
#include "stats.h"

#include "autogroup.h"
#include "pelt.h"
#include "smp.h"
#include "stats.h"

#include "../workqueue_internal.h"
#include "../../io_uring/io-wq.h"
#include "../smpboot.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);

/*
 * Export tracepoints that act as a bare tracehook (ie: have no trace event
 * associated with them) to allow external modules to probe them.
 */
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_thermal_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_compute_energy_tp);

DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#ifdef CONFIG_SCHED_DEBUG
/*
 * Debugging: various feature bits
 *
 * If SCHED_DEBUG is disabled, each compilation unit has its own copy of
 * sysctl_sched_features, defined in sched.h, to allow constants propagation
 * at compile time and compiler optimization based on features default.
 */
#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |
const_debug unsigned int sysctl_sched_features =
#include "features.h"
	0;
#undef SCHED_FEAT

/*
 * Print a warning if need_resched is set for the given duration (if
 * LATENCY_WARN is enabled).
 *
 * If sysctl_resched_latency_warn_once is set, only one warning will be shown
 * per boot.
 */
__read_mostly int sysctl_resched_latency_warn_ms = 100;
__read_mostly int sysctl_resched_latency_warn_once = 1;
#endif /* CONFIG_SCHED_DEBUG */

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;

__read_mostly int scheduler_running;

#ifdef CONFIG_SCHED_CORE

DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);

/* kernel prio, less is more */
static inline int __task_prio(const struct task_struct *p)
{
	if (p->sched_class == &stop_sched_class) /* trumps deadline */
		return -2;

	if (rt_prio(p->prio)) /* includes deadline */
		return p->prio; /* [-1, 99] */

	if (p->sched_class == &idle_sched_class)
		return MAX_RT_PRIO + NICE_WIDTH; /* 140 */

	return MAX_RT_PRIO + MAX_NICE; /* 120, squash fair */
}

/*
 * l(a,b)
 * le(a,b) := !l(b,a)
 * g(a,b)  := l(b,a)
 * ge(a,b) := !l(a,b)
 */

/* real prio, less is less */
static inline bool prio_less(const struct task_struct *a,
			     const struct task_struct *b, bool in_fi)
{

	int pa = __task_prio(a), pb = __task_prio(b);

	if (-pa < -pb)
		return true;

	if (-pb < -pa)
		return false;

	if (pa == -1) /* dl_prio() doesn't work because of stop_class above */
		return !dl_time_before(a->dl.deadline, b->dl.deadline);

	if (pa == MAX_RT_PRIO + MAX_NICE)	/* fair */
		return cfs_prio_less(a, b, in_fi);

	return false;
}

static inline bool __sched_core_less(const struct task_struct *a,
				     const struct task_struct *b)
{
	if (a->core_cookie < b->core_cookie)
		return true;

	if (a->core_cookie > b->core_cookie)
		return false;

	/* flip prio, so high prio is leftmost */
	if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count))
		return true;

	return false;
}

#define __node_2_sc(node) rb_entry((node), struct task_struct, core_node)
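
/*
 * In other words, core_tree is ordered by (core_cookie, -prio): tasks
 * sharing a cookie are grouped together and, within the group, higher
 * priority tasks sit further to the left, which is what lets
 * sched_core_find() below return the leftmost (highest priority) match
 * for a given cookie.
 */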
static inline bool rb_sched_core_less(struct rb_node *a, const struct rb_node *b)
{
	return __sched_core_less(__node_2_sc(a), __node_2_sc(b));
}

static inline int rb_sched_core_cmp(const void *key, const struct rb_node *node)
{
	const struct task_struct *p = __node_2_sc(node);
	unsigned long cookie = (unsigned long)key;

	if (cookie < p->core_cookie)
		return -1;

	if (cookie > p->core_cookie)
		return 1;

	return 0;
}

void sched_core_enqueue(struct rq *rq, struct task_struct *p)
{
	rq->core->core_task_seq++;

	if (!p->core_cookie)
		return;

	rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);
}

void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags)
{
	rq->core->core_task_seq++;

	if (sched_core_enqueued(p)) {
		rb_erase(&p->core_node, &rq->core_tree);
		RB_CLEAR_NODE(&p->core_node);
	}

	/*
	 * Migrating the last task off the cpu, with the cpu in forced idle
	 * state. Reschedule to create an accounting edge for forced idle,
	 * and re-examine whether the core is still in forced idle state.
	 */
	if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 &&
	    rq->core->core_forceidle_count && rq->curr == rq->idle)
		resched_curr(rq);
}

static int sched_task_is_throttled(struct task_struct *p, int cpu)
{
	if (p->sched_class->task_is_throttled)
		return p->sched_class->task_is_throttled(p, cpu);

	return 0;
}

static struct task_struct *sched_core_next(struct task_struct *p, unsigned long cookie)
{
	struct rb_node *node = &p->core_node;
	int cpu = task_cpu(p);

	do {
		node = rb_next(node);
		if (!node)
			return NULL;

		p = __node_2_sc(node);
		if (p->core_cookie != cookie)
			return NULL;

	} while (sched_task_is_throttled(p, cpu));

	return p;
}

/*
 * Find left-most (aka, highest priority) and unthrottled task matching @cookie.
 * If no suitable task is found, NULL will be returned.
 */
static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie)
{
	struct task_struct *p;
	struct rb_node *node;

	node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp);
	if (!node)
		return NULL;

	p = __node_2_sc(node);
	if (!sched_task_is_throttled(p, rq->cpu))
		return p;

	return sched_core_next(p, cookie);
}

/*
 * Magic required such that:
 *
 *	raw_spin_rq_lock(rq);
 *	...
 *	raw_spin_rq_unlock(rq);
 *
 * ends up locking and unlocking the _same_ lock, and all CPUs
 * always agree on what rq has what lock.
 *
 * XXX entirely possible to selectively enable cores, don't bother for now.
 */

static DEFINE_MUTEX(sched_core_mutex);
static atomic_t sched_core_count;
static struct cpumask sched_core_mask;

static void sched_core_lock(int cpu, unsigned long *flags)
{
	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
	int t, i = 0;

	local_irq_save(*flags);
	for_each_cpu(t, smt_mask)
		raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);
}

static void sched_core_unlock(int cpu, unsigned long *flags)
{
	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
	int t;

	for_each_cpu(t, smt_mask)
		raw_spin_unlock(&cpu_rq(t)->__lock);
	local_irq_restore(*flags);
}

static void __sched_core_flip(bool enabled)
{
	unsigned long flags;
	int cpu, t;

	cpus_read_lock();

	/*
	 * Toggle the online cores, one by one.
	 */
	cpumask_copy(&sched_core_mask, cpu_online_mask);
	for_each_cpu(cpu, &sched_core_mask) {
		const struct cpumask *smt_mask = cpu_smt_mask(cpu);

		sched_core_lock(cpu, &flags);

		for_each_cpu(t, smt_mask)
			cpu_rq(t)->core_enabled = enabled;

		cpu_rq(cpu)->core->core_forceidle_start = 0;

		sched_core_unlock(cpu, &flags);

		cpumask_andnot(&sched_core_mask, &sched_core_mask, smt_mask);
	}

	/*
	 * Toggle the offline CPUs.
	 */
	for_each_cpu_andnot(cpu, cpu_possible_mask, cpu_online_mask)
		cpu_rq(cpu)->core_enabled = enabled;

	cpus_read_unlock();
}

static void sched_core_assert_empty(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree));
}

static void __sched_core_enable(void)
{
	static_branch_enable(&__sched_core_enabled);
	/*
	 * Ensure all previous instances of raw_spin_rq_*lock() have finished
	 * and future ones will observe !sched_core_disabled().
	 */
	synchronize_rcu();
	__sched_core_flip(true);
	sched_core_assert_empty();
}

static void __sched_core_disable(void)
{
	sched_core_assert_empty();
	__sched_core_flip(false);
	static_branch_disable(&__sched_core_enabled);
}

void sched_core_get(void)
{
	if (atomic_inc_not_zero(&sched_core_count))
		return;

	mutex_lock(&sched_core_mutex);
	if (!atomic_read(&sched_core_count))
		__sched_core_enable();

	smp_mb__before_atomic();
	atomic_inc(&sched_core_count);
	mutex_unlock(&sched_core_mutex);
}

static void __sched_core_put(struct work_struct *work)
{
	if (atomic_dec_and_mutex_lock(&sched_core_count, &sched_core_mutex)) {
		__sched_core_disable();
		mutex_unlock(&sched_core_mutex);
	}
}

void sched_core_put(void)
{
	static DECLARE_WORK(_work, __sched_core_put);

	/*
	 * "There can be only one"
	 *
	 * Either this is the last one, or we don't actually need to do any
	 * 'work'. If it is the last *again*, we rely on
	 * WORK_STRUCT_PENDING_BIT.
	 */
	if (!atomic_add_unless(&sched_core_count, -1, 1))
		schedule_work(&_work);
}
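
/*
 * Rough usage sketch (illustrative, assuming the prctl(PR_SCHED_CORE)
 * cookie-management path as the caller): users pair these calls around the
 * lifetime of their cookies, e.g.
 *
 *	sched_core_get();
 *	...hand out / copy a core_cookie...
 *	sched_core_put();
 *
 * so that the expensive __sched_core_flip() only runs on the first get and
 * the final put.
 */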

#else /* !CONFIG_SCHED_CORE */

static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { }
static inline void
sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { }

#endif /* CONFIG_SCHED_CORE */

/*
 * Serialization rules:
 *
 * Lock order:
 *
 *   p->pi_lock
 *     rq->lock
 *       hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
 *
 *  rq1->lock
 *    rq2->lock  where: rq1 < rq2
 *
 * Regular state:
 *
 * Normal scheduling state is serialized by rq->lock. __schedule() takes the
 * local CPU's rq->lock, it optionally removes the task from the runqueue and
 * always looks at the local rq data structures to find the most eligible task
 * to run next.
 *
 * Task enqueue is also under rq->lock, possibly taken from another CPU.
 * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
 * the local CPU to avoid bouncing the runqueue state around [ see
 * ttwu_queue_wakelist() ]
 *
 * Task wakeup, specifically wakeups that involve migration, are horribly
 * complicated to avoid having to take two rq->locks.
 *
 * Special state:
 *
 * System-calls and anything external will use task_rq_lock() which acquires
 * both p->pi_lock and rq->lock. As a consequence the state they change is
 * stable while holding either lock:
 *
 *  - sched_setaffinity()/
 *    set_cpus_allowed_ptr():	p->cpus_ptr, p->nr_cpus_allowed
 *  - set_user_nice():		p->se.load, p->*prio
 *  - __sched_setscheduler():	p->sched_class, p->policy, p->*prio,
 *				p->se.load, p->rt_priority,
 *				p->dl.dl_{runtime, deadline, period, flags, bw, density}
 *  - sched_setnuma():		p->numa_preferred_nid
 *  - sched_move_task():	p->sched_task_group
 *  - uclamp_update_active()	p->uclamp*
 *
 * p->state <- TASK_*:
 *
 *   is changed locklessly using set_current_state(), __set_current_state() or
 *   set_special_state(), see their respective comments, or by
 *   try_to_wake_up(). This latter uses p->pi_lock to serialize against
 *   concurrent self.
 *
 * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
 *
 *   is set by activate_task() and cleared by deactivate_task(), under
 *   rq->lock. Non-zero indicates the task is runnable, the special
 *   ON_RQ_MIGRATING state is used for migration without holding both
 *   rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
 *
 * p->on_cpu <- { 0, 1 }:
 *
 *   is set by prepare_task() and cleared by finish_task() such that it will be
 *   set before p is scheduled-in and cleared after p is scheduled-out, both
 *   under rq->lock. Non-zero indicates the task is running on its CPU.
 *
 *   [ The astute reader will observe that it is possible for two tasks on one
 *     CPU to have ->on_cpu = 1 at the same time. ]
 *
 * task_cpu(p): is changed by set_task_cpu(), the rules are:
 *
 *  - Don't call set_task_cpu() on a blocked task:
 *
 *    We don't care what CPU we're not running on, this simplifies hotplug,
 *    the CPU assignment of blocked tasks isn't required to be valid.
 *
 *  - for try_to_wake_up(), called under p->pi_lock:
 *
 *    This allows try_to_wake_up() to only take one rq->lock, see its comment.
 *
 *  - for migration called under rq->lock:
 *    [ see task_on_rq_migrating() in task_rq_lock() ]
 *
 *    o move_queued_task()
 *    o detach_task()
 *
 *  - for migration called under double_rq_lock():
 *
 *    o __migrate_swap_task()
 *    o push_rt_task() / pull_rt_task()
 *    o push_dl_task() / pull_dl_task()
 *    o dl_task_offline_migration()
 *
 */

void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
{
	raw_spinlock_t *lock;

	/* Matches synchronize_rcu() in __sched_core_enable() */
	preempt_disable();
	if (sched_core_disabled()) {
		raw_spin_lock_nested(&rq->__lock, subclass);
		/* preempt_count *MUST* be > 1 */
		preempt_enable_no_resched();
		return;
	}

	for (;;) {
		lock = __rq_lockp(rq);
		raw_spin_lock_nested(lock, subclass);
		if (likely(lock == __rq_lockp(rq))) {
			/* preempt_count *MUST* be > 1 */
			preempt_enable_no_resched();
			return;
		}
		raw_spin_unlock(lock);
	}
}

bool raw_spin_rq_trylock(struct rq *rq)
{
	raw_spinlock_t *lock;
	bool ret;

	/* Matches synchronize_rcu() in __sched_core_enable() */
	preempt_disable();
	if (sched_core_disabled()) {
		ret = raw_spin_trylock(&rq->__lock);
		preempt_enable();
		return ret;
	}

	for (;;) {
		lock = __rq_lockp(rq);
		ret = raw_spin_trylock(lock);
		if (!ret || (likely(lock == __rq_lockp(rq)))) {
			preempt_enable();
			return ret;
		}
		raw_spin_unlock(lock);
	}
}

void raw_spin_rq_unlock(struct rq *rq)
{
	raw_spin_unlock(rq_lockp(rq));
}

#ifdef CONFIG_SMP
/*
 * double_rq_lock - safely lock two runqueues
 */
void double_rq_lock(struct rq *rq1, struct rq *rq2)
{
	lockdep_assert_irqs_disabled();

	if (rq_order_less(rq2, rq1))
		swap(rq1, rq2);

	raw_spin_rq_lock(rq1);
	if (__rq_lockp(rq1) != __rq_lockp(rq2))
		raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);

	double_rq_clock_clear_update(rq1, rq2);
}
#endif

/*
 * __task_rq_lock - lock the rq @p resides on.
 */
struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(rq->lock)
{
	struct rq *rq;

	lockdep_assert_held(&p->pi_lock);

	for (;;) {
		rq = task_rq(p);
		raw_spin_rq_lock(rq);
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rq_pin_lock(rq, rf);
			return rq;
		}
		raw_spin_rq_unlock(rq);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}

/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 */
struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(p->pi_lock)
	__acquires(rq->lock)
{
	struct rq *rq;

	for (;;) {
		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
		rq = task_rq(p);
		raw_spin_rq_lock(rq);
		/*
		 *	move_queued_task()		task_rq_lock()
		 *
		 *	ACQUIRE (rq->lock)
		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
		 *	[S] ->cpu = new_cpu		[L] task_rq()
		 *					[L] ->on_rq
		 *	RELEASE (rq->lock)
		 *
		 * If we observe the old CPU in task_rq_lock(), the acquire of
		 * the old rq->lock will fully serialize against the stores.
		 *
		 * If we observe the new CPU in task_rq_lock(), the address
		 * dependency headed by '[L] rq = task_rq()' and the acquire
		 * will pair with the WMB to ensure we then also see migrating.
		 */
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rq_pin_lock(rq, rf);
			return rq;
		}
		raw_spin_rq_unlock(rq);
		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}
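
/*
 * A minimal usage sketch (illustrative only): callers that need @p's
 * scheduling state to stay stable typically do
 *
 *	struct rq_flags rf;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &rf);
 *	...inspect or modify p's scheduling state and its rq...
 *	task_rq_unlock(rq, p, &rf);
 *
 * which is the pattern used by e.g. uclamp_update_active() further down.
 */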

/*
 * RQ-clock updating methods:
 */

static void update_rq_clock_task(struct rq *rq, s64 delta)
{
/*
 * In theory, the compiler should just see 0 here, and optimize out the call
 * to sched_rt_avg_update. But I don't trust it...
 */
	s64 __maybe_unused steal = 0, irq_delta = 0;

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

	/*
	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
	 * this case when a previous update_rq_clock() happened inside a
	 * {soft,}irq region.
	 *
	 * When this happens, we stop ->clock_task and only update the
	 * prev_irq_time stamp to account for the part that fit, so that a next
	 * update will consume the rest. This ensures ->clock_task is
	 * monotonic.
	 *
	 * It does however cause some slight miss-attribution of {soft,}irq
	 * time, a more accurate solution would be to update the irq_time using
	 * the current rq->clock timestamp, except that would require using
	 * atomic ops.
	 */
	if (irq_delta > delta)
		irq_delta = delta;

	rq->prev_irq_time += irq_delta;
	delta -= irq_delta;
	psi_account_irqtime(rq->curr, irq_delta);
	delayacct_irq(rq->curr, irq_delta);
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	if (static_key_false((&paravirt_steal_rq_enabled))) {
		steal = paravirt_steal_clock(cpu_of(rq));
		steal -= rq->prev_steal_time_rq;

		if (unlikely(steal > delta))
			steal = delta;

		rq->prev_steal_time_rq += steal;
		delta -= steal;
	}
#endif

	rq->clock_task += delta;

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
		update_irq_load_avg(rq, irq_delta + steal);
#endif
	update_rq_clock_pelt(rq, delta);
}

void update_rq_clock(struct rq *rq)
{
	s64 delta;

	lockdep_assert_rq_held(rq);

	if (rq->clock_update_flags & RQCF_ACT_SKIP)
		return;

#ifdef CONFIG_SCHED_DEBUG
	if (sched_feat(WARN_DOUBLE_CLOCK))
		SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
	rq->clock_update_flags |= RQCF_UPDATED;
#endif

	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
	if (delta < 0)
		return;
	rq->clock += delta;
	update_rq_clock_task(rq, delta);
}

#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 */

static void hrtick_clear(struct rq *rq)
{
	if (hrtimer_active(&rq->hrtick_timer))
		hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
	struct rq_flags rf;

	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

	rq_lock(rq, &rf);
	update_rq_clock(rq);
	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
	rq_unlock(rq, &rf);

	return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP

static void __hrtick_restart(struct rq *rq)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time = rq->hrtick_time;

	hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
}

/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
	struct rq *rq = arg;
	struct rq_flags rf;

	rq_lock(rq, &rf);
	__hrtick_restart(rq);
	rq_unlock(rq, &rf);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	s64 delta;

	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense and can cause timer DoS.
	 */
	delta = max_t(s64, delay, 10000LL);
	rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);

	if (rq == this_rq())
		__hrtick_restart(rq);
	else
		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
}

#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense. Rely on vruntime for fairness.
	 */
	delay = max_t(u64, delay, 10000LL);
	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
		      HRTIMER_MODE_REL_PINNED_HARD);
}

#endif /* CONFIG_SMP */

static void hrtick_rq_init(struct rq *rq)
{
#ifdef CONFIG_SMP
	INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
#endif
	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	rq->hrtick_timer.function = hrtick;
}
#else	/* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void hrtick_rq_init(struct rq *rq)
{
}
#endif	/* CONFIG_SCHED_HRTICK */

/*
 * cmpxchg based fetch_or, macro so it works for different integer types
 */
#define fetch_or(ptr, mask)						\
	({								\
		typeof(ptr) _ptr = (ptr);				\
		typeof(mask) _mask = (mask);				\
		typeof(*_ptr) _val = *_ptr;				\
									\
		do {							\
		} while (!try_cmpxchg(_ptr, &_val, _val | _mask));	\
	_val;								\
})

#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
/*
 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
 * this avoids any races wrt polling state changes and thereby avoids
 * spurious IPIs.
 */
static inline bool set_nr_and_not_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
}

/*
 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
 *
 * If this returns true, then the idle task promises to call
 * sched_ttwu_pending() and reschedule soon.
 */
static bool set_nr_if_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	typeof(ti->flags) val = READ_ONCE(ti->flags);

	do {
		if (!(val & _TIF_POLLING_NRFLAG))
			return false;
		if (val & _TIF_NEED_RESCHED)
			return true;
	} while (!try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED));

	return true;
}

#else
static inline bool set_nr_and_not_polling(struct task_struct *p)
{
	set_tsk_need_resched(p);
	return true;
}

#ifdef CONFIG_SMP
static inline bool set_nr_if_polling(struct task_struct *p)
{
	return false;
}
#endif
#endif

static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	struct wake_q_node *node = &task->wake_q;

	/*
	 * Atomically grab the task, if ->wake_q is !nil already it means
	 * it's already queued (either by us or someone else) and will get the
	 * wakeup due to that.
	 *
	 * In order to ensure that a pending wakeup will observe our pending
	 * state, even in the failed case, an explicit smp_mb() must be used.
	 */
	smp_mb__before_atomic();
	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
		return false;

	/*
	 * The head is context local, there can be no concurrency.
	 */
	*head->lastp = node;
	head->lastp = &node->next;
	return true;
}

/**
 * wake_q_add() - queue a wakeup for 'later' waking.
 * @head: the wake_q_head to add @task to
 * @task: the task to queue for 'later' wakeup
 *
 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 * instantly.
 *
 * This function must be used as-if it were wake_up_process(); IOW the task
 * must be ready to be woken at this location.
 */
void wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	if (__wake_q_add(head, task))
		get_task_struct(task);
}

/**
 * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
 * @head: the wake_q_head to add @task to
 * @task: the task to queue for 'later' wakeup
 *
 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 * instantly.
 *
 * This function must be used as-if it were wake_up_process(); IOW the task
 * must be ready to be woken at this location.
 *
 * This function is essentially a task-safe equivalent to wake_q_add(). Callers
 * that already hold reference to @task can call the 'safe' version and trust
 * wake_q to do the right thing depending whether or not the @task is already
 * queued for wakeup.
 */
void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
{
	if (!__wake_q_add(head, task))
		put_task_struct(task);
}
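
/*
 * A minimal usage sketch (illustrative, assuming DEFINE_WAKE_Q from
 * <linux/sched/wake_q.h>): queue wakeups while holding a lock, then issue
 * them after dropping it so the woken tasks never contend on that lock:
 *
 *	DEFINE_WAKE_Q(wake_q);
 *
 *	spin_lock(&some_lock);
 *	wake_q_add(&wake_q, p);
 *	spin_unlock(&some_lock);
 *
 *	wake_up_q(&wake_q);
 */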

void wake_up_q(struct wake_q_head *head)
{
	struct wake_q_node *node = head->first;

	while (node != WAKE_Q_TAIL) {
		struct task_struct *task;

		task = container_of(node, struct task_struct, wake_q);
		/* Task can safely be re-inserted now: */
		node = node->next;
		task->wake_q.next = NULL;

		/*
		 * wake_up_process() executes a full barrier, which pairs with
		 * the queueing in wake_q_add() so as not to miss wakeups.
		 */
		wake_up_process(task);
		put_task_struct(task);
	}
}

/*
 * resched_curr - mark rq's current task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
void resched_curr(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	int cpu;

	lockdep_assert_rq_held(rq);

	if (test_tsk_need_resched(curr))
		return;

	cpu = cpu_of(rq);

	if (cpu == smp_processor_id()) {
		set_tsk_need_resched(curr);
		set_preempt_need_resched();
		return;
	}

	if (set_nr_and_not_polling(curr))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

void resched_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	raw_spin_rq_lock_irqsave(rq, flags);
	if (cpu_online(cpu) || cpu == smp_processor_id())
		resched_curr(rq);
	raw_spin_rq_unlock_irqrestore(rq, flags);
}

#ifdef CONFIG_SMP
#ifdef CONFIG_NO_HZ_COMMON
/*
 * In the semi idle case, use the nearest busy CPU for migrating timers
 * from an idle CPU. This is good for power-savings.
 *
 * We don't do a similar optimization for a completely idle system, as
 * selecting an idle CPU will add more delays to the timers than intended
 * (as that CPU's timer base may not be up to date wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
	int i, cpu = smp_processor_id(), default_cpu = -1;
	struct sched_domain *sd;
	const struct cpumask *hk_mask;

	if (housekeeping_cpu(cpu, HK_TYPE_TIMER)) {
		if (!idle_cpu(cpu))
			return cpu;
		default_cpu = cpu;
	}

	hk_mask = housekeeping_cpumask(HK_TYPE_TIMER);

	guard(rcu)();

	for_each_domain(cpu, sd) {
		for_each_cpu_and(i, sched_domain_span(sd), hk_mask) {
			if (cpu == i)
				continue;

			if (!idle_cpu(i))
				return i;
		}
	}

	if (default_cpu == -1)
		default_cpu = housekeeping_any_cpu(HK_TYPE_TIMER);

	return default_cpu;
}

/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
static void wake_up_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (cpu == smp_processor_id())
		return;

	/*
	 * Set TIF_NEED_RESCHED and send an IPI if in the non-polling
	 * part of the idle loop. This forces an exit from the idle loop
	 * and a round trip to schedule(). Now this could be optimized
	 * because a simple new idle loop iteration is enough to
	 * re-evaluate the next tick. Provided some re-ordering of tick
	 * nohz functions that would need to follow TIF_NR_POLLING
	 * clearing:
	 *
	 * - On most archs, a simple fetch_or on ti::flags with a
	 *   "0" value would be enough to know if an IPI needs to be sent.
	 *
	 * - x86 needs to perform a last need_resched() check between
	 *   monitor and mwait which doesn't take timers into account.
	 *   There a dedicated TIF_TIMER flag would be required to
	 *   fetch_or here and be checked along with TIF_NEED_RESCHED
	 *   before mwait().
	 *
	 * However, remote timer enqueue is not such a frequent event
	 * and testing of the above solutions didn't appear to report
	 * much benefits.
	 */
	if (set_nr_and_not_polling(rq->idle))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

static bool wake_up_full_nohz_cpu(int cpu)
{
	/*
	 * We just need the target to call irq_exit() and re-evaluate
	 * the next tick. The nohz full kick at least implies that.
	 * If needed we can still optimize that later with an
	 * empty IRQ.
	 */
	if (cpu_is_offline(cpu))
		return true;  /* Don't try to wake offline CPUs. */
	if (tick_nohz_full_cpu(cpu)) {
		if (cpu != smp_processor_id() ||
		    tick_nohz_tick_stopped())
			tick_nohz_full_kick_cpu(cpu);
		return true;
	}

	return false;
}

/*
 * Wake up the specified CPU. If the CPU is going offline, it is the
 * caller's responsibility to deal with the lost wakeup, for example,
 * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
 */
void wake_up_nohz_cpu(int cpu)
{
	if (!wake_up_full_nohz_cpu(cpu))
		wake_up_idle_cpu(cpu);
}

static void nohz_csd_func(void *info)
{
	struct rq *rq = info;
	int cpu = cpu_of(rq);
	unsigned int flags;

	/*
	 * Release the rq::nohz_csd.
	 */
	flags = atomic_fetch_andnot(NOHZ_KICK_MASK | NOHZ_NEWILB_KICK, nohz_flags(cpu));
	WARN_ON(!(flags & NOHZ_KICK_MASK));

	rq->idle_balance = idle_cpu(cpu);
	if (rq->idle_balance && !need_resched()) {
		rq->nohz_idle_balance = flags;
		raise_softirq_irqoff(SCHED_SOFTIRQ);
	}
}

#endif /* CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_NO_HZ_FULL
static inline bool __need_bw_check(struct rq *rq, struct task_struct *p)
{
	if (rq->nr_running != 1)
		return false;

	if (p->sched_class != &fair_sched_class)
		return false;

	if (!task_on_rq_queued(p))
		return false;

	return true;
}

bool sched_can_stop_tick(struct rq *rq)
{
	int fifo_nr_running;

	/* Deadline tasks, even if single, need the tick */
	if (rq->dl.dl_nr_running)
		return false;

	/*
	 * If there is more than one RR task, we need the tick to affect the
	 * actual RR behaviour.
	 */
	if (rq->rt.rr_nr_running) {
		if (rq->rt.rr_nr_running == 1)
			return true;
		else
			return false;
	}

	/*
	 * If there are no RR tasks, but there are FIFO tasks, we can skip the
	 * tick: there is no forced preemption between FIFO tasks.
	 */
	fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
	if (fifo_nr_running)
		return true;

	/*
	 * If there are no DL,RR/FIFO tasks, there must only be CFS tasks left;
	 * if there's more than one we need the tick for involuntary
	 * preemption.
	 */
	if (rq->nr_running > 1)
		return false;

	/*
	 * If there is one task and it has CFS runtime bandwidth constraints
	 * and it's on the cpu now we don't want to stop the tick.
	 * This check prevents clearing the bit if a newly enqueued task here is
	 * dequeued by migrating while the constrained task continues to run.
	 * E.g. going from 2->1 without going through pick_next_task().
	 */
	if (sched_feat(HZ_BW) && __need_bw_check(rq, rq->curr)) {
		if (cfs_task_bw_constrained(rq->curr))
			return false;
	}

	return true;
}
#endif /* CONFIG_NO_HZ_FULL */
#endif /* CONFIG_SMP */

#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
/*
 * Iterate task_group tree rooted at *from, calling @down when first entering a
 * node and @up when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
int walk_tg_tree_from(struct task_group *from,
		      tg_visitor down, tg_visitor up, void *data)
{
	struct task_group *parent, *child;
	int ret;

	parent = from;

down:
	ret = (*down)(parent, data);
	if (ret)
		goto out;
	list_for_each_entry_rcu(child, &parent->children, siblings) {
		parent = child;
		goto down;

up:
		continue;
	}
	ret = (*up)(parent, data);
	if (ret || parent == from)
		goto out;

	child = parent;
	parent = parent->parent;
	if (parent)
		goto up;
out:
	return ret;
}

int tg_nop(struct task_group *tg, void *data)
{
	return 0;
}
#endif

static void set_load_weight(struct task_struct *p, bool update_load)
{
	int prio = p->static_prio - MAX_RT_PRIO;
	struct load_weight *load = &p->se.load;

	/*
	 * SCHED_IDLE tasks get minimal weight:
	 */
	if (task_has_idle_policy(p)) {
		load->weight = scale_load(WEIGHT_IDLEPRIO);
		load->inv_weight = WMULT_IDLEPRIO;
		return;
	}

	/*
	 * SCHED_OTHER tasks have to update their load when changing their
	 * weight
	 */
	if (update_load && p->sched_class == &fair_sched_class) {
		reweight_task(p, prio);
	} else {
		load->weight = scale_load(sched_prio_to_weight[prio]);
		load->inv_weight = sched_prio_to_wmult[prio];
	}
}

#ifdef CONFIG_UCLAMP_TASK
/*
 * Serializes updates of utilization clamp values
 *
 * The (slow-path) user-space triggers utilization clamp value updates which
 * can require updates on (fast-path) scheduler's data structures used to
 * support enqueue/dequeue operations.
 * While the per-CPU rq lock protects fast-path update operations, user-space
 * requests are serialized using a mutex to reduce the risk of conflicting
 * updates or API abuses.
 */
static DEFINE_MUTEX(uclamp_mutex);

/* Max allowed minimum utilization */
static unsigned int __maybe_unused sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;

/* Max allowed maximum utilization */
static unsigned int __maybe_unused sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;

/*
 * By default RT tasks run at the maximum performance point/capacity of the
 * system. Uclamp enforces this by always setting UCLAMP_MIN of RT tasks to
 * SCHED_CAPACITY_SCALE.
 *
 * This knob allows admins to change the default behavior when uclamp is being
 * used. In battery powered devices, particularly, running at the maximum
 * capacity and frequency will increase energy consumption and shorten the
 * battery life.
 *
 * This knob only affects RT tasks whose uclamp_se->user_defined == false.
 *
 * This knob will not override the system default sched_util_clamp_min defined
 * above.
 */
static unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE;

/* All clamps are required to be less or equal than these values */
static struct uclamp_se uclamp_default[UCLAMP_CNT];

/*
 * This static key is used to reduce the uclamp overhead in the fast path. It
 * primarily disables the call to uclamp_rq_{inc, dec}() in
 * enqueue/dequeue_task().
 *
 * This allows users to continue to enable uclamp in their kernel config with
 * minimum uclamp overhead in the fast path.
 *
 * As soon as userspace modifies any of the uclamp knobs, the static key is
 * enabled, since we have actual users that make use of uclamp
 * functionality.
 *
 * The knobs that would enable this static key are:
 *
 *   * A task modifying its uclamp value with sched_setattr().
 *   * An admin modifying the sysctl_sched_uclamp_{min, max} via procfs.
 *   * An admin modifying the cgroup cpu.uclamp.{min, max}
 */
DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);

/* Integer rounded range for each bucket */
#define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)

#define for_each_clamp_id(clamp_id) \
	for ((clamp_id) = 0; (clamp_id) < UCLAMP_CNT; (clamp_id)++)

static inline unsigned int uclamp_bucket_id(unsigned int clamp_value)
{
	return min_t(unsigned int, clamp_value / UCLAMP_BUCKET_DELTA, UCLAMP_BUCKETS - 1);
}

static inline unsigned int uclamp_none(enum uclamp_id clamp_id)
{
	if (clamp_id == UCLAMP_MIN)
		return 0;
	return SCHED_CAPACITY_SCALE;
}

static inline void uclamp_se_set(struct uclamp_se *uc_se,
				 unsigned int value, bool user_defined)
{
	uc_se->value = value;
	uc_se->bucket_id = uclamp_bucket_id(value);
	uc_se->user_defined = user_defined;
}

static inline unsigned int
uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
		  unsigned int clamp_value)
{
	/*
	 * Avoid blocked utilization pushing up the frequency when we go
	 * idle (which drops the max-clamp) by retaining the last known
	 * max-clamp.
	 */
	if (clamp_id == UCLAMP_MAX) {
		rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
		return clamp_value;
	}

	return uclamp_none(UCLAMP_MIN);
}

static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
				     unsigned int clamp_value)
{
	/* Reset max-clamp retention only on idle exit */
	if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
		return;

	uclamp_rq_set(rq, clamp_id, clamp_value);
}
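
/*
 * Worked example (a sketch, assuming the default UCLAMP_BUCKETS of 5):
 * UCLAMP_BUCKET_DELTA = DIV_ROUND_CLOSEST(1024, 5) = 205, so a requested
 * clamp value of 300 maps to bucket_id 300/205 = 1, while 1024 maps to
 * 1024/205 = 4, i.e. the last bucket. uclamp_rq_max_value() below scans
 * these buckets top-down for the highest one that still has tasks.
 */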
static inline
unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
				 unsigned int clamp_value)
{
	struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
	int bucket_id = UCLAMP_BUCKETS - 1;

	/*
	 * Since both min and max clamps are max aggregated, find the
	 * topmost bucket with tasks in it.
	 */
	for ( ; bucket_id >= 0; bucket_id--) {
		if (!bucket[bucket_id].tasks)
			continue;
		return bucket[bucket_id].value;
	}

	/* No tasks -- default clamp values */
	return uclamp_idle_value(rq, clamp_id, clamp_value);
}

static void __uclamp_update_util_min_rt_default(struct task_struct *p)
{
	unsigned int default_util_min;
	struct uclamp_se *uc_se;

	lockdep_assert_held(&p->pi_lock);

	uc_se = &p->uclamp_req[UCLAMP_MIN];

	/* Only sync if user didn't override the default */
	if (uc_se->user_defined)
		return;

	default_util_min = sysctl_sched_uclamp_util_min_rt_default;
	uclamp_se_set(uc_se, default_util_min, false);
}

static void uclamp_update_util_min_rt_default(struct task_struct *p)
{
	if (!rt_task(p))
		return;

	/* Protect updates to p->uclamp_* */
	guard(task_rq_lock)(p);
	__uclamp_update_util_min_rt_default(p);
}

static inline struct uclamp_se
uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
{
	/* Copy by value as we could modify it */
	struct uclamp_se uc_req = p->uclamp_req[clamp_id];
#ifdef CONFIG_UCLAMP_TASK_GROUP
	unsigned int tg_min, tg_max, value;

	/*
	 * Tasks in autogroups or root task group will be
	 * restricted by system defaults.
	 */
	if (task_group_is_autogroup(task_group(p)))
		return uc_req;
	if (task_group(p) == &root_task_group)
		return uc_req;

	tg_min = task_group(p)->uclamp[UCLAMP_MIN].value;
	tg_max = task_group(p)->uclamp[UCLAMP_MAX].value;
	value = uc_req.value;
	value = clamp(value, tg_min, tg_max);
	uclamp_se_set(&uc_req, value, false);
#endif

	return uc_req;
}

/*
 * The effective clamp bucket index of a task depends on, by increasing
 * priority:
 * - the task specific clamp value, when explicitly requested from userspace
 * - the task group effective clamp value, for tasks not either in the root
 *   group or in an autogroup
 * - the system default clamp value, defined by the sysadmin
 */
static inline struct uclamp_se
uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
{
	struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id);
	struct uclamp_se uc_max = uclamp_default[clamp_id];

	/* System default restrictions always apply */
	if (unlikely(uc_req.value > uc_max.value))
		return uc_max;

	return uc_req;
}

unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
{
	struct uclamp_se uc_eff;

	/* Task currently refcounted: use back-annotated (effective) value */
	if (p->uclamp[clamp_id].active)
		return (unsigned long)p->uclamp[clamp_id].value;

	uc_eff = uclamp_eff_get(p, clamp_id);

	return (unsigned long)uc_eff.value;
}

/*
 * When a task is enqueued on a rq, the clamp bucket currently defined by the
 * task's uclamp::bucket_id is refcounted on that rq. This also immediately
 * updates the rq's clamp value if required.
 *
 * Tasks can have a task-specific value requested from user-space, track
 * within each bucket the maximum value for tasks refcounted in it.
 * This "local max aggregation" allows to track the exact "requested" value
 * for each bucket when all its RUNNABLE tasks require the same clamp.
 */
static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
				    enum uclamp_id clamp_id)
{
	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
	struct uclamp_bucket *bucket;

	lockdep_assert_rq_held(rq);

	/* Update task effective clamp */
	p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);

	bucket = &uc_rq->bucket[uc_se->bucket_id];
	bucket->tasks++;
	uc_se->active = true;

	uclamp_idle_reset(rq, clamp_id, uc_se->value);

	/*
	 * Local max aggregation: rq buckets always track the max
	 * "requested" clamp value of its RUNNABLE tasks.
	 */
	if (bucket->tasks == 1 || uc_se->value > bucket->value)
		bucket->value = uc_se->value;

	if (uc_se->value > uclamp_rq_get(rq, clamp_id))
		uclamp_rq_set(rq, clamp_id, uc_se->value);
}

/*
 * When a task is dequeued from a rq, the clamp bucket refcounted by the task
 * is released. If this is the last task reference counting the rq's max
 * active clamp value, then the rq's clamp value is updated.
 *
 * Both refcounted tasks and rq's cached clamp values are expected to be
 * always valid. If it's detected they are not, as defensive programming,
 * enforce the expected state and warn.
 */
static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
				    enum uclamp_id clamp_id)
{
	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
	struct uclamp_bucket *bucket;
	unsigned int bkt_clamp;
	unsigned int rq_clamp;

	lockdep_assert_rq_held(rq);

	/*
	 * If sched_uclamp_used was enabled after task @p was enqueued,
	 * we could end up with unbalanced call to uclamp_rq_dec_id().
	 *
	 * In this case the uc_se->active flag should be false since no uclamp
	 * accounting was performed at enqueue time and we can just return
	 * here.
	 *
	 * Need to be careful of the following enqueue/dequeue ordering
	 * problem too
	 *
	 *	enqueue(taskA)
	 *	// sched_uclamp_used gets enabled
	 *	enqueue(taskB)
	 *	dequeue(taskA)
	 *	// Must not decrement bucket->tasks here
	 *	dequeue(taskB)
	 *
	 * where we could end up with stale data in uc_se and
	 * bucket[uc_se->bucket_id].
	 *
	 * The following check here eliminates the possibility of such race.
	 */
	if (unlikely(!uc_se->active))
		return;

	bucket = &uc_rq->bucket[uc_se->bucket_id];

	SCHED_WARN_ON(!bucket->tasks);
	if (likely(bucket->tasks))
		bucket->tasks--;

	uc_se->active = false;

	/*
	 * Keep "local max aggregation" simple and accept to (possibly)
	 * overboost some RUNNABLE tasks in the same bucket.
	 * The rq clamp bucket value is reset to its base value whenever
	 * there are no more RUNNABLE tasks refcounting it.
	 */
	if (likely(bucket->tasks))
		return;

	rq_clamp = uclamp_rq_get(rq, clamp_id);
	/*
	 * Defensive programming: this should never happen. If it happens,
	 * e.g. due to future modification, warn and fixup the expected value.
	 */
	SCHED_WARN_ON(bucket->value > rq_clamp);
	if (bucket->value >= rq_clamp) {
		bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
		uclamp_rq_set(rq, clamp_id, bkt_clamp);
	}
}

static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
{
	enum uclamp_id clamp_id;

	/*
	 * Avoid any overhead until uclamp is actually used by the userspace.
	 *
	 * The condition is constructed such that a NOP is generated when
	 * sched_uclamp_used is disabled.
	 */
	if (!static_branch_unlikely(&sched_uclamp_used))
		return;

	if (unlikely(!p->sched_class->uclamp_enabled))
		return;

	for_each_clamp_id(clamp_id)
		uclamp_rq_inc_id(rq, p, clamp_id);

	/* Reset clamp idle holding when there is one RUNNABLE task */
	if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
		rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
}

static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
{
	enum uclamp_id clamp_id;

	/*
	 * Avoid any overhead until uclamp is actually used by the userspace.
	 *
	 * The condition is constructed such that a NOP is generated when
	 * sched_uclamp_used is disabled.
	 */
	if (!static_branch_unlikely(&sched_uclamp_used))
		return;

	if (unlikely(!p->sched_class->uclamp_enabled))
		return;

	for_each_clamp_id(clamp_id)
		uclamp_rq_dec_id(rq, p, clamp_id);
}

static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p,
				      enum uclamp_id clamp_id)
{
	if (!p->uclamp[clamp_id].active)
		return;

	uclamp_rq_dec_id(rq, p, clamp_id);
	uclamp_rq_inc_id(rq, p, clamp_id);

	/*
	 * Make sure to clear the idle flag if we've transiently reached 0
	 * active tasks on rq.
	 */
	if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE))
		rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
}

static inline void
uclamp_update_active(struct task_struct *p)
{
	enum uclamp_id clamp_id;
	struct rq_flags rf;
	struct rq *rq;

	/*
	 * Lock the task and the rq where the task is (or was) queued.
	 *
	 * We might lock the (previous) rq of a !RUNNABLE task, but that's the
	 * price to pay to safely serialize util_{min,max} updates with
	 * enqueues, dequeues and migration operations.
	 * This is the same locking schema used by __set_cpus_allowed_ptr().
	 */
	rq = task_rq_lock(p, &rf);

	/*
	 * Setting the clamp bucket is serialized by task_rq_lock().
	 * If the task is not yet RUNNABLE and its task_struct is not
	 * affecting a valid clamp bucket, the next time it's enqueued,
	 * it will already see the updated clamp bucket value.
	 */
	for_each_clamp_id(clamp_id)
		uclamp_rq_reinc_id(rq, p, clamp_id);

	task_rq_unlock(rq, p, &rf);
}

#ifdef CONFIG_UCLAMP_TASK_GROUP
static inline void
uclamp_update_active_tasks(struct cgroup_subsys_state *css)
{
	struct css_task_iter it;
	struct task_struct *p;

	css_task_iter_start(css, 0, &it);
	while ((p = css_task_iter_next(&it)))
		uclamp_update_active(p);
	css_task_iter_end(&it);
}

static void cpu_util_update_eff(struct cgroup_subsys_state *css);
#endif

#ifdef CONFIG_SYSCTL
#ifdef CONFIG_UCLAMP_TASK
#ifdef CONFIG_UCLAMP_TASK_GROUP
static void uclamp_update_root_tg(void)
{
	struct task_group *tg = &root_task_group;

	uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN],
		      sysctl_sched_uclamp_util_min, false);
	uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
		      sysctl_sched_uclamp_util_max, false);

	guard(rcu)();
	cpu_util_update_eff(&root_task_group.css);
}
#else
static void uclamp_update_root_tg(void) { }
#endif

static void uclamp_sync_util_min_rt_default(void)
{
	struct task_struct *g, *p;

	/*
	 * copy_process()			sysctl_uclamp
	 *					  uclamp_min_rt = X;
	 *   write_lock(&tasklist_lock)		  read_lock(&tasklist_lock)
	 *   // link thread			  smp_mb__after_spinlock()
	 *   write_unlock(&tasklist_lock)	  read_unlock(&tasklist_lock);
	 *   sched_post_fork()			  for_each_process_thread()
	 *     __uclamp_sync_rt()		    __uclamp_sync_rt()
	 *
	 * Ensures that either sched_post_fork() will observe the new
	 * uclamp_min_rt or for_each_process_thread() will observe the new
	 * task.
	 */
	read_lock(&tasklist_lock);
	smp_mb__after_spinlock();
	read_unlock(&tasklist_lock);

	guard(rcu)();
	for_each_process_thread(g, p)
		uclamp_update_util_min_rt_default(p);
}

static int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{
	bool update_root_tg = false;
	int old_min, old_max, old_min_rt;
	int result;

	guard(mutex)(&uclamp_mutex);

	old_min = sysctl_sched_uclamp_util_min;
	old_max = sysctl_sched_uclamp_util_max;
	old_min_rt = sysctl_sched_uclamp_util_min_rt_default;

	result = proc_dointvec(table, write, buffer, lenp, ppos);
	if (result)
		goto undo;
	if (!write)
		return 0;

	if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
	    sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE	||
	    sysctl_sched_uclamp_util_min_rt_default > SCHED_CAPACITY_SCALE) {

		result = -EINVAL;
		goto undo;
	}

	if (old_min != sysctl_sched_uclamp_util_min) {
		uclamp_se_set(&uclamp_default[UCLAMP_MIN],
			      sysctl_sched_uclamp_util_min, false);
		update_root_tg = true;
	}
	if (old_max != sysctl_sched_uclamp_util_max) {
		uclamp_se_set(&uclamp_default[UCLAMP_MAX],
			      sysctl_sched_uclamp_util_max, false);
		update_root_tg = true;
	}

	if (update_root_tg) {
		static_branch_enable(&sched_uclamp_used);
		uclamp_update_root_tg();
	}

	if (old_min_rt != sysctl_sched_uclamp_util_min_rt_default) {
		static_branch_enable(&sched_uclamp_used);
		uclamp_sync_util_min_rt_default();
	}

	/*
	 * We update all RUNNABLE tasks only when task groups are in use.
	 * Otherwise, keep it simple and do just a lazy update at each next
	 * task enqueue time.
	 */
	return 0;

undo:
	sysctl_sched_uclamp_util_min = old_min;
	sysctl_sched_uclamp_util_max = old_max;
	sysctl_sched_uclamp_util_min_rt_default = old_min_rt;
	return result;
}
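
/*
 * Illustrative only (assuming the usual procfs names of these sysctls): an
 * admin lowering the global maximum clamp, e.g.
 *
 *	echo 512 > /proc/sys/kernel/sched_util_clamp_max
 *
 * goes through the handler above, updates uclamp_default[] and, when task
 * groups are in use, propagates the new limit via uclamp_update_root_tg().
 */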
#endif
#endif

static int uclamp_validate(struct task_struct *p,
			   const struct sched_attr *attr)
{
	int util_min = p->uclamp_req[UCLAMP_MIN].value;
	int util_max = p->uclamp_req[UCLAMP_MAX].value;

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
		util_min = attr->sched_util_min;

		if (util_min + 1 > SCHED_CAPACITY_SCALE + 1)
			return -EINVAL;
	}

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {
		util_max = attr->sched_util_max;

		if (util_max + 1 > SCHED_CAPACITY_SCALE + 1)
			return -EINVAL;
	}

	if (util_min != -1 && util_max != -1 && util_min > util_max)
		return -EINVAL;

	/*
	 * We have valid uclamp attributes; make sure uclamp is enabled.
	 *
	 * We need to do that here, because enabling static branches is a
	 * blocking operation which obviously cannot be done while holding
	 * scheduler locks.
	 */
	static_branch_enable(&sched_uclamp_used);

	return 0;
}

static bool uclamp_reset(const struct sched_attr *attr,
			 enum uclamp_id clamp_id,
			 struct uclamp_se *uc_se)
{
	/* Reset on sched class change for a non user-defined clamp value. */
	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)) &&
	    !uc_se->user_defined)
		return true;

	/* Reset on sched_util_{min,max} == -1. */
	if (clamp_id == UCLAMP_MIN &&
	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
	    attr->sched_util_min == -1) {
		return true;
	}

	if (clamp_id == UCLAMP_MAX &&
	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
	    attr->sched_util_max == -1) {
		return true;
	}

	return false;
}

static void __setscheduler_uclamp(struct task_struct *p,
				  const struct sched_attr *attr)
{
	enum uclamp_id clamp_id;

	for_each_clamp_id(clamp_id) {
		struct uclamp_se *uc_se = &p->uclamp_req[clamp_id];
		unsigned int value;

		if (!uclamp_reset(attr, clamp_id, uc_se))
			continue;

		/*
		 * RT by default have a 100% boost value that could be modified
		 * at runtime.
		 */
		if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
			value = sysctl_sched_uclamp_util_min_rt_default;
		else
			value = uclamp_none(clamp_id);

		uclamp_se_set(uc_se, value, false);

	}

	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)))
		return;

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
	    attr->sched_util_min != -1) {
		uclamp_se_set(&p->uclamp_req[UCLAMP_MIN],
			      attr->sched_util_min, true);
	}

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
	    attr->sched_util_max != -1) {
		uclamp_se_set(&p->uclamp_req[UCLAMP_MAX],
			      attr->sched_util_max, true);
	}
}

static void uclamp_fork(struct task_struct *p)
{
	enum uclamp_id clamp_id;

	/*
	 * We don't need to hold task_rq_lock() when updating p->uclamp_* here
	 * as the task is still at its early fork stages.
2011 */ 2012 for_each_clamp_id(clamp_id) 2013 p->uclamp[clamp_id].active = false; 2014 2015 if (likely(!p->sched_reset_on_fork)) 2016 return; 2017 2018 for_each_clamp_id(clamp_id) { 2019 uclamp_se_set(&p->uclamp_req[clamp_id], 2020 uclamp_none(clamp_id), false); 2021 } 2022 } 2023 2024 static void uclamp_post_fork(struct task_struct *p) 2025 { 2026 uclamp_update_util_min_rt_default(p); 2027 } 2028 2029 static void __init init_uclamp_rq(struct rq *rq) 2030 { 2031 enum uclamp_id clamp_id; 2032 struct uclamp_rq *uc_rq = rq->uclamp; 2033 2034 for_each_clamp_id(clamp_id) { 2035 uc_rq[clamp_id] = (struct uclamp_rq) { 2036 .value = uclamp_none(clamp_id) 2037 }; 2038 } 2039 2040 rq->uclamp_flags = UCLAMP_FLAG_IDLE; 2041 } 2042 2043 static void __init init_uclamp(void) 2044 { 2045 struct uclamp_se uc_max = {}; 2046 enum uclamp_id clamp_id; 2047 int cpu; 2048 2049 for_each_possible_cpu(cpu) 2050 init_uclamp_rq(cpu_rq(cpu)); 2051 2052 for_each_clamp_id(clamp_id) { 2053 uclamp_se_set(&init_task.uclamp_req[clamp_id], 2054 uclamp_none(clamp_id), false); 2055 } 2056 2057 /* System defaults allow max clamp values for both indexes */ 2058 uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false); 2059 for_each_clamp_id(clamp_id) { 2060 uclamp_default[clamp_id] = uc_max; 2061 #ifdef CONFIG_UCLAMP_TASK_GROUP 2062 root_task_group.uclamp_req[clamp_id] = uc_max; 2063 root_task_group.uclamp[clamp_id] = uc_max; 2064 #endif 2065 } 2066 } 2067 2068 #else /* CONFIG_UCLAMP_TASK */ 2069 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { } 2070 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { } 2071 static inline int uclamp_validate(struct task_struct *p, 2072 const struct sched_attr *attr) 2073 { 2074 return -EOPNOTSUPP; 2075 } 2076 static void __setscheduler_uclamp(struct task_struct *p, 2077 const struct sched_attr *attr) { } 2078 static inline void uclamp_fork(struct task_struct *p) { } 2079 static inline void uclamp_post_fork(struct task_struct *p) { } 2080 static inline void init_uclamp(void) { } 2081 #endif /* CONFIG_UCLAMP_TASK */ 2082 2083 bool sched_task_on_rq(struct task_struct *p) 2084 { 2085 return task_on_rq_queued(p); 2086 } 2087 2088 unsigned long get_wchan(struct task_struct *p) 2089 { 2090 unsigned long ip = 0; 2091 unsigned int state; 2092 2093 if (!p || p == current) 2094 return 0; 2095 2096 /* Only get wchan if task is blocked and we can keep it that way. 
*/ 2097 raw_spin_lock_irq(&p->pi_lock); 2098 state = READ_ONCE(p->__state); 2099 smp_rmb(); /* see try_to_wake_up() */ 2100 if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq) 2101 ip = __get_wchan(p); 2102 raw_spin_unlock_irq(&p->pi_lock); 2103 2104 return ip; 2105 } 2106 2107 static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags) 2108 { 2109 if (!(flags & ENQUEUE_NOCLOCK)) 2110 update_rq_clock(rq); 2111 2112 if (!(flags & ENQUEUE_RESTORE)) { 2113 sched_info_enqueue(rq, p); 2114 psi_enqueue(p, (flags & ENQUEUE_WAKEUP) && !(flags & ENQUEUE_MIGRATED)); 2115 } 2116 2117 uclamp_rq_inc(rq, p); 2118 p->sched_class->enqueue_task(rq, p, flags); 2119 2120 if (sched_core_enabled(rq)) 2121 sched_core_enqueue(rq, p); 2122 } 2123 2124 static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags) 2125 { 2126 if (sched_core_enabled(rq)) 2127 sched_core_dequeue(rq, p, flags); 2128 2129 if (!(flags & DEQUEUE_NOCLOCK)) 2130 update_rq_clock(rq); 2131 2132 if (!(flags & DEQUEUE_SAVE)) { 2133 sched_info_dequeue(rq, p); 2134 psi_dequeue(p, flags & DEQUEUE_SLEEP); 2135 } 2136 2137 uclamp_rq_dec(rq, p); 2138 p->sched_class->dequeue_task(rq, p, flags); 2139 } 2140 2141 void activate_task(struct rq *rq, struct task_struct *p, int flags) 2142 { 2143 if (task_on_rq_migrating(p)) 2144 flags |= ENQUEUE_MIGRATED; 2145 if (flags & ENQUEUE_MIGRATED) 2146 sched_mm_cid_migrate_to(rq, p); 2147 2148 enqueue_task(rq, p, flags); 2149 2150 WRITE_ONCE(p->on_rq, TASK_ON_RQ_QUEUED); 2151 ASSERT_EXCLUSIVE_WRITER(p->on_rq); 2152 } 2153 2154 void deactivate_task(struct rq *rq, struct task_struct *p, int flags) 2155 { 2156 WRITE_ONCE(p->on_rq, (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING); 2157 ASSERT_EXCLUSIVE_WRITER(p->on_rq); 2158 2159 dequeue_task(rq, p, flags); 2160 } 2161 2162 static inline int __normal_prio(int policy, int rt_prio, int nice) 2163 { 2164 int prio; 2165 2166 if (dl_policy(policy)) 2167 prio = MAX_DL_PRIO - 1; 2168 else if (rt_policy(policy)) 2169 prio = MAX_RT_PRIO - 1 - rt_prio; 2170 else 2171 prio = NICE_TO_PRIO(nice); 2172 2173 return prio; 2174 } 2175 2176 /* 2177 * Calculate the expected normal priority: i.e. priority 2178 * without taking RT-inheritance into account. Might be 2179 * boosted by interactivity modifiers. Changes upon fork, 2180 * setprio syscalls, and whenever the interactivity 2181 * estimator recalculates. 2182 */ 2183 static inline int normal_prio(struct task_struct *p) 2184 { 2185 return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio)); 2186 } 2187 2188 /* 2189 * Calculate the current priority, i.e. the priority 2190 * taken into account by the scheduler. This value might 2191 * be boosted by RT tasks, or might be boosted by 2192 * interactivity modifiers. Will be RT if the task got 2193 * RT-boosted. If not then it returns p->normal_prio. 2194 */ 2195 static int effective_prio(struct task_struct *p) 2196 { 2197 p->normal_prio = normal_prio(p); 2198 /* 2199 * If we are RT tasks or we were boosted to RT priority, 2200 * keep the priority unchanged. Otherwise, update priority 2201 * to the normal priority: 2202 */ 2203 if (!rt_prio(p->prio)) 2204 return p->normal_prio; 2205 return p->prio; 2206 } 2207 2208 /** 2209 * task_curr - is this task currently executing on a CPU? 2210 * @p: the task in question. 2211 * 2212 * Return: 1 if the task is currently executing. 0 otherwise. 
2213 */ 2214 inline int task_curr(const struct task_struct *p) 2215 { 2216 return cpu_curr(task_cpu(p)) == p; 2217 } 2218 2219 /* 2220 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock, 2221 * use the balance_callback list if you want balancing. 2222 * 2223 * this means any call to check_class_changed() must be followed by a call to 2224 * balance_callback(). 2225 */ 2226 static inline void check_class_changed(struct rq *rq, struct task_struct *p, 2227 const struct sched_class *prev_class, 2228 int oldprio) 2229 { 2230 if (prev_class != p->sched_class) { 2231 if (prev_class->switched_from) 2232 prev_class->switched_from(rq, p); 2233 2234 p->sched_class->switched_to(rq, p); 2235 } else if (oldprio != p->prio || dl_task(p)) 2236 p->sched_class->prio_changed(rq, p, oldprio); 2237 } 2238 2239 void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags) 2240 { 2241 if (p->sched_class == rq->curr->sched_class) 2242 rq->curr->sched_class->wakeup_preempt(rq, p, flags); 2243 else if (sched_class_above(p->sched_class, rq->curr->sched_class)) 2244 resched_curr(rq); 2245 2246 /* 2247 * A queue event has occurred, and we're going to schedule. In 2248 * this case, we can save a useless back to back clock update. 2249 */ 2250 if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr)) 2251 rq_clock_skip_update(rq); 2252 } 2253 2254 static __always_inline 2255 int __task_state_match(struct task_struct *p, unsigned int state) 2256 { 2257 if (READ_ONCE(p->__state) & state) 2258 return 1; 2259 2260 if (READ_ONCE(p->saved_state) & state) 2261 return -1; 2262 2263 return 0; 2264 } 2265 2266 static __always_inline 2267 int task_state_match(struct task_struct *p, unsigned int state) 2268 { 2269 /* 2270 * Serialize against current_save_and_set_rtlock_wait_state(), 2271 * current_restore_rtlock_saved_state(), and __refrigerator(). 2272 */ 2273 guard(raw_spinlock_irq)(&p->pi_lock); 2274 return __task_state_match(p, state); 2275 } 2276 2277 /* 2278 * wait_task_inactive - wait for a thread to unschedule. 2279 * 2280 * Wait for the thread to block in any of the states set in @match_state. 2281 * If it changes, i.e. @p might have woken up, then return zero. When we 2282 * succeed in waiting for @p to be off its CPU, we return a positive number 2283 * (its total switch count). If a second call a short while later returns the 2284 * same number, the caller can be sure that @p has remained unscheduled the 2285 * whole time. 2286 * 2287 * The caller must ensure that the task *will* unschedule sometime soon, 2288 * else this function might spin for a *long* time. This function can't 2289 * be called with interrupts off, or it may introduce deadlock with 2290 * smp_call_function() if an IPI is sent by the same process we are 2291 * waiting to become inactive. 2292 */ 2293 unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state) 2294 { 2295 int running, queued, match; 2296 struct rq_flags rf; 2297 unsigned long ncsw; 2298 struct rq *rq; 2299 2300 for (;;) { 2301 /* 2302 * We do the initial early heuristics without holding 2303 * any task-queue locks at all. We'll only try to get 2304 * the runqueue lock when things look like they will 2305 * work out! 2306 */ 2307 rq = task_rq(p); 2308 2309 /* 2310 * If the task is actively running on another CPU 2311 * still, just relax and busy-wait without holding 2312 * any locks. 2313 * 2314 * NOTE! Since we don't hold any locks, it's not 2315 * even sure that "rq" stays as the right runqueue! 
2316 * But we don't care, since "task_on_cpu()" will 2317 * return false if the runqueue has changed and p 2318 * is actually now running somewhere else! 2319 */ 2320 while (task_on_cpu(rq, p)) { 2321 if (!task_state_match(p, match_state)) 2322 return 0; 2323 cpu_relax(); 2324 } 2325 2326 /* 2327 * Ok, time to look more closely! We need the rq 2328 * lock now, to be *sure*. If we're wrong, we'll 2329 * just go back and repeat. 2330 */ 2331 rq = task_rq_lock(p, &rf); 2332 trace_sched_wait_task(p); 2333 running = task_on_cpu(rq, p); 2334 queued = task_on_rq_queued(p); 2335 ncsw = 0; 2336 if ((match = __task_state_match(p, match_state))) { 2337 /* 2338 * When matching on p->saved_state, consider this task 2339 * still queued so it will wait. 2340 */ 2341 if (match < 0) 2342 queued = 1; 2343 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ 2344 } 2345 task_rq_unlock(rq, p, &rf); 2346 2347 /* 2348 * If it changed from the expected state, bail out now. 2349 */ 2350 if (unlikely(!ncsw)) 2351 break; 2352 2353 /* 2354 * Was it really running after all now that we 2355 * checked with the proper locks actually held? 2356 * 2357 * Oops. Go back and try again.. 2358 */ 2359 if (unlikely(running)) { 2360 cpu_relax(); 2361 continue; 2362 } 2363 2364 /* 2365 * It's not enough that it's not actively running, 2366 * it must be off the runqueue _entirely_, and not 2367 * preempted! 2368 * 2369 * So if it was still runnable (but just not actively 2370 * running right now), it's preempted, and we should 2371 * yield - it could be a while. 2372 */ 2373 if (unlikely(queued)) { 2374 ktime_t to = NSEC_PER_SEC / HZ; 2375 2376 set_current_state(TASK_UNINTERRUPTIBLE); 2377 schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD); 2378 continue; 2379 } 2380 2381 /* 2382 * Ahh, all good. It wasn't running, and it wasn't 2383 * runnable, which means that it will never become 2384 * running in the future either. We're all done! 2385 */ 2386 break; 2387 } 2388 2389 return ncsw; 2390 } 2391 2392 #ifdef CONFIG_SMP 2393 2394 static void 2395 __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx); 2396 2397 static int __set_cpus_allowed_ptr(struct task_struct *p, 2398 struct affinity_context *ctx); 2399 2400 static void migrate_disable_switch(struct rq *rq, struct task_struct *p) 2401 { 2402 struct affinity_context ac = { 2403 .new_mask = cpumask_of(rq->cpu), 2404 .flags = SCA_MIGRATE_DISABLE, 2405 }; 2406 2407 if (likely(!p->migration_disabled)) 2408 return; 2409 2410 if (p->cpus_ptr != &p->cpus_mask) 2411 return; 2412 2413 /* 2414 * Violates locking rules! see comment in __do_set_cpus_allowed(). 2415 */ 2416 __do_set_cpus_allowed(p, &ac); 2417 } 2418 2419 void migrate_disable(void) 2420 { 2421 struct task_struct *p = current; 2422 2423 if (p->migration_disabled) { 2424 p->migration_disabled++; 2425 return; 2426 } 2427 2428 guard(preempt)(); 2429 this_rq()->nr_pinned++; 2430 p->migration_disabled = 1; 2431 } 2432 EXPORT_SYMBOL_GPL(migrate_disable); 2433 2434 void migrate_enable(void) 2435 { 2436 struct task_struct *p = current; 2437 struct affinity_context ac = { 2438 .new_mask = &p->cpus_mask, 2439 .flags = SCA_MIGRATE_ENABLE, 2440 }; 2441 2442 if (p->migration_disabled > 1) { 2443 p->migration_disabled--; 2444 return; 2445 } 2446 2447 if (WARN_ON_ONCE(!p->migration_disabled)) 2448 return; 2449 2450 /* 2451 * Ensure stop_task runs either before or after this, and that 2452 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule(). 
2453 */ 2454 guard(preempt)(); 2455 if (p->cpus_ptr != &p->cpus_mask) 2456 __set_cpus_allowed_ptr(p, &ac); 2457 /* 2458 * Mustn't clear migration_disabled() until cpus_ptr points back at the 2459 * regular cpus_mask, otherwise things that race (eg. 2460 * select_fallback_rq) get confused. 2461 */ 2462 barrier(); 2463 p->migration_disabled = 0; 2464 this_rq()->nr_pinned--; 2465 } 2466 EXPORT_SYMBOL_GPL(migrate_enable); 2467 2468 static inline bool rq_has_pinned_tasks(struct rq *rq) 2469 { 2470 return rq->nr_pinned; 2471 } 2472 2473 /* 2474 * Per-CPU kthreads are allowed to run on !active && online CPUs, see 2475 * __set_cpus_allowed_ptr() and select_fallback_rq(). 2476 */ 2477 static inline bool is_cpu_allowed(struct task_struct *p, int cpu) 2478 { 2479 /* When not in the task's cpumask, no point in looking further. */ 2480 if (!cpumask_test_cpu(cpu, p->cpus_ptr)) 2481 return false; 2482 2483 /* migrate_disabled() must be allowed to finish. */ 2484 if (is_migration_disabled(p)) 2485 return cpu_online(cpu); 2486 2487 /* Non kernel threads are not allowed during either online or offline. */ 2488 if (!(p->flags & PF_KTHREAD)) 2489 return cpu_active(cpu) && task_cpu_possible(cpu, p); 2490 2491 /* KTHREAD_IS_PER_CPU is always allowed. */ 2492 if (kthread_is_per_cpu(p)) 2493 return cpu_online(cpu); 2494 2495 /* Regular kernel threads don't get to stay during offline. */ 2496 if (cpu_dying(cpu)) 2497 return false; 2498 2499 /* But are allowed during online. */ 2500 return cpu_online(cpu); 2501 } 2502 2503 /* 2504 * This is how migration works: 2505 * 2506 * 1) we invoke migration_cpu_stop() on the target CPU using 2507 * stop_one_cpu(). 2508 * 2) stopper starts to run (implicitly forcing the migrated thread 2509 * off the CPU) 2510 * 3) it checks whether the migrated task is still in the wrong runqueue. 2511 * 4) if it's in the wrong runqueue then the migration thread removes 2512 * it and puts it into the right queue. 2513 * 5) stopper completes and stop_one_cpu() returns and the migration 2514 * is done. 2515 */ 2516 2517 /* 2518 * move_queued_task - move a queued task to new rq. 2519 * 2520 * Returns (locked) new rq. Old rq's lock is released. 2521 */ 2522 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf, 2523 struct task_struct *p, int new_cpu) 2524 { 2525 lockdep_assert_rq_held(rq); 2526 2527 deactivate_task(rq, p, DEQUEUE_NOCLOCK); 2528 set_task_cpu(p, new_cpu); 2529 rq_unlock(rq, rf); 2530 2531 rq = cpu_rq(new_cpu); 2532 2533 rq_lock(rq, rf); 2534 WARN_ON_ONCE(task_cpu(p) != new_cpu); 2535 activate_task(rq, p, 0); 2536 wakeup_preempt(rq, p, 0); 2537 2538 return rq; 2539 } 2540 2541 struct migration_arg { 2542 struct task_struct *task; 2543 int dest_cpu; 2544 struct set_affinity_pending *pending; 2545 }; 2546 2547 /* 2548 * @refs: number of wait_for_completion() 2549 * @stop_pending: is @stop_work in use 2550 */ 2551 struct set_affinity_pending { 2552 refcount_t refs; 2553 unsigned int stop_pending; 2554 struct completion done; 2555 struct cpu_stop_work stop_work; 2556 struct migration_arg arg; 2557 }; 2558 2559 /* 2560 * Move (not current) task off this CPU, onto the destination CPU. We're doing 2561 * this because either it can't run here any more (set_cpus_allowed() 2562 * away from this CPU, or CPU going down), or because we're 2563 * attempting to rebalance this task on exec (sched_exec). 2564 * 2565 * So we race with normal scheduler movements, but that's OK, as long 2566 * as the task is no longer on this CPU. 
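 *
 * Illustrative sketch only: a caller such as sched_exec() drives the
 * sequence described in "This is how migration works" above roughly as
 *
 *	struct migration_arg arg = { .task = p, .dest_cpu = dest_cpu };
 *
 *	stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
 *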
2567 */ 2568 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf, 2569 struct task_struct *p, int dest_cpu) 2570 { 2571 /* Affinity changed (again). */ 2572 if (!is_cpu_allowed(p, dest_cpu)) 2573 return rq; 2574 2575 rq = move_queued_task(rq, rf, p, dest_cpu); 2576 2577 return rq; 2578 } 2579 2580 /* 2581 * migration_cpu_stop - this will be executed by a highprio stopper thread 2582 * and performs thread migration by bumping thread off CPU then 2583 * 'pushing' onto another runqueue. 2584 */ 2585 static int migration_cpu_stop(void *data) 2586 { 2587 struct migration_arg *arg = data; 2588 struct set_affinity_pending *pending = arg->pending; 2589 struct task_struct *p = arg->task; 2590 struct rq *rq = this_rq(); 2591 bool complete = false; 2592 struct rq_flags rf; 2593 2594 /* 2595 * The original target CPU might have gone down and we might 2596 * be on another CPU but it doesn't matter. 2597 */ 2598 local_irq_save(rf.flags); 2599 /* 2600 * We need to explicitly wake pending tasks before running 2601 * __migrate_task() such that we will not miss enforcing cpus_ptr 2602 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test. 2603 */ 2604 flush_smp_call_function_queue(); 2605 2606 raw_spin_lock(&p->pi_lock); 2607 rq_lock(rq, &rf); 2608 2609 /* 2610 * If we were passed a pending, then ->stop_pending was set, thus 2611 * p->migration_pending must have remained stable. 2612 */ 2613 WARN_ON_ONCE(pending && pending != p->migration_pending); 2614 2615 /* 2616 * If task_rq(p) != rq, it cannot be migrated here, because we're 2617 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because 2618 * we're holding p->pi_lock. 2619 */ 2620 if (task_rq(p) == rq) { 2621 if (is_migration_disabled(p)) 2622 goto out; 2623 2624 if (pending) { 2625 p->migration_pending = NULL; 2626 complete = true; 2627 2628 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) 2629 goto out; 2630 } 2631 2632 if (task_on_rq_queued(p)) { 2633 update_rq_clock(rq); 2634 rq = __migrate_task(rq, &rf, p, arg->dest_cpu); 2635 } else { 2636 p->wake_cpu = arg->dest_cpu; 2637 } 2638 2639 /* 2640 * XXX __migrate_task() can fail, at which point we might end 2641 * up running on a dodgy CPU, AFAICT this can only happen 2642 * during CPU hotplug, at which point we'll get pushed out 2643 * anyway, so it's probably not a big deal. 2644 */ 2645 2646 } else if (pending) { 2647 /* 2648 * This happens when we get migrated between migrate_enable()'s 2649 * preempt_enable() and scheduling the stopper task. At that 2650 * point we're a regular task again and not current anymore. 2651 * 2652 * A !PREEMPT kernel has a giant hole here, which makes it far 2653 * more likely. 2654 */ 2655 2656 /* 2657 * The task moved before the stopper got to run. We're holding 2658 * ->pi_lock, so the allowed mask is stable - if it got 2659 * somewhere allowed, we're done. 2660 */ 2661 if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) { 2662 p->migration_pending = NULL; 2663 complete = true; 2664 goto out; 2665 } 2666 2667 /* 2668 * When migrate_enable() hits a rq mis-match we can't reliably 2669 * determine is_migration_disabled() and so have to chase after 2670 * it. 
2671 */ 2672 WARN_ON_ONCE(!pending->stop_pending); 2673 preempt_disable(); 2674 task_rq_unlock(rq, p, &rf); 2675 stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop, 2676 &pending->arg, &pending->stop_work); 2677 preempt_enable(); 2678 return 0; 2679 } 2680 out: 2681 if (pending) 2682 pending->stop_pending = false; 2683 task_rq_unlock(rq, p, &rf); 2684 2685 if (complete) 2686 complete_all(&pending->done); 2687 2688 return 0; 2689 } 2690 2691 int push_cpu_stop(void *arg) 2692 { 2693 struct rq *lowest_rq = NULL, *rq = this_rq(); 2694 struct task_struct *p = arg; 2695 2696 raw_spin_lock_irq(&p->pi_lock); 2697 raw_spin_rq_lock(rq); 2698 2699 if (task_rq(p) != rq) 2700 goto out_unlock; 2701 2702 if (is_migration_disabled(p)) { 2703 p->migration_flags |= MDF_PUSH; 2704 goto out_unlock; 2705 } 2706 2707 p->migration_flags &= ~MDF_PUSH; 2708 2709 if (p->sched_class->find_lock_rq) 2710 lowest_rq = p->sched_class->find_lock_rq(p, rq); 2711 2712 if (!lowest_rq) 2713 goto out_unlock; 2714 2715 // XXX validate p is still the highest prio task 2716 if (task_rq(p) == rq) { 2717 deactivate_task(rq, p, 0); 2718 set_task_cpu(p, lowest_rq->cpu); 2719 activate_task(lowest_rq, p, 0); 2720 resched_curr(lowest_rq); 2721 } 2722 2723 double_unlock_balance(rq, lowest_rq); 2724 2725 out_unlock: 2726 rq->push_busy = false; 2727 raw_spin_rq_unlock(rq); 2728 raw_spin_unlock_irq(&p->pi_lock); 2729 2730 put_task_struct(p); 2731 return 0; 2732 } 2733 2734 /* 2735 * sched_class::set_cpus_allowed must do the below, but is not required to 2736 * actually call this function. 2737 */ 2738 void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx) 2739 { 2740 if (ctx->flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) { 2741 p->cpus_ptr = ctx->new_mask; 2742 return; 2743 } 2744 2745 cpumask_copy(&p->cpus_mask, ctx->new_mask); 2746 p->nr_cpus_allowed = cpumask_weight(ctx->new_mask); 2747 2748 /* 2749 * Swap in a new user_cpus_ptr if SCA_USER flag set 2750 */ 2751 if (ctx->flags & SCA_USER) 2752 swap(p->user_cpus_ptr, ctx->user_mask); 2753 } 2754 2755 static void 2756 __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx) 2757 { 2758 struct rq *rq = task_rq(p); 2759 bool queued, running; 2760 2761 /* 2762 * This here violates the locking rules for affinity, since we're only 2763 * supposed to change these variables while holding both rq->lock and 2764 * p->pi_lock. 2765 * 2766 * HOWEVER, it magically works, because ttwu() is the only code that 2767 * accesses these variables under p->pi_lock and only does so after 2768 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule() 2769 * before finish_task(). 2770 * 2771 * XXX do further audits, this smells like something putrid. 2772 */ 2773 if (ctx->flags & SCA_MIGRATE_DISABLE) 2774 SCHED_WARN_ON(!p->on_cpu); 2775 else 2776 lockdep_assert_held(&p->pi_lock); 2777 2778 queued = task_on_rq_queued(p); 2779 running = task_current(rq, p); 2780 2781 if (queued) { 2782 /* 2783 * Because __kthread_bind() calls this on blocked tasks without 2784 * holding rq->lock. 2785 */ 2786 lockdep_assert_rq_held(rq); 2787 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); 2788 } 2789 if (running) 2790 put_prev_task(rq, p); 2791 2792 p->sched_class->set_cpus_allowed(p, ctx); 2793 2794 if (queued) 2795 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); 2796 if (running) 2797 set_next_task(rq, p); 2798 } 2799 2800 /* 2801 * Used for kthread_bind() and select_fallback_rq(), in both cases the user 2802 * affinity (if any) should be destroyed too. 
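 *
 * For illustration only, the __kthread_bind() flavour of this boils down
 * to (with @p blocked and p->pi_lock held):
 *
 *	do_set_cpus_allowed(p, cpumask_of(cpu));
 *	p->flags |= PF_NO_SETAFFINITY;
 *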
2803 */ 2804 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) 2805 { 2806 struct affinity_context ac = { 2807 .new_mask = new_mask, 2808 .user_mask = NULL, 2809 .flags = SCA_USER, /* clear the user requested mask */ 2810 }; 2811 union cpumask_rcuhead { 2812 cpumask_t cpumask; 2813 struct rcu_head rcu; 2814 }; 2815 2816 __do_set_cpus_allowed(p, &ac); 2817 2818 /* 2819 * Because this is called with p->pi_lock held, it is not possible 2820 * to use kfree() here (when PREEMPT_RT=y), therefore punt to using 2821 * kfree_rcu(). 2822 */ 2823 kfree_rcu((union cpumask_rcuhead *)ac.user_mask, rcu); 2824 } 2825 2826 static cpumask_t *alloc_user_cpus_ptr(int node) 2827 { 2828 /* 2829 * See do_set_cpus_allowed() above for the rcu_head usage. 2830 */ 2831 int size = max_t(int, cpumask_size(), sizeof(struct rcu_head)); 2832 2833 return kmalloc_node(size, GFP_KERNEL, node); 2834 } 2835 2836 int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, 2837 int node) 2838 { 2839 cpumask_t *user_mask; 2840 unsigned long flags; 2841 2842 /* 2843 * Always clear dst->user_cpus_ptr first as their user_cpus_ptr's 2844 * may differ by now due to racing. 2845 */ 2846 dst->user_cpus_ptr = NULL; 2847 2848 /* 2849 * This check is racy and losing the race is a valid situation. 2850 * It is not worth the extra overhead of taking the pi_lock on 2851 * every fork/clone. 2852 */ 2853 if (data_race(!src->user_cpus_ptr)) 2854 return 0; 2855 2856 user_mask = alloc_user_cpus_ptr(node); 2857 if (!user_mask) 2858 return -ENOMEM; 2859 2860 /* 2861 * Use pi_lock to protect content of user_cpus_ptr 2862 * 2863 * Though unlikely, user_cpus_ptr can be reset to NULL by a concurrent 2864 * do_set_cpus_allowed(). 2865 */ 2866 raw_spin_lock_irqsave(&src->pi_lock, flags); 2867 if (src->user_cpus_ptr) { 2868 swap(dst->user_cpus_ptr, user_mask); 2869 cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr); 2870 } 2871 raw_spin_unlock_irqrestore(&src->pi_lock, flags); 2872 2873 if (unlikely(user_mask)) 2874 kfree(user_mask); 2875 2876 return 0; 2877 } 2878 2879 static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p) 2880 { 2881 struct cpumask *user_mask = NULL; 2882 2883 swap(p->user_cpus_ptr, user_mask); 2884 2885 return user_mask; 2886 } 2887 2888 void release_user_cpus_ptr(struct task_struct *p) 2889 { 2890 kfree(clear_user_cpus_ptr(p)); 2891 } 2892 2893 /* 2894 * This function is wildly self concurrent; here be dragons. 2895 * 2896 * 2897 * When given a valid mask, __set_cpus_allowed_ptr() must block until the 2898 * designated task is enqueued on an allowed CPU. If that task is currently 2899 * running, we have to kick it out using the CPU stopper. 2900 * 2901 * Migrate-Disable comes along and tramples all over our nice sandcastle. 2902 * Consider: 2903 * 2904 * Initial conditions: P0->cpus_mask = [0, 1] 2905 * 2906 * P0@CPU0 P1 2907 * 2908 * migrate_disable(); 2909 * <preempted> 2910 * set_cpus_allowed_ptr(P0, [1]); 2911 * 2912 * P1 *cannot* return from this set_cpus_allowed_ptr() call until P0 executes 2913 * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region). 2914 * This means we need the following scheme: 2915 * 2916 * P0@CPU0 P1 2917 * 2918 * migrate_disable(); 2919 * <preempted> 2920 * set_cpus_allowed_ptr(P0, [1]); 2921 * <blocks> 2922 * <resumes> 2923 * migrate_enable(); 2924 * __set_cpus_allowed_ptr(); 2925 * <wakes local stopper> 2926 * `--> <woken on migration completion> 2927 * 2928 * Now the fun stuff: there may be several P1-like tasks, i.e. 
multiple 2929 * concurrent set_cpus_allowed_ptr(P0, [*]) calls. CPU affinity changes of any 2930 * task p are serialized by p->pi_lock, which we can leverage: the one that 2931 * should come into effect at the end of the Migrate-Disable region is the last 2932 * one. This means we only need to track a single cpumask (i.e. p->cpus_mask), 2933 * but we still need to properly signal those waiting tasks at the appropriate 2934 * moment. 2935 * 2936 * This is implemented using struct set_affinity_pending. The first 2937 * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will 2938 * setup an instance of that struct and install it on the targeted task_struct. 2939 * Any and all further callers will reuse that instance. Those then wait for 2940 * a completion signaled at the tail of the CPU stopper callback (1), triggered 2941 * on the end of the Migrate-Disable region (i.e. outermost migrate_enable()). 2942 * 2943 * 2944 * (1) In the cases covered above. There is one more where the completion is 2945 * signaled within affine_move_task() itself: when a subsequent affinity request 2946 * occurs after the stopper bailed out due to the targeted task still being 2947 * Migrate-Disable. Consider: 2948 * 2949 * Initial conditions: P0->cpus_mask = [0, 1] 2950 * 2951 * CPU0 P1 P2 2952 * <P0> 2953 * migrate_disable(); 2954 * <preempted> 2955 * set_cpus_allowed_ptr(P0, [1]); 2956 * <blocks> 2957 * <migration/0> 2958 * migration_cpu_stop() 2959 * is_migration_disabled() 2960 * <bails> 2961 * set_cpus_allowed_ptr(P0, [0, 1]); 2962 * <signal completion> 2963 * <awakes> 2964 * 2965 * Note that the above is safe vs a concurrent migrate_enable(), as any 2966 * pending affinity completion is preceded by an uninstallation of 2967 * p->migration_pending done with p->pi_lock held. 2968 */ 2969 static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf, 2970 int dest_cpu, unsigned int flags) 2971 __releases(rq->lock) 2972 __releases(p->pi_lock) 2973 { 2974 struct set_affinity_pending my_pending = { }, *pending = NULL; 2975 bool stop_pending, complete = false; 2976 2977 /* Can the task run on the task's current CPU? If so, we're done */ 2978 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) { 2979 struct task_struct *push_task = NULL; 2980 2981 if ((flags & SCA_MIGRATE_ENABLE) && 2982 (p->migration_flags & MDF_PUSH) && !rq->push_busy) { 2983 rq->push_busy = true; 2984 push_task = get_task_struct(p); 2985 } 2986 2987 /* 2988 * If there are pending waiters, but no pending stop_work, 2989 * then complete now. 
2990 */ 2991 pending = p->migration_pending; 2992 if (pending && !pending->stop_pending) { 2993 p->migration_pending = NULL; 2994 complete = true; 2995 } 2996 2997 preempt_disable(); 2998 task_rq_unlock(rq, p, rf); 2999 if (push_task) { 3000 stop_one_cpu_nowait(rq->cpu, push_cpu_stop, 3001 p, &rq->push_work); 3002 } 3003 preempt_enable(); 3004 3005 if (complete) 3006 complete_all(&pending->done); 3007 3008 return 0; 3009 } 3010 3011 if (!(flags & SCA_MIGRATE_ENABLE)) { 3012 /* serialized by p->pi_lock */ 3013 if (!p->migration_pending) { 3014 /* Install the request */ 3015 refcount_set(&my_pending.refs, 1); 3016 init_completion(&my_pending.done); 3017 my_pending.arg = (struct migration_arg) { 3018 .task = p, 3019 .dest_cpu = dest_cpu, 3020 .pending = &my_pending, 3021 }; 3022 3023 p->migration_pending = &my_pending; 3024 } else { 3025 pending = p->migration_pending; 3026 refcount_inc(&pending->refs); 3027 /* 3028 * Affinity has changed, but we've already installed a 3029 * pending. migration_cpu_stop() *must* see this, else 3030 * we risk a completion of the pending despite having a 3031 * task on a disallowed CPU. 3032 * 3033 * Serialized by p->pi_lock, so this is safe. 3034 */ 3035 pending->arg.dest_cpu = dest_cpu; 3036 } 3037 } 3038 pending = p->migration_pending; 3039 /* 3040 * - !MIGRATE_ENABLE: 3041 * we'll have installed a pending if there wasn't one already. 3042 * 3043 * - MIGRATE_ENABLE: 3044 * we're here because the current CPU isn't matching anymore, 3045 * the only way that can happen is because of a concurrent 3046 * set_cpus_allowed_ptr() call, which should then still be 3047 * pending completion. 3048 * 3049 * Either way, we really should have a @pending here. 3050 */ 3051 if (WARN_ON_ONCE(!pending)) { 3052 task_rq_unlock(rq, p, rf); 3053 return -EINVAL; 3054 } 3055 3056 if (task_on_cpu(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) { 3057 /* 3058 * MIGRATE_ENABLE gets here because 'p == current', but for 3059 * anything else we cannot do is_migration_disabled(), punt 3060 * and have the stopper function handle it all race-free. 3061 */ 3062 stop_pending = pending->stop_pending; 3063 if (!stop_pending) 3064 pending->stop_pending = true; 3065 3066 if (flags & SCA_MIGRATE_ENABLE) 3067 p->migration_flags &= ~MDF_PUSH; 3068 3069 preempt_disable(); 3070 task_rq_unlock(rq, p, rf); 3071 if (!stop_pending) { 3072 stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop, 3073 &pending->arg, &pending->stop_work); 3074 } 3075 preempt_enable(); 3076 3077 if (flags & SCA_MIGRATE_ENABLE) 3078 return 0; 3079 } else { 3080 3081 if (!is_migration_disabled(p)) { 3082 if (task_on_rq_queued(p)) 3083 rq = move_queued_task(rq, rf, p, dest_cpu); 3084 3085 if (!pending->stop_pending) { 3086 p->migration_pending = NULL; 3087 complete = true; 3088 } 3089 } 3090 task_rq_unlock(rq, p, rf); 3091 3092 if (complete) 3093 complete_all(&pending->done); 3094 } 3095 3096 wait_for_completion(&pending->done); 3097 3098 if (refcount_dec_and_test(&pending->refs)) 3099 wake_up_var(&pending->refs); /* No UaF, just an address */ 3100 3101 /* 3102 * Block the original owner of &pending until all subsequent callers 3103 * have seen the completion and decremented the refcount 3104 */ 3105 wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs)); 3106 3107 /* ARGH */ 3108 WARN_ON_ONCE(my_pending.stop_pending); 3109 3110 return 0; 3111 } 3112 3113 /* 3114 * Called with both p->pi_lock and rq->lock held; drops both before returning. 
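 *
 * A typical caller therefore looks like (sketch):
 *
 *	rq = task_rq_lock(p, &rf);			// takes p->pi_lock + rq->lock
 *	...
 *	return __set_cpus_allowed_ptr_locked(p, ctx, rq, &rf);	// drops both
 *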
3115 */ 3116 static int __set_cpus_allowed_ptr_locked(struct task_struct *p, 3117 struct affinity_context *ctx, 3118 struct rq *rq, 3119 struct rq_flags *rf) 3120 __releases(rq->lock) 3121 __releases(p->pi_lock) 3122 { 3123 const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p); 3124 const struct cpumask *cpu_valid_mask = cpu_active_mask; 3125 bool kthread = p->flags & PF_KTHREAD; 3126 unsigned int dest_cpu; 3127 int ret = 0; 3128 3129 update_rq_clock(rq); 3130 3131 if (kthread || is_migration_disabled(p)) { 3132 /* 3133 * Kernel threads are allowed on online && !active CPUs, 3134 * however, during cpu-hot-unplug, even these might get pushed 3135 * away if not KTHREAD_IS_PER_CPU. 3136 * 3137 * Specifically, migration_disabled() tasks must not fail the 3138 * cpumask_any_and_distribute() pick below, esp. so on 3139 * SCA_MIGRATE_ENABLE, otherwise we'll not call 3140 * set_cpus_allowed_common() and actually reset p->cpus_ptr. 3141 */ 3142 cpu_valid_mask = cpu_online_mask; 3143 } 3144 3145 if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) { 3146 ret = -EINVAL; 3147 goto out; 3148 } 3149 3150 /* 3151 * Must re-check here, to close a race against __kthread_bind(), 3152 * sched_setaffinity() is not guaranteed to observe the flag. 3153 */ 3154 if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) { 3155 ret = -EINVAL; 3156 goto out; 3157 } 3158 3159 if (!(ctx->flags & SCA_MIGRATE_ENABLE)) { 3160 if (cpumask_equal(&p->cpus_mask, ctx->new_mask)) { 3161 if (ctx->flags & SCA_USER) 3162 swap(p->user_cpus_ptr, ctx->user_mask); 3163 goto out; 3164 } 3165 3166 if (WARN_ON_ONCE(p == current && 3167 is_migration_disabled(p) && 3168 !cpumask_test_cpu(task_cpu(p), ctx->new_mask))) { 3169 ret = -EBUSY; 3170 goto out; 3171 } 3172 } 3173 3174 /* 3175 * Picking a ~random cpu helps in cases where we are changing affinity 3176 * for groups of tasks (ie. cpuset), so that load balancing is not 3177 * immediately required to distribute the tasks within their new mask. 3178 */ 3179 dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, ctx->new_mask); 3180 if (dest_cpu >= nr_cpu_ids) { 3181 ret = -EINVAL; 3182 goto out; 3183 } 3184 3185 __do_set_cpus_allowed(p, ctx); 3186 3187 return affine_move_task(rq, p, rf, dest_cpu, ctx->flags); 3188 3189 out: 3190 task_rq_unlock(rq, p, rf); 3191 3192 return ret; 3193 } 3194 3195 /* 3196 * Change a given task's CPU affinity. Migrate the thread to a 3197 * proper CPU and schedule it away if the CPU it's executing on 3198 * is removed from the allowed bitmask. 3199 * 3200 * NOTE: the caller must have a valid reference to the task, the 3201 * task must not exit() & deallocate itself prematurely. The 3202 * call is not atomic; no spinlocks may be held. 3203 */ 3204 static int __set_cpus_allowed_ptr(struct task_struct *p, 3205 struct affinity_context *ctx) 3206 { 3207 struct rq_flags rf; 3208 struct rq *rq; 3209 3210 rq = task_rq_lock(p, &rf); 3211 /* 3212 * Masking should be skipped if SCA_USER or any of the SCA_MIGRATE_* 3213 * flags are set. 
3214 */ 3215 if (p->user_cpus_ptr && 3216 !(ctx->flags & (SCA_USER | SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) && 3217 cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr)) 3218 ctx->new_mask = rq->scratch_mask; 3219 3220 return __set_cpus_allowed_ptr_locked(p, ctx, rq, &rf); 3221 } 3222 3223 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) 3224 { 3225 struct affinity_context ac = { 3226 .new_mask = new_mask, 3227 .flags = 0, 3228 }; 3229 3230 return __set_cpus_allowed_ptr(p, &ac); 3231 } 3232 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); 3233 3234 /* 3235 * Change a given task's CPU affinity to the intersection of its current 3236 * affinity mask and @subset_mask, writing the resulting mask to @new_mask. 3237 * If user_cpus_ptr is defined, use it as the basis for restricting CPU 3238 * affinity or use cpu_online_mask instead. 3239 * 3240 * If the resulting mask is empty, leave the affinity unchanged and return 3241 * -EINVAL. 3242 */ 3243 static int restrict_cpus_allowed_ptr(struct task_struct *p, 3244 struct cpumask *new_mask, 3245 const struct cpumask *subset_mask) 3246 { 3247 struct affinity_context ac = { 3248 .new_mask = new_mask, 3249 .flags = 0, 3250 }; 3251 struct rq_flags rf; 3252 struct rq *rq; 3253 int err; 3254 3255 rq = task_rq_lock(p, &rf); 3256 3257 /* 3258 * Forcefully restricting the affinity of a deadline task is 3259 * likely to cause problems, so fail and noisily override the 3260 * mask entirely. 3261 */ 3262 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) { 3263 err = -EPERM; 3264 goto err_unlock; 3265 } 3266 3267 if (!cpumask_and(new_mask, task_user_cpus(p), subset_mask)) { 3268 err = -EINVAL; 3269 goto err_unlock; 3270 } 3271 3272 return __set_cpus_allowed_ptr_locked(p, &ac, rq, &rf); 3273 3274 err_unlock: 3275 task_rq_unlock(rq, p, &rf); 3276 return err; 3277 } 3278 3279 /* 3280 * Restrict the CPU affinity of task @p so that it is a subset of 3281 * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the 3282 * old affinity mask. If the resulting mask is empty, we warn and walk 3283 * up the cpuset hierarchy until we find a suitable mask. 3284 */ 3285 void force_compatible_cpus_allowed_ptr(struct task_struct *p) 3286 { 3287 cpumask_var_t new_mask; 3288 const struct cpumask *override_mask = task_cpu_possible_mask(p); 3289 3290 alloc_cpumask_var(&new_mask, GFP_KERNEL); 3291 3292 /* 3293 * __migrate_task() can fail silently in the face of concurrent 3294 * offlining of the chosen destination CPU, so take the hotplug 3295 * lock to ensure that the migration succeeds. 3296 */ 3297 cpus_read_lock(); 3298 if (!cpumask_available(new_mask)) 3299 goto out_set_mask; 3300 3301 if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask)) 3302 goto out_free_mask; 3303 3304 /* 3305 * We failed to find a valid subset of the affinity mask for the 3306 * task, so override it based on its cpuset hierarchy. 
3307 */ 3308 cpuset_cpus_allowed(p, new_mask); 3309 override_mask = new_mask; 3310 3311 out_set_mask: 3312 if (printk_ratelimit()) { 3313 printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n", 3314 task_pid_nr(p), p->comm, 3315 cpumask_pr_args(override_mask)); 3316 } 3317 3318 WARN_ON(set_cpus_allowed_ptr(p, override_mask)); 3319 out_free_mask: 3320 cpus_read_unlock(); 3321 free_cpumask_var(new_mask); 3322 } 3323 3324 static int 3325 __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx); 3326 3327 /* 3328 * Restore the affinity of a task @p which was previously restricted by a 3329 * call to force_compatible_cpus_allowed_ptr(). 3330 * 3331 * It is the caller's responsibility to serialise this with any calls to 3332 * force_compatible_cpus_allowed_ptr(@p). 3333 */ 3334 void relax_compatible_cpus_allowed_ptr(struct task_struct *p) 3335 { 3336 struct affinity_context ac = { 3337 .new_mask = task_user_cpus(p), 3338 .flags = 0, 3339 }; 3340 int ret; 3341 3342 /* 3343 * Try to restore the old affinity mask with __sched_setaffinity(). 3344 * Cpuset masking will be done there too. 3345 */ 3346 ret = __sched_setaffinity(p, &ac); 3347 WARN_ON_ONCE(ret); 3348 } 3349 3350 void set_task_cpu(struct task_struct *p, unsigned int new_cpu) 3351 { 3352 #ifdef CONFIG_SCHED_DEBUG 3353 unsigned int state = READ_ONCE(p->__state); 3354 3355 /* 3356 * We should never call set_task_cpu() on a blocked task, 3357 * ttwu() will sort out the placement. 3358 */ 3359 WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq); 3360 3361 /* 3362 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING, 3363 * because schedstat_wait_{start,end} rebase migrating task's wait_start 3364 * time relying on p->on_rq. 3365 */ 3366 WARN_ON_ONCE(state == TASK_RUNNING && 3367 p->sched_class == &fair_sched_class && 3368 (p->on_rq && !task_on_rq_migrating(p))); 3369 3370 #ifdef CONFIG_LOCKDEP 3371 /* 3372 * The caller should hold either p->pi_lock or rq->lock, when changing 3373 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks. 3374 * 3375 * sched_move_task() holds both and thus holding either pins the cgroup, 3376 * see task_group(). 3377 * 3378 * Furthermore, all task_rq users should acquire both locks, see 3379 * task_rq_lock(). 3380 */ 3381 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) || 3382 lockdep_is_held(__rq_lockp(task_rq(p))))); 3383 #endif 3384 /* 3385 * Clearly, migrating tasks to offline CPUs is a fairly daft thing. 
3386 */ 3387 WARN_ON_ONCE(!cpu_online(new_cpu)); 3388 3389 WARN_ON_ONCE(is_migration_disabled(p)); 3390 #endif 3391 3392 trace_sched_migrate_task(p, new_cpu); 3393 3394 if (task_cpu(p) != new_cpu) { 3395 if (p->sched_class->migrate_task_rq) 3396 p->sched_class->migrate_task_rq(p, new_cpu); 3397 p->se.nr_migrations++; 3398 rseq_migrate(p); 3399 sched_mm_cid_migrate_from(p); 3400 perf_event_task_migrate(p); 3401 } 3402 3403 __set_task_cpu(p, new_cpu); 3404 } 3405 3406 #ifdef CONFIG_NUMA_BALANCING 3407 static void __migrate_swap_task(struct task_struct *p, int cpu) 3408 { 3409 if (task_on_rq_queued(p)) { 3410 struct rq *src_rq, *dst_rq; 3411 struct rq_flags srf, drf; 3412 3413 src_rq = task_rq(p); 3414 dst_rq = cpu_rq(cpu); 3415 3416 rq_pin_lock(src_rq, &srf); 3417 rq_pin_lock(dst_rq, &drf); 3418 3419 deactivate_task(src_rq, p, 0); 3420 set_task_cpu(p, cpu); 3421 activate_task(dst_rq, p, 0); 3422 wakeup_preempt(dst_rq, p, 0); 3423 3424 rq_unpin_lock(dst_rq, &drf); 3425 rq_unpin_lock(src_rq, &srf); 3426 3427 } else { 3428 /* 3429 * Task isn't running anymore; make it appear like we migrated 3430 * it before it went to sleep. This means on wakeup we make the 3431 * previous CPU our target instead of where it really is. 3432 */ 3433 p->wake_cpu = cpu; 3434 } 3435 } 3436 3437 struct migration_swap_arg { 3438 struct task_struct *src_task, *dst_task; 3439 int src_cpu, dst_cpu; 3440 }; 3441 3442 static int migrate_swap_stop(void *data) 3443 { 3444 struct migration_swap_arg *arg = data; 3445 struct rq *src_rq, *dst_rq; 3446 3447 if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu)) 3448 return -EAGAIN; 3449 3450 src_rq = cpu_rq(arg->src_cpu); 3451 dst_rq = cpu_rq(arg->dst_cpu); 3452 3453 guard(double_raw_spinlock)(&arg->src_task->pi_lock, &arg->dst_task->pi_lock); 3454 guard(double_rq_lock)(src_rq, dst_rq); 3455 3456 if (task_cpu(arg->dst_task) != arg->dst_cpu) 3457 return -EAGAIN; 3458 3459 if (task_cpu(arg->src_task) != arg->src_cpu) 3460 return -EAGAIN; 3461 3462 if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr)) 3463 return -EAGAIN; 3464 3465 if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr)) 3466 return -EAGAIN; 3467 3468 __migrate_swap_task(arg->src_task, arg->dst_cpu); 3469 __migrate_swap_task(arg->dst_task, arg->src_cpu); 3470 3471 return 0; 3472 } 3473 3474 /* 3475 * Cross migrate two tasks 3476 */ 3477 int migrate_swap(struct task_struct *cur, struct task_struct *p, 3478 int target_cpu, int curr_cpu) 3479 { 3480 struct migration_swap_arg arg; 3481 int ret = -EINVAL; 3482 3483 arg = (struct migration_swap_arg){ 3484 .src_task = cur, 3485 .src_cpu = curr_cpu, 3486 .dst_task = p, 3487 .dst_cpu = target_cpu, 3488 }; 3489 3490 if (arg.src_cpu == arg.dst_cpu) 3491 goto out; 3492 3493 /* 3494 * These three tests are all lockless; this is OK since all of them 3495 * will be re-checked with proper locks held further down the line. 
3496 */ 3497 if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu)) 3498 goto out; 3499 3500 if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr)) 3501 goto out; 3502 3503 if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr)) 3504 goto out; 3505 3506 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu); 3507 ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg); 3508 3509 out: 3510 return ret; 3511 } 3512 #endif /* CONFIG_NUMA_BALANCING */ 3513 3514 /*** 3515 * kick_process - kick a running thread to enter/exit the kernel 3516 * @p: the to-be-kicked thread 3517 * 3518 * Cause a process which is running on another CPU to enter 3519 * kernel-mode, without any delay. (to get signals handled.) 3520 * 3521 * NOTE: this function doesn't have to take the runqueue lock, 3522 * because all it wants to ensure is that the remote task enters 3523 * the kernel. If the IPI races and the task has been migrated 3524 * to another CPU then no harm is done and the purpose has been 3525 * achieved as well. 3526 */ 3527 void kick_process(struct task_struct *p) 3528 { 3529 guard(preempt)(); 3530 int cpu = task_cpu(p); 3531 3532 if ((cpu != smp_processor_id()) && task_curr(p)) 3533 smp_send_reschedule(cpu); 3534 } 3535 EXPORT_SYMBOL_GPL(kick_process); 3536 3537 /* 3538 * ->cpus_ptr is protected by both rq->lock and p->pi_lock 3539 * 3540 * A few notes on cpu_active vs cpu_online: 3541 * 3542 * - cpu_active must be a subset of cpu_online 3543 * 3544 * - on CPU-up we allow per-CPU kthreads on the online && !active CPU, 3545 * see __set_cpus_allowed_ptr(). At this point the newly online 3546 * CPU isn't yet part of the sched domains, and balancing will not 3547 * see it. 3548 * 3549 * - on CPU-down we clear cpu_active() to mask the sched domains and 3550 * avoid the load balancer to place new tasks on the to be removed 3551 * CPU. Existing tasks will remain running there and will be taken 3552 * off. 3553 * 3554 * This means that fallback selection must not select !active CPUs. 3555 * And can assume that any active CPU must be online. Conversely 3556 * select_task_rq() below may allow selection of !active CPUs in order 3557 * to satisfy the above rules. 3558 */ 3559 static int select_fallback_rq(int cpu, struct task_struct *p) 3560 { 3561 int nid = cpu_to_node(cpu); 3562 const struct cpumask *nodemask = NULL; 3563 enum { cpuset, possible, fail } state = cpuset; 3564 int dest_cpu; 3565 3566 /* 3567 * If the node that the CPU is on has been offlined, cpu_to_node() 3568 * will return -1. There is no CPU on the node, and we should 3569 * select the CPU on the other node. 3570 */ 3571 if (nid != -1) { 3572 nodemask = cpumask_of_node(nid); 3573 3574 /* Look for allowed, online CPU in same node. */ 3575 for_each_cpu(dest_cpu, nodemask) { 3576 if (is_cpu_allowed(p, dest_cpu)) 3577 return dest_cpu; 3578 } 3579 } 3580 3581 for (;;) { 3582 /* Any allowed, online CPU? */ 3583 for_each_cpu(dest_cpu, p->cpus_ptr) { 3584 if (!is_cpu_allowed(p, dest_cpu)) 3585 continue; 3586 3587 goto out; 3588 } 3589 3590 /* No more Mr. Nice Guy. */ 3591 switch (state) { 3592 case cpuset: 3593 if (cpuset_cpus_allowed_fallback(p)) { 3594 state = possible; 3595 break; 3596 } 3597 fallthrough; 3598 case possible: 3599 /* 3600 * XXX When called from select_task_rq() we only 3601 * hold p->pi_lock and again violate locking order. 3602 * 3603 * More yuck to audit. 
3604 */ 3605 do_set_cpus_allowed(p, task_cpu_possible_mask(p)); 3606 state = fail; 3607 break; 3608 case fail: 3609 BUG(); 3610 break; 3611 } 3612 } 3613 3614 out: 3615 if (state != cpuset) { 3616 /* 3617 * Don't tell them about moving exiting tasks or 3618 * kernel threads (both mm NULL), since they never 3619 * leave kernel. 3620 */ 3621 if (p->mm && printk_ratelimit()) { 3622 printk_deferred("process %d (%s) no longer affine to cpu%d\n", 3623 task_pid_nr(p), p->comm, cpu); 3624 } 3625 } 3626 3627 return dest_cpu; 3628 } 3629 3630 /* 3631 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable. 3632 */ 3633 static inline 3634 int select_task_rq(struct task_struct *p, int cpu, int wake_flags) 3635 { 3636 lockdep_assert_held(&p->pi_lock); 3637 3638 if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p)) 3639 cpu = p->sched_class->select_task_rq(p, cpu, wake_flags); 3640 else 3641 cpu = cpumask_any(p->cpus_ptr); 3642 3643 /* 3644 * In order not to call set_task_cpu() on a blocking task we need 3645 * to rely on ttwu() to place the task on a valid ->cpus_ptr 3646 * CPU. 3647 * 3648 * Since this is common to all placement strategies, this lives here. 3649 * 3650 * [ this allows ->select_task() to simply return task_cpu(p) and 3651 * not worry about this generic constraint ] 3652 */ 3653 if (unlikely(!is_cpu_allowed(p, cpu))) 3654 cpu = select_fallback_rq(task_cpu(p), p); 3655 3656 return cpu; 3657 } 3658 3659 void sched_set_stop_task(int cpu, struct task_struct *stop) 3660 { 3661 static struct lock_class_key stop_pi_lock; 3662 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; 3663 struct task_struct *old_stop = cpu_rq(cpu)->stop; 3664 3665 if (stop) { 3666 /* 3667 * Make it appear like a SCHED_FIFO task, its something 3668 * userspace knows about and won't get confused about. 3669 * 3670 * Also, it will make PI more or less work without too 3671 * much confusion -- but then, stop work should not 3672 * rely on PI working anyway. 3673 */ 3674 sched_setscheduler_nocheck(stop, SCHED_FIFO, ¶m); 3675 3676 stop->sched_class = &stop_sched_class; 3677 3678 /* 3679 * The PI code calls rt_mutex_setprio() with ->pi_lock held to 3680 * adjust the effective priority of a task. As a result, 3681 * rt_mutex_setprio() can trigger (RT) balancing operations, 3682 * which can then trigger wakeups of the stop thread to push 3683 * around the current task. 3684 * 3685 * The stop task itself will never be part of the PI-chain, it 3686 * never blocks, therefore that ->pi_lock recursion is safe. 3687 * Tell lockdep about this by placing the stop->pi_lock in its 3688 * own class. 3689 */ 3690 lockdep_set_class(&stop->pi_lock, &stop_pi_lock); 3691 } 3692 3693 cpu_rq(cpu)->stop = stop; 3694 3695 if (old_stop) { 3696 /* 3697 * Reset it back to a normal scheduling class so that 3698 * it can die in pieces. 
3699 */ 3700 old_stop->sched_class = &rt_sched_class; 3701 } 3702 } 3703 3704 #else /* CONFIG_SMP */ 3705 3706 static inline int __set_cpus_allowed_ptr(struct task_struct *p, 3707 struct affinity_context *ctx) 3708 { 3709 return set_cpus_allowed_ptr(p, ctx->new_mask); 3710 } 3711 3712 static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { } 3713 3714 static inline bool rq_has_pinned_tasks(struct rq *rq) 3715 { 3716 return false; 3717 } 3718 3719 static inline cpumask_t *alloc_user_cpus_ptr(int node) 3720 { 3721 return NULL; 3722 } 3723 3724 #endif /* !CONFIG_SMP */ 3725 3726 static void 3727 ttwu_stat(struct task_struct *p, int cpu, int wake_flags) 3728 { 3729 struct rq *rq; 3730 3731 if (!schedstat_enabled()) 3732 return; 3733 3734 rq = this_rq(); 3735 3736 #ifdef CONFIG_SMP 3737 if (cpu == rq->cpu) { 3738 __schedstat_inc(rq->ttwu_local); 3739 __schedstat_inc(p->stats.nr_wakeups_local); 3740 } else { 3741 struct sched_domain *sd; 3742 3743 __schedstat_inc(p->stats.nr_wakeups_remote); 3744 3745 guard(rcu)(); 3746 for_each_domain(rq->cpu, sd) { 3747 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { 3748 __schedstat_inc(sd->ttwu_wake_remote); 3749 break; 3750 } 3751 } 3752 } 3753 3754 if (wake_flags & WF_MIGRATED) 3755 __schedstat_inc(p->stats.nr_wakeups_migrate); 3756 #endif /* CONFIG_SMP */ 3757 3758 __schedstat_inc(rq->ttwu_count); 3759 __schedstat_inc(p->stats.nr_wakeups); 3760 3761 if (wake_flags & WF_SYNC) 3762 __schedstat_inc(p->stats.nr_wakeups_sync); 3763 } 3764 3765 /* 3766 * Mark the task runnable. 3767 */ 3768 static inline void ttwu_do_wakeup(struct task_struct *p) 3769 { 3770 WRITE_ONCE(p->__state, TASK_RUNNING); 3771 trace_sched_wakeup(p); 3772 } 3773 3774 static void 3775 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, 3776 struct rq_flags *rf) 3777 { 3778 int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK; 3779 3780 lockdep_assert_rq_held(rq); 3781 3782 if (p->sched_contributes_to_load) 3783 rq->nr_uninterruptible--; 3784 3785 #ifdef CONFIG_SMP 3786 if (wake_flags & WF_MIGRATED) 3787 en_flags |= ENQUEUE_MIGRATED; 3788 else 3789 #endif 3790 if (p->in_iowait) { 3791 delayacct_blkio_end(p); 3792 atomic_dec(&task_rq(p)->nr_iowait); 3793 } 3794 3795 activate_task(rq, p, en_flags); 3796 wakeup_preempt(rq, p, wake_flags); 3797 3798 ttwu_do_wakeup(p); 3799 3800 #ifdef CONFIG_SMP 3801 if (p->sched_class->task_woken) { 3802 /* 3803 * Our task @p is fully woken up and running; so it's safe to 3804 * drop the rq->lock, hereafter rq is only used for statistics. 3805 */ 3806 rq_unpin_lock(rq, rf); 3807 p->sched_class->task_woken(rq, p); 3808 rq_repin_lock(rq, rf); 3809 } 3810 3811 if (rq->idle_stamp) { 3812 u64 delta = rq_clock(rq) - rq->idle_stamp; 3813 u64 max = 2*rq->max_idle_balance_cost; 3814 3815 update_avg(&rq->avg_idle, delta); 3816 3817 if (rq->avg_idle > max) 3818 rq->avg_idle = max; 3819 3820 rq->idle_stamp = 0; 3821 } 3822 #endif 3823 3824 p->dl_server = NULL; 3825 } 3826 3827 /* 3828 * Consider @p being inside a wait loop: 3829 * 3830 * for (;;) { 3831 * set_current_state(TASK_UNINTERRUPTIBLE); 3832 * 3833 * if (CONDITION) 3834 * break; 3835 * 3836 * schedule(); 3837 * } 3838 * __set_current_state(TASK_RUNNING); 3839 * 3840 * between set_current_state() and schedule(). In this case @p is still 3841 * runnable, so all that needs doing is change p->state back to TASK_RUNNING in 3842 * an atomic manner. 
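 *
 * (For completeness, the waker side of this pattern is, sketched:
 *
 *	CONDITION = 1;
 *	wake_up_process(p);	// implies a full barrier before reading p->state
 *
 * the required ordering is discussed near try_to_wake_up() below.)
 *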
3843 * 3844 * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq 3845 * then schedule() must still happen and p->state can be changed to 3846 * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we 3847 * need to do a full wakeup with enqueue. 3848 * 3849 * Returns: %true when the wakeup is done, 3850 * %false otherwise. 3851 */ 3852 static int ttwu_runnable(struct task_struct *p, int wake_flags) 3853 { 3854 struct rq_flags rf; 3855 struct rq *rq; 3856 int ret = 0; 3857 3858 rq = __task_rq_lock(p, &rf); 3859 if (task_on_rq_queued(p)) { 3860 if (!task_on_cpu(rq, p)) { 3861 /* 3862 * When on_rq && !on_cpu the task is preempted, see if 3863 * it should preempt the task that is current now. 3864 */ 3865 update_rq_clock(rq); 3866 wakeup_preempt(rq, p, wake_flags); 3867 } 3868 ttwu_do_wakeup(p); 3869 ret = 1; 3870 } 3871 __task_rq_unlock(rq, &rf); 3872 3873 return ret; 3874 } 3875 3876 #ifdef CONFIG_SMP 3877 void sched_ttwu_pending(void *arg) 3878 { 3879 struct llist_node *llist = arg; 3880 struct rq *rq = this_rq(); 3881 struct task_struct *p, *t; 3882 struct rq_flags rf; 3883 3884 if (!llist) 3885 return; 3886 3887 rq_lock_irqsave(rq, &rf); 3888 update_rq_clock(rq); 3889 3890 llist_for_each_entry_safe(p, t, llist, wake_entry.llist) { 3891 if (WARN_ON_ONCE(p->on_cpu)) 3892 smp_cond_load_acquire(&p->on_cpu, !VAL); 3893 3894 if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq))) 3895 set_task_cpu(p, cpu_of(rq)); 3896 3897 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf); 3898 } 3899 3900 /* 3901 * Must be after enqueueing at least once task such that 3902 * idle_cpu() does not observe a false-negative -- if it does, 3903 * it is possible for select_idle_siblings() to stack a number 3904 * of tasks on this CPU during that window. 3905 * 3906 * It is ok to clear ttwu_pending when another task pending. 3907 * We will receive IPI after local irq enabled and then enqueue it. 3908 * Since now nr_running > 0, idle_cpu() will always get correct result. 3909 */ 3910 WRITE_ONCE(rq->ttwu_pending, 0); 3911 rq_unlock_irqrestore(rq, &rf); 3912 } 3913 3914 /* 3915 * Prepare the scene for sending an IPI for a remote smp_call 3916 * 3917 * Returns true if the caller can proceed with sending the IPI. 3918 * Returns false otherwise. 3919 */ 3920 bool call_function_single_prep_ipi(int cpu) 3921 { 3922 if (set_nr_if_polling(cpu_rq(cpu)->idle)) { 3923 trace_sched_wake_idle_without_ipi(cpu); 3924 return false; 3925 } 3926 3927 return true; 3928 } 3929 3930 /* 3931 * Queue a task on the target CPUs wake_list and wake the CPU via IPI if 3932 * necessary. The wakee CPU on receipt of the IPI will queue the task 3933 * via sched_ttwu_wakeup() for activation so the wakee incurs the cost 3934 * of the wakeup instead of the waker. 
3935 */ 3936 static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) 3937 { 3938 struct rq *rq = cpu_rq(cpu); 3939 3940 p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED); 3941 3942 WRITE_ONCE(rq->ttwu_pending, 1); 3943 __smp_call_single_queue(cpu, &p->wake_entry.llist); 3944 } 3945 3946 void wake_up_if_idle(int cpu) 3947 { 3948 struct rq *rq = cpu_rq(cpu); 3949 3950 guard(rcu)(); 3951 if (is_idle_task(rcu_dereference(rq->curr))) { 3952 guard(rq_lock_irqsave)(rq); 3953 if (is_idle_task(rq->curr)) 3954 resched_curr(rq); 3955 } 3956 } 3957 3958 bool cpus_share_cache(int this_cpu, int that_cpu) 3959 { 3960 if (this_cpu == that_cpu) 3961 return true; 3962 3963 return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu); 3964 } 3965 3966 /* 3967 * Whether CPUs are share cache resources, which means LLC on non-cluster 3968 * machines and LLC tag or L2 on machines with clusters. 3969 */ 3970 bool cpus_share_resources(int this_cpu, int that_cpu) 3971 { 3972 if (this_cpu == that_cpu) 3973 return true; 3974 3975 return per_cpu(sd_share_id, this_cpu) == per_cpu(sd_share_id, that_cpu); 3976 } 3977 3978 static inline bool ttwu_queue_cond(struct task_struct *p, int cpu) 3979 { 3980 /* 3981 * Do not complicate things with the async wake_list while the CPU is 3982 * in hotplug state. 3983 */ 3984 if (!cpu_active(cpu)) 3985 return false; 3986 3987 /* Ensure the task will still be allowed to run on the CPU. */ 3988 if (!cpumask_test_cpu(cpu, p->cpus_ptr)) 3989 return false; 3990 3991 /* 3992 * If the CPU does not share cache, then queue the task on the 3993 * remote rqs wakelist to avoid accessing remote data. 3994 */ 3995 if (!cpus_share_cache(smp_processor_id(), cpu)) 3996 return true; 3997 3998 if (cpu == smp_processor_id()) 3999 return false; 4000 4001 /* 4002 * If the wakee cpu is idle, or the task is descheduling and the 4003 * only running task on the CPU, then use the wakelist to offload 4004 * the task activation to the idle (or soon-to-be-idle) CPU as 4005 * the current CPU is likely busy. nr_running is checked to 4006 * avoid unnecessary task stacking. 4007 * 4008 * Note that we can only get here with (wakee) p->on_rq=0, 4009 * p->on_cpu can be whatever, we've done the dequeue, so 4010 * the wakee has been accounted out of ->nr_running. 4011 */ 4012 if (!cpu_rq(cpu)->nr_running) 4013 return true; 4014 4015 return false; 4016 } 4017 4018 static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) 4019 { 4020 if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) { 4021 sched_clock_cpu(cpu); /* Sync clocks across CPUs */ 4022 __ttwu_queue_wakelist(p, cpu, wake_flags); 4023 return true; 4024 } 4025 4026 return false; 4027 } 4028 4029 #else /* !CONFIG_SMP */ 4030 4031 static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) 4032 { 4033 return false; 4034 } 4035 4036 #endif /* CONFIG_SMP */ 4037 4038 static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags) 4039 { 4040 struct rq *rq = cpu_rq(cpu); 4041 struct rq_flags rf; 4042 4043 if (ttwu_queue_wakelist(p, cpu, wake_flags)) 4044 return; 4045 4046 rq_lock(rq, &rf); 4047 update_rq_clock(rq); 4048 ttwu_do_activate(rq, p, wake_flags, &rf); 4049 rq_unlock(rq, &rf); 4050 } 4051 4052 /* 4053 * Invoked from try_to_wake_up() to check whether the task can be woken up. 4054 * 4055 * The caller holds p::pi_lock if p != current or has preemption 4056 * disabled when p == current. 
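 *
 * For context, an illustrative sketch of the PREEMPT_RT rtlock slowpath
 * that gives rise to p::saved_state (the real code lives in
 * kernel/locking/rtmutex.c):
 *
 *	current_save_and_set_rtlock_wait_state();
 *	for (;;) {
 *		if (try_to_take_rt_mutex(lock, current, NULL))
 *			break;
 *		schedule_rtlock();
 *		set_current_state(TASK_RTLOCK_WAIT);
 *	}
 *	current_restore_rtlock_saved_state();
 *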
4057  *
4058  * The rules of saved_state:
4059  *
4060  *   The related locking code always holds p::pi_lock when updating
4061  *   p::saved_state, which means the code is fully serialized in both cases.
4062  *
4063  *   For PREEMPT_RT, the lock wait and lock wakeups happen via TASK_RTLOCK_WAIT.
4064  *   No other bits set. This allows us to distinguish all wakeup scenarios.
4065  *
4066  *   For FREEZER, the wakeup happens via TASK_FROZEN. No other bits set. This
4067  *   allows us to prevent early wakeup of tasks before they can be run on
4068  *   asymmetric ISA architectures (eg ARMv9).
4069  */
4070 static __always_inline
4071 bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
4072 {
4073 	int match;
4074
4075 	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
4076 		WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) &&
4077 			     state != TASK_RTLOCK_WAIT);
4078 	}
4079
4080 	*success = !!(match = __task_state_match(p, state));
4081
4082 	/*
4083 	 * Saved state preserves the task state across blocking on
4084 	 * an RT lock or TASK_FREEZABLE tasks. If the state matches,
4085 	 * set p::saved_state to TASK_RUNNING, but do not wake the task
4086 	 * because it waits for a lock wakeup or __thaw_task(). Also
4087 	 * indicate success because from the regular waker's point of
4088 	 * view this has succeeded.
4089 	 *
4090 	 * After acquiring the lock the task will restore p::__state
4091 	 * from p::saved_state which ensures that the regular
4092 	 * wakeup is not lost. The restore will also set
4093 	 * p::saved_state to TASK_RUNNING so any further tests will
4094 	 * not result in false positives vs. @success.
4095 	 */
4096 	if (match < 0)
4097 		p->saved_state = TASK_RUNNING;
4098
4099 	return match > 0;
4100 }
4101
4102 /*
4103  * Notes on Program-Order guarantees on SMP systems.
4104  *
4105  *  MIGRATION
4106  *
4107  * The basic program-order guarantee on SMP systems is that when a task [t]
4108  * migrates, all its activity on its old CPU [c0] happens-before any subsequent
4109  * execution on its new CPU [c1].
4110  *
4111  * For migration (of runnable tasks) this is provided by the following means:
4112  *
4113  *  A) UNLOCK of the rq(c0)->lock scheduling out task t
4114  *  B) migration for t is required to synchronize *both* rq(c0)->lock and
4115  *     rq(c1)->lock (if not at the same time, then in that order).
4116  *  C) LOCK of the rq(c1)->lock scheduling in task
4117  *
4118  * Release/acquire chaining guarantees that B happens after A and C after B.
4119  * Note: the CPU doing B need not be c0 or c1.
4120  *
4121  * Example:
4122  *
4123  *   CPU0            CPU1            CPU2
4124  *
4125  *   LOCK rq(0)->lock
4126  *   sched-out X
4127  *   sched-in Y
4128  *   UNLOCK rq(0)->lock
4129  *
4130  *                                   LOCK rq(0)->lock // orders against CPU0
4131  *                                   dequeue X
4132  *                                   UNLOCK rq(0)->lock
4133  *
4134  *                                   LOCK rq(1)->lock
4135  *                                   enqueue X
4136  *                                   UNLOCK rq(1)->lock
4137  *
4138  *                   LOCK rq(1)->lock // orders against CPU2
4139  *                   sched-out Z
4140  *                   sched-in X
4141  *                   UNLOCK rq(1)->lock
4142  *
4143  *
4144  * BLOCKING -- aka. SLEEP + WAKEUP
4145  *
4146  * For blocking we (obviously) need to provide the same guarantee as for
4147  * migration. However the means are completely different as there is no lock
4148  * chain to provide order.
Instead we do: 4149 * 4150 * 1) smp_store_release(X->on_cpu, 0) -- finish_task() 4151 * 2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up() 4152 * 4153 * Example: 4154 * 4155 * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule) 4156 * 4157 * LOCK rq(0)->lock LOCK X->pi_lock 4158 * dequeue X 4159 * sched-out X 4160 * smp_store_release(X->on_cpu, 0); 4161 * 4162 * smp_cond_load_acquire(&X->on_cpu, !VAL); 4163 * X->state = WAKING 4164 * set_task_cpu(X,2) 4165 * 4166 * LOCK rq(2)->lock 4167 * enqueue X 4168 * X->state = RUNNING 4169 * UNLOCK rq(2)->lock 4170 * 4171 * LOCK rq(2)->lock // orders against CPU1 4172 * sched-out Z 4173 * sched-in X 4174 * UNLOCK rq(2)->lock 4175 * 4176 * UNLOCK X->pi_lock 4177 * UNLOCK rq(0)->lock 4178 * 4179 * 4180 * However, for wakeups there is a second guarantee we must provide, namely we 4181 * must ensure that CONDITION=1 done by the caller can not be reordered with 4182 * accesses to the task state; see try_to_wake_up() and set_current_state(). 4183 */ 4184 4185 /** 4186 * try_to_wake_up - wake up a thread 4187 * @p: the thread to be awakened 4188 * @state: the mask of task states that can be woken 4189 * @wake_flags: wake modifier flags (WF_*) 4190 * 4191 * Conceptually does: 4192 * 4193 * If (@state & @p->state) @p->state = TASK_RUNNING. 4194 * 4195 * If the task was not queued/runnable, also place it back on a runqueue. 4196 * 4197 * This function is atomic against schedule() which would dequeue the task. 4198 * 4199 * It issues a full memory barrier before accessing @p->state, see the comment 4200 * with set_current_state(). 4201 * 4202 * Uses p->pi_lock to serialize against concurrent wake-ups. 4203 * 4204 * Relies on p->pi_lock stabilizing: 4205 * - p->sched_class 4206 * - p->cpus_ptr 4207 * - p->sched_task_group 4208 * in order to do migration, see its use of select_task_rq()/set_task_cpu(). 4209 * 4210 * Tries really hard to only take one task_rq(p)->lock for performance. 4211 * Takes rq->lock in: 4212 * - ttwu_runnable() -- old rq, unavoidable, see comment there; 4213 * - ttwu_queue() -- new rq, for enqueue of the task; 4214 * - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us. 4215 * 4216 * As a consequence we race really badly with just about everything. See the 4217 * many memory barriers and their comments for details. 4218 * 4219 * Return: %true if @p->state changes (an actual wakeup was done), 4220 * %false otherwise. 4221 */ 4222 int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) 4223 { 4224 guard(preempt)(); 4225 int cpu, success = 0; 4226 4227 if (p == current) { 4228 /* 4229 * We're waking current, this means 'p->on_rq' and 'task_cpu(p) 4230 * == smp_processor_id()'. Together this means we can special 4231 * case the whole 'p->on_rq && ttwu_runnable()' case below 4232 * without taking any locks. 4233 * 4234 * In particular: 4235 * - we rely on Program-Order guarantees for all the ordering, 4236 * - we're serialized against set_special_state() by virtue of 4237 * it disabling IRQs (this allows not taking ->pi_lock). 4238 */ 4239 if (!ttwu_state_match(p, state, &success)) 4240 goto out; 4241 4242 trace_sched_waking(p); 4243 ttwu_do_wakeup(p); 4244 goto out; 4245 } 4246 4247 /* 4248 * If we are going to wake up a thread waiting for CONDITION we 4249 * need to ensure that CONDITION=1 done by the caller can not be 4250 * reordered with p->state check below. This pairs with smp_store_mb() 4251 * in set_current_state() that the waiting thread does. 
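	 *
	 * I.e. the waker side typically does (illustrative):
	 *
	 *	CONDITION = 1;
	 *	try_to_wake_up(p, TASK_NORMAL, 0);
	 *
	 * while the waiter does:
	 *
	 *	set_current_state(TASK_UNINTERRUPTIBLE);
	 *	if (!CONDITION)
	 *		schedule();
	 *
	 * and it is this barrier pairing that keeps the CONDITION store and
	 * the p->state check from being reordered on either side.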
4252 */ 4253 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) { 4254 smp_mb__after_spinlock(); 4255 if (!ttwu_state_match(p, state, &success)) 4256 break; 4257 4258 trace_sched_waking(p); 4259 4260 /* 4261 * Ensure we load p->on_rq _after_ p->state, otherwise it would 4262 * be possible to, falsely, observe p->on_rq == 0 and get stuck 4263 * in smp_cond_load_acquire() below. 4264 * 4265 * sched_ttwu_pending() try_to_wake_up() 4266 * STORE p->on_rq = 1 LOAD p->state 4267 * UNLOCK rq->lock 4268 * 4269 * __schedule() (switch to task 'p') 4270 * LOCK rq->lock smp_rmb(); 4271 * smp_mb__after_spinlock(); 4272 * UNLOCK rq->lock 4273 * 4274 * [task p] 4275 * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq 4276 * 4277 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in 4278 * __schedule(). See the comment for smp_mb__after_spinlock(). 4279 * 4280 * A similar smp_rmb() lives in __task_needs_rq_lock(). 4281 */ 4282 smp_rmb(); 4283 if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags)) 4284 break; 4285 4286 #ifdef CONFIG_SMP 4287 /* 4288 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be 4289 * possible to, falsely, observe p->on_cpu == 0. 4290 * 4291 * One must be running (->on_cpu == 1) in order to remove oneself 4292 * from the runqueue. 4293 * 4294 * __schedule() (switch to task 'p') try_to_wake_up() 4295 * STORE p->on_cpu = 1 LOAD p->on_rq 4296 * UNLOCK rq->lock 4297 * 4298 * __schedule() (put 'p' to sleep) 4299 * LOCK rq->lock smp_rmb(); 4300 * smp_mb__after_spinlock(); 4301 * STORE p->on_rq = 0 LOAD p->on_cpu 4302 * 4303 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in 4304 * __schedule(). See the comment for smp_mb__after_spinlock(). 4305 * 4306 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure 4307 * schedule()'s deactivate_task() has 'happened' and p will no longer 4308 * care about it's own p->state. See the comment in __schedule(). 4309 */ 4310 smp_acquire__after_ctrl_dep(); 4311 4312 /* 4313 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq 4314 * == 0), which means we need to do an enqueue, change p->state to 4315 * TASK_WAKING such that we can unlock p->pi_lock before doing the 4316 * enqueue, such as ttwu_queue_wakelist(). 4317 */ 4318 WRITE_ONCE(p->__state, TASK_WAKING); 4319 4320 /* 4321 * If the owning (remote) CPU is still in the middle of schedule() with 4322 * this task as prev, considering queueing p on the remote CPUs wake_list 4323 * which potentially sends an IPI instead of spinning on p->on_cpu to 4324 * let the waker make forward progress. This is safe because IRQs are 4325 * disabled and the IPI will deliver after on_cpu is cleared. 4326 * 4327 * Ensure we load task_cpu(p) after p->on_cpu: 4328 * 4329 * set_task_cpu(p, cpu); 4330 * STORE p->cpu = @cpu 4331 * __schedule() (switch to task 'p') 4332 * LOCK rq->lock 4333 * smp_mb__after_spin_lock() smp_cond_load_acquire(&p->on_cpu) 4334 * STORE p->on_cpu = 1 LOAD p->cpu 4335 * 4336 * to ensure we observe the correct CPU on which the task is currently 4337 * scheduling. 4338 */ 4339 if (smp_load_acquire(&p->on_cpu) && 4340 ttwu_queue_wakelist(p, task_cpu(p), wake_flags)) 4341 break; 4342 4343 /* 4344 * If the owning (remote) CPU is still in the middle of schedule() with 4345 * this task as prev, wait until it's done referencing the task. 4346 * 4347 * Pairs with the smp_store_release() in finish_task(). 4348 * 4349 * This ensures that tasks getting woken will be fully ordered against 4350 * their previous state and preserve Program Order. 
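		 *
		 * (This is the smp_cond_load_acquire() half of the BLOCKING --
		 * aka. SLEEP + WAKEUP scheme from the Program-Order notes above.)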
4351 */ 4352 smp_cond_load_acquire(&p->on_cpu, !VAL); 4353 4354 cpu = select_task_rq(p, p->wake_cpu, wake_flags | WF_TTWU); 4355 if (task_cpu(p) != cpu) { 4356 if (p->in_iowait) { 4357 delayacct_blkio_end(p); 4358 atomic_dec(&task_rq(p)->nr_iowait); 4359 } 4360 4361 wake_flags |= WF_MIGRATED; 4362 psi_ttwu_dequeue(p); 4363 set_task_cpu(p, cpu); 4364 } 4365 #else 4366 cpu = task_cpu(p); 4367 #endif /* CONFIG_SMP */ 4368 4369 ttwu_queue(p, cpu, wake_flags); 4370 } 4371 out: 4372 if (success) 4373 ttwu_stat(p, task_cpu(p), wake_flags); 4374 4375 return success; 4376 } 4377 4378 static bool __task_needs_rq_lock(struct task_struct *p) 4379 { 4380 unsigned int state = READ_ONCE(p->__state); 4381 4382 /* 4383 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when 4384 * the task is blocked. Make sure to check @state since ttwu() can drop 4385 * locks at the end, see ttwu_queue_wakelist(). 4386 */ 4387 if (state == TASK_RUNNING || state == TASK_WAKING) 4388 return true; 4389 4390 /* 4391 * Ensure we load p->on_rq after p->__state, otherwise it would be 4392 * possible to, falsely, observe p->on_rq == 0. 4393 * 4394 * See try_to_wake_up() for a longer comment. 4395 */ 4396 smp_rmb(); 4397 if (p->on_rq) 4398 return true; 4399 4400 #ifdef CONFIG_SMP 4401 /* 4402 * Ensure the task has finished __schedule() and will not be referenced 4403 * anymore. Again, see try_to_wake_up() for a longer comment. 4404 */ 4405 smp_rmb(); 4406 smp_cond_load_acquire(&p->on_cpu, !VAL); 4407 #endif 4408 4409 return false; 4410 } 4411 4412 /** 4413 * task_call_func - Invoke a function on task in fixed state 4414 * @p: Process for which the function is to be invoked, can be @current. 4415 * @func: Function to invoke. 4416 * @arg: Argument to function. 4417 * 4418 * Fix the task in it's current state by avoiding wakeups and or rq operations 4419 * and call @func(@arg) on it. This function can use ->on_rq and task_curr() 4420 * to work out what the state is, if required. Given that @func can be invoked 4421 * with a runqueue lock held, it had better be quite lightweight. 4422 * 4423 * Returns: 4424 * Whatever @func returns 4425 */ 4426 int task_call_func(struct task_struct *p, task_call_f func, void *arg) 4427 { 4428 struct rq *rq = NULL; 4429 struct rq_flags rf; 4430 int ret; 4431 4432 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); 4433 4434 if (__task_needs_rq_lock(p)) 4435 rq = __task_rq_lock(p, &rf); 4436 4437 /* 4438 * At this point the task is pinned; either: 4439 * - blocked and we're holding off wakeups (pi->lock) 4440 * - woken, and we're holding off enqueue (rq->lock) 4441 * - queued, and we're holding off schedule (rq->lock) 4442 * - running, and we're holding off de-schedule (rq->lock) 4443 * 4444 * The called function (@func) can use: task_curr(), p->on_rq and 4445 * p->__state to differentiate between these states. 4446 */ 4447 ret = func(p, arg); 4448 4449 if (rq) 4450 rq_unlock(rq, &rf); 4451 4452 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags); 4453 return ret; 4454 } 4455 4456 /** 4457 * cpu_curr_snapshot - Return a snapshot of the currently running task 4458 * @cpu: The CPU on which to snapshot the task. 4459 * 4460 * Returns the task_struct pointer of the task "currently" running on 4461 * the specified CPU. If the same task is running on that CPU throughout, 4462 * the return value will be a pointer to that task's task_struct structure. 
4463 * If the CPU did any context switches even vaguely concurrently with the 4464 * execution of this function, the return value will be a pointer to the 4465 * task_struct structure of a randomly chosen task that was running on 4466 * that CPU somewhere around the time that this function was executing. 4467 * 4468 * If the specified CPU was offline, the return value is whatever it 4469 * is, perhaps a pointer to the task_struct structure of that CPU's idle 4470 * task, but there is no guarantee. Callers wishing a useful return 4471 * value must take some action to ensure that the specified CPU remains 4472 * online throughout. 4473 * 4474 * This function executes full memory barriers before and after fetching 4475 * the pointer, which permits the caller to confine this function's fetch 4476 * with respect to the caller's accesses to other shared variables. 4477 */ 4478 struct task_struct *cpu_curr_snapshot(int cpu) 4479 { 4480 struct task_struct *t; 4481 4482 smp_mb(); /* Pairing determined by caller's synchronization design. */ 4483 t = rcu_dereference(cpu_curr(cpu)); 4484 smp_mb(); /* Pairing determined by caller's synchronization design. */ 4485 return t; 4486 } 4487 4488 /** 4489 * wake_up_process - Wake up a specific process 4490 * @p: The process to be woken up. 4491 * 4492 * Attempt to wake up the nominated process and move it to the set of runnable 4493 * processes. 4494 * 4495 * Return: 1 if the process was woken up, 0 if it was already running. 4496 * 4497 * This function executes a full memory barrier before accessing the task state. 4498 */ 4499 int wake_up_process(struct task_struct *p) 4500 { 4501 return try_to_wake_up(p, TASK_NORMAL, 0); 4502 } 4503 EXPORT_SYMBOL(wake_up_process); 4504 4505 int wake_up_state(struct task_struct *p, unsigned int state) 4506 { 4507 return try_to_wake_up(p, state, 0); 4508 } 4509 4510 /* 4511 * Perform scheduler related setup for a newly forked process p. 4512 * p is forked by current. 
4513 * 4514 * __sched_fork() is basic setup used by init_idle() too: 4515 */ 4516 static void __sched_fork(unsigned long clone_flags, struct task_struct *p) 4517 { 4518 p->on_rq = 0; 4519 4520 p->se.on_rq = 0; 4521 p->se.exec_start = 0; 4522 p->se.sum_exec_runtime = 0; 4523 p->se.prev_sum_exec_runtime = 0; 4524 p->se.nr_migrations = 0; 4525 p->se.vruntime = 0; 4526 p->se.vlag = 0; 4527 p->se.slice = sysctl_sched_base_slice; 4528 INIT_LIST_HEAD(&p->se.group_node); 4529 4530 #ifdef CONFIG_FAIR_GROUP_SCHED 4531 p->se.cfs_rq = NULL; 4532 #endif 4533 4534 #ifdef CONFIG_SCHEDSTATS 4535 /* Even if schedstat is disabled, there should not be garbage */ 4536 memset(&p->stats, 0, sizeof(p->stats)); 4537 #endif 4538 4539 init_dl_entity(&p->dl); 4540 4541 INIT_LIST_HEAD(&p->rt.run_list); 4542 p->rt.timeout = 0; 4543 p->rt.time_slice = sched_rr_timeslice; 4544 p->rt.on_rq = 0; 4545 p->rt.on_list = 0; 4546 4547 #ifdef CONFIG_PREEMPT_NOTIFIERS 4548 INIT_HLIST_HEAD(&p->preempt_notifiers); 4549 #endif 4550 4551 #ifdef CONFIG_COMPACTION 4552 p->capture_control = NULL; 4553 #endif 4554 init_numa_balancing(clone_flags, p); 4555 #ifdef CONFIG_SMP 4556 p->wake_entry.u_flags = CSD_TYPE_TTWU; 4557 p->migration_pending = NULL; 4558 #endif 4559 init_sched_mm_cid(p); 4560 } 4561 4562 DEFINE_STATIC_KEY_FALSE(sched_numa_balancing); 4563 4564 #ifdef CONFIG_NUMA_BALANCING 4565 4566 int sysctl_numa_balancing_mode; 4567 4568 static void __set_numabalancing_state(bool enabled) 4569 { 4570 if (enabled) 4571 static_branch_enable(&sched_numa_balancing); 4572 else 4573 static_branch_disable(&sched_numa_balancing); 4574 } 4575 4576 void set_numabalancing_state(bool enabled) 4577 { 4578 if (enabled) 4579 sysctl_numa_balancing_mode = NUMA_BALANCING_NORMAL; 4580 else 4581 sysctl_numa_balancing_mode = NUMA_BALANCING_DISABLED; 4582 __set_numabalancing_state(enabled); 4583 } 4584 4585 #ifdef CONFIG_PROC_SYSCTL 4586 static void reset_memory_tiering(void) 4587 { 4588 struct pglist_data *pgdat; 4589 4590 for_each_online_pgdat(pgdat) { 4591 pgdat->nbp_threshold = 0; 4592 pgdat->nbp_th_nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE); 4593 pgdat->nbp_th_start = jiffies_to_msecs(jiffies); 4594 } 4595 } 4596 4597 static int sysctl_numa_balancing(struct ctl_table *table, int write, 4598 void *buffer, size_t *lenp, loff_t *ppos) 4599 { 4600 struct ctl_table t; 4601 int err; 4602 int state = sysctl_numa_balancing_mode; 4603 4604 if (write && !capable(CAP_SYS_ADMIN)) 4605 return -EPERM; 4606 4607 t = *table; 4608 t.data = &state; 4609 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); 4610 if (err < 0) 4611 return err; 4612 if (write) { 4613 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) && 4614 (state & NUMA_BALANCING_MEMORY_TIERING)) 4615 reset_memory_tiering(); 4616 sysctl_numa_balancing_mode = state; 4617 __set_numabalancing_state(state); 4618 } 4619 return err; 4620 } 4621 #endif 4622 #endif 4623 4624 #ifdef CONFIG_SCHEDSTATS 4625 4626 DEFINE_STATIC_KEY_FALSE(sched_schedstats); 4627 4628 static void set_schedstats(bool enabled) 4629 { 4630 if (enabled) 4631 static_branch_enable(&sched_schedstats); 4632 else 4633 static_branch_disable(&sched_schedstats); 4634 } 4635 4636 void force_schedstat_enabled(void) 4637 { 4638 if (!schedstat_enabled()) { 4639 pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n"); 4640 static_branch_enable(&sched_schedstats); 4641 } 4642 } 4643 4644 static int __init setup_schedstats(char *str) 4645 { 4646 int ret = 0; 4647 if (!str) 4648 goto out; 4649 
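	/* Accepts "enable" or "disable", e.g. booting with "schedstats=enable". */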
4650 if (!strcmp(str, "enable")) { 4651 set_schedstats(true); 4652 ret = 1; 4653 } else if (!strcmp(str, "disable")) { 4654 set_schedstats(false); 4655 ret = 1; 4656 } 4657 out: 4658 if (!ret) 4659 pr_warn("Unable to parse schedstats=\n"); 4660 4661 return ret; 4662 } 4663 __setup("schedstats=", setup_schedstats); 4664 4665 #ifdef CONFIG_PROC_SYSCTL 4666 static int sysctl_schedstats(struct ctl_table *table, int write, void *buffer, 4667 size_t *lenp, loff_t *ppos) 4668 { 4669 struct ctl_table t; 4670 int err; 4671 int state = static_branch_likely(&sched_schedstats); 4672 4673 if (write && !capable(CAP_SYS_ADMIN)) 4674 return -EPERM; 4675 4676 t = *table; 4677 t.data = &state; 4678 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); 4679 if (err < 0) 4680 return err; 4681 if (write) 4682 set_schedstats(state); 4683 return err; 4684 } 4685 #endif /* CONFIG_PROC_SYSCTL */ 4686 #endif /* CONFIG_SCHEDSTATS */ 4687 4688 #ifdef CONFIG_SYSCTL 4689 static struct ctl_table sched_core_sysctls[] = { 4690 #ifdef CONFIG_SCHEDSTATS 4691 { 4692 .procname = "sched_schedstats", 4693 .data = NULL, 4694 .maxlen = sizeof(unsigned int), 4695 .mode = 0644, 4696 .proc_handler = sysctl_schedstats, 4697 .extra1 = SYSCTL_ZERO, 4698 .extra2 = SYSCTL_ONE, 4699 }, 4700 #endif /* CONFIG_SCHEDSTATS */ 4701 #ifdef CONFIG_UCLAMP_TASK 4702 { 4703 .procname = "sched_util_clamp_min", 4704 .data = &sysctl_sched_uclamp_util_min, 4705 .maxlen = sizeof(unsigned int), 4706 .mode = 0644, 4707 .proc_handler = sysctl_sched_uclamp_handler, 4708 }, 4709 { 4710 .procname = "sched_util_clamp_max", 4711 .data = &sysctl_sched_uclamp_util_max, 4712 .maxlen = sizeof(unsigned int), 4713 .mode = 0644, 4714 .proc_handler = sysctl_sched_uclamp_handler, 4715 }, 4716 { 4717 .procname = "sched_util_clamp_min_rt_default", 4718 .data = &sysctl_sched_uclamp_util_min_rt_default, 4719 .maxlen = sizeof(unsigned int), 4720 .mode = 0644, 4721 .proc_handler = sysctl_sched_uclamp_handler, 4722 }, 4723 #endif /* CONFIG_UCLAMP_TASK */ 4724 #ifdef CONFIG_NUMA_BALANCING 4725 { 4726 .procname = "numa_balancing", 4727 .data = NULL, /* filled in by handler */ 4728 .maxlen = sizeof(unsigned int), 4729 .mode = 0644, 4730 .proc_handler = sysctl_numa_balancing, 4731 .extra1 = SYSCTL_ZERO, 4732 .extra2 = SYSCTL_FOUR, 4733 }, 4734 #endif /* CONFIG_NUMA_BALANCING */ 4735 {} 4736 }; 4737 static int __init sched_core_sysctl_init(void) 4738 { 4739 register_sysctl_init("kernel", sched_core_sysctls); 4740 return 0; 4741 } 4742 late_initcall(sched_core_sysctl_init); 4743 #endif /* CONFIG_SYSCTL */ 4744 4745 /* 4746 * fork()/clone()-time setup: 4747 */ 4748 int sched_fork(unsigned long clone_flags, struct task_struct *p) 4749 { 4750 __sched_fork(clone_flags, p); 4751 /* 4752 * We mark the process as NEW here. This guarantees that 4753 * nobody will actually run it, and a signal or other external 4754 * event cannot wake it up and insert it on the runqueue either. 4755 */ 4756 p->__state = TASK_NEW; 4757 4758 /* 4759 * Make sure we do not leak PI boosting priority to the child. 4760 */ 4761 p->prio = current->normal_prio; 4762 4763 uclamp_fork(p); 4764 4765 /* 4766 * Revert to default priority/policy on fork if requested. 
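	 * (p->sched_reset_on_fork is copied from the parent, which requested
	 * it e.g. via SCHED_FLAG_RESET_ON_FORK in sched_setattr().)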
4767 */ 4768 if (unlikely(p->sched_reset_on_fork)) { 4769 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { 4770 p->policy = SCHED_NORMAL; 4771 p->static_prio = NICE_TO_PRIO(0); 4772 p->rt_priority = 0; 4773 } else if (PRIO_TO_NICE(p->static_prio) < 0) 4774 p->static_prio = NICE_TO_PRIO(0); 4775 4776 p->prio = p->normal_prio = p->static_prio; 4777 set_load_weight(p, false); 4778 4779 /* 4780 * We don't need the reset flag anymore after the fork. It has 4781 * fulfilled its duty: 4782 */ 4783 p->sched_reset_on_fork = 0; 4784 } 4785 4786 if (dl_prio(p->prio)) 4787 return -EAGAIN; 4788 else if (rt_prio(p->prio)) 4789 p->sched_class = &rt_sched_class; 4790 else 4791 p->sched_class = &fair_sched_class; 4792 4793 init_entity_runnable_average(&p->se); 4794 4795 4796 #ifdef CONFIG_SCHED_INFO 4797 if (likely(sched_info_on())) 4798 memset(&p->sched_info, 0, sizeof(p->sched_info)); 4799 #endif 4800 #if defined(CONFIG_SMP) 4801 p->on_cpu = 0; 4802 #endif 4803 init_task_preempt_count(p); 4804 #ifdef CONFIG_SMP 4805 plist_node_init(&p->pushable_tasks, MAX_PRIO); 4806 RB_CLEAR_NODE(&p->pushable_dl_tasks); 4807 #endif 4808 return 0; 4809 } 4810 4811 void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs) 4812 { 4813 unsigned long flags; 4814 4815 /* 4816 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly 4817 * required yet, but lockdep gets upset if rules are violated. 4818 */ 4819 raw_spin_lock_irqsave(&p->pi_lock, flags); 4820 #ifdef CONFIG_CGROUP_SCHED 4821 if (1) { 4822 struct task_group *tg; 4823 tg = container_of(kargs->cset->subsys[cpu_cgrp_id], 4824 struct task_group, css); 4825 tg = autogroup_task_group(p, tg); 4826 p->sched_task_group = tg; 4827 } 4828 #endif 4829 rseq_migrate(p); 4830 /* 4831 * We're setting the CPU for the first time, we don't migrate, 4832 * so use __set_task_cpu(). 4833 */ 4834 __set_task_cpu(p, smp_processor_id()); 4835 if (p->sched_class->task_fork) 4836 p->sched_class->task_fork(p); 4837 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 4838 } 4839 4840 void sched_post_fork(struct task_struct *p) 4841 { 4842 uclamp_post_fork(p); 4843 } 4844 4845 unsigned long to_ratio(u64 period, u64 runtime) 4846 { 4847 if (runtime == RUNTIME_INF) 4848 return BW_UNIT; 4849 4850 /* 4851 * Doing this here saves a lot of checks in all 4852 * the calling paths, and returning zero seems 4853 * safe for them anyway. 4854 */ 4855 if (period == 0) 4856 return 0; 4857 4858 return div64_u64(runtime << BW_SHIFT, period); 4859 } 4860 4861 /* 4862 * wake_up_new_task - wake up a newly created task for the first time. 4863 * 4864 * This function will do some initial scheduler statistics housekeeping 4865 * that must be done for every newly created context, then puts the task 4866 * on the runqueue and wakes it. 4867 */ 4868 void wake_up_new_task(struct task_struct *p) 4869 { 4870 struct rq_flags rf; 4871 struct rq *rq; 4872 4873 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); 4874 WRITE_ONCE(p->__state, TASK_RUNNING); 4875 #ifdef CONFIG_SMP 4876 /* 4877 * Fork balancing, do it here and not earlier because: 4878 * - cpus_ptr can change in the fork path 4879 * - any previously selected CPU might disappear through hotplug 4880 * 4881 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq, 4882 * as we're not fully set-up yet. 
4883 */ 4884 p->recent_used_cpu = task_cpu(p); 4885 rseq_migrate(p); 4886 __set_task_cpu(p, select_task_rq(p, task_cpu(p), WF_FORK)); 4887 #endif 4888 rq = __task_rq_lock(p, &rf); 4889 update_rq_clock(rq); 4890 post_init_entity_util_avg(p); 4891 4892 activate_task(rq, p, ENQUEUE_NOCLOCK); 4893 trace_sched_wakeup_new(p); 4894 wakeup_preempt(rq, p, WF_FORK); 4895 #ifdef CONFIG_SMP 4896 if (p->sched_class->task_woken) { 4897 /* 4898 * Nothing relies on rq->lock after this, so it's fine to 4899 * drop it. 4900 */ 4901 rq_unpin_lock(rq, &rf); 4902 p->sched_class->task_woken(rq, p); 4903 rq_repin_lock(rq, &rf); 4904 } 4905 #endif 4906 task_rq_unlock(rq, p, &rf); 4907 } 4908 4909 #ifdef CONFIG_PREEMPT_NOTIFIERS 4910 4911 static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key); 4912 4913 void preempt_notifier_inc(void) 4914 { 4915 static_branch_inc(&preempt_notifier_key); 4916 } 4917 EXPORT_SYMBOL_GPL(preempt_notifier_inc); 4918 4919 void preempt_notifier_dec(void) 4920 { 4921 static_branch_dec(&preempt_notifier_key); 4922 } 4923 EXPORT_SYMBOL_GPL(preempt_notifier_dec); 4924 4925 /** 4926 * preempt_notifier_register - tell me when current is being preempted & rescheduled 4927 * @notifier: notifier struct to register 4928 */ 4929 void preempt_notifier_register(struct preempt_notifier *notifier) 4930 { 4931 if (!static_branch_unlikely(&preempt_notifier_key)) 4932 WARN(1, "registering preempt_notifier while notifiers disabled\n"); 4933 4934 hlist_add_head(¬ifier->link, ¤t->preempt_notifiers); 4935 } 4936 EXPORT_SYMBOL_GPL(preempt_notifier_register); 4937 4938 /** 4939 * preempt_notifier_unregister - no longer interested in preemption notifications 4940 * @notifier: notifier struct to unregister 4941 * 4942 * This is *not* safe to call from within a preemption notifier. 
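 *
 * A typical user initializes the notifier once with preempt_notifier_init()
 * and then brackets the section it cares about with
 * preempt_notifier_register()/preempt_notifier_unregister(); KVM's
 * vcpu_load()/vcpu_put() is one such user.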
4943 */ 4944 void preempt_notifier_unregister(struct preempt_notifier *notifier) 4945 { 4946 hlist_del(¬ifier->link); 4947 } 4948 EXPORT_SYMBOL_GPL(preempt_notifier_unregister); 4949 4950 static void __fire_sched_in_preempt_notifiers(struct task_struct *curr) 4951 { 4952 struct preempt_notifier *notifier; 4953 4954 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) 4955 notifier->ops->sched_in(notifier, raw_smp_processor_id()); 4956 } 4957 4958 static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) 4959 { 4960 if (static_branch_unlikely(&preempt_notifier_key)) 4961 __fire_sched_in_preempt_notifiers(curr); 4962 } 4963 4964 static void 4965 __fire_sched_out_preempt_notifiers(struct task_struct *curr, 4966 struct task_struct *next) 4967 { 4968 struct preempt_notifier *notifier; 4969 4970 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) 4971 notifier->ops->sched_out(notifier, next); 4972 } 4973 4974 static __always_inline void 4975 fire_sched_out_preempt_notifiers(struct task_struct *curr, 4976 struct task_struct *next) 4977 { 4978 if (static_branch_unlikely(&preempt_notifier_key)) 4979 __fire_sched_out_preempt_notifiers(curr, next); 4980 } 4981 4982 #else /* !CONFIG_PREEMPT_NOTIFIERS */ 4983 4984 static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) 4985 { 4986 } 4987 4988 static inline void 4989 fire_sched_out_preempt_notifiers(struct task_struct *curr, 4990 struct task_struct *next) 4991 { 4992 } 4993 4994 #endif /* CONFIG_PREEMPT_NOTIFIERS */ 4995 4996 static inline void prepare_task(struct task_struct *next) 4997 { 4998 #ifdef CONFIG_SMP 4999 /* 5000 * Claim the task as running, we do this before switching to it 5001 * such that any running task will have this set. 5002 * 5003 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and 5004 * its ordering comment. 5005 */ 5006 WRITE_ONCE(next->on_cpu, 1); 5007 #endif 5008 } 5009 5010 static inline void finish_task(struct task_struct *prev) 5011 { 5012 #ifdef CONFIG_SMP 5013 /* 5014 * This must be the very last reference to @prev from this CPU. After 5015 * p->on_cpu is cleared, the task can be moved to a different CPU. We 5016 * must ensure this doesn't happen until the switch is completely 5017 * finished. 5018 * 5019 * In particular, the load of prev->state in finish_task_switch() must 5020 * happen before this. 5021 * 5022 * Pairs with the smp_cond_load_acquire() in try_to_wake_up(). 5023 */ 5024 smp_store_release(&prev->on_cpu, 0); 5025 #endif 5026 } 5027 5028 #ifdef CONFIG_SMP 5029 5030 static void do_balance_callbacks(struct rq *rq, struct balance_callback *head) 5031 { 5032 void (*func)(struct rq *rq); 5033 struct balance_callback *next; 5034 5035 lockdep_assert_rq_held(rq); 5036 5037 while (head) { 5038 func = (void (*)(struct rq *))head->func; 5039 next = head->next; 5040 head->next = NULL; 5041 head = next; 5042 5043 func(rq); 5044 } 5045 } 5046 5047 static void balance_push(struct rq *rq); 5048 5049 /* 5050 * balance_push_callback is a right abuse of the callback interface and plays 5051 * by significantly different rules. 5052 * 5053 * Where the normal balance_callback's purpose is to be ran in the same context 5054 * that queued it (only later, when it's safe to drop rq->lock again), 5055 * balance_push_callback is specifically targeted at __schedule(). 5056 * 5057 * This abuse is tolerated because it places all the unlikely/odd cases behind 5058 * a single test, namely: rq->balance_callback == NULL. 
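 *
 * (__splice_balance_callbacks() below is careful not to take this callback
 * off the list when the splice and the callback run are not in the same
 * rq->lock section.)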
5059 */ 5060 struct balance_callback balance_push_callback = { 5061 .next = NULL, 5062 .func = balance_push, 5063 }; 5064 5065 static inline struct balance_callback * 5066 __splice_balance_callbacks(struct rq *rq, bool split) 5067 { 5068 struct balance_callback *head = rq->balance_callback; 5069 5070 if (likely(!head)) 5071 return NULL; 5072 5073 lockdep_assert_rq_held(rq); 5074 /* 5075 * Must not take balance_push_callback off the list when 5076 * splice_balance_callbacks() and balance_callbacks() are not 5077 * in the same rq->lock section. 5078 * 5079 * In that case it would be possible for __schedule() to interleave 5080 * and observe the list empty. 5081 */ 5082 if (split && head == &balance_push_callback) 5083 head = NULL; 5084 else 5085 rq->balance_callback = NULL; 5086 5087 return head; 5088 } 5089 5090 static inline struct balance_callback *splice_balance_callbacks(struct rq *rq) 5091 { 5092 return __splice_balance_callbacks(rq, true); 5093 } 5094 5095 static void __balance_callbacks(struct rq *rq) 5096 { 5097 do_balance_callbacks(rq, __splice_balance_callbacks(rq, false)); 5098 } 5099 5100 static inline void balance_callbacks(struct rq *rq, struct balance_callback *head) 5101 { 5102 unsigned long flags; 5103 5104 if (unlikely(head)) { 5105 raw_spin_rq_lock_irqsave(rq, flags); 5106 do_balance_callbacks(rq, head); 5107 raw_spin_rq_unlock_irqrestore(rq, flags); 5108 } 5109 } 5110 5111 #else 5112 5113 static inline void __balance_callbacks(struct rq *rq) 5114 { 5115 } 5116 5117 static inline struct balance_callback *splice_balance_callbacks(struct rq *rq) 5118 { 5119 return NULL; 5120 } 5121 5122 static inline void balance_callbacks(struct rq *rq, struct balance_callback *head) 5123 { 5124 } 5125 5126 #endif 5127 5128 static inline void 5129 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf) 5130 { 5131 /* 5132 * Since the runqueue lock will be released by the next 5133 * task (which is an invalid locking op but in the case 5134 * of the scheduler it's an obvious special-case), so we 5135 * do an early lockdep release here: 5136 */ 5137 rq_unpin_lock(rq, rf); 5138 spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_); 5139 #ifdef CONFIG_DEBUG_SPINLOCK 5140 /* this is a valid case when another task releases the spinlock */ 5141 rq_lockp(rq)->owner = next; 5142 #endif 5143 } 5144 5145 static inline void finish_lock_switch(struct rq *rq) 5146 { 5147 /* 5148 * If we are tracking spinlock dependencies then we have to 5149 * fix up the runqueue lock - which gets 'carried over' from 5150 * prev into current: 5151 */ 5152 spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_); 5153 __balance_callbacks(rq); 5154 raw_spin_rq_unlock_irq(rq); 5155 } 5156 5157 /* 5158 * NOP if the arch has not defined these: 5159 */ 5160 5161 #ifndef prepare_arch_switch 5162 # define prepare_arch_switch(next) do { } while (0) 5163 #endif 5164 5165 #ifndef finish_arch_post_lock_switch 5166 # define finish_arch_post_lock_switch() do { } while (0) 5167 #endif 5168 5169 static inline void kmap_local_sched_out(void) 5170 { 5171 #ifdef CONFIG_KMAP_LOCAL 5172 if (unlikely(current->kmap_ctrl.idx)) 5173 __kmap_local_sched_out(); 5174 #endif 5175 } 5176 5177 static inline void kmap_local_sched_in(void) 5178 { 5179 #ifdef CONFIG_KMAP_LOCAL 5180 if (unlikely(current->kmap_ctrl.idx)) 5181 __kmap_local_sched_in(); 5182 #endif 5183 } 5184 5185 /** 5186 * prepare_task_switch - prepare to switch tasks 5187 * @rq: the runqueue preparing to switch 5188 * @prev: the current task that is being switched out 
5189  * @next: the task we are going to switch to.
5190  *
5191  * This is called with the rq lock held and interrupts off. It must
5192  * be paired with a subsequent finish_task_switch after the context
5193  * switch.
5194  *
5195  * prepare_task_switch sets up locking and calls architecture specific
5196  * hooks.
5197  */
5198 static inline void
5199 prepare_task_switch(struct rq *rq, struct task_struct *prev,
5200 		    struct task_struct *next)
5201 {
5202 	kcov_prepare_switch(prev);
5203 	sched_info_switch(rq, prev, next);
5204 	perf_event_task_sched_out(prev, next);
5205 	rseq_preempt(prev);
5206 	fire_sched_out_preempt_notifiers(prev, next);
5207 	kmap_local_sched_out();
5208 	prepare_task(next);
5209 	prepare_arch_switch(next);
5210 }
5211
5212 /**
5213  * finish_task_switch - clean up after a task-switch
5214  * @prev: the thread we just switched away from.
5215  *
5216  * finish_task_switch must be called after the context switch, paired
5217  * with a prepare_task_switch call before the context switch.
5218  * finish_task_switch will reconcile locking set up by prepare_task_switch,
5219  * and do any other architecture-specific cleanup actions.
5220  *
5221  * Note that we may have delayed dropping an mm in context_switch(). If
5222  * so, we finish that here outside of the runqueue lock. (Doing it
5223  * with the lock held can cause deadlocks; see schedule() for
5224  * details.)
5225  *
5226  * The context switch has flipped the stack from under us and restored the
5227  * local variables which were saved when this task called schedule() in the
5228  * past. prev == current is still correct but we need to recalculate this_rq
5229  * because prev may have moved to another CPU.
5230  */
5231 static struct rq *finish_task_switch(struct task_struct *prev)
5232 	__releases(rq->lock)
5233 {
5234 	struct rq *rq = this_rq();
5235 	struct mm_struct *mm = rq->prev_mm;
5236 	unsigned int prev_state;
5237
5238 	/*
5239 	 * The previous task will have left us with a preempt_count of 2
5240 	 * because it left us after:
5241 	 *
5242 	 *	schedule()
5243 	 *	  preempt_disable();			// 1
5244 	 *	  __schedule()
5245 	 *	    raw_spin_lock_irq(&rq->lock)	// 2
5246 	 *
5247 	 * Also, see FORK_PREEMPT_COUNT.
5248 	 */
5249 	if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
5250 		      "corrupted preempt_count: %s/%d/0x%x\n",
5251 		      current->comm, current->pid, preempt_count()))
5252 		preempt_count_set(FORK_PREEMPT_COUNT);
5253
5254 	rq->prev_mm = NULL;
5255
5256 	/*
5257 	 * A task struct has one reference for the use as "current".
5258 	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
5259 	 * schedule one last time. The schedule call will never return, and
5260 	 * the scheduled task must drop that reference.
5261 	 *
5262 	 * We must observe prev->state before clearing prev->on_cpu (in
5263 	 * finish_task), otherwise a concurrent wakeup can get prev
5264 	 * running on another CPU and we could race with its RUNNING -> DEAD
5265 	 * transition, resulting in a double drop.
5266 	 */
5267 	prev_state = READ_ONCE(prev->__state);
5268 	vtime_task_switch(prev);
5269 	perf_event_task_sched_in(prev, current);
5270 	finish_task(prev);
5271 	tick_nohz_task_switch();
5272 	finish_lock_switch(rq);
5273 	finish_arch_post_lock_switch();
5274 	kcov_finish_switch(current);
5275 	/*
5276 	 * kmap_local_sched_out() is invoked with rq::lock held and
5277 	 * interrupts disabled. There is no requirement for that, but the
5278 	 * sched out code does not have an interrupt enabled section.
5279 	 * Restoring the maps on sched in does not require interrupts being
5280 	 * disabled either.
5281 */ 5282 kmap_local_sched_in(); 5283 5284 fire_sched_in_preempt_notifiers(current); 5285 /* 5286 * When switching through a kernel thread, the loop in 5287 * membarrier_{private,global}_expedited() may have observed that 5288 * kernel thread and not issued an IPI. It is therefore possible to 5289 * schedule between user->kernel->user threads without passing though 5290 * switch_mm(). Membarrier requires a barrier after storing to 5291 * rq->curr, before returning to userspace, so provide them here: 5292 * 5293 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly 5294 * provided by mmdrop_lazy_tlb(), 5295 * - a sync_core for SYNC_CORE. 5296 */ 5297 if (mm) { 5298 membarrier_mm_sync_core_before_usermode(mm); 5299 mmdrop_lazy_tlb_sched(mm); 5300 } 5301 5302 if (unlikely(prev_state == TASK_DEAD)) { 5303 if (prev->sched_class->task_dead) 5304 prev->sched_class->task_dead(prev); 5305 5306 /* Task is done with its stack. */ 5307 put_task_stack(prev); 5308 5309 put_task_struct_rcu_user(prev); 5310 } 5311 5312 return rq; 5313 } 5314 5315 /** 5316 * schedule_tail - first thing a freshly forked thread must call. 5317 * @prev: the thread we just switched away from. 5318 */ 5319 asmlinkage __visible void schedule_tail(struct task_struct *prev) 5320 __releases(rq->lock) 5321 { 5322 /* 5323 * New tasks start with FORK_PREEMPT_COUNT, see there and 5324 * finish_task_switch() for details. 5325 * 5326 * finish_task_switch() will drop rq->lock() and lower preempt_count 5327 * and the preempt_enable() will end up enabling preemption (on 5328 * PREEMPT_COUNT kernels). 5329 */ 5330 5331 finish_task_switch(prev); 5332 preempt_enable(); 5333 5334 if (current->set_child_tid) 5335 put_user(task_pid_vnr(current), current->set_child_tid); 5336 5337 calculate_sigpending(); 5338 } 5339 5340 /* 5341 * context_switch - switch to the new MM and the new thread's register state. 5342 */ 5343 static __always_inline struct rq * 5344 context_switch(struct rq *rq, struct task_struct *prev, 5345 struct task_struct *next, struct rq_flags *rf) 5346 { 5347 prepare_task_switch(rq, prev, next); 5348 5349 /* 5350 * For paravirt, this is coupled with an exit in switch_to to 5351 * combine the page table reload and the switch backend into 5352 * one hypercall. 5353 */ 5354 arch_start_context_switch(prev); 5355 5356 /* 5357 * kernel -> kernel lazy + transfer active 5358 * user -> kernel lazy + mmgrab_lazy_tlb() active 5359 * 5360 * kernel -> user switch + mmdrop_lazy_tlb() active 5361 * user -> user switch 5362 * 5363 * switch_mm_cid() needs to be updated if the barriers provided 5364 * by context_switch() are modified. 5365 */ 5366 if (!next->mm) { // to kernel 5367 enter_lazy_tlb(prev->active_mm, next); 5368 5369 next->active_mm = prev->active_mm; 5370 if (prev->mm) // from user 5371 mmgrab_lazy_tlb(prev->active_mm); 5372 else 5373 prev->active_mm = NULL; 5374 } else { // to user 5375 membarrier_switch_mm(rq, prev->active_mm, next->mm); 5376 /* 5377 * sys_membarrier() requires an smp_mb() between setting 5378 * rq->curr / membarrier_switch_mm() and returning to userspace. 5379 * 5380 * The below provides this either through switch_mm(), or in 5381 * case 'prev->active_mm == next->mm' through 5382 * finish_task_switch()'s mmdrop(). 5383 */ 5384 switch_mm_irqs_off(prev->active_mm, next->mm, next); 5385 lru_gen_use_mm(next->mm); 5386 5387 if (!prev->mm) { // from kernel 5388 /* will mmdrop_lazy_tlb() in finish_task_switch(). 
 */
5389 		rq->prev_mm = prev->active_mm;
5390 		prev->active_mm = NULL;
5391 	}
5392 	}
5393
5394 	/* switch_mm_cid() requires the memory barriers above. */
5395 	switch_mm_cid(rq, prev, next);
5396
5397 	prepare_lock_switch(rq, next, rf);
5398
5399 	/* Here we just switch the register state and the stack. */
5400 	switch_to(prev, next, prev);
5401 	barrier();
5402
5403 	return finish_task_switch(prev);
5404 }
5405
5406 /*
5407  * nr_running and nr_context_switches:
5408  *
5409  * externally visible scheduler statistics: current number of runnable
5410  * threads, total number of context switches performed since bootup.
5411  */
5412 unsigned int nr_running(void)
5413 {
5414 	unsigned int i, sum = 0;
5415
5416 	for_each_online_cpu(i)
5417 		sum += cpu_rq(i)->nr_running;
5418
5419 	return sum;
5420 }
5421
5422 /*
5423  * Check if only the current task is running on the CPU.
5424  *
5425  * Caution: this function does not check that the caller has disabled
5426  * preemption, thus the result might have a time-of-check-to-time-of-use
5427  * race. The caller is responsible for using it correctly, for example:
5428  *
5429  * - from a non-preemptible section (of course)
5430  *
5431  * - from a thread that is bound to a single CPU
5432  *
5433  * - in a loop with very short iterations (e.g. a polling loop)
5434  */
5435 bool single_task_running(void)
5436 {
5437 	return raw_rq()->nr_running == 1;
5438 }
5439 EXPORT_SYMBOL(single_task_running);
5440
5441 unsigned long long nr_context_switches_cpu(int cpu)
5442 {
5443 	return cpu_rq(cpu)->nr_switches;
5444 }
5445
5446 unsigned long long nr_context_switches(void)
5447 {
5448 	int i;
5449 	unsigned long long sum = 0;
5450
5451 	for_each_possible_cpu(i)
5452 		sum += cpu_rq(i)->nr_switches;
5453
5454 	return sum;
5455 }
5456
5457 /*
5458  * Consumers of these two interfaces, like for example the cpuidle menu
5459  * governor, are using nonsensical data: they prefer shallow idle state
5460  * selection for a CPU that has IO-waiting tasks, even though those tasks
5461  * might not even end up running on that CPU once they do become runnable.
5462  */
5463
5464 unsigned int nr_iowait_cpu(int cpu)
5465 {
5466 	return atomic_read(&cpu_rq(cpu)->nr_iowait);
5467 }
5468
5469 /*
5470  * IO-wait accounting, and how it's mostly bollocks (on SMP).
5471  *
5472  * The idea behind IO-wait accounting is to account the idle time that we could
5473  * have spent running if it were not for IO. That is, if we were to improve the
5474  * storage performance, we'd have a proportional reduction in IO-wait time.
5475  *
5476  * This all works nicely on UP, where, when a task blocks on IO, we account
5477  * idle time as IO-wait, because if the storage were faster, it could've been
5478  * running and we'd not be idle.
5479  *
5480  * This has been extended to SMP, by doing the same for each CPU. This however
5481  * is broken.
5482  *
5483  * Imagine for instance the case where two tasks block on one CPU; only that
5484  * CPU will have IO-wait accounted, while the other has regular idle. Even
5485  * though, if the storage were faster, both could've run at the same time,
5486  * utilising both CPUs.
5487  *
5488  * This means that, when looking globally, the current IO-wait accounting on
5489  * SMP is a lower bound, due to under-accounting.
5490  *
5491  * Worse, since the numbers are provided per CPU, they are sometimes
5492  * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
5493  * associated with any one particular CPU, it can wake up on a different CPU
5494  * than the one it blocked on. This means the per-CPU IO-wait number is meaningless.
5495  *
5496  * Task CPU affinities can make all that even more 'interesting'.
5497  */
5498
5499 unsigned int nr_iowait(void)
5500 {
5501 	unsigned int i, sum = 0;
5502
5503 	for_each_possible_cpu(i)
5504 		sum += nr_iowait_cpu(i);
5505
5506 	return sum;
5507 }
5508
5509 #ifdef CONFIG_SMP
5510
5511 /*
5512  * sched_exec - execve() is a valuable balancing opportunity, because at
5513  * this point the task has the smallest effective memory and cache footprint.
5514  */
5515 void sched_exec(void)
5516 {
5517 	struct task_struct *p = current;
5518 	struct migration_arg arg;
5519 	int dest_cpu;
5520
5521 	scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
5522 		dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC);
5523 		if (dest_cpu == smp_processor_id())
5524 			return;
5525
5526 		if (unlikely(!cpu_active(dest_cpu)))
5527 			return;
5528
5529 		arg = (struct migration_arg){ p, dest_cpu };
5530 	}
5531 	stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
5532 }
5533
5534 #endif
5535
5536 DEFINE_PER_CPU(struct kernel_stat, kstat);
5537 DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
5538
5539 EXPORT_PER_CPU_SYMBOL(kstat);
5540 EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
5541
5542 /*
5543  * The function fair_sched_class.update_curr accesses the struct curr
5544  * and its field curr->exec_start; when called from task_sched_runtime(),
5545  * we observe a high rate of cache misses in practice.
5546  * Prefetching this data results in improved performance.
5547  */
5548 static inline void prefetch_curr_exec_start(struct task_struct *p)
5549 {
5550 #ifdef CONFIG_FAIR_GROUP_SCHED
5551 	struct sched_entity *curr = (&p->se)->cfs_rq->curr;
5552 #else
5553 	struct sched_entity *curr = (&task_rq(p)->cfs)->curr;
5554 #endif
5555 	prefetch(curr);
5556 	prefetch(&curr->exec_start);
5557 }
5558
5559 /*
5560  * Return accounted runtime for the task.
5561  * In case the task is currently running, return the runtime plus its
5562  * pending runtime that has not been accounted yet.
5563  */
5564 unsigned long long task_sched_runtime(struct task_struct *p)
5565 {
5566 	struct rq_flags rf;
5567 	struct rq *rq;
5568 	u64 ns;
5569
5570 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
5571 	/*
5572 	 * 64-bit doesn't need locks to atomically read a 64-bit value.
5573 	 * So we have an optimization chance when the task's delta_exec is 0.
5574 	 * Reading ->on_cpu is racy, but this is OK.
5575 	 *
5576 	 * If we race with it leaving the CPU, we'll take a lock. So we're correct.
5577 	 * If we race with it entering the CPU, unaccounted time is 0. This is
5578 	 * indistinguishable from the read occurring a few cycles earlier.
5579 	 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
5580 	 * been accounted, so we're correct here as well.
5581 	 */
5582 	if (!p->on_cpu || !task_on_rq_queued(p))
5583 		return p->se.sum_exec_runtime;
5584 #endif
5585
5586 	rq = task_rq_lock(p, &rf);
5587 	/*
5588 	 * Must be ->curr _and_ ->on_rq. If dequeued, we would
5589 	 * project cycles that may never be accounted to this
5590 	 * thread, breaking clock_gettime().
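	 * (This is e.g. the clock_gettime(CLOCK_THREAD_CPUTIME_ID) read path,
	 * via the posix-cpu-timers code.)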
5591 */ 5592 if (task_current(rq, p) && task_on_rq_queued(p)) { 5593 prefetch_curr_exec_start(p); 5594 update_rq_clock(rq); 5595 p->sched_class->update_curr(rq); 5596 } 5597 ns = p->se.sum_exec_runtime; 5598 task_rq_unlock(rq, p, &rf); 5599 5600 return ns; 5601 } 5602 5603 #ifdef CONFIG_SCHED_DEBUG 5604 static u64 cpu_resched_latency(struct rq *rq) 5605 { 5606 int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms); 5607 u64 resched_latency, now = rq_clock(rq); 5608 static bool warned_once; 5609 5610 if (sysctl_resched_latency_warn_once && warned_once) 5611 return 0; 5612 5613 if (!need_resched() || !latency_warn_ms) 5614 return 0; 5615 5616 if (system_state == SYSTEM_BOOTING) 5617 return 0; 5618 5619 if (!rq->last_seen_need_resched_ns) { 5620 rq->last_seen_need_resched_ns = now; 5621 rq->ticks_without_resched = 0; 5622 return 0; 5623 } 5624 5625 rq->ticks_without_resched++; 5626 resched_latency = now - rq->last_seen_need_resched_ns; 5627 if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC) 5628 return 0; 5629 5630 warned_once = true; 5631 5632 return resched_latency; 5633 } 5634 5635 static int __init setup_resched_latency_warn_ms(char *str) 5636 { 5637 long val; 5638 5639 if ((kstrtol(str, 0, &val))) { 5640 pr_warn("Unable to set resched_latency_warn_ms\n"); 5641 return 1; 5642 } 5643 5644 sysctl_resched_latency_warn_ms = val; 5645 return 1; 5646 } 5647 __setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms); 5648 #else 5649 static inline u64 cpu_resched_latency(struct rq *rq) { return 0; } 5650 #endif /* CONFIG_SCHED_DEBUG */ 5651 5652 /* 5653 * This function gets called by the timer code, with HZ frequency. 5654 * We call it with interrupts disabled. 5655 */ 5656 void scheduler_tick(void) 5657 { 5658 int cpu = smp_processor_id(); 5659 struct rq *rq = cpu_rq(cpu); 5660 struct task_struct *curr = rq->curr; 5661 struct rq_flags rf; 5662 unsigned long thermal_pressure; 5663 u64 resched_latency; 5664 5665 if (housekeeping_cpu(cpu, HK_TYPE_TICK)) 5666 arch_scale_freq_tick(); 5667 5668 sched_clock_tick(); 5669 5670 rq_lock(rq, &rf); 5671 5672 update_rq_clock(rq); 5673 thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq)); 5674 update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure); 5675 curr->sched_class->task_tick(rq, curr, 0); 5676 if (sched_feat(LATENCY_WARN)) 5677 resched_latency = cpu_resched_latency(rq); 5678 calc_global_load_tick(rq); 5679 sched_core_tick(rq); 5680 task_tick_mm_cid(rq, curr); 5681 5682 rq_unlock(rq, &rf); 5683 5684 if (sched_feat(LATENCY_WARN) && resched_latency) 5685 resched_latency_warn(cpu, resched_latency); 5686 5687 perf_event_task_tick(); 5688 5689 if (curr->flags & PF_WQ_WORKER) 5690 wq_worker_tick(curr); 5691 5692 #ifdef CONFIG_SMP 5693 rq->idle_balance = idle_cpu(cpu); 5694 trigger_load_balance(rq); 5695 #endif 5696 } 5697 5698 #ifdef CONFIG_NO_HZ_FULL 5699 5700 struct tick_work { 5701 int cpu; 5702 atomic_t state; 5703 struct delayed_work work; 5704 }; 5705 /* Values for ->state, see diagram below. 
*/ 5706 #define TICK_SCHED_REMOTE_OFFLINE 0 5707 #define TICK_SCHED_REMOTE_OFFLINING 1 5708 #define TICK_SCHED_REMOTE_RUNNING 2 5709 5710 /* 5711 * State diagram for ->state: 5712 * 5713 * 5714 * TICK_SCHED_REMOTE_OFFLINE 5715 * | ^ 5716 * | | 5717 * | | sched_tick_remote() 5718 * | | 5719 * | | 5720 * +--TICK_SCHED_REMOTE_OFFLINING 5721 * | ^ 5722 * | | 5723 * sched_tick_start() | | sched_tick_stop() 5724 * | | 5725 * V | 5726 * TICK_SCHED_REMOTE_RUNNING 5727 * 5728 * 5729 * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote() 5730 * and sched_tick_start() are happy to leave the state in RUNNING. 5731 */ 5732 5733 static struct tick_work __percpu *tick_work_cpu; 5734 5735 static void sched_tick_remote(struct work_struct *work) 5736 { 5737 struct delayed_work *dwork = to_delayed_work(work); 5738 struct tick_work *twork = container_of(dwork, struct tick_work, work); 5739 int cpu = twork->cpu; 5740 struct rq *rq = cpu_rq(cpu); 5741 int os; 5742 5743 /* 5744 * Handle the tick only if it appears the remote CPU is running in full 5745 * dynticks mode. The check is racy by nature, but missing a tick or 5746 * having one too much is no big deal because the scheduler tick updates 5747 * statistics and checks timeslices in a time-independent way, regardless 5748 * of when exactly it is running. 5749 */ 5750 if (tick_nohz_tick_stopped_cpu(cpu)) { 5751 guard(rq_lock_irq)(rq); 5752 struct task_struct *curr = rq->curr; 5753 5754 if (cpu_online(cpu)) { 5755 update_rq_clock(rq); 5756 5757 if (!is_idle_task(curr)) { 5758 /* 5759 * Make sure the next tick runs within a 5760 * reasonable amount of time. 5761 */ 5762 u64 delta = rq_clock_task(rq) - curr->se.exec_start; 5763 WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3); 5764 } 5765 curr->sched_class->task_tick(rq, curr, 0); 5766 5767 calc_load_nohz_remote(rq); 5768 } 5769 } 5770 5771 /* 5772 * Run the remote tick once per second (1Hz). This arbitrary 5773 * frequency is large enough to avoid overload but short enough 5774 * to keep scheduler internal stats reasonably up to date. But 5775 * first update state to reflect hotplug activity if required. 5776 */ 5777 os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING); 5778 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE); 5779 if (os == TICK_SCHED_REMOTE_RUNNING) 5780 queue_delayed_work(system_unbound_wq, dwork, HZ); 5781 } 5782 5783 static void sched_tick_start(int cpu) 5784 { 5785 int os; 5786 struct tick_work *twork; 5787 5788 if (housekeeping_cpu(cpu, HK_TYPE_TICK)) 5789 return; 5790 5791 WARN_ON_ONCE(!tick_work_cpu); 5792 5793 twork = per_cpu_ptr(tick_work_cpu, cpu); 5794 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING); 5795 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING); 5796 if (os == TICK_SCHED_REMOTE_OFFLINE) { 5797 twork->cpu = cpu; 5798 INIT_DELAYED_WORK(&twork->work, sched_tick_remote); 5799 queue_delayed_work(system_unbound_wq, &twork->work, HZ); 5800 } 5801 } 5802 5803 #ifdef CONFIG_HOTPLUG_CPU 5804 static void sched_tick_stop(int cpu) 5805 { 5806 struct tick_work *twork; 5807 int os; 5808 5809 if (housekeeping_cpu(cpu, HK_TYPE_TICK)) 5810 return; 5811 5812 WARN_ON_ONCE(!tick_work_cpu); 5813 5814 twork = per_cpu_ptr(tick_work_cpu, cpu); 5815 /* There cannot be competing actions, but don't rely on stop-machine. */ 5816 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING); 5817 WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING); 5818 /* Don't cancel, as this would mess up the state machine. 
*/ 5819 } 5820 #endif /* CONFIG_HOTPLUG_CPU */ 5821 5822 int __init sched_tick_offload_init(void) 5823 { 5824 tick_work_cpu = alloc_percpu(struct tick_work); 5825 BUG_ON(!tick_work_cpu); 5826 return 0; 5827 } 5828 5829 #else /* !CONFIG_NO_HZ_FULL */ 5830 static inline void sched_tick_start(int cpu) { } 5831 static inline void sched_tick_stop(int cpu) { } 5832 #endif 5833 5834 #if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \ 5835 defined(CONFIG_TRACE_PREEMPT_TOGGLE)) 5836 /* 5837 * If the value passed in is equal to the current preempt count 5838 * then we just disabled preemption. Start timing the latency. 5839 */ 5840 static inline void preempt_latency_start(int val) 5841 { 5842 if (preempt_count() == val) { 5843 unsigned long ip = get_lock_parent_ip(); 5844 #ifdef CONFIG_DEBUG_PREEMPT 5845 current->preempt_disable_ip = ip; 5846 #endif 5847 trace_preempt_off(CALLER_ADDR0, ip); 5848 } 5849 } 5850 5851 void preempt_count_add(int val) 5852 { 5853 #ifdef CONFIG_DEBUG_PREEMPT 5854 /* 5855 * Underflow? 5856 */ 5857 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) 5858 return; 5859 #endif 5860 __preempt_count_add(val); 5861 #ifdef CONFIG_DEBUG_PREEMPT 5862 /* 5863 * Spinlock count overflowing soon? 5864 */ 5865 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= 5866 PREEMPT_MASK - 10); 5867 #endif 5868 preempt_latency_start(val); 5869 } 5870 EXPORT_SYMBOL(preempt_count_add); 5871 NOKPROBE_SYMBOL(preempt_count_add); 5872 5873 /* 5874 * If the value passed in equals to the current preempt count 5875 * then we just enabled preemption. Stop timing the latency. 5876 */ 5877 static inline void preempt_latency_stop(int val) 5878 { 5879 if (preempt_count() == val) 5880 trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip()); 5881 } 5882 5883 void preempt_count_sub(int val) 5884 { 5885 #ifdef CONFIG_DEBUG_PREEMPT 5886 /* 5887 * Underflow? 5888 */ 5889 if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) 5890 return; 5891 /* 5892 * Is the spinlock portion underflowing? 
5893 */ 5894 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) && 5895 !(preempt_count() & PREEMPT_MASK))) 5896 return; 5897 #endif 5898 5899 preempt_latency_stop(val); 5900 __preempt_count_sub(val); 5901 } 5902 EXPORT_SYMBOL(preempt_count_sub); 5903 NOKPROBE_SYMBOL(preempt_count_sub); 5904 5905 #else 5906 static inline void preempt_latency_start(int val) { } 5907 static inline void preempt_latency_stop(int val) { } 5908 #endif 5909 5910 static inline unsigned long get_preempt_disable_ip(struct task_struct *p) 5911 { 5912 #ifdef CONFIG_DEBUG_PREEMPT 5913 return p->preempt_disable_ip; 5914 #else 5915 return 0; 5916 #endif 5917 } 5918 5919 /* 5920 * Print scheduling while atomic bug: 5921 */ 5922 static noinline void __schedule_bug(struct task_struct *prev) 5923 { 5924 /* Save this before calling printk(), since that will clobber it */ 5925 unsigned long preempt_disable_ip = get_preempt_disable_ip(current); 5926 5927 if (oops_in_progress) 5928 return; 5929 5930 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", 5931 prev->comm, prev->pid, preempt_count()); 5932 5933 debug_show_held_locks(prev); 5934 print_modules(); 5935 if (irqs_disabled()) 5936 print_irqtrace_events(prev); 5937 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) { 5938 pr_err("Preemption disabled at:"); 5939 print_ip_sym(KERN_ERR, preempt_disable_ip); 5940 } 5941 check_panic_on_warn("scheduling while atomic"); 5942 5943 dump_stack(); 5944 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 5945 } 5946 5947 /* 5948 * Various schedule()-time debugging checks and statistics: 5949 */ 5950 static inline void schedule_debug(struct task_struct *prev, bool preempt) 5951 { 5952 #ifdef CONFIG_SCHED_STACK_END_CHECK 5953 if (task_stack_end_corrupted(prev)) 5954 panic("corrupted stack end detected inside scheduler\n"); 5955 5956 if (task_scs_end_corrupted(prev)) 5957 panic("corrupted shadow stack detected inside scheduler\n"); 5958 #endif 5959 5960 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 5961 if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) { 5962 printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n", 5963 prev->comm, prev->pid, prev->non_block_count); 5964 dump_stack(); 5965 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 5966 } 5967 #endif 5968 5969 if (unlikely(in_atomic_preempt_off())) { 5970 __schedule_bug(prev); 5971 preempt_count_set(PREEMPT_DISABLED); 5972 } 5973 rcu_sleep_check(); 5974 SCHED_WARN_ON(ct_state() == CONTEXT_USER); 5975 5976 profile_hit(SCHED_PROFILING, __builtin_return_address(0)); 5977 5978 schedstat_inc(this_rq()->sched_count); 5979 } 5980 5981 static void put_prev_task_balance(struct rq *rq, struct task_struct *prev, 5982 struct rq_flags *rf) 5983 { 5984 #ifdef CONFIG_SMP 5985 const struct sched_class *class; 5986 /* 5987 * We must do the balancing pass before put_prev_task(), such 5988 * that when we release the rq->lock the task is in the same 5989 * state as before we took rq->lock. 5990 * 5991 * We can terminate the balance pass as soon as we know there is 5992 * a runnable task of @class priority or higher. 
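	 * (A non-zero return from ->balance() indicates exactly that.)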
5993 */ 5994 for_class_range(class, prev->sched_class, &idle_sched_class) { 5995 if (class->balance(rq, prev, rf)) 5996 break; 5997 } 5998 #endif 5999 6000 put_prev_task(rq, prev); 6001 } 6002 6003 /* 6004 * Pick up the highest-prio task: 6005 */ 6006 static inline struct task_struct * 6007 __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 6008 { 6009 const struct sched_class *class; 6010 struct task_struct *p; 6011 6012 /* 6013 * Optimization: we know that if all tasks are in the fair class we can 6014 * call that function directly, but only if the @prev task wasn't of a 6015 * higher scheduling class, because otherwise those lose the 6016 * opportunity to pull in more work from other CPUs. 6017 */ 6018 if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) && 6019 rq->nr_running == rq->cfs.h_nr_running)) { 6020 6021 p = pick_next_task_fair(rq, prev, rf); 6022 if (unlikely(p == RETRY_TASK)) 6023 goto restart; 6024 6025 /* Assume the next prioritized class is idle_sched_class */ 6026 if (!p) { 6027 put_prev_task(rq, prev); 6028 p = pick_next_task_idle(rq); 6029 } 6030 6031 /* 6032 * This is the fast path; it cannot be a DL server pick; 6033 * therefore even if @p == @prev, ->dl_server must be NULL. 6034 */ 6035 if (p->dl_server) 6036 p->dl_server = NULL; 6037 6038 return p; 6039 } 6040 6041 restart: 6042 put_prev_task_balance(rq, prev, rf); 6043 6044 /* 6045 * We've updated @prev and no longer need the server link, clear it. 6046 * Must be done before ->pick_next_task() because that can (re)set 6047 * ->dl_server. 6048 */ 6049 if (prev->dl_server) 6050 prev->dl_server = NULL; 6051 6052 for_each_class(class) { 6053 p = class->pick_next_task(rq); 6054 if (p) 6055 return p; 6056 } 6057 6058 BUG(); /* The idle class should always have a runnable task. */ 6059 } 6060 6061 #ifdef CONFIG_SCHED_CORE 6062 static inline bool is_task_rq_idle(struct task_struct *t) 6063 { 6064 return (task_rq(t)->idle == t); 6065 } 6066 6067 static inline bool cookie_equals(struct task_struct *a, unsigned long cookie) 6068 { 6069 return is_task_rq_idle(a) || (a->core_cookie == cookie); 6070 } 6071 6072 static inline bool cookie_match(struct task_struct *a, struct task_struct *b) 6073 { 6074 if (is_task_rq_idle(a) || is_task_rq_idle(b)) 6075 return true; 6076 6077 return a->core_cookie == b->core_cookie; 6078 } 6079 6080 static inline struct task_struct *pick_task(struct rq *rq) 6081 { 6082 const struct sched_class *class; 6083 struct task_struct *p; 6084 6085 for_each_class(class) { 6086 p = class->pick_task(rq); 6087 if (p) 6088 return p; 6089 } 6090 6091 BUG(); /* The idle class should always have a runnable task. */ 6092 } 6093 6094 extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi); 6095 6096 static void queue_core_balance(struct rq *rq); 6097 6098 static struct task_struct * 6099 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 6100 { 6101 struct task_struct *next, *p, *max = NULL; 6102 const struct cpumask *smt_mask; 6103 bool fi_before = false; 6104 bool core_clock_updated = (rq == rq->core); 6105 unsigned long cookie; 6106 int i, cpu, occ = 0; 6107 struct rq *rq_i; 6108 bool need_sync; 6109 6110 if (!sched_core_enabled(rq)) 6111 return __pick_next_task(rq, prev, rf); 6112 6113 cpu = cpu_of(rq); 6114 6115 /* Stopper task is switching into idle, no need core-wide selection. */ 6116 if (cpu_is_offline(cpu)) { 6117 /* 6118 * Reset core_pick so that we don't enter the fastpath when 6119 * coming online. 
core_pick would already be migrated to 6120 * another cpu during offline. 6121 */ 6122 rq->core_pick = NULL; 6123 return __pick_next_task(rq, prev, rf); 6124 } 6125 6126 /* 6127 * If there were no {en,de}queues since we picked (IOW, the task 6128 * pointers are all still valid), and we haven't scheduled the last 6129 * pick yet, do so now. 6130 * 6131 * rq->core_pick can be NULL if no selection was made for a CPU because 6132 * it was either offline or went offline during a sibling's core-wide 6133 * selection. In this case, do a core-wide selection. 6134 */ 6135 if (rq->core->core_pick_seq == rq->core->core_task_seq && 6136 rq->core->core_pick_seq != rq->core_sched_seq && 6137 rq->core_pick) { 6138 WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq); 6139 6140 next = rq->core_pick; 6141 if (next != prev) { 6142 put_prev_task(rq, prev); 6143 set_next_task(rq, next); 6144 } 6145 6146 rq->core_pick = NULL; 6147 goto out; 6148 } 6149 6150 put_prev_task_balance(rq, prev, rf); 6151 6152 smt_mask = cpu_smt_mask(cpu); 6153 need_sync = !!rq->core->core_cookie; 6154 6155 /* reset state */ 6156 rq->core->core_cookie = 0UL; 6157 if (rq->core->core_forceidle_count) { 6158 if (!core_clock_updated) { 6159 update_rq_clock(rq->core); 6160 core_clock_updated = true; 6161 } 6162 sched_core_account_forceidle(rq); 6163 /* reset after accounting force idle */ 6164 rq->core->core_forceidle_start = 0; 6165 rq->core->core_forceidle_count = 0; 6166 rq->core->core_forceidle_occupation = 0; 6167 need_sync = true; 6168 fi_before = true; 6169 } 6170 6171 /* 6172 * core->core_task_seq, core->core_pick_seq, rq->core_sched_seq 6173 * 6174 * @task_seq guards the task state ({en,de}queues) 6175 * @pick_seq is the @task_seq we did a selection on 6176 * @sched_seq is the @pick_seq we scheduled 6177 * 6178 * However, preemptions can cause multiple picks on the same task set. 6179 * 'Fix' this by also increasing @task_seq for every pick. 6180 */ 6181 rq->core->core_task_seq++; 6182 6183 /* 6184 * Optimize for common case where this CPU has no cookies 6185 * and there are no cookied tasks running on siblings. 6186 */ 6187 if (!need_sync) { 6188 next = pick_task(rq); 6189 if (!next->core_cookie) { 6190 rq->core_pick = NULL; 6191 /* 6192 * For robustness, update the min_vruntime_fi for 6193 * unconstrained picks as well. 6194 */ 6195 WARN_ON_ONCE(fi_before); 6196 task_vruntime_update(rq, next, false); 6197 goto out_set_next; 6198 } 6199 } 6200 6201 /* 6202 * For each thread: do the regular task pick and find the max prio task 6203 * amongst them. 6204 * 6205 * Tie-break prio towards the current CPU 6206 */ 6207 for_each_cpu_wrap(i, smt_mask, cpu) { 6208 rq_i = cpu_rq(i); 6209 6210 /* 6211 * Current cpu always has its clock updated on entrance to 6212 * pick_next_task(). If the current cpu is not the core, 6213 * the core may also have been updated above. 6214 */ 6215 if (i != cpu && (rq_i != rq->core || !core_clock_updated)) 6216 update_rq_clock(rq_i); 6217 6218 p = rq_i->core_pick = pick_task(rq_i); 6219 if (!max || prio_less(max, p, fi_before)) 6220 max = p; 6221 } 6222 6223 cookie = rq->core->core_cookie = max->core_cookie; 6224 6225 /* 6226 * For each thread: try and find a runnable task that matches @max or 6227 * force idle. 
6228 */ 6229 for_each_cpu(i, smt_mask) { 6230 rq_i = cpu_rq(i); 6231 p = rq_i->core_pick; 6232 6233 if (!cookie_equals(p, cookie)) { 6234 p = NULL; 6235 if (cookie) 6236 p = sched_core_find(rq_i, cookie); 6237 if (!p) 6238 p = idle_sched_class.pick_task(rq_i); 6239 } 6240 6241 rq_i->core_pick = p; 6242 6243 if (p == rq_i->idle) { 6244 if (rq_i->nr_running) { 6245 rq->core->core_forceidle_count++; 6246 if (!fi_before) 6247 rq->core->core_forceidle_seq++; 6248 } 6249 } else { 6250 occ++; 6251 } 6252 } 6253 6254 if (schedstat_enabled() && rq->core->core_forceidle_count) { 6255 rq->core->core_forceidle_start = rq_clock(rq->core); 6256 rq->core->core_forceidle_occupation = occ; 6257 } 6258 6259 rq->core->core_pick_seq = rq->core->core_task_seq; 6260 next = rq->core_pick; 6261 rq->core_sched_seq = rq->core->core_pick_seq; 6262 6263 /* Something should have been selected for current CPU */ 6264 WARN_ON_ONCE(!next); 6265 6266 /* 6267 * Reschedule siblings 6268 * 6269 * NOTE: L1TF -- at this point we're no longer running the old task and 6270 * sending an IPI (below) ensures the sibling will no longer be running 6271 * their task. This ensures there is no inter-sibling overlap between 6272 * non-matching user state. 6273 */ 6274 for_each_cpu(i, smt_mask) { 6275 rq_i = cpu_rq(i); 6276 6277 /* 6278 * An online sibling might have gone offline before a task 6279 * could be picked for it, or it might be offline but later 6280 * happen to come online, but its too late and nothing was 6281 * picked for it. That's Ok - it will pick tasks for itself, 6282 * so ignore it. 6283 */ 6284 if (!rq_i->core_pick) 6285 continue; 6286 6287 /* 6288 * Update for new !FI->FI transitions, or if continuing to be in !FI: 6289 * fi_before fi update? 6290 * 0 0 1 6291 * 0 1 1 6292 * 1 0 1 6293 * 1 1 0 6294 */ 6295 if (!(fi_before && rq->core->core_forceidle_count)) 6296 task_vruntime_update(rq_i, rq_i->core_pick, !!rq->core->core_forceidle_count); 6297 6298 rq_i->core_pick->core_occupation = occ; 6299 6300 if (i == cpu) { 6301 rq_i->core_pick = NULL; 6302 continue; 6303 } 6304 6305 /* Did we break L1TF mitigation requirements? */ 6306 WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick)); 6307 6308 if (rq_i->curr == rq_i->core_pick) { 6309 rq_i->core_pick = NULL; 6310 continue; 6311 } 6312 6313 resched_curr(rq_i); 6314 } 6315 6316 out_set_next: 6317 set_next_task(rq, next); 6318 out: 6319 if (rq->core->core_forceidle_count && next == rq->idle) 6320 queue_core_balance(rq); 6321 6322 return next; 6323 } 6324 6325 static bool try_steal_cookie(int this, int that) 6326 { 6327 struct rq *dst = cpu_rq(this), *src = cpu_rq(that); 6328 struct task_struct *p; 6329 unsigned long cookie; 6330 bool success = false; 6331 6332 guard(irq)(); 6333 guard(double_rq_lock)(dst, src); 6334 6335 cookie = dst->core->core_cookie; 6336 if (!cookie) 6337 return false; 6338 6339 if (dst->curr != dst->idle) 6340 return false; 6341 6342 p = sched_core_find(src, cookie); 6343 if (!p) 6344 return false; 6345 6346 do { 6347 if (p == src->core_pick || p == src->curr) 6348 goto next; 6349 6350 if (!is_cpu_allowed(p, this)) 6351 goto next; 6352 6353 if (p->core_occupation > dst->idle->core_occupation) 6354 goto next; 6355 /* 6356 * sched_core_find() and sched_core_next() will ensure 6357 * that task @p is not throttled now, we also need to 6358 * check whether the runqueue of the destination CPU is 6359 * being throttled. 
6360 */ 6361 if (sched_task_is_throttled(p, this)) 6362 goto next; 6363 6364 deactivate_task(src, p, 0); 6365 set_task_cpu(p, this); 6366 activate_task(dst, p, 0); 6367 6368 resched_curr(dst); 6369 6370 success = true; 6371 break; 6372 6373 next: 6374 p = sched_core_next(p, cookie); 6375 } while (p); 6376 6377 return success; 6378 } 6379 6380 static bool steal_cookie_task(int cpu, struct sched_domain *sd) 6381 { 6382 int i; 6383 6384 for_each_cpu_wrap(i, sched_domain_span(sd), cpu + 1) { 6385 if (i == cpu) 6386 continue; 6387 6388 if (need_resched()) 6389 break; 6390 6391 if (try_steal_cookie(cpu, i)) 6392 return true; 6393 } 6394 6395 return false; 6396 } 6397 6398 static void sched_core_balance(struct rq *rq) 6399 { 6400 struct sched_domain *sd; 6401 int cpu = cpu_of(rq); 6402 6403 guard(preempt)(); 6404 guard(rcu)(); 6405 6406 raw_spin_rq_unlock_irq(rq); 6407 for_each_domain(cpu, sd) { 6408 if (need_resched()) 6409 break; 6410 6411 if (steal_cookie_task(cpu, sd)) 6412 break; 6413 } 6414 raw_spin_rq_lock_irq(rq); 6415 } 6416 6417 static DEFINE_PER_CPU(struct balance_callback, core_balance_head); 6418 6419 static void queue_core_balance(struct rq *rq) 6420 { 6421 if (!sched_core_enabled(rq)) 6422 return; 6423 6424 if (!rq->core->core_cookie) 6425 return; 6426 6427 if (!rq->nr_running) /* not forced idle */ 6428 return; 6429 6430 queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance); 6431 } 6432 6433 DEFINE_LOCK_GUARD_1(core_lock, int, 6434 sched_core_lock(*_T->lock, &_T->flags), 6435 sched_core_unlock(*_T->lock, &_T->flags), 6436 unsigned long flags) 6437 6438 static void sched_core_cpu_starting(unsigned int cpu) 6439 { 6440 const struct cpumask *smt_mask = cpu_smt_mask(cpu); 6441 struct rq *rq = cpu_rq(cpu), *core_rq = NULL; 6442 int t; 6443 6444 guard(core_lock)(&cpu); 6445 6446 WARN_ON_ONCE(rq->core != rq); 6447 6448 /* if we're the first, we'll be our own leader */ 6449 if (cpumask_weight(smt_mask) == 1) 6450 return; 6451 6452 /* find the leader */ 6453 for_each_cpu(t, smt_mask) { 6454 if (t == cpu) 6455 continue; 6456 rq = cpu_rq(t); 6457 if (rq->core == rq) { 6458 core_rq = rq; 6459 break; 6460 } 6461 } 6462 6463 if (WARN_ON_ONCE(!core_rq)) /* whoopsie */ 6464 return; 6465 6466 /* install and validate core_rq */ 6467 for_each_cpu(t, smt_mask) { 6468 rq = cpu_rq(t); 6469 6470 if (t == cpu) 6471 rq->core = core_rq; 6472 6473 WARN_ON_ONCE(rq->core != core_rq); 6474 } 6475 } 6476 6477 static void sched_core_cpu_deactivate(unsigned int cpu) 6478 { 6479 const struct cpumask *smt_mask = cpu_smt_mask(cpu); 6480 struct rq *rq = cpu_rq(cpu), *core_rq = NULL; 6481 int t; 6482 6483 guard(core_lock)(&cpu); 6484 6485 /* if we're the last man standing, nothing to do */ 6486 if (cpumask_weight(smt_mask) == 1) { 6487 WARN_ON_ONCE(rq->core != rq); 6488 return; 6489 } 6490 6491 /* if we're not the leader, nothing to do */ 6492 if (rq->core != rq) 6493 return; 6494 6495 /* find a new leader */ 6496 for_each_cpu(t, smt_mask) { 6497 if (t == cpu) 6498 continue; 6499 core_rq = cpu_rq(t); 6500 break; 6501 } 6502 6503 if (WARN_ON_ONCE(!core_rq)) /* impossible */ 6504 return; 6505 6506 /* copy the shared state to the new leader */ 6507 core_rq->core_task_seq = rq->core_task_seq; 6508 core_rq->core_pick_seq = rq->core_pick_seq; 6509 core_rq->core_cookie = rq->core_cookie; 6510 core_rq->core_forceidle_count = rq->core_forceidle_count; 6511 core_rq->core_forceidle_seq = rq->core_forceidle_seq; 6512 core_rq->core_forceidle_occupation = rq->core_forceidle_occupation; 6513 6514 /* 
6515 * Accounting edge for forced idle is handled in pick_next_task(). 6516 * Don't need another one here, since the hotplug thread shouldn't 6517 * have a cookie. 6518 */ 6519 core_rq->core_forceidle_start = 0; 6520 6521 /* install new leader */ 6522 for_each_cpu(t, smt_mask) { 6523 rq = cpu_rq(t); 6524 rq->core = core_rq; 6525 } 6526 } 6527 6528 static inline void sched_core_cpu_dying(unsigned int cpu) 6529 { 6530 struct rq *rq = cpu_rq(cpu); 6531 6532 if (rq->core != rq) 6533 rq->core = rq; 6534 } 6535 6536 #else /* !CONFIG_SCHED_CORE */ 6537 6538 static inline void sched_core_cpu_starting(unsigned int cpu) {} 6539 static inline void sched_core_cpu_deactivate(unsigned int cpu) {} 6540 static inline void sched_core_cpu_dying(unsigned int cpu) {} 6541 6542 static struct task_struct * 6543 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 6544 { 6545 return __pick_next_task(rq, prev, rf); 6546 } 6547 6548 #endif /* CONFIG_SCHED_CORE */ 6549 6550 /* 6551 * Constants for the sched_mode argument of __schedule(). 6552 * 6553 * The mode argument allows RT enabled kernels to differentiate a 6554 * preemption from blocking on an 'sleeping' spin/rwlock. Note that 6555 * SM_MASK_PREEMPT for !RT has all bits set, which allows the compiler to 6556 * optimize the AND operation out and just check for zero. 6557 */ 6558 #define SM_NONE 0x0 6559 #define SM_PREEMPT 0x1 6560 #define SM_RTLOCK_WAIT 0x2 6561 6562 #ifndef CONFIG_PREEMPT_RT 6563 # define SM_MASK_PREEMPT (~0U) 6564 #else 6565 # define SM_MASK_PREEMPT SM_PREEMPT 6566 #endif 6567 6568 /* 6569 * __schedule() is the main scheduler function. 6570 * 6571 * The main means of driving the scheduler and thus entering this function are: 6572 * 6573 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc. 6574 * 6575 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return 6576 * paths. For example, see arch/x86/entry_64.S. 6577 * 6578 * To drive preemption between tasks, the scheduler sets the flag in timer 6579 * interrupt handler scheduler_tick(). 6580 * 6581 * 3. Wakeups don't really cause entry into schedule(). They add a 6582 * task to the run-queue and that's it. 6583 * 6584 * Now, if the new task added to the run-queue preempts the current 6585 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets 6586 * called on the nearest possible occasion: 6587 * 6588 * - If the kernel is preemptible (CONFIG_PREEMPTION=y): 6589 * 6590 * - in syscall or exception context, at the next outmost 6591 * preempt_enable(). (this might be as soon as the wake_up()'s 6592 * spin_unlock()!) 6593 * 6594 * - in IRQ context, return from interrupt-handler to 6595 * preemptible context 6596 * 6597 * - If the kernel is not preemptible (CONFIG_PREEMPTION is not set) 6598 * then at the next: 6599 * 6600 * - cond_resched() call 6601 * - explicit schedule() call 6602 * - return from syscall or exception to user-space 6603 * - return from interrupt-handler to user-space 6604 * 6605 * WARNING: must be called with preemption disabled! 
6606 */ 6607 static void __sched notrace __schedule(unsigned int sched_mode) 6608 { 6609 struct task_struct *prev, *next; 6610 unsigned long *switch_count; 6611 unsigned long prev_state; 6612 struct rq_flags rf; 6613 struct rq *rq; 6614 int cpu; 6615 6616 cpu = smp_processor_id(); 6617 rq = cpu_rq(cpu); 6618 prev = rq->curr; 6619 6620 schedule_debug(prev, !!sched_mode); 6621 6622 if (sched_feat(HRTICK) || sched_feat(HRTICK_DL)) 6623 hrtick_clear(rq); 6624 6625 local_irq_disable(); 6626 rcu_note_context_switch(!!sched_mode); 6627 6628 /* 6629 * Make sure that signal_pending_state()->signal_pending() below 6630 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) 6631 * done by the caller to avoid the race with signal_wake_up(): 6632 * 6633 * __set_current_state(@state) signal_wake_up() 6634 * schedule() set_tsk_thread_flag(p, TIF_SIGPENDING) 6635 * wake_up_state(p, state) 6636 * LOCK rq->lock LOCK p->pi_state 6637 * smp_mb__after_spinlock() smp_mb__after_spinlock() 6638 * if (signal_pending_state()) if (p->state & @state) 6639 * 6640 * Also, the membarrier system call requires a full memory barrier 6641 * after coming from user-space, before storing to rq->curr. 6642 */ 6643 rq_lock(rq, &rf); 6644 smp_mb__after_spinlock(); 6645 6646 /* Promote REQ to ACT */ 6647 rq->clock_update_flags <<= 1; 6648 update_rq_clock(rq); 6649 rq->clock_update_flags = RQCF_UPDATED; 6650 6651 switch_count = &prev->nivcsw; 6652 6653 /* 6654 * We must load prev->state once (task_struct::state is volatile), such 6655 * that we form a control dependency vs deactivate_task() below. 6656 */ 6657 prev_state = READ_ONCE(prev->__state); 6658 if (!(sched_mode & SM_MASK_PREEMPT) && prev_state) { 6659 if (signal_pending_state(prev_state, prev)) { 6660 WRITE_ONCE(prev->__state, TASK_RUNNING); 6661 } else { 6662 prev->sched_contributes_to_load = 6663 (prev_state & TASK_UNINTERRUPTIBLE) && 6664 !(prev_state & TASK_NOLOAD) && 6665 !(prev_state & TASK_FROZEN); 6666 6667 if (prev->sched_contributes_to_load) 6668 rq->nr_uninterruptible++; 6669 6670 /* 6671 * __schedule() ttwu() 6672 * prev_state = prev->state; if (p->on_rq && ...) 6673 * if (prev_state) goto out; 6674 * p->on_rq = 0; smp_acquire__after_ctrl_dep(); 6675 * p->state = TASK_WAKING 6676 * 6677 * Where __schedule() and ttwu() have matching control dependencies. 6678 * 6679 * After this, schedule() must not care about p->state any more. 6680 */ 6681 deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK); 6682 6683 if (prev->in_iowait) { 6684 atomic_inc(&rq->nr_iowait); 6685 delayacct_blkio_start(); 6686 } 6687 } 6688 switch_count = &prev->nvcsw; 6689 } 6690 6691 next = pick_next_task(rq, prev, &rf); 6692 clear_tsk_need_resched(prev); 6693 clear_preempt_need_resched(); 6694 #ifdef CONFIG_SCHED_DEBUG 6695 rq->last_seen_need_resched_ns = 0; 6696 #endif 6697 6698 if (likely(prev != next)) { 6699 rq->nr_switches++; 6700 /* 6701 * RCU users of rcu_dereference(rq->curr) may not see 6702 * changes to task_struct made by pick_next_task(). 6703 */ 6704 RCU_INIT_POINTER(rq->curr, next); 6705 /* 6706 * The membarrier system call requires each architecture 6707 * to have a full memory barrier after updating 6708 * rq->curr, before returning to user-space. 6709 * 6710 * Here are the schemes providing that barrier on the 6711 * various architectures: 6712 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC. 6713 * switch_mm() rely on membarrier_arch_switch_mm() on PowerPC. 
* - finish_lock_switch() for weakly-ordered 6715 * architectures where spin_unlock is a full barrier, 6716 * - switch_to() for arm64 (weakly-ordered, spin_unlock 6717 * is a RELEASE barrier), 6718 */ 6719 ++*switch_count; 6720 6721 migrate_disable_switch(rq, prev); 6722 psi_sched_switch(prev, next, !task_on_rq_queued(prev)); 6723 6724 trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state); 6725 6726 /* Also unlocks the rq: */ 6727 rq = context_switch(rq, prev, next, &rf); 6728 } else { 6729 rq_unpin_lock(rq, &rf); 6730 __balance_callbacks(rq); 6731 raw_spin_rq_unlock_irq(rq); 6732 } 6733 } 6734 6735 void __noreturn do_task_dead(void) 6736 { 6737 /* Causes final put_task_struct in finish_task_switch(): */ 6738 set_special_state(TASK_DEAD); 6739 6740 /* Tell freezer to ignore us: */ 6741 current->flags |= PF_NOFREEZE; 6742 6743 __schedule(SM_NONE); 6744 BUG(); 6745 6746 /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */ 6747 for (;;) 6748 cpu_relax(); 6749 } 6750 6751 static inline void sched_submit_work(struct task_struct *tsk) 6752 { 6753 static DEFINE_WAIT_OVERRIDE_MAP(sched_map, LD_WAIT_CONFIG); 6754 unsigned int task_flags; 6755 6756 /* 6757 * Establish LD_WAIT_CONFIG context to ensure none of the code called 6758 * will use a blocking primitive -- which would lead to recursion. 6759 */ 6760 lock_map_acquire_try(&sched_map); 6761 6762 task_flags = tsk->flags; 6763 /* 6764 * If a worker goes to sleep, notify and ask workqueue whether it 6765 * wants to wake up a task to maintain concurrency. 6766 */ 6767 if (task_flags & PF_WQ_WORKER) 6768 wq_worker_sleeping(tsk); 6769 else if (task_flags & PF_IO_WORKER) 6770 io_wq_worker_sleeping(tsk); 6771 6772 /* 6773 * spinlock and rwlock must not flush block requests. This will 6774 * deadlock if the callback attempts to acquire a lock which is 6775 * already acquired. 6776 */ 6777 SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT); 6778 6779 /* 6780 * If we are going to sleep and we have plugged IO queued, 6781 * make sure to submit it to avoid deadlocks. 6782 */ 6783 blk_flush_plug(tsk->plug, true); 6784 6785 lock_map_release(&sched_map); 6786 } 6787 6788 static void sched_update_worker(struct task_struct *tsk) 6789 { 6790 if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) { 6791 if (tsk->flags & PF_WQ_WORKER) 6792 wq_worker_running(tsk); 6793 else 6794 io_wq_worker_running(tsk); 6795 } 6796 } 6797 6798 static __always_inline void __schedule_loop(unsigned int sched_mode) 6799 { 6800 do { 6801 preempt_disable(); 6802 __schedule(sched_mode); 6803 sched_preempt_enable_no_resched(); 6804 } while (need_resched()); 6805 } 6806 6807 asmlinkage __visible void __sched schedule(void) 6808 { 6809 struct task_struct *tsk = current; 6810 6811 #ifdef CONFIG_RT_MUTEXES 6812 lockdep_assert(!tsk->sched_rt_mutex); 6813 #endif 6814 6815 if (!task_is_running(tsk)) 6816 sched_submit_work(tsk); 6817 __schedule_loop(SM_NONE); 6818 sched_update_worker(tsk); 6819 } 6820 EXPORT_SYMBOL(schedule); 6821 6822 /* 6823 * synchronize_rcu_tasks() makes sure that no task is stuck in preempted 6824 * state (have scheduled out non-voluntarily) by making sure that all 6825 * tasks have either left the run queue or have gone into user space. 6826 * As idle tasks do not do either, they must not ever be preempted 6827 * (schedule out non-voluntarily). 6828 * 6829 * schedule_idle() is similar to schedule_preempt_disabled() except that it 6830 * never enables preemption because it does not call sched_submit_work().
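 * (schedule_idle() is called from the idle loop itself; see do_idle() in
 * kernel/sched/idle.c.)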
6831 */ 6832 void __sched schedule_idle(void) 6833 { 6834 /* 6835 * As this skips calling sched_submit_work(), which the idle task does 6836 * regardless because that function is a nop when the task is in a 6837 * TASK_RUNNING state, make sure this isn't used someplace that the 6838 * current task can be in any other state. Note, idle is always in the 6839 * TASK_RUNNING state. 6840 */ 6841 WARN_ON_ONCE(current->__state); 6842 do { 6843 __schedule(SM_NONE); 6844 } while (need_resched()); 6845 } 6846 6847 #if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) 6848 asmlinkage __visible void __sched schedule_user(void) 6849 { 6850 /* 6851 * If we come here after a random call to set_need_resched(), 6852 * or we have been woken up remotely but the IPI has not yet arrived, 6853 * we haven't yet exited the RCU idle mode. Do it here manually until 6854 * we find a better solution. 6855 * 6856 * NB: There are buggy callers of this function. Ideally we 6857 * should warn if prev_state != CONTEXT_USER, but that will trigger 6858 * too frequently to make sense yet. 6859 */ 6860 enum ctx_state prev_state = exception_enter(); 6861 schedule(); 6862 exception_exit(prev_state); 6863 } 6864 #endif 6865 6866 /** 6867 * schedule_preempt_disabled - called with preemption disabled 6868 * 6869 * Returns with preemption disabled. Note: preempt_count must be 1 6870 */ 6871 void __sched schedule_preempt_disabled(void) 6872 { 6873 sched_preempt_enable_no_resched(); 6874 schedule(); 6875 preempt_disable(); 6876 } 6877 6878 #ifdef CONFIG_PREEMPT_RT 6879 void __sched notrace schedule_rtlock(void) 6880 { 6881 __schedule_loop(SM_RTLOCK_WAIT); 6882 } 6883 NOKPROBE_SYMBOL(schedule_rtlock); 6884 #endif 6885 6886 static void __sched notrace preempt_schedule_common(void) 6887 { 6888 do { 6889 /* 6890 * Because the function tracer can trace preempt_count_sub() 6891 * and it also uses preempt_enable/disable_notrace(), if 6892 * NEED_RESCHED is set, the preempt_enable_notrace() called 6893 * by the function tracer will call this function again and 6894 * cause infinite recursion. 6895 * 6896 * Preemption must be disabled here before the function 6897 * tracer can trace. Break up preempt_disable() into two 6898 * calls. One to disable preemption without fear of being 6899 * traced. The other to still record the preemption latency, 6900 * which can also be traced by the function tracer. 6901 */ 6902 preempt_disable_notrace(); 6903 preempt_latency_start(1); 6904 __schedule(SM_PREEMPT); 6905 preempt_latency_stop(1); 6906 preempt_enable_no_resched_notrace(); 6907 6908 /* 6909 * Check again in case we missed a preemption opportunity 6910 * between schedule and now. 6911 */ 6912 } while (need_resched()); 6913 } 6914 6915 #ifdef CONFIG_PREEMPTION 6916 /* 6917 * This is the entry point to schedule() from in-kernel preemption 6918 * off of preempt_enable. 6919 */ 6920 asmlinkage __visible void __sched notrace preempt_schedule(void) 6921 { 6922 /* 6923 * If there is a non-zero preempt_count or interrupts are disabled, 6924 * we do not want to preempt the current task. Just return.. 
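 * (This is exactly what the preemptible() test below checks: it is only
 * true when preempt_count() is zero and interrupts are enabled.)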
6925 */ 6926 if (likely(!preemptible())) 6927 return; 6928 preempt_schedule_common(); 6929 } 6930 NOKPROBE_SYMBOL(preempt_schedule); 6931 EXPORT_SYMBOL(preempt_schedule); 6932 6933 #ifdef CONFIG_PREEMPT_DYNAMIC 6934 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) 6935 #ifndef preempt_schedule_dynamic_enabled 6936 #define preempt_schedule_dynamic_enabled preempt_schedule 6937 #define preempt_schedule_dynamic_disabled NULL 6938 #endif 6939 DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled); 6940 EXPORT_STATIC_CALL_TRAMP(preempt_schedule); 6941 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) 6942 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule); 6943 void __sched notrace dynamic_preempt_schedule(void) 6944 { 6945 if (!static_branch_unlikely(&sk_dynamic_preempt_schedule)) 6946 return; 6947 preempt_schedule(); 6948 } 6949 NOKPROBE_SYMBOL(dynamic_preempt_schedule); 6950 EXPORT_SYMBOL(dynamic_preempt_schedule); 6951 #endif 6952 #endif 6953 6954 /** 6955 * preempt_schedule_notrace - preempt_schedule called by tracing 6956 * 6957 * The tracing infrastructure uses preempt_enable_notrace to prevent 6958 * recursion and tracing preempt enabling caused by the tracing 6959 * infrastructure itself. But as tracing can happen in areas coming 6960 * from userspace or just about to enter userspace, a preempt enable 6961 * can occur before user_exit() is called. This will cause the scheduler 6962 * to be called when the system is still in usermode. 6963 * 6964 * To prevent this, the preempt_enable_notrace will use this function 6965 * instead of preempt_schedule() to exit user context if needed before 6966 * calling the scheduler. 6967 */ 6968 asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) 6969 { 6970 enum ctx_state prev_ctx; 6971 6972 if (likely(!preemptible())) 6973 return; 6974 6975 do { 6976 /* 6977 * Because the function tracer can trace preempt_count_sub() 6978 * and it also uses preempt_enable/disable_notrace(), if 6979 * NEED_RESCHED is set, the preempt_enable_notrace() called 6980 * by the function tracer will call this function again and 6981 * cause infinite recursion. 6982 * 6983 * Preemption must be disabled here before the function 6984 * tracer can trace. Break up preempt_disable() into two 6985 * calls. One to disable preemption without fear of being 6986 * traced. The other to still record the preemption latency, 6987 * which can also be traced by the function tracer. 6988 */ 6989 preempt_disable_notrace(); 6990 preempt_latency_start(1); 6991 /* 6992 * Needs preempt disabled in case user_exit() is traced 6993 * and the tracer calls preempt_enable_notrace() causing 6994 * an infinite recursion. 
*/ 6996 prev_ctx = exception_enter(); 6997 __schedule(SM_PREEMPT); 6998 exception_exit(prev_ctx); 6999 7000 preempt_latency_stop(1); 7001 preempt_enable_no_resched_notrace(); 7002 } while (need_resched()); 7003 } 7004 EXPORT_SYMBOL_GPL(preempt_schedule_notrace); 7005 7006 #ifdef CONFIG_PREEMPT_DYNAMIC 7007 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) 7008 #ifndef preempt_schedule_notrace_dynamic_enabled 7009 #define preempt_schedule_notrace_dynamic_enabled preempt_schedule_notrace 7010 #define preempt_schedule_notrace_dynamic_disabled NULL 7011 #endif 7012 DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled); 7013 EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace); 7014 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) 7015 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace); 7016 void __sched notrace dynamic_preempt_schedule_notrace(void) 7017 { 7018 if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace)) 7019 return; 7020 preempt_schedule_notrace(); 7021 } 7022 NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace); 7023 EXPORT_SYMBOL(dynamic_preempt_schedule_notrace); 7024 #endif 7025 #endif 7026 7027 #endif /* CONFIG_PREEMPTION */ 7028 7029 /* 7030 * This is the entry point to schedule() from kernel preemption 7031 * off of irq context. 7032 * Note that this is called and returns with IRQs disabled. This 7033 * protects us against recursive calls from IRQ context. 7034 */ 7035 asmlinkage __visible void __sched preempt_schedule_irq(void) 7036 { 7037 enum ctx_state prev_state; 7038 7039 /* Catch callers which need to be fixed */ 7040 BUG_ON(preempt_count() || !irqs_disabled()); 7041 7042 prev_state = exception_enter(); 7043 7044 do { 7045 preempt_disable(); 7046 local_irq_enable(); 7047 __schedule(SM_PREEMPT); 7048 local_irq_disable(); 7049 sched_preempt_enable_no_resched(); 7050 } while (need_resched()); 7051 7052 exception_exit(prev_state); 7053 } 7054 7055 int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags, 7056 void *key) 7057 { 7058 WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~(WF_SYNC|WF_CURRENT_CPU)); 7059 return try_to_wake_up(curr->private, mode, wake_flags); 7060 } 7061 EXPORT_SYMBOL(default_wake_function); 7062 7063 static void __setscheduler_prio(struct task_struct *p, int prio) 7064 { 7065 if (dl_prio(prio)) 7066 p->sched_class = &dl_sched_class; 7067 else if (rt_prio(prio)) 7068 p->sched_class = &rt_sched_class; 7069 else 7070 p->sched_class = &fair_sched_class; 7071 7072 p->prio = prio; 7073 } 7074 7075 #ifdef CONFIG_RT_MUTEXES 7076 7077 /* 7078 * Would be more useful with typeof()/auto_type but they don't mix with 7079 * bit-fields. Since it's a local thing, use int. Keep the generic sounding 7080 * name such that if someone were to implement this function we get to compare 7081 * notes.
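 *
 * For illustration, the helpers below use it as:
 *
 *	lockdep_assert(!fetch_and_set(current->sched_rt_mutex, 1));
 *
 * which asserts that the old value was 0 while also setting the flag to 1.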
*/ 7083 #define fetch_and_set(x, v) ({ int _x = (x); (x) = (v); _x; }) 7084 7085 void rt_mutex_pre_schedule(void) 7086 { 7087 lockdep_assert(!fetch_and_set(current->sched_rt_mutex, 1)); 7088 sched_submit_work(current); 7089 } 7090 7091 void rt_mutex_schedule(void) 7092 { 7093 lockdep_assert(current->sched_rt_mutex); 7094 __schedule_loop(SM_NONE); 7095 } 7096 7097 void rt_mutex_post_schedule(void) 7098 { 7099 sched_update_worker(current); 7100 lockdep_assert(fetch_and_set(current->sched_rt_mutex, 0)); 7101 } 7102 7103 static inline int __rt_effective_prio(struct task_struct *pi_task, int prio) 7104 { 7105 if (pi_task) 7106 prio = min(prio, pi_task->prio); 7107 7108 return prio; 7109 } 7110 7111 static inline int rt_effective_prio(struct task_struct *p, int prio) 7112 { 7113 struct task_struct *pi_task = rt_mutex_get_top_task(p); 7114 7115 return __rt_effective_prio(pi_task, prio); 7116 } 7117 7118 /* 7119 * rt_mutex_setprio - set the current priority of a task 7120 * @p: task to boost 7121 * @pi_task: donor task 7122 * 7123 * This function changes the 'effective' priority of a task. It does 7124 * not touch ->normal_prio like __setscheduler(). 7125 * 7126 * Used by the rt_mutex code to implement priority inheritance 7127 * logic. The call site only calls this if the priority of the task has changed. 7128 */ 7129 void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) 7130 { 7131 int prio, oldprio, queued, running, queue_flag = 7132 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; 7133 const struct sched_class *prev_class; 7134 struct rq_flags rf; 7135 struct rq *rq; 7136 7137 /* XXX used to be waiter->prio, not waiter->task->prio */ 7138 prio = __rt_effective_prio(pi_task, p->normal_prio); 7139 7140 /* 7141 * If nothing changed; bail early. 7142 */ 7143 if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio)) 7144 return; 7145 7146 rq = __task_rq_lock(p, &rf); 7147 update_rq_clock(rq); 7148 /* 7149 * Set under pi_lock && rq->lock, such that the value can be used under 7150 * either lock. 7151 * 7152 * Note that there is a lot of trickery involved in making this pointer 7153 * cache work right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to 7154 * ensure a task is de-boosted (pi_task is set to NULL) before the 7155 * task is allowed to run again (and can exit). This ensures the pointer 7156 * points to a blocked task -- which guarantees the task is present. 7157 */ 7158 p->pi_top_task = pi_task; 7159 7160 /* 7161 * For FIFO/RR we only need to set prio, if that matches we're done. 7162 */ 7163 if (prio == p->prio && !dl_prio(prio)) 7164 goto out_unlock; 7165 7166 /* 7167 * Idle task boosting is a no-no in general. There is one 7168 * exception, when PREEMPT_RT and NOHZ are active: 7169 * 7170 * The idle task calls get_next_timer_interrupt() and holds 7171 * the timer wheel base->lock on the CPU and another CPU wants 7172 * to access the timer (probably to cancel it). We can safely 7173 * ignore the boosting request, as the idle CPU runs this code 7174 * with interrupts disabled and will complete the lock 7175 * protected section without being interrupted. So there is no 7176 * real need to boost.
7177 */ 7178 if (unlikely(p == rq->idle)) { 7179 WARN_ON(p != rq->curr); 7180 WARN_ON(p->pi_blocked_on); 7181 goto out_unlock; 7182 } 7183 7184 trace_sched_pi_setprio(p, pi_task); 7185 oldprio = p->prio; 7186 7187 if (oldprio == prio) 7188 queue_flag &= ~DEQUEUE_MOVE; 7189 7190 prev_class = p->sched_class; 7191 queued = task_on_rq_queued(p); 7192 running = task_current(rq, p); 7193 if (queued) 7194 dequeue_task(rq, p, queue_flag); 7195 if (running) 7196 put_prev_task(rq, p); 7197 7198 /* 7199 * Boosting condition are: 7200 * 1. -rt task is running and holds mutex A 7201 * --> -dl task blocks on mutex A 7202 * 7203 * 2. -dl task is running and holds mutex A 7204 * --> -dl task blocks on mutex A and could preempt the 7205 * running task 7206 */ 7207 if (dl_prio(prio)) { 7208 if (!dl_prio(p->normal_prio) || 7209 (pi_task && dl_prio(pi_task->prio) && 7210 dl_entity_preempt(&pi_task->dl, &p->dl))) { 7211 p->dl.pi_se = pi_task->dl.pi_se; 7212 queue_flag |= ENQUEUE_REPLENISH; 7213 } else { 7214 p->dl.pi_se = &p->dl; 7215 } 7216 } else if (rt_prio(prio)) { 7217 if (dl_prio(oldprio)) 7218 p->dl.pi_se = &p->dl; 7219 if (oldprio < prio) 7220 queue_flag |= ENQUEUE_HEAD; 7221 } else { 7222 if (dl_prio(oldprio)) 7223 p->dl.pi_se = &p->dl; 7224 if (rt_prio(oldprio)) 7225 p->rt.timeout = 0; 7226 } 7227 7228 __setscheduler_prio(p, prio); 7229 7230 if (queued) 7231 enqueue_task(rq, p, queue_flag); 7232 if (running) 7233 set_next_task(rq, p); 7234 7235 check_class_changed(rq, p, prev_class, oldprio); 7236 out_unlock: 7237 /* Avoid rq from going away on us: */ 7238 preempt_disable(); 7239 7240 rq_unpin_lock(rq, &rf); 7241 __balance_callbacks(rq); 7242 raw_spin_rq_unlock(rq); 7243 7244 preempt_enable(); 7245 } 7246 #else 7247 static inline int rt_effective_prio(struct task_struct *p, int prio) 7248 { 7249 return prio; 7250 } 7251 #endif 7252 7253 void set_user_nice(struct task_struct *p, long nice) 7254 { 7255 bool queued, running; 7256 struct rq *rq; 7257 int old_prio; 7258 7259 if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE) 7260 return; 7261 /* 7262 * We have to be careful, if called from sys_setpriority(), 7263 * the task might be in the middle of scheduling on another CPU. 7264 */ 7265 CLASS(task_rq_lock, rq_guard)(p); 7266 rq = rq_guard.rq; 7267 7268 update_rq_clock(rq); 7269 7270 /* 7271 * The RT priorities are set via sched_setscheduler(), but we still 7272 * allow the 'normal' nice value to be set - but as expected 7273 * it won't have any effect on scheduling until the task is 7274 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR: 7275 */ 7276 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { 7277 p->static_prio = NICE_TO_PRIO(nice); 7278 return; 7279 } 7280 7281 queued = task_on_rq_queued(p); 7282 running = task_current(rq, p); 7283 if (queued) 7284 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); 7285 if (running) 7286 put_prev_task(rq, p); 7287 7288 p->static_prio = NICE_TO_PRIO(nice); 7289 set_load_weight(p, true); 7290 old_prio = p->prio; 7291 p->prio = effective_prio(p); 7292 7293 if (queued) 7294 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); 7295 if (running) 7296 set_next_task(rq, p); 7297 7298 /* 7299 * If the task increased its priority or is running and 7300 * lowered its priority, then reschedule its CPU: 7301 */ 7302 p->sched_class->prio_changed(rq, p, old_prio); 7303 } 7304 EXPORT_SYMBOL(set_user_nice); 7305 7306 /* 7307 * is_nice_reduction - check if nice value is an actual reduction 7308 * 7309 * Similar to can_nice() but does not perform a capability check. 
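 * (For example, an RLIMIT_NICE of 30 permits nice values down to -10.)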
7310 * 7311 * @p: task 7312 * @nice: nice value 7313 */ 7314 static bool is_nice_reduction(const struct task_struct *p, const int nice) 7315 { 7316 /* Convert nice value [19,-20] to rlimit style value [1,40]: */ 7317 int nice_rlim = nice_to_rlimit(nice); 7318 7319 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE)); 7320 } 7321 7322 /* 7323 * can_nice - check if a task can reduce its nice value 7324 * @p: task 7325 * @nice: nice value 7326 */ 7327 int can_nice(const struct task_struct *p, const int nice) 7328 { 7329 return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE); 7330 } 7331 7332 #ifdef __ARCH_WANT_SYS_NICE 7333 7334 /* 7335 * sys_nice - change the priority of the current process. 7336 * @increment: priority increment 7337 * 7338 * sys_setpriority is a more generic, but much slower function that 7339 * does similar things. 7340 */ 7341 SYSCALL_DEFINE1(nice, int, increment) 7342 { 7343 long nice, retval; 7344 7345 /* 7346 * Setpriority might change our priority at the same moment. 7347 * We don't have to worry. Conceptually one call occurs first 7348 * and we have a single winner. 7349 */ 7350 increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH); 7351 nice = task_nice(current) + increment; 7352 7353 nice = clamp_val(nice, MIN_NICE, MAX_NICE); 7354 if (increment < 0 && !can_nice(current, nice)) 7355 return -EPERM; 7356 7357 retval = security_task_setnice(current, nice); 7358 if (retval) 7359 return retval; 7360 7361 set_user_nice(current, nice); 7362 return 0; 7363 } 7364 7365 #endif 7366 7367 /** 7368 * task_prio - return the priority value of a given task. 7369 * @p: the task in question. 7370 * 7371 * Return: The priority value as seen by users in /proc. 7372 * 7373 * sched policy return value kernel prio user prio/nice 7374 * 7375 * normal, batch, idle [0 ... 39] [100 ... 139] 0/[-20 ... 19] 7376 * fifo, rr [-2 ... -100] [98 ... 0] [1 ... 99] 7377 * deadline -101 -1 0 7378 */ 7379 int task_prio(const struct task_struct *p) 7380 { 7381 return p->prio - MAX_RT_PRIO; 7382 } 7383 7384 /** 7385 * idle_cpu - is a given CPU idle currently? 7386 * @cpu: the processor in question. 7387 * 7388 * Return: 1 if the CPU is currently idle. 0 otherwise. 7389 */ 7390 int idle_cpu(int cpu) 7391 { 7392 struct rq *rq = cpu_rq(cpu); 7393 7394 if (rq->curr != rq->idle) 7395 return 0; 7396 7397 if (rq->nr_running) 7398 return 0; 7399 7400 #ifdef CONFIG_SMP 7401 if (rq->ttwu_pending) 7402 return 0; 7403 #endif 7404 7405 return 1; 7406 } 7407 7408 /** 7409 * available_idle_cpu - is a given CPU idle for enqueuing work. 7410 * @cpu: the CPU in question. 7411 * 7412 * Return: 1 if the CPU is currently idle. 0 otherwise. 7413 */ 7414 int available_idle_cpu(int cpu) 7415 { 7416 if (!idle_cpu(cpu)) 7417 return 0; 7418 7419 if (vcpu_is_preempted(cpu)) 7420 return 0; 7421 7422 return 1; 7423 } 7424 7425 /** 7426 * idle_task - return the idle task for a given CPU. 7427 * @cpu: the processor in question. 7428 * 7429 * Return: The idle task for the CPU @cpu. 7430 */ 7431 struct task_struct *idle_task(int cpu) 7432 { 7433 return cpu_rq(cpu)->idle; 7434 } 7435 7436 #ifdef CONFIG_SCHED_CORE 7437 int sched_core_idle_cpu(int cpu) 7438 { 7439 struct rq *rq = cpu_rq(cpu); 7440 7441 if (sched_core_enabled(rq) && rq->curr == rq->idle) 7442 return 1; 7443 7444 return idle_cpu(cpu); 7445 } 7446 7447 #endif 7448 7449 #ifdef CONFIG_SMP 7450 /* 7451 * This function computes an effective utilization for the given CPU, to be 7452 * used for frequency selection given the linear relation: f = u * f_max. 
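 * (For example, a CPU whose effective utilization is about half of its
 * capacity would be asked to run at roughly half of f_max.)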
* 7454 * The scheduler tracks the following metrics: 7455 * 7456 * cpu_util_{cfs,rt,dl,irq}() 7457 * cpu_bw_dl() 7458 * 7459 * Where the cfs,rt and dl util numbers are tracked with the same metric and 7460 * synchronized windows and are thus directly comparable. 7461 * 7462 * The cfs, rt and dl utilization numbers are the running times measured with 7463 * rq->clock_task, which excludes things like IRQ and steal-time. The latter 7464 * are then accrued in the irq utilization. 7465 * 7466 * The DL bandwidth number, on the other hand, is not a measured metric but a value computed 7467 * based on the task model parameters and gives the minimal utilization 7468 * required to meet deadlines. 7469 */ 7470 unsigned long effective_cpu_util(int cpu, unsigned long util_cfs, 7471 unsigned long *min, 7472 unsigned long *max) 7473 { 7474 unsigned long util, irq, scale; 7475 struct rq *rq = cpu_rq(cpu); 7476 7477 scale = arch_scale_cpu_capacity(cpu); 7478 7479 /* 7480 * Early check to see if IRQ/steal time saturates the CPU, can be 7481 * because of inaccuracies in how we track these -- see 7482 * update_irq_load_avg(). 7483 */ 7484 irq = cpu_util_irq(rq); 7485 if (unlikely(irq >= scale)) { 7486 if (min) 7487 *min = scale; 7488 if (max) 7489 *max = scale; 7490 return scale; 7491 } 7492 7493 if (min) { 7494 /* 7495 * The minimum utilization returns the highest level between: 7496 * - the computed DL bandwidth needed with the IRQ pressure which 7497 * steals time from the deadline tasks. 7498 * - The minimum performance requirement for CFS and/or RT. 7499 */ 7500 *min = max(irq + cpu_bw_dl(rq), uclamp_rq_get(rq, UCLAMP_MIN)); 7501 7502 /* 7503 * When an RT task is runnable and uclamp is not used, we must 7504 * ensure that the task will run at maximum compute capacity. 7505 */ 7506 if (!uclamp_is_used() && rt_rq_is_runnable(&rq->rt)) 7507 *min = max(*min, scale); 7508 } 7509 7510 /* 7511 * Because the time spent on RT/DL tasks is visible as 'lost' time to 7512 * CFS tasks and we use the same metric to track the effective 7513 * utilization (PELT windows are synchronized) we can directly add them 7514 * to obtain the CPU's actual utilization. 7515 */ 7516 util = util_cfs + cpu_util_rt(rq); 7517 util += cpu_util_dl(rq); 7518 7519 /* 7520 * The maximum hint is a soft bandwidth requirement, which can be lower 7521 * than the actual utilization because of uclamp_max requirements. 7522 */ 7523 if (max) 7524 *max = min(scale, uclamp_rq_get(rq, UCLAMP_MAX)); 7525 7526 if (util >= scale) 7527 return scale; 7528 7529 /* 7530 * There is still idle time; further improve the number by using the 7531 * irq metric. Because IRQ/steal time is hidden from the task clock we 7532 * need to scale the task numbers: 7533 * 7534 * 7535 * U' = irq + ((max - irq) / max) * U 7536 * 7537 */ 7538 util = scale_irq_capacity(util, irq, scale); 7539 util += irq; 7540 7541 return min(scale, util); 7542 } 7543 7544 unsigned long sched_cpu_util(int cpu) 7545 { 7546 return effective_cpu_util(cpu, cpu_util_cfs(cpu), NULL, NULL); 7547 } 7548 #endif /* CONFIG_SMP */ 7549 7550 /** 7551 * find_process_by_pid - find a process with a matching PID value. 7552 * @pid: the pid in question. 7553 * 7554 * The task of @pid, if found. %NULL otherwise. 7555 */ 7556 static struct task_struct *find_process_by_pid(pid_t pid) 7557 { 7558 return pid ?
find_task_by_vpid(pid) : current; 7559 } 7560 7561 static struct task_struct *find_get_task(pid_t pid) 7562 { 7563 struct task_struct *p; 7564 guard(rcu)(); 7565 7566 p = find_process_by_pid(pid); 7567 if (likely(p)) 7568 get_task_struct(p); 7569 7570 return p; 7571 } 7572 7573 DEFINE_CLASS(find_get_task, struct task_struct *, if (_T) put_task_struct(_T), 7574 find_get_task(pid), pid_t pid) 7575 7576 /* 7577 * sched_setparam() passes in -1 for its policy, to let the functions 7578 * it calls know not to change it. 7579 */ 7580 #define SETPARAM_POLICY -1 7581 7582 static void __setscheduler_params(struct task_struct *p, 7583 const struct sched_attr *attr) 7584 { 7585 int policy = attr->sched_policy; 7586 7587 if (policy == SETPARAM_POLICY) 7588 policy = p->policy; 7589 7590 p->policy = policy; 7591 7592 if (dl_policy(policy)) 7593 __setparam_dl(p, attr); 7594 else if (fair_policy(policy)) 7595 p->static_prio = NICE_TO_PRIO(attr->sched_nice); 7596 7597 /* 7598 * __sched_setscheduler() ensures attr->sched_priority == 0 when 7599 * !rt_policy. Always setting this ensures that things like 7600 * getparam()/getattr() don't report silly values for !rt tasks. 7601 */ 7602 p->rt_priority = attr->sched_priority; 7603 p->normal_prio = normal_prio(p); 7604 set_load_weight(p, true); 7605 } 7606 7607 /* 7608 * Check the target process has a UID that matches the current process's: 7609 */ 7610 static bool check_same_owner(struct task_struct *p) 7611 { 7612 const struct cred *cred = current_cred(), *pcred; 7613 guard(rcu)(); 7614 7615 pcred = __task_cred(p); 7616 return (uid_eq(cred->euid, pcred->euid) || 7617 uid_eq(cred->euid, pcred->uid)); 7618 } 7619 7620 /* 7621 * Allow unprivileged RT tasks to decrease priority. 7622 * Only issue a capable test if needed and only once to avoid an audit 7623 * event on permitted non-privileged operations: 7624 */ 7625 static int user_check_sched_setscheduler(struct task_struct *p, 7626 const struct sched_attr *attr, 7627 int policy, int reset_on_fork) 7628 { 7629 if (fair_policy(policy)) { 7630 if (attr->sched_nice < task_nice(p) && 7631 !is_nice_reduction(p, attr->sched_nice)) 7632 goto req_priv; 7633 } 7634 7635 if (rt_policy(policy)) { 7636 unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO); 7637 7638 /* Can't set/change the rt policy: */ 7639 if (policy != p->policy && !rlim_rtprio) 7640 goto req_priv; 7641 7642 /* Can't increase priority: */ 7643 if (attr->sched_priority > p->rt_priority && 7644 attr->sched_priority > rlim_rtprio) 7645 goto req_priv; 7646 } 7647 7648 /* 7649 * Can't set/change SCHED_DEADLINE policy at all for now 7650 * (safest behavior); in the future we would like to allow 7651 * unprivileged DL tasks to increase their relative deadline 7652 * or reduce their runtime (both ways reducing utilization) 7653 */ 7654 if (dl_policy(policy)) 7655 goto req_priv; 7656 7657 /* 7658 * Treat SCHED_IDLE as nice 20. Only allow a switch to 7659 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it. 
*/ 7661 if (task_has_idle_policy(p) && !idle_policy(policy)) { 7662 if (!is_nice_reduction(p, task_nice(p))) 7663 goto req_priv; 7664 } 7665 7666 /* Can't change other users' priorities: */ 7667 if (!check_same_owner(p)) 7668 goto req_priv; 7669 7670 /* Normal users shall not reset the sched_reset_on_fork flag: */ 7671 if (p->sched_reset_on_fork && !reset_on_fork) 7672 goto req_priv; 7673 7674 return 0; 7675 7676 req_priv: 7677 if (!capable(CAP_SYS_NICE)) 7678 return -EPERM; 7679 7680 return 0; 7681 } 7682 7683 static int __sched_setscheduler(struct task_struct *p, 7684 const struct sched_attr *attr, 7685 bool user, bool pi) 7686 { 7687 int oldpolicy = -1, policy = attr->sched_policy; 7688 int retval, oldprio, newprio, queued, running; 7689 const struct sched_class *prev_class; 7690 struct balance_callback *head; 7691 struct rq_flags rf; 7692 int reset_on_fork; 7693 int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; 7694 struct rq *rq; 7695 bool cpuset_locked = false; 7696 7697 /* The pi code expects interrupts enabled */ 7698 BUG_ON(pi && in_interrupt()); 7699 recheck: 7700 /* Double check policy once rq lock held: */ 7701 if (policy < 0) { 7702 reset_on_fork = p->sched_reset_on_fork; 7703 policy = oldpolicy = p->policy; 7704 } else { 7705 reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK); 7706 7707 if (!valid_policy(policy)) 7708 return -EINVAL; 7709 } 7710 7711 if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV)) 7712 return -EINVAL; 7713 7714 /* 7715 * Valid priorities for SCHED_FIFO and SCHED_RR are 7716 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL, 7717 * SCHED_BATCH and SCHED_IDLE is 0. 7718 */ 7719 if (attr->sched_priority > MAX_RT_PRIO-1) 7720 return -EINVAL; 7721 if ((dl_policy(policy) && !__checkparam_dl(attr)) || 7722 (rt_policy(policy) != (attr->sched_priority != 0))) 7723 return -EINVAL; 7724 7725 if (user) { 7726 retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork); 7727 if (retval) 7728 return retval; 7729 7730 if (attr->sched_flags & SCHED_FLAG_SUGOV) 7731 return -EINVAL; 7732 7733 retval = security_task_setscheduler(p); 7734 if (retval) 7735 return retval; 7736 } 7737 7738 /* Update task specific "requested" clamps */ 7739 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) { 7740 retval = uclamp_validate(p, attr); 7741 if (retval) 7742 return retval; 7743 } 7744 7745 /* 7746 * SCHED_DEADLINE bandwidth accounting relies on stable cpusets 7747 * information. 7748 */ 7749 if (dl_policy(policy) || dl_policy(p->policy)) { 7750 cpuset_locked = true; 7751 cpuset_lock(); 7752 } 7753 7754 /* 7755 * Make sure no PI-waiters arrive (or leave) while we are 7756 * changing the priority of the task: 7757 * 7758 * To be able to change p->policy safely, the appropriate 7759 * runqueue lock must be held. 7760 */ 7761 rq = task_rq_lock(p, &rf); 7762 update_rq_clock(rq); 7763 7764 /* 7765 * Changing the policy of the stop threads is a very bad idea: 7766 */ 7767 if (p == rq->stop) { 7768 retval = -EINVAL; 7769 goto unlock; 7770 } 7771 7772 /* 7773 * If not changing anything there's no need to proceed further, 7774 * but store a possible modification of reset_on_fork.
7775 */ 7776 if (unlikely(policy == p->policy)) { 7777 if (fair_policy(policy) && attr->sched_nice != task_nice(p)) 7778 goto change; 7779 if (rt_policy(policy) && attr->sched_priority != p->rt_priority) 7780 goto change; 7781 if (dl_policy(policy) && dl_param_changed(p, attr)) 7782 goto change; 7783 if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) 7784 goto change; 7785 7786 p->sched_reset_on_fork = reset_on_fork; 7787 retval = 0; 7788 goto unlock; 7789 } 7790 change: 7791 7792 if (user) { 7793 #ifdef CONFIG_RT_GROUP_SCHED 7794 /* 7795 * Do not allow realtime tasks into groups that have no runtime 7796 * assigned. 7797 */ 7798 if (rt_bandwidth_enabled() && rt_policy(policy) && 7799 task_group(p)->rt_bandwidth.rt_runtime == 0 && 7800 !task_group_is_autogroup(task_group(p))) { 7801 retval = -EPERM; 7802 goto unlock; 7803 } 7804 #endif 7805 #ifdef CONFIG_SMP 7806 if (dl_bandwidth_enabled() && dl_policy(policy) && 7807 !(attr->sched_flags & SCHED_FLAG_SUGOV)) { 7808 cpumask_t *span = rq->rd->span; 7809 7810 /* 7811 * Don't allow tasks with an affinity mask smaller than 7812 * the entire root_domain to become SCHED_DEADLINE. We 7813 * will also fail if there's no bandwidth available. 7814 */ 7815 if (!cpumask_subset(span, p->cpus_ptr) || 7816 rq->rd->dl_bw.bw == 0) { 7817 retval = -EPERM; 7818 goto unlock; 7819 } 7820 } 7821 #endif 7822 } 7823 7824 /* Re-check policy now with rq lock held: */ 7825 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { 7826 policy = oldpolicy = -1; 7827 task_rq_unlock(rq, p, &rf); 7828 if (cpuset_locked) 7829 cpuset_unlock(); 7830 goto recheck; 7831 } 7832 7833 /* 7834 * If setscheduling to SCHED_DEADLINE (or changing the parameters 7835 * of a SCHED_DEADLINE task) we need to check if enough bandwidth 7836 * is available. 7837 */ 7838 if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) { 7839 retval = -EBUSY; 7840 goto unlock; 7841 } 7842 7843 p->sched_reset_on_fork = reset_on_fork; 7844 oldprio = p->prio; 7845 7846 newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice); 7847 if (pi) { 7848 /* 7849 * Take priority boosted tasks into account. If the new 7850 * effective priority is unchanged, we just store the new 7851 * normal parameters and do not touch the scheduler class and 7852 * the runqueue. This will be done when the task deboost 7853 * itself. 7854 */ 7855 newprio = rt_effective_prio(p, newprio); 7856 if (newprio == oldprio) 7857 queue_flags &= ~DEQUEUE_MOVE; 7858 } 7859 7860 queued = task_on_rq_queued(p); 7861 running = task_current(rq, p); 7862 if (queued) 7863 dequeue_task(rq, p, queue_flags); 7864 if (running) 7865 put_prev_task(rq, p); 7866 7867 prev_class = p->sched_class; 7868 7869 if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) { 7870 __setscheduler_params(p, attr); 7871 __setscheduler_prio(p, newprio); 7872 } 7873 __setscheduler_uclamp(p, attr); 7874 7875 if (queued) { 7876 /* 7877 * We enqueue to tail when the priority of a task is 7878 * increased (user space view). 
7879 */ 7880 if (oldprio < p->prio) 7881 queue_flags |= ENQUEUE_HEAD; 7882 7883 enqueue_task(rq, p, queue_flags); 7884 } 7885 if (running) 7886 set_next_task(rq, p); 7887 7888 check_class_changed(rq, p, prev_class, oldprio); 7889 7890 /* Avoid rq from going away on us: */ 7891 preempt_disable(); 7892 head = splice_balance_callbacks(rq); 7893 task_rq_unlock(rq, p, &rf); 7894 7895 if (pi) { 7896 if (cpuset_locked) 7897 cpuset_unlock(); 7898 rt_mutex_adjust_pi(p); 7899 } 7900 7901 /* Run balance callbacks after we've adjusted the PI chain: */ 7902 balance_callbacks(rq, head); 7903 preempt_enable(); 7904 7905 return 0; 7906 7907 unlock: 7908 task_rq_unlock(rq, p, &rf); 7909 if (cpuset_locked) 7910 cpuset_unlock(); 7911 return retval; 7912 } 7913 7914 static int _sched_setscheduler(struct task_struct *p, int policy, 7915 const struct sched_param *param, bool check) 7916 { 7917 struct sched_attr attr = { 7918 .sched_policy = policy, 7919 .sched_priority = param->sched_priority, 7920 .sched_nice = PRIO_TO_NICE(p->static_prio), 7921 }; 7922 7923 /* Fixup the legacy SCHED_RESET_ON_FORK hack. */ 7924 if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) { 7925 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; 7926 policy &= ~SCHED_RESET_ON_FORK; 7927 attr.sched_policy = policy; 7928 } 7929 7930 return __sched_setscheduler(p, &attr, check, true); 7931 } 7932 /** 7933 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread. 7934 * @p: the task in question. 7935 * @policy: new policy. 7936 * @param: structure containing the new RT priority. 7937 * 7938 * Use sched_set_fifo(), read its comment. 7939 * 7940 * Return: 0 on success. An error code otherwise. 7941 * 7942 * NOTE that the task may be already dead. 7943 */ 7944 int sched_setscheduler(struct task_struct *p, int policy, 7945 const struct sched_param *param) 7946 { 7947 return _sched_setscheduler(p, policy, param, true); 7948 } 7949 7950 int sched_setattr(struct task_struct *p, const struct sched_attr *attr) 7951 { 7952 return __sched_setscheduler(p, attr, true, true); 7953 } 7954 7955 int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr) 7956 { 7957 return __sched_setscheduler(p, attr, false, true); 7958 } 7959 EXPORT_SYMBOL_GPL(sched_setattr_nocheck); 7960 7961 /** 7962 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace. 7963 * @p: the task in question. 7964 * @policy: new policy. 7965 * @param: structure containing the new RT priority. 7966 * 7967 * Just like sched_setscheduler, only don't bother checking if the 7968 * current context has permission. For example, this is needed in 7969 * stop_machine(): we create temporary high priority worker threads, 7970 * but our caller might not have that capability. 7971 * 7972 * Return: 0 on success. An error code otherwise. 7973 */ 7974 int sched_setscheduler_nocheck(struct task_struct *p, int policy, 7975 const struct sched_param *param) 7976 { 7977 return _sched_setscheduler(p, policy, param, false); 7978 } 7979 7980 /* 7981 * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally 7982 * incapable of resource management, which is the one thing an OS really should 7983 * be doing. 7984 * 7985 * This is of course the reason it is limited to privileged users only. 7986 * 7987 * Worse still; it is fundamentally impossible to compose static priority 7988 * workloads. 
You cannot take two correctly working static prio workloads 7989 * and smash them together and still expect them to work. 7990 * 7991 * For this reason 'all' FIFO tasks the kernel creates are basically at: 7992 * 7993 * MAX_RT_PRIO / 2 7994 * 7995 * The administrator _MUST_ configure the system, the kernel simply doesn't 7996 * know enough information to make a sensible choice. 7997 */ 7998 void sched_set_fifo(struct task_struct *p) 7999 { 8000 struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 }; 8001 WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0); 8002 } 8003 EXPORT_SYMBOL_GPL(sched_set_fifo); 8004 8005 /* 8006 * For when you don't much care about FIFO, but want to be above SCHED_NORMAL. 8007 */ 8008 void sched_set_fifo_low(struct task_struct *p) 8009 { 8010 struct sched_param sp = { .sched_priority = 1 }; 8011 WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0); 8012 } 8013 EXPORT_SYMBOL_GPL(sched_set_fifo_low); 8014 8015 void sched_set_normal(struct task_struct *p, int nice) 8016 { 8017 struct sched_attr attr = { 8018 .sched_policy = SCHED_NORMAL, 8019 .sched_nice = nice, 8020 }; 8021 WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0); 8022 } 8023 EXPORT_SYMBOL_GPL(sched_set_normal); 8024 8025 static int 8026 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) 8027 { 8028 struct sched_param lparam; 8029 8030 if (!param || pid < 0) 8031 return -EINVAL; 8032 if (copy_from_user(&lparam, param, sizeof(struct sched_param))) 8033 return -EFAULT; 8034 8035 CLASS(find_get_task, p)(pid); 8036 if (!p) 8037 return -ESRCH; 8038 8039 return sched_setscheduler(p, policy, &lparam); 8040 } 8041 8042 /* 8043 * Mimics kernel/events/core.c perf_copy_attr(). 8044 */ 8045 static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr) 8046 { 8047 u32 size; 8048 int ret; 8049 8050 /* Zero the full structure, so that a short copy will be nice: */ 8051 memset(attr, 0, sizeof(*attr)); 8052 8053 ret = get_user(size, &uattr->size); 8054 if (ret) 8055 return ret; 8056 8057 /* ABI compatibility quirk: */ 8058 if (!size) 8059 size = SCHED_ATTR_SIZE_VER0; 8060 if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE) 8061 goto err_size; 8062 8063 ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size); 8064 if (ret) { 8065 if (ret == -E2BIG) 8066 goto err_size; 8067 return ret; 8068 } 8069 8070 if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) && 8071 size < SCHED_ATTR_SIZE_VER1) 8072 return -EINVAL; 8073 8074 /* 8075 * XXX: Do we want to be lenient like existing syscalls; or do we want 8076 * to be strict and return an error on out-of-bounds values? 8077 */ 8078 attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE); 8079 8080 return 0; 8081 8082 err_size: 8083 put_user(sizeof(*attr), &uattr->size); 8084 return -E2BIG; 8085 } 8086 8087 static void get_params(struct task_struct *p, struct sched_attr *attr) 8088 { 8089 if (task_has_dl_policy(p)) 8090 __getparam_dl(p, attr); 8091 else if (task_has_rt_policy(p)) 8092 attr->sched_priority = p->rt_priority; 8093 else 8094 attr->sched_nice = task_nice(p); 8095 } 8096 8097 /** 8098 * sys_sched_setscheduler - set/change the scheduler policy and RT priority 8099 * @pid: the pid in question. 8100 * @policy: new policy. 8101 * @param: structure containing the new RT priority. 8102 * 8103 * Return: 0 on success. An error code otherwise. 
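 *
 * Illustrative user-space sketch only (via the libc wrapper of the same
 * name, not kernel code); the priority value is an arbitrary example:
 *
 *	struct sched_param sp = { .sched_priority = 50 };
 *
 *	if (sched_setscheduler(pid, SCHED_FIFO, &sp) == -1)
 *		perror("sched_setscheduler");
 *
 * Real-time policies normally require CAP_SYS_NICE or a sufficient
 * RLIMIT_RTPRIO.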
8104 */ 8105 SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param) 8106 { 8107 if (policy < 0) 8108 return -EINVAL; 8109 8110 return do_sched_setscheduler(pid, policy, param); 8111 } 8112 8113 /** 8114 * sys_sched_setparam - set/change the RT priority of a thread 8115 * @pid: the pid in question. 8116 * @param: structure containing the new RT priority. 8117 * 8118 * Return: 0 on success. An error code otherwise. 8119 */ 8120 SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) 8121 { 8122 return do_sched_setscheduler(pid, SETPARAM_POLICY, param); 8123 } 8124 8125 /** 8126 * sys_sched_setattr - same as above, but with extended sched_attr 8127 * @pid: the pid in question. 8128 * @uattr: structure containing the extended parameters. 8129 * @flags: for future extension. 8130 */ 8131 SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr, 8132 unsigned int, flags) 8133 { 8134 struct sched_attr attr; 8135 int retval; 8136 8137 if (!uattr || pid < 0 || flags) 8138 return -EINVAL; 8139 8140 retval = sched_copy_attr(uattr, &attr); 8141 if (retval) 8142 return retval; 8143 8144 if ((int)attr.sched_policy < 0) 8145 return -EINVAL; 8146 if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY) 8147 attr.sched_policy = SETPARAM_POLICY; 8148 8149 CLASS(find_get_task, p)(pid); 8150 if (!p) 8151 return -ESRCH; 8152 8153 if (attr.sched_flags & SCHED_FLAG_KEEP_PARAMS) 8154 get_params(p, &attr); 8155 8156 return sched_setattr(p, &attr); 8157 } 8158 8159 /** 8160 * sys_sched_getscheduler - get the policy (scheduling class) of a thread 8161 * @pid: the pid in question. 8162 * 8163 * Return: On success, the policy of the thread. Otherwise, a negative error 8164 * code. 8165 */ 8166 SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) 8167 { 8168 struct task_struct *p; 8169 int retval; 8170 8171 if (pid < 0) 8172 return -EINVAL; 8173 8174 guard(rcu)(); 8175 p = find_process_by_pid(pid); 8176 if (!p) 8177 return -ESRCH; 8178 8179 retval = security_task_getscheduler(p); 8180 if (!retval) { 8181 retval = p->policy; 8182 if (p->sched_reset_on_fork) 8183 retval |= SCHED_RESET_ON_FORK; 8184 } 8185 return retval; 8186 } 8187 8188 /** 8189 * sys_sched_getparam - get the RT priority of a thread 8190 * @pid: the pid in question. 8191 * @param: structure containing the RT priority. 8192 * 8193 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error 8194 * code. 8195 */ 8196 SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) 8197 { 8198 struct sched_param lp = { .sched_priority = 0 }; 8199 struct task_struct *p; 8200 int retval; 8201 8202 if (!param || pid < 0) 8203 return -EINVAL; 8204 8205 scoped_guard (rcu) { 8206 p = find_process_by_pid(pid); 8207 if (!p) 8208 return -ESRCH; 8209 8210 retval = security_task_getscheduler(p); 8211 if (retval) 8212 return retval; 8213 8214 if (task_has_rt_policy(p)) 8215 lp.sched_priority = p->rt_priority; 8216 } 8217 8218 /* 8219 * This one might sleep, we cannot do it with a spinlock held ... 8220 */ 8221 return copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0; 8222 } 8223 8224 /* 8225 * Copy the kernel size attribute structure (which might be larger 8226 * than what user-space knows about) to user-space. 8227 * 8228 * Note that all cases are valid: user-space buffer can be larger or 8229 * smaller than the kernel-space buffer. The usual case is that both 8230 * have the same size. 
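 *
 * A worked example (sizes are illustrative only, not ABI guarantees): if
 * the kernel's struct sched_attr is 56 bytes and user-space passes
 * usize == 48, only the first 48 bytes are copied and kattr->size is
 * reported as 48; with usize == 64 all 56 kernel bytes are copied and
 * kattr->size is reported as 56, letting tooling detect which fields
 * this kernel knows about.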
8231 */ 8232 static int 8233 sched_attr_copy_to_user(struct sched_attr __user *uattr, 8234 struct sched_attr *kattr, 8235 unsigned int usize) 8236 { 8237 unsigned int ksize = sizeof(*kattr); 8238 8239 if (!access_ok(uattr, usize)) 8240 return -EFAULT; 8241 8242 /* 8243 * sched_getattr() ABI forwards and backwards compatibility: 8244 * 8245 * If usize == ksize then we just copy everything to user-space and all is good. 8246 * 8247 * If usize < ksize then we only copy as much as user-space has space for, 8248 * this keeps ABI compatibility as well. We skip the rest. 8249 * 8250 * If usize > ksize then user-space is using a newer version of the ABI, 8251 * which part the kernel doesn't know about. Just ignore it - tooling can 8252 * detect the kernel's knowledge of attributes from the attr->size value 8253 * which is set to ksize in this case. 8254 */ 8255 kattr->size = min(usize, ksize); 8256 8257 if (copy_to_user(uattr, kattr, kattr->size)) 8258 return -EFAULT; 8259 8260 return 0; 8261 } 8262 8263 /** 8264 * sys_sched_getattr - similar to sched_getparam, but with sched_attr 8265 * @pid: the pid in question. 8266 * @uattr: structure containing the extended parameters. 8267 * @usize: sizeof(attr) for fwd/bwd comp. 8268 * @flags: for future extension. 8269 */ 8270 SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, 8271 unsigned int, usize, unsigned int, flags) 8272 { 8273 struct sched_attr kattr = { }; 8274 struct task_struct *p; 8275 int retval; 8276 8277 if (!uattr || pid < 0 || usize > PAGE_SIZE || 8278 usize < SCHED_ATTR_SIZE_VER0 || flags) 8279 return -EINVAL; 8280 8281 scoped_guard (rcu) { 8282 p = find_process_by_pid(pid); 8283 if (!p) 8284 return -ESRCH; 8285 8286 retval = security_task_getscheduler(p); 8287 if (retval) 8288 return retval; 8289 8290 kattr.sched_policy = p->policy; 8291 if (p->sched_reset_on_fork) 8292 kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; 8293 get_params(p, &kattr); 8294 kattr.sched_flags &= SCHED_FLAG_ALL; 8295 8296 #ifdef CONFIG_UCLAMP_TASK 8297 /* 8298 * This could race with another potential updater, but this is fine 8299 * because it'll correctly read the old or the new value. We don't need 8300 * to guarantee who wins the race as long as it doesn't return garbage. 8301 */ 8302 kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value; 8303 kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value; 8304 #endif 8305 } 8306 8307 return sched_attr_copy_to_user(uattr, &kattr, usize); 8308 } 8309 8310 #ifdef CONFIG_SMP 8311 int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask) 8312 { 8313 /* 8314 * If the task isn't a deadline task or admission control is 8315 * disabled then we don't care about affinity changes. 8316 */ 8317 if (!task_has_dl_policy(p) || !dl_bandwidth_enabled()) 8318 return 0; 8319 8320 /* 8321 * Since bandwidth control happens on root_domain basis, 8322 * if admission test is enabled, we only admit -deadline 8323 * tasks allowed to run on all the CPUs in the task's 8324 * root_domain. 
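 *
 * For example (illustrative): with a root_domain spanning CPUs 0-3, an
 * affinity request of CPUs 0-1 for a SCHED_DEADLINE task is rejected
 * below with -EBUSY, whereas a mask covering all of CPUs 0-3 is allowed.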
8325 */ 8326 guard(rcu)(); 8327 if (!cpumask_subset(task_rq(p)->rd->span, mask)) 8328 return -EBUSY; 8329 8330 return 0; 8331 } 8332 #endif 8333 8334 static int 8335 __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx) 8336 { 8337 int retval; 8338 cpumask_var_t cpus_allowed, new_mask; 8339 8340 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) 8341 return -ENOMEM; 8342 8343 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { 8344 retval = -ENOMEM; 8345 goto out_free_cpus_allowed; 8346 } 8347 8348 cpuset_cpus_allowed(p, cpus_allowed); 8349 cpumask_and(new_mask, ctx->new_mask, cpus_allowed); 8350 8351 ctx->new_mask = new_mask; 8352 ctx->flags |= SCA_CHECK; 8353 8354 retval = dl_task_check_affinity(p, new_mask); 8355 if (retval) 8356 goto out_free_new_mask; 8357 8358 retval = __set_cpus_allowed_ptr(p, ctx); 8359 if (retval) 8360 goto out_free_new_mask; 8361 8362 cpuset_cpus_allowed(p, cpus_allowed); 8363 if (!cpumask_subset(new_mask, cpus_allowed)) { 8364 /* 8365 * We must have raced with a concurrent cpuset update. 8366 * Just reset the cpumask to the cpuset's cpus_allowed. 8367 */ 8368 cpumask_copy(new_mask, cpus_allowed); 8369 8370 /* 8371 * If SCA_USER is set, a 2nd call to __set_cpus_allowed_ptr() 8372 * will restore the previous user_cpus_ptr value. 8373 * 8374 * In the unlikely event a previous user_cpus_ptr exists, 8375 * we need to further restrict the mask to what is allowed 8376 * by that old user_cpus_ptr. 8377 */ 8378 if (unlikely((ctx->flags & SCA_USER) && ctx->user_mask)) { 8379 bool empty = !cpumask_and(new_mask, new_mask, 8380 ctx->user_mask); 8381 8382 if (WARN_ON_ONCE(empty)) 8383 cpumask_copy(new_mask, cpus_allowed); 8384 } 8385 __set_cpus_allowed_ptr(p, ctx); 8386 retval = -EINVAL; 8387 } 8388 8389 out_free_new_mask: 8390 free_cpumask_var(new_mask); 8391 out_free_cpus_allowed: 8392 free_cpumask_var(cpus_allowed); 8393 return retval; 8394 } 8395 8396 long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) 8397 { 8398 struct affinity_context ac; 8399 struct cpumask *user_mask; 8400 int retval; 8401 8402 CLASS(find_get_task, p)(pid); 8403 if (!p) 8404 return -ESRCH; 8405 8406 if (p->flags & PF_NO_SETAFFINITY) 8407 return -EINVAL; 8408 8409 if (!check_same_owner(p)) { 8410 guard(rcu)(); 8411 if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) 8412 return -EPERM; 8413 } 8414 8415 retval = security_task_setscheduler(p); 8416 if (retval) 8417 return retval; 8418 8419 /* 8420 * With non-SMP configs, user_cpus_ptr/user_mask isn't used and 8421 * alloc_user_cpus_ptr() returns NULL. 8422 */ 8423 user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE); 8424 if (user_mask) { 8425 cpumask_copy(user_mask, in_mask); 8426 } else if (IS_ENABLED(CONFIG_SMP)) { 8427 return -ENOMEM; 8428 } 8429 8430 ac = (struct affinity_context){ 8431 .new_mask = in_mask, 8432 .user_mask = user_mask, 8433 .flags = SCA_USER, 8434 }; 8435 8436 retval = __sched_setaffinity(p, &ac); 8437 kfree(ac.user_mask); 8438 8439 return retval; 8440 } 8441 8442 static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, 8443 struct cpumask *new_mask) 8444 { 8445 if (len < cpumask_size()) 8446 cpumask_clear(new_mask); 8447 else if (len > cpumask_size()) 8448 len = cpumask_size(); 8449 8450 return copy_from_user(new_mask, user_mask_ptr, len) ? 
-EFAULT : 0; 8451 } 8452 8453 /** 8454 * sys_sched_setaffinity - set the CPU affinity of a process 8455 * @pid: pid of the process 8456 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 8457 * @user_mask_ptr: user-space pointer to the new CPU mask 8458 * 8459 * Return: 0 on success. An error code otherwise. 8460 */ 8461 SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, 8462 unsigned long __user *, user_mask_ptr) 8463 { 8464 cpumask_var_t new_mask; 8465 int retval; 8466 8467 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) 8468 return -ENOMEM; 8469 8470 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); 8471 if (retval == 0) 8472 retval = sched_setaffinity(pid, new_mask); 8473 free_cpumask_var(new_mask); 8474 return retval; 8475 } 8476 8477 long sched_getaffinity(pid_t pid, struct cpumask *mask) 8478 { 8479 struct task_struct *p; 8480 int retval; 8481 8482 guard(rcu)(); 8483 p = find_process_by_pid(pid); 8484 if (!p) 8485 return -ESRCH; 8486 8487 retval = security_task_getscheduler(p); 8488 if (retval) 8489 return retval; 8490 8491 guard(raw_spinlock_irqsave)(&p->pi_lock); 8492 cpumask_and(mask, &p->cpus_mask, cpu_active_mask); 8493 8494 return 0; 8495 } 8496 8497 /** 8498 * sys_sched_getaffinity - get the CPU affinity of a process 8499 * @pid: pid of the process 8500 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 8501 * @user_mask_ptr: user-space pointer to hold the current CPU mask 8502 * 8503 * Return: size of CPU mask copied to user_mask_ptr on success. An 8504 * error code otherwise. 8505 */ 8506 SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, 8507 unsigned long __user *, user_mask_ptr) 8508 { 8509 int ret; 8510 cpumask_var_t mask; 8511 8512 if ((len * BITS_PER_BYTE) < nr_cpu_ids) 8513 return -EINVAL; 8514 if (len & (sizeof(unsigned long)-1)) 8515 return -EINVAL; 8516 8517 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) 8518 return -ENOMEM; 8519 8520 ret = sched_getaffinity(pid, mask); 8521 if (ret == 0) { 8522 unsigned int retlen = min(len, cpumask_size()); 8523 8524 if (copy_to_user(user_mask_ptr, cpumask_bits(mask), retlen)) 8525 ret = -EFAULT; 8526 else 8527 ret = retlen; 8528 } 8529 free_cpumask_var(mask); 8530 8531 return ret; 8532 } 8533 8534 static void do_sched_yield(void) 8535 { 8536 struct rq_flags rf; 8537 struct rq *rq; 8538 8539 rq = this_rq_lock_irq(&rf); 8540 8541 schedstat_inc(rq->yld_count); 8542 current->sched_class->yield_task(rq); 8543 8544 preempt_disable(); 8545 rq_unlock_irq(rq, &rf); 8546 sched_preempt_enable_no_resched(); 8547 8548 schedule(); 8549 } 8550 8551 /** 8552 * sys_sched_yield - yield the current processor to other threads. 8553 * 8554 * This function yields the current CPU to other tasks. If there are no 8555 * other threads running on this CPU then this function will return. 8556 * 8557 * Return: 0. 8558 */ 8559 SYSCALL_DEFINE0(sched_yield) 8560 { 8561 do_sched_yield(); 8562 return 0; 8563 } 8564 8565 #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC) 8566 int __sched __cond_resched(void) 8567 { 8568 if (should_resched(0)) { 8569 preempt_schedule_common(); 8570 return 1; 8571 } 8572 /* 8573 * In preemptible kernels, ->rcu_read_lock_nesting tells the tick 8574 * whether the current CPU is in an RCU read-side critical section, 8575 * so the tick can report quiescent states even for CPUs looping 8576 * in kernel context. 
In contrast, in non-preemptible kernels, 8577 * RCU readers leave no in-memory hints, which means that CPU-bound 8578 * processes executing in kernel context might never report an 8579 * RCU quiescent state. Therefore, the following code causes 8580 * cond_resched() to report a quiescent state, but only when RCU 8581 * is in urgent need of one. 8582 */ 8583 #ifndef CONFIG_PREEMPT_RCU 8584 rcu_all_qs(); 8585 #endif 8586 return 0; 8587 } 8588 EXPORT_SYMBOL(__cond_resched); 8589 #endif 8590 8591 #ifdef CONFIG_PREEMPT_DYNAMIC 8592 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) 8593 #define cond_resched_dynamic_enabled __cond_resched 8594 #define cond_resched_dynamic_disabled ((void *)&__static_call_return0) 8595 DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched); 8596 EXPORT_STATIC_CALL_TRAMP(cond_resched); 8597 8598 #define might_resched_dynamic_enabled __cond_resched 8599 #define might_resched_dynamic_disabled ((void *)&__static_call_return0) 8600 DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched); 8601 EXPORT_STATIC_CALL_TRAMP(might_resched); 8602 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) 8603 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched); 8604 int __sched dynamic_cond_resched(void) 8605 { 8606 klp_sched_try_switch(); 8607 if (!static_branch_unlikely(&sk_dynamic_cond_resched)) 8608 return 0; 8609 return __cond_resched(); 8610 } 8611 EXPORT_SYMBOL(dynamic_cond_resched); 8612 8613 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched); 8614 int __sched dynamic_might_resched(void) 8615 { 8616 if (!static_branch_unlikely(&sk_dynamic_might_resched)) 8617 return 0; 8618 return __cond_resched(); 8619 } 8620 EXPORT_SYMBOL(dynamic_might_resched); 8621 #endif 8622 #endif 8623 8624 /* 8625 * __cond_resched_lock() - if a reschedule is pending, drop the given lock, 8626 * call schedule, and on return reacquire the lock. 8627 * 8628 * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level 8629 * operations here to prevent schedule() from being called twice (once via 8630 * spin_unlock(), once by hand). 
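 *
 * A minimal usage sketch (illustrative; more_work() and do_a_little_work()
 * are placeholders, and real callers usually revalidate their iteration
 * state after the lock has been dropped and re-taken):
 *
 *	spin_lock(&lock);
 *	while (more_work()) {
 *		do_a_little_work();
 *		cond_resched_lock(&lock);
 *	}
 *	spin_unlock(&lock);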
8631 */ 8632 int __cond_resched_lock(spinlock_t *lock) 8633 { 8634 int resched = should_resched(PREEMPT_LOCK_OFFSET); 8635 int ret = 0; 8636 8637 lockdep_assert_held(lock); 8638 8639 if (spin_needbreak(lock) || resched) { 8640 spin_unlock(lock); 8641 if (!_cond_resched()) 8642 cpu_relax(); 8643 ret = 1; 8644 spin_lock(lock); 8645 } 8646 return ret; 8647 } 8648 EXPORT_SYMBOL(__cond_resched_lock); 8649 8650 int __cond_resched_rwlock_read(rwlock_t *lock) 8651 { 8652 int resched = should_resched(PREEMPT_LOCK_OFFSET); 8653 int ret = 0; 8654 8655 lockdep_assert_held_read(lock); 8656 8657 if (rwlock_needbreak(lock) || resched) { 8658 read_unlock(lock); 8659 if (!_cond_resched()) 8660 cpu_relax(); 8661 ret = 1; 8662 read_lock(lock); 8663 } 8664 return ret; 8665 } 8666 EXPORT_SYMBOL(__cond_resched_rwlock_read); 8667 8668 int __cond_resched_rwlock_write(rwlock_t *lock) 8669 { 8670 int resched = should_resched(PREEMPT_LOCK_OFFSET); 8671 int ret = 0; 8672 8673 lockdep_assert_held_write(lock); 8674 8675 if (rwlock_needbreak(lock) || resched) { 8676 write_unlock(lock); 8677 if (!_cond_resched()) 8678 cpu_relax(); 8679 ret = 1; 8680 write_lock(lock); 8681 } 8682 return ret; 8683 } 8684 EXPORT_SYMBOL(__cond_resched_rwlock_write); 8685 8686 #ifdef CONFIG_PREEMPT_DYNAMIC 8687 8688 #ifdef CONFIG_GENERIC_ENTRY 8689 #include <linux/entry-common.h> 8690 #endif 8691 8692 /* 8693 * SC:cond_resched 8694 * SC:might_resched 8695 * SC:preempt_schedule 8696 * SC:preempt_schedule_notrace 8697 * SC:irqentry_exit_cond_resched 8698 * 8699 * 8700 * NONE: 8701 * cond_resched <- __cond_resched 8702 * might_resched <- RET0 8703 * preempt_schedule <- NOP 8704 * preempt_schedule_notrace <- NOP 8705 * irqentry_exit_cond_resched <- NOP 8706 * 8707 * VOLUNTARY: 8708 * cond_resched <- __cond_resched 8709 * might_resched <- __cond_resched 8710 * preempt_schedule <- NOP 8711 * preempt_schedule_notrace <- NOP 8712 * irqentry_exit_cond_resched <- NOP 8713 * 8714 * FULL: 8715 * cond_resched <- RET0 8716 * might_resched <- RET0 8717 * preempt_schedule <- preempt_schedule 8718 * preempt_schedule_notrace <- preempt_schedule_notrace 8719 * irqentry_exit_cond_resched <- irqentry_exit_cond_resched 8720 */ 8721 8722 enum { 8723 preempt_dynamic_undefined = -1, 8724 preempt_dynamic_none, 8725 preempt_dynamic_voluntary, 8726 preempt_dynamic_full, 8727 }; 8728 8729 int preempt_dynamic_mode = preempt_dynamic_undefined; 8730 8731 int sched_dynamic_mode(const char *str) 8732 { 8733 if (!strcmp(str, "none")) 8734 return preempt_dynamic_none; 8735 8736 if (!strcmp(str, "voluntary")) 8737 return preempt_dynamic_voluntary; 8738 8739 if (!strcmp(str, "full")) 8740 return preempt_dynamic_full; 8741 8742 return -EINVAL; 8743 } 8744 8745 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) 8746 #define preempt_dynamic_enable(f) static_call_update(f, f##_dynamic_enabled) 8747 #define preempt_dynamic_disable(f) static_call_update(f, f##_dynamic_disabled) 8748 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) 8749 #define preempt_dynamic_enable(f) static_key_enable(&sk_dynamic_##f.key) 8750 #define preempt_dynamic_disable(f) static_key_disable(&sk_dynamic_##f.key) 8751 #else 8752 #error "Unsupported PREEMPT_DYNAMIC mechanism" 8753 #endif 8754 8755 static DEFINE_MUTEX(sched_dynamic_mutex); 8756 static bool klp_override; 8757 8758 static void __sched_dynamic_update(int mode) 8759 { 8760 /* 8761 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in 8762 * the ZERO state, which is invalid. 
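 *
 * Illustrative walk-through: a VOLUNTARY -> FULL switch first re-enables
 * every call/key below (modulo the klp_override special case) and only
 * then, in the FULL branch, disables cond_resched and might_resched, so
 * there is never a window in which both the cond_resched path and the
 * preempt_schedule path are disabled at once.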
8763 */ 8764 if (!klp_override) 8765 preempt_dynamic_enable(cond_resched); 8766 preempt_dynamic_enable(might_resched); 8767 preempt_dynamic_enable(preempt_schedule); 8768 preempt_dynamic_enable(preempt_schedule_notrace); 8769 preempt_dynamic_enable(irqentry_exit_cond_resched); 8770 8771 switch (mode) { 8772 case preempt_dynamic_none: 8773 if (!klp_override) 8774 preempt_dynamic_enable(cond_resched); 8775 preempt_dynamic_disable(might_resched); 8776 preempt_dynamic_disable(preempt_schedule); 8777 preempt_dynamic_disable(preempt_schedule_notrace); 8778 preempt_dynamic_disable(irqentry_exit_cond_resched); 8779 if (mode != preempt_dynamic_mode) 8780 pr_info("Dynamic Preempt: none\n"); 8781 break; 8782 8783 case preempt_dynamic_voluntary: 8784 if (!klp_override) 8785 preempt_dynamic_enable(cond_resched); 8786 preempt_dynamic_enable(might_resched); 8787 preempt_dynamic_disable(preempt_schedule); 8788 preempt_dynamic_disable(preempt_schedule_notrace); 8789 preempt_dynamic_disable(irqentry_exit_cond_resched); 8790 if (mode != preempt_dynamic_mode) 8791 pr_info("Dynamic Preempt: voluntary\n"); 8792 break; 8793 8794 case preempt_dynamic_full: 8795 if (!klp_override) 8796 preempt_dynamic_disable(cond_resched); 8797 preempt_dynamic_disable(might_resched); 8798 preempt_dynamic_enable(preempt_schedule); 8799 preempt_dynamic_enable(preempt_schedule_notrace); 8800 preempt_dynamic_enable(irqentry_exit_cond_resched); 8801 if (mode != preempt_dynamic_mode) 8802 pr_info("Dynamic Preempt: full\n"); 8803 break; 8804 } 8805 8806 preempt_dynamic_mode = mode; 8807 } 8808 8809 void sched_dynamic_update(int mode) 8810 { 8811 mutex_lock(&sched_dynamic_mutex); 8812 __sched_dynamic_update(mode); 8813 mutex_unlock(&sched_dynamic_mutex); 8814 } 8815 8816 #ifdef CONFIG_HAVE_PREEMPT_DYNAMIC_CALL 8817 8818 static int klp_cond_resched(void) 8819 { 8820 __klp_sched_try_switch(); 8821 return __cond_resched(); 8822 } 8823 8824 void sched_dynamic_klp_enable(void) 8825 { 8826 mutex_lock(&sched_dynamic_mutex); 8827 8828 klp_override = true; 8829 static_call_update(cond_resched, klp_cond_resched); 8830 8831 mutex_unlock(&sched_dynamic_mutex); 8832 } 8833 8834 void sched_dynamic_klp_disable(void) 8835 { 8836 mutex_lock(&sched_dynamic_mutex); 8837 8838 klp_override = false; 8839 __sched_dynamic_update(preempt_dynamic_mode); 8840 8841 mutex_unlock(&sched_dynamic_mutex); 8842 } 8843 8844 #endif /* CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */ 8845 8846 static int __init setup_preempt_mode(char *str) 8847 { 8848 int mode = sched_dynamic_mode(str); 8849 if (mode < 0) { 8850 pr_warn("Dynamic Preempt: unsupported mode: %s\n", str); 8851 return 0; 8852 } 8853 8854 sched_dynamic_update(mode); 8855 return 1; 8856 } 8857 __setup("preempt=", setup_preempt_mode); 8858 8859 static void __init preempt_dynamic_init(void) 8860 { 8861 if (preempt_dynamic_mode == preempt_dynamic_undefined) { 8862 if (IS_ENABLED(CONFIG_PREEMPT_NONE)) { 8863 sched_dynamic_update(preempt_dynamic_none); 8864 } else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) { 8865 sched_dynamic_update(preempt_dynamic_voluntary); 8866 } else { 8867 /* Default static call setting, nothing to do */ 8868 WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT)); 8869 preempt_dynamic_mode = preempt_dynamic_full; 8870 pr_info("Dynamic Preempt: full\n"); 8871 } 8872 } 8873 } 8874 8875 #define PREEMPT_MODEL_ACCESSOR(mode) \ 8876 bool preempt_model_##mode(void) \ 8877 { \ 8878 WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \ 8879 return preempt_dynamic_mode == preempt_dynamic_##mode; \ 8880 } \ 8881 
EXPORT_SYMBOL_GPL(preempt_model_##mode) 8882 8883 PREEMPT_MODEL_ACCESSOR(none); 8884 PREEMPT_MODEL_ACCESSOR(voluntary); 8885 PREEMPT_MODEL_ACCESSOR(full); 8886 8887 #else /* !CONFIG_PREEMPT_DYNAMIC */ 8888 8889 static inline void preempt_dynamic_init(void) { } 8890 8891 #endif /* #ifdef CONFIG_PREEMPT_DYNAMIC */ 8892 8893 /** 8894 * yield - yield the current processor to other threads. 8895 * 8896 * Do not ever use this function, there's a 99% chance you're doing it wrong. 8897 * 8898 * The scheduler is at all times free to pick the calling task as the most 8899 * eligible task to run, if removing the yield() call from your code breaks 8900 * it, it's already broken. 8901 * 8902 * Typical broken usage is: 8903 * 8904 * while (!event) 8905 * yield(); 8906 * 8907 * where one assumes that yield() will let 'the other' process run that will 8908 * make event true. If the current task is a SCHED_FIFO task that will never 8909 * happen. Never use yield() as a progress guarantee!! 8910 * 8911 * If you want to use yield() to wait for something, use wait_event(). 8912 * If you want to use yield() to be 'nice' for others, use cond_resched(). 8913 * If you still want to use yield(), do not! 8914 */ 8915 void __sched yield(void) 8916 { 8917 set_current_state(TASK_RUNNING); 8918 do_sched_yield(); 8919 } 8920 EXPORT_SYMBOL(yield); 8921 8922 /** 8923 * yield_to - yield the current processor to another thread in 8924 * your thread group, or accelerate that thread toward the 8925 * processor it's on. 8926 * @p: target task 8927 * @preempt: whether task preemption is allowed or not 8928 * 8929 * It's the caller's job to ensure that the target task struct 8930 * can't go away on us before we can do any checks. 8931 * 8932 * Return: 8933 * true (>0) if we indeed boosted the target task. 8934 * false (0) if we failed to boost the target. 8935 * -ESRCH if there's no task to yield to. 8936 */ 8937 int __sched yield_to(struct task_struct *p, bool preempt) 8938 { 8939 struct task_struct *curr = current; 8940 struct rq *rq, *p_rq; 8941 int yielded = 0; 8942 8943 scoped_guard (irqsave) { 8944 rq = this_rq(); 8945 8946 again: 8947 p_rq = task_rq(p); 8948 /* 8949 * If we're the only runnable task on the rq and target rq also 8950 * has only one task, there's absolutely no point in yielding. 8951 */ 8952 if (rq->nr_running == 1 && p_rq->nr_running == 1) 8953 return -ESRCH; 8954 8955 guard(double_rq_lock)(rq, p_rq); 8956 if (task_rq(p) != p_rq) 8957 goto again; 8958 8959 if (!curr->sched_class->yield_to_task) 8960 return 0; 8961 8962 if (curr->sched_class != p->sched_class) 8963 return 0; 8964 8965 if (task_on_cpu(p_rq, p) || !task_is_running(p)) 8966 return 0; 8967 8968 yielded = curr->sched_class->yield_to_task(rq, p); 8969 if (yielded) { 8970 schedstat_inc(rq->yld_count); 8971 /* 8972 * Make p's CPU reschedule; pick_next_entity 8973 * takes care of fairness. 8974 */ 8975 if (preempt && rq != p_rq) 8976 resched_curr(p_rq); 8977 } 8978 } 8979 8980 if (yielded) 8981 schedule(); 8982 8983 return yielded; 8984 } 8985 EXPORT_SYMBOL_GPL(yield_to); 8986 8987 int io_schedule_prepare(void) 8988 { 8989 int old_iowait = current->in_iowait; 8990 8991 current->in_iowait = 1; 8992 blk_flush_plug(current->plug, true); 8993 return old_iowait; 8994 } 8995 8996 void io_schedule_finish(int token) 8997 { 8998 current->in_iowait = token; 8999 } 9000 9001 /* 9002 * This task is about to go to sleep on IO. Increment rq->nr_iowait so 9003 * that process accounting knows that this is a task in IO wait state. 
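 *
 * The prepare/finish pair below can also be used open-coded when a caller
 * needs I/O-wait accounting around some other sleeping primitive, e.g.
 * (illustrative sketch, 'done' being the caller's completion):
 *
 *	int tok = io_schedule_prepare();
 *
 *	wait_for_completion(&done);
 *	io_schedule_finish(tok);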
9004 */ 9005 long __sched io_schedule_timeout(long timeout) 9006 { 9007 int token; 9008 long ret; 9009 9010 token = io_schedule_prepare(); 9011 ret = schedule_timeout(timeout); 9012 io_schedule_finish(token); 9013 9014 return ret; 9015 } 9016 EXPORT_SYMBOL(io_schedule_timeout); 9017 9018 void __sched io_schedule(void) 9019 { 9020 int token; 9021 9022 token = io_schedule_prepare(); 9023 schedule(); 9024 io_schedule_finish(token); 9025 } 9026 EXPORT_SYMBOL(io_schedule); 9027 9028 /** 9029 * sys_sched_get_priority_max - return maximum RT priority. 9030 * @policy: scheduling class. 9031 * 9032 * Return: On success, this syscall returns the maximum 9033 * rt_priority that can be used by a given scheduling class. 9034 * On failure, a negative error code is returned. 9035 */ 9036 SYSCALL_DEFINE1(sched_get_priority_max, int, policy) 9037 { 9038 int ret = -EINVAL; 9039 9040 switch (policy) { 9041 case SCHED_FIFO: 9042 case SCHED_RR: 9043 ret = MAX_RT_PRIO-1; 9044 break; 9045 case SCHED_DEADLINE: 9046 case SCHED_NORMAL: 9047 case SCHED_BATCH: 9048 case SCHED_IDLE: 9049 ret = 0; 9050 break; 9051 } 9052 return ret; 9053 } 9054 9055 /** 9056 * sys_sched_get_priority_min - return minimum RT priority. 9057 * @policy: scheduling class. 9058 * 9059 * Return: On success, this syscall returns the minimum 9060 * rt_priority that can be used by a given scheduling class. 9061 * On failure, a negative error code is returned. 9062 */ 9063 SYSCALL_DEFINE1(sched_get_priority_min, int, policy) 9064 { 9065 int ret = -EINVAL; 9066 9067 switch (policy) { 9068 case SCHED_FIFO: 9069 case SCHED_RR: 9070 ret = 1; 9071 break; 9072 case SCHED_DEADLINE: 9073 case SCHED_NORMAL: 9074 case SCHED_BATCH: 9075 case SCHED_IDLE: 9076 ret = 0; 9077 } 9078 return ret; 9079 } 9080 9081 static int sched_rr_get_interval(pid_t pid, struct timespec64 *t) 9082 { 9083 unsigned int time_slice = 0; 9084 int retval; 9085 9086 if (pid < 0) 9087 return -EINVAL; 9088 9089 scoped_guard (rcu) { 9090 struct task_struct *p = find_process_by_pid(pid); 9091 if (!p) 9092 return -ESRCH; 9093 9094 retval = security_task_getscheduler(p); 9095 if (retval) 9096 return retval; 9097 9098 scoped_guard (task_rq_lock, p) { 9099 struct rq *rq = scope.rq; 9100 if (p->sched_class->get_rr_interval) 9101 time_slice = p->sched_class->get_rr_interval(rq, p); 9102 } 9103 } 9104 9105 jiffies_to_timespec64(time_slice, t); 9106 return 0; 9107 } 9108 9109 /** 9110 * sys_sched_rr_get_interval - return the default timeslice of a process. 9111 * @pid: pid of the process. 9112 * @interval: userspace pointer to the timeslice value. 9113 * 9114 * this syscall writes the default timeslice value of a given process 9115 * into the user-space timespec buffer. A value of '0' means infinity. 9116 * 9117 * Return: On success, 0 and the timeslice is in @interval. Otherwise, 9118 * an error code. 
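 *
 * Illustrative user-space sketch (via the libc wrapper, not kernel code):
 *
 *	struct timespec ts;
 *
 *	if (sched_rr_get_interval(pid, &ts) == 0)
 *		printf("timeslice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
 *
 * Scheduling classes that do not implement get_rr_interval() report 0,
 * i.e. an effectively infinite timeslice.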
9119 */ 9120 SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, 9121 struct __kernel_timespec __user *, interval) 9122 { 9123 struct timespec64 t; 9124 int retval = sched_rr_get_interval(pid, &t); 9125 9126 if (retval == 0) 9127 retval = put_timespec64(&t, interval); 9128 9129 return retval; 9130 } 9131 9132 #ifdef CONFIG_COMPAT_32BIT_TIME 9133 SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid, 9134 struct old_timespec32 __user *, interval) 9135 { 9136 struct timespec64 t; 9137 int retval = sched_rr_get_interval(pid, &t); 9138 9139 if (retval == 0) 9140 retval = put_old_timespec32(&t, interval); 9141 return retval; 9142 } 9143 #endif 9144 9145 void sched_show_task(struct task_struct *p) 9146 { 9147 unsigned long free = 0; 9148 int ppid; 9149 9150 if (!try_get_task_stack(p)) 9151 return; 9152 9153 pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p)); 9154 9155 if (task_is_running(p)) 9156 pr_cont(" running task "); 9157 #ifdef CONFIG_DEBUG_STACK_USAGE 9158 free = stack_not_used(p); 9159 #endif 9160 ppid = 0; 9161 rcu_read_lock(); 9162 if (pid_alive(p)) 9163 ppid = task_pid_nr(rcu_dereference(p->real_parent)); 9164 rcu_read_unlock(); 9165 pr_cont(" stack:%-5lu pid:%-5d tgid:%-5d ppid:%-6d flags:0x%08lx\n", 9166 free, task_pid_nr(p), task_tgid_nr(p), 9167 ppid, read_task_thread_flags(p)); 9168 9169 print_worker_info(KERN_INFO, p); 9170 print_stop_info(KERN_INFO, p); 9171 show_stack(p, NULL, KERN_INFO); 9172 put_task_stack(p); 9173 } 9174 EXPORT_SYMBOL_GPL(sched_show_task); 9175 9176 static inline bool 9177 state_filter_match(unsigned long state_filter, struct task_struct *p) 9178 { 9179 unsigned int state = READ_ONCE(p->__state); 9180 9181 /* no filter, everything matches */ 9182 if (!state_filter) 9183 return true; 9184 9185 /* filter, but doesn't match */ 9186 if (!(state & state_filter)) 9187 return false; 9188 9189 /* 9190 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows 9191 * TASK_KILLABLE). 9192 */ 9193 if (state_filter == TASK_UNINTERRUPTIBLE && (state & TASK_NOLOAD)) 9194 return false; 9195 9196 return true; 9197 } 9198 9199 9200 void show_state_filter(unsigned int state_filter) 9201 { 9202 struct task_struct *g, *p; 9203 9204 rcu_read_lock(); 9205 for_each_process_thread(g, p) { 9206 /* 9207 * reset the NMI-timeout, listing all files on a slow 9208 * console might take a lot of time: 9209 * Also, reset softlockup watchdogs on all CPUs, because 9210 * another CPU might be blocked waiting for us to process 9211 * an IPI. 9212 */ 9213 touch_nmi_watchdog(); 9214 touch_all_softlockup_watchdogs(); 9215 if (state_filter_match(state_filter, p)) 9216 sched_show_task(p); 9217 } 9218 9219 #ifdef CONFIG_SCHED_DEBUG 9220 if (!state_filter) 9221 sysrq_sched_debug_show(); 9222 #endif 9223 rcu_read_unlock(); 9224 /* 9225 * Only show locks if all tasks are dumped: 9226 */ 9227 if (!state_filter) 9228 debug_show_all_locks(); 9229 } 9230 9231 /** 9232 * init_idle - set up an idle thread for a given CPU 9233 * @idle: task in question 9234 * @cpu: CPU the idle task belongs to 9235 * 9236 * NOTE: this function does not set the idle thread's NEED_RESCHED 9237 * flag, to make booting more robust. 
9238 */ 9239 void __init init_idle(struct task_struct *idle, int cpu) 9240 { 9241 #ifdef CONFIG_SMP 9242 struct affinity_context ac = (struct affinity_context) { 9243 .new_mask = cpumask_of(cpu), 9244 .flags = 0, 9245 }; 9246 #endif 9247 struct rq *rq = cpu_rq(cpu); 9248 unsigned long flags; 9249 9250 __sched_fork(0, idle); 9251 9252 raw_spin_lock_irqsave(&idle->pi_lock, flags); 9253 raw_spin_rq_lock(rq); 9254 9255 idle->__state = TASK_RUNNING; 9256 idle->se.exec_start = sched_clock(); 9257 /* 9258 * PF_KTHREAD should already be set at this point; regardless, make it 9259 * look like a proper per-CPU kthread. 9260 */ 9261 idle->flags |= PF_KTHREAD | PF_NO_SETAFFINITY; 9262 kthread_set_per_cpu(idle, cpu); 9263 9264 #ifdef CONFIG_SMP 9265 /* 9266 * It's possible that init_idle() gets called multiple times on a task, 9267 * in that case do_set_cpus_allowed() will not do the right thing. 9268 * 9269 * And since this is boot we can forgo the serialization. 9270 */ 9271 set_cpus_allowed_common(idle, &ac); 9272 #endif 9273 /* 9274 * We're having a chicken and egg problem, even though we are 9275 * holding rq->lock, the CPU isn't yet set to this CPU so the 9276 * lockdep check in task_group() will fail. 9277 * 9278 * Similar case to sched_fork(). / Alternatively we could 9279 * use task_rq_lock() here and obtain the other rq->lock. 9280 * 9281 * Silence PROVE_RCU 9282 */ 9283 rcu_read_lock(); 9284 __set_task_cpu(idle, cpu); 9285 rcu_read_unlock(); 9286 9287 rq->idle = idle; 9288 rcu_assign_pointer(rq->curr, idle); 9289 idle->on_rq = TASK_ON_RQ_QUEUED; 9290 #ifdef CONFIG_SMP 9291 idle->on_cpu = 1; 9292 #endif 9293 raw_spin_rq_unlock(rq); 9294 raw_spin_unlock_irqrestore(&idle->pi_lock, flags); 9295 9296 /* Set the preempt count _outside_ the spinlocks! */ 9297 init_idle_preempt_count(idle, cpu); 9298 9299 /* 9300 * The idle tasks have their own, simple scheduling class: 9301 */ 9302 idle->sched_class = &idle_sched_class; 9303 ftrace_graph_init_idle_task(idle, cpu); 9304 vtime_init_idle(idle, cpu); 9305 #ifdef CONFIG_SMP 9306 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); 9307 #endif 9308 } 9309 9310 #ifdef CONFIG_SMP 9311 9312 int cpuset_cpumask_can_shrink(const struct cpumask *cur, 9313 const struct cpumask *trial) 9314 { 9315 int ret = 1; 9316 9317 if (cpumask_empty(cur)) 9318 return ret; 9319 9320 ret = dl_cpuset_cpumask_can_shrink(cur, trial); 9321 9322 return ret; 9323 } 9324 9325 int task_can_attach(struct task_struct *p) 9326 { 9327 int ret = 0; 9328 9329 /* 9330 * Kthreads which disallow setaffinity shouldn't be moved 9331 * to a new cpuset; we don't want to change their CPU 9332 * affinity and isolating such threads by their set of 9333 * allowed nodes is unnecessary. Thus, cpusets are not 9334 * applicable for such threads. This prevents checking for 9335 * success of set_cpus_allowed_ptr() on all attached tasks 9336 * before cpus_mask may be changed. 
9337 */ 9338 if (p->flags & PF_NO_SETAFFINITY) 9339 ret = -EINVAL; 9340 9341 return ret; 9342 } 9343 9344 bool sched_smp_initialized __read_mostly; 9345 9346 #ifdef CONFIG_NUMA_BALANCING 9347 /* Migrate current task p to target_cpu */ 9348 int migrate_task_to(struct task_struct *p, int target_cpu) 9349 { 9350 struct migration_arg arg = { p, target_cpu }; 9351 int curr_cpu = task_cpu(p); 9352 9353 if (curr_cpu == target_cpu) 9354 return 0; 9355 9356 if (!cpumask_test_cpu(target_cpu, p->cpus_ptr)) 9357 return -EINVAL; 9358 9359 /* TODO: This is not properly updating schedstats */ 9360 9361 trace_sched_move_numa(p, curr_cpu, target_cpu); 9362 return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg); 9363 } 9364 9365 /* 9366 * Requeue a task on a given node and accurately track the number of NUMA 9367 * tasks on the runqueues 9368 */ 9369 void sched_setnuma(struct task_struct *p, int nid) 9370 { 9371 bool queued, running; 9372 struct rq_flags rf; 9373 struct rq *rq; 9374 9375 rq = task_rq_lock(p, &rf); 9376 queued = task_on_rq_queued(p); 9377 running = task_current(rq, p); 9378 9379 if (queued) 9380 dequeue_task(rq, p, DEQUEUE_SAVE); 9381 if (running) 9382 put_prev_task(rq, p); 9383 9384 p->numa_preferred_nid = nid; 9385 9386 if (queued) 9387 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); 9388 if (running) 9389 set_next_task(rq, p); 9390 task_rq_unlock(rq, p, &rf); 9391 } 9392 #endif /* CONFIG_NUMA_BALANCING */ 9393 9394 #ifdef CONFIG_HOTPLUG_CPU 9395 /* 9396 * Ensure that the idle task is using init_mm right before its CPU goes 9397 * offline. 9398 */ 9399 void idle_task_exit(void) 9400 { 9401 struct mm_struct *mm = current->active_mm; 9402 9403 BUG_ON(cpu_online(smp_processor_id())); 9404 BUG_ON(current != this_rq()->idle); 9405 9406 if (mm != &init_mm) { 9407 switch_mm(mm, &init_mm, current); 9408 finish_arch_post_lock_switch(); 9409 } 9410 9411 /* finish_cpu(), as ran on the BP, will clean up the active_mm state */ 9412 } 9413 9414 static int __balance_push_cpu_stop(void *arg) 9415 { 9416 struct task_struct *p = arg; 9417 struct rq *rq = this_rq(); 9418 struct rq_flags rf; 9419 int cpu; 9420 9421 raw_spin_lock_irq(&p->pi_lock); 9422 rq_lock(rq, &rf); 9423 9424 update_rq_clock(rq); 9425 9426 if (task_rq(p) == rq && task_on_rq_queued(p)) { 9427 cpu = select_fallback_rq(rq->cpu, p); 9428 rq = __migrate_task(rq, &rf, p, cpu); 9429 } 9430 9431 rq_unlock(rq, &rf); 9432 raw_spin_unlock_irq(&p->pi_lock); 9433 9434 put_task_struct(p); 9435 9436 return 0; 9437 } 9438 9439 static DEFINE_PER_CPU(struct cpu_stop_work, push_work); 9440 9441 /* 9442 * Ensure we only run per-cpu kthreads once the CPU goes !active. 9443 * 9444 * This is enabled below SCHED_AP_ACTIVE; when !cpu_active(), but only 9445 * effective when the hotplug motion is down. 9446 */ 9447 static void balance_push(struct rq *rq) 9448 { 9449 struct task_struct *push_task = rq->curr; 9450 9451 lockdep_assert_rq_held(rq); 9452 9453 /* 9454 * Ensure the thing is persistent until balance_push_set(.on = false); 9455 */ 9456 rq->balance_callback = &balance_push_callback; 9457 9458 /* 9459 * Only active while going offline and when invoked on the outgoing 9460 * CPU. 9461 */ 9462 if (!cpu_dying(rq->cpu) || rq != this_rq()) 9463 return; 9464 9465 /* 9466 * Both the cpu-hotplug and stop task are in this case and are 9467 * required to complete the hotplug process. 
9468 */ 9469 if (kthread_is_per_cpu(push_task) || 9470 is_migration_disabled(push_task)) { 9471 9472 /* 9473 * If this is the idle task on the outgoing CPU try to wake 9474 * up the hotplug control thread which might wait for the 9475 * last task to vanish. The rcuwait_active() check is 9476 * accurate here because the waiter is pinned on this CPU 9477 * and can't obviously be running in parallel. 9478 * 9479 * On RT kernels this also has to check whether there are 9480 * pinned and scheduled out tasks on the runqueue. They 9481 * need to leave the migrate disabled section first. 9482 */ 9483 if (!rq->nr_running && !rq_has_pinned_tasks(rq) && 9484 rcuwait_active(&rq->hotplug_wait)) { 9485 raw_spin_rq_unlock(rq); 9486 rcuwait_wake_up(&rq->hotplug_wait); 9487 raw_spin_rq_lock(rq); 9488 } 9489 return; 9490 } 9491 9492 get_task_struct(push_task); 9493 /* 9494 * Temporarily drop rq->lock such that we can wake-up the stop task. 9495 * Both preemption and IRQs are still disabled. 9496 */ 9497 preempt_disable(); 9498 raw_spin_rq_unlock(rq); 9499 stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task, 9500 this_cpu_ptr(&push_work)); 9501 preempt_enable(); 9502 /* 9503 * At this point need_resched() is true and we'll take the loop in 9504 * schedule(). The next pick is obviously going to be the stop task 9505 * which kthread_is_per_cpu() and will push this task away. 9506 */ 9507 raw_spin_rq_lock(rq); 9508 } 9509 9510 static void balance_push_set(int cpu, bool on) 9511 { 9512 struct rq *rq = cpu_rq(cpu); 9513 struct rq_flags rf; 9514 9515 rq_lock_irqsave(rq, &rf); 9516 if (on) { 9517 WARN_ON_ONCE(rq->balance_callback); 9518 rq->balance_callback = &balance_push_callback; 9519 } else if (rq->balance_callback == &balance_push_callback) { 9520 rq->balance_callback = NULL; 9521 } 9522 rq_unlock_irqrestore(rq, &rf); 9523 } 9524 9525 /* 9526 * Invoked from a CPUs hotplug control thread after the CPU has been marked 9527 * inactive. All tasks which are not per CPU kernel threads are either 9528 * pushed off this CPU now via balance_push() or placed on a different CPU 9529 * during wakeup. Wait until the CPU is quiescent. 9530 */ 9531 static void balance_hotplug_wait(void) 9532 { 9533 struct rq *rq = this_rq(); 9534 9535 rcuwait_wait_event(&rq->hotplug_wait, 9536 rq->nr_running == 1 && !rq_has_pinned_tasks(rq), 9537 TASK_UNINTERRUPTIBLE); 9538 } 9539 9540 #else 9541 9542 static inline void balance_push(struct rq *rq) 9543 { 9544 } 9545 9546 static inline void balance_push_set(int cpu, bool on) 9547 { 9548 } 9549 9550 static inline void balance_hotplug_wait(void) 9551 { 9552 } 9553 9554 #endif /* CONFIG_HOTPLUG_CPU */ 9555 9556 void set_rq_online(struct rq *rq) 9557 { 9558 if (!rq->online) { 9559 const struct sched_class *class; 9560 9561 cpumask_set_cpu(rq->cpu, rq->rd->online); 9562 rq->online = 1; 9563 9564 for_each_class(class) { 9565 if (class->rq_online) 9566 class->rq_online(rq); 9567 } 9568 } 9569 } 9570 9571 void set_rq_offline(struct rq *rq) 9572 { 9573 if (rq->online) { 9574 const struct sched_class *class; 9575 9576 update_rq_clock(rq); 9577 for_each_class(class) { 9578 if (class->rq_offline) 9579 class->rq_offline(rq); 9580 } 9581 9582 cpumask_clear_cpu(rq->cpu, rq->rd->online); 9583 rq->online = 0; 9584 } 9585 } 9586 9587 /* 9588 * used to mark begin/end of suspend/resume: 9589 */ 9590 static int num_cpus_frozen; 9591 9592 /* 9593 * Update cpusets according to cpu_active mask. 
If cpusets are 9594 * disabled, cpuset_update_active_cpus() becomes a simple wrapper 9595 * around partition_sched_domains(). 9596 * 9597 * If we come here as part of a suspend/resume, don't touch cpusets because we 9598 * want to restore it back to its original state upon resume anyway. 9599 */ 9600 static void cpuset_cpu_active(void) 9601 { 9602 if (cpuhp_tasks_frozen) { 9603 /* 9604 * num_cpus_frozen tracks how many CPUs are involved in suspend 9605 * resume sequence. As long as this is not the last online 9606 * operation in the resume sequence, just build a single sched 9607 * domain, ignoring cpusets. 9608 */ 9609 partition_sched_domains(1, NULL, NULL); 9610 if (--num_cpus_frozen) 9611 return; 9612 /* 9613 * This is the last CPU online operation. So fall through and 9614 * restore the original sched domains by considering the 9615 * cpuset configurations. 9616 */ 9617 cpuset_force_rebuild(); 9618 } 9619 cpuset_update_active_cpus(); 9620 } 9621 9622 static int cpuset_cpu_inactive(unsigned int cpu) 9623 { 9624 if (!cpuhp_tasks_frozen) { 9625 int ret = dl_bw_check_overflow(cpu); 9626 9627 if (ret) 9628 return ret; 9629 cpuset_update_active_cpus(); 9630 } else { 9631 num_cpus_frozen++; 9632 partition_sched_domains(1, NULL, NULL); 9633 } 9634 return 0; 9635 } 9636 9637 int sched_cpu_activate(unsigned int cpu) 9638 { 9639 struct rq *rq = cpu_rq(cpu); 9640 struct rq_flags rf; 9641 9642 /* 9643 * Clear the balance_push callback and prepare to schedule 9644 * regular tasks. 9645 */ 9646 balance_push_set(cpu, false); 9647 9648 #ifdef CONFIG_SCHED_SMT 9649 /* 9650 * When going up, increment the number of cores with SMT present. 9651 */ 9652 if (cpumask_weight(cpu_smt_mask(cpu)) == 2) 9653 static_branch_inc_cpuslocked(&sched_smt_present); 9654 #endif 9655 set_cpu_active(cpu, true); 9656 9657 if (sched_smp_initialized) { 9658 sched_update_numa(cpu, true); 9659 sched_domains_numa_masks_set(cpu); 9660 cpuset_cpu_active(); 9661 } 9662 9663 /* 9664 * Put the rq online, if not already. This happens: 9665 * 9666 * 1) In the early boot process, because we build the real domains 9667 * after all CPUs have been brought up. 9668 * 9669 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the 9670 * domains. 9671 */ 9672 rq_lock_irqsave(rq, &rf); 9673 if (rq->rd) { 9674 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 9675 set_rq_online(rq); 9676 } 9677 rq_unlock_irqrestore(rq, &rf); 9678 9679 return 0; 9680 } 9681 9682 int sched_cpu_deactivate(unsigned int cpu) 9683 { 9684 struct rq *rq = cpu_rq(cpu); 9685 struct rq_flags rf; 9686 int ret; 9687 9688 /* 9689 * Remove CPU from nohz.idle_cpus_mask to prevent participating in 9690 * load balancing when not active 9691 */ 9692 nohz_balance_exit_idle(rq); 9693 9694 set_cpu_active(cpu, false); 9695 9696 /* 9697 * From this point forward, this CPU will refuse to run any task that 9698 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively 9699 * push those tasks away until this gets cleared, see 9700 * sched_cpu_dying(). 9701 */ 9702 balance_push_set(cpu, true); 9703 9704 /* 9705 * We've cleared cpu_active_mask / set balance_push, wait for all 9706 * preempt-disabled and RCU users of this state to go away such that 9707 * all new such users will observe it. 9708 * 9709 * Specifically, we rely on ttwu to no longer target this CPU, see 9710 * ttwu_queue_cond() and is_cpu_allowed(). 9711 * 9712 * Do sync before park smpboot threads to take care the rcu boost case. 
9713 */ 9714 synchronize_rcu(); 9715 9716 rq_lock_irqsave(rq, &rf); 9717 if (rq->rd) { 9718 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 9719 set_rq_offline(rq); 9720 } 9721 rq_unlock_irqrestore(rq, &rf); 9722 9723 #ifdef CONFIG_SCHED_SMT 9724 /* 9725 * When going down, decrement the number of cores with SMT present. 9726 */ 9727 if (cpumask_weight(cpu_smt_mask(cpu)) == 2) 9728 static_branch_dec_cpuslocked(&sched_smt_present); 9729 9730 sched_core_cpu_deactivate(cpu); 9731 #endif 9732 9733 if (!sched_smp_initialized) 9734 return 0; 9735 9736 sched_update_numa(cpu, false); 9737 ret = cpuset_cpu_inactive(cpu); 9738 if (ret) { 9739 balance_push_set(cpu, false); 9740 set_cpu_active(cpu, true); 9741 sched_update_numa(cpu, true); 9742 return ret; 9743 } 9744 sched_domains_numa_masks_clear(cpu); 9745 return 0; 9746 } 9747 9748 static void sched_rq_cpu_starting(unsigned int cpu) 9749 { 9750 struct rq *rq = cpu_rq(cpu); 9751 9752 rq->calc_load_update = calc_load_update; 9753 update_max_interval(); 9754 } 9755 9756 int sched_cpu_starting(unsigned int cpu) 9757 { 9758 sched_core_cpu_starting(cpu); 9759 sched_rq_cpu_starting(cpu); 9760 sched_tick_start(cpu); 9761 return 0; 9762 } 9763 9764 #ifdef CONFIG_HOTPLUG_CPU 9765 9766 /* 9767 * Invoked immediately before the stopper thread is invoked to bring the 9768 * CPU down completely. At this point all per CPU kthreads except the 9769 * hotplug thread (current) and the stopper thread (inactive) have been 9770 * either parked or have been unbound from the outgoing CPU. Ensure that 9771 * any of those which might be on the way out are gone. 9772 * 9773 * If after this point a bound task is being woken on this CPU then the 9774 * responsible hotplug callback has failed to do its job. 9775 * sched_cpu_dying() will catch it with the appropriate fireworks. 9776 */ 9777 int sched_cpu_wait_empty(unsigned int cpu) 9778 { 9779 balance_hotplug_wait(); 9780 return 0; 9781 } 9782 9783 /* 9784 * Since this CPU is going 'away' for a while, fold any nr_active delta we 9785 * might have. Called from the CPU stopper task after ensuring that the 9786 * stopper is the last running task on the CPU, so nr_active count is 9787 * stable. We need to take the teardown thread which is calling this into 9788 * account, so we hand in adjust = 1 to the load calculation. 9789 9790 * Also see the comment "Global load-average calculations".
9791 */ 9792 static void calc_load_migrate(struct rq *rq) 9793 { 9794 long delta = calc_load_fold_active(rq, 1); 9795 9796 if (delta) 9797 atomic_long_add(delta, &calc_load_tasks); 9798 } 9799 9800 static void dump_rq_tasks(struct rq *rq, const char *loglvl) 9801 { 9802 struct task_struct *g, *p; 9803 int cpu = cpu_of(rq); 9804 9805 lockdep_assert_rq_held(rq); 9806 9807 printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running); 9808 for_each_process_thread(g, p) { 9809 if (task_cpu(p) != cpu) 9810 continue; 9811 9812 if (!task_on_rq_queued(p)) 9813 continue; 9814 9815 printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm); 9816 } 9817 } 9818 9819 int sched_cpu_dying(unsigned int cpu) 9820 { 9821 struct rq *rq = cpu_rq(cpu); 9822 struct rq_flags rf; 9823 9824 /* Handle pending wakeups and then migrate everything off */ 9825 sched_tick_stop(cpu); 9826 9827 rq_lock_irqsave(rq, &rf); 9828 if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) { 9829 WARN(true, "Dying CPU not properly vacated!"); 9830 dump_rq_tasks(rq, KERN_WARNING); 9831 } 9832 rq_unlock_irqrestore(rq, &rf); 9833 9834 calc_load_migrate(rq); 9835 update_max_interval(); 9836 hrtick_clear(rq); 9837 sched_core_cpu_dying(cpu); 9838 return 0; 9839 } 9840 #endif 9841 9842 void __init sched_init_smp(void) 9843 { 9844 sched_init_numa(NUMA_NO_NODE); 9845 9846 /* 9847 * There's no userspace yet to cause hotplug operations; hence all the 9848 * CPU masks are stable and all blatant races in the below code cannot 9849 * happen. 9850 */ 9851 mutex_lock(&sched_domains_mutex); 9852 sched_init_domains(cpu_active_mask); 9853 mutex_unlock(&sched_domains_mutex); 9854 9855 /* Move init over to a non-isolated CPU */ 9856 if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0) 9857 BUG(); 9858 current->flags &= ~PF_NO_SETAFFINITY; 9859 sched_init_granularity(); 9860 9861 init_sched_rt_class(); 9862 init_sched_dl_class(); 9863 9864 sched_smp_initialized = true; 9865 } 9866 9867 static int __init migration_init(void) 9868 { 9869 sched_cpu_starting(smp_processor_id()); 9870 return 0; 9871 } 9872 early_initcall(migration_init); 9873 9874 #else 9875 void __init sched_init_smp(void) 9876 { 9877 sched_init_granularity(); 9878 } 9879 #endif /* CONFIG_SMP */ 9880 9881 int in_sched_functions(unsigned long addr) 9882 { 9883 return in_lock_functions(addr) || 9884 (addr >= (unsigned long)__sched_text_start 9885 && addr < (unsigned long)__sched_text_end); 9886 } 9887 9888 #ifdef CONFIG_CGROUP_SCHED 9889 /* 9890 * Default task group. 9891 * Every task in system belongs to this group at bootup. 
9892 */ 9893 struct task_group root_task_group; 9894 LIST_HEAD(task_groups); 9895 9896 /* Cacheline aligned slab cache for task_group */ 9897 static struct kmem_cache *task_group_cache __ro_after_init; 9898 #endif 9899 9900 void __init sched_init(void) 9901 { 9902 unsigned long ptr = 0; 9903 int i; 9904 9905 /* Make sure the linker didn't screw up */ 9906 BUG_ON(&idle_sched_class != &fair_sched_class + 1 || 9907 &fair_sched_class != &rt_sched_class + 1 || 9908 &rt_sched_class != &dl_sched_class + 1); 9909 #ifdef CONFIG_SMP 9910 BUG_ON(&dl_sched_class != &stop_sched_class + 1); 9911 #endif 9912 9913 wait_bit_init(); 9914 9915 #ifdef CONFIG_FAIR_GROUP_SCHED 9916 ptr += 2 * nr_cpu_ids * sizeof(void **); 9917 #endif 9918 #ifdef CONFIG_RT_GROUP_SCHED 9919 ptr += 2 * nr_cpu_ids * sizeof(void **); 9920 #endif 9921 if (ptr) { 9922 ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT); 9923 9924 #ifdef CONFIG_FAIR_GROUP_SCHED 9925 root_task_group.se = (struct sched_entity **)ptr; 9926 ptr += nr_cpu_ids * sizeof(void **); 9927 9928 root_task_group.cfs_rq = (struct cfs_rq **)ptr; 9929 ptr += nr_cpu_ids * sizeof(void **); 9930 9931 root_task_group.shares = ROOT_TASK_GROUP_LOAD; 9932 init_cfs_bandwidth(&root_task_group.cfs_bandwidth, NULL); 9933 #endif /* CONFIG_FAIR_GROUP_SCHED */ 9934 #ifdef CONFIG_RT_GROUP_SCHED 9935 root_task_group.rt_se = (struct sched_rt_entity **)ptr; 9936 ptr += nr_cpu_ids * sizeof(void **); 9937 9938 root_task_group.rt_rq = (struct rt_rq **)ptr; 9939 ptr += nr_cpu_ids * sizeof(void **); 9940 9941 #endif /* CONFIG_RT_GROUP_SCHED */ 9942 } 9943 9944 init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime()); 9945 9946 #ifdef CONFIG_SMP 9947 init_defrootdomain(); 9948 #endif 9949 9950 #ifdef CONFIG_RT_GROUP_SCHED 9951 init_rt_bandwidth(&root_task_group.rt_bandwidth, 9952 global_rt_period(), global_rt_runtime()); 9953 #endif /* CONFIG_RT_GROUP_SCHED */ 9954 9955 #ifdef CONFIG_CGROUP_SCHED 9956 task_group_cache = KMEM_CACHE(task_group, 0); 9957 9958 list_add(&root_task_group.list, &task_groups); 9959 INIT_LIST_HEAD(&root_task_group.children); 9960 INIT_LIST_HEAD(&root_task_group.siblings); 9961 autogroup_init(&init_task); 9962 #endif /* CONFIG_CGROUP_SCHED */ 9963 9964 for_each_possible_cpu(i) { 9965 struct rq *rq; 9966 9967 rq = cpu_rq(i); 9968 raw_spin_lock_init(&rq->__lock); 9969 rq->nr_running = 0; 9970 rq->calc_load_active = 0; 9971 rq->calc_load_update = jiffies + LOAD_FREQ; 9972 init_cfs_rq(&rq->cfs); 9973 init_rt_rq(&rq->rt); 9974 init_dl_rq(&rq->dl); 9975 #ifdef CONFIG_FAIR_GROUP_SCHED 9976 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); 9977 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; 9978 /* 9979 * How much CPU bandwidth does root_task_group get? 9980 * 9981 * In case of task-groups formed thr' the cgroup filesystem, it 9982 * gets 100% of the CPU resources in the system. This overall 9983 * system CPU resource is divided among the tasks of 9984 * root_task_group and its child task-groups in a fair manner, 9985 * based on each entity's (task or task-group's) weight 9986 * (se->load.weight). 9987 * 9988 * In other words, if root_task_group has 10 tasks of weight 9989 * 1024) and two child groups A0 and A1 (of weight 1024 each), 9990 * then A0's share of the CPU resource is: 9991 * 9992 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33% 9993 * 9994 * We achieve this by letting root_task_group's tasks sit 9995 * directly in rq->cfs (i.e root_task_group->se[] = NULL). 
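 *
 * More generally (an illustrative restatement of the same rule): at any
 * single level,
 *
 *	share(entity) = entity's weight / sum of all sibling weights
 *
 * so in the example above each of the ten root tasks, A0 and A1 gets
 * 1024 / 12288 = 1/12 ~= 8.33% of the CPU, and the tasks inside A0 then
 * split A0's 8.33% among themselves by the same rule.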
9996 */ 9997 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL); 9998 #endif /* CONFIG_FAIR_GROUP_SCHED */ 9999 10000 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime; 10001 #ifdef CONFIG_RT_GROUP_SCHED 10002 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); 10003 #endif 10004 #ifdef CONFIG_SMP 10005 rq->sd = NULL; 10006 rq->rd = NULL; 10007 rq->cpu_capacity = SCHED_CAPACITY_SCALE; 10008 rq->balance_callback = &balance_push_callback; 10009 rq->active_balance = 0; 10010 rq->next_balance = jiffies; 10011 rq->push_cpu = 0; 10012 rq->cpu = i; 10013 rq->online = 0; 10014 rq->idle_stamp = 0; 10015 rq->avg_idle = 2*sysctl_sched_migration_cost; 10016 rq->max_idle_balance_cost = sysctl_sched_migration_cost; 10017 10018 INIT_LIST_HEAD(&rq->cfs_tasks); 10019 10020 rq_attach_root(rq, &def_root_domain); 10021 #ifdef CONFIG_NO_HZ_COMMON 10022 rq->last_blocked_load_update_tick = jiffies; 10023 atomic_set(&rq->nohz_flags, 0); 10024 10025 INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq); 10026 #endif 10027 #ifdef CONFIG_HOTPLUG_CPU 10028 rcuwait_init(&rq->hotplug_wait); 10029 #endif 10030 #endif /* CONFIG_SMP */ 10031 hrtick_rq_init(rq); 10032 atomic_set(&rq->nr_iowait, 0); 10033 10034 #ifdef CONFIG_SCHED_CORE 10035 rq->core = rq; 10036 rq->core_pick = NULL; 10037 rq->core_enabled = 0; 10038 rq->core_tree = RB_ROOT; 10039 rq->core_forceidle_count = 0; 10040 rq->core_forceidle_occupation = 0; 10041 rq->core_forceidle_start = 0; 10042 10043 rq->core_cookie = 0UL; 10044 #endif 10045 zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i)); 10046 } 10047 10048 set_load_weight(&init_task, false); 10049 10050 /* 10051 * The boot idle thread does lazy MMU switching as well: 10052 */ 10053 mmgrab_lazy_tlb(&init_mm); 10054 enter_lazy_tlb(&init_mm, current); 10055 10056 /* 10057 * The idle task doesn't need the kthread struct to function, but it 10058 * is dressed up as a per-CPU kthread and thus needs to play the part 10059 * if we want to avoid special-casing it in code that deals with per-CPU 10060 * kthreads. 10061 */ 10062 WARN_ON(!set_kthread_struct(current)); 10063 10064 /* 10065 * Make us the idle thread. Technically, schedule() should not be 10066 * called from this thread, however somewhere below it might be, 10067 * but because we are the idle thread, we just pick up running again 10068 * when this runqueue becomes "idle". 10069 */ 10070 init_idle(current, smp_processor_id()); 10071 10072 calc_load_update = jiffies + LOAD_FREQ; 10073 10074 #ifdef CONFIG_SMP 10075 idle_thread_set_boot_cpu(); 10076 balance_push_set(smp_processor_id(), false); 10077 #endif 10078 init_sched_fair_class(); 10079 10080 psi_init(); 10081 10082 init_uclamp(); 10083 10084 preempt_dynamic_init(); 10085 10086 scheduler_running = 1; 10087 } 10088 10089 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 10090 10091 void __might_sleep(const char *file, int line) 10092 { 10093 unsigned int state = get_current_state(); 10094 /* 10095 * Blocking primitives will set (and therefore destroy) current->state, 10096 * since we will exit with TASK_RUNNING make sure we enter with it, 10097 * otherwise we will destroy state. 
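 *
 * As a rough illustration (a sketch only; "wait_for_buffer()" is a
 * made-up helper, not code from this file), the pattern this warning
 * catches is:
 *
 *      set_current_state(TASK_UNINTERRUPTIBLE);
 *      if (!done)
 *              wait_for_buffer();      // may block: sets current->state
 *                                      // back to TASK_RUNNING underneath us
 *      schedule();                     // the intended sleep state is gone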
10098 */ 10099 WARN_ONCE(state != TASK_RUNNING && current->task_state_change, 10100 "do not call blocking ops when !TASK_RUNNING; " 10101 "state=%x set at [<%p>] %pS\n", state, 10102 (void *)current->task_state_change, 10103 (void *)current->task_state_change); 10104 10105 __might_resched(file, line, 0); 10106 } 10107 EXPORT_SYMBOL(__might_sleep); 10108 10109 static void print_preempt_disable_ip(int preempt_offset, unsigned long ip) 10110 { 10111 if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT)) 10112 return; 10113 10114 if (preempt_count() == preempt_offset) 10115 return; 10116 10117 pr_err("Preemption disabled at:"); 10118 print_ip_sym(KERN_ERR, ip); 10119 } 10120 10121 static inline bool resched_offsets_ok(unsigned int offsets) 10122 { 10123 unsigned int nested = preempt_count(); 10124 10125 nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT; 10126 10127 return nested == offsets; 10128 } 10129 10130 void __might_resched(const char *file, int line, unsigned int offsets) 10131 { 10132 /* Ratelimiting timestamp: */ 10133 static unsigned long prev_jiffy; 10134 10135 unsigned long preempt_disable_ip; 10136 10137 /* WARN_ON_ONCE() by default, no rate limit required: */ 10138 rcu_sleep_check(); 10139 10140 if ((resched_offsets_ok(offsets) && !irqs_disabled() && 10141 !is_idle_task(current) && !current->non_block_count) || 10142 system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING || 10143 oops_in_progress) 10144 return; 10145 10146 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 10147 return; 10148 prev_jiffy = jiffies; 10149 10150 /* Save this before calling printk(), since that will clobber it: */ 10151 preempt_disable_ip = get_preempt_disable_ip(current); 10152 10153 pr_err("BUG: sleeping function called from invalid context at %s:%d\n", 10154 file, line); 10155 pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n", 10156 in_atomic(), irqs_disabled(), current->non_block_count, 10157 current->pid, current->comm); 10158 pr_err("preempt_count: %x, expected: %x\n", preempt_count(), 10159 offsets & MIGHT_RESCHED_PREEMPT_MASK); 10160 10161 if (IS_ENABLED(CONFIG_PREEMPT_RCU)) { 10162 pr_err("RCU nest depth: %d, expected: %u\n", 10163 rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT); 10164 } 10165 10166 if (task_stack_end_corrupted(current)) 10167 pr_emerg("Thread overran stack, or stack corrupted\n"); 10168 10169 debug_show_held_locks(current); 10170 if (irqs_disabled()) 10171 print_irqtrace_events(current); 10172 10173 print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK, 10174 preempt_disable_ip); 10175 10176 dump_stack(); 10177 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 10178 } 10179 EXPORT_SYMBOL(__might_resched); 10180 10181 void __cant_sleep(const char *file, int line, int preempt_offset) 10182 { 10183 static unsigned long prev_jiffy; 10184 10185 if (irqs_disabled()) 10186 return; 10187 10188 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT)) 10189 return; 10190 10191 if (preempt_count() > preempt_offset) 10192 return; 10193 10194 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 10195 return; 10196 prev_jiffy = jiffies; 10197 10198 printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line); 10199 printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", 10200 in_atomic(), irqs_disabled(), 10201 current->pid, current->comm); 10202 10203 debug_show_held_locks(current); 10204 dump_stack(); 10205 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 10206 } 10207 EXPORT_SYMBOL_GPL(__cant_sleep); 10208 10209 #ifdef CONFIG_SMP 
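/*
 * __cant_migrate() backs the cant_migrate() annotation. As a usage sketch
 * (illustrative only; "snapshot_this_cpu_count()" and "struct my_counters"
 * are made up, not code from this file), a helper whose result is only
 * meaningful while the caller cannot migrate would document that
 * requirement like so:
 *
 *      static u64 snapshot_this_cpu_count(struct my_counters __percpu *c)
 *      {
 *              cant_migrate();
 *              return raw_cpu_ptr(c)->count;
 *      }
 *
 * The function below bails out early when migration is in fact impossible
 * (IRQs off, migrate_disable() in effect, or preemption disabled), so the
 * warning only fires in genuinely migratable contexts.
 */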
10210 void __cant_migrate(const char *file, int line) 10211 { 10212 static unsigned long prev_jiffy; 10213 10214 if (irqs_disabled()) 10215 return; 10216 10217 if (is_migration_disabled(current)) 10218 return; 10219 10220 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT)) 10221 return; 10222 10223 if (preempt_count() > 0) 10224 return; 10225 10226 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 10227 return; 10228 prev_jiffy = jiffies; 10229 10230 pr_err("BUG: assuming non migratable context at %s:%d\n", file, line); 10231 pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n", 10232 in_atomic(), irqs_disabled(), is_migration_disabled(current), 10233 current->pid, current->comm); 10234 10235 debug_show_held_locks(current); 10236 dump_stack(); 10237 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 10238 } 10239 EXPORT_SYMBOL_GPL(__cant_migrate); 10240 #endif 10241 #endif 10242 10243 #ifdef CONFIG_MAGIC_SYSRQ 10244 void normalize_rt_tasks(void) 10245 { 10246 struct task_struct *g, *p; 10247 struct sched_attr attr = { 10248 .sched_policy = SCHED_NORMAL, 10249 }; 10250 10251 read_lock(&tasklist_lock); 10252 for_each_process_thread(g, p) { 10253 /* 10254 * Only normalize user tasks: 10255 */ 10256 if (p->flags & PF_KTHREAD) 10257 continue; 10258 10259 p->se.exec_start = 0; 10260 schedstat_set(p->stats.wait_start, 0); 10261 schedstat_set(p->stats.sleep_start, 0); 10262 schedstat_set(p->stats.block_start, 0); 10263 10264 if (!dl_task(p) && !rt_task(p)) { 10265 /* 10266 * Renice negative nice level userspace 10267 * tasks back to 0: 10268 */ 10269 if (task_nice(p) < 0) 10270 set_user_nice(p, 0); 10271 continue; 10272 } 10273 10274 __sched_setscheduler(p, &attr, false, false); 10275 } 10276 read_unlock(&tasklist_lock); 10277 } 10278 10279 #endif /* CONFIG_MAGIC_SYSRQ */ 10280 10281 #if defined(CONFIG_KGDB_KDB) 10282 /* 10283 * These functions are only useful for kdb. 10284 * 10285 * They can only be called when the whole system has been 10286 * stopped - every CPU needs to be quiescent, and no scheduling 10287 * activity can take place. Using them for anything else would 10288 * be a serious bug, and as a result, they aren't even visible 10289 * under any other configuration. 10290 */ 10291 10292 /** 10293 * curr_task - return the current task for a given CPU. 10294 * @cpu: the processor in question. 10295 * 10296 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 10297 * 10298 * Return: The current task for @cpu. 
10299 */ 10300 struct task_struct *curr_task(int cpu) 10301 { 10302 return cpu_curr(cpu); 10303 } 10304 10305 #endif /* defined(CONFIG_KGDB_KDB) */ 10306 10307 #ifdef CONFIG_CGROUP_SCHED 10308 /* task_group_lock serializes the addition/removal of task groups */ 10309 static DEFINE_SPINLOCK(task_group_lock); 10310 10311 static inline void alloc_uclamp_sched_group(struct task_group *tg, 10312 struct task_group *parent) 10313 { 10314 #ifdef CONFIG_UCLAMP_TASK_GROUP 10315 enum uclamp_id clamp_id; 10316 10317 for_each_clamp_id(clamp_id) { 10318 uclamp_se_set(&tg->uclamp_req[clamp_id], 10319 uclamp_none(clamp_id), false); 10320 tg->uclamp[clamp_id] = parent->uclamp[clamp_id]; 10321 } 10322 #endif 10323 } 10324 10325 static void sched_free_group(struct task_group *tg) 10326 { 10327 free_fair_sched_group(tg); 10328 free_rt_sched_group(tg); 10329 autogroup_free(tg); 10330 kmem_cache_free(task_group_cache, tg); 10331 } 10332 10333 static void sched_free_group_rcu(struct rcu_head *rcu) 10334 { 10335 sched_free_group(container_of(rcu, struct task_group, rcu)); 10336 } 10337 10338 static void sched_unregister_group(struct task_group *tg) 10339 { 10340 unregister_fair_sched_group(tg); 10341 unregister_rt_sched_group(tg); 10342 /* 10343 * We have to wait for yet another RCU grace period to expire, as 10344 * print_cfs_stats() might run concurrently. 10345 */ 10346 call_rcu(&tg->rcu, sched_free_group_rcu); 10347 } 10348 10349 /* allocate runqueue etc for a new task group */ 10350 struct task_group *sched_create_group(struct task_group *parent) 10351 { 10352 struct task_group *tg; 10353 10354 tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO); 10355 if (!tg) 10356 return ERR_PTR(-ENOMEM); 10357 10358 if (!alloc_fair_sched_group(tg, parent)) 10359 goto err; 10360 10361 if (!alloc_rt_sched_group(tg, parent)) 10362 goto err; 10363 10364 alloc_uclamp_sched_group(tg, parent); 10365 10366 return tg; 10367 10368 err: 10369 sched_free_group(tg); 10370 return ERR_PTR(-ENOMEM); 10371 } 10372 10373 void sched_online_group(struct task_group *tg, struct task_group *parent) 10374 { 10375 unsigned long flags; 10376 10377 spin_lock_irqsave(&task_group_lock, flags); 10378 list_add_rcu(&tg->list, &task_groups); 10379 10380 /* Root should already exist: */ 10381 WARN_ON(!parent); 10382 10383 tg->parent = parent; 10384 INIT_LIST_HEAD(&tg->children); 10385 list_add_rcu(&tg->siblings, &parent->children); 10386 spin_unlock_irqrestore(&task_group_lock, flags); 10387 10388 online_fair_sched_group(tg); 10389 } 10390 10391 /* rcu callback to free various structures associated with a task group */ 10392 static void sched_unregister_group_rcu(struct rcu_head *rhp) 10393 { 10394 /* Now it should be safe to free those cfs_rqs: */ 10395 sched_unregister_group(container_of(rhp, struct task_group, rcu)); 10396 } 10397 10398 void sched_destroy_group(struct task_group *tg) 10399 { 10400 /* Wait for possible concurrent references to cfs_rqs complete: */ 10401 call_rcu(&tg->rcu, sched_unregister_group_rcu); 10402 } 10403 10404 void sched_release_group(struct task_group *tg) 10405 { 10406 unsigned long flags; 10407 10408 /* 10409 * Unlink first, to avoid walk_tg_tree_from() from finding us (via 10410 * sched_cfs_period_timer()). 10411 * 10412 * For this to be effective, we have to wait for all pending users of 10413 * this task group to leave their RCU critical section to ensure no new 10414 * user will see our dying task group any more. Specifically ensure 10415 * that tg_unthrottle_up() won't add decayed cfs_rq's to it. 
10416 * 10417 * We therefore defer calling unregister_fair_sched_group() to 10418 * sched_unregister_group() which is guaranteed to get called only after the 10419 * current RCU grace period has expired. 10420 */ 10421 spin_lock_irqsave(&task_group_lock, flags); 10422 list_del_rcu(&tg->list); 10423 list_del_rcu(&tg->siblings); 10424 spin_unlock_irqrestore(&task_group_lock, flags); 10425 } 10426 10427 static struct task_group *sched_get_task_group(struct task_struct *tsk) 10428 { 10429 struct task_group *tg; 10430 10431 /* 10432 * All callers are synchronized by task_rq_lock(); we do not use RCU 10433 * which is pointless here. Thus, we pass "true" to task_css_check() 10434 * to prevent lockdep warnings. 10435 */ 10436 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true), 10437 struct task_group, css); 10438 tg = autogroup_task_group(tsk, tg); 10439 10440 return tg; 10441 } 10442 10443 static void sched_change_group(struct task_struct *tsk, struct task_group *group) 10444 { 10445 tsk->sched_task_group = group; 10446 10447 #ifdef CONFIG_FAIR_GROUP_SCHED 10448 if (tsk->sched_class->task_change_group) 10449 tsk->sched_class->task_change_group(tsk); 10450 else 10451 #endif 10452 set_task_rq(tsk, task_cpu(tsk)); 10453 } 10454 10455 /* 10456 * Change task's runqueue when it moves between groups. 10457 * 10458 * The caller of this function should have put the task in its new group by 10459 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect 10460 * its new group. 10461 */ 10462 void sched_move_task(struct task_struct *tsk) 10463 { 10464 int queued, running, queue_flags = 10465 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; 10466 struct task_group *group; 10467 struct rq *rq; 10468 10469 CLASS(task_rq_lock, rq_guard)(tsk); 10470 rq = rq_guard.rq; 10471 10472 /* 10473 * Esp. with SCHED_AUTOGROUP enabled it is possible to get superfluous 10474 * group changes. 10475 */ 10476 group = sched_get_task_group(tsk); 10477 if (group == tsk->sched_task_group) 10478 return; 10479 10480 update_rq_clock(rq); 10481 10482 running = task_current(rq, tsk); 10483 queued = task_on_rq_queued(tsk); 10484 10485 if (queued) 10486 dequeue_task(rq, tsk, queue_flags); 10487 if (running) 10488 put_prev_task(rq, tsk); 10489 10490 sched_change_group(tsk, group); 10491 10492 if (queued) 10493 enqueue_task(rq, tsk, queue_flags); 10494 if (running) { 10495 set_next_task(rq, tsk); 10496 /* 10497 * After changing group, the running task may have joined a 10498 * throttled one but it's still the running task. Trigger a 10499 * resched to make sure that task can still run. 10500 */ 10501 resched_curr(rq); 10502 } 10503 } 10504 10505 static inline struct task_group *css_tg(struct cgroup_subsys_state *css) 10506 { 10507 return css ?
container_of(css, struct task_group, css) : NULL; 10508 } 10509 10510 static struct cgroup_subsys_state * 10511 cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 10512 { 10513 struct task_group *parent = css_tg(parent_css); 10514 struct task_group *tg; 10515 10516 if (!parent) { 10517 /* This is early initialization for the top cgroup */ 10518 return &root_task_group.css; 10519 } 10520 10521 tg = sched_create_group(parent); 10522 if (IS_ERR(tg)) 10523 return ERR_PTR(-ENOMEM); 10524 10525 return &tg->css; 10526 } 10527 10528 /* Expose task group only after completing cgroup initialization */ 10529 static int cpu_cgroup_css_online(struct cgroup_subsys_state *css) 10530 { 10531 struct task_group *tg = css_tg(css); 10532 struct task_group *parent = css_tg(css->parent); 10533 10534 if (parent) 10535 sched_online_group(tg, parent); 10536 10537 #ifdef CONFIG_UCLAMP_TASK_GROUP 10538 /* Propagate the effective uclamp value for the new group */ 10539 guard(mutex)(&uclamp_mutex); 10540 guard(rcu)(); 10541 cpu_util_update_eff(css); 10542 #endif 10543 10544 return 0; 10545 } 10546 10547 static void cpu_cgroup_css_released(struct cgroup_subsys_state *css) 10548 { 10549 struct task_group *tg = css_tg(css); 10550 10551 sched_release_group(tg); 10552 } 10553 10554 static void cpu_cgroup_css_free(struct cgroup_subsys_state *css) 10555 { 10556 struct task_group *tg = css_tg(css); 10557 10558 /* 10559 * Relies on the RCU grace period between css_released() and this. 10560 */ 10561 sched_unregister_group(tg); 10562 } 10563 10564 #ifdef CONFIG_RT_GROUP_SCHED 10565 static int cpu_cgroup_can_attach(struct cgroup_taskset *tset) 10566 { 10567 struct task_struct *task; 10568 struct cgroup_subsys_state *css; 10569 10570 cgroup_taskset_for_each(task, css, tset) { 10571 if (!sched_rt_can_attach(css_tg(css), task)) 10572 return -EINVAL; 10573 } 10574 return 0; 10575 } 10576 #endif 10577 10578 static void cpu_cgroup_attach(struct cgroup_taskset *tset) 10579 { 10580 struct task_struct *task; 10581 struct cgroup_subsys_state *css; 10582 10583 cgroup_taskset_for_each(task, css, tset) 10584 sched_move_task(task); 10585 } 10586 10587 #ifdef CONFIG_UCLAMP_TASK_GROUP 10588 static void cpu_util_update_eff(struct cgroup_subsys_state *css) 10589 { 10590 struct cgroup_subsys_state *top_css = css; 10591 struct uclamp_se *uc_parent = NULL; 10592 struct uclamp_se *uc_se = NULL; 10593 unsigned int eff[UCLAMP_CNT]; 10594 enum uclamp_id clamp_id; 10595 unsigned int clamps; 10596 10597 lockdep_assert_held(&uclamp_mutex); 10598 SCHED_WARN_ON(!rcu_read_lock_held()); 10599 10600 css_for_each_descendant_pre(css, top_css) { 10601 uc_parent = css_tg(css)->parent 10602 ? 
css_tg(css)->parent->uclamp : NULL; 10603 10604 for_each_clamp_id(clamp_id) { 10605 /* Assume effective clamps matches requested clamps */ 10606 eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value; 10607 /* Cap effective clamps with parent's effective clamps */ 10608 if (uc_parent && 10609 eff[clamp_id] > uc_parent[clamp_id].value) { 10610 eff[clamp_id] = uc_parent[clamp_id].value; 10611 } 10612 } 10613 /* Ensure protection is always capped by limit */ 10614 eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]); 10615 10616 /* Propagate most restrictive effective clamps */ 10617 clamps = 0x0; 10618 uc_se = css_tg(css)->uclamp; 10619 for_each_clamp_id(clamp_id) { 10620 if (eff[clamp_id] == uc_se[clamp_id].value) 10621 continue; 10622 uc_se[clamp_id].value = eff[clamp_id]; 10623 uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]); 10624 clamps |= (0x1 << clamp_id); 10625 } 10626 if (!clamps) { 10627 css = css_rightmost_descendant(css); 10628 continue; 10629 } 10630 10631 /* Immediately update descendants RUNNABLE tasks */ 10632 uclamp_update_active_tasks(css); 10633 } 10634 } 10635 10636 /* 10637 * Integer 10^N with a given N exponent by casting to integer the literal "1eN" 10638 * C expression. Since there is no way to convert a macro argument (N) into a 10639 * character constant, use two levels of macros. 10640 */ 10641 #define _POW10(exp) ((unsigned int)1e##exp) 10642 #define POW10(exp) _POW10(exp) 10643 10644 struct uclamp_request { 10645 #define UCLAMP_PERCENT_SHIFT 2 10646 #define UCLAMP_PERCENT_SCALE (100 * POW10(UCLAMP_PERCENT_SHIFT)) 10647 s64 percent; 10648 u64 util; 10649 int ret; 10650 }; 10651 10652 static inline struct uclamp_request 10653 capacity_from_percent(char *buf) 10654 { 10655 struct uclamp_request req = { 10656 .percent = UCLAMP_PERCENT_SCALE, 10657 .util = SCHED_CAPACITY_SCALE, 10658 .ret = 0, 10659 }; 10660 10661 buf = strim(buf); 10662 if (strcmp(buf, "max")) { 10663 req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT, 10664 &req.percent); 10665 if (req.ret) 10666 return req; 10667 if ((u64)req.percent > UCLAMP_PERCENT_SCALE) { 10668 req.ret = -ERANGE; 10669 return req; 10670 } 10671 10672 req.util = req.percent << SCHED_CAPACITY_SHIFT; 10673 req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE); 10674 } 10675 10676 return req; 10677 } 10678 10679 static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf, 10680 size_t nbytes, loff_t off, 10681 enum uclamp_id clamp_id) 10682 { 10683 struct uclamp_request req; 10684 struct task_group *tg; 10685 10686 req = capacity_from_percent(buf); 10687 if (req.ret) 10688 return req.ret; 10689 10690 static_branch_enable(&sched_uclamp_used); 10691 10692 guard(mutex)(&uclamp_mutex); 10693 guard(rcu)(); 10694 10695 tg = css_tg(of_css(of)); 10696 if (tg->uclamp_req[clamp_id].value != req.util) 10697 uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false); 10698 10699 /* 10700 * Because of not recoverable conversion rounding we keep track of the 10701 * exact requested value 10702 */ 10703 tg->uclamp_pct[clamp_id] = req.percent; 10704 10705 /* Update effective clamps to track the most restrictive value */ 10706 cpu_util_update_eff(of_css(of)); 10707 10708 return nbytes; 10709 } 10710 10711 static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of, 10712 char *buf, size_t nbytes, 10713 loff_t off) 10714 { 10715 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MIN); 10716 } 10717 10718 static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of, 10719 char *buf, size_t nbytes, 
10720 loff_t off) 10721 { 10722 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MAX); 10723 } 10724 10725 static inline void cpu_uclamp_print(struct seq_file *sf, 10726 enum uclamp_id clamp_id) 10727 { 10728 struct task_group *tg; 10729 u64 util_clamp; 10730 u64 percent; 10731 u32 rem; 10732 10733 scoped_guard (rcu) { 10734 tg = css_tg(seq_css(sf)); 10735 util_clamp = tg->uclamp_req[clamp_id].value; 10736 } 10737 10738 if (util_clamp == SCHED_CAPACITY_SCALE) { 10739 seq_puts(sf, "max\n"); 10740 return; 10741 } 10742 10743 percent = tg->uclamp_pct[clamp_id]; 10744 percent = div_u64_rem(percent, POW10(UCLAMP_PERCENT_SHIFT), &rem); 10745 seq_printf(sf, "%llu.%0*u\n", percent, UCLAMP_PERCENT_SHIFT, rem); 10746 } 10747 10748 static int cpu_uclamp_min_show(struct seq_file *sf, void *v) 10749 { 10750 cpu_uclamp_print(sf, UCLAMP_MIN); 10751 return 0; 10752 } 10753 10754 static int cpu_uclamp_max_show(struct seq_file *sf, void *v) 10755 { 10756 cpu_uclamp_print(sf, UCLAMP_MAX); 10757 return 0; 10758 } 10759 #endif /* CONFIG_UCLAMP_TASK_GROUP */ 10760 10761 #ifdef CONFIG_FAIR_GROUP_SCHED 10762 static int cpu_shares_write_u64(struct cgroup_subsys_state *css, 10763 struct cftype *cftype, u64 shareval) 10764 { 10765 if (shareval > scale_load_down(ULONG_MAX)) 10766 shareval = MAX_SHARES; 10767 return sched_group_set_shares(css_tg(css), scale_load(shareval)); 10768 } 10769 10770 static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css, 10771 struct cftype *cft) 10772 { 10773 struct task_group *tg = css_tg(css); 10774 10775 return (u64) scale_load_down(tg->shares); 10776 } 10777 10778 #ifdef CONFIG_CFS_BANDWIDTH 10779 static DEFINE_MUTEX(cfs_constraints_mutex); 10780 10781 const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */ 10782 static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */ 10783 /* More than 203 days if BW_SHIFT equals 20. */ 10784 static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC; 10785 10786 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime); 10787 10788 static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, 10789 u64 burst) 10790 { 10791 int i, ret = 0, runtime_enabled, runtime_was_enabled; 10792 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 10793 10794 if (tg == &root_task_group) 10795 return -EINVAL; 10796 10797 /* 10798 * Ensure we have at some amount of bandwidth every period. This is 10799 * to prevent reaching a state of large arrears when throttled via 10800 * entity_tick() resulting in prolonged exit starvation. 10801 */ 10802 if (quota < min_cfs_quota_period || period < min_cfs_quota_period) 10803 return -EINVAL; 10804 10805 /* 10806 * Likewise, bound things on the other side by preventing insane quota 10807 * periods. This also allows us to normalize in computing quota 10808 * feasibility. 10809 */ 10810 if (period > max_cfs_quota_period) 10811 return -EINVAL; 10812 10813 /* 10814 * Bound quota to defend quota against overflow during bandwidth shift. 10815 */ 10816 if (quota != RUNTIME_INF && quota > max_cfs_runtime) 10817 return -EINVAL; 10818 10819 if (quota != RUNTIME_INF && (burst > quota || 10820 burst + quota > max_cfs_runtime)) 10821 return -EINVAL; 10822 10823 /* 10824 * Prevent race between setting of cfs_rq->runtime_enabled and 10825 * unthrottle_offline_cfs_rqs(). 
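 *
 * (Stepping back to the validation above, a worked example with
 *  illustrative numbers rather than defaults: period = 100ms and
 *  quota = 50ms allow the group 50ms of runtime per 100ms window,
 *  i.e. half a CPU in total across all CPUs; a quota or period below
 *  1ms, a period above 1s, or a burst larger than the quota is
 *  rejected with -EINVAL.)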
10826 */ 10827 guard(cpus_read_lock)(); 10828 guard(mutex)(&cfs_constraints_mutex); 10829 10830 ret = __cfs_schedulable(tg, period, quota); 10831 if (ret) 10832 return ret; 10833 10834 runtime_enabled = quota != RUNTIME_INF; 10835 runtime_was_enabled = cfs_b->quota != RUNTIME_INF; 10836 /* 10837 * If we need to toggle cfs_bandwidth_used, off->on must occur 10838 * before making related changes, and on->off must occur afterwards 10839 */ 10840 if (runtime_enabled && !runtime_was_enabled) 10841 cfs_bandwidth_usage_inc(); 10842 10843 scoped_guard (raw_spinlock_irq, &cfs_b->lock) { 10844 cfs_b->period = ns_to_ktime(period); 10845 cfs_b->quota = quota; 10846 cfs_b->burst = burst; 10847 10848 __refill_cfs_bandwidth_runtime(cfs_b); 10849 10850 /* 10851 * Restart the period timer (if active) to handle new 10852 * period expiry: 10853 */ 10854 if (runtime_enabled) 10855 start_cfs_bandwidth(cfs_b); 10856 } 10857 10858 for_each_online_cpu(i) { 10859 struct cfs_rq *cfs_rq = tg->cfs_rq[i]; 10860 struct rq *rq = cfs_rq->rq; 10861 10862 guard(rq_lock_irq)(rq); 10863 cfs_rq->runtime_enabled = runtime_enabled; 10864 cfs_rq->runtime_remaining = 0; 10865 10866 if (cfs_rq->throttled) 10867 unthrottle_cfs_rq(cfs_rq); 10868 } 10869 10870 if (runtime_was_enabled && !runtime_enabled) 10871 cfs_bandwidth_usage_dec(); 10872 10873 return 0; 10874 } 10875 10876 static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) 10877 { 10878 u64 quota, period, burst; 10879 10880 period = ktime_to_ns(tg->cfs_bandwidth.period); 10881 burst = tg->cfs_bandwidth.burst; 10882 if (cfs_quota_us < 0) 10883 quota = RUNTIME_INF; 10884 else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC) 10885 quota = (u64)cfs_quota_us * NSEC_PER_USEC; 10886 else 10887 return -EINVAL; 10888 10889 return tg_set_cfs_bandwidth(tg, period, quota, burst); 10890 } 10891 10892 static long tg_get_cfs_quota(struct task_group *tg) 10893 { 10894 u64 quota_us; 10895 10896 if (tg->cfs_bandwidth.quota == RUNTIME_INF) 10897 return -1; 10898 10899 quota_us = tg->cfs_bandwidth.quota; 10900 do_div(quota_us, NSEC_PER_USEC); 10901 10902 return quota_us; 10903 } 10904 10905 static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) 10906 { 10907 u64 quota, period, burst; 10908 10909 if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC) 10910 return -EINVAL; 10911 10912 period = (u64)cfs_period_us * NSEC_PER_USEC; 10913 quota = tg->cfs_bandwidth.quota; 10914 burst = tg->cfs_bandwidth.burst; 10915 10916 return tg_set_cfs_bandwidth(tg, period, quota, burst); 10917 } 10918 10919 static long tg_get_cfs_period(struct task_group *tg) 10920 { 10921 u64 cfs_period_us; 10922 10923 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period); 10924 do_div(cfs_period_us, NSEC_PER_USEC); 10925 10926 return cfs_period_us; 10927 } 10928 10929 static int tg_set_cfs_burst(struct task_group *tg, long cfs_burst_us) 10930 { 10931 u64 quota, period, burst; 10932 10933 if ((u64)cfs_burst_us > U64_MAX / NSEC_PER_USEC) 10934 return -EINVAL; 10935 10936 burst = (u64)cfs_burst_us * NSEC_PER_USEC; 10937 period = ktime_to_ns(tg->cfs_bandwidth.period); 10938 quota = tg->cfs_bandwidth.quota; 10939 10940 return tg_set_cfs_bandwidth(tg, period, quota, burst); 10941 } 10942 10943 static long tg_get_cfs_burst(struct task_group *tg) 10944 { 10945 u64 burst_us; 10946 10947 burst_us = tg->cfs_bandwidth.burst; 10948 do_div(burst_us, NSEC_PER_USEC); 10949 10950 return burst_us; 10951 } 10952 10953 static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css, 10954 struct cftype *cft) 10955 { 
10956 return tg_get_cfs_quota(css_tg(css)); 10957 } 10958 10959 static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css, 10960 struct cftype *cftype, s64 cfs_quota_us) 10961 { 10962 return tg_set_cfs_quota(css_tg(css), cfs_quota_us); 10963 } 10964 10965 static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css, 10966 struct cftype *cft) 10967 { 10968 return tg_get_cfs_period(css_tg(css)); 10969 } 10970 10971 static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css, 10972 struct cftype *cftype, u64 cfs_period_us) 10973 { 10974 return tg_set_cfs_period(css_tg(css), cfs_period_us); 10975 } 10976 10977 static u64 cpu_cfs_burst_read_u64(struct cgroup_subsys_state *css, 10978 struct cftype *cft) 10979 { 10980 return tg_get_cfs_burst(css_tg(css)); 10981 } 10982 10983 static int cpu_cfs_burst_write_u64(struct cgroup_subsys_state *css, 10984 struct cftype *cftype, u64 cfs_burst_us) 10985 { 10986 return tg_set_cfs_burst(css_tg(css), cfs_burst_us); 10987 } 10988 10989 struct cfs_schedulable_data { 10990 struct task_group *tg; 10991 u64 period, quota; 10992 }; 10993 10994 /* 10995 * normalize group quota/period to be quota/max_period 10996 * note: units are usecs 10997 */ 10998 static u64 normalize_cfs_quota(struct task_group *tg, 10999 struct cfs_schedulable_data *d) 11000 { 11001 u64 quota, period; 11002 11003 if (tg == d->tg) { 11004 period = d->period; 11005 quota = d->quota; 11006 } else { 11007 period = tg_get_cfs_period(tg); 11008 quota = tg_get_cfs_quota(tg); 11009 } 11010 11011 /* note: these should typically be equivalent */ 11012 if (quota == RUNTIME_INF || quota == -1) 11013 return RUNTIME_INF; 11014 11015 return to_ratio(period, quota); 11016 } 11017 11018 static int tg_cfs_schedulable_down(struct task_group *tg, void *data) 11019 { 11020 struct cfs_schedulable_data *d = data; 11021 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 11022 s64 quota = 0, parent_quota = -1; 11023 11024 if (!tg->parent) { 11025 quota = RUNTIME_INF; 11026 } else { 11027 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth; 11028 11029 quota = normalize_cfs_quota(tg, d); 11030 parent_quota = parent_b->hierarchical_quota; 11031 11032 /* 11033 * Ensure max(child_quota) <= parent_quota. On cgroup2, 11034 * always take the non-RUNTIME_INF min. On cgroup1, only 11035 * inherit when no limit is set. In both cases this is used 11036 * by the scheduler to determine if a given CFS task has a 11037 * bandwidth constraint at some higher level. 
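 *
 * (Illustration with made-up values: if a child group requests more
 *  bandwidth than its parent's hierarchical quota allows - say the
 *  parent is limited to half a CPU and the child asks for a full one -
 *  the cgroup2 ("dfl") branch below clamps the child's effective quota
 *  to the parent's, while the cgroup1 branch rejects the write with
 *  -EINVAL.)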
11038 */ 11039 if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) { 11040 if (quota == RUNTIME_INF) 11041 quota = parent_quota; 11042 else if (parent_quota != RUNTIME_INF) 11043 quota = min(quota, parent_quota); 11044 } else { 11045 if (quota == RUNTIME_INF) 11046 quota = parent_quota; 11047 else if (parent_quota != RUNTIME_INF && quota > parent_quota) 11048 return -EINVAL; 11049 } 11050 } 11051 cfs_b->hierarchical_quota = quota; 11052 11053 return 0; 11054 } 11055 11056 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota) 11057 { 11058 struct cfs_schedulable_data data = { 11059 .tg = tg, 11060 .period = period, 11061 .quota = quota, 11062 }; 11063 11064 if (quota != RUNTIME_INF) { 11065 do_div(data.period, NSEC_PER_USEC); 11066 do_div(data.quota, NSEC_PER_USEC); 11067 } 11068 11069 guard(rcu)(); 11070 return walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data); 11071 } 11072 11073 static int cpu_cfs_stat_show(struct seq_file *sf, void *v) 11074 { 11075 struct task_group *tg = css_tg(seq_css(sf)); 11076 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 11077 11078 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods); 11079 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled); 11080 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time); 11081 11082 if (schedstat_enabled() && tg != &root_task_group) { 11083 struct sched_statistics *stats; 11084 u64 ws = 0; 11085 int i; 11086 11087 for_each_possible_cpu(i) { 11088 stats = __schedstats_from_se(tg->se[i]); 11089 ws += schedstat_val(stats->wait_sum); 11090 } 11091 11092 seq_printf(sf, "wait_sum %llu\n", ws); 11093 } 11094 11095 seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst); 11096 seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time); 11097 11098 return 0; 11099 } 11100 11101 static u64 throttled_time_self(struct task_group *tg) 11102 { 11103 int i; 11104 u64 total = 0; 11105 11106 for_each_possible_cpu(i) { 11107 total += READ_ONCE(tg->cfs_rq[i]->throttled_clock_self_time); 11108 } 11109 11110 return total; 11111 } 11112 11113 static int cpu_cfs_local_stat_show(struct seq_file *sf, void *v) 11114 { 11115 struct task_group *tg = css_tg(seq_css(sf)); 11116 11117 seq_printf(sf, "throttled_time %llu\n", throttled_time_self(tg)); 11118 11119 return 0; 11120 } 11121 #endif /* CONFIG_CFS_BANDWIDTH */ 11122 #endif /* CONFIG_FAIR_GROUP_SCHED */ 11123 11124 #ifdef CONFIG_RT_GROUP_SCHED 11125 static int cpu_rt_runtime_write(struct cgroup_subsys_state *css, 11126 struct cftype *cft, s64 val) 11127 { 11128 return sched_group_set_rt_runtime(css_tg(css), val); 11129 } 11130 11131 static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css, 11132 struct cftype *cft) 11133 { 11134 return sched_group_rt_runtime(css_tg(css)); 11135 } 11136 11137 static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css, 11138 struct cftype *cftype, u64 rt_period_us) 11139 { 11140 return sched_group_set_rt_period(css_tg(css), rt_period_us); 11141 } 11142 11143 static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css, 11144 struct cftype *cft) 11145 { 11146 return sched_group_rt_period(css_tg(css)); 11147 } 11148 #endif /* CONFIG_RT_GROUP_SCHED */ 11149 11150 #ifdef CONFIG_FAIR_GROUP_SCHED 11151 static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css, 11152 struct cftype *cft) 11153 { 11154 return css_tg(css)->idle; 11155 } 11156 11157 static int cpu_idle_write_s64(struct cgroup_subsys_state *css, 11158 struct cftype *cft, s64 idle) 11159 { 11160 return sched_group_set_idle(css_tg(css), idle); 11161 } 11162 #endif 11163 
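/*
 * Legacy (cgroup v1) interface files. Each entry below appears under the
 * controller mount prefixed with "cpu.", e.g. cpu.shares or
 * cpu.cfs_quota_us. As a usage sketch (the mount point and group name are
 * illustrative, not taken from this file):
 *
 *      # echo 512 > /sys/fs/cgroup/cpu/mygrp/cpu.shares
 *      # echo 50000 > /sys/fs/cgroup/cpu/mygrp/cpu.cfs_quota_us
 *
 * halves the group's weight relative to the default of 1024 and, with the
 * default 100000us period, caps it at half a CPU, via
 * cpu_shares_write_u64() and tg_set_cfs_quota() above.
 */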
11164 static struct cftype cpu_legacy_files[] = { 11165 #ifdef CONFIG_FAIR_GROUP_SCHED 11166 { 11167 .name = "shares", 11168 .read_u64 = cpu_shares_read_u64, 11169 .write_u64 = cpu_shares_write_u64, 11170 }, 11171 { 11172 .name = "idle", 11173 .read_s64 = cpu_idle_read_s64, 11174 .write_s64 = cpu_idle_write_s64, 11175 }, 11176 #endif 11177 #ifdef CONFIG_CFS_BANDWIDTH 11178 { 11179 .name = "cfs_quota_us", 11180 .read_s64 = cpu_cfs_quota_read_s64, 11181 .write_s64 = cpu_cfs_quota_write_s64, 11182 }, 11183 { 11184 .name = "cfs_period_us", 11185 .read_u64 = cpu_cfs_period_read_u64, 11186 .write_u64 = cpu_cfs_period_write_u64, 11187 }, 11188 { 11189 .name = "cfs_burst_us", 11190 .read_u64 = cpu_cfs_burst_read_u64, 11191 .write_u64 = cpu_cfs_burst_write_u64, 11192 }, 11193 { 11194 .name = "stat", 11195 .seq_show = cpu_cfs_stat_show, 11196 }, 11197 { 11198 .name = "stat.local", 11199 .seq_show = cpu_cfs_local_stat_show, 11200 }, 11201 #endif 11202 #ifdef CONFIG_RT_GROUP_SCHED 11203 { 11204 .name = "rt_runtime_us", 11205 .read_s64 = cpu_rt_runtime_read, 11206 .write_s64 = cpu_rt_runtime_write, 11207 }, 11208 { 11209 .name = "rt_period_us", 11210 .read_u64 = cpu_rt_period_read_uint, 11211 .write_u64 = cpu_rt_period_write_uint, 11212 }, 11213 #endif 11214 #ifdef CONFIG_UCLAMP_TASK_GROUP 11215 { 11216 .name = "uclamp.min", 11217 .flags = CFTYPE_NOT_ON_ROOT, 11218 .seq_show = cpu_uclamp_min_show, 11219 .write = cpu_uclamp_min_write, 11220 }, 11221 { 11222 .name = "uclamp.max", 11223 .flags = CFTYPE_NOT_ON_ROOT, 11224 .seq_show = cpu_uclamp_max_show, 11225 .write = cpu_uclamp_max_write, 11226 }, 11227 #endif 11228 { } /* Terminate */ 11229 }; 11230 11231 static int cpu_extra_stat_show(struct seq_file *sf, 11232 struct cgroup_subsys_state *css) 11233 { 11234 #ifdef CONFIG_CFS_BANDWIDTH 11235 { 11236 struct task_group *tg = css_tg(css); 11237 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 11238 u64 throttled_usec, burst_usec; 11239 11240 throttled_usec = cfs_b->throttled_time; 11241 do_div(throttled_usec, NSEC_PER_USEC); 11242 burst_usec = cfs_b->burst_time; 11243 do_div(burst_usec, NSEC_PER_USEC); 11244 11245 seq_printf(sf, "nr_periods %d\n" 11246 "nr_throttled %d\n" 11247 "throttled_usec %llu\n" 11248 "nr_bursts %d\n" 11249 "burst_usec %llu\n", 11250 cfs_b->nr_periods, cfs_b->nr_throttled, 11251 throttled_usec, cfs_b->nr_burst, burst_usec); 11252 } 11253 #endif 11254 return 0; 11255 } 11256 11257 static int cpu_local_stat_show(struct seq_file *sf, 11258 struct cgroup_subsys_state *css) 11259 { 11260 #ifdef CONFIG_CFS_BANDWIDTH 11261 { 11262 struct task_group *tg = css_tg(css); 11263 u64 throttled_self_usec; 11264 11265 throttled_self_usec = throttled_time_self(tg); 11266 do_div(throttled_self_usec, NSEC_PER_USEC); 11267 11268 seq_printf(sf, "throttled_usec %llu\n", 11269 throttled_self_usec); 11270 } 11271 #endif 11272 return 0; 11273 } 11274 11275 #ifdef CONFIG_FAIR_GROUP_SCHED 11276 static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css, 11277 struct cftype *cft) 11278 { 11279 struct task_group *tg = css_tg(css); 11280 u64 weight = scale_load_down(tg->shares); 11281 11282 return DIV_ROUND_CLOSEST_ULL(weight * CGROUP_WEIGHT_DFL, 1024); 11283 } 11284 11285 static int cpu_weight_write_u64(struct cgroup_subsys_state *css, 11286 struct cftype *cft, u64 weight) 11287 { 11288 /* 11289 * cgroup weight knobs should use the common MIN, DFL and MAX 11290 * values which are 1, 100 and 10000 respectively. 
While it loses 11291 * a bit of range on both ends, it maps pretty well onto the shares 11292 * value used by scheduler and the round-trip conversions preserve 11293 * the original value over the entire range. 11294 */ 11295 if (weight < CGROUP_WEIGHT_MIN || weight > CGROUP_WEIGHT_MAX) 11296 return -ERANGE; 11297 11298 weight = DIV_ROUND_CLOSEST_ULL(weight * 1024, CGROUP_WEIGHT_DFL); 11299 11300 return sched_group_set_shares(css_tg(css), scale_load(weight)); 11301 } 11302 11303 static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css, 11304 struct cftype *cft) 11305 { 11306 unsigned long weight = scale_load_down(css_tg(css)->shares); 11307 int last_delta = INT_MAX; 11308 int prio, delta; 11309 11310 /* find the closest nice value to the current weight */ 11311 for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) { 11312 delta = abs(sched_prio_to_weight[prio] - weight); 11313 if (delta >= last_delta) 11314 break; 11315 last_delta = delta; 11316 } 11317 11318 return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO); 11319 } 11320 11321 static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css, 11322 struct cftype *cft, s64 nice) 11323 { 11324 unsigned long weight; 11325 int idx; 11326 11327 if (nice < MIN_NICE || nice > MAX_NICE) 11328 return -ERANGE; 11329 11330 idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO; 11331 idx = array_index_nospec(idx, 40); 11332 weight = sched_prio_to_weight[idx]; 11333 11334 return sched_group_set_shares(css_tg(css), scale_load(weight)); 11335 } 11336 #endif 11337 11338 static void __maybe_unused cpu_period_quota_print(struct seq_file *sf, 11339 long period, long quota) 11340 { 11341 if (quota < 0) 11342 seq_puts(sf, "max"); 11343 else 11344 seq_printf(sf, "%ld", quota); 11345 11346 seq_printf(sf, " %ld\n", period); 11347 } 11348 11349 /* caller should put the current value in *@periodp before calling */ 11350 static int __maybe_unused cpu_period_quota_parse(char *buf, 11351 u64 *periodp, u64 *quotap) 11352 { 11353 char tok[21]; /* U64_MAX */ 11354 11355 if (sscanf(buf, "%20s %llu", tok, periodp) < 1) 11356 return -EINVAL; 11357 11358 *periodp *= NSEC_PER_USEC; 11359 11360 if (sscanf(tok, "%llu", quotap)) 11361 *quotap *= NSEC_PER_USEC; 11362 else if (!strcmp(tok, "max")) 11363 *quotap = RUNTIME_INF; 11364 else 11365 return -EINVAL; 11366 11367 return 0; 11368 } 11369 11370 #ifdef CONFIG_CFS_BANDWIDTH 11371 static int cpu_max_show(struct seq_file *sf, void *v) 11372 { 11373 struct task_group *tg = css_tg(seq_css(sf)); 11374 11375 cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg)); 11376 return 0; 11377 } 11378 11379 static ssize_t cpu_max_write(struct kernfs_open_file *of, 11380 char *buf, size_t nbytes, loff_t off) 11381 { 11382 struct task_group *tg = css_tg(of_css(of)); 11383 u64 period = tg_get_cfs_period(tg); 11384 u64 burst = tg_get_cfs_burst(tg); 11385 u64 quota; 11386 int ret; 11387 11388 ret = cpu_period_quota_parse(buf, &period, "a); 11389 if (!ret) 11390 ret = tg_set_cfs_bandwidth(tg, period, quota, burst); 11391 return ret ?: nbytes; 11392 } 11393 #endif 11394 11395 static struct cftype cpu_files[] = { 11396 #ifdef CONFIG_FAIR_GROUP_SCHED 11397 { 11398 .name = "weight", 11399 .flags = CFTYPE_NOT_ON_ROOT, 11400 .read_u64 = cpu_weight_read_u64, 11401 .write_u64 = cpu_weight_write_u64, 11402 }, 11403 { 11404 .name = "weight.nice", 11405 .flags = CFTYPE_NOT_ON_ROOT, 11406 .read_s64 = cpu_weight_nice_read_s64, 11407 .write_s64 = cpu_weight_nice_write_s64, 11408 }, 11409 { 11410 .name = "idle", 11411 .flags = 
CFTYPE_NOT_ON_ROOT, 11412 .read_s64 = cpu_idle_read_s64, 11413 .write_s64 = cpu_idle_write_s64, 11414 }, 11415 #endif 11416 #ifdef CONFIG_CFS_BANDWIDTH 11417 { 11418 .name = "max", 11419 .flags = CFTYPE_NOT_ON_ROOT, 11420 .seq_show = cpu_max_show, 11421 .write = cpu_max_write, 11422 }, 11423 { 11424 .name = "max.burst", 11425 .flags = CFTYPE_NOT_ON_ROOT, 11426 .read_u64 = cpu_cfs_burst_read_u64, 11427 .write_u64 = cpu_cfs_burst_write_u64, 11428 }, 11429 #endif 11430 #ifdef CONFIG_UCLAMP_TASK_GROUP 11431 { 11432 .name = "uclamp.min", 11433 .flags = CFTYPE_NOT_ON_ROOT, 11434 .seq_show = cpu_uclamp_min_show, 11435 .write = cpu_uclamp_min_write, 11436 }, 11437 { 11438 .name = "uclamp.max", 11439 .flags = CFTYPE_NOT_ON_ROOT, 11440 .seq_show = cpu_uclamp_max_show, 11441 .write = cpu_uclamp_max_write, 11442 }, 11443 #endif 11444 { } /* terminate */ 11445 }; 11446 11447 struct cgroup_subsys cpu_cgrp_subsys = { 11448 .css_alloc = cpu_cgroup_css_alloc, 11449 .css_online = cpu_cgroup_css_online, 11450 .css_released = cpu_cgroup_css_released, 11451 .css_free = cpu_cgroup_css_free, 11452 .css_extra_stat_show = cpu_extra_stat_show, 11453 .css_local_stat_show = cpu_local_stat_show, 11454 #ifdef CONFIG_RT_GROUP_SCHED 11455 .can_attach = cpu_cgroup_can_attach, 11456 #endif 11457 .attach = cpu_cgroup_attach, 11458 .legacy_cftypes = cpu_legacy_files, 11459 .dfl_cftypes = cpu_files, 11460 .early_init = true, 11461 .threaded = true, 11462 }; 11463 11464 #endif /* CONFIG_CGROUP_SCHED */ 11465 11466 void dump_cpu_task(int cpu) 11467 { 11468 if (cpu == smp_processor_id() && in_hardirq()) { 11469 struct pt_regs *regs; 11470 11471 regs = get_irq_regs(); 11472 if (regs) { 11473 show_regs(regs); 11474 return; 11475 } 11476 } 11477 11478 if (trigger_single_cpu_backtrace(cpu)) 11479 return; 11480 11481 pr_info("Task dump for CPU %d:\n", cpu); 11482 sched_show_task(cpu_curr(cpu)); 11483 } 11484 11485 /* 11486 * Nice levels are multiplicative, with a gentle 10% change for every 11487 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to 11488 * nice 1, it will get ~10% less CPU time than another CPU-bound task 11489 * that remained on nice 0. 11490 * 11491 * The "10% effect" is relative and cumulative: from _any_ nice level, 11492 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level 11493 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25. 11494 * If a task goes up by ~10% and another task goes down by ~10% then 11495 * the relative distance between them is ~25%.) 11496 */ 11497 const int sched_prio_to_weight[40] = { 11498 /* -20 */ 88761, 71755, 56483, 46273, 36291, 11499 /* -15 */ 29154, 23254, 18705, 14949, 11916, 11500 /* -10 */ 9548, 7620, 6100, 4904, 3906, 11501 /* -5 */ 3121, 2501, 1991, 1586, 1277, 11502 /* 0 */ 1024, 820, 655, 526, 423, 11503 /* 5 */ 335, 272, 215, 172, 137, 11504 /* 10 */ 110, 87, 70, 56, 45, 11505 /* 15 */ 36, 29, 23, 18, 15, 11506 }; 11507 11508 /* 11509 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, precalculated. 
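 *
 * (Worked example: the nice-0 weight is 1024 and 2^32 / 1024 = 4194304,
 *  the middle entry of the table below; likewise each nice level scales
 *  the weight by ~1.25, e.g. 1024 / 1.25^5 ~= 335 for nice 5 in the
 *  table above.)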
11510 * 11511 * In cases where the weight does not change often, we can use the 11512 * precalculated inverse to speed up arithmetics by turning divisions 11513 * into multiplications: 11514 */ 11515 const u32 sched_prio_to_wmult[40] = { 11516 /* -20 */ 48388, 59856, 76040, 92818, 118348, 11517 /* -15 */ 147320, 184698, 229616, 287308, 360437, 11518 /* -10 */ 449829, 563644, 704093, 875809, 1099582, 11519 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326, 11520 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587, 11521 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126, 11522 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717, 11523 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153, 11524 }; 11525 11526 void call_trace_sched_update_nr_running(struct rq *rq, int count) 11527 { 11528 trace_sched_update_nr_running_tp(rq, count); 11529 } 11530 11531 #ifdef CONFIG_SCHED_MM_CID 11532 11533 /* 11534 * @cid_lock: Guarantee forward-progress of cid allocation. 11535 * 11536 * Concurrency ID allocation within a bitmap is mostly lock-free. The cid_lock 11537 * is only used when contention is detected by the lock-free allocation so 11538 * forward progress can be guaranteed. 11539 */ 11540 DEFINE_RAW_SPINLOCK(cid_lock); 11541 11542 /* 11543 * @use_cid_lock: Select cid allocation behavior: lock-free vs spinlock. 11544 * 11545 * When @use_cid_lock is 0, the cid allocation is lock-free. When contention is 11546 * detected, it is set to 1 to ensure that all newly coming allocations are 11547 * serialized by @cid_lock until the allocation which detected contention 11548 * completes and sets @use_cid_lock back to 0. This guarantees forward progress 11549 * of a cid allocation. 11550 */ 11551 int use_cid_lock; 11552 11553 /* 11554 * mm_cid remote-clear implements a lock-free algorithm to clear per-mm/cpu cid 11555 * concurrently with respect to the execution of the source runqueue context 11556 * switch. 11557 * 11558 * There is one basic properties we want to guarantee here: 11559 * 11560 * (1) Remote-clear should _never_ mark a per-cpu cid UNSET when it is actively 11561 * used by a task. That would lead to concurrent allocation of the cid and 11562 * userspace corruption. 11563 * 11564 * Provide this guarantee by introducing a Dekker memory ordering to guarantee 11565 * that a pair of loads observe at least one of a pair of stores, which can be 11566 * shown as: 11567 * 11568 * X = Y = 0 11569 * 11570 * w[X]=1 w[Y]=1 11571 * MB MB 11572 * r[Y]=y r[X]=x 11573 * 11574 * Which guarantees that x==0 && y==0 is impossible. But rather than using 11575 * values 0 and 1, this algorithm cares about specific state transitions of the 11576 * runqueue current task (as updated by the scheduler context switch), and the 11577 * per-mm/cpu cid value. 11578 * 11579 * Let's introduce task (Y) which has task->mm == mm and task (N) which has 11580 * task->mm != mm for the rest of the discussion. There are two scheduler state 11581 * transitions on context switch we care about: 11582 * 11583 * (TSA) Store to rq->curr with transition from (N) to (Y) 11584 * 11585 * (TSB) Store to rq->curr with transition from (Y) to (N) 11586 * 11587 * On the remote-clear side, there is one transition we care about: 11588 * 11589 * (TMA) cmpxchg to *pcpu_cid to set the LAZY flag 11590 * 11591 * There is also a transition to UNSET state which can be performed from all 11592 * sides (scheduler, remote-clear). 
It is always performed with a cmpxchg which 11593 * guarantees that only a single thread will succeed: 11594 * 11595 * (TMB) cmpxchg to *pcpu_cid to mark UNSET 11596 * 11597 * Just to be clear, what we do _not_ want to happen is a transition to UNSET 11598 * when a thread is actively using the cid (property (1)). 11599 * 11600 * Let's look at the relevant combinations of TSA/TSB, and TMA transitions. 11601 * 11602 * Scenario A) (TSA)+(TMA) (from next task perspective) 11603 * 11604 * CPU0 CPU1 11605 * 11606 * Context switch CS-1 Remote-clear 11607 * - store to rq->curr: (N)->(Y) (TSA) - cmpxchg to *pcpu_id to LAZY (TMA) 11608 * (implied barrier after cmpxchg) 11609 * - switch_mm_cid() 11610 * - memory barrier (see switch_mm_cid() 11611 * comment explaining how this barrier 11612 * is combined with other scheduler 11613 * barriers) 11614 * - mm_cid_get (next) 11615 * - READ_ONCE(*pcpu_cid) - rcu_dereference(src_rq->curr) 11616 * 11617 * This Dekker ensures that either task (Y) is observed by the 11618 * rcu_dereference() or the LAZY flag is observed by READ_ONCE(), or both are 11619 * observed. 11620 * 11621 * If task (Y) store is observed by rcu_dereference(), it means that there is 11622 * still an active task on the cpu. Remote-clear will therefore not transition 11623 * to UNSET, which fulfills property (1). 11624 * 11625 * If task (Y) is not observed, but the lazy flag is observed by READ_ONCE(), 11626 * it will move its state to UNSET, which clears the percpu cid perhaps 11627 * uselessly (which is not an issue for correctness). Because task (Y) is not 11628 * observed, CPU1 can move ahead to set the state to UNSET. Because moving 11629 * state to UNSET is done with a cmpxchg expecting that the old state has the 11630 * LAZY flag set, only one thread will successfully UNSET. 11631 * 11632 * If both states (LAZY flag and task (Y)) are observed, the thread on CPU0 11633 * will observe the LAZY flag and transition to UNSET (perhaps uselessly), and 11634 * CPU1 will observe task (Y) and do nothing more, which is fine. 11635 * 11636 * What we are effectively preventing with this Dekker is a scenario where 11637 * neither LAZY flag nor store (Y) is observed, which would fail property (1) 11638 * because this would UNSET a cid which is actively used. 11639 */ 11640 11641 void sched_mm_cid_migrate_from(struct task_struct *t) 11642 { 11643 t->migrate_from_cpu = task_cpu(t); 11644 } 11645 11646 static 11647 int __sched_mm_cid_migrate_from_fetch_cid(struct rq *src_rq, 11648 struct task_struct *t, 11649 struct mm_cid *src_pcpu_cid) 11650 { 11651 struct mm_struct *mm = t->mm; 11652 struct task_struct *src_task; 11653 int src_cid, last_mm_cid; 11654 11655 if (!mm) 11656 return -1; 11657 11658 last_mm_cid = t->last_mm_cid; 11659 /* 11660 * If the migrated task has no last cid, or if the current 11661 * task on src rq uses the cid, it means the source cid does not need 11662 * to be moved to the destination cpu. 11663 */ 11664 if (last_mm_cid == -1) 11665 return -1; 11666 src_cid = READ_ONCE(src_pcpu_cid->cid); 11667 if (!mm_cid_is_valid(src_cid) || last_mm_cid != src_cid) 11668 return -1; 11669 11670 /* 11671 * If we observe an active task using the mm on this rq, it means we 11672 * are not the last task to be migrated from this cpu for this mm, so 11673 * there is no need to move src_cid to the destination cpu.
11674 */ 11675 guard(rcu)(); 11676 src_task = rcu_dereference(src_rq->curr); 11677 if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) { 11678 t->last_mm_cid = -1; 11679 return -1; 11680 } 11681 11682 return src_cid; 11683 } 11684 11685 static 11686 int __sched_mm_cid_migrate_from_try_steal_cid(struct rq *src_rq, 11687 struct task_struct *t, 11688 struct mm_cid *src_pcpu_cid, 11689 int src_cid) 11690 { 11691 struct task_struct *src_task; 11692 struct mm_struct *mm = t->mm; 11693 int lazy_cid; 11694 11695 if (src_cid == -1) 11696 return -1; 11697 11698 /* 11699 * Attempt to clear the source cpu cid to move it to the destination 11700 * cpu. 11701 */ 11702 lazy_cid = mm_cid_set_lazy_put(src_cid); 11703 if (!try_cmpxchg(&src_pcpu_cid->cid, &src_cid, lazy_cid)) 11704 return -1; 11705 11706 /* 11707 * The implicit barrier after cmpxchg per-mm/cpu cid before loading 11708 * rq->curr->mm matches the scheduler barrier in context_switch() 11709 * between store to rq->curr and load of prev and next task's 11710 * per-mm/cpu cid. 11711 * 11712 * The implicit barrier after cmpxchg per-mm/cpu cid before loading 11713 * rq->curr->mm_cid_active matches the barrier in 11714 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and 11715 * sched_mm_cid_after_execve() between store to t->mm_cid_active and 11716 * load of per-mm/cpu cid. 11717 */ 11718 11719 /* 11720 * If we observe an active task using the mm on this rq after setting 11721 * the lazy-put flag, this task will be responsible for transitioning 11722 * from lazy-put flag set to MM_CID_UNSET. 11723 */ 11724 scoped_guard (rcu) { 11725 src_task = rcu_dereference(src_rq->curr); 11726 if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) { 11727 /* 11728 * We observed an active task for this mm, there is therefore 11729 * no point in moving this cid to the destination cpu. 11730 */ 11731 t->last_mm_cid = -1; 11732 return -1; 11733 } 11734 } 11735 11736 /* 11737 * The src_cid is unused, so it can be unset. 11738 */ 11739 if (!try_cmpxchg(&src_pcpu_cid->cid, &lazy_cid, MM_CID_UNSET)) 11740 return -1; 11741 return src_cid; 11742 } 11743 11744 /* 11745 * Migration to dst cpu. Called with dst_rq lock held. 11746 * Interrupts are disabled, which keeps the window of cid ownership without the 11747 * source rq lock held small. 11748 */ 11749 void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t) 11750 { 11751 struct mm_cid *src_pcpu_cid, *dst_pcpu_cid; 11752 struct mm_struct *mm = t->mm; 11753 int src_cid, dst_cid, src_cpu; 11754 struct rq *src_rq; 11755 11756 lockdep_assert_rq_held(dst_rq); 11757 11758 if (!mm) 11759 return; 11760 src_cpu = t->migrate_from_cpu; 11761 if (src_cpu == -1) { 11762 t->last_mm_cid = -1; 11763 return; 11764 } 11765 /* 11766 * Move the src cid if the dst cid is unset. This keeps id 11767 * allocation closest to 0 in cases where few threads migrate around 11768 * many cpus. 11769 * 11770 * If destination cid is already set, we may have to just clear 11771 * the src cid to ensure compactness in frequent migrations 11772 * scenarios. 11773 * 11774 * It is not useful to clear the src cid when the number of threads is 11775 * greater or equal to the number of allowed cpus, because user-space 11776 * can expect that the number of allowed cids can reach the number of 11777 * allowed cpus. 
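 *
 * (Illustration with made-up numbers: a process with 2 threads bouncing
 *  around an 8-CPU machine should keep using cids {0, 1}, so moving the
 *  src cid along with the migrating thread is worthwhile; a process with
 *  16 threads on the same machine may legitimately use cids 0..7 anyway,
 *  so clearing the source cid buys nothing and is skipped.)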
11778 */ 11779 dst_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(dst_rq)); 11780 dst_cid = READ_ONCE(dst_pcpu_cid->cid); 11781 if (!mm_cid_is_unset(dst_cid) && 11782 atomic_read(&mm->mm_users) >= t->nr_cpus_allowed) 11783 return; 11784 src_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, src_cpu); 11785 src_rq = cpu_rq(src_cpu); 11786 src_cid = __sched_mm_cid_migrate_from_fetch_cid(src_rq, t, src_pcpu_cid); 11787 if (src_cid == -1) 11788 return; 11789 src_cid = __sched_mm_cid_migrate_from_try_steal_cid(src_rq, t, src_pcpu_cid, 11790 src_cid); 11791 if (src_cid == -1) 11792 return; 11793 if (!mm_cid_is_unset(dst_cid)) { 11794 __mm_cid_put(mm, src_cid); 11795 return; 11796 } 11797 /* Move src_cid to dst cpu. */ 11798 mm_cid_snapshot_time(dst_rq, mm); 11799 WRITE_ONCE(dst_pcpu_cid->cid, src_cid); 11800 } 11801 11802 static void sched_mm_cid_remote_clear(struct mm_struct *mm, struct mm_cid *pcpu_cid, 11803 int cpu) 11804 { 11805 struct rq *rq = cpu_rq(cpu); 11806 struct task_struct *t; 11807 int cid, lazy_cid; 11808 11809 cid = READ_ONCE(pcpu_cid->cid); 11810 if (!mm_cid_is_valid(cid)) 11811 return; 11812 11813 /* 11814 * Clear the cpu cid if it is set to keep cid allocation compact. If 11815 * there happens to be other tasks left on the source cpu using this 11816 * mm, the next task using this mm will reallocate its cid on context 11817 * switch. 11818 */ 11819 lazy_cid = mm_cid_set_lazy_put(cid); 11820 if (!try_cmpxchg(&pcpu_cid->cid, &cid, lazy_cid)) 11821 return; 11822 11823 /* 11824 * The implicit barrier after cmpxchg per-mm/cpu cid before loading 11825 * rq->curr->mm matches the scheduler barrier in context_switch() 11826 * between store to rq->curr and load of prev and next task's 11827 * per-mm/cpu cid. 11828 * 11829 * The implicit barrier after cmpxchg per-mm/cpu cid before loading 11830 * rq->curr->mm_cid_active matches the barrier in 11831 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and 11832 * sched_mm_cid_after_execve() between store to t->mm_cid_active and 11833 * load of per-mm/cpu cid. 11834 */ 11835 11836 /* 11837 * If we observe an active task using the mm on this rq after setting 11838 * the lazy-put flag, that task will be responsible for transitioning 11839 * from lazy-put flag set to MM_CID_UNSET. 11840 */ 11841 scoped_guard (rcu) { 11842 t = rcu_dereference(rq->curr); 11843 if (READ_ONCE(t->mm_cid_active) && t->mm == mm) 11844 return; 11845 } 11846 11847 /* 11848 * The cid is unused, so it can be unset. 11849 * Disable interrupts to keep the window of cid ownership without rq 11850 * lock small. 11851 */ 11852 scoped_guard (irqsave) { 11853 if (try_cmpxchg(&pcpu_cid->cid, &lazy_cid, MM_CID_UNSET)) 11854 __mm_cid_put(mm, cid); 11855 } 11856 } 11857 11858 static void sched_mm_cid_remote_clear_old(struct mm_struct *mm, int cpu) 11859 { 11860 struct rq *rq = cpu_rq(cpu); 11861 struct mm_cid *pcpu_cid; 11862 struct task_struct *curr; 11863 u64 rq_clock; 11864 11865 /* 11866 * rq->clock load is racy on 32-bit but one spurious clear once in a 11867 * while is irrelevant. 11868 */ 11869 rq_clock = READ_ONCE(rq->clock); 11870 pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu); 11871 11872 /* 11873 * In order to take care of infrequently scheduled tasks, bump the time 11874 * snapshot associated with this cid if an active task using the mm is 11875 * observed on this rq. 
static void sched_mm_cid_remote_clear_old(struct mm_struct *mm, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct mm_cid *pcpu_cid;
	struct task_struct *curr;
	u64 rq_clock;

	/*
	 * rq->clock load is racy on 32-bit but one spurious clear once in a
	 * while is irrelevant.
	 */
	rq_clock = READ_ONCE(rq->clock);
	pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);

	/*
	 * In order to take care of infrequently scheduled tasks, bump the time
	 * snapshot associated with this cid if an active task using the mm is
	 * observed on this rq.
	 */
	scoped_guard (rcu) {
		curr = rcu_dereference(rq->curr);
		if (READ_ONCE(curr->mm_cid_active) && curr->mm == mm) {
			WRITE_ONCE(pcpu_cid->time, rq_clock);
			return;
		}
	}

	if (rq_clock < pcpu_cid->time + SCHED_MM_CID_PERIOD_NS)
		return;
	sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
}

static void sched_mm_cid_remote_clear_weight(struct mm_struct *mm, int cpu,
					     int weight)
{
	struct mm_cid *pcpu_cid;
	int cid;

	pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
	cid = READ_ONCE(pcpu_cid->cid);
	if (!mm_cid_is_valid(cid) || cid < weight)
		return;
	sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
}

static void task_mm_cid_work(struct callback_head *work)
{
	unsigned long now = jiffies, old_scan, next_scan;
	struct task_struct *t = current;
	struct cpumask *cidmask;
	struct mm_struct *mm;
	int weight, cpu;

	SCHED_WARN_ON(t != container_of(work, struct task_struct, cid_work));

	work->next = work;	/* Prevent double-add */
	if (t->flags & PF_EXITING)
		return;
	mm = t->mm;
	if (!mm)
		return;
	old_scan = READ_ONCE(mm->mm_cid_next_scan);
	next_scan = now + msecs_to_jiffies(MM_CID_SCAN_DELAY);
	if (!old_scan) {
		unsigned long res;

		res = cmpxchg(&mm->mm_cid_next_scan, old_scan, next_scan);
		if (res != old_scan)
			old_scan = res;
		else
			old_scan = next_scan;
	}
	if (time_before(now, old_scan))
		return;
	if (!try_cmpxchg(&mm->mm_cid_next_scan, &old_scan, next_scan))
		return;
	cidmask = mm_cidmask(mm);
	/* Clear cids that were not recently used. */
	for_each_possible_cpu(cpu)
		sched_mm_cid_remote_clear_old(mm, cpu);
	weight = cpumask_weight(cidmask);
	/*
	 * Clear cids that are greater than or equal to the cidmask weight to
	 * recompact it.
	 */
	for_each_possible_cpu(cpu)
		sched_mm_cid_remote_clear_weight(mm, cpu, weight);
}

void init_sched_mm_cid(struct task_struct *t)
{
	struct mm_struct *mm = t->mm;
	int mm_users = 0;

	if (mm) {
		mm_users = atomic_read(&mm->mm_users);
		if (mm_users == 1)
			mm->mm_cid_next_scan = jiffies + msecs_to_jiffies(MM_CID_SCAN_DELAY);
	}
	t->cid_work.next = &t->cid_work;	/* Protect against double add */
	init_task_work(&t->cid_work, task_mm_cid_work);
}

void task_tick_mm_cid(struct rq *rq, struct task_struct *curr)
{
	struct callback_head *work = &curr->cid_work;
	unsigned long now = jiffies;

	if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) ||
	    work->next != work)
		return;
	if (time_before(now, READ_ONCE(curr->mm->mm_cid_next_scan)))
		return;
	task_work_add(curr, work, TWA_RESUME);
}

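/*
 * sched_mm_cid_exit_signals() and sched_mm_cid_before_execve() deactivate
 * cid tracking for a task and release its cid; sched_mm_cid_after_execve()
 * re-activates tracking and assigns a fresh cid. Each of them stores to
 * t->mm_cid_active and issues a full barrier before touching the per-mm/cpu
 * cid, pairing with the barriers documented on the remote-clear and
 * migrate-from paths above.
 */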
void sched_mm_cid_exit_signals(struct task_struct *t)
{
	struct mm_struct *mm = t->mm;
	struct rq *rq;

	if (!mm)
		return;

	preempt_disable();
	rq = this_rq();
	guard(rq_lock_irqsave)(rq);
	preempt_enable_no_resched();	/* holding spinlock */
	WRITE_ONCE(t->mm_cid_active, 0);
	/*
	 * Store t->mm_cid_active before loading per-mm/cpu cid.
	 * Matches barrier in sched_mm_cid_remote_clear_old().
	 */
	smp_mb();
	mm_cid_put(mm);
	t->last_mm_cid = t->mm_cid = -1;
}

void sched_mm_cid_before_execve(struct task_struct *t)
{
	struct mm_struct *mm = t->mm;
	struct rq *rq;

	if (!mm)
		return;

	preempt_disable();
	rq = this_rq();
	guard(rq_lock_irqsave)(rq);
	preempt_enable_no_resched();	/* holding spinlock */
	WRITE_ONCE(t->mm_cid_active, 0);
	/*
	 * Store t->mm_cid_active before loading per-mm/cpu cid.
	 * Matches barrier in sched_mm_cid_remote_clear_old().
	 */
	smp_mb();
	mm_cid_put(mm);
	t->last_mm_cid = t->mm_cid = -1;
}

void sched_mm_cid_after_execve(struct task_struct *t)
{
	struct mm_struct *mm = t->mm;
	struct rq *rq;

	if (!mm)
		return;

	preempt_disable();
	rq = this_rq();
	scoped_guard (rq_lock_irqsave, rq) {
		preempt_enable_no_resched();	/* holding spinlock */
		WRITE_ONCE(t->mm_cid_active, 1);
		/*
		 * Store t->mm_cid_active before loading per-mm/cpu cid.
		 * Matches barrier in sched_mm_cid_remote_clear_old().
		 */
		smp_mb();
		t->last_mm_cid = t->mm_cid = mm_cid_get(rq, mm);
	}
	rseq_set_notify_resume(t);
}

void sched_mm_cid_fork(struct task_struct *t)
{
	WARN_ON_ONCE(!t->mm || t->mm_cid != -1);
	t->mm_cid_active = 1;
}
#endif