// SPDX-License-Identifier: GPL-2.0-only
/*
 *  kernel/sched/core.c
 *
 *  Core kernel CPU scheduler code
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *  Copyright (C) 1998-2024  Ingo Molnar, Red Hat
 */
#include <linux/highmem.h>
#include <linux/hrtimer_api.h>
#include <linux/ktime_api.h>
#include <linux/sched/signal.h>
#include <linux/syscalls_api.h>
#include <linux/debug_locks.h>
#include <linux/prefetch.h>
#include <linux/capability.h>
#include <linux/pgtable_api.h>
#include <linux/wait_bit.h>
#include <linux/jiffies.h>
#include <linux/spinlock_api.h>
#include <linux/cpumask_api.h>
#include <linux/lockdep_api.h>
#include <linux/hardirq.h>
#include <linux/softirq.h>
#include <linux/refcount_api.h>
#include <linux/topology.h>
#include <linux/sched/clock.h>
#include <linux/sched/cond_resched.h>
#include <linux/sched/cputime.h>
#include <linux/sched/debug.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/init.h>
#include <linux/sched/isolation.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/mm.h>
#include <linux/sched/nohz.h>
#include <linux/sched/rseq_api.h>
#include <linux/sched/rt.h>

#include <linux/blkdev.h>
#include <linux/context_tracking.h>
#include <linux/cpuset.h>
#include <linux/delayacct.h>
#include <linux/init_task.h>
#include <linux/interrupt.h>
#include <linux/ioprio.h>
#include <linux/kallsyms.h>
#include <linux/kcov.h>
#include <linux/kprobes.h>
#include <linux/llist_api.h>
#include <linux/mmu_context.h>
#include <linux/mmzone.h>
#include <linux/mutex_api.h>
#include <linux/nmi.h>
#include <linux/nospec.h>
#include <linux/perf_event_api.h>
#include <linux/profile.h>
#include <linux/psi.h>
#include <linux/rcuwait_api.h>
#include <linux/rseq.h>
#include <linux/sched/wake_q.h>
#include <linux/scs.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/vtime.h>
#include <linux/wait_api.h>
#include <linux/workqueue_api.h>

#ifdef CONFIG_PREEMPT_DYNAMIC
# ifdef CONFIG_GENERIC_ENTRY
#  include <linux/entry-common.h>
# endif
#endif

#include <uapi/linux/sched/types.h>

#include <asm/irq_regs.h>
#include <asm/switch_to.h>
#include <asm/tlb.h>

#define CREATE_TRACE_POINTS
#include <linux/sched/rseq_api.h>
#include <trace/events/sched.h>
#include <trace/events/ipi.h>
#undef CREATE_TRACE_POINTS

#include "sched.h"
#include "stats.h"

#include "autogroup.h"
#include "pelt.h"
#include "smp.h"
#include "stats.h"

#include "../workqueue_internal.h"
#include "../../io_uring/io-wq.h"
#include "../smpboot.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);

/*
 * Export tracepoints that act as a bare tracehook (ie: have no trace event
 * associated with them) to allow external modules to probe them.
 */
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_hw_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_compute_energy_tp);

DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#ifdef CONFIG_SCHED_DEBUG
/*
 * Debugging: various feature bits
 *
 * If SCHED_DEBUG is disabled, each compilation unit has its own copy of
 * sysctl_sched_features, defined in sched.h, to allow constants propagation
 * at compile time and compiler optimization based on features default.
 */
#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |
const_debug unsigned int sysctl_sched_features =
#include "features.h"
	0;
#undef SCHED_FEAT

/*
 * Print a warning if need_resched is set for the given duration (if
 * LATENCY_WARN is enabled).
 *
 * If sysctl_resched_latency_warn_once is set, only one warning will be shown
 * per boot.
 */
__read_mostly int sysctl_resched_latency_warn_ms = 100;
__read_mostly int sysctl_resched_latency_warn_once = 1;
#endif /* CONFIG_SCHED_DEBUG */

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;

__read_mostly int scheduler_running;

#ifdef CONFIG_SCHED_CORE

DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);

/* kernel prio, less is more */
static inline int __task_prio(const struct task_struct *p)
{
	if (p->sched_class == &stop_sched_class) /* trumps deadline */
		return -2;

	if (p->dl_server)
		return -1; /* deadline */

	if (rt_or_dl_prio(p->prio))
		return p->prio; /* [-1, 99] */

	if (p->sched_class == &idle_sched_class)
		return MAX_RT_PRIO + NICE_WIDTH; /* 140 */

	return MAX_RT_PRIO + MAX_NICE; /* 120, squash fair */
}
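/*
 * Summary of the mapping above (illustrative; the values follow directly
 * from the definitions of MAX_RT_PRIO, NICE_WIDTH and MAX_NICE):
 *
 *	stop task			-2
 *	DL server			-1
 *	DL / RT task (p->prio)		[-1, 99]
 *	fair (SCHED_NORMAL/BATCH)	120
 *	idle class			140
 */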
/*
 * l(a,b)
 * le(a,b) := !l(b,a)
 * g(a,b)  :=  l(b,a)
 * ge(a,b) := !l(a,b)
 */

/* real prio, less is less */
static inline bool prio_less(const struct task_struct *a,
			     const struct task_struct *b, bool in_fi)
{
	int pa = __task_prio(a), pb = __task_prio(b);

	if (-pa < -pb)
		return true;

	if (-pb < -pa)
		return false;

	if (pa == -1) { /* dl_prio() doesn't work because of stop_class above */
		const struct sched_dl_entity *a_dl, *b_dl;

		a_dl = &a->dl;
		/*
		 * Since 'a' and 'b' can be CFS tasks served by a DL server,
		 * __task_prio() can return -1 (for DL) even for those. In that
		 * case, get to the dl_server's DL entity.
		 */
		if (a->dl_server)
			a_dl = a->dl_server;

		b_dl = &b->dl;
		if (b->dl_server)
			b_dl = b->dl_server;

		return !dl_time_before(a_dl->deadline, b_dl->deadline);
	}

	if (pa == MAX_RT_PRIO + MAX_NICE) /* fair */
		return cfs_prio_less(a, b, in_fi);

	return false;
}

static inline bool __sched_core_less(const struct task_struct *a,
				     const struct task_struct *b)
{
	if (a->core_cookie < b->core_cookie)
		return true;

	if (a->core_cookie > b->core_cookie)
		return false;

	/* flip prio, so high prio is leftmost */
	if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count))
		return true;

	return false;
}

#define __node_2_sc(node) rb_entry((node), struct task_struct, core_node)

static inline bool rb_sched_core_less(struct rb_node *a, const struct rb_node *b)
{
	return __sched_core_less(__node_2_sc(a), __node_2_sc(b));
}

static inline int rb_sched_core_cmp(const void *key, const struct rb_node *node)
{
	const struct task_struct *p = __node_2_sc(node);
	unsigned long cookie = (unsigned long)key;

	if (cookie < p->core_cookie)
		return -1;

	if (cookie > p->core_cookie)
		return 1;

	return 0;
}

void sched_core_enqueue(struct rq *rq, struct task_struct *p)
{
	rq->core->core_task_seq++;

	if (!p->core_cookie)
		return;

	rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);
}

void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags)
{
	rq->core->core_task_seq++;

	if (sched_core_enqueued(p)) {
		rb_erase(&p->core_node, &rq->core_tree);
		RB_CLEAR_NODE(&p->core_node);
	}

	/*
	 * Migrating the last task off the cpu, with the cpu in forced idle
	 * state. Reschedule to create an accounting edge for forced idle,
	 * and re-examine whether the core is still in forced idle state.
	 */
	if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 &&
	    rq->core->core_forceidle_count && rq->curr == rq->idle)
		resched_curr(rq);
}

static int sched_task_is_throttled(struct task_struct *p, int cpu)
{
	if (p->sched_class->task_is_throttled)
		return p->sched_class->task_is_throttled(p, cpu);

	return 0;
}

static struct task_struct *sched_core_next(struct task_struct *p, unsigned long cookie)
{
	struct rb_node *node = &p->core_node;
	int cpu = task_cpu(p);

	do {
		node = rb_next(node);
		if (!node)
			return NULL;

		p = __node_2_sc(node);
		if (p->core_cookie != cookie)
			return NULL;

	} while (sched_task_is_throttled(p, cpu));

	return p;
}

/*
 * Find left-most (aka, highest priority) and unthrottled task matching @cookie.
 * If no suitable task is found, NULL will be returned.
 */
static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie)
{
	struct task_struct *p;
	struct rb_node *node;

	node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp);
	if (!node)
		return NULL;

	p = __node_2_sc(node);
	if (!sched_task_is_throttled(p, rq->cpu))
		return p;

	return sched_core_next(p, cookie);
}
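/*
 * Illustrative sketch (assumption, not an in-tree helper): iterating every
 * unthrottled task matching a cookie on @rq combines sched_core_find()
 * (left-most, i.e. highest priority, match) with sched_core_next()
 * (in-order successor with the same cookie):
 */
static inline void __maybe_unused sched_core_walk_cookie_example(struct rq *rq,
								 unsigned long cookie)
{
	struct task_struct *p;

	lockdep_assert_rq_held(rq);

	for (p = sched_core_find(rq, cookie); p; p = sched_core_next(p, cookie)) {
		/* visit @p, highest priority first */
	}
}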
/*
 * Magic required such that:
 *
 *	raw_spin_rq_lock(rq);
 *	...
 *	raw_spin_rq_unlock(rq);
 *
 * ends up locking and unlocking the _same_ lock, and all CPUs
 * always agree on what rq has what lock.
 *
 * XXX entirely possible to selectively enable cores, don't bother for now.
 */

static DEFINE_MUTEX(sched_core_mutex);
static atomic_t sched_core_count;
static struct cpumask sched_core_mask;

static void sched_core_lock(int cpu, unsigned long *flags)
{
	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
	int t, i = 0;

	local_irq_save(*flags);
	for_each_cpu(t, smt_mask)
		raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);
}

static void sched_core_unlock(int cpu, unsigned long *flags)
{
	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
	int t;

	for_each_cpu(t, smt_mask)
		raw_spin_unlock(&cpu_rq(t)->__lock);
	local_irq_restore(*flags);
}

static void __sched_core_flip(bool enabled)
{
	unsigned long flags;
	int cpu, t;

	cpus_read_lock();

	/*
	 * Toggle the online cores, one by one.
	 */
	cpumask_copy(&sched_core_mask, cpu_online_mask);
	for_each_cpu(cpu, &sched_core_mask) {
		const struct cpumask *smt_mask = cpu_smt_mask(cpu);

		sched_core_lock(cpu, &flags);

		for_each_cpu(t, smt_mask)
			cpu_rq(t)->core_enabled = enabled;

		cpu_rq(cpu)->core->core_forceidle_start = 0;

		sched_core_unlock(cpu, &flags);

		cpumask_andnot(&sched_core_mask, &sched_core_mask, smt_mask);
	}

	/*
	 * Toggle the offline CPUs.
	 */
	for_each_cpu_andnot(cpu, cpu_possible_mask, cpu_online_mask)
		cpu_rq(cpu)->core_enabled = enabled;

	cpus_read_unlock();
}

static void sched_core_assert_empty(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree));
}

static void __sched_core_enable(void)
{
	static_branch_enable(&__sched_core_enabled);
	/*
	 * Ensure all previous instances of raw_spin_rq_*lock() have finished
	 * and future ones will observe !sched_core_disabled().
	 */
	synchronize_rcu();
	__sched_core_flip(true);
	sched_core_assert_empty();
}

static void __sched_core_disable(void)
{
	sched_core_assert_empty();
	__sched_core_flip(false);
	static_branch_disable(&__sched_core_enabled);
}

void sched_core_get(void)
{
	if (atomic_inc_not_zero(&sched_core_count))
		return;

	mutex_lock(&sched_core_mutex);
	if (!atomic_read(&sched_core_count))
		__sched_core_enable();

	smp_mb__before_atomic();
	atomic_inc(&sched_core_count);
	mutex_unlock(&sched_core_mutex);
}

static void __sched_core_put(struct work_struct *work)
{
	if (atomic_dec_and_mutex_lock(&sched_core_count, &sched_core_mutex)) {
		__sched_core_disable();
		mutex_unlock(&sched_core_mutex);
	}
}

void sched_core_put(void)
{
	static DECLARE_WORK(_work, __sched_core_put);

	/*
	 * "There can be only one"
	 *
	 * Either this is the last one, or we don't actually need to do any
	 * 'work'. If it is the last *again*, we rely on
	 * WORK_STRUCT_PENDING_BIT.
	 */
	if (!atomic_add_unless(&sched_core_count, -1, 1))
		schedule_work(&_work);
}
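/*
 * Illustrative sketch (assumption, not an in-tree user): a facility that
 * needs core scheduling enabled for some period brackets it with the
 * refcount-like pair above. The first get enables it; the last put
 * schedules the disable:
 */
static inline void __maybe_unused sched_core_get_put_example(void)
{
	sched_core_get();	/* first user: flips __sched_core_enabled on */
	/* ... rely on core-wide scheduling, e.g. via a core cookie ... */
	sched_core_put();	/* last user: disable is deferred to a work */
}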
#else /* !CONFIG_SCHED_CORE */

static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { }
static inline void
sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { }

#endif /* CONFIG_SCHED_CORE */

/*
 * Serialization rules:
 *
 * Lock order:
 *
 *   p->pi_lock
 *     rq->lock
 *       hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
 *
 *  rq1->lock
 *    rq2->lock  where: rq1 < rq2
 *
 * Regular state:
 *
 * Normal scheduling state is serialized by rq->lock. __schedule() takes the
 * local CPU's rq->lock, it optionally removes the task from the runqueue and
 * always looks at the local rq data structures to find the most eligible task
 * to run next.
 *
 * Task enqueue is also under rq->lock, possibly taken from another CPU.
 * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
 * the local CPU to avoid bouncing the runqueue state around [ see
 * ttwu_queue_wakelist() ].
 *
 * Task wakeup, specifically wakeups that involve migration, are horribly
 * complicated to avoid having to take two rq->locks.
 *
 * Special state:
 *
 * System-calls and anything external will use task_rq_lock() which acquires
 * both p->pi_lock and rq->lock. As a consequence the state they change is
 * stable while holding either lock:
 *
 *  - sched_setaffinity()/
 *    set_cpus_allowed_ptr():	p->cpus_ptr, p->nr_cpus_allowed
 *  - set_user_nice():		p->se.load, p->*prio
 *  - __sched_setscheduler():	p->sched_class, p->policy, p->*prio,
 *				p->se.load, p->rt_priority,
 *				p->dl.dl_{runtime, deadline, period, flags, bw, density}
 *  - sched_setnuma():		p->numa_preferred_nid
 *  - sched_move_task():	p->sched_task_group
 *  - uclamp_update_active():	p->uclamp*
 *
 * p->state <- TASK_*:
 *
 *   is changed locklessly using set_current_state(), __set_current_state() or
 *   set_special_state(), see their respective comments, or by
 *   try_to_wake_up(). The latter uses p->pi_lock to serialize against
 *   concurrent self.
 *
 * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
 *
 *   is set by activate_task() and cleared by deactivate_task(), under
 *   rq->lock. Non-zero indicates the task is runnable, the special
 *   ON_RQ_MIGRATING state is used for migration without holding both
 *   rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
 *
 * p->on_cpu <- { 0, 1 }:
 *
 *   is set by prepare_task() and cleared by finish_task() such that it will be
 *   set before p is scheduled-in and cleared after p is scheduled-out, both
 *   under rq->lock. Non-zero indicates the task is running on its CPU.
 *
 *   [ The astute reader will observe that it is possible for two tasks on one
 *     CPU to have ->on_cpu = 1 at the same time. ]
 *
 * task_cpu(p): is changed by set_task_cpu(), the rules are:
 *
 *  - Don't call set_task_cpu() on a blocked task:
 *
 *    We don't care what CPU we're not running on, this simplifies hotplug,
 *    the CPU assignment of blocked tasks isn't required to be valid.
 *
 *  - for try_to_wake_up(), called under p->pi_lock:
 *
 *    This allows try_to_wake_up() to only take one rq->lock, see its comment.
 *
 *  - for migration called under rq->lock:
 *    [ see task_on_rq_migrating() in task_rq_lock() ]
 *
 *    o move_queued_task()
 *    o detach_task()
 *
 *  - for migration called under double_rq_lock():
 *
 *    o __migrate_swap_task()
 *    o push_rt_task() / pull_rt_task()
 *    o push_dl_task() / pull_dl_task()
 *    o dl_task_offline_migration()
 *
 */

void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
{
	raw_spinlock_t *lock;

	/* Matches synchronize_rcu() in __sched_core_enable() */
	preempt_disable();
	if (sched_core_disabled()) {
		raw_spin_lock_nested(&rq->__lock, subclass);
		/* preempt_count *MUST* be > 1 */
		preempt_enable_no_resched();
		return;
	}

	for (;;) {
		lock = __rq_lockp(rq);
		raw_spin_lock_nested(lock, subclass);
		if (likely(lock == __rq_lockp(rq))) {
			/* preempt_count *MUST* be > 1 */
			preempt_enable_no_resched();
			return;
		}
		raw_spin_unlock(lock);
	}
}

bool raw_spin_rq_trylock(struct rq *rq)
{
	raw_spinlock_t *lock;
	bool ret;

	/* Matches synchronize_rcu() in __sched_core_enable() */
	preempt_disable();
	if (sched_core_disabled()) {
		ret = raw_spin_trylock(&rq->__lock);
		preempt_enable();
		return ret;
	}

	for (;;) {
		lock = __rq_lockp(rq);
		ret = raw_spin_trylock(lock);
		if (!ret || (likely(lock == __rq_lockp(rq)))) {
			preempt_enable();
			return ret;
		}
		raw_spin_unlock(lock);
	}
}

void raw_spin_rq_unlock(struct rq *rq)
{
	raw_spin_unlock(rq_lockp(rq));
}

#ifdef CONFIG_SMP
/*
 * double_rq_lock - safely lock two runqueues
 */
void double_rq_lock(struct rq *rq1, struct rq *rq2)
{
	lockdep_assert_irqs_disabled();

	if (rq_order_less(rq2, rq1))
		swap(rq1, rq2);

	raw_spin_rq_lock(rq1);
	if (__rq_lockp(rq1) != __rq_lockp(rq2))
		raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);

	double_rq_clock_clear_update(rq1, rq2);
}
#endif

/*
 * __task_rq_lock - lock the rq @p resides on.
 */
struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(rq->lock)
{
	struct rq *rq;

	lockdep_assert_held(&p->pi_lock);

	for (;;) {
		rq = task_rq(p);
		raw_spin_rq_lock(rq);
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rq_pin_lock(rq, rf);
			return rq;
		}
		raw_spin_rq_unlock(rq);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}

/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 */
struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(p->pi_lock)
	__acquires(rq->lock)
{
	struct rq *rq;

	for (;;) {
		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
		rq = task_rq(p);
		raw_spin_rq_lock(rq);
		/*
		 *	move_queued_task()		task_rq_lock()
		 *
		 *	ACQUIRE (rq->lock)
		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
		 *	[S] ->cpu = new_cpu		[L] task_rq()
		 *					[L] ->on_rq
		 *	RELEASE (rq->lock)
		 *
		 * If we observe the old CPU in task_rq_lock(), the acquire of
		 * the old rq->lock will fully serialize against the stores.
		 *
		 * If we observe the new CPU in task_rq_lock(), the address
		 * dependency headed by '[L] rq = task_rq()' and the acquire
		 * will pair with the WMB to ensure we then also see migrating.
		 */
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rq_pin_lock(rq, rf);
			return rq;
		}
		raw_spin_rq_unlock(rq);
		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}
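/*
 * Illustrative sketch (assumption, not an in-tree helper): the canonical
 * way to look at remote task state that must not change underneath us is
 * to hold both p->pi_lock and the task's rq->lock via task_rq_lock():
 */
static inline void __maybe_unused task_rq_lock_usage_example(struct task_struct *p)
{
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);
	/* p->policy, p->*prio, task_cpu(p) etc. are stable here */
	task_rq_unlock(rq, p, &rf);
}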
/*
 * RQ-clock updating methods:
 */

static void update_rq_clock_task(struct rq *rq, s64 delta)
{
/*
 * In theory, the compiler should just see 0 here, and optimize out the call
 * to sched_rt_avg_update. But I don't trust it...
 */
	s64 __maybe_unused steal = 0, irq_delta = 0;

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

	/*
	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
	 * this case when a previous update_rq_clock() happened inside a
	 * {soft,}IRQ region.
	 *
	 * When this happens, we stop ->clock_task and only update the
	 * prev_irq_time stamp to account for the part that fit, so that a next
	 * update will consume the rest. This ensures ->clock_task is
	 * monotonic.
	 *
	 * It does however cause some slight miss-attribution of {soft,}IRQ
	 * time, a more accurate solution would be to update the irq_time using
	 * the current rq->clock timestamp, except that would require using
	 * atomic ops.
	 */
	if (irq_delta > delta)
		irq_delta = delta;

	rq->prev_irq_time += irq_delta;
	delta -= irq_delta;
	delayacct_irq(rq->curr, irq_delta);
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	if (static_key_false((&paravirt_steal_rq_enabled))) {
		steal = paravirt_steal_clock(cpu_of(rq));
		steal -= rq->prev_steal_time_rq;

		if (unlikely(steal > delta))
			steal = delta;

		rq->prev_steal_time_rq += steal;
		delta -= steal;
	}
#endif

	rq->clock_task += delta;

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
		update_irq_load_avg(rq, irq_delta + steal);
#endif
	update_rq_clock_pelt(rq, delta);
}

void update_rq_clock(struct rq *rq)
{
	s64 delta;

	lockdep_assert_rq_held(rq);

	if (rq->clock_update_flags & RQCF_ACT_SKIP)
		return;

#ifdef CONFIG_SCHED_DEBUG
	if (sched_feat(WARN_DOUBLE_CLOCK))
		SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
	rq->clock_update_flags |= RQCF_UPDATED;
#endif

	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
	if (delta < 0)
		return;
	rq->clock += delta;
	update_rq_clock_task(rq, delta);
}
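/*
 * Illustrative sketch (assumption, not an in-tree helper): rq->clock is
 * only advanced on demand, so code that holds the rq lock and wants a
 * fresh timestamp follows this pattern:
 */
static inline u64 __maybe_unused rq_clock_sample_example(struct rq *rq)
{
	struct rq_flags rf;
	u64 now;

	rq_lock_irqsave(rq, &rf);
	update_rq_clock(rq);		/* advance rq->clock before reading */
	now = rq_clock(rq);
	rq_unlock_irqrestore(rq, &rf);

	return now;
}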
#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 */

static void hrtick_clear(struct rq *rq)
{
	if (hrtimer_active(&rq->hrtick_timer))
		hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
	struct rq_flags rf;

	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

	rq_lock(rq, &rf);
	update_rq_clock(rq);
	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
	rq_unlock(rq, &rf);

	return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP

static void __hrtick_restart(struct rq *rq)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time = rq->hrtick_time;

	hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
}

/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
	struct rq *rq = arg;
	struct rq_flags rf;

	rq_lock(rq, &rf);
	__hrtick_restart(rq);
	rq_unlock(rq, &rf);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and IRQs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	s64 delta;

	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense and can cause timer DoS.
	 */
	delta = max_t(s64, delay, 10000LL);
	rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);

	if (rq == this_rq())
		__hrtick_restart(rq);
	else
		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
}

#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and IRQs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense. Rely on vruntime for fairness.
	 */
	delay = max_t(u64, delay, 10000LL);
	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
		      HRTIMER_MODE_REL_PINNED_HARD);
}

#endif /* CONFIG_SMP */

static void hrtick_rq_init(struct rq *rq)
{
#ifdef CONFIG_SMP
	INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
#endif
	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	rq->hrtick_timer.function = hrtick;
}
#else /* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void hrtick_rq_init(struct rq *rq)
{
}
#endif /* CONFIG_SCHED_HRTICK */

/*
 * try_cmpxchg based fetch_or() macro so it works for different integer types:
 */
#define fetch_or(ptr, mask)						\
	({								\
		typeof(ptr) _ptr = (ptr);				\
		typeof(mask) _mask = (mask);				\
		typeof(*_ptr) _val = *_ptr;				\
									\
		do {							\
		} while (!try_cmpxchg(_ptr, &_val, _val | _mask));	\
		_val;							\
	})
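/*
 * Illustrative sketch (assumption, not in-tree code): fetch_or() returns
 * the value the word held *before* the OR, which is what lets "set one
 * flag and test another" be a single atomic operation, as
 * set_nr_and_not_polling() below relies on:
 */
static inline bool __maybe_unused fetch_or_usage_example(unsigned long *word)
{
	unsigned long old = fetch_or(word, 1UL << 0);	/* set bit 0 */

	return !(old & (1UL << 1));			/* was bit 1 clear? */
}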
#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
/*
 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
 * this avoids any races wrt polling state changes and thereby avoids
 * spurious IPIs.
 */
static inline bool set_nr_and_not_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);

	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
}

/*
 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
 *
 * If this returns true, then the idle task promises to call
 * sched_ttwu_pending() and reschedule soon.
 */
static bool set_nr_if_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	typeof(ti->flags) val = READ_ONCE(ti->flags);

	do {
		if (!(val & _TIF_POLLING_NRFLAG))
			return false;
		if (val & _TIF_NEED_RESCHED)
			return true;
	} while (!try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED));

	return true;
}

#else
static inline bool set_nr_and_not_polling(struct task_struct *p)
{
	set_tsk_need_resched(p);
	return true;
}

#ifdef CONFIG_SMP
static inline bool set_nr_if_polling(struct task_struct *p)
{
	return false;
}
#endif
#endif

static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	struct wake_q_node *node = &task->wake_q;

	/*
	 * Atomically grab the task, if ->wake_q is !nil already it means
	 * it's already queued (either by us or someone else) and will get the
	 * wakeup due to that.
	 *
	 * In order to ensure that a pending wakeup will observe our pending
	 * state, even in the failed case, an explicit smp_mb() must be used.
	 */
	smp_mb__before_atomic();
	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
		return false;

	/*
	 * The head is context local, there can be no concurrency.
	 */
	*head->lastp = node;
	head->lastp = &node->next;
	return true;
}

/**
 * wake_q_add() - queue a wakeup for 'later' waking.
 * @head: the wake_q_head to add @task to
 * @task: the task to queue for 'later' wakeup
 *
 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 * instantly.
 *
 * This function must be used as-if it were wake_up_process(); IOW the task
 * must be ready to be woken at this location.
 */
void wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	if (__wake_q_add(head, task))
		get_task_struct(task);
}

/**
 * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
 * @head: the wake_q_head to add @task to
 * @task: the task to queue for 'later' wakeup
 *
 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 * instantly.
 *
 * This function must be used as-if it were wake_up_process(); IOW the task
 * must be ready to be woken at this location.
 *
 * This function is essentially a task-safe equivalent to wake_q_add(). Callers
 * that already hold a reference to @task can call the 'safe' version and trust
 * wake_q to do the right thing depending on whether or not the @task is
 * already queued for wakeup.
 */
void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
{
	if (!__wake_q_add(head, task))
		put_task_struct(task);
}
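/*
 * Illustrative sketch (assumption, not an in-tree user): a typical wake_q
 * sequence queues wakeups while holding a lock and issues them with
 * wake_up_q() (defined below, declared in <linux/sched/wake_q.h>) only
 * after the lock is dropped:
 */
static inline void __maybe_unused wake_q_usage_example(struct task_struct *p,
						       spinlock_t *lock)
{
	DEFINE_WAKE_Q(wake_q);

	spin_lock(lock);
	wake_q_add(&wake_q, p);		/* takes a reference on @p */
	spin_unlock(lock);

	wake_up_q(&wake_q);		/* wakes @p and drops the reference */
}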
void wake_up_q(struct wake_q_head *head)
{
	struct wake_q_node *node = head->first;

	while (node != WAKE_Q_TAIL) {
		struct task_struct *task;

		task = container_of(node, struct task_struct, wake_q);
		/* Task can safely be re-inserted now: */
		node = node->next;
		task->wake_q.next = NULL;

		/*
		 * wake_up_process() executes a full barrier, which pairs with
		 * the queueing in wake_q_add() so as not to miss wakeups.
		 */
		wake_up_process(task);
		put_task_struct(task);
	}
}

/*
 * resched_curr - mark rq's current task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
void resched_curr(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	int cpu;

	lockdep_assert_rq_held(rq);

	if (test_tsk_need_resched(curr))
		return;

	cpu = cpu_of(rq);

	if (cpu == smp_processor_id()) {
		set_tsk_need_resched(curr);
		set_preempt_need_resched();
		return;
	}

	if (set_nr_and_not_polling(curr))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

void resched_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	raw_spin_rq_lock_irqsave(rq, flags);
	if (cpu_online(cpu) || cpu == smp_processor_id())
		resched_curr(rq);
	raw_spin_rq_unlock_irqrestore(rq, flags);
}

#ifdef CONFIG_SMP
#ifdef CONFIG_NO_HZ_COMMON
/*
 * In the semi idle case, use the nearest busy CPU for migrating timers
 * from an idle CPU. This is good for power-savings.
 *
 * We don't do a similar optimization for a completely idle system, as
 * selecting an idle CPU will add more delays to the timers than intended
 * (as that CPU's timer base may not be up to date wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
	int i, cpu = smp_processor_id(), default_cpu = -1;
	struct sched_domain *sd;
	const struct cpumask *hk_mask;

	if (housekeeping_cpu(cpu, HK_TYPE_TIMER)) {
		if (!idle_cpu(cpu))
			return cpu;
		default_cpu = cpu;
	}

	hk_mask = housekeeping_cpumask(HK_TYPE_TIMER);

	guard(rcu)();

	for_each_domain(cpu, sd) {
		for_each_cpu_and(i, sched_domain_span(sd), hk_mask) {
			if (cpu == i)
				continue;

			if (!idle_cpu(i))
				return i;
		}
	}

	if (default_cpu == -1)
		default_cpu = housekeeping_any_cpu(HK_TYPE_TIMER);

	return default_cpu;
}

/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
static void wake_up_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (cpu == smp_processor_id())
		return;

	/*
	 * Set TIF_NEED_RESCHED and send an IPI if in the non-polling
	 * part of the idle loop. This forces an exit from the idle loop
	 * and a round trip to schedule(). Now this could be optimized
	 * because a simple new idle loop iteration is enough to
	 * re-evaluate the next tick. Provided some re-ordering of tick
	 * nohz functions that would need to follow TIF_NR_POLLING
	 * clearing:
	 *
	 * - On most architectures, a simple fetch_or on ti::flags with a
	 *   "0" value would be enough to know if an IPI needs to be sent.
	 *
	 * - x86 needs to perform a last need_resched() check between
	 *   monitor and mwait which doesn't take timers into account.
	 *   There a dedicated TIF_TIMER flag would be required to
	 *   fetch_or here and be checked along with TIF_NEED_RESCHED
	 *   before mwait().
	 *
	 * However, remote timer enqueue is not such a frequent event
	 * and testing of the above solutions didn't appear to show
	 * much benefit.
	 */
	if (set_nr_and_not_polling(rq->idle))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

static bool wake_up_full_nohz_cpu(int cpu)
{
	/*
	 * We just need the target to call irq_exit() and re-evaluate
	 * the next tick. The nohz full kick at least implies that.
	 * If needed we can still optimize that later with an
	 * empty IRQ.
	 */
	if (cpu_is_offline(cpu))
		return true;  /* Don't try to wake offline CPUs. */
	if (tick_nohz_full_cpu(cpu)) {
		if (cpu != smp_processor_id() ||
		    tick_nohz_tick_stopped())
			tick_nohz_full_kick_cpu(cpu);
		return true;
	}

	return false;
}

/*
 * Wake up the specified CPU. If the CPU is going offline, it is the
 * caller's responsibility to deal with the lost wakeup, for example,
 * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
 */
void wake_up_nohz_cpu(int cpu)
{
	if (!wake_up_full_nohz_cpu(cpu))
		wake_up_idle_cpu(cpu);
}

static void nohz_csd_func(void *info)
{
	struct rq *rq = info;
	int cpu = cpu_of(rq);
	unsigned int flags;

	/*
	 * Release the rq::nohz_csd.
	 */
	flags = atomic_fetch_andnot(NOHZ_KICK_MASK | NOHZ_NEWILB_KICK, nohz_flags(cpu));
	WARN_ON(!(flags & NOHZ_KICK_MASK));

	rq->idle_balance = idle_cpu(cpu);
	if (rq->idle_balance && !need_resched()) {
		rq->nohz_idle_balance = flags;
		raise_softirq_irqoff(SCHED_SOFTIRQ);
	}
}

#endif /* CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_NO_HZ_FULL
static inline bool __need_bw_check(struct rq *rq, struct task_struct *p)
{
	if (rq->nr_running != 1)
		return false;

	if (p->sched_class != &fair_sched_class)
		return false;

	if (!task_on_rq_queued(p))
		return false;

	return true;
}

bool sched_can_stop_tick(struct rq *rq)
{
	int fifo_nr_running;

	/* Deadline tasks, even if single, need the tick */
	if (rq->dl.dl_nr_running)
		return false;

	/*
	 * If there is more than one RR task, we need the tick to affect the
	 * actual RR behaviour.
	 */
	if (rq->rt.rr_nr_running) {
		if (rq->rt.rr_nr_running == 1)
			return true;
		else
			return false;
	}

	/*
	 * If there are no RR tasks, but there are FIFO tasks, we can skip the
	 * tick: there is no forced preemption between FIFO tasks.
	 */
	fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
	if (fifo_nr_running)
		return true;

	/*
	 * If there are no DL, RR or FIFO tasks, there must only be CFS tasks
	 * left; if there's more than one we need the tick for involuntary
	 * preemption.
	 */
	if (rq->nr_running > 1)
		return false;

	/*
	 * If there is one task and it has CFS runtime bandwidth constraints
	 * and it's on the cpu now, we don't want to stop the tick.
	 * This check prevents clearing the bit if a newly enqueued task here is
	 * dequeued by migrating while the constrained task continues to run.
	 * E.g. going from 2->1 without going through pick_next_task().
	 */
	if (__need_bw_check(rq, rq->curr)) {
		if (cfs_task_bw_constrained(rq->curr))
			return false;
	}

	return true;
}
#endif /* CONFIG_NO_HZ_FULL */
#endif /* CONFIG_SMP */

#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
/*
 * Iterate task_group tree rooted at *from, calling @down when first entering a
 * node and @up when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
int walk_tg_tree_from(struct task_group *from,
		      tg_visitor down, tg_visitor up, void *data)
{
	struct task_group *parent, *child;
	int ret;

	parent = from;

down:
	ret = (*down)(parent, data);
	if (ret)
		goto out;
	list_for_each_entry_rcu(child, &parent->children, siblings) {
		parent = child;
		goto down;

up:
		continue;
	}
	ret = (*up)(parent, data);
	if (ret || parent == from)
		goto out;

	child = parent;
	parent = parent->parent;
	if (parent)
		goto up;
out:
	return ret;
}

int tg_nop(struct task_group *tg, void *data)
{
	return 0;
}
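/*
 * Illustrative sketch (assumption, not an in-tree walker): visiting every
 * task_group from the root, doing work on the way down and nothing on the
 * way up (tg_nop):
 */
static int __maybe_unused tg_visit_example(struct task_group *tg, void *data)
{
	/* per-group work goes here; returning non-zero aborts the walk */
	return 0;
}

static inline int __maybe_unused walk_all_tgs_example(void *data)
{
	int ret;

	rcu_read_lock();
	ret = walk_tg_tree_from(&root_task_group, tg_visit_example, tg_nop, data);
	rcu_read_unlock();

	return ret;
}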
#endif

void set_load_weight(struct task_struct *p, bool update_load)
{
	int prio = p->static_prio - MAX_RT_PRIO;
	struct load_weight lw;

	if (task_has_idle_policy(p)) {
		lw.weight = scale_load(WEIGHT_IDLEPRIO);
		lw.inv_weight = WMULT_IDLEPRIO;
	} else {
		lw.weight = scale_load(sched_prio_to_weight[prio]);
		lw.inv_weight = sched_prio_to_wmult[prio];
	}

	/*
	 * SCHED_OTHER tasks have to update their load when changing their
	 * weight.
	 */
	if (update_load && p->sched_class == &fair_sched_class)
		reweight_task(p, &lw);
	else
		p->se.load = lw;
}
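/*
 * Worked example (illustrative, not in-tree code): the nice -> weight
 * mapping used above. A nice-0 task has static_prio == 120, so prio == 20
 * and sched_prio_to_weight[20] == 1024; adjacent nice levels differ by a
 * weight factor of ~1.25.
 */
static inline unsigned long __maybe_unused nice_to_weight_example(int nice)
{
	return sched_prio_to_weight[nice + 20];		/* nice in [-20, 19] */
}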
#ifdef CONFIG_UCLAMP_TASK
/*
 * Serializes updates of utilization clamp values
 *
 * The (slow-path) user-space triggers utilization clamp value updates which
 * can require updates on (fast-path) scheduler's data structures used to
 * support enqueue/dequeue operations.
 * While the per-CPU rq lock protects fast-path update operations, user-space
 * requests are serialized using a mutex to reduce the risk of conflicting
 * updates or API abuses.
 */
static DEFINE_MUTEX(uclamp_mutex);

/* Max allowed minimum utilization */
static unsigned int __maybe_unused sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;

/* Max allowed maximum utilization */
static unsigned int __maybe_unused sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;

/*
 * By default RT tasks run at the maximum performance point/capacity of the
 * system. Uclamp enforces this by always setting UCLAMP_MIN of RT tasks to
 * SCHED_CAPACITY_SCALE.
 *
 * This knob allows admins to change the default behavior when uclamp is being
 * used. In battery powered devices, particularly, running at the maximum
 * capacity and frequency will increase energy consumption and shorten the
 * battery life.
 *
 * This knob only affects RT tasks whose uclamp_se->user_defined == false.
 *
 * This knob will not override the system default sched_util_clamp_min defined
 * above.
 */
unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE;

/* All clamps are required to be less than or equal to these values */
static struct uclamp_se uclamp_default[UCLAMP_CNT];

/*
 * This static key is used to reduce the uclamp overhead in the fast path. It
 * primarily disables the call to uclamp_rq_{inc, dec}() in
 * enqueue/dequeue_task().
 *
 * This allows users to continue to enable uclamp in their kernel config with
 * minimum uclamp overhead in the fast path.
 *
 * As soon as userspace modifies any of the uclamp knobs, the static key is
 * enabled, since there are actual users that make use of uclamp
 * functionality.
 *
 * The knobs that would enable this static key are:
 *
 *   * A task modifying its uclamp value with sched_setattr().
 *   * An admin modifying the sysctl_sched_uclamp_{min, max} via procfs.
 *   * An admin modifying the cgroup cpu.uclamp.{min, max}.
 */
DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);

static inline unsigned int
uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
		  unsigned int clamp_value)
{
	/*
	 * Avoid blocked utilization pushing up the frequency when we go
	 * idle (which drops the max-clamp) by retaining the last known
	 * max-clamp.
	 */
	if (clamp_id == UCLAMP_MAX) {
		rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
		return clamp_value;
	}

	return uclamp_none(UCLAMP_MIN);
}

static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
				     unsigned int clamp_value)
{
	/* Reset max-clamp retention only on idle exit */
	if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
		return;

	uclamp_rq_set(rq, clamp_id, clamp_value);
}

static inline
unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
				 unsigned int clamp_value)
{
	struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
	int bucket_id = UCLAMP_BUCKETS - 1;

	/*
	 * Since both min and max clamps are max aggregated, find the
	 * topmost bucket with tasks in it.
	 */
	for ( ; bucket_id >= 0; bucket_id--) {
		if (!bucket[bucket_id].tasks)
			continue;
		return bucket[bucket_id].value;
	}

	/* No tasks -- default clamp values */
	return uclamp_idle_value(rq, clamp_id, clamp_value);
}

static void __uclamp_update_util_min_rt_default(struct task_struct *p)
{
	unsigned int default_util_min;
	struct uclamp_se *uc_se;

	lockdep_assert_held(&p->pi_lock);

	uc_se = &p->uclamp_req[UCLAMP_MIN];

	/* Only sync if user didn't override the default */
	if (uc_se->user_defined)
		return;

	default_util_min = sysctl_sched_uclamp_util_min_rt_default;
	uclamp_se_set(uc_se, default_util_min, false);
}

static void uclamp_update_util_min_rt_default(struct task_struct *p)
{
	if (!rt_task(p))
		return;

	/* Protect updates to p->uclamp_* */
	guard(task_rq_lock)(p);
	__uclamp_update_util_min_rt_default(p);
}

static inline struct uclamp_se
uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id)
{
	/* Copy by value as we could modify it */
	struct uclamp_se uc_req = p->uclamp_req[clamp_id];
#ifdef CONFIG_UCLAMP_TASK_GROUP
	unsigned int tg_min, tg_max, value;

	/*
	 * Tasks in autogroups or root task group will be
	 * restricted by system defaults.
	 */
	if (task_group_is_autogroup(task_group(p)))
		return uc_req;
	if (task_group(p) == &root_task_group)
		return uc_req;

	tg_min = task_group(p)->uclamp[UCLAMP_MIN].value;
	tg_max = task_group(p)->uclamp[UCLAMP_MAX].value;
	value = uc_req.value;
	value = clamp(value, tg_min, tg_max);
	uclamp_se_set(&uc_req, value, false);
#endif

	return uc_req;
}

/*
 * The effective clamp bucket index of a task depends on, by increasing
 * priority:
 * - the task specific clamp value, when explicitly requested from userspace
 * - the task group effective clamp value, for tasks not either in the root
 *   group or in an autogroup
 * - the system default clamp value, defined by the sysadmin
 */
static inline struct uclamp_se
uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
{
	struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id);
	struct uclamp_se uc_max = uclamp_default[clamp_id];

	/* System default restrictions always apply */
	if (unlikely(uc_req.value > uc_max.value))
		return uc_max;

	return uc_req;
}

unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
{
	struct uclamp_se uc_eff;

	/* Task currently refcounted: use back-annotated (effective) value */
	if (p->uclamp[clamp_id].active)
		return (unsigned long)p->uclamp[clamp_id].value;

	uc_eff = uclamp_eff_get(p, clamp_id);

	return (unsigned long)uc_eff.value;
}
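/*
 * Worked example (illustrative): a task requests UCLAMP_MIN = 512, its
 * task group allows [0, 400], and the system default cap for UCLAMP_MIN
 * is the full 1024. uclamp_tg_restrict() clamps the request to 400, and
 * since 400 <= 1024, uclamp_eff_get() returns the group-restricted value,
 * so uclamp_eff_value() reports 400.
 */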
/*
 * When a task is enqueued on a rq, the clamp bucket currently defined by the
 * task's uclamp::bucket_id is refcounted on that rq. This also immediately
 * updates the rq's clamp value if required.
 *
 * Tasks can have a task-specific value requested from user-space; we track
 * within each bucket the maximum value for tasks refcounted in it.
 * This "local max aggregation" allows tracking the exact "requested" value
 * for each bucket when all its RUNNABLE tasks require the same clamp.
 */
static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
				    enum uclamp_id clamp_id)
{
	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
	struct uclamp_bucket *bucket;

	lockdep_assert_rq_held(rq);

	/* Update task effective clamp */
	p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id);

	bucket = &uc_rq->bucket[uc_se->bucket_id];
	bucket->tasks++;
	uc_se->active = true;

	uclamp_idle_reset(rq, clamp_id, uc_se->value);

	/*
	 * Local max aggregation: rq buckets always track the max
	 * "requested" clamp value of its RUNNABLE tasks.
	 */
	if (bucket->tasks == 1 || uc_se->value > bucket->value)
		bucket->value = uc_se->value;

	if (uc_se->value > uclamp_rq_get(rq, clamp_id))
		uclamp_rq_set(rq, clamp_id, uc_se->value);
}
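/*
 * Worked example (illustrative): two RUNNABLE tasks land in the same
 * bucket, requesting 300 and 200. The bucket tracks the local max, 300,
 * and the rq-wide clamp is the max across buckets. When the 300 task is
 * dequeued, the bucket keeps 300 (accepted over-boost, see
 * uclamp_rq_dec_id() below) until it has no tasks left, at which point
 * the rq value is recomputed from the remaining buckets.
 */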
/*
 * When a task is dequeued from a rq, the clamp bucket refcounted by the task
 * is released. If this is the last task reference counting the rq's max
 * active clamp value, then the rq's clamp value is updated.
 *
 * Both refcounted tasks and rq's cached clamp values are expected to be
 * always valid. If it's detected they are not, as defensive programming,
 * enforce the expected state and warn.
 */
static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
				    enum uclamp_id clamp_id)
{
	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
	struct uclamp_bucket *bucket;
	unsigned int bkt_clamp;
	unsigned int rq_clamp;

	lockdep_assert_rq_held(rq);

	/*
	 * If sched_uclamp_used was enabled after task @p was enqueued,
	 * we could end up with an unbalanced call to uclamp_rq_dec_id().
	 *
	 * In this case the uc_se->active flag should be false since no uclamp
	 * accounting was performed at enqueue time and we can just return
	 * here.
	 *
	 * Need to be careful of the following enqueue/dequeue ordering
	 * problem too:
	 *
	 *	enqueue(taskA)
	 *	// sched_uclamp_used gets enabled
	 *	enqueue(taskB)
	 *	dequeue(taskA)
	 *	// Must not decrement bucket->tasks here
	 *	dequeue(taskB)
	 *
	 * where we could end up with stale data in uc_se and
	 * bucket[uc_se->bucket_id].
	 *
	 * The following check here eliminates the possibility of such race.
	 */
	if (unlikely(!uc_se->active))
		return;

	bucket = &uc_rq->bucket[uc_se->bucket_id];

	SCHED_WARN_ON(!bucket->tasks);
	if (likely(bucket->tasks))
		bucket->tasks--;

	uc_se->active = false;

	/*
	 * Keep "local max aggregation" simple and accept to (possibly)
	 * overboost some RUNNABLE tasks in the same bucket.
	 * The rq clamp bucket value is reset to its base value whenever
	 * there are no more RUNNABLE tasks refcounting it.
	 */
	if (likely(bucket->tasks))
		return;

	rq_clamp = uclamp_rq_get(rq, clamp_id);
	/*
	 * Defensive programming: this should never happen. If it happens,
	 * e.g. due to future modification, warn and fix up the expected value.
	 */
	SCHED_WARN_ON(bucket->value > rq_clamp);
	if (bucket->value >= rq_clamp) {
		bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
		uclamp_rq_set(rq, clamp_id, bkt_clamp);
	}
}

static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
{
	enum uclamp_id clamp_id;

	/*
	 * Avoid any overhead until uclamp is actually used by userspace.
	 *
	 * The condition is constructed such that a NOP is generated when
	 * sched_uclamp_used is disabled.
	 */
	if (!static_branch_unlikely(&sched_uclamp_used))
		return;

	if (unlikely(!p->sched_class->uclamp_enabled))
		return;

	for_each_clamp_id(clamp_id)
		uclamp_rq_inc_id(rq, p, clamp_id);

	/* Reset clamp idle holding when there is one RUNNABLE task */
	if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
		rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
}

static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
{
	enum uclamp_id clamp_id;

	/*
	 * Avoid any overhead until uclamp is actually used by userspace.
	 *
	 * The condition is constructed such that a NOP is generated when
	 * sched_uclamp_used is disabled.
	 */
	if (!static_branch_unlikely(&sched_uclamp_used))
		return;

	if (unlikely(!p->sched_class->uclamp_enabled))
		return;

	for_each_clamp_id(clamp_id)
		uclamp_rq_dec_id(rq, p, clamp_id);
}

static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p,
				      enum uclamp_id clamp_id)
{
	if (!p->uclamp[clamp_id].active)
		return;

	uclamp_rq_dec_id(rq, p, clamp_id);
	uclamp_rq_inc_id(rq, p, clamp_id);

	/*
	 * Make sure to clear the idle flag if we've transiently reached 0
	 * active tasks on rq.
	 */
	if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE))
		rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
}

static inline void
uclamp_update_active(struct task_struct *p)
{
	enum uclamp_id clamp_id;
	struct rq_flags rf;
	struct rq *rq;

	/*
	 * Lock the task and the rq where the task is (or was) queued.
	 *
	 * We might lock the (previous) rq of a !RUNNABLE task, but that's the
	 * price to pay to safely serialize util_{min,max} updates with
	 * enqueues, dequeues and migration operations.
	 * This is the same locking schema used by __set_cpus_allowed_ptr().
	 */
	rq = task_rq_lock(p, &rf);

	/*
	 * Setting the clamp bucket is serialized by task_rq_lock().
	 * If the task is not yet RUNNABLE and its task_struct is not
	 * affecting a valid clamp bucket, the next time it's enqueued,
	 * it will already see the updated clamp bucket value.
	 */
	for_each_clamp_id(clamp_id)
		uclamp_rq_reinc_id(rq, p, clamp_id);

	task_rq_unlock(rq, p, &rf);
}

#ifdef CONFIG_UCLAMP_TASK_GROUP
static inline void
uclamp_update_active_tasks(struct cgroup_subsys_state *css)
{
	struct css_task_iter it;
	struct task_struct *p;

	css_task_iter_start(css, 0, &it);
	while ((p = css_task_iter_next(&it)))
		uclamp_update_active(p);
	css_task_iter_end(&it);
}

static void cpu_util_update_eff(struct cgroup_subsys_state *css);
#endif

#ifdef CONFIG_SYSCTL
#ifdef CONFIG_UCLAMP_TASK_GROUP
static void uclamp_update_root_tg(void)
{
	struct task_group *tg = &root_task_group;

	uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN],
		      sysctl_sched_uclamp_util_min, false);
	uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX],
		      sysctl_sched_uclamp_util_max, false);

	guard(rcu)();
	cpu_util_update_eff(&root_task_group.css);
}
#else
static void uclamp_update_root_tg(void) { }
#endif

static void uclamp_sync_util_min_rt_default(void)
{
	struct task_struct *g, *p;

	/*
	 * copy_process()			sysctl_uclamp
	 *					  uclamp_min_rt = X;
	 *   write_lock(&tasklist_lock)		  read_lock(&tasklist_lock)
	 *   // link thread			  smp_mb__after_spinlock()
	 *   write_unlock(&tasklist_lock)	  read_unlock(&tasklist_lock);
	 *   sched_post_fork()			  for_each_process_thread()
	 *     __uclamp_sync_rt()		    __uclamp_sync_rt()
	 *
	 * Ensures that either sched_post_fork() will observe the new
	 * uclamp_min_rt or for_each_process_thread() will observe the new
	 * task.
	 */
	read_lock(&tasklist_lock);
	smp_mb__after_spinlock();
	read_unlock(&tasklist_lock);

	guard(rcu)();
	for_each_process_thread(g, p)
		uclamp_update_util_min_rt_default(p);
}

static int sysctl_sched_uclamp_handler(const struct ctl_table *table, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{
	bool update_root_tg = false;
	int old_min, old_max, old_min_rt;
	int result;

	guard(mutex)(&uclamp_mutex);

	old_min = sysctl_sched_uclamp_util_min;
	old_max = sysctl_sched_uclamp_util_max;
	old_min_rt = sysctl_sched_uclamp_util_min_rt_default;

	result = proc_dointvec(table, write, buffer, lenp, ppos);
	if (result)
		goto undo;
	if (!write)
		return 0;

	if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
	    sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE	||
	    sysctl_sched_uclamp_util_min_rt_default > SCHED_CAPACITY_SCALE) {

		result = -EINVAL;
		goto undo;
	}

	if (old_min != sysctl_sched_uclamp_util_min) {
		uclamp_se_set(&uclamp_default[UCLAMP_MIN],
			      sysctl_sched_uclamp_util_min, false);
		update_root_tg = true;
	}
	if (old_max != sysctl_sched_uclamp_util_max) {
		uclamp_se_set(&uclamp_default[UCLAMP_MAX],
			      sysctl_sched_uclamp_util_max, false);
		update_root_tg = true;
	}

	if (update_root_tg) {
		static_branch_enable(&sched_uclamp_used);
		uclamp_update_root_tg();
	}

	if (old_min_rt != sysctl_sched_uclamp_util_min_rt_default) {
		static_branch_enable(&sched_uclamp_used);
		uclamp_sync_util_min_rt_default();
	}

	/*
	 * We update all RUNNABLE tasks only when task groups are in use.
	 * Otherwise, keep it simple and do just a lazy update at each next
	 * task enqueue time.
	 */
	return 0;

undo:
	sysctl_sched_uclamp_util_min = old_min;
	sysctl_sched_uclamp_util_max = old_max;
	sysctl_sched_uclamp_util_min_rt_default = old_min_rt;
	return result;
}
#endif

static void uclamp_fork(struct task_struct *p)
{
	enum uclamp_id clamp_id;

	/*
	 * We don't need to hold task_rq_lock() when updating p->uclamp_* here
	 * as the task is still at its early fork stages.
	 */
	for_each_clamp_id(clamp_id)
		p->uclamp[clamp_id].active = false;

	if (likely(!p->sched_reset_on_fork))
		return;

	for_each_clamp_id(clamp_id) {
		uclamp_se_set(&p->uclamp_req[clamp_id],
			      uclamp_none(clamp_id), false);
	}
}

static void uclamp_post_fork(struct task_struct *p)
{
	uclamp_update_util_min_rt_default(p);
}

static void __init init_uclamp_rq(struct rq *rq)
{
	enum uclamp_id clamp_id;
	struct uclamp_rq *uc_rq = rq->uclamp;

	for_each_clamp_id(clamp_id) {
		uc_rq[clamp_id] = (struct uclamp_rq) {
			.value = uclamp_none(clamp_id)
		};
	}

	rq->uclamp_flags = UCLAMP_FLAG_IDLE;
}

static void __init init_uclamp(void)
{
	struct uclamp_se uc_max = {};
	enum uclamp_id clamp_id;
	int cpu;

	for_each_possible_cpu(cpu)
		init_uclamp_rq(cpu_rq(cpu));

	for_each_clamp_id(clamp_id) {
		uclamp_se_set(&init_task.uclamp_req[clamp_id],
			      uclamp_none(clamp_id), false);
	}

	/* System defaults allow max clamp values for both indexes */
	uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false);
	for_each_clamp_id(clamp_id) {
		uclamp_default[clamp_id] = uc_max;
#ifdef CONFIG_UCLAMP_TASK_GROUP
		root_task_group.uclamp_req[clamp_id] = uc_max;
		root_task_group.uclamp[clamp_id] = uc_max;
#endif
	}
}

#else /* !CONFIG_UCLAMP_TASK */
static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { }
static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
static inline void uclamp_fork(struct task_struct *p) { }
static inline void uclamp_post_fork(struct task_struct *p) { }
static inline void init_uclamp(void) { }
#endif /* CONFIG_UCLAMP_TASK */

bool sched_task_on_rq(struct task_struct *p)
{
	return task_on_rq_queued(p);
}

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ip = 0;
	unsigned int state;

	if (!p || p == current)
		return 0;

	/* Only get wchan if task is blocked and we can keep it that way. */
*/ 1977 raw_spin_lock_irq(&p->pi_lock); 1978 state = READ_ONCE(p->__state); 1979 smp_rmb(); /* see try_to_wake_up() */ 1980 if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq) 1981 ip = __get_wchan(p); 1982 raw_spin_unlock_irq(&p->pi_lock); 1983 1984 return ip; 1985 } 1986 1987 void enqueue_task(struct rq *rq, struct task_struct *p, int flags) 1988 { 1989 if (!(flags & ENQUEUE_NOCLOCK)) 1990 update_rq_clock(rq); 1991 1992 if (!(flags & ENQUEUE_RESTORE)) { 1993 sched_info_enqueue(rq, p); 1994 psi_enqueue(p, (flags & ENQUEUE_WAKEUP) && !(flags & ENQUEUE_MIGRATED)); 1995 } 1996 1997 uclamp_rq_inc(rq, p); 1998 p->sched_class->enqueue_task(rq, p, flags); 1999 2000 if (sched_core_enabled(rq)) 2001 sched_core_enqueue(rq, p); 2002 } 2003 2004 void dequeue_task(struct rq *rq, struct task_struct *p, int flags) 2005 { 2006 if (sched_core_enabled(rq)) 2007 sched_core_dequeue(rq, p, flags); 2008 2009 if (!(flags & DEQUEUE_NOCLOCK)) 2010 update_rq_clock(rq); 2011 2012 if (!(flags & DEQUEUE_SAVE)) { 2013 sched_info_dequeue(rq, p); 2014 psi_dequeue(p, flags & DEQUEUE_SLEEP); 2015 } 2016 2017 uclamp_rq_dec(rq, p); 2018 p->sched_class->dequeue_task(rq, p, flags); 2019 } 2020 2021 void activate_task(struct rq *rq, struct task_struct *p, int flags) 2022 { 2023 if (task_on_rq_migrating(p)) 2024 flags |= ENQUEUE_MIGRATED; 2025 if (flags & ENQUEUE_MIGRATED) 2026 sched_mm_cid_migrate_to(rq, p); 2027 2028 enqueue_task(rq, p, flags); 2029 2030 WRITE_ONCE(p->on_rq, TASK_ON_RQ_QUEUED); 2031 ASSERT_EXCLUSIVE_WRITER(p->on_rq); 2032 } 2033 2034 void deactivate_task(struct rq *rq, struct task_struct *p, int flags) 2035 { 2036 WRITE_ONCE(p->on_rq, (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING); 2037 ASSERT_EXCLUSIVE_WRITER(p->on_rq); 2038 2039 dequeue_task(rq, p, flags); 2040 } 2041 2042 /** 2043 * task_curr - is this task currently executing on a CPU? 2044 * @p: the task in question. 2045 * 2046 * Return: 1 if the task is currently executing. 0 otherwise. 2047 */ 2048 inline int task_curr(const struct task_struct *p) 2049 { 2050 return cpu_curr(task_cpu(p)) == p; 2051 } 2052 2053 /* 2054 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock, 2055 * use the balance_callback list if you want balancing. 2056 * 2057 * this means any call to check_class_changed() must be followed by a call to 2058 * balance_callback(). 2059 */ 2060 void check_class_changed(struct rq *rq, struct task_struct *p, 2061 const struct sched_class *prev_class, 2062 int oldprio) 2063 { 2064 if (prev_class != p->sched_class) { 2065 if (prev_class->switched_from) 2066 prev_class->switched_from(rq, p); 2067 2068 p->sched_class->switched_to(rq, p); 2069 } else if (oldprio != p->prio || dl_task(p)) 2070 p->sched_class->prio_changed(rq, p, oldprio); 2071 } 2072 2073 void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags) 2074 { 2075 if (p->sched_class == rq->curr->sched_class) 2076 rq->curr->sched_class->wakeup_preempt(rq, p, flags); 2077 else if (sched_class_above(p->sched_class, rq->curr->sched_class)) 2078 resched_curr(rq); 2079 2080 /* 2081 * A queue event has occurred, and we're going to schedule. In 2082 * this case, we can save a useless back to back clock update. 
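*
* (Mechanism sketch: rq_clock_skip_update() latches RQCF_REQ_SKIP in
* rq->clock_update_flags, and the imminent __schedule() then treats its
* own update_rq_clock() as a no-op.)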
2083 */ 2084 if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr)) 2085 rq_clock_skip_update(rq); 2086 } 2087 2088 static __always_inline 2089 int __task_state_match(struct task_struct *p, unsigned int state) 2090 { 2091 if (READ_ONCE(p->__state) & state) 2092 return 1; 2093 2094 if (READ_ONCE(p->saved_state) & state) 2095 return -1; 2096 2097 return 0; 2098 } 2099 2100 static __always_inline 2101 int task_state_match(struct task_struct *p, unsigned int state) 2102 { 2103 /* 2104 * Serialize against current_save_and_set_rtlock_wait_state(), 2105 * current_restore_rtlock_saved_state(), and __refrigerator(). 2106 */ 2107 guard(raw_spinlock_irq)(&p->pi_lock); 2108 return __task_state_match(p, state); 2109 } 2110 2111 /* 2112 * wait_task_inactive - wait for a thread to unschedule. 2113 * 2114 * Wait for the thread to block in any of the states set in @match_state. 2115 * If it changes, i.e. @p might have woken up, then return zero. When we 2116 * succeed in waiting for @p to be off its CPU, we return a positive number 2117 * (its total switch count). If a second call a short while later returns the 2118 * same number, the caller can be sure that @p has remained unscheduled the 2119 * whole time. 2120 * 2121 * The caller must ensure that the task *will* unschedule sometime soon, 2122 * else this function might spin for a *long* time. This function can't 2123 * be called with interrupts off, or it may introduce deadlock with 2124 * smp_call_function() if an IPI is sent by the same process we are 2125 * waiting to become inactive. 2126 */ 2127 unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state) 2128 { 2129 int running, queued, match; 2130 struct rq_flags rf; 2131 unsigned long ncsw; 2132 struct rq *rq; 2133 2134 for (;;) { 2135 /* 2136 * We do the initial early heuristics without holding 2137 * any task-queue locks at all. We'll only try to get 2138 * the runqueue lock when things look like they will 2139 * work out! 2140 */ 2141 rq = task_rq(p); 2142 2143 /* 2144 * If the task is actively running on another CPU 2145 * still, just relax and busy-wait without holding 2146 * any locks. 2147 * 2148 * NOTE! Since we don't hold any locks, it's not 2149 * even sure that "rq" stays as the right runqueue! 2150 * But we don't care, since "task_on_cpu()" will 2151 * return false if the runqueue has changed and p 2152 * is actually now running somewhere else! 2153 */ 2154 while (task_on_cpu(rq, p)) { 2155 if (!task_state_match(p, match_state)) 2156 return 0; 2157 cpu_relax(); 2158 } 2159 2160 /* 2161 * Ok, time to look more closely! We need the rq 2162 * lock now, to be *sure*. If we're wrong, we'll 2163 * just go back and repeat. 2164 */ 2165 rq = task_rq_lock(p, &rf); 2166 trace_sched_wait_task(p); 2167 running = task_on_cpu(rq, p); 2168 queued = task_on_rq_queued(p); 2169 ncsw = 0; 2170 if ((match = __task_state_match(p, match_state))) { 2171 /* 2172 * When matching on p->saved_state, consider this task 2173 * still queued so it will wait. 2174 */ 2175 if (match < 0) 2176 queued = 1; 2177 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ 2178 } 2179 task_rq_unlock(rq, p, &rf); 2180 2181 /* 2182 * If it changed from the expected state, bail out now. 2183 */ 2184 if (unlikely(!ncsw)) 2185 break; 2186 2187 /* 2188 * Was it really running after all now that we 2189 * checked with the proper locks actually held? 2190 * 2191 * Oops. Go back and try again.. 
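*
* (Spinning with cpu_relax() instead of sleeping is fine here: the
* caller promised @p will deschedule soon, see the comment above.)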
2192 */ 2193 if (unlikely(running)) { 2194 cpu_relax(); 2195 continue; 2196 } 2197 2198 /* 2199 * It's not enough that it's not actively running, 2200 * it must be off the runqueue _entirely_, and not 2201 * preempted! 2202 * 2203 * So if it was still runnable (but just not actively 2204 * running right now), it's preempted, and we should 2205 * yield - it could be a while. 2206 */ 2207 if (unlikely(queued)) { 2208 ktime_t to = NSEC_PER_SEC / HZ; 2209 2210 set_current_state(TASK_UNINTERRUPTIBLE); 2211 schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD); 2212 continue; 2213 } 2214 2215 /* 2216 * Ahh, all good. It wasn't running, and it wasn't 2217 * runnable, which means that it will never become 2218 * running in the future either. We're all done! 2219 */ 2220 break; 2221 } 2222 2223 return ncsw; 2224 } 2225 2226 #ifdef CONFIG_SMP 2227 2228 static void 2229 __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx); 2230 2231 static void migrate_disable_switch(struct rq *rq, struct task_struct *p) 2232 { 2233 struct affinity_context ac = { 2234 .new_mask = cpumask_of(rq->cpu), 2235 .flags = SCA_MIGRATE_DISABLE, 2236 }; 2237 2238 if (likely(!p->migration_disabled)) 2239 return; 2240 2241 if (p->cpus_ptr != &p->cpus_mask) 2242 return; 2243 2244 /* 2245 * Violates locking rules! See comment in __do_set_cpus_allowed(). 2246 */ 2247 __do_set_cpus_allowed(p, &ac); 2248 } 2249 2250 void migrate_disable(void) 2251 { 2252 struct task_struct *p = current; 2253 2254 if (p->migration_disabled) { 2255 #ifdef CONFIG_DEBUG_PREEMPT 2256 /* 2257 * Warn about overflow half-way through the range. 2258 */ 2259 WARN_ON_ONCE((s16)p->migration_disabled < 0); 2260 #endif 2261 p->migration_disabled++; 2262 return; 2263 } 2264 2265 guard(preempt)(); 2266 this_rq()->nr_pinned++; 2267 p->migration_disabled = 1; 2268 } 2269 EXPORT_SYMBOL_GPL(migrate_disable); 2270 2271 void migrate_enable(void) 2272 { 2273 struct task_struct *p = current; 2274 struct affinity_context ac = { 2275 .new_mask = &p->cpus_mask, 2276 .flags = SCA_MIGRATE_ENABLE, 2277 }; 2278 2279 #ifdef CONFIG_DEBUG_PREEMPT 2280 /* 2281 * Check both overflow from migrate_disable() and superfluous 2282 * migrate_enable(). 2283 */ 2284 if (WARN_ON_ONCE((s16)p->migration_disabled <= 0)) 2285 return; 2286 #endif 2287 2288 if (p->migration_disabled > 1) { 2289 p->migration_disabled--; 2290 return; 2291 } 2292 2293 /* 2294 * Ensure stop_task runs either before or after this, and that 2295 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule(). 2296 */ 2297 guard(preempt)(); 2298 if (p->cpus_ptr != &p->cpus_mask) 2299 __set_cpus_allowed_ptr(p, &ac); 2300 /* 2301 * Mustn't clear migration_disabled() until cpus_ptr points back at the 2302 * regular cpus_mask, otherwise things that race (eg. 2303 * select_fallback_rq) get confused. 2304 */ 2305 barrier(); 2306 p->migration_disabled = 0; 2307 this_rq()->nr_pinned--; 2308 } 2309 EXPORT_SYMBOL_GPL(migrate_enable); 2310 2311 static inline bool rq_has_pinned_tasks(struct rq *rq) 2312 { 2313 return rq->nr_pinned; 2314 } 2315 2316 /* 2317 * Per-CPU kthreads are allowed to run on !active && online CPUs, see 2318 * __set_cpus_allowed_ptr() and select_fallback_rq(). 2319 */ 2320 static inline bool is_cpu_allowed(struct task_struct *p, int cpu) 2321 { 2322 /* When not in the task's cpumask, no point in looking further. */ 2323 if (!cpumask_test_cpu(cpu, p->cpus_ptr)) 2324 return false; 2325 2326 /* migrate_disabled() must be allowed to finish.
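* Such a task may thus sit on an online but no-longer-active CPU for a
* while; it gets moved off once it re-enables migration, see
* migrate_enable() above.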
*/ 2327 if (is_migration_disabled(p)) 2328 return cpu_online(cpu); 2329 2330 /* Non kernel threads are not allowed during either online or offline. */ 2331 if (!(p->flags & PF_KTHREAD)) 2332 return cpu_active(cpu) && task_cpu_possible(cpu, p); 2333 2334 /* KTHREAD_IS_PER_CPU is always allowed. */ 2335 if (kthread_is_per_cpu(p)) 2336 return cpu_online(cpu); 2337 2338 /* Regular kernel threads don't get to stay during offline. */ 2339 if (cpu_dying(cpu)) 2340 return false; 2341 2342 /* But are allowed during online. */ 2343 return cpu_online(cpu); 2344 } 2345 2346 /* 2347 * This is how migration works: 2348 * 2349 * 1) we invoke migration_cpu_stop() on the target CPU using 2350 * stop_one_cpu(). 2351 * 2) stopper starts to run (implicitly forcing the migrated thread 2352 * off the CPU) 2353 * 3) it checks whether the migrated task is still in the wrong runqueue. 2354 * 4) if it's in the wrong runqueue then the migration thread removes 2355 * it and puts it into the right queue. 2356 * 5) stopper completes and stop_one_cpu() returns and the migration 2357 * is done. 2358 */ 2359 2360 /* 2361 * move_queued_task - move a queued task to new rq. 2362 * 2363 * Returns (locked) new rq. Old rq's lock is released. 2364 */ 2365 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf, 2366 struct task_struct *p, int new_cpu) 2367 { 2368 lockdep_assert_rq_held(rq); 2369 2370 deactivate_task(rq, p, DEQUEUE_NOCLOCK); 2371 set_task_cpu(p, new_cpu); 2372 rq_unlock(rq, rf); 2373 2374 rq = cpu_rq(new_cpu); 2375 2376 rq_lock(rq, rf); 2377 WARN_ON_ONCE(task_cpu(p) != new_cpu); 2378 activate_task(rq, p, 0); 2379 wakeup_preempt(rq, p, 0); 2380 2381 return rq; 2382 } 2383 2384 struct migration_arg { 2385 struct task_struct *task; 2386 int dest_cpu; 2387 struct set_affinity_pending *pending; 2388 }; 2389 2390 /* 2391 * @refs: number of wait_for_completion() 2392 * @stop_pending: is @stop_work in use 2393 */ 2394 struct set_affinity_pending { 2395 refcount_t refs; 2396 unsigned int stop_pending; 2397 struct completion done; 2398 struct cpu_stop_work stop_work; 2399 struct migration_arg arg; 2400 }; 2401 2402 /* 2403 * Move (not current) task off this CPU, onto the destination CPU. We're doing 2404 * this because either it can't run here any more (set_cpus_allowed() 2405 * away from this CPU, or CPU going down), or because we're 2406 * attempting to rebalance this task on exec (sched_exec). 2407 * 2408 * So we race with normal scheduler movements, but that's OK, as long 2409 * as the task is no longer on this CPU. 2410 */ 2411 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf, 2412 struct task_struct *p, int dest_cpu) 2413 { 2414 /* Affinity changed (again). */ 2415 if (!is_cpu_allowed(p, dest_cpu)) 2416 return rq; 2417 2418 rq = move_queued_task(rq, rf, p, dest_cpu); 2419 2420 return rq; 2421 } 2422 2423 /* 2424 * migration_cpu_stop - this will be executed by a high-prio stopper thread 2425 * and performs thread migration by bumping thread off CPU then 2426 * 'pushing' onto another runqueue. 2427 */ 2428 static int migration_cpu_stop(void *data) 2429 { 2430 struct migration_arg *arg = data; 2431 struct set_affinity_pending *pending = arg->pending; 2432 struct task_struct *p = arg->task; 2433 struct rq *rq = this_rq(); 2434 bool complete = false; 2435 struct rq_flags rf; 2436 2437 /* 2438 * The original target CPU might have gone down and we might 2439 * be on another CPU but it doesn't matter. 
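* (Everything below is (re)validated under p->pi_lock + rq->lock,
* starting with the task_rq(p) == rq check.)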
2440 */ 2441 local_irq_save(rf.flags); 2442 /* 2443 * We need to explicitly wake pending tasks before running 2444 * __migrate_task() such that we will not miss enforcing cpus_ptr 2445 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test. 2446 */ 2447 flush_smp_call_function_queue(); 2448 2449 raw_spin_lock(&p->pi_lock); 2450 rq_lock(rq, &rf); 2451 2452 /* 2453 * If we were passed a pending, then ->stop_pending was set, thus 2454 * p->migration_pending must have remained stable. 2455 */ 2456 WARN_ON_ONCE(pending && pending != p->migration_pending); 2457 2458 /* 2459 * If task_rq(p) != rq, it cannot be migrated here, because we're 2460 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because 2461 * we're holding p->pi_lock. 2462 */ 2463 if (task_rq(p) == rq) { 2464 if (is_migration_disabled(p)) 2465 goto out; 2466 2467 if (pending) { 2468 p->migration_pending = NULL; 2469 complete = true; 2470 2471 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) 2472 goto out; 2473 } 2474 2475 if (task_on_rq_queued(p)) { 2476 update_rq_clock(rq); 2477 rq = __migrate_task(rq, &rf, p, arg->dest_cpu); 2478 } else { 2479 p->wake_cpu = arg->dest_cpu; 2480 } 2481 2482 /* 2483 * XXX __migrate_task() can fail, at which point we might end 2484 * up running on a dodgy CPU, AFAICT this can only happen 2485 * during CPU hotplug, at which point we'll get pushed out 2486 * anyway, so it's probably not a big deal. 2487 */ 2488 2489 } else if (pending) { 2490 /* 2491 * This happens when we get migrated between migrate_enable()'s 2492 * preempt_enable() and scheduling the stopper task. At that 2493 * point we're a regular task again and not current anymore. 2494 * 2495 * A !PREEMPT kernel has a giant hole here, which makes it far 2496 * more likely. 2497 */ 2498 2499 /* 2500 * The task moved before the stopper got to run. We're holding 2501 * ->pi_lock, so the allowed mask is stable - if it got 2502 * somewhere allowed, we're done. 2503 */ 2504 if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) { 2505 p->migration_pending = NULL; 2506 complete = true; 2507 goto out; 2508 } 2509 2510 /* 2511 * When migrate_enable() hits a rq mis-match we can't reliably 2512 * determine is_migration_disabled() and so have to chase after 2513 * it. 
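* (I.e.: re-queue this very callback, via the stop_one_cpu_nowait()
* below, on whatever CPU the task is on by now, and retry there.)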
2514 */ 2515 WARN_ON_ONCE(!pending->stop_pending); 2516 preempt_disable(); 2517 task_rq_unlock(rq, p, &rf); 2518 stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop, 2519 &pending->arg, &pending->stop_work); 2520 preempt_enable(); 2521 return 0; 2522 } 2523 out: 2524 if (pending) 2525 pending->stop_pending = false; 2526 task_rq_unlock(rq, p, &rf); 2527 2528 if (complete) 2529 complete_all(&pending->done); 2530 2531 return 0; 2532 } 2533 2534 int push_cpu_stop(void *arg) 2535 { 2536 struct rq *lowest_rq = NULL, *rq = this_rq(); 2537 struct task_struct *p = arg; 2538 2539 raw_spin_lock_irq(&p->pi_lock); 2540 raw_spin_rq_lock(rq); 2541 2542 if (task_rq(p) != rq) 2543 goto out_unlock; 2544 2545 if (is_migration_disabled(p)) { 2546 p->migration_flags |= MDF_PUSH; 2547 goto out_unlock; 2548 } 2549 2550 p->migration_flags &= ~MDF_PUSH; 2551 2552 if (p->sched_class->find_lock_rq) 2553 lowest_rq = p->sched_class->find_lock_rq(p, rq); 2554 2555 if (!lowest_rq) 2556 goto out_unlock; 2557 2558 // XXX validate p is still the highest prio task 2559 if (task_rq(p) == rq) { 2560 deactivate_task(rq, p, 0); 2561 set_task_cpu(p, lowest_rq->cpu); 2562 activate_task(lowest_rq, p, 0); 2563 resched_curr(lowest_rq); 2564 } 2565 2566 double_unlock_balance(rq, lowest_rq); 2567 2568 out_unlock: 2569 rq->push_busy = false; 2570 raw_spin_rq_unlock(rq); 2571 raw_spin_unlock_irq(&p->pi_lock); 2572 2573 put_task_struct(p); 2574 return 0; 2575 } 2576 2577 /* 2578 * sched_class::set_cpus_allowed must do the below, but is not required to 2579 * actually call this function. 2580 */ 2581 void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx) 2582 { 2583 if (ctx->flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) { 2584 p->cpus_ptr = ctx->new_mask; 2585 return; 2586 } 2587 2588 cpumask_copy(&p->cpus_mask, ctx->new_mask); 2589 p->nr_cpus_allowed = cpumask_weight(ctx->new_mask); 2590 2591 /* 2592 * Swap in a new user_cpus_ptr if SCA_USER flag set 2593 */ 2594 if (ctx->flags & SCA_USER) 2595 swap(p->user_cpus_ptr, ctx->user_mask); 2596 } 2597 2598 static void 2599 __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx) 2600 { 2601 struct rq *rq = task_rq(p); 2602 bool queued, running; 2603 2604 /* 2605 * This here violates the locking rules for affinity, since we're only 2606 * supposed to change these variables while holding both rq->lock and 2607 * p->pi_lock. 2608 * 2609 * HOWEVER, it magically works, because ttwu() is the only code that 2610 * accesses these variables under p->pi_lock and only does so after 2611 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule() 2612 * before finish_task(). 2613 * 2614 * XXX do further audits, this smells like something putrid. 2615 */ 2616 if (ctx->flags & SCA_MIGRATE_DISABLE) 2617 SCHED_WARN_ON(!p->on_cpu); 2618 else 2619 lockdep_assert_held(&p->pi_lock); 2620 2621 queued = task_on_rq_queued(p); 2622 running = task_current(rq, p); 2623 2624 if (queued) { 2625 /* 2626 * Because __kthread_bind() calls this on blocked tasks without 2627 * holding rq->lock. 2628 */ 2629 lockdep_assert_rq_held(rq); 2630 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); 2631 } 2632 if (running) 2633 put_prev_task(rq, p); 2634 2635 p->sched_class->set_cpus_allowed(p, ctx); 2636 2637 if (queued) 2638 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); 2639 if (running) 2640 set_next_task(rq, p); 2641 } 2642 2643 /* 2644 * Used for kthread_bind() and select_fallback_rq(), in both cases the user 2645 * affinity (if any) should be destroyed too. 
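*
* An illustrative caller (a sketch; the real thing lives in kthread.c):
* __kthread_bind() pins a kthread with
*
*	do_set_cpus_allowed(p, cpumask_of(cpu));
*
* which also drops any user-requested mask via the SCA_USER handling
* below.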
2646 */ 2647 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) 2648 { 2649 struct affinity_context ac = { 2650 .new_mask = new_mask, 2651 .user_mask = NULL, 2652 .flags = SCA_USER, /* clear the user requested mask */ 2653 }; 2654 union cpumask_rcuhead { 2655 cpumask_t cpumask; 2656 struct rcu_head rcu; 2657 }; 2658 2659 __do_set_cpus_allowed(p, &ac); 2660 2661 /* 2662 * Because this is called with p->pi_lock held, it is not possible 2663 * to use kfree() here (when PREEMPT_RT=y), therefore punt to using 2664 * kfree_rcu(). 2665 */ 2666 kfree_rcu((union cpumask_rcuhead *)ac.user_mask, rcu); 2667 } 2668 2669 int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, 2670 int node) 2671 { 2672 cpumask_t *user_mask; 2673 unsigned long flags; 2674 2675 /* 2676 * Always clear dst->user_cpus_ptr first as their user_cpus_ptr's 2677 * may differ by now due to racing. 2678 */ 2679 dst->user_cpus_ptr = NULL; 2680 2681 /* 2682 * This check is racy and losing the race is a valid situation. 2683 * It is not worth the extra overhead of taking the pi_lock on 2684 * every fork/clone. 2685 */ 2686 if (data_race(!src->user_cpus_ptr)) 2687 return 0; 2688 2689 user_mask = alloc_user_cpus_ptr(node); 2690 if (!user_mask) 2691 return -ENOMEM; 2692 2693 /* 2694 * Use pi_lock to protect content of user_cpus_ptr 2695 * 2696 * Though unlikely, user_cpus_ptr can be reset to NULL by a concurrent 2697 * do_set_cpus_allowed(). 2698 */ 2699 raw_spin_lock_irqsave(&src->pi_lock, flags); 2700 if (src->user_cpus_ptr) { 2701 swap(dst->user_cpus_ptr, user_mask); 2702 cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr); 2703 } 2704 raw_spin_unlock_irqrestore(&src->pi_lock, flags); 2705 2706 if (unlikely(user_mask)) 2707 kfree(user_mask); 2708 2709 return 0; 2710 } 2711 2712 static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p) 2713 { 2714 struct cpumask *user_mask = NULL; 2715 2716 swap(p->user_cpus_ptr, user_mask); 2717 2718 return user_mask; 2719 } 2720 2721 void release_user_cpus_ptr(struct task_struct *p) 2722 { 2723 kfree(clear_user_cpus_ptr(p)); 2724 } 2725 2726 /* 2727 * This function is wildly self concurrent; here be dragons. 2728 * 2729 * 2730 * When given a valid mask, __set_cpus_allowed_ptr() must block until the 2731 * designated task is enqueued on an allowed CPU. If that task is currently 2732 * running, we have to kick it out using the CPU stopper. 2733 * 2734 * Migrate-Disable comes along and tramples all over our nice sandcastle. 2735 * Consider: 2736 * 2737 * Initial conditions: P0->cpus_mask = [0, 1] 2738 * 2739 * P0@CPU0 P1 2740 * 2741 * migrate_disable(); 2742 * <preempted> 2743 * set_cpus_allowed_ptr(P0, [1]); 2744 * 2745 * P1 *cannot* return from this set_cpus_allowed_ptr() call until P0 executes 2746 * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region). 2747 * This means we need the following scheme: 2748 * 2749 * P0@CPU0 P1 2750 * 2751 * migrate_disable(); 2752 * <preempted> 2753 * set_cpus_allowed_ptr(P0, [1]); 2754 * <blocks> 2755 * <resumes> 2756 * migrate_enable(); 2757 * __set_cpus_allowed_ptr(); 2758 * <wakes local stopper> 2759 * `--> <woken on migration completion> 2760 * 2761 * Now the fun stuff: there may be several P1-like tasks, i.e. multiple 2762 * concurrent set_cpus_allowed_ptr(P0, [*]) calls. CPU affinity changes of any 2763 * task p are serialized by p->pi_lock, which we can leverage: the one that 2764 * should come into effect at the end of the Migrate-Disable region is the last 2765 * one. 
This means we only need to track a single cpumask (i.e. p->cpus_mask), 2766 * but we still need to properly signal those waiting tasks at the appropriate 2767 * moment. 2768 * 2769 * This is implemented using struct set_affinity_pending. The first 2770 * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will 2771 * set up an instance of that struct and install it on the targeted task_struct. 2772 * Any and all further callers will reuse that instance. Those then wait for 2773 * a completion signaled at the tail of the CPU stopper callback (1), triggered 2774 * on the end of the Migrate-Disable region (i.e. outermost migrate_enable()). 2775 * 2776 * 2777 * (1) In the cases covered above. There is one more where the completion is 2778 * signaled within affine_move_task() itself: when a subsequent affinity request 2779 * occurs after the stopper bailed out due to the targeted task still being 2780 * Migrate-Disabled. Consider: 2781 * 2782 * Initial conditions: P0->cpus_mask = [0, 1] 2783 * 2784 * CPU0 P1 P2 2785 * <P0> 2786 * migrate_disable(); 2787 * <preempted> 2788 * set_cpus_allowed_ptr(P0, [1]); 2789 * <blocks> 2790 * <migration/0> 2791 * migration_cpu_stop() 2792 * is_migration_disabled() 2793 * <bails> 2794 * set_cpus_allowed_ptr(P0, [0, 1]); 2795 * <signal completion> 2796 * <awakes> 2797 * 2798 * Note that the above is safe vs a concurrent migrate_enable(), as any 2799 * pending affinity completion is preceded by an uninstallation of 2800 * p->migration_pending done with p->pi_lock held. 2801 */ 2802 static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf, 2803 int dest_cpu, unsigned int flags) 2804 __releases(rq->lock) 2805 __releases(p->pi_lock) 2806 { 2807 struct set_affinity_pending my_pending = { }, *pending = NULL; 2808 bool stop_pending, complete = false; 2809 2810 /* Can the task run on the task's current CPU? If so, we're done */ 2811 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) { 2812 struct task_struct *push_task = NULL; 2813 2814 if ((flags & SCA_MIGRATE_ENABLE) && 2815 (p->migration_flags & MDF_PUSH) && !rq->push_busy) { 2816 rq->push_busy = true; 2817 push_task = get_task_struct(p); 2818 } 2819 2820 /* 2821 * If there are pending waiters, but no pending stop_work, 2822 * then complete now. 2823 */ 2824 pending = p->migration_pending; 2825 if (pending && !pending->stop_pending) { 2826 p->migration_pending = NULL; 2827 complete = true; 2828 } 2829 2830 preempt_disable(); 2831 task_rq_unlock(rq, p, rf); 2832 if (push_task) { 2833 stop_one_cpu_nowait(rq->cpu, push_cpu_stop, 2834 p, &rq->push_work); 2835 } 2836 preempt_enable(); 2837 2838 if (complete) 2839 complete_all(&pending->done); 2840 2841 return 0; 2842 } 2843 2844 if (!(flags & SCA_MIGRATE_ENABLE)) { 2845 /* serialized by p->pi_lock */ 2846 if (!p->migration_pending) { 2847 /* Install the request */ 2848 refcount_set(&my_pending.refs, 1); 2849 init_completion(&my_pending.done); 2850 my_pending.arg = (struct migration_arg) { 2851 .task = p, 2852 .dest_cpu = dest_cpu, 2853 .pending = &my_pending, 2854 }; 2855 2856 p->migration_pending = &my_pending; 2857 } else { 2858 pending = p->migration_pending; 2859 refcount_inc(&pending->refs); 2860 /* 2861 * Affinity has changed, but we've already installed a 2862 * pending. migration_cpu_stop() *must* see this, else 2863 * we risk a completion of the pending despite having a 2864 * task on a disallowed CPU. 2865 * 2866 * Serialized by p->pi_lock, so this is safe.
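* (The store below then merely re-targets the already-installed
* pending/stopper work to the latest requested destination CPU.)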
2867 */ 2868 pending->arg.dest_cpu = dest_cpu; 2869 } 2870 } 2871 pending = p->migration_pending; 2872 /* 2873 * - !MIGRATE_ENABLE: 2874 * we'll have installed a pending if there wasn't one already. 2875 * 2876 * - MIGRATE_ENABLE: 2877 * we're here because the current CPU isn't matching anymore, 2878 * the only way that can happen is because of a concurrent 2879 * set_cpus_allowed_ptr() call, which should then still be 2880 * pending completion. 2881 * 2882 * Either way, we really should have a @pending here. 2883 */ 2884 if (WARN_ON_ONCE(!pending)) { 2885 task_rq_unlock(rq, p, rf); 2886 return -EINVAL; 2887 } 2888 2889 if (task_on_cpu(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) { 2890 /* 2891 * MIGRATE_ENABLE gets here because 'p == current', but for 2892 * anything else we cannot do is_migration_disabled(), punt 2893 * and have the stopper function handle it all race-free. 2894 */ 2895 stop_pending = pending->stop_pending; 2896 if (!stop_pending) 2897 pending->stop_pending = true; 2898 2899 if (flags & SCA_MIGRATE_ENABLE) 2900 p->migration_flags &= ~MDF_PUSH; 2901 2902 preempt_disable(); 2903 task_rq_unlock(rq, p, rf); 2904 if (!stop_pending) { 2905 stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop, 2906 &pending->arg, &pending->stop_work); 2907 } 2908 preempt_enable(); 2909 2910 if (flags & SCA_MIGRATE_ENABLE) 2911 return 0; 2912 } else { 2913 2914 if (!is_migration_disabled(p)) { 2915 if (task_on_rq_queued(p)) 2916 rq = move_queued_task(rq, rf, p, dest_cpu); 2917 2918 if (!pending->stop_pending) { 2919 p->migration_pending = NULL; 2920 complete = true; 2921 } 2922 } 2923 task_rq_unlock(rq, p, rf); 2924 2925 if (complete) 2926 complete_all(&pending->done); 2927 } 2928 2929 wait_for_completion(&pending->done); 2930 2931 if (refcount_dec_and_test(&pending->refs)) 2932 wake_up_var(&pending->refs); /* No UaF, just an address */ 2933 2934 /* 2935 * Block the original owner of &pending until all subsequent callers 2936 * have seen the completion and decremented the refcount 2937 */ 2938 wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs)); 2939 2940 /* ARGH */ 2941 WARN_ON_ONCE(my_pending.stop_pending); 2942 2943 return 0; 2944 } 2945 2946 /* 2947 * Called with both p->pi_lock and rq->lock held; drops both before returning. 2948 */ 2949 static int __set_cpus_allowed_ptr_locked(struct task_struct *p, 2950 struct affinity_context *ctx, 2951 struct rq *rq, 2952 struct rq_flags *rf) 2953 __releases(rq->lock) 2954 __releases(p->pi_lock) 2955 { 2956 const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p); 2957 const struct cpumask *cpu_valid_mask = cpu_active_mask; 2958 bool kthread = p->flags & PF_KTHREAD; 2959 unsigned int dest_cpu; 2960 int ret = 0; 2961 2962 update_rq_clock(rq); 2963 2964 if (kthread || is_migration_disabled(p)) { 2965 /* 2966 * Kernel threads are allowed on online && !active CPUs, 2967 * however, during cpu-hot-unplug, even these might get pushed 2968 * away if not KTHREAD_IS_PER_CPU. 2969 * 2970 * Specifically, migration_disabled() tasks must not fail the 2971 * cpumask_any_and_distribute() pick below, esp. so on 2972 * SCA_MIGRATE_ENABLE, otherwise we'll not call 2973 * set_cpus_allowed_common() and actually reset p->cpus_ptr. 2974 */ 2975 cpu_valid_mask = cpu_online_mask; 2976 } 2977 2978 if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) { 2979 ret = -EINVAL; 2980 goto out; 2981 } 2982 2983 /* 2984 * Must re-check here, to close a race against __kthread_bind(), 2985 * sched_setaffinity() is not guaranteed to observe the flag. 
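* (I.e. PF_NO_SETAFFINITY may have been set after the caller's unlocked
* checks; only this test under the locks is decisive.)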
2986 */ 2987 if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) { 2988 ret = -EINVAL; 2989 goto out; 2990 } 2991 2992 if (!(ctx->flags & SCA_MIGRATE_ENABLE)) { 2993 if (cpumask_equal(&p->cpus_mask, ctx->new_mask)) { 2994 if (ctx->flags & SCA_USER) 2995 swap(p->user_cpus_ptr, ctx->user_mask); 2996 goto out; 2997 } 2998 2999 if (WARN_ON_ONCE(p == current && 3000 is_migration_disabled(p) && 3001 !cpumask_test_cpu(task_cpu(p), ctx->new_mask))) { 3002 ret = -EBUSY; 3003 goto out; 3004 } 3005 } 3006 3007 /* 3008 * Picking a ~random cpu helps in cases where we are changing affinity 3009 * for groups of tasks (ie. cpuset), so that load balancing is not 3010 * immediately required to distribute the tasks within their new mask. 3011 */ 3012 dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, ctx->new_mask); 3013 if (dest_cpu >= nr_cpu_ids) { 3014 ret = -EINVAL; 3015 goto out; 3016 } 3017 3018 __do_set_cpus_allowed(p, ctx); 3019 3020 return affine_move_task(rq, p, rf, dest_cpu, ctx->flags); 3021 3022 out: 3023 task_rq_unlock(rq, p, rf); 3024 3025 return ret; 3026 } 3027 3028 /* 3029 * Change a given task's CPU affinity. Migrate the thread to a 3030 * proper CPU and schedule it away if the CPU it's executing on 3031 * is removed from the allowed bitmask. 3032 * 3033 * NOTE: the caller must have a valid reference to the task, the 3034 * task must not exit() & deallocate itself prematurely. The 3035 * call is not atomic; no spinlocks may be held. 3036 */ 3037 int __set_cpus_allowed_ptr(struct task_struct *p, struct affinity_context *ctx) 3038 { 3039 struct rq_flags rf; 3040 struct rq *rq; 3041 3042 rq = task_rq_lock(p, &rf); 3043 /* 3044 * Masking should be skipped if SCA_USER or any of the SCA_MIGRATE_* 3045 * flags are set. 3046 */ 3047 if (p->user_cpus_ptr && 3048 !(ctx->flags & (SCA_USER | SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) && 3049 cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr)) 3050 ctx->new_mask = rq->scratch_mask; 3051 3052 return __set_cpus_allowed_ptr_locked(p, ctx, rq, &rf); 3053 } 3054 3055 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) 3056 { 3057 struct affinity_context ac = { 3058 .new_mask = new_mask, 3059 .flags = 0, 3060 }; 3061 3062 return __set_cpus_allowed_ptr(p, &ac); 3063 } 3064 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); 3065 3066 /* 3067 * Change a given task's CPU affinity to the intersection of its current 3068 * affinity mask and @subset_mask, writing the resulting mask to @new_mask. 3069 * If user_cpus_ptr is defined, use it as the basis for restricting CPU 3070 * affinity or use cpu_online_mask instead. 3071 * 3072 * If the resulting mask is empty, leave the affinity unchanged and return 3073 * -EINVAL. 3074 */ 3075 static int restrict_cpus_allowed_ptr(struct task_struct *p, 3076 struct cpumask *new_mask, 3077 const struct cpumask *subset_mask) 3078 { 3079 struct affinity_context ac = { 3080 .new_mask = new_mask, 3081 .flags = 0, 3082 }; 3083 struct rq_flags rf; 3084 struct rq *rq; 3085 int err; 3086 3087 rq = task_rq_lock(p, &rf); 3088 3089 /* 3090 * Forcefully restricting the affinity of a deadline task is 3091 * likely to cause problems, so fail and noisily override the 3092 * mask entirely. 
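* (On this -EPERM the caller, force_compatible_cpus_allowed_ptr(),
* falls through to its printk + override path below.)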
3093 */ 3094 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) { 3095 err = -EPERM; 3096 goto err_unlock; 3097 } 3098 3099 if (!cpumask_and(new_mask, task_user_cpus(p), subset_mask)) { 3100 err = -EINVAL; 3101 goto err_unlock; 3102 } 3103 3104 return __set_cpus_allowed_ptr_locked(p, &ac, rq, &rf); 3105 3106 err_unlock: 3107 task_rq_unlock(rq, p, &rf); 3108 return err; 3109 } 3110 3111 /* 3112 * Restrict the CPU affinity of task @p so that it is a subset of 3113 * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the 3114 * old affinity mask. If the resulting mask is empty, we warn and walk 3115 * up the cpuset hierarchy until we find a suitable mask. 3116 */ 3117 void force_compatible_cpus_allowed_ptr(struct task_struct *p) 3118 { 3119 cpumask_var_t new_mask; 3120 const struct cpumask *override_mask = task_cpu_possible_mask(p); 3121 3122 alloc_cpumask_var(&new_mask, GFP_KERNEL); 3123 3124 /* 3125 * __migrate_task() can fail silently in the face of concurrent 3126 * offlining of the chosen destination CPU, so take the hotplug 3127 * lock to ensure that the migration succeeds. 3128 */ 3129 cpus_read_lock(); 3130 if (!cpumask_available(new_mask)) 3131 goto out_set_mask; 3132 3133 if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask)) 3134 goto out_free_mask; 3135 3136 /* 3137 * We failed to find a valid subset of the affinity mask for the 3138 * task, so override it based on its cpuset hierarchy. 3139 */ 3140 cpuset_cpus_allowed(p, new_mask); 3141 override_mask = new_mask; 3142 3143 out_set_mask: 3144 if (printk_ratelimit()) { 3145 printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n", 3146 task_pid_nr(p), p->comm, 3147 cpumask_pr_args(override_mask)); 3148 } 3149 3150 WARN_ON(set_cpus_allowed_ptr(p, override_mask)); 3151 out_free_mask: 3152 cpus_read_unlock(); 3153 free_cpumask_var(new_mask); 3154 } 3155 3156 /* 3157 * Restore the affinity of a task @p which was previously restricted by a 3158 * call to force_compatible_cpus_allowed_ptr(). 3159 * 3160 * It is the caller's responsibility to serialise this with any calls to 3161 * force_compatible_cpus_allowed_ptr(@p). 3162 */ 3163 void relax_compatible_cpus_allowed_ptr(struct task_struct *p) 3164 { 3165 struct affinity_context ac = { 3166 .new_mask = task_user_cpus(p), 3167 .flags = 0, 3168 }; 3169 int ret; 3170 3171 /* 3172 * Try to restore the old affinity mask with __sched_setaffinity(). 3173 * Cpuset masking will be done there too. 3174 */ 3175 ret = __sched_setaffinity(p, &ac); 3176 WARN_ON_ONCE(ret); 3177 } 3178 3179 void set_task_cpu(struct task_struct *p, unsigned int new_cpu) 3180 { 3181 #ifdef CONFIG_SCHED_DEBUG 3182 unsigned int state = READ_ONCE(p->__state); 3183 3184 /* 3185 * We should never call set_task_cpu() on a blocked task, 3186 * ttwu() will sort out the placement. 3187 */ 3188 WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq); 3189 3190 /* 3191 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING, 3192 * because schedstat_wait_{start,end} rebase migrating task's wait_start 3193 * time relying on p->on_rq. 3194 */ 3195 WARN_ON_ONCE(state == TASK_RUNNING && 3196 p->sched_class == &fair_sched_class && 3197 (p->on_rq && !task_on_rq_migrating(p))); 3198 3199 #ifdef CONFIG_LOCKDEP 3200 /* 3201 * The caller should hold either p->pi_lock or rq->lock, when changing 3202 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks. 
3203 * 3204 * sched_move_task() holds both and thus holding either pins the cgroup, 3205 * see task_group(). 3206 * 3207 * Furthermore, all task_rq users should acquire both locks, see 3208 * task_rq_lock(). 3209 */ 3210 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) || 3211 lockdep_is_held(__rq_lockp(task_rq(p))))); 3212 #endif 3213 /* 3214 * Clearly, migrating tasks to offline CPUs is a fairly daft thing. 3215 */ 3216 WARN_ON_ONCE(!cpu_online(new_cpu)); 3217 3218 WARN_ON_ONCE(is_migration_disabled(p)); 3219 #endif 3220 3221 trace_sched_migrate_task(p, new_cpu); 3222 3223 if (task_cpu(p) != new_cpu) { 3224 if (p->sched_class->migrate_task_rq) 3225 p->sched_class->migrate_task_rq(p, new_cpu); 3226 p->se.nr_migrations++; 3227 rseq_migrate(p); 3228 sched_mm_cid_migrate_from(p); 3229 perf_event_task_migrate(p); 3230 } 3231 3232 __set_task_cpu(p, new_cpu); 3233 } 3234 3235 #ifdef CONFIG_NUMA_BALANCING 3236 static void __migrate_swap_task(struct task_struct *p, int cpu) 3237 { 3238 if (task_on_rq_queued(p)) { 3239 struct rq *src_rq, *dst_rq; 3240 struct rq_flags srf, drf; 3241 3242 src_rq = task_rq(p); 3243 dst_rq = cpu_rq(cpu); 3244 3245 rq_pin_lock(src_rq, &srf); 3246 rq_pin_lock(dst_rq, &drf); 3247 3248 deactivate_task(src_rq, p, 0); 3249 set_task_cpu(p, cpu); 3250 activate_task(dst_rq, p, 0); 3251 wakeup_preempt(dst_rq, p, 0); 3252 3253 rq_unpin_lock(dst_rq, &drf); 3254 rq_unpin_lock(src_rq, &srf); 3255 3256 } else { 3257 /* 3258 * Task isn't running anymore; make it appear like we migrated 3259 * it before it went to sleep. This means on wakeup we make the 3260 * previous CPU our target instead of where it really is. 3261 */ 3262 p->wake_cpu = cpu; 3263 } 3264 } 3265 3266 struct migration_swap_arg { 3267 struct task_struct *src_task, *dst_task; 3268 int src_cpu, dst_cpu; 3269 }; 3270 3271 static int migrate_swap_stop(void *data) 3272 { 3273 struct migration_swap_arg *arg = data; 3274 struct rq *src_rq, *dst_rq; 3275 3276 if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu)) 3277 return -EAGAIN; 3278 3279 src_rq = cpu_rq(arg->src_cpu); 3280 dst_rq = cpu_rq(arg->dst_cpu); 3281 3282 guard(double_raw_spinlock)(&arg->src_task->pi_lock, &arg->dst_task->pi_lock); 3283 guard(double_rq_lock)(src_rq, dst_rq); 3284 3285 if (task_cpu(arg->dst_task) != arg->dst_cpu) 3286 return -EAGAIN; 3287 3288 if (task_cpu(arg->src_task) != arg->src_cpu) 3289 return -EAGAIN; 3290 3291 if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr)) 3292 return -EAGAIN; 3293 3294 if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr)) 3295 return -EAGAIN; 3296 3297 __migrate_swap_task(arg->src_task, arg->dst_cpu); 3298 __migrate_swap_task(arg->dst_task, arg->src_cpu); 3299 3300 return 0; 3301 } 3302 3303 /* 3304 * Cross migrate two tasks 3305 */ 3306 int migrate_swap(struct task_struct *cur, struct task_struct *p, 3307 int target_cpu, int curr_cpu) 3308 { 3309 struct migration_swap_arg arg; 3310 int ret = -EINVAL; 3311 3312 arg = (struct migration_swap_arg){ 3313 .src_task = cur, 3314 .src_cpu = curr_cpu, 3315 .dst_task = p, 3316 .dst_cpu = target_cpu, 3317 }; 3318 3319 if (arg.src_cpu == arg.dst_cpu) 3320 goto out; 3321 3322 /* 3323 * These three tests are all lockless; this is OK since all of them 3324 * will be re-checked with proper locks held further down the line. 
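* (The authoritative re-checks happen in migrate_swap_stop() above,
* under both tasks' ->pi_lock and both runqueue locks.)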
3325 */ 3326 if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu)) 3327 goto out; 3328 3329 if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr)) 3330 goto out; 3331 3332 if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr)) 3333 goto out; 3334 3335 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu); 3336 ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg); 3337 3338 out: 3339 return ret; 3340 } 3341 #endif /* CONFIG_NUMA_BALANCING */ 3342 3343 /** 3344 * kick_process - kick a running thread to enter/exit the kernel 3345 * @p: the to-be-kicked thread 3346 * 3347 * Cause a process which is running on another CPU to enter 3348 * kernel-mode, without any delay. (to get signals handled.) 3349 * 3350 * NOTE: this function doesn't have to take the runqueue lock, 3351 * because all it wants to ensure is that the remote task enters 3352 * the kernel. If the IPI races and the task has been migrated 3353 * to another CPU then no harm is done and the purpose has been 3354 * achieved as well. 3355 */ 3356 void kick_process(struct task_struct *p) 3357 { 3358 guard(preempt)(); 3359 int cpu = task_cpu(p); 3360 3361 if ((cpu != smp_processor_id()) && task_curr(p)) 3362 smp_send_reschedule(cpu); 3363 } 3364 EXPORT_SYMBOL_GPL(kick_process); 3365 3366 /* 3367 * ->cpus_ptr is protected by both rq->lock and p->pi_lock 3368 * 3369 * A few notes on cpu_active vs cpu_online: 3370 * 3371 * - cpu_active must be a subset of cpu_online 3372 * 3373 * - on CPU-up we allow per-CPU kthreads on the online && !active CPU, 3374 * see __set_cpus_allowed_ptr(). At this point the newly online 3375 * CPU isn't yet part of the sched domains, and balancing will not 3376 * see it. 3377 * 3378 * - on CPU-down we clear cpu_active() to mask the sched domains and 3379 * keep the load balancer from placing new tasks on the to-be-removed 3380 * CPU. Existing tasks will remain running there and will be taken 3381 * off. 3382 * 3383 * This means that fallback selection must not select !active CPUs, 3384 * and can assume that any active CPU must be online. Conversely 3385 * select_task_rq() below may allow selection of !active CPUs in order 3386 * to satisfy the above rules. 3387 */ 3388 static int select_fallback_rq(int cpu, struct task_struct *p) 3389 { 3390 int nid = cpu_to_node(cpu); 3391 const struct cpumask *nodemask = NULL; 3392 enum { cpuset, possible, fail } state = cpuset; 3393 int dest_cpu; 3394 3395 /* 3396 * If the node that the CPU is on has been offlined, cpu_to_node() 3397 * will return -1. There is no CPU on the node, and we should 3398 * select a CPU on another node. 3399 */ 3400 if (nid != -1) { 3401 nodemask = cpumask_of_node(nid); 3402 3403 /* Look for allowed, online CPU in same node. */ 3404 for_each_cpu(dest_cpu, nodemask) { 3405 if (is_cpu_allowed(p, dest_cpu)) 3406 return dest_cpu; 3407 } 3408 } 3409 3410 for (;;) { 3411 /* Any allowed, online CPU? */ 3412 for_each_cpu(dest_cpu, p->cpus_ptr) { 3413 if (!is_cpu_allowed(p, dest_cpu)) 3414 continue; 3415 3416 goto out; 3417 } 3418 3419 /* No more Mr. Nice Guy. */ 3420 switch (state) { 3421 case cpuset: 3422 if (cpuset_cpus_allowed_fallback(p)) { 3423 state = possible; 3424 break; 3425 } 3426 fallthrough; 3427 case possible: 3428 /* 3429 * XXX When called from select_task_rq() we only 3430 * hold p->pi_lock and again violate locking order. 3431 * 3432 * More yuck to audit.
3433 */ 3434 do_set_cpus_allowed(p, task_cpu_possible_mask(p)); 3435 state = fail; 3436 break; 3437 case fail: 3438 BUG(); 3439 break; 3440 } 3441 } 3442 3443 out: 3444 if (state != cpuset) { 3445 /* 3446 * Don't tell them about moving exiting tasks or 3447 * kernel threads (both mm NULL), since they never 3448 * leave kernel. 3449 */ 3450 if (p->mm && printk_ratelimit()) { 3451 printk_deferred("process %d (%s) no longer affine to cpu%d\n", 3452 task_pid_nr(p), p->comm, cpu); 3453 } 3454 } 3455 3456 return dest_cpu; 3457 } 3458 3459 /* 3460 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable. 3461 */ 3462 static inline 3463 int select_task_rq(struct task_struct *p, int cpu, int wake_flags) 3464 { 3465 lockdep_assert_held(&p->pi_lock); 3466 3467 if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p)) 3468 cpu = p->sched_class->select_task_rq(p, cpu, wake_flags); 3469 else 3470 cpu = cpumask_any(p->cpus_ptr); 3471 3472 /* 3473 * In order not to call set_task_cpu() on a blocking task we need 3474 * to rely on ttwu() to place the task on a valid ->cpus_ptr 3475 * CPU. 3476 * 3477 * Since this is common to all placement strategies, this lives here. 3478 * 3479 * [ this allows ->select_task() to simply return task_cpu(p) and 3480 * not worry about this generic constraint ] 3481 */ 3482 if (unlikely(!is_cpu_allowed(p, cpu))) 3483 cpu = select_fallback_rq(task_cpu(p), p); 3484 3485 return cpu; 3486 } 3487 3488 void sched_set_stop_task(int cpu, struct task_struct *stop) 3489 { 3490 static struct lock_class_key stop_pi_lock; 3491 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; 3492 struct task_struct *old_stop = cpu_rq(cpu)->stop; 3493 3494 if (stop) { 3495 /* 3496 * Make it appear like a SCHED_FIFO task, it's something 3497 * userspace knows about and won't get confused about. 3498 * 3499 * Also, it will make PI more or less work without too 3500 * much confusion -- but then, stop work should not 3501 * rely on PI working anyway. 3502 */ 3503 sched_setscheduler_nocheck(stop, SCHED_FIFO, &param); 3504 3505 stop->sched_class = &stop_sched_class; 3506 3507 /* 3508 * The PI code calls rt_mutex_setprio() with ->pi_lock held to 3509 * adjust the effective priority of a task. As a result, 3510 * rt_mutex_setprio() can trigger (RT) balancing operations, 3511 * which can then trigger wakeups of the stop thread to push 3512 * around the current task. 3513 * 3514 * The stop task itself will never be part of the PI-chain, it 3515 * never blocks, therefore that ->pi_lock recursion is safe. 3516 * Tell lockdep about this by placing the stop->pi_lock in its 3517 * own class. 3518 */ 3519 lockdep_set_class(&stop->pi_lock, &stop_pi_lock); 3520 } 3521 3522 cpu_rq(cpu)->stop = stop; 3523 3524 if (old_stop) { 3525 /* 3526 * Reset it back to a normal scheduling class so that 3527 * it can die in pieces.
3528 */ 3529 old_stop->sched_class = &rt_sched_class; 3530 } 3531 } 3532 3533 #else /* CONFIG_SMP */ 3534 3535 static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { } 3536 3537 static inline bool rq_has_pinned_tasks(struct rq *rq) 3538 { 3539 return false; 3540 } 3541 3542 #endif /* !CONFIG_SMP */ 3543 3544 static void 3545 ttwu_stat(struct task_struct *p, int cpu, int wake_flags) 3546 { 3547 struct rq *rq; 3548 3549 if (!schedstat_enabled()) 3550 return; 3551 3552 rq = this_rq(); 3553 3554 #ifdef CONFIG_SMP 3555 if (cpu == rq->cpu) { 3556 __schedstat_inc(rq->ttwu_local); 3557 __schedstat_inc(p->stats.nr_wakeups_local); 3558 } else { 3559 struct sched_domain *sd; 3560 3561 __schedstat_inc(p->stats.nr_wakeups_remote); 3562 3563 guard(rcu)(); 3564 for_each_domain(rq->cpu, sd) { 3565 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { 3566 __schedstat_inc(sd->ttwu_wake_remote); 3567 break; 3568 } 3569 } 3570 } 3571 3572 if (wake_flags & WF_MIGRATED) 3573 __schedstat_inc(p->stats.nr_wakeups_migrate); 3574 #endif /* CONFIG_SMP */ 3575 3576 __schedstat_inc(rq->ttwu_count); 3577 __schedstat_inc(p->stats.nr_wakeups); 3578 3579 if (wake_flags & WF_SYNC) 3580 __schedstat_inc(p->stats.nr_wakeups_sync); 3581 } 3582 3583 /* 3584 * Mark the task runnable. 3585 */ 3586 static inline void ttwu_do_wakeup(struct task_struct *p) 3587 { 3588 WRITE_ONCE(p->__state, TASK_RUNNING); 3589 trace_sched_wakeup(p); 3590 } 3591 3592 static void 3593 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, 3594 struct rq_flags *rf) 3595 { 3596 int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK; 3597 3598 lockdep_assert_rq_held(rq); 3599 3600 if (p->sched_contributes_to_load) 3601 rq->nr_uninterruptible--; 3602 3603 #ifdef CONFIG_SMP 3604 if (wake_flags & WF_MIGRATED) 3605 en_flags |= ENQUEUE_MIGRATED; 3606 else 3607 #endif 3608 if (p->in_iowait) { 3609 delayacct_blkio_end(p); 3610 atomic_dec(&task_rq(p)->nr_iowait); 3611 } 3612 3613 activate_task(rq, p, en_flags); 3614 wakeup_preempt(rq, p, wake_flags); 3615 3616 ttwu_do_wakeup(p); 3617 3618 #ifdef CONFIG_SMP 3619 if (p->sched_class->task_woken) { 3620 /* 3621 * Our task @p is fully woken up and running; so it's safe to 3622 * drop the rq->lock, hereafter rq is only used for statistics. 3623 */ 3624 rq_unpin_lock(rq, rf); 3625 p->sched_class->task_woken(rq, p); 3626 rq_repin_lock(rq, rf); 3627 } 3628 3629 if (rq->idle_stamp) { 3630 u64 delta = rq_clock(rq) - rq->idle_stamp; 3631 u64 max = 2*rq->max_idle_balance_cost; 3632 3633 update_avg(&rq->avg_idle, delta); 3634 3635 if (rq->avg_idle > max) 3636 rq->avg_idle = max; 3637 3638 rq->idle_stamp = 0; 3639 } 3640 #endif 3641 3642 p->dl_server = NULL; 3643 } 3644 3645 /* 3646 * Consider @p being inside a wait loop: 3647 * 3648 * for (;;) { 3649 * set_current_state(TASK_UNINTERRUPTIBLE); 3650 * 3651 * if (CONDITION) 3652 * break; 3653 * 3654 * schedule(); 3655 * } 3656 * __set_current_state(TASK_RUNNING); 3657 * 3658 * between set_current_state() and schedule(). In this case @p is still 3659 * runnable, so all that needs doing is change p->state back to TASK_RUNNING in 3660 * an atomic manner. 3661 * 3662 * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq 3663 * then schedule() must still happen and p->state can be changed to 3664 * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we 3665 * need to do a full wakeup with enqueue. 3666 * 3667 * Returns: %true when the wakeup is done, 3668 * %false otherwise. 
3669 */ 3670 static int ttwu_runnable(struct task_struct *p, int wake_flags) 3671 { 3672 struct rq_flags rf; 3673 struct rq *rq; 3674 int ret = 0; 3675 3676 rq = __task_rq_lock(p, &rf); 3677 if (task_on_rq_queued(p)) { 3678 if (!task_on_cpu(rq, p)) { 3679 /* 3680 * When on_rq && !on_cpu the task is preempted, see if 3681 * it should preempt the task that is current now. 3682 */ 3683 update_rq_clock(rq); 3684 wakeup_preempt(rq, p, wake_flags); 3685 } 3686 ttwu_do_wakeup(p); 3687 ret = 1; 3688 } 3689 __task_rq_unlock(rq, &rf); 3690 3691 return ret; 3692 } 3693 3694 #ifdef CONFIG_SMP 3695 void sched_ttwu_pending(void *arg) 3696 { 3697 struct llist_node *llist = arg; 3698 struct rq *rq = this_rq(); 3699 struct task_struct *p, *t; 3700 struct rq_flags rf; 3701 3702 if (!llist) 3703 return; 3704 3705 rq_lock_irqsave(rq, &rf); 3706 update_rq_clock(rq); 3707 3708 llist_for_each_entry_safe(p, t, llist, wake_entry.llist) { 3709 if (WARN_ON_ONCE(p->on_cpu)) 3710 smp_cond_load_acquire(&p->on_cpu, !VAL); 3711 3712 if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq))) 3713 set_task_cpu(p, cpu_of(rq)); 3714 3715 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf); 3716 } 3717 3718 /* 3719 * Must be after enqueueing at least one task such that 3720 * idle_cpu() does not observe a false-negative -- if it does, 3721 * it is possible for select_idle_siblings() to stack a number 3722 * of tasks on this CPU during that window. 3723 * 3724 * It is OK to clear ttwu_pending when another task is pending. 3725 * We will receive the IPI after local IRQs are enabled and then enqueue it. 3726 * Since now nr_running > 0, idle_cpu() will always get the correct result. 3727 */ 3728 WRITE_ONCE(rq->ttwu_pending, 0); 3729 rq_unlock_irqrestore(rq, &rf); 3730 } 3731 3732 /* 3733 * Prepare the scene for sending an IPI for a remote smp_call. 3734 * 3735 * Returns true if the caller can proceed with sending the IPI. 3736 * Returns false otherwise. 3737 */ 3738 bool call_function_single_prep_ipi(int cpu) 3739 { 3740 if (set_nr_if_polling(cpu_rq(cpu)->idle)) { 3741 trace_sched_wake_idle_without_ipi(cpu); 3742 return false; 3743 } 3744 3745 return true; 3746 } 3747 3748 /* 3749 * Queue a task on the target CPU's wake_list and wake the CPU via IPI if 3750 * necessary. The wakee CPU on receipt of the IPI will queue the task 3751 * via sched_ttwu_pending() for activation so the wakee incurs the cost 3752 * of the wakeup instead of the waker.
3753 */ 3754 static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) 3755 { 3756 struct rq *rq = cpu_rq(cpu); 3757 3758 p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED); 3759 3760 WRITE_ONCE(rq->ttwu_pending, 1); 3761 __smp_call_single_queue(cpu, &p->wake_entry.llist); 3762 } 3763 3764 void wake_up_if_idle(int cpu) 3765 { 3766 struct rq *rq = cpu_rq(cpu); 3767 3768 guard(rcu)(); 3769 if (is_idle_task(rcu_dereference(rq->curr))) { 3770 guard(rq_lock_irqsave)(rq); 3771 if (is_idle_task(rq->curr)) 3772 resched_curr(rq); 3773 } 3774 } 3775 3776 bool cpus_equal_capacity(int this_cpu, int that_cpu) 3777 { 3778 if (!sched_asym_cpucap_active()) 3779 return true; 3780 3781 if (this_cpu == that_cpu) 3782 return true; 3783 3784 return arch_scale_cpu_capacity(this_cpu) == arch_scale_cpu_capacity(that_cpu); 3785 } 3786 3787 bool cpus_share_cache(int this_cpu, int that_cpu) 3788 { 3789 if (this_cpu == that_cpu) 3790 return true; 3791 3792 return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu); 3793 } 3794 3795 /* 3796 * Whether CPUs share cache resources: the LLC on non-cluster machines, 3797 * and the LLC tag or L2 on machines with clusters. 3798 */ 3799 bool cpus_share_resources(int this_cpu, int that_cpu) 3800 { 3801 if (this_cpu == that_cpu) 3802 return true; 3803 3804 return per_cpu(sd_share_id, this_cpu) == per_cpu(sd_share_id, that_cpu); 3805 } 3806 3807 static inline bool ttwu_queue_cond(struct task_struct *p, int cpu) 3808 { 3809 /* 3810 * Do not complicate things with the async wake_list while the CPU is 3811 * in hotplug state. 3812 */ 3813 if (!cpu_active(cpu)) 3814 return false; 3815 3816 /* Ensure the task will still be allowed to run on the CPU. */ 3817 if (!cpumask_test_cpu(cpu, p->cpus_ptr)) 3818 return false; 3819 3820 /* 3821 * If the CPU does not share cache, then queue the task on the 3822 * remote rq's wakelist to avoid accessing remote data. 3823 */ 3824 if (!cpus_share_cache(smp_processor_id(), cpu)) 3825 return true; 3826 3827 if (cpu == smp_processor_id()) 3828 return false; 3829 3830 /* 3831 * If the wakee CPU is idle, or the task is descheduling and the 3832 * only running task on the CPU, then use the wakelist to offload 3833 * the task activation to the idle (or soon-to-be-idle) CPU as 3834 * the current CPU is likely busy. nr_running is checked to 3835 * avoid unnecessary task stacking. 3836 * 3837 * Note that we can only get here with (wakee) p->on_rq=0, 3838 * p->on_cpu can be whatever, we've done the dequeue, so 3839 * the wakee has been accounted out of ->nr_running.
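*
* (So nr_running == 0 below means the wakee CPU is idle, or about to
* be once its current task finishes descheduling.)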
3840 */ 3841 if (!cpu_rq(cpu)->nr_running) 3842 return true; 3843 3844 return false; 3845 } 3846 3847 static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) 3848 { 3849 if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) { 3850 sched_clock_cpu(cpu); /* Sync clocks across CPUs */ 3851 __ttwu_queue_wakelist(p, cpu, wake_flags); 3852 return true; 3853 } 3854 3855 return false; 3856 } 3857 3858 #else /* !CONFIG_SMP */ 3859 3860 static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) 3861 { 3862 return false; 3863 } 3864 3865 #endif /* CONFIG_SMP */ 3866 3867 static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags) 3868 { 3869 struct rq *rq = cpu_rq(cpu); 3870 struct rq_flags rf; 3871 3872 if (ttwu_queue_wakelist(p, cpu, wake_flags)) 3873 return; 3874 3875 rq_lock(rq, &rf); 3876 update_rq_clock(rq); 3877 ttwu_do_activate(rq, p, wake_flags, &rf); 3878 rq_unlock(rq, &rf); 3879 } 3880 3881 /* 3882 * Invoked from try_to_wake_up() to check whether the task can be woken up. 3883 * 3884 * The caller holds p::pi_lock if p != current or has preemption 3885 * disabled when p == current. 3886 * 3887 * The rules of saved_state: 3888 * 3889 * The related locking code always holds p::pi_lock when updating 3890 * p::saved_state, which means the code is fully serialized in both cases. 3891 * 3892 * For PREEMPT_RT, the lock wait and lock wakeups happen via TASK_RTLOCK_WAIT. 3893 * No other bits set. This allows to distinguish all wakeup scenarios. 3894 * 3895 * For FREEZER, the wakeup happens via TASK_FROZEN. No other bits set. This 3896 * allows us to prevent early wakeup of tasks before they can be run on 3897 * asymmetric ISA architectures (eg ARMv9). 3898 */ 3899 static __always_inline 3900 bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success) 3901 { 3902 int match; 3903 3904 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) { 3905 WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) && 3906 state != TASK_RTLOCK_WAIT); 3907 } 3908 3909 *success = !!(match = __task_state_match(p, state)); 3910 3911 /* 3912 * Saved state preserves the task state across blocking on 3913 * an RT lock or TASK_FREEZABLE tasks. If the state matches, 3914 * set p::saved_state to TASK_RUNNING, but do not wake the task 3915 * because it waits for a lock wakeup or __thaw_task(). Also 3916 * indicate success because from the regular waker's point of 3917 * view this has succeeded. 3918 * 3919 * After acquiring the lock the task will restore p::__state 3920 * from p::saved_state which ensures that the regular 3921 * wakeup is not lost. The restore will also set 3922 * p::saved_state to TASK_RUNNING so any further tests will 3923 * not result in false positives vs. @success 3924 */ 3925 if (match < 0) 3926 p->saved_state = TASK_RUNNING; 3927 3928 return match > 0; 3929 } 3930 3931 /* 3932 * Notes on Program-Order guarantees on SMP systems. 3933 * 3934 * MIGRATION 3935 * 3936 * The basic program-order guarantee on SMP systems is that when a task [t] 3937 * migrates, all its activity on its old CPU [c0] happens-before any subsequent 3938 * execution on its new CPU [c1]. 3939 * 3940 * For migration (of runnable tasks) this is provided by the following means: 3941 * 3942 * A) UNLOCK of the rq(c0)->lock scheduling out task t 3943 * B) migration for t is required to synchronize *both* rq(c0)->lock and 3944 * rq(c1)->lock (if not at the same time, then in that order). 
3945 * C) LOCK of the rq(c1)->lock scheduling in task 3946 * 3947 * Release/acquire chaining guarantees that B happens after A and C after B. 3948 * Note: the CPU doing B need not be c0 or c1 3949 * 3950 * Example: 3951 * 3952 * CPU0 CPU1 CPU2 3953 * 3954 * LOCK rq(0)->lock 3955 * sched-out X 3956 * sched-in Y 3957 * UNLOCK rq(0)->lock 3958 * 3959 * LOCK rq(0)->lock // orders against CPU0 3960 * dequeue X 3961 * UNLOCK rq(0)->lock 3962 * 3963 * LOCK rq(1)->lock 3964 * enqueue X 3965 * UNLOCK rq(1)->lock 3966 * 3967 * LOCK rq(1)->lock // orders against CPU2 3968 * sched-out Z 3969 * sched-in X 3970 * UNLOCK rq(1)->lock 3971 * 3972 * 3973 * BLOCKING -- aka. SLEEP + WAKEUP 3974 * 3975 * For blocking we (obviously) need to provide the same guarantee as for 3976 * migration. However the means are completely different as there is no lock 3977 * chain to provide order. Instead we do: 3978 * 3979 * 1) smp_store_release(X->on_cpu, 0) -- finish_task() 3980 * 2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up() 3981 * 3982 * Example: 3983 * 3984 * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule) 3985 * 3986 * LOCK rq(0)->lock LOCK X->pi_lock 3987 * dequeue X 3988 * sched-out X 3989 * smp_store_release(X->on_cpu, 0); 3990 * 3991 * smp_cond_load_acquire(&X->on_cpu, !VAL); 3992 * X->state = WAKING 3993 * set_task_cpu(X,2) 3994 * 3995 * LOCK rq(2)->lock 3996 * enqueue X 3997 * X->state = RUNNING 3998 * UNLOCK rq(2)->lock 3999 * 4000 * LOCK rq(2)->lock // orders against CPU1 4001 * sched-out Z 4002 * sched-in X 4003 * UNLOCK rq(2)->lock 4004 * 4005 * UNLOCK X->pi_lock 4006 * UNLOCK rq(0)->lock 4007 * 4008 * 4009 * However, for wakeups there is a second guarantee we must provide, namely we 4010 * must ensure that CONDITION=1 done by the caller can not be reordered with 4011 * accesses to the task state; see try_to_wake_up() and set_current_state(). 4012 */ 4013 4014 /** 4015 * try_to_wake_up - wake up a thread 4016 * @p: the thread to be awakened 4017 * @state: the mask of task states that can be woken 4018 * @wake_flags: wake modifier flags (WF_*) 4019 * 4020 * Conceptually does: 4021 * 4022 * If (@state & @p->state) @p->state = TASK_RUNNING. 4023 * 4024 * If the task was not queued/runnable, also place it back on a runqueue. 4025 * 4026 * This function is atomic against schedule() which would dequeue the task. 4027 * 4028 * It issues a full memory barrier before accessing @p->state, see the comment 4029 * with set_current_state(). 4030 * 4031 * Uses p->pi_lock to serialize against concurrent wake-ups. 4032 * 4033 * Relies on p->pi_lock stabilizing: 4034 * - p->sched_class 4035 * - p->cpus_ptr 4036 * - p->sched_task_group 4037 * in order to do migration, see its use of select_task_rq()/set_task_cpu(). 4038 * 4039 * Tries really hard to only take one task_rq(p)->lock for performance. 4040 * Takes rq->lock in: 4041 * - ttwu_runnable() -- old rq, unavoidable, see comment there; 4042 * - ttwu_queue() -- new rq, for enqueue of the task; 4043 * - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us. 4044 * 4045 * As a consequence we race really badly with just about everything. See the 4046 * many memory barriers and their comments for details. 4047 * 4048 * Return: %true if @p->state changes (an actual wakeup was done), 4049 * %false otherwise. 
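 *
 * For illustration, the canonical waiter this pairs with (sketch only,
 * CONDITION stands for whatever the caller tests):
 *
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (CONDITION)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * while the waker does 'CONDITION = 1;' followed by wake_up_process(p).
 * The barriers described above guarantee the wakeup cannot be missed:
 * either the waiter observes CONDITION, or the waker observes the
 * sleeping ->state and wakes it.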
4050  */
4051 int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
4052 {
4053 	guard(preempt)();
4054 	int cpu, success = 0;
4055 
4056 	if (p == current) {
4057 		/*
4058 		 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
4059 		 * == smp_processor_id()'. Together this means we can special
4060 		 * case the whole 'p->on_rq && ttwu_runnable()' case below
4061 		 * without taking any locks.
4062 		 *
4063 		 * In particular:
4064 		 *  - we rely on Program-Order guarantees for all the ordering,
4065 		 *  - we're serialized against set_special_state() by virtue of
4066 		 *    it disabling IRQs (this allows not taking ->pi_lock).
4067 		 */
4068 		if (!ttwu_state_match(p, state, &success))
4069 			goto out;
4070 
4071 		trace_sched_waking(p);
4072 		ttwu_do_wakeup(p);
4073 		goto out;
4074 	}
4075 
4076 	/*
4077 	 * If we are going to wake up a thread waiting for CONDITION we
4078 	 * need to ensure that CONDITION=1 done by the caller can not be
4079 	 * reordered with the p->state check below. This pairs with smp_store_mb()
4080 	 * in set_current_state() that the waiting thread does.
4081 	 */
4082 	scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
4083 		smp_mb__after_spinlock();
4084 		if (!ttwu_state_match(p, state, &success))
4085 			break;
4086 
4087 		trace_sched_waking(p);
4088 
4089 		/*
4090 		 * Ensure we load p->on_rq _after_ p->state, otherwise it would
4091 		 * be possible to, falsely, observe p->on_rq == 0 and get stuck
4092 		 * in smp_cond_load_acquire() below.
4093 		 *
4094 		 * sched_ttwu_pending()			try_to_wake_up()
4095 		 *   STORE p->on_rq = 1			  LOAD p->state
4096 		 *   UNLOCK rq->lock
4097 		 *
4098 		 * __schedule() (switch to task 'p')
4099 		 *   LOCK rq->lock			  smp_rmb();
4100 		 *   smp_mb__after_spinlock();
4101 		 *   UNLOCK rq->lock
4102 		 *
4103 		 * [task p]
4104 		 *   STORE p->state = UNINTERRUPTIBLE	  LOAD p->on_rq
4105 		 *
4106 		 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4107 		 * __schedule(). See the comment for smp_mb__after_spinlock().
4108 		 *
4109 		 * A similar smp_rmb() lives in __task_needs_rq_lock().
4110 		 */
4111 		smp_rmb();
4112 		if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
4113 			break;
4114 
4115 #ifdef CONFIG_SMP
4116 		/*
4117 		 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
4118 		 * possible to, falsely, observe p->on_cpu == 0.
4119 		 *
4120 		 * One must be running (->on_cpu == 1) in order to remove oneself
4121 		 * from the runqueue.
4122 		 *
4123 		 * __schedule() (switch to task 'p')	try_to_wake_up()
4124 		 *   STORE p->on_cpu = 1		  LOAD p->on_rq
4125 		 *   UNLOCK rq->lock
4126 		 *
4127 		 * __schedule() (put 'p' to sleep)
4128 		 *   LOCK rq->lock			  smp_rmb();
4129 		 *   smp_mb__after_spinlock();
4130 		 *   STORE p->on_rq = 0			  LOAD p->on_cpu
4131 		 *
4132 		 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
4133 		 * __schedule(). See the comment for smp_mb__after_spinlock().
4134 		 *
4135 		 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
4136 		 * schedule()'s deactivate_task() has 'happened' and p will no longer
4137 		 * care about its own p->state. See the comment in __schedule().
4138 		 */
4139 		smp_acquire__after_ctrl_dep();
4140 
4141 		/*
4142 		 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
4143 		 * == 0), which means we need to do an enqueue, change p->state to
4144 		 * TASK_WAKING such that we can unlock p->pi_lock before doing the
4145 		 * enqueue, such as ttwu_queue_wakelist().
4146 		 */
4147 		WRITE_ONCE(p->__state, TASK_WAKING);
4148 
4149 		/*
4150 		 * If the owning (remote) CPU is still in the middle of schedule() with
4151 		 * this task as prev, consider queueing p on the remote CPU's wake_list,
4152 		 * which potentially sends an IPI instead of spinning on p->on_cpu to
4153 		 * let the waker make forward progress. This is safe because IRQs are
4154 		 * disabled and the IPI will deliver after on_cpu is cleared.
4155 		 *
4156 		 * Ensure we load task_cpu(p) after p->on_cpu:
4157 		 *
4158 		 * set_task_cpu(p, cpu);
4159 		 *   STORE p->cpu = @cpu
4160 		 * __schedule() (switch to task 'p')
4161 		 *   LOCK rq->lock
4162 		 *   smp_mb__after_spinlock()		smp_cond_load_acquire(&p->on_cpu)
4163 		 *   STORE p->on_cpu = 1		LOAD p->cpu
4164 		 *
4165 		 * to ensure we observe the correct CPU on which the task is currently
4166 		 * scheduling.
4167 		 */
4168 		if (smp_load_acquire(&p->on_cpu) &&
4169 		    ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
4170 			break;
4171 
4172 		/*
4173 		 * If the owning (remote) CPU is still in the middle of schedule() with
4174 		 * this task as prev, wait until it's done referencing the task.
4175 		 *
4176 		 * Pairs with the smp_store_release() in finish_task().
4177 		 *
4178 		 * This ensures that tasks getting woken will be fully ordered against
4179 		 * their previous state and preserve Program Order.
4180 		 */
4181 		smp_cond_load_acquire(&p->on_cpu, !VAL);
4182 
4183 		cpu = select_task_rq(p, p->wake_cpu, wake_flags | WF_TTWU);
4184 		if (task_cpu(p) != cpu) {
4185 			if (p->in_iowait) {
4186 				delayacct_blkio_end(p);
4187 				atomic_dec(&task_rq(p)->nr_iowait);
4188 			}
4189 
4190 			wake_flags |= WF_MIGRATED;
4191 			psi_ttwu_dequeue(p);
4192 			set_task_cpu(p, cpu);
4193 		}
4194 #else
4195 		cpu = task_cpu(p);
4196 #endif /* CONFIG_SMP */
4197 
4198 		ttwu_queue(p, cpu, wake_flags);
4199 	}
4200 out:
4201 	if (success)
4202 		ttwu_stat(p, task_cpu(p), wake_flags);
4203 
4204 	return success;
4205 }
4206 
4207 static bool __task_needs_rq_lock(struct task_struct *p)
4208 {
4209 	unsigned int state = READ_ONCE(p->__state);
4210 
4211 	/*
4212 	 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when
4213 	 * the task is blocked. Make sure to check @state since ttwu() can drop
4214 	 * locks at the end, see ttwu_queue_wakelist().
4215 	 */
4216 	if (state == TASK_RUNNING || state == TASK_WAKING)
4217 		return true;
4218 
4219 	/*
4220 	 * Ensure we load p->on_rq after p->__state, otherwise it would be
4221 	 * possible to, falsely, observe p->on_rq == 0.
4222 	 *
4223 	 * See try_to_wake_up() for a longer comment.
4224 	 */
4225 	smp_rmb();
4226 	if (p->on_rq)
4227 		return true;
4228 
4229 #ifdef CONFIG_SMP
4230 	/*
4231 	 * Ensure the task has finished __schedule() and will not be referenced
4232 	 * anymore. Again, see try_to_wake_up() for a longer comment.
4233 	 */
4234 	smp_rmb();
4235 	smp_cond_load_acquire(&p->on_cpu, !VAL);
4236 #endif
4237 
4238 	return false;
4239 }
4240 
4241 /**
4242  * task_call_func - Invoke a function on task in fixed state
4243  * @p: Process for which the function is to be invoked, can be @current.
4244  * @func: Function to invoke.
4245  * @arg: Argument to function.
4246  *
4247  * Fix the task in its current state by avoiding wakeups and/or rq operations
4248  * and call @func(@arg) on it. This function can use ->on_rq and task_curr()
4249  * to work out what the state is, if required. Given that @func can be invoked
4250  * with a runqueue lock held, it had better be quite lightweight.
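 *
 * A minimal callback, for illustration only (get_state_func is a made-up
 * name, not an existing helper):
 *
 *	static int get_state_func(struct task_struct *p, void *arg)
 *	{
 *		*(unsigned int *)arg = READ_ONCE(p->__state);
 *		return 0;
 *	}
 *
 * invoked as 'task_call_func(p, get_state_func, &state)'.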
4251 * 4252 * Returns: 4253 * Whatever @func returns 4254 */ 4255 int task_call_func(struct task_struct *p, task_call_f func, void *arg) 4256 { 4257 struct rq *rq = NULL; 4258 struct rq_flags rf; 4259 int ret; 4260 4261 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); 4262 4263 if (__task_needs_rq_lock(p)) 4264 rq = __task_rq_lock(p, &rf); 4265 4266 /* 4267 * At this point the task is pinned; either: 4268 * - blocked and we're holding off wakeups (pi->lock) 4269 * - woken, and we're holding off enqueue (rq->lock) 4270 * - queued, and we're holding off schedule (rq->lock) 4271 * - running, and we're holding off de-schedule (rq->lock) 4272 * 4273 * The called function (@func) can use: task_curr(), p->on_rq and 4274 * p->__state to differentiate between these states. 4275 */ 4276 ret = func(p, arg); 4277 4278 if (rq) 4279 rq_unlock(rq, &rf); 4280 4281 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags); 4282 return ret; 4283 } 4284 4285 /** 4286 * cpu_curr_snapshot - Return a snapshot of the currently running task 4287 * @cpu: The CPU on which to snapshot the task. 4288 * 4289 * Returns the task_struct pointer of the task "currently" running on 4290 * the specified CPU. 4291 * 4292 * If the specified CPU was offline, the return value is whatever it 4293 * is, perhaps a pointer to the task_struct structure of that CPU's idle 4294 * task, but there is no guarantee. Callers wishing a useful return 4295 * value must take some action to ensure that the specified CPU remains 4296 * online throughout. 4297 * 4298 * This function executes full memory barriers before and after fetching 4299 * the pointer, which permits the caller to confine this function's fetch 4300 * with respect to the caller's accesses to other shared variables. 4301 */ 4302 struct task_struct *cpu_curr_snapshot(int cpu) 4303 { 4304 struct rq *rq = cpu_rq(cpu); 4305 struct task_struct *t; 4306 struct rq_flags rf; 4307 4308 rq_lock_irqsave(rq, &rf); 4309 smp_mb__after_spinlock(); /* Pairing determined by caller's synchronization design. */ 4310 t = rcu_dereference(cpu_curr(cpu)); 4311 rq_unlock_irqrestore(rq, &rf); 4312 smp_mb(); /* Pairing determined by caller's synchronization design. */ 4313 4314 return t; 4315 } 4316 4317 /** 4318 * wake_up_process - Wake up a specific process 4319 * @p: The process to be woken up. 4320 * 4321 * Attempt to wake up the nominated process and move it to the set of runnable 4322 * processes. 4323 * 4324 * Return: 1 if the process was woken up, 0 if it was already running. 4325 * 4326 * This function executes a full memory barrier before accessing the task state. 4327 */ 4328 int wake_up_process(struct task_struct *p) 4329 { 4330 return try_to_wake_up(p, TASK_NORMAL, 0); 4331 } 4332 EXPORT_SYMBOL(wake_up_process); 4333 4334 int wake_up_state(struct task_struct *p, unsigned int state) 4335 { 4336 return try_to_wake_up(p, state, 0); 4337 } 4338 4339 /* 4340 * Perform scheduler related setup for a newly forked process p. 4341 * p is forked by current. 
4342 * 4343 * __sched_fork() is basic setup used by init_idle() too: 4344 */ 4345 static void __sched_fork(unsigned long clone_flags, struct task_struct *p) 4346 { 4347 p->on_rq = 0; 4348 4349 p->se.on_rq = 0; 4350 p->se.exec_start = 0; 4351 p->se.sum_exec_runtime = 0; 4352 p->se.prev_sum_exec_runtime = 0; 4353 p->se.nr_migrations = 0; 4354 p->se.vruntime = 0; 4355 p->se.vlag = 0; 4356 p->se.slice = sysctl_sched_base_slice; 4357 INIT_LIST_HEAD(&p->se.group_node); 4358 4359 #ifdef CONFIG_FAIR_GROUP_SCHED 4360 p->se.cfs_rq = NULL; 4361 #endif 4362 4363 #ifdef CONFIG_SCHEDSTATS 4364 /* Even if schedstat is disabled, there should not be garbage */ 4365 memset(&p->stats, 0, sizeof(p->stats)); 4366 #endif 4367 4368 init_dl_entity(&p->dl); 4369 4370 INIT_LIST_HEAD(&p->rt.run_list); 4371 p->rt.timeout = 0; 4372 p->rt.time_slice = sched_rr_timeslice; 4373 p->rt.on_rq = 0; 4374 p->rt.on_list = 0; 4375 4376 #ifdef CONFIG_PREEMPT_NOTIFIERS 4377 INIT_HLIST_HEAD(&p->preempt_notifiers); 4378 #endif 4379 4380 #ifdef CONFIG_COMPACTION 4381 p->capture_control = NULL; 4382 #endif 4383 init_numa_balancing(clone_flags, p); 4384 #ifdef CONFIG_SMP 4385 p->wake_entry.u_flags = CSD_TYPE_TTWU; 4386 p->migration_pending = NULL; 4387 #endif 4388 init_sched_mm_cid(p); 4389 } 4390 4391 DEFINE_STATIC_KEY_FALSE(sched_numa_balancing); 4392 4393 #ifdef CONFIG_NUMA_BALANCING 4394 4395 int sysctl_numa_balancing_mode; 4396 4397 static void __set_numabalancing_state(bool enabled) 4398 { 4399 if (enabled) 4400 static_branch_enable(&sched_numa_balancing); 4401 else 4402 static_branch_disable(&sched_numa_balancing); 4403 } 4404 4405 void set_numabalancing_state(bool enabled) 4406 { 4407 if (enabled) 4408 sysctl_numa_balancing_mode = NUMA_BALANCING_NORMAL; 4409 else 4410 sysctl_numa_balancing_mode = NUMA_BALANCING_DISABLED; 4411 __set_numabalancing_state(enabled); 4412 } 4413 4414 #ifdef CONFIG_PROC_SYSCTL 4415 static void reset_memory_tiering(void) 4416 { 4417 struct pglist_data *pgdat; 4418 4419 for_each_online_pgdat(pgdat) { 4420 pgdat->nbp_threshold = 0; 4421 pgdat->nbp_th_nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE); 4422 pgdat->nbp_th_start = jiffies_to_msecs(jiffies); 4423 } 4424 } 4425 4426 static int sysctl_numa_balancing(const struct ctl_table *table, int write, 4427 void *buffer, size_t *lenp, loff_t *ppos) 4428 { 4429 struct ctl_table t; 4430 int err; 4431 int state = sysctl_numa_balancing_mode; 4432 4433 if (write && !capable(CAP_SYS_ADMIN)) 4434 return -EPERM; 4435 4436 t = *table; 4437 t.data = &state; 4438 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); 4439 if (err < 0) 4440 return err; 4441 if (write) { 4442 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) && 4443 (state & NUMA_BALANCING_MEMORY_TIERING)) 4444 reset_memory_tiering(); 4445 sysctl_numa_balancing_mode = state; 4446 __set_numabalancing_state(state); 4447 } 4448 return err; 4449 } 4450 #endif 4451 #endif 4452 4453 #ifdef CONFIG_SCHEDSTATS 4454 4455 DEFINE_STATIC_KEY_FALSE(sched_schedstats); 4456 4457 static void set_schedstats(bool enabled) 4458 { 4459 if (enabled) 4460 static_branch_enable(&sched_schedstats); 4461 else 4462 static_branch_disable(&sched_schedstats); 4463 } 4464 4465 void force_schedstat_enabled(void) 4466 { 4467 if (!schedstat_enabled()) { 4468 pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n"); 4469 static_branch_enable(&sched_schedstats); 4470 } 4471 } 4472 4473 static int __init setup_schedstats(char *str) 4474 { 4475 int ret = 0; 4476 if (!str) 4477 goto out; 
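
	/*
	 * Illustration: with "schedstats=enable" on the kernel command line,
	 * str is "enable" and the first branch below enables the static key.
	 */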
4478 4479 if (!strcmp(str, "enable")) { 4480 set_schedstats(true); 4481 ret = 1; 4482 } else if (!strcmp(str, "disable")) { 4483 set_schedstats(false); 4484 ret = 1; 4485 } 4486 out: 4487 if (!ret) 4488 pr_warn("Unable to parse schedstats=\n"); 4489 4490 return ret; 4491 } 4492 __setup("schedstats=", setup_schedstats); 4493 4494 #ifdef CONFIG_PROC_SYSCTL 4495 static int sysctl_schedstats(const struct ctl_table *table, int write, void *buffer, 4496 size_t *lenp, loff_t *ppos) 4497 { 4498 struct ctl_table t; 4499 int err; 4500 int state = static_branch_likely(&sched_schedstats); 4501 4502 if (write && !capable(CAP_SYS_ADMIN)) 4503 return -EPERM; 4504 4505 t = *table; 4506 t.data = &state; 4507 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); 4508 if (err < 0) 4509 return err; 4510 if (write) 4511 set_schedstats(state); 4512 return err; 4513 } 4514 #endif /* CONFIG_PROC_SYSCTL */ 4515 #endif /* CONFIG_SCHEDSTATS */ 4516 4517 #ifdef CONFIG_SYSCTL 4518 static struct ctl_table sched_core_sysctls[] = { 4519 #ifdef CONFIG_SCHEDSTATS 4520 { 4521 .procname = "sched_schedstats", 4522 .data = NULL, 4523 .maxlen = sizeof(unsigned int), 4524 .mode = 0644, 4525 .proc_handler = sysctl_schedstats, 4526 .extra1 = SYSCTL_ZERO, 4527 .extra2 = SYSCTL_ONE, 4528 }, 4529 #endif /* CONFIG_SCHEDSTATS */ 4530 #ifdef CONFIG_UCLAMP_TASK 4531 { 4532 .procname = "sched_util_clamp_min", 4533 .data = &sysctl_sched_uclamp_util_min, 4534 .maxlen = sizeof(unsigned int), 4535 .mode = 0644, 4536 .proc_handler = sysctl_sched_uclamp_handler, 4537 }, 4538 { 4539 .procname = "sched_util_clamp_max", 4540 .data = &sysctl_sched_uclamp_util_max, 4541 .maxlen = sizeof(unsigned int), 4542 .mode = 0644, 4543 .proc_handler = sysctl_sched_uclamp_handler, 4544 }, 4545 { 4546 .procname = "sched_util_clamp_min_rt_default", 4547 .data = &sysctl_sched_uclamp_util_min_rt_default, 4548 .maxlen = sizeof(unsigned int), 4549 .mode = 0644, 4550 .proc_handler = sysctl_sched_uclamp_handler, 4551 }, 4552 #endif /* CONFIG_UCLAMP_TASK */ 4553 #ifdef CONFIG_NUMA_BALANCING 4554 { 4555 .procname = "numa_balancing", 4556 .data = NULL, /* filled in by handler */ 4557 .maxlen = sizeof(unsigned int), 4558 .mode = 0644, 4559 .proc_handler = sysctl_numa_balancing, 4560 .extra1 = SYSCTL_ZERO, 4561 .extra2 = SYSCTL_FOUR, 4562 }, 4563 #endif /* CONFIG_NUMA_BALANCING */ 4564 }; 4565 static int __init sched_core_sysctl_init(void) 4566 { 4567 register_sysctl_init("kernel", sched_core_sysctls); 4568 return 0; 4569 } 4570 late_initcall(sched_core_sysctl_init); 4571 #endif /* CONFIG_SYSCTL */ 4572 4573 /* 4574 * fork()/clone()-time setup: 4575 */ 4576 int sched_fork(unsigned long clone_flags, struct task_struct *p) 4577 { 4578 __sched_fork(clone_flags, p); 4579 /* 4580 * We mark the process as NEW here. This guarantees that 4581 * nobody will actually run it, and a signal or other external 4582 * event cannot wake it up and insert it on the runqueue either. 4583 */ 4584 p->__state = TASK_NEW; 4585 4586 /* 4587 * Make sure we do not leak PI boosting priority to the child. 4588 */ 4589 p->prio = current->normal_prio; 4590 4591 uclamp_fork(p); 4592 4593 /* 4594 * Revert to default priority/policy on fork if requested. 
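	 *
	 * For example (illustrative): a SCHED_FIFO parent that called
	 * sched_setscheduler() with SCHED_FIFO | SCHED_RESET_ON_FORK has
	 * its children demoted here to SCHED_NORMAL at nice 0.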
4595 */ 4596 if (unlikely(p->sched_reset_on_fork)) { 4597 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { 4598 p->policy = SCHED_NORMAL; 4599 p->static_prio = NICE_TO_PRIO(0); 4600 p->rt_priority = 0; 4601 } else if (PRIO_TO_NICE(p->static_prio) < 0) 4602 p->static_prio = NICE_TO_PRIO(0); 4603 4604 p->prio = p->normal_prio = p->static_prio; 4605 set_load_weight(p, false); 4606 4607 /* 4608 * We don't need the reset flag anymore after the fork. It has 4609 * fulfilled its duty: 4610 */ 4611 p->sched_reset_on_fork = 0; 4612 } 4613 4614 if (dl_prio(p->prio)) 4615 return -EAGAIN; 4616 else if (rt_prio(p->prio)) 4617 p->sched_class = &rt_sched_class; 4618 else 4619 p->sched_class = &fair_sched_class; 4620 4621 init_entity_runnable_average(&p->se); 4622 4623 4624 #ifdef CONFIG_SCHED_INFO 4625 if (likely(sched_info_on())) 4626 memset(&p->sched_info, 0, sizeof(p->sched_info)); 4627 #endif 4628 #if defined(CONFIG_SMP) 4629 p->on_cpu = 0; 4630 #endif 4631 init_task_preempt_count(p); 4632 #ifdef CONFIG_SMP 4633 plist_node_init(&p->pushable_tasks, MAX_PRIO); 4634 RB_CLEAR_NODE(&p->pushable_dl_tasks); 4635 #endif 4636 return 0; 4637 } 4638 4639 void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs) 4640 { 4641 unsigned long flags; 4642 4643 /* 4644 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly 4645 * required yet, but lockdep gets upset if rules are violated. 4646 */ 4647 raw_spin_lock_irqsave(&p->pi_lock, flags); 4648 #ifdef CONFIG_CGROUP_SCHED 4649 if (1) { 4650 struct task_group *tg; 4651 tg = container_of(kargs->cset->subsys[cpu_cgrp_id], 4652 struct task_group, css); 4653 tg = autogroup_task_group(p, tg); 4654 p->sched_task_group = tg; 4655 } 4656 #endif 4657 rseq_migrate(p); 4658 /* 4659 * We're setting the CPU for the first time, we don't migrate, 4660 * so use __set_task_cpu(). 4661 */ 4662 __set_task_cpu(p, smp_processor_id()); 4663 if (p->sched_class->task_fork) 4664 p->sched_class->task_fork(p); 4665 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 4666 } 4667 4668 void sched_post_fork(struct task_struct *p) 4669 { 4670 uclamp_post_fork(p); 4671 } 4672 4673 unsigned long to_ratio(u64 period, u64 runtime) 4674 { 4675 if (runtime == RUNTIME_INF) 4676 return BW_UNIT; 4677 4678 /* 4679 * Doing this here saves a lot of checks in all 4680 * the calling paths, and returning zero seems 4681 * safe for them anyway. 4682 */ 4683 if (period == 0) 4684 return 0; 4685 4686 return div64_u64(runtime << BW_SHIFT, period); 4687 } 4688 4689 /* 4690 * wake_up_new_task - wake up a newly created task for the first time. 4691 * 4692 * This function will do some initial scheduler statistics housekeeping 4693 * that must be done for every newly created context, then puts the task 4694 * on the runqueue and wakes it. 4695 */ 4696 void wake_up_new_task(struct task_struct *p) 4697 { 4698 struct rq_flags rf; 4699 struct rq *rq; 4700 4701 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); 4702 WRITE_ONCE(p->__state, TASK_RUNNING); 4703 #ifdef CONFIG_SMP 4704 /* 4705 * Fork balancing, do it here and not earlier because: 4706 * - cpus_ptr can change in the fork path 4707 * - any previously selected CPU might disappear through hotplug 4708 * 4709 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq, 4710 * as we're not fully set-up yet. 
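	 *
	 * E.g. a child fork()ed on a busy CPU may be placed on an idle
	 * sibling by the WF_FORK pass of select_task_rq() below.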
4711 	 */
4712 	p->recent_used_cpu = task_cpu(p);
4713 	rseq_migrate(p);
4714 	__set_task_cpu(p, select_task_rq(p, task_cpu(p), WF_FORK));
4715 #endif
4716 	rq = __task_rq_lock(p, &rf);
4717 	update_rq_clock(rq);
4718 	post_init_entity_util_avg(p);
4719 
4720 	activate_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_INITIAL);
4721 	trace_sched_wakeup_new(p);
4722 	wakeup_preempt(rq, p, WF_FORK);
4723 #ifdef CONFIG_SMP
4724 	if (p->sched_class->task_woken) {
4725 		/*
4726 		 * Nothing relies on rq->lock after this, so it's fine to
4727 		 * drop it.
4728 		 */
4729 		rq_unpin_lock(rq, &rf);
4730 		p->sched_class->task_woken(rq, p);
4731 		rq_repin_lock(rq, &rf);
4732 	}
4733 #endif
4734 	task_rq_unlock(rq, p, &rf);
4735 }
4736 
4737 #ifdef CONFIG_PREEMPT_NOTIFIERS
4738 
4739 static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
4740 
4741 void preempt_notifier_inc(void)
4742 {
4743 	static_branch_inc(&preempt_notifier_key);
4744 }
4745 EXPORT_SYMBOL_GPL(preempt_notifier_inc);
4746 
4747 void preempt_notifier_dec(void)
4748 {
4749 	static_branch_dec(&preempt_notifier_key);
4750 }
4751 EXPORT_SYMBOL_GPL(preempt_notifier_dec);
4752 
4753 /**
4754  * preempt_notifier_register - tell me when current is being preempted & rescheduled
4755  * @notifier: notifier struct to register
4756  */
4757 void preempt_notifier_register(struct preempt_notifier *notifier)
4758 {
4759 	if (!static_branch_unlikely(&preempt_notifier_key))
4760 		WARN(1, "registering preempt_notifier while notifiers disabled\n");
4761 
4762 	hlist_add_head(&notifier->link, &current->preempt_notifiers);
4763 }
4764 EXPORT_SYMBOL_GPL(preempt_notifier_register);
4765 
4766 /**
4767  * preempt_notifier_unregister - no longer interested in preemption notifications
4768  * @notifier: notifier struct to unregister
4769  *
4770  * This is *not* safe to call from within a preemption notifier.
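 *
 * For illustration, the expected lifecycle (my_ops is a caller-supplied
 * struct preempt_ops with .sched_in/.sched_out callbacks; sketch only):
 *
 *	preempt_notifier_inc();
 *	preempt_notifier_init(&notifier, &my_ops);
 *	preempt_notifier_register(&notifier);
 *	...
 *	preempt_notifier_unregister(&notifier);
 *	preempt_notifier_dec();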
4771  */
4772 void preempt_notifier_unregister(struct preempt_notifier *notifier)
4773 {
4774 	hlist_del(&notifier->link);
4775 }
4776 EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
4777 
4778 static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
4779 {
4780 	struct preempt_notifier *notifier;
4781 
4782 	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
4783 		notifier->ops->sched_in(notifier, raw_smp_processor_id());
4784 }
4785 
4786 static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
4787 {
4788 	if (static_branch_unlikely(&preempt_notifier_key))
4789 		__fire_sched_in_preempt_notifiers(curr);
4790 }
4791 
4792 static void
4793 __fire_sched_out_preempt_notifiers(struct task_struct *curr,
4794 				   struct task_struct *next)
4795 {
4796 	struct preempt_notifier *notifier;
4797 
4798 	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
4799 		notifier->ops->sched_out(notifier, next);
4800 }
4801 
4802 static __always_inline void
4803 fire_sched_out_preempt_notifiers(struct task_struct *curr,
4804 				 struct task_struct *next)
4805 {
4806 	if (static_branch_unlikely(&preempt_notifier_key))
4807 		__fire_sched_out_preempt_notifiers(curr, next);
4808 }
4809 
4810 #else /* !CONFIG_PREEMPT_NOTIFIERS */
4811 
4812 static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
4813 {
4814 }
4815 
4816 static inline void
4817 fire_sched_out_preempt_notifiers(struct task_struct *curr,
4818 				 struct task_struct *next)
4819 {
4820 }
4821 
4822 #endif /* CONFIG_PREEMPT_NOTIFIERS */
4823 
4824 static inline void prepare_task(struct task_struct *next)
4825 {
4826 #ifdef CONFIG_SMP
4827 	/*
4828 	 * Claim the task as running, we do this before switching to it
4829 	 * such that any running task will have this set.
4830 	 *
4831 	 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and
4832 	 * its ordering comment.
4833 	 */
4834 	WRITE_ONCE(next->on_cpu, 1);
4835 #endif
4836 }
4837 
4838 static inline void finish_task(struct task_struct *prev)
4839 {
4840 #ifdef CONFIG_SMP
4841 	/*
4842 	 * This must be the very last reference to @prev from this CPU. After
4843 	 * p->on_cpu is cleared, the task can be moved to a different CPU. We
4844 	 * must ensure this doesn't happen until the switch is completely
4845 	 * finished.
4846 	 *
4847 	 * In particular, the load of prev->state in finish_task_switch() must
4848 	 * happen before this.
4849 	 *
4850 	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
4851 	 */
4852 	smp_store_release(&prev->on_cpu, 0);
4853 #endif
4854 }
4855 
4856 #ifdef CONFIG_SMP
4857 
4858 static void do_balance_callbacks(struct rq *rq, struct balance_callback *head)
4859 {
4860 	void (*func)(struct rq *rq);
4861 	struct balance_callback *next;
4862 
4863 	lockdep_assert_rq_held(rq);
4864 
4865 	while (head) {
4866 		func = (void (*)(struct rq *))head->func;
4867 		next = head->next;
4868 		head->next = NULL;
4869 		head = next;
4870 
4871 		func(rq);
4872 	}
4873 }
4874 
4875 static void balance_push(struct rq *rq);
4876 
4877 /*
4878  * balance_push_callback is a right abuse of the callback interface and plays
4879  * by significantly different rules.
4880  *
4881  * Where the normal balance_callback's purpose is to be run in the same context
4882  * that queued it (only later, when it's safe to drop rq->lock again),
4883  * balance_push_callback is specifically targeted at __schedule().
4884  *
4885  * This abuse is tolerated because it places all the unlikely/odd cases behind
4886  * a single test, namely: rq->balance_callback == NULL.
4887  */
4888 struct balance_callback balance_push_callback = {
4889 	.next = NULL,
4890 	.func = balance_push,
4891 };
4892 
4893 static inline struct balance_callback *
4894 __splice_balance_callbacks(struct rq *rq, bool split)
4895 {
4896 	struct balance_callback *head = rq->balance_callback;
4897 
4898 	if (likely(!head))
4899 		return NULL;
4900 
4901 	lockdep_assert_rq_held(rq);
4902 	/*
4903 	 * Must not take balance_push_callback off the list when
4904 	 * splice_balance_callbacks() and balance_callbacks() are not
4905 	 * in the same rq->lock section.
4906 	 *
4907 	 * In that case it would be possible for __schedule() to interleave
4908 	 * and observe the list empty.
4909 	 */
4910 	if (split && head == &balance_push_callback)
4911 		head = NULL;
4912 	else
4913 		rq->balance_callback = NULL;
4914 
4915 	return head;
4916 }
4917 
4918 struct balance_callback *splice_balance_callbacks(struct rq *rq)
4919 {
4920 	return __splice_balance_callbacks(rq, true);
4921 }
4922 
4923 static void __balance_callbacks(struct rq *rq)
4924 {
4925 	do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
4926 }
4927 
4928 void balance_callbacks(struct rq *rq, struct balance_callback *head)
4929 {
4930 	unsigned long flags;
4931 
4932 	if (unlikely(head)) {
4933 		raw_spin_rq_lock_irqsave(rq, flags);
4934 		do_balance_callbacks(rq, head);
4935 		raw_spin_rq_unlock_irqrestore(rq, flags);
4936 	}
4937 }
4938 
4939 #else
4940 
4941 static inline void __balance_callbacks(struct rq *rq)
4942 {
4943 }
4944 
4945 #endif
4946 
4947 static inline void
4948 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
4949 {
4950 	/*
4951 	 * Since the runqueue lock will be released by the next
4952 	 * task (which is an invalid locking op but in the case
4953 	 * of the scheduler it's an obvious special-case), we
4954 	 * do an early lockdep release here:
4955 	 */
4956 	rq_unpin_lock(rq, rf);
4957 	spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_);
4958 #ifdef CONFIG_DEBUG_SPINLOCK
4959 	/* this is a valid case when another task releases the spinlock */
4960 	rq_lockp(rq)->owner = next;
4961 #endif
4962 }
4963 
4964 static inline void finish_lock_switch(struct rq *rq)
4965 {
4966 	/*
4967 	 * If we are tracking spinlock dependencies then we have to
4968 	 * fix up the runqueue lock - which gets 'carried over' from
4969 	 * prev into current:
4970 	 */
4971 	spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_);
4972 	__balance_callbacks(rq);
4973 	raw_spin_rq_unlock_irq(rq);
4974 }
4975 
4976 /*
4977  * NOP if the arch has not defined these:
4978  */
4979 
4980 #ifndef prepare_arch_switch
4981 # define prepare_arch_switch(next)	do { } while (0)
4982 #endif
4983 
4984 #ifndef finish_arch_post_lock_switch
4985 # define finish_arch_post_lock_switch()	do { } while (0)
4986 #endif
4987 
4988 static inline void kmap_local_sched_out(void)
4989 {
4990 #ifdef CONFIG_KMAP_LOCAL
4991 	if (unlikely(current->kmap_ctrl.idx))
4992 		__kmap_local_sched_out();
4993 #endif
4994 }
4995 
4996 static inline void kmap_local_sched_in(void)
4997 {
4998 #ifdef CONFIG_KMAP_LOCAL
4999 	if (unlikely(current->kmap_ctrl.idx))
5000 		__kmap_local_sched_in();
5001 #endif
5002 }
5003 
5004 /**
5005  * prepare_task_switch - prepare to switch tasks
5006  * @rq: the runqueue preparing to switch
5007  * @prev: the current task that is being switched out
5008  * @next: the task we are going to switch to.
5009  *
5010  * This is called with the rq lock held and interrupts off. It must
5011  * be paired with a subsequent finish_task_switch after the context
5012  * switch.
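 *
 * The usual shape, as context_switch() below does it (sketch):
 *
 *	prepare_task_switch(rq, prev, next);
 *	switch_to(prev, next, prev);
 *	return finish_task_switch(prev);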
5013  *
5014  * prepare_task_switch sets up locking and calls architecture specific
5015  * hooks.
5016  */
5017 static inline void
5018 prepare_task_switch(struct rq *rq, struct task_struct *prev,
5019 		    struct task_struct *next)
5020 {
5021 	kcov_prepare_switch(prev);
5022 	sched_info_switch(rq, prev, next);
5023 	perf_event_task_sched_out(prev, next);
5024 	rseq_preempt(prev);
5025 	fire_sched_out_preempt_notifiers(prev, next);
5026 	kmap_local_sched_out();
5027 	prepare_task(next);
5028 	prepare_arch_switch(next);
5029 }
5030 
5031 /**
5032  * finish_task_switch - clean up after a task-switch
5033  * @prev: the thread we just switched away from.
5034  *
5035  * finish_task_switch must be called after the context switch, paired
5036  * with a prepare_task_switch call before the context switch.
5037  * finish_task_switch will reconcile locking set up by prepare_task_switch,
5038  * and do any other architecture-specific cleanup actions.
5039  *
5040  * Note that we may have delayed dropping an mm in context_switch(). If
5041  * so, we finish that here outside of the runqueue lock. (Doing it
5042  * with the lock held can cause deadlocks; see schedule() for
5043  * details.)
5044  *
5045  * The context switch has flipped the stack from under us and restored the
5046  * local variables which were saved when this task called schedule() in the
5047  * past. 'prev == current' is still correct but we need to recalculate this_rq
5048  * because prev may have moved to another CPU.
5049  */
5050 static struct rq *finish_task_switch(struct task_struct *prev)
5051 	__releases(rq->lock)
5052 {
5053 	struct rq *rq = this_rq();
5054 	struct mm_struct *mm = rq->prev_mm;
5055 	unsigned int prev_state;
5056 
5057 	/*
5058 	 * The previous task will have left us with a preempt_count of 2
5059 	 * because it left us after:
5060 	 *
5061 	 *	schedule()
5062 	 *	  preempt_disable();			// 1
5063 	 *	  __schedule()
5064 	 *	    raw_spin_lock_irq(&rq->lock)	// 2
5065 	 *
5066 	 * Also, see FORK_PREEMPT_COUNT.
5067 	 */
5068 	if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
5069 		      "corrupted preempt_count: %s/%d/0x%x\n",
5070 		      current->comm, current->pid, preempt_count()))
5071 		preempt_count_set(FORK_PREEMPT_COUNT);
5072 
5073 	rq->prev_mm = NULL;
5074 
5075 	/*
5076 	 * A task struct has one reference for the use as "current".
5077 	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
5078 	 * schedule one last time. The schedule call will never return, and
5079 	 * the scheduled task must drop that reference.
5080 	 *
5081 	 * We must observe prev->state before clearing prev->on_cpu (in
5082 	 * finish_task), otherwise a concurrent wakeup can get prev
5083 	 * running on another CPU and we could race with its RUNNING -> DEAD
5084 	 * transition, resulting in a double drop.
5085 	 */
5086 	prev_state = READ_ONCE(prev->__state);
5087 	vtime_task_switch(prev);
5088 	perf_event_task_sched_in(prev, current);
5089 	finish_task(prev);
5090 	tick_nohz_task_switch();
5091 	finish_lock_switch(rq);
5092 	finish_arch_post_lock_switch();
5093 	kcov_finish_switch(current);
5094 	/*
5095 	 * kmap_local_sched_out() is invoked with rq::lock held and
5096 	 * interrupts disabled. There is no requirement for that, but the
5097 	 * sched out code does not have an interrupt enabled section.
5098 	 * Restoring the maps on sched in does not require interrupts being
5099 	 * disabled either.
5100 	 */
5101 	kmap_local_sched_in();
5102 
5103 	fire_sched_in_preempt_notifiers(current);
5104 	/*
5105 	 * When switching through a kernel thread, the loop in
5106 	 * membarrier_{private,global}_expedited() may have observed that
5107 	 * kernel thread and not issued an IPI. It is therefore possible to
5108 	 * schedule between user->kernel->user threads without passing through
5109 	 * switch_mm(). Membarrier requires a barrier after storing to
5110 	 * rq->curr, before returning to userspace, so provide them here:
5111 	 *
5112 	 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
5113 	 *   provided by mmdrop_lazy_tlb(),
5114 	 * - a sync_core for SYNC_CORE.
5115 	 */
5116 	if (mm) {
5117 		membarrier_mm_sync_core_before_usermode(mm);
5118 		mmdrop_lazy_tlb_sched(mm);
5119 	}
5120 
5121 	if (unlikely(prev_state == TASK_DEAD)) {
5122 		if (prev->sched_class->task_dead)
5123 			prev->sched_class->task_dead(prev);
5124 
5125 		/* Task is done with its stack. */
5126 		put_task_stack(prev);
5127 
5128 		put_task_struct_rcu_user(prev);
5129 	}
5130 
5131 	return rq;
5132 }
5133 
5134 /**
5135  * schedule_tail - first thing a freshly forked thread must call.
5136  * @prev: the thread we just switched away from.
5137  */
5138 asmlinkage __visible void schedule_tail(struct task_struct *prev)
5139 	__releases(rq->lock)
5140 {
5141 	/*
5142 	 * New tasks start with FORK_PREEMPT_COUNT, see there and
5143 	 * finish_task_switch() for details.
5144 	 *
5145 	 * finish_task_switch() will drop rq->lock() and lower preempt_count
5146 	 * and the preempt_enable() will end up enabling preemption (on
5147 	 * PREEMPT_COUNT kernels).
5148 	 */
5149 
5150 	finish_task_switch(prev);
5151 	preempt_enable();
5152 
5153 	if (current->set_child_tid)
5154 		put_user(task_pid_vnr(current), current->set_child_tid);
5155 
5156 	calculate_sigpending();
5157 }
5158 
5159 /*
5160  * context_switch - switch to the new MM and the new thread's register state.
5161  */
5162 static __always_inline struct rq *
5163 context_switch(struct rq *rq, struct task_struct *prev,
5164 	       struct task_struct *next, struct rq_flags *rf)
5165 {
5166 	prepare_task_switch(rq, prev, next);
5167 
5168 	/*
5169 	 * For paravirt, this is coupled with an exit in switch_to to
5170 	 * combine the page table reload and the switch backend into
5171 	 * one hypercall.
5172 	 */
5173 	arch_start_context_switch(prev);
5174 
5175 	/*
5176 	 * kernel -> kernel	lazy + transfer active
5177 	 *   user -> kernel	lazy + mmgrab_lazy_tlb() active
5178 	 *
5179 	 * kernel ->   user	switch + mmdrop_lazy_tlb() active
5180 	 *   user ->   user	switch
5181 	 *
5182 	 * switch_mm_cid() needs to be updated if the barriers provided
5183 	 * by context_switch() are modified.
5184 	 */
5185 	if (!next->mm) {				// to kernel
5186 		enter_lazy_tlb(prev->active_mm, next);
5187 
5188 		next->active_mm = prev->active_mm;
5189 		if (prev->mm)				// from user
5190 			mmgrab_lazy_tlb(prev->active_mm);
5191 		else
5192 			prev->active_mm = NULL;
5193 	} else {					// to user
5194 		membarrier_switch_mm(rq, prev->active_mm, next->mm);
5195 		/*
5196 		 * sys_membarrier() requires an smp_mb() between setting
5197 		 * rq->curr / membarrier_switch_mm() and returning to userspace.
5198 		 *
5199 		 * The below provides this either through switch_mm(), or in
5200 		 * case 'prev->active_mm == next->mm' through
5201 		 * finish_task_switch()'s mmdrop().
5202 		 */
5203 		switch_mm_irqs_off(prev->active_mm, next->mm, next);
5204 		lru_gen_use_mm(next->mm);
5205 
5206 		if (!prev->mm) {			// from kernel
5207 			/* will mmdrop_lazy_tlb() in finish_task_switch(). */
5208 			rq->prev_mm = prev->active_mm;
5209 			prev->active_mm = NULL;
5210 		}
5211 	}
5212 
5213 	/* switch_mm_cid() requires the memory barriers above. */
5214 	switch_mm_cid(rq, prev, next);
5215 
5216 	prepare_lock_switch(rq, next, rf);
5217 
5218 	/* Here we just switch the register state and the stack. */
5219 	switch_to(prev, next, prev);
5220 	barrier();
5221 
5222 	return finish_task_switch(prev);
5223 }
5224 
5225 /*
5226  * nr_running and nr_context_switches:
5227  *
5228  * externally visible scheduler statistics: current number of runnable
5229  * threads, total number of context switches performed since bootup.
5230  */
5231 unsigned int nr_running(void)
5232 {
5233 	unsigned int i, sum = 0;
5234 
5235 	for_each_online_cpu(i)
5236 		sum += cpu_rq(i)->nr_running;
5237 
5238 	return sum;
5239 }
5240 
5241 /*
5242  * Check if only the current task is running on the CPU.
5243  *
5244  * Caution: this function does not check that the caller has disabled
5245  * preemption, thus the result might have a time-of-check-to-time-of-use
5246  * race. The caller is responsible to use it correctly, for example:
5247  *
5248  * - from a non-preemptible section (of course)
5249  *
5250  * - from a thread that is bound to a single CPU
5251  *
5252  * - in a loop with very short iterations (e.g. a polling loop)
5253  */
5254 bool single_task_running(void)
5255 {
5256 	return raw_rq()->nr_running == 1;
5257 }
5258 EXPORT_SYMBOL(single_task_running);
5259 
5260 unsigned long long nr_context_switches_cpu(int cpu)
5261 {
5262 	return cpu_rq(cpu)->nr_switches;
5263 }
5264 
5265 unsigned long long nr_context_switches(void)
5266 {
5267 	int i;
5268 	unsigned long long sum = 0;
5269 
5270 	for_each_possible_cpu(i)
5271 		sum += cpu_rq(i)->nr_switches;
5272 
5273 	return sum;
5274 }
5275 
5276 /*
5277  * Consumers of these two interfaces, like for example the cpuidle menu
5278  * governor, are using nonsensical data: they prefer shallow idle state
5279  * selection for a CPU that has IO-wait, which might not even end up
5280  * running the task when it does become runnable.
5281  */
5282 
5283 unsigned int nr_iowait_cpu(int cpu)
5284 {
5285 	return atomic_read(&cpu_rq(cpu)->nr_iowait);
5286 }
5287 
5288 /*
5289  * IO-wait accounting, and how it's mostly bollocks (on SMP).
5290  *
5291  * The idea behind IO-wait accounting is to account the idle time that we could
5292  * have spent running if it were not for IO. That is, if we were to improve the
5293  * storage performance, we'd have a proportional reduction in IO-wait time.
5294  *
5295  * This all works nicely on UP, where, when a task blocks on IO, we account
5296  * idle time as IO-wait, because if the storage were faster, it could've been
5297  * running and we'd not be idle.
5298  *
5299  * This has been extended to SMP, by doing the same for each CPU. This however
5300  * is broken.
5301  *
5302  * Imagine for instance the case where two tasks block on one CPU, only the one
5303  * CPU will have IO-wait accounted, while the other has regular idle. Even
5304  * though, if the storage were faster, both could've run at the same time,
5305  * utilising both CPUs.
5306  *
5307  * This means that, when looking globally, the current IO-wait accounting on
5308  * SMP is a lower bound, due to under-accounting.
5309  *
5310  * Worse, since the numbers are provided per CPU, they are sometimes
5311  * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
5312  * associated with any one particular CPU, it can wake to another CPU than it
5313  * blocked on. This means the per CPU IO-wait number is meaningless.
5314  *
5315  * Task CPU affinities can make all that even more 'interesting'.
5316  */
5317 
5318 unsigned int nr_iowait(void)
5319 {
5320 	unsigned int i, sum = 0;
5321 
5322 	for_each_possible_cpu(i)
5323 		sum += nr_iowait_cpu(i);
5324 
5325 	return sum;
5326 }
5327 
5328 #ifdef CONFIG_SMP
5329 
5330 /*
5331  * sched_exec - execve() is a valuable balancing opportunity, because at
5332  * this point the task has the smallest effective memory and cache footprint.
5333  */
5334 void sched_exec(void)
5335 {
5336 	struct task_struct *p = current;
5337 	struct migration_arg arg;
5338 	int dest_cpu;
5339 
5340 	scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
5341 		dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC);
5342 		if (dest_cpu == smp_processor_id())
5343 			return;
5344 
5345 		if (unlikely(!cpu_active(dest_cpu)))
5346 			return;
5347 
5348 		arg = (struct migration_arg){ p, dest_cpu };
5349 	}
5350 	stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
5351 }
5352 
5353 #endif
5354 
5355 DEFINE_PER_CPU(struct kernel_stat, kstat);
5356 DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
5357 
5358 EXPORT_PER_CPU_SYMBOL(kstat);
5359 EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
5360 
5361 /*
5362  * The function fair_sched_class.update_curr accesses the struct curr
5363  * and its field curr->exec_start; when called from task_sched_runtime(),
5364  * we observe a high rate of cache misses in practice.
5365  * Prefetching this data results in improved performance.
5366  */
5367 static inline void prefetch_curr_exec_start(struct task_struct *p)
5368 {
5369 #ifdef CONFIG_FAIR_GROUP_SCHED
5370 	struct sched_entity *curr = p->se.cfs_rq->curr;
5371 #else
5372 	struct sched_entity *curr = task_rq(p)->cfs.curr;
5373 #endif
5374 	prefetch(curr);
5375 	prefetch(&curr->exec_start);
5376 }
5377 
5378 /*
5379  * Return accounted runtime for the task.
5380  * In case the task is currently running, return the runtime plus current's
5381  * pending runtime that has not been accounted yet.
5382  */
5383 unsigned long long task_sched_runtime(struct task_struct *p)
5384 {
5385 	struct rq_flags rf;
5386 	struct rq *rq;
5387 	u64 ns;
5388 
5389 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
5390 	/*
5391 	 * 64-bit doesn't need locks to atomically read a 64-bit value.
5392 	 * So we have an optimization chance when the task's delta_exec is 0.
5393 	 * Reading ->on_cpu is racy, but this is OK.
5394 	 *
5395 	 * If we race with it leaving CPU, we'll take a lock. So we're correct.
5396 	 * If we race with it entering CPU, unaccounted time is 0. This is
5397 	 * indistinguishable from the read occurring a few cycles earlier.
5398 	 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
5399 	 * been accounted, so we're correct here as well.
5400 	 */
5401 	if (!p->on_cpu || !task_on_rq_queued(p))
5402 		return p->se.sum_exec_runtime;
5403 #endif
5404 
5405 	rq = task_rq_lock(p, &rf);
5406 	/*
5407 	 * Must be ->curr _and_ ->on_rq. If dequeued, we would
5408 	 * project cycles that may never be accounted to this
5409 	 * thread, breaking clock_gettime().
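	 * (E.g. a clock_gettime(CLOCK_THREAD_CPUTIME_ID) reader could
	 * observe time jumping backwards between two calls.)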
5410 */ 5411 if (task_current(rq, p) && task_on_rq_queued(p)) { 5412 prefetch_curr_exec_start(p); 5413 update_rq_clock(rq); 5414 p->sched_class->update_curr(rq); 5415 } 5416 ns = p->se.sum_exec_runtime; 5417 task_rq_unlock(rq, p, &rf); 5418 5419 return ns; 5420 } 5421 5422 #ifdef CONFIG_SCHED_DEBUG 5423 static u64 cpu_resched_latency(struct rq *rq) 5424 { 5425 int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms); 5426 u64 resched_latency, now = rq_clock(rq); 5427 static bool warned_once; 5428 5429 if (sysctl_resched_latency_warn_once && warned_once) 5430 return 0; 5431 5432 if (!need_resched() || !latency_warn_ms) 5433 return 0; 5434 5435 if (system_state == SYSTEM_BOOTING) 5436 return 0; 5437 5438 if (!rq->last_seen_need_resched_ns) { 5439 rq->last_seen_need_resched_ns = now; 5440 rq->ticks_without_resched = 0; 5441 return 0; 5442 } 5443 5444 rq->ticks_without_resched++; 5445 resched_latency = now - rq->last_seen_need_resched_ns; 5446 if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC) 5447 return 0; 5448 5449 warned_once = true; 5450 5451 return resched_latency; 5452 } 5453 5454 static int __init setup_resched_latency_warn_ms(char *str) 5455 { 5456 long val; 5457 5458 if ((kstrtol(str, 0, &val))) { 5459 pr_warn("Unable to set resched_latency_warn_ms\n"); 5460 return 1; 5461 } 5462 5463 sysctl_resched_latency_warn_ms = val; 5464 return 1; 5465 } 5466 __setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms); 5467 #else 5468 static inline u64 cpu_resched_latency(struct rq *rq) { return 0; } 5469 #endif /* CONFIG_SCHED_DEBUG */ 5470 5471 /* 5472 * This function gets called by the timer code, with HZ frequency. 5473 * We call it with interrupts disabled. 5474 */ 5475 void sched_tick(void) 5476 { 5477 int cpu = smp_processor_id(); 5478 struct rq *rq = cpu_rq(cpu); 5479 struct task_struct *curr; 5480 struct rq_flags rf; 5481 unsigned long hw_pressure; 5482 u64 resched_latency; 5483 5484 if (housekeeping_cpu(cpu, HK_TYPE_TICK)) 5485 arch_scale_freq_tick(); 5486 5487 sched_clock_tick(); 5488 5489 rq_lock(rq, &rf); 5490 5491 curr = rq->curr; 5492 psi_account_irqtime(rq, curr, NULL); 5493 5494 update_rq_clock(rq); 5495 hw_pressure = arch_scale_hw_pressure(cpu_of(rq)); 5496 update_hw_load_avg(rq_clock_task(rq), rq, hw_pressure); 5497 curr->sched_class->task_tick(rq, curr, 0); 5498 if (sched_feat(LATENCY_WARN)) 5499 resched_latency = cpu_resched_latency(rq); 5500 calc_global_load_tick(rq); 5501 sched_core_tick(rq); 5502 task_tick_mm_cid(rq, curr); 5503 5504 rq_unlock(rq, &rf); 5505 5506 if (sched_feat(LATENCY_WARN) && resched_latency) 5507 resched_latency_warn(cpu, resched_latency); 5508 5509 perf_event_task_tick(); 5510 5511 if (curr->flags & PF_WQ_WORKER) 5512 wq_worker_tick(curr); 5513 5514 #ifdef CONFIG_SMP 5515 rq->idle_balance = idle_cpu(cpu); 5516 sched_balance_trigger(rq); 5517 #endif 5518 } 5519 5520 #ifdef CONFIG_NO_HZ_FULL 5521 5522 struct tick_work { 5523 int cpu; 5524 atomic_t state; 5525 struct delayed_work work; 5526 }; 5527 /* Values for ->state, see diagram below. 
 */
5528 #define TICK_SCHED_REMOTE_OFFLINE	0
5529 #define TICK_SCHED_REMOTE_OFFLINING	1
5530 #define TICK_SCHED_REMOTE_RUNNING	2
5531 
5532 /*
5533  * State diagram for ->state:
5534  *
5535  *
5536  *          TICK_SCHED_REMOTE_OFFLINE
5537  *                    |   ^
5538  *                    |   |
5539  *                    |   | sched_tick_remote()
5540  *                    |   |
5541  *                    |   |
5542  *                    +--TICK_SCHED_REMOTE_OFFLINING
5543  *                    |   ^
5544  *                    |   |
5545  * sched_tick_start() |   | sched_tick_stop()
5546  *                    |   |
5547  *                    V   |
5548  *          TICK_SCHED_REMOTE_RUNNING
5549  *
5550  *
5551  * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
5552  * and sched_tick_start() are happy to leave the state in RUNNING.
5553  */
5554 
5555 static struct tick_work __percpu *tick_work_cpu;
5556 
5557 static void sched_tick_remote(struct work_struct *work)
5558 {
5559 	struct delayed_work *dwork = to_delayed_work(work);
5560 	struct tick_work *twork = container_of(dwork, struct tick_work, work);
5561 	int cpu = twork->cpu;
5562 	struct rq *rq = cpu_rq(cpu);
5563 	int os;
5564 
5565 	/*
5566 	 * Handle the tick only if it appears the remote CPU is running in full
5567 	 * dynticks mode. The check is racy by nature, but missing a tick or
5568 	 * having one too many is no big deal because the scheduler tick updates
5569 	 * statistics and checks timeslices in a time-independent way, regardless
5570 	 * of when exactly it is running.
5571 	 */
5572 	if (tick_nohz_tick_stopped_cpu(cpu)) {
5573 		guard(rq_lock_irq)(rq);
5574 		struct task_struct *curr = rq->curr;
5575 
5576 		if (cpu_online(cpu)) {
5577 			update_rq_clock(rq);
5578 
5579 			if (!is_idle_task(curr)) {
5580 				/*
5581 				 * Make sure the next tick runs within a
5582 				 * reasonable amount of time.
5583 				 */
5584 				u64 delta = rq_clock_task(rq) - curr->se.exec_start;
5585 				WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
5586 			}
5587 			curr->sched_class->task_tick(rq, curr, 0);
5588 
5589 			calc_load_nohz_remote(rq);
5590 		}
5591 	}
5592 
5593 	/*
5594 	 * Run the remote tick once per second (1Hz). This arbitrary
5595 	 * frequency is large enough to avoid overload but short enough
5596 	 * to keep scheduler internal stats reasonably up to date. But
5597 	 * first update state to reflect hotplug activity if required.
5598 	 */
5599 	os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
5600 	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
5601 	if (os == TICK_SCHED_REMOTE_RUNNING)
5602 		queue_delayed_work(system_unbound_wq, dwork, HZ);
5603 }
5604 
5605 static void sched_tick_start(int cpu)
5606 {
5607 	int os;
5608 	struct tick_work *twork;
5609 
5610 	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
5611 		return;
5612 
5613 	WARN_ON_ONCE(!tick_work_cpu);
5614 
5615 	twork = per_cpu_ptr(tick_work_cpu, cpu);
5616 	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
5617 	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
5618 	if (os == TICK_SCHED_REMOTE_OFFLINE) {
5619 		twork->cpu = cpu;
5620 		INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
5621 		queue_delayed_work(system_unbound_wq, &twork->work, HZ);
5622 	}
5623 }
5624 
5625 #ifdef CONFIG_HOTPLUG_CPU
5626 static void sched_tick_stop(int cpu)
5627 {
5628 	struct tick_work *twork;
5629 	int os;
5630 
5631 	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
5632 		return;
5633 
5634 	WARN_ON_ONCE(!tick_work_cpu);
5635 
5636 	twork = per_cpu_ptr(tick_work_cpu, cpu);
5637 	/* There cannot be competing actions, but don't rely on stop-machine. */
5638 	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
5639 	WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
5640 	/* Don't cancel, as this would mess up the state machine.
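	 * sched_tick_remote() will observe TICK_SCHED_REMOTE_OFFLINING, step
	 * it down to TICK_SCHED_REMOTE_OFFLINE via atomic_fetch_add_unless()
	 * and refrain from re-queueing itself.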
	 */
5641 }
5642 #endif /* CONFIG_HOTPLUG_CPU */
5643 
5644 int __init sched_tick_offload_init(void)
5645 {
5646 	tick_work_cpu = alloc_percpu(struct tick_work);
5647 	BUG_ON(!tick_work_cpu);
5648 	return 0;
5649 }
5650 
5651 #else /* !CONFIG_NO_HZ_FULL */
5652 static inline void sched_tick_start(int cpu) { }
5653 static inline void sched_tick_stop(int cpu) { }
5654 #endif
5655 
5656 #if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
5657 				defined(CONFIG_TRACE_PREEMPT_TOGGLE))
5658 /*
5659  * If the value passed in is equal to the current preempt count
5660  * then we just disabled preemption. Start timing the latency.
5661  */
5662 static inline void preempt_latency_start(int val)
5663 {
5664 	if (preempt_count() == val) {
5665 		unsigned long ip = get_lock_parent_ip();
5666 #ifdef CONFIG_DEBUG_PREEMPT
5667 		current->preempt_disable_ip = ip;
5668 #endif
5669 		trace_preempt_off(CALLER_ADDR0, ip);
5670 	}
5671 }
5672 
5673 void preempt_count_add(int val)
5674 {
5675 #ifdef CONFIG_DEBUG_PREEMPT
5676 	/*
5677 	 * Underflow?
5678 	 */
5679 	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
5680 		return;
5681 #endif
5682 	__preempt_count_add(val);
5683 #ifdef CONFIG_DEBUG_PREEMPT
5684 	/*
5685 	 * Spinlock count overflowing soon?
5686 	 */
5687 	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
5688 				PREEMPT_MASK - 10);
5689 #endif
5690 	preempt_latency_start(val);
5691 }
5692 EXPORT_SYMBOL(preempt_count_add);
5693 NOKPROBE_SYMBOL(preempt_count_add);
5694 
5695 /*
5696  * If the value passed in equals the current preempt count
5697  * then we just enabled preemption. Stop timing the latency.
5698  */
5699 static inline void preempt_latency_stop(int val)
5700 {
5701 	if (preempt_count() == val)
5702 		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
5703 }
5704 
5705 void preempt_count_sub(int val)
5706 {
5707 #ifdef CONFIG_DEBUG_PREEMPT
5708 	/*
5709 	 * Underflow?
5710 	 */
5711 	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
5712 		return;
5713 	/*
5714 	 * Is the spinlock portion underflowing?
5715 */ 5716 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) && 5717 !(preempt_count() & PREEMPT_MASK))) 5718 return; 5719 #endif 5720 5721 preempt_latency_stop(val); 5722 __preempt_count_sub(val); 5723 } 5724 EXPORT_SYMBOL(preempt_count_sub); 5725 NOKPROBE_SYMBOL(preempt_count_sub); 5726 5727 #else 5728 static inline void preempt_latency_start(int val) { } 5729 static inline void preempt_latency_stop(int val) { } 5730 #endif 5731 5732 static inline unsigned long get_preempt_disable_ip(struct task_struct *p) 5733 { 5734 #ifdef CONFIG_DEBUG_PREEMPT 5735 return p->preempt_disable_ip; 5736 #else 5737 return 0; 5738 #endif 5739 } 5740 5741 /* 5742 * Print scheduling while atomic bug: 5743 */ 5744 static noinline void __schedule_bug(struct task_struct *prev) 5745 { 5746 /* Save this before calling printk(), since that will clobber it */ 5747 unsigned long preempt_disable_ip = get_preempt_disable_ip(current); 5748 5749 if (oops_in_progress) 5750 return; 5751 5752 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", 5753 prev->comm, prev->pid, preempt_count()); 5754 5755 debug_show_held_locks(prev); 5756 print_modules(); 5757 if (irqs_disabled()) 5758 print_irqtrace_events(prev); 5759 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) { 5760 pr_err("Preemption disabled at:"); 5761 print_ip_sym(KERN_ERR, preempt_disable_ip); 5762 } 5763 check_panic_on_warn("scheduling while atomic"); 5764 5765 dump_stack(); 5766 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 5767 } 5768 5769 /* 5770 * Various schedule()-time debugging checks and statistics: 5771 */ 5772 static inline void schedule_debug(struct task_struct *prev, bool preempt) 5773 { 5774 #ifdef CONFIG_SCHED_STACK_END_CHECK 5775 if (task_stack_end_corrupted(prev)) 5776 panic("corrupted stack end detected inside scheduler\n"); 5777 5778 if (task_scs_end_corrupted(prev)) 5779 panic("corrupted shadow stack detected inside scheduler\n"); 5780 #endif 5781 5782 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 5783 if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) { 5784 printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n", 5785 prev->comm, prev->pid, prev->non_block_count); 5786 dump_stack(); 5787 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 5788 } 5789 #endif 5790 5791 if (unlikely(in_atomic_preempt_off())) { 5792 __schedule_bug(prev); 5793 preempt_count_set(PREEMPT_DISABLED); 5794 } 5795 rcu_sleep_check(); 5796 SCHED_WARN_ON(ct_state() == CONTEXT_USER); 5797 5798 profile_hit(SCHED_PROFILING, __builtin_return_address(0)); 5799 5800 schedstat_inc(this_rq()->sched_count); 5801 } 5802 5803 static void put_prev_task_balance(struct rq *rq, struct task_struct *prev, 5804 struct rq_flags *rf) 5805 { 5806 #ifdef CONFIG_SMP 5807 const struct sched_class *class; 5808 /* 5809 * We must do the balancing pass before put_prev_task(), such 5810 * that when we release the rq->lock the task is in the same 5811 * state as before we took rq->lock. 5812 * 5813 * We can terminate the balance pass as soon as we know there is 5814 * a runnable task of @class priority or higher. 5815 */ 5816 for_class_range(class, prev->sched_class, &idle_sched_class) { 5817 if (class->balance(rq, prev, rf)) 5818 break; 5819 } 5820 #endif 5821 5822 put_prev_task(rq, prev); 5823 5824 /* 5825 * We've updated @prev and no longer need the server link, clear it. 5826 * Must be done before ->pick_next_task() because that can (re)set 5827 * ->dl_server. 
5828  */
5829 	if (prev->dl_server)
5830 		prev->dl_server = NULL;
5831 }
5832 
5833 /*
5834  * Pick up the highest-prio task:
5835  */
5836 static inline struct task_struct *
5837 __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
5838 {
5839 	const struct sched_class *class;
5840 	struct task_struct *p;
5841 
5842 	/*
5843 	 * Optimization: we know that if all tasks are in the fair class we can
5844 	 * call that function directly, but only if the @prev task wasn't of a
5845 	 * higher scheduling class, because otherwise those lose the
5846 	 * opportunity to pull in more work from other CPUs.
5847 	 */
5848 	if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) &&
5849 		   rq->nr_running == rq->cfs.h_nr_running)) {
5850 
5851 		p = pick_next_task_fair(rq, prev, rf);
5852 		if (unlikely(p == RETRY_TASK))
5853 			goto restart;
5854 
5855 		/* Assume the next prioritized class is idle_sched_class */
5856 		if (!p) {
5857 			put_prev_task(rq, prev);
5858 			p = pick_next_task_idle(rq);
5859 		}
5860 
5861 		/*
5862 		 * This is a normal CFS pick, but the previous one could be a
5863 		 * DL pick. Clear it, as the previous pick is no longer current.
5864 		 */
5865 		if (prev->dl_server)
5866 			prev->dl_server = NULL;
5867 
5868 		/*
5869 		 * This is the fast path; it cannot be a DL server pick;
5870 		 * therefore even if @p == @prev, ->dl_server must be NULL.
5871 		 */
5872 		if (p->dl_server)
5873 			p->dl_server = NULL;
5874 
5875 		return p;
5876 	}
5877 
5878 restart:
5879 	put_prev_task_balance(rq, prev, rf);
5880 
5881 	for_each_class(class) {
5882 		p = class->pick_next_task(rq);
5883 		if (p)
5884 			return p;
5885 	}
5886 
5887 	BUG(); /* The idle class should always have a runnable task. */
5888 }
5889 
5890 #ifdef CONFIG_SCHED_CORE
5891 static inline bool is_task_rq_idle(struct task_struct *t)
5892 {
5893 	return (task_rq(t)->idle == t);
5894 }
5895 
5896 static inline bool cookie_equals(struct task_struct *a, unsigned long cookie)
5897 {
5898 	return is_task_rq_idle(a) || (a->core_cookie == cookie);
5899 }
5900 
5901 static inline bool cookie_match(struct task_struct *a, struct task_struct *b)
5902 {
5903 	if (is_task_rq_idle(a) || is_task_rq_idle(b))
5904 		return true;
5905 
5906 	return a->core_cookie == b->core_cookie;
5907 }
5908 
5909 static inline struct task_struct *pick_task(struct rq *rq)
5910 {
5911 	const struct sched_class *class;
5912 	struct task_struct *p;
5913 
5914 	for_each_class(class) {
5915 		p = class->pick_task(rq);
5916 		if (p)
5917 			return p;
5918 	}
5919 
5920 	BUG(); /* The idle class should always have a runnable task. */
5921 }
5922 
5923 extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
5924 
5925 static void queue_core_balance(struct rq *rq);
5926 
5927 static struct task_struct *
5928 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
5929 {
5930 	struct task_struct *next, *p, *max = NULL;
5931 	const struct cpumask *smt_mask;
5932 	bool fi_before = false;
5933 	bool core_clock_updated = (rq == rq->core);
5934 	unsigned long cookie;
5935 	int i, cpu, occ = 0;
5936 	struct rq *rq_i;
5937 	bool need_sync;
5938 
5939 	if (!sched_core_enabled(rq))
5940 		return __pick_next_task(rq, prev, rf);
5941 
5942 	cpu = cpu_of(rq);
5943 
5944 	/* The stopper task is switching into idle; no need for core-wide selection. */
5945 	if (cpu_is_offline(cpu)) {
5946 		/*
5947 		 * Reset core_pick so that we don't enter the fastpath when
5948 		 * coming online. core_pick would already have been migrated to
5949 		 * another CPU during offline.
5950 */ 5951 rq->core_pick = NULL; 5952 return __pick_next_task(rq, prev, rf); 5953 } 5954 5955 /* 5956 * If there were no {en,de}queues since we picked (IOW, the task 5957 * pointers are all still valid), and we haven't scheduled the last 5958 * pick yet, do so now. 5959 * 5960 * rq->core_pick can be NULL if no selection was made for a CPU because 5961 * it was either offline or went offline during a sibling's core-wide 5962 * selection. In this case, do a core-wide selection. 5963 */ 5964 if (rq->core->core_pick_seq == rq->core->core_task_seq && 5965 rq->core->core_pick_seq != rq->core_sched_seq && 5966 rq->core_pick) { 5967 WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq); 5968 5969 next = rq->core_pick; 5970 if (next != prev) { 5971 put_prev_task(rq, prev); 5972 set_next_task(rq, next); 5973 } 5974 5975 rq->core_pick = NULL; 5976 goto out; 5977 } 5978 5979 put_prev_task_balance(rq, prev, rf); 5980 5981 smt_mask = cpu_smt_mask(cpu); 5982 need_sync = !!rq->core->core_cookie; 5983 5984 /* reset state */ 5985 rq->core->core_cookie = 0UL; 5986 if (rq->core->core_forceidle_count) { 5987 if (!core_clock_updated) { 5988 update_rq_clock(rq->core); 5989 core_clock_updated = true; 5990 } 5991 sched_core_account_forceidle(rq); 5992 /* reset after accounting force idle */ 5993 rq->core->core_forceidle_start = 0; 5994 rq->core->core_forceidle_count = 0; 5995 rq->core->core_forceidle_occupation = 0; 5996 need_sync = true; 5997 fi_before = true; 5998 } 5999 6000 /* 6001 * core->core_task_seq, core->core_pick_seq, rq->core_sched_seq 6002 * 6003 * @task_seq guards the task state ({en,de}queues) 6004 * @pick_seq is the @task_seq we did a selection on 6005 * @sched_seq is the @pick_seq we scheduled 6006 * 6007 * However, preemptions can cause multiple picks on the same task set. 6008 * 'Fix' this by also increasing @task_seq for every pick. 6009 */ 6010 rq->core->core_task_seq++; 6011 6012 /* 6013 * Optimize for common case where this CPU has no cookies 6014 * and there are no cookied tasks running on siblings. 6015 */ 6016 if (!need_sync) { 6017 next = pick_task(rq); 6018 if (!next->core_cookie) { 6019 rq->core_pick = NULL; 6020 /* 6021 * For robustness, update the min_vruntime_fi for 6022 * unconstrained picks as well. 6023 */ 6024 WARN_ON_ONCE(fi_before); 6025 task_vruntime_update(rq, next, false); 6026 goto out_set_next; 6027 } 6028 } 6029 6030 /* 6031 * For each thread: do the regular task pick and find the max prio task 6032 * amongst them. 6033 * 6034 * Tie-break prio towards the current CPU 6035 */ 6036 for_each_cpu_wrap(i, smt_mask, cpu) { 6037 rq_i = cpu_rq(i); 6038 6039 /* 6040 * Current cpu always has its clock updated on entrance to 6041 * pick_next_task(). If the current cpu is not the core, 6042 * the core may also have been updated above. 6043 */ 6044 if (i != cpu && (rq_i != rq->core || !core_clock_updated)) 6045 update_rq_clock(rq_i); 6046 6047 p = rq_i->core_pick = pick_task(rq_i); 6048 if (!max || prio_less(max, p, fi_before)) 6049 max = p; 6050 } 6051 6052 cookie = rq->core->core_cookie = max->core_cookie; 6053 6054 /* 6055 * For each thread: try and find a runnable task that matches @max or 6056 * force idle. 
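 *
 * For illustration, on an SMT-2 core with @max carrying cookie C, the
 * sibling ends up running either another cookie-C task or its idle
 * task (forced idle), so tasks with different cookies never share the
 * core at the same time.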
6057  */
6058 	for_each_cpu(i, smt_mask) {
6059 		rq_i = cpu_rq(i);
6060 		p = rq_i->core_pick;
6061 
6062 		if (!cookie_equals(p, cookie)) {
6063 			p = NULL;
6064 			if (cookie)
6065 				p = sched_core_find(rq_i, cookie);
6066 			if (!p)
6067 				p = idle_sched_class.pick_task(rq_i);
6068 		}
6069 
6070 		rq_i->core_pick = p;
6071 
6072 		if (p == rq_i->idle) {
6073 			if (rq_i->nr_running) {
6074 				rq->core->core_forceidle_count++;
6075 				if (!fi_before)
6076 					rq->core->core_forceidle_seq++;
6077 			}
6078 		} else {
6079 			occ++;
6080 		}
6081 	}
6082 
6083 	if (schedstat_enabled() && rq->core->core_forceidle_count) {
6084 		rq->core->core_forceidle_start = rq_clock(rq->core);
6085 		rq->core->core_forceidle_occupation = occ;
6086 	}
6087 
6088 	rq->core->core_pick_seq = rq->core->core_task_seq;
6089 	next = rq->core_pick;
6090 	rq->core_sched_seq = rq->core->core_pick_seq;
6091 
6092 	/* Something should have been selected for current CPU */
6093 	WARN_ON_ONCE(!next);
6094 
6095 	/*
6096 	 * Reschedule siblings
6097 	 *
6098 	 * NOTE: L1TF -- at this point we're no longer running the old task and
6099 	 * sending an IPI (below) ensures the sibling will no longer be running
6100 	 * their task. This ensures there is no inter-sibling overlap between
6101 	 * non-matching user state.
6102 	 */
6103 	for_each_cpu(i, smt_mask) {
6104 		rq_i = cpu_rq(i);
6105 
6106 		/*
6107 		 * An online sibling might have gone offline before a task
6108 		 * could be picked for it, or it might be offline but later
6109 		 * happen to come online, but it's too late and nothing was
6110 		 * picked for it. That's OK; it will pick tasks for itself,
6111 		 * so ignore it.
6112 		 */
6113 		if (!rq_i->core_pick)
6114 			continue;
6115 
6116 		/*
6117 		 * Update for new !FI->FI transitions, or if continuing to be in !FI:
6118 		 * fi_before	fi	update?
6119 		 *  0		0	1
6120 		 *  0		1	1
6121 		 *  1		0	1
6122 		 *  1		1	0
6123 		 */
6124 		if (!(fi_before && rq->core->core_forceidle_count))
6125 			task_vruntime_update(rq_i, rq_i->core_pick, !!rq->core->core_forceidle_count);
6126 
6127 		rq_i->core_pick->core_occupation = occ;
6128 
6129 		if (i == cpu) {
6130 			rq_i->core_pick = NULL;
6131 			continue;
6132 		}
6133 
6134 		/* Did we break L1TF mitigation requirements? */
6135 		WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick));
6136 
6137 		if (rq_i->curr == rq_i->core_pick) {
6138 			rq_i->core_pick = NULL;
6139 			continue;
6140 		}
6141 
6142 		resched_curr(rq_i);
6143 	}
6144 
6145 out_set_next:
6146 	set_next_task(rq, next);
6147 out:
6148 	if (rq->core->core_forceidle_count && next == rq->idle)
6149 		queue_core_balance(rq);
6150 
6151 	return next;
6152 }
6153 
6154 static bool try_steal_cookie(int this, int that)
6155 {
6156 	struct rq *dst = cpu_rq(this), *src = cpu_rq(that);
6157 	struct task_struct *p;
6158 	unsigned long cookie;
6159 	bool success = false;
6160 
6161 	guard(irq)();
6162 	guard(double_rq_lock)(dst, src);
6163 
6164 	cookie = dst->core->core_cookie;
6165 	if (!cookie)
6166 		return false;
6167 
6168 	if (dst->curr != dst->idle)
6169 		return false;
6170 
6171 	p = sched_core_find(src, cookie);
6172 	if (!p)
6173 		return false;
6174 
6175 	do {
6176 		if (p == src->core_pick || p == src->curr)
6177 			goto next;
6178 
6179 		if (!is_cpu_allowed(p, this))
6180 			goto next;
6181 
6182 		if (p->core_occupation > dst->idle->core_occupation)
6183 			goto next;
6184 		/*
6185 		 * sched_core_find() and sched_core_next() will ensure
6186 		 * that task @p is not throttled now; we also need to
6187 		 * check whether the runqueue of the destination CPU is
6188 		 * being throttled.
6189 */ 6190 if (sched_task_is_throttled(p, this)) 6191 goto next; 6192 6193 deactivate_task(src, p, 0); 6194 set_task_cpu(p, this); 6195 activate_task(dst, p, 0); 6196 6197 resched_curr(dst); 6198 6199 success = true; 6200 break; 6201 6202 next: 6203 p = sched_core_next(p, cookie); 6204 } while (p); 6205 6206 return success; 6207 } 6208 6209 static bool steal_cookie_task(int cpu, struct sched_domain *sd) 6210 { 6211 int i; 6212 6213 for_each_cpu_wrap(i, sched_domain_span(sd), cpu + 1) { 6214 if (i == cpu) 6215 continue; 6216 6217 if (need_resched()) 6218 break; 6219 6220 if (try_steal_cookie(cpu, i)) 6221 return true; 6222 } 6223 6224 return false; 6225 } 6226 6227 static void sched_core_balance(struct rq *rq) 6228 { 6229 struct sched_domain *sd; 6230 int cpu = cpu_of(rq); 6231 6232 guard(preempt)(); 6233 guard(rcu)(); 6234 6235 raw_spin_rq_unlock_irq(rq); 6236 for_each_domain(cpu, sd) { 6237 if (need_resched()) 6238 break; 6239 6240 if (steal_cookie_task(cpu, sd)) 6241 break; 6242 } 6243 raw_spin_rq_lock_irq(rq); 6244 } 6245 6246 static DEFINE_PER_CPU(struct balance_callback, core_balance_head); 6247 6248 static void queue_core_balance(struct rq *rq) 6249 { 6250 if (!sched_core_enabled(rq)) 6251 return; 6252 6253 if (!rq->core->core_cookie) 6254 return; 6255 6256 if (!rq->nr_running) /* not forced idle */ 6257 return; 6258 6259 queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance); 6260 } 6261 6262 DEFINE_LOCK_GUARD_1(core_lock, int, 6263 sched_core_lock(*_T->lock, &_T->flags), 6264 sched_core_unlock(*_T->lock, &_T->flags), 6265 unsigned long flags) 6266 6267 static void sched_core_cpu_starting(unsigned int cpu) 6268 { 6269 const struct cpumask *smt_mask = cpu_smt_mask(cpu); 6270 struct rq *rq = cpu_rq(cpu), *core_rq = NULL; 6271 int t; 6272 6273 guard(core_lock)(&cpu); 6274 6275 WARN_ON_ONCE(rq->core != rq); 6276 6277 /* if we're the first, we'll be our own leader */ 6278 if (cpumask_weight(smt_mask) == 1) 6279 return; 6280 6281 /* find the leader */ 6282 for_each_cpu(t, smt_mask) { 6283 if (t == cpu) 6284 continue; 6285 rq = cpu_rq(t); 6286 if (rq->core == rq) { 6287 core_rq = rq; 6288 break; 6289 } 6290 } 6291 6292 if (WARN_ON_ONCE(!core_rq)) /* whoopsie */ 6293 return; 6294 6295 /* install and validate core_rq */ 6296 for_each_cpu(t, smt_mask) { 6297 rq = cpu_rq(t); 6298 6299 if (t == cpu) 6300 rq->core = core_rq; 6301 6302 WARN_ON_ONCE(rq->core != core_rq); 6303 } 6304 } 6305 6306 static void sched_core_cpu_deactivate(unsigned int cpu) 6307 { 6308 const struct cpumask *smt_mask = cpu_smt_mask(cpu); 6309 struct rq *rq = cpu_rq(cpu), *core_rq = NULL; 6310 int t; 6311 6312 guard(core_lock)(&cpu); 6313 6314 /* if we're the last man standing, nothing to do */ 6315 if (cpumask_weight(smt_mask) == 1) { 6316 WARN_ON_ONCE(rq->core != rq); 6317 return; 6318 } 6319 6320 /* if we're not the leader, nothing to do */ 6321 if (rq->core != rq) 6322 return; 6323 6324 /* find a new leader */ 6325 for_each_cpu(t, smt_mask) { 6326 if (t == cpu) 6327 continue; 6328 core_rq = cpu_rq(t); 6329 break; 6330 } 6331 6332 if (WARN_ON_ONCE(!core_rq)) /* impossible */ 6333 return; 6334 6335 /* copy the shared state to the new leader */ 6336 core_rq->core_task_seq = rq->core_task_seq; 6337 core_rq->core_pick_seq = rq->core_pick_seq; 6338 core_rq->core_cookie = rq->core_cookie; 6339 core_rq->core_forceidle_count = rq->core_forceidle_count; 6340 core_rq->core_forceidle_seq = rq->core_forceidle_seq; 6341 core_rq->core_forceidle_occupation = rq->core_forceidle_occupation; 6342 6343 /* 
6344  * Accounting edge for forced idle is handled in pick_next_task().
6345  * Don't need another one here, since the hotplug thread shouldn't
6346  * have a cookie.
6347  */
6348 	core_rq->core_forceidle_start = 0;
6349 
6350 	/* install new leader */
6351 	for_each_cpu(t, smt_mask) {
6352 		rq = cpu_rq(t);
6353 		rq->core = core_rq;
6354 	}
6355 }
6356 
6357 static inline void sched_core_cpu_dying(unsigned int cpu)
6358 {
6359 	struct rq *rq = cpu_rq(cpu);
6360 
6361 	if (rq->core != rq)
6362 		rq->core = rq;
6363 }
6364 
6365 #else /* !CONFIG_SCHED_CORE */
6366 
6367 static inline void sched_core_cpu_starting(unsigned int cpu) {}
6368 static inline void sched_core_cpu_deactivate(unsigned int cpu) {}
6369 static inline void sched_core_cpu_dying(unsigned int cpu) {}
6370 
6371 static struct task_struct *
6372 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6373 {
6374 	return __pick_next_task(rq, prev, rf);
6375 }
6376 
6377 #endif /* CONFIG_SCHED_CORE */
6378 
6379 /*
6380  * Constants for the sched_mode argument of __schedule().
6381  *
6382  * The mode argument allows RT-enabled kernels to differentiate a
6383  * preemption from blocking on an 'sleeping' spin/rwlock. Note that
6384  * SM_MASK_PREEMPT for !RT has all bits set, which allows the compiler to
6385  * optimize the AND operation out and just check for zero.
6386  */
6387 #define SM_NONE			0x0
6388 #define SM_PREEMPT		0x1
6389 #define SM_RTLOCK_WAIT		0x2
6390 
6391 #ifndef CONFIG_PREEMPT_RT
6392 # define SM_MASK_PREEMPT	(~0U)
6393 #else
6394 # define SM_MASK_PREEMPT	SM_PREEMPT
6395 #endif
6396 
6397 /*
6398  * __schedule() is the main scheduler function.
6399  *
6400  * The main means of driving the scheduler and thus entering this function are:
6401  *
6402  *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
6403  *
6404  *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
6405  *      paths. For example, see arch/x86/entry/entry_64.S.
6406  *
6407  *      To drive preemption between tasks, the scheduler sets the flag in the
6408  *      timer interrupt handler, sched_tick().
6409  *
6410  *   3. Wakeups don't really cause entry into schedule(). They add a
6411  *      task to the run-queue and that's it.
6412  *
6413  *      Now, if the new task added to the run-queue preempts the current
6414  *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
6415  *      called on the nearest possible occasion:
6416  *
6417  *       - If the kernel is preemptible (CONFIG_PREEMPTION=y):
6418  *
6419  *         - in syscall or exception context, at the next outermost
6420  *           preempt_enable(). (this might be as soon as the wake_up()'s
6421  *           spin_unlock()!)
6422  *
6423  *         - in IRQ context, return from interrupt-handler to
6424  *           preemptible context
6425  *
6426  *       - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
6427  *         then at the next:
6428  *
6429  *          - cond_resched() call
6430  *          - explicit schedule() call
6431  *          - return from syscall or exception to user-space
6432  *          - return from interrupt-handler to user-space
6433  *
6434  * WARNING: must be called with preemption disabled!
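 *
 * The canonical explicit-blocking pattern behind case 1 looks roughly
 * like this (an illustrative sketch; 'condition' is a placeholder):
 *
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);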
6435 */ 6436 static void __sched notrace __schedule(unsigned int sched_mode) 6437 { 6438 struct task_struct *prev, *next; 6439 unsigned long *switch_count; 6440 unsigned long prev_state; 6441 struct rq_flags rf; 6442 struct rq *rq; 6443 int cpu; 6444 6445 cpu = smp_processor_id(); 6446 rq = cpu_rq(cpu); 6447 prev = rq->curr; 6448 6449 schedule_debug(prev, !!sched_mode); 6450 6451 if (sched_feat(HRTICK) || sched_feat(HRTICK_DL)) 6452 hrtick_clear(rq); 6453 6454 local_irq_disable(); 6455 rcu_note_context_switch(!!sched_mode); 6456 6457 /* 6458 * Make sure that signal_pending_state()->signal_pending() below 6459 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) 6460 * done by the caller to avoid the race with signal_wake_up(): 6461 * 6462 * __set_current_state(@state) signal_wake_up() 6463 * schedule() set_tsk_thread_flag(p, TIF_SIGPENDING) 6464 * wake_up_state(p, state) 6465 * LOCK rq->lock LOCK p->pi_state 6466 * smp_mb__after_spinlock() smp_mb__after_spinlock() 6467 * if (signal_pending_state()) if (p->state & @state) 6468 * 6469 * Also, the membarrier system call requires a full memory barrier 6470 * after coming from user-space, before storing to rq->curr; this 6471 * barrier matches a full barrier in the proximity of the membarrier 6472 * system call exit. 6473 */ 6474 rq_lock(rq, &rf); 6475 smp_mb__after_spinlock(); 6476 6477 /* Promote REQ to ACT */ 6478 rq->clock_update_flags <<= 1; 6479 update_rq_clock(rq); 6480 rq->clock_update_flags = RQCF_UPDATED; 6481 6482 switch_count = &prev->nivcsw; 6483 6484 /* 6485 * We must load prev->state once (task_struct::state is volatile), such 6486 * that we form a control dependency vs deactivate_task() below. 6487 */ 6488 prev_state = READ_ONCE(prev->__state); 6489 if (!(sched_mode & SM_MASK_PREEMPT) && prev_state) { 6490 if (signal_pending_state(prev_state, prev)) { 6491 WRITE_ONCE(prev->__state, TASK_RUNNING); 6492 } else { 6493 prev->sched_contributes_to_load = 6494 (prev_state & TASK_UNINTERRUPTIBLE) && 6495 !(prev_state & TASK_NOLOAD) && 6496 !(prev_state & TASK_FROZEN); 6497 6498 if (prev->sched_contributes_to_load) 6499 rq->nr_uninterruptible++; 6500 6501 /* 6502 * __schedule() ttwu() 6503 * prev_state = prev->state; if (p->on_rq && ...) 6504 * if (prev_state) goto out; 6505 * p->on_rq = 0; smp_acquire__after_ctrl_dep(); 6506 * p->state = TASK_WAKING 6507 * 6508 * Where __schedule() and ttwu() have matching control dependencies. 6509 * 6510 * After this, schedule() must not care about p->state any more. 6511 */ 6512 deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK); 6513 6514 if (prev->in_iowait) { 6515 atomic_inc(&rq->nr_iowait); 6516 delayacct_blkio_start(); 6517 } 6518 } 6519 switch_count = &prev->nvcsw; 6520 } 6521 6522 next = pick_next_task(rq, prev, &rf); 6523 clear_tsk_need_resched(prev); 6524 clear_preempt_need_resched(); 6525 #ifdef CONFIG_SCHED_DEBUG 6526 rq->last_seen_need_resched_ns = 0; 6527 #endif 6528 6529 if (likely(prev != next)) { 6530 rq->nr_switches++; 6531 /* 6532 * RCU users of rcu_dereference(rq->curr) may not see 6533 * changes to task_struct made by pick_next_task(). 6534 */ 6535 RCU_INIT_POINTER(rq->curr, next); 6536 /* 6537 * The membarrier system call requires each architecture 6538 * to have a full memory barrier after updating 6539 * rq->curr, before returning to user-space. 6540 * 6541 * Here are the schemes providing that barrier on the 6542 * various architectures: 6543 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC, 6544 * RISC-V. 
switch_mm() relies on membarrier_arch_switch_mm() 6545 * on PowerPC and on RISC-V. 6546 * - finish_lock_switch() for weakly-ordered 6547 * architectures where spin_unlock is a full barrier, 6548 * - switch_to() for arm64 (weakly-ordered, spin_unlock 6549 * is a RELEASE barrier), 6550 * 6551 * The barrier matches a full barrier in the proximity of 6552 * the membarrier system call entry. 6553 * 6554 * On RISC-V, this barrier pairing is also needed for the 6555 * SYNC_CORE command when switching between processes, cf. 6556 * the inline comments in membarrier_arch_switch_mm(). 6557 */ 6558 ++*switch_count; 6559 6560 migrate_disable_switch(rq, prev); 6561 psi_account_irqtime(rq, prev, next); 6562 psi_sched_switch(prev, next, !task_on_rq_queued(prev)); 6563 6564 trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state); 6565 6566 /* Also unlocks the rq: */ 6567 rq = context_switch(rq, prev, next, &rf); 6568 } else { 6569 rq_unpin_lock(rq, &rf); 6570 __balance_callbacks(rq); 6571 raw_spin_rq_unlock_irq(rq); 6572 } 6573 } 6574 6575 void __noreturn do_task_dead(void) 6576 { 6577 /* Causes final put_task_struct in finish_task_switch(): */ 6578 set_special_state(TASK_DEAD); 6579 6580 /* Tell freezer to ignore us: */ 6581 current->flags |= PF_NOFREEZE; 6582 6583 __schedule(SM_NONE); 6584 BUG(); 6585 6586 /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */ 6587 for (;;) 6588 cpu_relax(); 6589 } 6590 6591 static inline void sched_submit_work(struct task_struct *tsk) 6592 { 6593 static DEFINE_WAIT_OVERRIDE_MAP(sched_map, LD_WAIT_CONFIG); 6594 unsigned int task_flags; 6595 6596 /* 6597 * Establish LD_WAIT_CONFIG context to ensure none of the code called 6598 * will use a blocking primitive -- which would lead to recursion. 6599 */ 6600 lock_map_acquire_try(&sched_map); 6601 6602 task_flags = tsk->flags; 6603 /* 6604 * If a worker goes to sleep, notify and ask workqueue whether it 6605 * wants to wake up a task to maintain concurrency. 6606 */ 6607 if (task_flags & PF_WQ_WORKER) 6608 wq_worker_sleeping(tsk); 6609 else if (task_flags & PF_IO_WORKER) 6610 io_wq_worker_sleeping(tsk); 6611 6612 /* 6613 * spinlock and rwlock must not flush block requests. This will 6614 * deadlock if the callback attempts to acquire a lock which is 6615 * already acquired. 6616 */ 6617 SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT); 6618 6619 /* 6620 * If we are going to sleep and we have plugged IO queued, 6621 * make sure to submit it to avoid deadlocks. 
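 *
 * E.g. (illustrative) a task that queued a read in its own block plug
 * and then sleeps waiting for that very IO would never be woken up:
 * the request only reaches the device once the plug is flushed.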
6622  */
6623 	blk_flush_plug(tsk->plug, true);
6624 
6625 	lock_map_release(&sched_map);
6626 }
6627 
6628 static void sched_update_worker(struct task_struct *tsk)
6629 {
6630 	if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER | PF_BLOCK_TS)) {
6631 		if (tsk->flags & PF_BLOCK_TS)
6632 			blk_plug_invalidate_ts(tsk);
6633 		if (tsk->flags & PF_WQ_WORKER)
6634 			wq_worker_running(tsk);
6635 		else if (tsk->flags & PF_IO_WORKER)
6636 			io_wq_worker_running(tsk);
6637 	}
6638 }
6639 
6640 static __always_inline void __schedule_loop(unsigned int sched_mode)
6641 {
6642 	do {
6643 		preempt_disable();
6644 		__schedule(sched_mode);
6645 		sched_preempt_enable_no_resched();
6646 	} while (need_resched());
6647 }
6648 
6649 asmlinkage __visible void __sched schedule(void)
6650 {
6651 	struct task_struct *tsk = current;
6652 
6653 #ifdef CONFIG_RT_MUTEXES
6654 	lockdep_assert(!tsk->sched_rt_mutex);
6655 #endif
6656 
6657 	if (!task_is_running(tsk))
6658 		sched_submit_work(tsk);
6659 	__schedule_loop(SM_NONE);
6660 	sched_update_worker(tsk);
6661 }
6662 EXPORT_SYMBOL(schedule);
6663 
6664 /*
6665  * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
6666  * state (have scheduled out non-voluntarily) by making sure that all
6667  * tasks have either left the run queue or have gone into user space.
6668  * As idle tasks do not do either, they must not ever be preempted
6669  * (schedule out non-voluntarily).
6670  *
6671  * schedule_idle() is similar to schedule_preempt_disabled() except that it
6672  * never enables preemption because it does not call sched_submit_work().
6673  */
6674 void __sched schedule_idle(void)
6675 {
6676 	/*
6677 	 * As this skips calling sched_submit_work(), which the idle task can
6678 	 * safely skip anyway because that function is a NOP when the task is
6679 	 * in the TASK_RUNNING state, make sure this isn't used someplace that
6680 	 * the current task can be in any other state. Note, idle is always in
6681 	 * the TASK_RUNNING state.
6682 	 */
6683 	WARN_ON_ONCE(current->__state);
6684 	do {
6685 		__schedule(SM_NONE);
6686 	} while (need_resched());
6687 }
6688 
6689 #if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK)
6690 asmlinkage __visible void __sched schedule_user(void)
6691 {
6692 	/*
6693 	 * If we come here after a random call to set_need_resched(),
6694 	 * or we have been woken up remotely but the IPI has not yet arrived,
6695 	 * we haven't yet exited the RCU idle mode. Do it here manually until
6696 	 * we find a better solution.
6697 	 *
6698 	 * NB: There are buggy callers of this function. Ideally we
6699 	 * should warn if prev_state != CONTEXT_USER, but that will trigger
6700 	 * too frequently to make sense yet.
6701 	 */
6702 	enum ctx_state prev_state = exception_enter();
6703 	schedule();
6704 	exception_exit(prev_state);
6705 }
6706 #endif
6707 
6708 /**
6709  * schedule_preempt_disabled - called with preemption disabled
6710  *
6711  * Returns with preemption disabled.
Note: preempt_count must be 1 6712 */ 6713 void __sched schedule_preempt_disabled(void) 6714 { 6715 sched_preempt_enable_no_resched(); 6716 schedule(); 6717 preempt_disable(); 6718 } 6719 6720 #ifdef CONFIG_PREEMPT_RT 6721 void __sched notrace schedule_rtlock(void) 6722 { 6723 __schedule_loop(SM_RTLOCK_WAIT); 6724 } 6725 NOKPROBE_SYMBOL(schedule_rtlock); 6726 #endif 6727 6728 static void __sched notrace preempt_schedule_common(void) 6729 { 6730 do { 6731 /* 6732 * Because the function tracer can trace preempt_count_sub() 6733 * and it also uses preempt_enable/disable_notrace(), if 6734 * NEED_RESCHED is set, the preempt_enable_notrace() called 6735 * by the function tracer will call this function again and 6736 * cause infinite recursion. 6737 * 6738 * Preemption must be disabled here before the function 6739 * tracer can trace. Break up preempt_disable() into two 6740 * calls. One to disable preemption without fear of being 6741 * traced. The other to still record the preemption latency, 6742 * which can also be traced by the function tracer. 6743 */ 6744 preempt_disable_notrace(); 6745 preempt_latency_start(1); 6746 __schedule(SM_PREEMPT); 6747 preempt_latency_stop(1); 6748 preempt_enable_no_resched_notrace(); 6749 6750 /* 6751 * Check again in case we missed a preemption opportunity 6752 * between schedule and now. 6753 */ 6754 } while (need_resched()); 6755 } 6756 6757 #ifdef CONFIG_PREEMPTION 6758 /* 6759 * This is the entry point to schedule() from in-kernel preemption 6760 * off of preempt_enable. 6761 */ 6762 asmlinkage __visible void __sched notrace preempt_schedule(void) 6763 { 6764 /* 6765 * If there is a non-zero preempt_count or interrupts are disabled, 6766 * we do not want to preempt the current task. Just return.. 6767 */ 6768 if (likely(!preemptible())) 6769 return; 6770 preempt_schedule_common(); 6771 } 6772 NOKPROBE_SYMBOL(preempt_schedule); 6773 EXPORT_SYMBOL(preempt_schedule); 6774 6775 #ifdef CONFIG_PREEMPT_DYNAMIC 6776 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) 6777 #ifndef preempt_schedule_dynamic_enabled 6778 #define preempt_schedule_dynamic_enabled preempt_schedule 6779 #define preempt_schedule_dynamic_disabled NULL 6780 #endif 6781 DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled); 6782 EXPORT_STATIC_CALL_TRAMP(preempt_schedule); 6783 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) 6784 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule); 6785 void __sched notrace dynamic_preempt_schedule(void) 6786 { 6787 if (!static_branch_unlikely(&sk_dynamic_preempt_schedule)) 6788 return; 6789 preempt_schedule(); 6790 } 6791 NOKPROBE_SYMBOL(dynamic_preempt_schedule); 6792 EXPORT_SYMBOL(dynamic_preempt_schedule); 6793 #endif 6794 #endif 6795 6796 /** 6797 * preempt_schedule_notrace - preempt_schedule called by tracing 6798 * 6799 * The tracing infrastructure uses preempt_enable_notrace to prevent 6800 * recursion and tracing preempt enabling caused by the tracing 6801 * infrastructure itself. But as tracing can happen in areas coming 6802 * from userspace or just about to enter userspace, a preempt enable 6803 * can occur before user_exit() is called. This will cause the scheduler 6804 * to be called when the system is still in usermode. 6805 * 6806 * To prevent this, the preempt_enable_notrace will use this function 6807 * instead of preempt_schedule() to exit user context if needed before 6808 * calling the scheduler. 
6809  */
6810 asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
6811 {
6812 	enum ctx_state prev_ctx;
6813 
6814 	if (likely(!preemptible()))
6815 		return;
6816 
6817 	do {
6818 		/*
6819 		 * Because the function tracer can trace preempt_count_sub()
6820 		 * and it also uses preempt_enable/disable_notrace(), if
6821 		 * NEED_RESCHED is set, the preempt_enable_notrace() called
6822 		 * by the function tracer will call this function again and
6823 		 * cause infinite recursion.
6824 		 *
6825 		 * Preemption must be disabled here before the function
6826 		 * tracer can trace. Break up preempt_disable() into two
6827 		 * calls. One to disable preemption without fear of being
6828 		 * traced. The other to still record the preemption latency,
6829 		 * which can also be traced by the function tracer.
6830 		 */
6831 		preempt_disable_notrace();
6832 		preempt_latency_start(1);
6833 		/*
6834 		 * Needs preempt disabled in case user_exit() is traced
6835 		 * and the tracer calls preempt_enable_notrace() causing
6836 		 * an infinite recursion.
6837 		 */
6838 		prev_ctx = exception_enter();
6839 		__schedule(SM_PREEMPT);
6840 		exception_exit(prev_ctx);
6841 
6842 		preempt_latency_stop(1);
6843 		preempt_enable_no_resched_notrace();
6844 	} while (need_resched());
6845 }
6846 EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
6847 
6848 #ifdef CONFIG_PREEMPT_DYNAMIC
6849 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
6850 #ifndef preempt_schedule_notrace_dynamic_enabled
6851 #define preempt_schedule_notrace_dynamic_enabled	preempt_schedule_notrace
6852 #define preempt_schedule_notrace_dynamic_disabled	NULL
6853 #endif
6854 DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
6855 EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
6856 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
6857 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace);
6858 void __sched notrace dynamic_preempt_schedule_notrace(void)
6859 {
6860 	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace))
6861 		return;
6862 	preempt_schedule_notrace();
6863 }
6864 NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace);
6865 EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
6866 #endif
6867 #endif
6868 
6869 #endif /* CONFIG_PREEMPTION */
6870 
6871 /*
6872  * This is the entry point to schedule() from kernel preemption
6873  * off of IRQ context.
6874  * Note that this is called and returns with IRQs disabled. This
6875  * protects us against recursive calls from IRQ contexts.
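 *
 * An assumed-shape (pseudo-code, not any particular architecture)
 * IRQ-return path calling this, for illustration only:
 *
 *	if (preempt_count() == 0 && need_resched())
 *		preempt_schedule_irq();		// IRQs still disabled here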
6876  */
6877 asmlinkage __visible void __sched preempt_schedule_irq(void)
6878 {
6879 	enum ctx_state prev_state;
6880 
6881 	/* Catch callers which need to be fixed */
6882 	BUG_ON(preempt_count() || !irqs_disabled());
6883 
6884 	prev_state = exception_enter();
6885 
6886 	do {
6887 		preempt_disable();
6888 		local_irq_enable();
6889 		__schedule(SM_PREEMPT);
6890 		local_irq_disable();
6891 		sched_preempt_enable_no_resched();
6892 	} while (need_resched());
6893 
6894 	exception_exit(prev_state);
6895 }
6896 
6897 int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
6898 			  void *key)
6899 {
6900 	WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~(WF_SYNC|WF_CURRENT_CPU));
6901 	return try_to_wake_up(curr->private, mode, wake_flags);
6902 }
6903 EXPORT_SYMBOL(default_wake_function);
6904 
6905 void __setscheduler_prio(struct task_struct *p, int prio)
6906 {
6907 	if (dl_prio(prio))
6908 		p->sched_class = &dl_sched_class;
6909 	else if (rt_prio(prio))
6910 		p->sched_class = &rt_sched_class;
6911 	else
6912 		p->sched_class = &fair_sched_class;
6913 
6914 	p->prio = prio;
6915 }
6916 
6917 #ifdef CONFIG_RT_MUTEXES
6918 
6919 /*
6920  * Would be more useful with typeof()/auto_type but they don't mix with
6921  * bit-fields. Since it's a local thing, use int. Keep the generic-sounding
6922  * name such that if someone were to implement this function we get to compare
6923  * notes.
6924  */
6925 #define fetch_and_set(x, v)	({ int _x = (x); (x) = (v); _x; })
6926 
6927 void rt_mutex_pre_schedule(void)
6928 {
6929 	lockdep_assert(!fetch_and_set(current->sched_rt_mutex, 1));
6930 	sched_submit_work(current);
6931 }
6932 
6933 void rt_mutex_schedule(void)
6934 {
6935 	lockdep_assert(current->sched_rt_mutex);
6936 	__schedule_loop(SM_NONE);
6937 }
6938 
6939 void rt_mutex_post_schedule(void)
6940 {
6941 	sched_update_worker(current);
6942 	lockdep_assert(fetch_and_set(current->sched_rt_mutex, 0));
6943 }
6944 
6945 /*
6946  * rt_mutex_setprio - set the current priority of a task
6947  * @p: task to boost
6948  * @pi_task: donor task
6949  *
6950  * This function changes the 'effective' priority of a task. It does
6951  * not touch ->normal_prio like __setscheduler().
6952  *
6953  * Used by the rt_mutex code to implement priority inheritance
6954  * logic. The call site only calls this if the priority of the task changed.
6955  */
6956 void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
6957 {
6958 	int prio, oldprio, queued, running, queue_flag =
6959 		DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
6960 	const struct sched_class *prev_class;
6961 	struct rq_flags rf;
6962 	struct rq *rq;
6963 
6964 	/* XXX used to be waiter->prio, not waiter->task->prio */
6965 	prio = __rt_effective_prio(pi_task, p->normal_prio);
6966 
6967 	/*
6968 	 * If nothing changed, bail early.
6969 	 */
6970 	if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio))
6971 		return;
6972 
6973 	rq = __task_rq_lock(p, &rf);
6974 	update_rq_clock(rq);
6975 	/*
6976 	 * Set under pi_lock && rq->lock, such that the value can be used under
6977 	 * either lock.
6978 	 *
6979 	 * Note that it takes loads of trickiness to make this pointer cache
6980 	 * work right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
6981 	 * ensure a task is de-boosted (pi_task is set to NULL) before the
6982 	 * task is allowed to run again (and can exit). This ensures the pointer
6983 	 * points to a blocked task -- which guarantees the task is present.
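 *
 * Illustrative boost/deboost sequence (a sketch, not a verbatim
 * rtmutex.c trace):
 *
 *	rt_mutex_setprio(owner, top_waiter);	// boost to waiter's prio
 *	...					// owner releases the lock
 *	rt_mutex_setprio(owner, NULL);		// deboost to ->normal_prio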
6984  */
6985 	p->pi_top_task = pi_task;
6986 
6987 	/*
6988 	 * For FIFO/RR we only need to set prio; if that matches we're done.
6989 	 */
6990 	if (prio == p->prio && !dl_prio(prio))
6991 		goto out_unlock;
6992 
6993 	/*
6994 	 * Idle task boosting is a no-no in general. There is one
6995 	 * exception, when PREEMPT_RT and NOHZ are active:
6996 	 *
6997 	 * The idle task calls get_next_timer_interrupt() and holds
6998 	 * the timer wheel base->lock on the CPU and another CPU wants
6999 	 * to access the timer (probably to cancel it). We can safely
7000 	 * ignore the boosting request, as the idle CPU runs this code
7001 	 * with interrupts disabled and will complete the lock
7002 	 * protected section without being interrupted. So there is no
7003 	 * real need to boost.
7004 	 */
7005 	if (unlikely(p == rq->idle)) {
7006 		WARN_ON(p != rq->curr);
7007 		WARN_ON(p->pi_blocked_on);
7008 		goto out_unlock;
7009 	}
7010 
7011 	trace_sched_pi_setprio(p, pi_task);
7012 	oldprio = p->prio;
7013 
7014 	if (oldprio == prio)
7015 		queue_flag &= ~DEQUEUE_MOVE;
7016 
7017 	prev_class = p->sched_class;
7018 	queued = task_on_rq_queued(p);
7019 	running = task_current(rq, p);
7020 	if (queued)
7021 		dequeue_task(rq, p, queue_flag);
7022 	if (running)
7023 		put_prev_task(rq, p);
7024 
7025 	/*
7026 	 * The boosting conditions are:
7027 	 * 1. An -rt task is running and holds mutex A
7028 	 *    --> a -dl task blocks on mutex A
7029 	 *
7030 	 * 2. A -dl task is running and holds mutex A
7031 	 *    --> a -dl task blocks on mutex A and could preempt the
7032 	 *        running task
7033 	 */
7034 	if (dl_prio(prio)) {
7035 		if (!dl_prio(p->normal_prio) ||
7036 		    (pi_task && dl_prio(pi_task->prio) &&
7037 		     dl_entity_preempt(&pi_task->dl, &p->dl))) {
7038 			p->dl.pi_se = pi_task->dl.pi_se;
7039 			queue_flag |= ENQUEUE_REPLENISH;
7040 		} else {
7041 			p->dl.pi_se = &p->dl;
7042 		}
7043 	} else if (rt_prio(prio)) {
7044 		if (dl_prio(oldprio))
7045 			p->dl.pi_se = &p->dl;
7046 		if (oldprio < prio)
7047 			queue_flag |= ENQUEUE_HEAD;
7048 	} else {
7049 		if (dl_prio(oldprio))
7050 			p->dl.pi_se = &p->dl;
7051 		if (rt_prio(oldprio))
7052 			p->rt.timeout = 0;
7053 	}
7054 
7055 	__setscheduler_prio(p, prio);
7056 
7057 	if (queued)
7058 		enqueue_task(rq, p, queue_flag);
7059 	if (running)
7060 		set_next_task(rq, p);
7061 
7062 	check_class_changed(rq, p, prev_class, oldprio);
7063 out_unlock:
7064 	/* Prevent rq from going away on us: */
7065 	preempt_disable();
7066 
7067 	rq_unpin_lock(rq, &rf);
7068 	__balance_callbacks(rq);
7069 	raw_spin_rq_unlock(rq);
7070 
7071 	preempt_enable();
7072 }
7073 #endif
7074 
7075 #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
7076 int __sched __cond_resched(void)
7077 {
7078 	if (should_resched(0)) {
7079 		preempt_schedule_common();
7080 		return 1;
7081 	}
7082 	/*
7083 	 * In preemptible kernels, ->rcu_read_lock_nesting tells the tick
7084 	 * whether the current CPU is in an RCU read-side critical section,
7085 	 * so the tick can report quiescent states even for CPUs looping
7086 	 * in kernel context. In contrast, in non-preemptible kernels,
7087 	 * RCU readers leave no in-memory hints, which means that CPU-bound
7088 	 * processes executing in kernel context might never report an
7089 	 * RCU quiescent state. Therefore, the following code causes
7090 	 * cond_resched() to report a quiescent state, but only when RCU
7091 	 * is in urgent need of one.
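 *
 * The typical caller pattern this serves (illustrative only):
 *
 *	list_for_each_entry(obj, &long_list, node) {
 *		process(obj);		// placeholder work
 *		cond_resched();		// yield and/or report an RCU QS
 *	}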
7092 */ 7093 #ifndef CONFIG_PREEMPT_RCU 7094 rcu_all_qs(); 7095 #endif 7096 return 0; 7097 } 7098 EXPORT_SYMBOL(__cond_resched); 7099 #endif 7100 7101 #ifdef CONFIG_PREEMPT_DYNAMIC 7102 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) 7103 #define cond_resched_dynamic_enabled __cond_resched 7104 #define cond_resched_dynamic_disabled ((void *)&__static_call_return0) 7105 DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched); 7106 EXPORT_STATIC_CALL_TRAMP(cond_resched); 7107 7108 #define might_resched_dynamic_enabled __cond_resched 7109 #define might_resched_dynamic_disabled ((void *)&__static_call_return0) 7110 DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched); 7111 EXPORT_STATIC_CALL_TRAMP(might_resched); 7112 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) 7113 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched); 7114 int __sched dynamic_cond_resched(void) 7115 { 7116 klp_sched_try_switch(); 7117 if (!static_branch_unlikely(&sk_dynamic_cond_resched)) 7118 return 0; 7119 return __cond_resched(); 7120 } 7121 EXPORT_SYMBOL(dynamic_cond_resched); 7122 7123 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched); 7124 int __sched dynamic_might_resched(void) 7125 { 7126 if (!static_branch_unlikely(&sk_dynamic_might_resched)) 7127 return 0; 7128 return __cond_resched(); 7129 } 7130 EXPORT_SYMBOL(dynamic_might_resched); 7131 #endif 7132 #endif 7133 7134 /* 7135 * __cond_resched_lock() - if a reschedule is pending, drop the given lock, 7136 * call schedule, and on return reacquire the lock. 7137 * 7138 * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level 7139 * operations here to prevent schedule() from being called twice (once via 7140 * spin_unlock(), once by hand). 7141 */ 7142 int __cond_resched_lock(spinlock_t *lock) 7143 { 7144 int resched = should_resched(PREEMPT_LOCK_OFFSET); 7145 int ret = 0; 7146 7147 lockdep_assert_held(lock); 7148 7149 if (spin_needbreak(lock) || resched) { 7150 spin_unlock(lock); 7151 if (!_cond_resched()) 7152 cpu_relax(); 7153 ret = 1; 7154 spin_lock(lock); 7155 } 7156 return ret; 7157 } 7158 EXPORT_SYMBOL(__cond_resched_lock); 7159 7160 int __cond_resched_rwlock_read(rwlock_t *lock) 7161 { 7162 int resched = should_resched(PREEMPT_LOCK_OFFSET); 7163 int ret = 0; 7164 7165 lockdep_assert_held_read(lock); 7166 7167 if (rwlock_needbreak(lock) || resched) { 7168 read_unlock(lock); 7169 if (!_cond_resched()) 7170 cpu_relax(); 7171 ret = 1; 7172 read_lock(lock); 7173 } 7174 return ret; 7175 } 7176 EXPORT_SYMBOL(__cond_resched_rwlock_read); 7177 7178 int __cond_resched_rwlock_write(rwlock_t *lock) 7179 { 7180 int resched = should_resched(PREEMPT_LOCK_OFFSET); 7181 int ret = 0; 7182 7183 lockdep_assert_held_write(lock); 7184 7185 if (rwlock_needbreak(lock) || resched) { 7186 write_unlock(lock); 7187 if (!_cond_resched()) 7188 cpu_relax(); 7189 ret = 1; 7190 write_lock(lock); 7191 } 7192 return ret; 7193 } 7194 EXPORT_SYMBOL(__cond_resched_rwlock_write); 7195 7196 #ifdef CONFIG_PREEMPT_DYNAMIC 7197 7198 #ifdef CONFIG_GENERIC_ENTRY 7199 #include <linux/entry-common.h> 7200 #endif 7201 7202 /* 7203 * SC:cond_resched 7204 * SC:might_resched 7205 * SC:preempt_schedule 7206 * SC:preempt_schedule_notrace 7207 * SC:irqentry_exit_cond_resched 7208 * 7209 * 7210 * NONE: 7211 * cond_resched <- __cond_resched 7212 * might_resched <- RET0 7213 * preempt_schedule <- NOP 7214 * preempt_schedule_notrace <- NOP 7215 * irqentry_exit_cond_resched <- NOP 7216 * 7217 * VOLUNTARY: 7218 * cond_resched <- __cond_resched 7219 * might_resched <- __cond_resched 7220 
* preempt_schedule <- NOP 7221 * preempt_schedule_notrace <- NOP 7222 * irqentry_exit_cond_resched <- NOP 7223 * 7224 * FULL: 7225 * cond_resched <- RET0 7226 * might_resched <- RET0 7227 * preempt_schedule <- preempt_schedule 7228 * preempt_schedule_notrace <- preempt_schedule_notrace 7229 * irqentry_exit_cond_resched <- irqentry_exit_cond_resched 7230 */ 7231 7232 enum { 7233 preempt_dynamic_undefined = -1, 7234 preempt_dynamic_none, 7235 preempt_dynamic_voluntary, 7236 preempt_dynamic_full, 7237 }; 7238 7239 int preempt_dynamic_mode = preempt_dynamic_undefined; 7240 7241 int sched_dynamic_mode(const char *str) 7242 { 7243 if (!strcmp(str, "none")) 7244 return preempt_dynamic_none; 7245 7246 if (!strcmp(str, "voluntary")) 7247 return preempt_dynamic_voluntary; 7248 7249 if (!strcmp(str, "full")) 7250 return preempt_dynamic_full; 7251 7252 return -EINVAL; 7253 } 7254 7255 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) 7256 #define preempt_dynamic_enable(f) static_call_update(f, f##_dynamic_enabled) 7257 #define preempt_dynamic_disable(f) static_call_update(f, f##_dynamic_disabled) 7258 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) 7259 #define preempt_dynamic_enable(f) static_key_enable(&sk_dynamic_##f.key) 7260 #define preempt_dynamic_disable(f) static_key_disable(&sk_dynamic_##f.key) 7261 #else 7262 #error "Unsupported PREEMPT_DYNAMIC mechanism" 7263 #endif 7264 7265 static DEFINE_MUTEX(sched_dynamic_mutex); 7266 static bool klp_override; 7267 7268 static void __sched_dynamic_update(int mode) 7269 { 7270 /* 7271 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in 7272 * the ZERO state, which is invalid. 7273 */ 7274 if (!klp_override) 7275 preempt_dynamic_enable(cond_resched); 7276 preempt_dynamic_enable(might_resched); 7277 preempt_dynamic_enable(preempt_schedule); 7278 preempt_dynamic_enable(preempt_schedule_notrace); 7279 preempt_dynamic_enable(irqentry_exit_cond_resched); 7280 7281 switch (mode) { 7282 case preempt_dynamic_none: 7283 if (!klp_override) 7284 preempt_dynamic_enable(cond_resched); 7285 preempt_dynamic_disable(might_resched); 7286 preempt_dynamic_disable(preempt_schedule); 7287 preempt_dynamic_disable(preempt_schedule_notrace); 7288 preempt_dynamic_disable(irqentry_exit_cond_resched); 7289 if (mode != preempt_dynamic_mode) 7290 pr_info("Dynamic Preempt: none\n"); 7291 break; 7292 7293 case preempt_dynamic_voluntary: 7294 if (!klp_override) 7295 preempt_dynamic_enable(cond_resched); 7296 preempt_dynamic_enable(might_resched); 7297 preempt_dynamic_disable(preempt_schedule); 7298 preempt_dynamic_disable(preempt_schedule_notrace); 7299 preempt_dynamic_disable(irqentry_exit_cond_resched); 7300 if (mode != preempt_dynamic_mode) 7301 pr_info("Dynamic Preempt: voluntary\n"); 7302 break; 7303 7304 case preempt_dynamic_full: 7305 if (!klp_override) 7306 preempt_dynamic_disable(cond_resched); 7307 preempt_dynamic_disable(might_resched); 7308 preempt_dynamic_enable(preempt_schedule); 7309 preempt_dynamic_enable(preempt_schedule_notrace); 7310 preempt_dynamic_enable(irqentry_exit_cond_resched); 7311 if (mode != preempt_dynamic_mode) 7312 pr_info("Dynamic Preempt: full\n"); 7313 break; 7314 } 7315 7316 preempt_dynamic_mode = mode; 7317 } 7318 7319 void sched_dynamic_update(int mode) 7320 { 7321 mutex_lock(&sched_dynamic_mutex); 7322 __sched_dynamic_update(mode); 7323 mutex_unlock(&sched_dynamic_mutex); 7324 } 7325 7326 #ifdef CONFIG_HAVE_PREEMPT_DYNAMIC_CALL 7327 7328 static int klp_cond_resched(void) 7329 { 7330 __klp_sched_try_switch(); 7331 return __cond_resched(); 
7332 } 7333 7334 void sched_dynamic_klp_enable(void) 7335 { 7336 mutex_lock(&sched_dynamic_mutex); 7337 7338 klp_override = true; 7339 static_call_update(cond_resched, klp_cond_resched); 7340 7341 mutex_unlock(&sched_dynamic_mutex); 7342 } 7343 7344 void sched_dynamic_klp_disable(void) 7345 { 7346 mutex_lock(&sched_dynamic_mutex); 7347 7348 klp_override = false; 7349 __sched_dynamic_update(preempt_dynamic_mode); 7350 7351 mutex_unlock(&sched_dynamic_mutex); 7352 } 7353 7354 #endif /* CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */ 7355 7356 static int __init setup_preempt_mode(char *str) 7357 { 7358 int mode = sched_dynamic_mode(str); 7359 if (mode < 0) { 7360 pr_warn("Dynamic Preempt: unsupported mode: %s\n", str); 7361 return 0; 7362 } 7363 7364 sched_dynamic_update(mode); 7365 return 1; 7366 } 7367 __setup("preempt=", setup_preempt_mode); 7368 7369 static void __init preempt_dynamic_init(void) 7370 { 7371 if (preempt_dynamic_mode == preempt_dynamic_undefined) { 7372 if (IS_ENABLED(CONFIG_PREEMPT_NONE)) { 7373 sched_dynamic_update(preempt_dynamic_none); 7374 } else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) { 7375 sched_dynamic_update(preempt_dynamic_voluntary); 7376 } else { 7377 /* Default static call setting, nothing to do */ 7378 WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT)); 7379 preempt_dynamic_mode = preempt_dynamic_full; 7380 pr_info("Dynamic Preempt: full\n"); 7381 } 7382 } 7383 } 7384 7385 #define PREEMPT_MODEL_ACCESSOR(mode) \ 7386 bool preempt_model_##mode(void) \ 7387 { \ 7388 WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \ 7389 return preempt_dynamic_mode == preempt_dynamic_##mode; \ 7390 } \ 7391 EXPORT_SYMBOL_GPL(preempt_model_##mode) 7392 7393 PREEMPT_MODEL_ACCESSOR(none); 7394 PREEMPT_MODEL_ACCESSOR(voluntary); 7395 PREEMPT_MODEL_ACCESSOR(full); 7396 7397 #else /* !CONFIG_PREEMPT_DYNAMIC: */ 7398 7399 static inline void preempt_dynamic_init(void) { } 7400 7401 #endif /* CONFIG_PREEMPT_DYNAMIC */ 7402 7403 int io_schedule_prepare(void) 7404 { 7405 int old_iowait = current->in_iowait; 7406 7407 current->in_iowait = 1; 7408 blk_flush_plug(current->plug, true); 7409 return old_iowait; 7410 } 7411 7412 void io_schedule_finish(int token) 7413 { 7414 current->in_iowait = token; 7415 } 7416 7417 /* 7418 * This task is about to go to sleep on IO. Increment rq->nr_iowait so 7419 * that process accounting knows that this is a task in IO wait state. 
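 *
 * A typical user is mutex_lock_io() (a sketch of the usage in
 * kernel/locking/mutex.c):
 *
 *	token = io_schedule_prepare();
 *	mutex_lock(lock);		// may sleep waiting on IO
 *	io_schedule_finish(token);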
7420  */
7421 long __sched io_schedule_timeout(long timeout)
7422 {
7423 	int token;
7424 	long ret;
7425 
7426 	token = io_schedule_prepare();
7427 	ret = schedule_timeout(timeout);
7428 	io_schedule_finish(token);
7429 
7430 	return ret;
7431 }
7432 EXPORT_SYMBOL(io_schedule_timeout);
7433 
7434 void __sched io_schedule(void)
7435 {
7436 	int token;
7437 
7438 	token = io_schedule_prepare();
7439 	schedule();
7440 	io_schedule_finish(token);
7441 }
7442 EXPORT_SYMBOL(io_schedule);
7443 
7444 void sched_show_task(struct task_struct *p)
7445 {
7446 	unsigned long free = 0;
7447 	int ppid;
7448 
7449 	if (!try_get_task_stack(p))
7450 		return;
7451 
7452 	pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
7453 
7454 	if (task_is_running(p))
7455 		pr_cont("  running task    ");
7456 #ifdef CONFIG_DEBUG_STACK_USAGE
7457 	free = stack_not_used(p);
7458 #endif
7459 	ppid = 0;
7460 	rcu_read_lock();
7461 	if (pid_alive(p))
7462 		ppid = task_pid_nr(rcu_dereference(p->real_parent));
7463 	rcu_read_unlock();
7464 	pr_cont(" stack:%-5lu pid:%-5d tgid:%-5d ppid:%-6d flags:0x%08lx\n",
7465 		free, task_pid_nr(p), task_tgid_nr(p),
7466 		ppid, read_task_thread_flags(p));
7467 
7468 	print_worker_info(KERN_INFO, p);
7469 	print_stop_info(KERN_INFO, p);
7470 	show_stack(p, NULL, KERN_INFO);
7471 	put_task_stack(p);
7472 }
7473 EXPORT_SYMBOL_GPL(sched_show_task);
7474 
7475 static inline bool
7476 state_filter_match(unsigned long state_filter, struct task_struct *p)
7477 {
7478 	unsigned int state = READ_ONCE(p->__state);
7479 
7480 	/* no filter, everything matches */
7481 	if (!state_filter)
7482 		return true;
7483 
7484 	/* filter, but doesn't match */
7485 	if (!(state & state_filter))
7486 		return false;
7487 
7488 	/*
7489 	 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
7490 	 * TASK_KILLABLE).
7491 	 */
7492 	if (state_filter == TASK_UNINTERRUPTIBLE && (state & TASK_NOLOAD))
7493 		return false;
7494 
7495 	return true;
7496 }
7497 
7498 
7499 void show_state_filter(unsigned int state_filter)
7500 {
7501 	struct task_struct *g, *p;
7502 
7503 	rcu_read_lock();
7504 	for_each_process_thread(g, p) {
7505 		/*
7506 		 * Reset the NMI watchdog timeout: listing all tasks on a slow
7507 		 * console might take a lot of time.
7508 		 * Also reset the softlockup watchdogs on all CPUs, because
7509 		 * another CPU might be blocked waiting for us to process
7510 		 * an IPI.
7511 		 */
7512 		touch_nmi_watchdog();
7513 		touch_all_softlockup_watchdogs();
7514 		if (state_filter_match(state_filter, p))
7515 			sched_show_task(p);
7516 	}
7517 
7518 #ifdef CONFIG_SCHED_DEBUG
7519 	if (!state_filter)
7520 		sysrq_sched_debug_show();
7521 #endif
7522 	rcu_read_unlock();
7523 	/*
7524 	 * Only show locks if all tasks are dumped:
7525 	 */
7526 	if (!state_filter)
7527 		debug_show_all_locks();
7528 }
7529 
7530 /**
7531  * init_idle - set up an idle thread for a given CPU
7532  * @idle: task in question
7533  * @cpu: CPU the idle task belongs to
7534  *
7535  * NOTE: this function does not set the idle thread's NEED_RESCHED
7536  * flag, to make booting more robust.
7537  */
7538 void __init init_idle(struct task_struct *idle, int cpu)
7539 {
7540 #ifdef CONFIG_SMP
7541 	struct affinity_context ac = (struct affinity_context) {
7542 		.new_mask  = cpumask_of(cpu),
7543 		.flags     = 0,
7544 	};
7545 #endif
7546 	struct rq *rq = cpu_rq(cpu);
7547 	unsigned long flags;
7548 
7549 	__sched_fork(0, idle);
7550 
7551 	raw_spin_lock_irqsave(&idle->pi_lock, flags);
7552 	raw_spin_rq_lock(rq);
7553 
7554 	idle->__state = TASK_RUNNING;
7555 	idle->se.exec_start = sched_clock();
7556 	/*
7557 	 * PF_KTHREAD should already be set at this point; regardless, make it
7558 	 * look like a proper per-CPU kthread.
7559 	 */
7560 	idle->flags |= PF_KTHREAD | PF_NO_SETAFFINITY;
7561 	kthread_set_per_cpu(idle, cpu);
7562 
7563 #ifdef CONFIG_SMP
7564 	/*
7565 	 * It's possible that init_idle() gets called multiple times on a task;
7566 	 * in that case do_set_cpus_allowed() will not do the right thing.
7567 	 *
7568 	 * And since this is boot we can forgo the serialization.
7569 	 */
7570 	set_cpus_allowed_common(idle, &ac);
7571 #endif
7572 	/*
7573 	 * We're having a chicken-and-egg problem: even though we are
7574 	 * holding rq->lock, the CPU isn't yet set to this CPU so the
7575 	 * lockdep check in task_group() will fail.
7576 	 *
7577 	 * Similar case to sched_fork(); alternatively we could
7578 	 * use task_rq_lock() here and obtain the other rq->lock.
7579 	 *
7580 	 * Silence PROVE_RCU
7581 	 */
7582 	rcu_read_lock();
7583 	__set_task_cpu(idle, cpu);
7584 	rcu_read_unlock();
7585 
7586 	rq->idle = idle;
7587 	rcu_assign_pointer(rq->curr, idle);
7588 	idle->on_rq = TASK_ON_RQ_QUEUED;
7589 #ifdef CONFIG_SMP
7590 	idle->on_cpu = 1;
7591 #endif
7592 	raw_spin_rq_unlock(rq);
7593 	raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
7594 
7595 	/* Set the preempt count _outside_ the spinlocks! */
7596 	init_idle_preempt_count(idle, cpu);
7597 
7598 	/*
7599 	 * The idle tasks have their own, simple scheduling class:
7600 	 */
7601 	idle->sched_class = &idle_sched_class;
7602 	ftrace_graph_init_idle_task(idle, cpu);
7603 	vtime_init_idle(idle, cpu);
7604 #ifdef CONFIG_SMP
7605 	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
7606 #endif
7607 }
7608 
7609 #ifdef CONFIG_SMP
7610 
7611 int cpuset_cpumask_can_shrink(const struct cpumask *cur,
7612 			      const struct cpumask *trial)
7613 {
7614 	int ret = 1;
7615 
7616 	if (cpumask_empty(cur))
7617 		return ret;
7618 
7619 	ret = dl_cpuset_cpumask_can_shrink(cur, trial);
7620 
7621 	return ret;
7622 }
7623 
7624 int task_can_attach(struct task_struct *p)
7625 {
7626 	int ret = 0;
7627 
7628 	/*
7629 	 * Kthreads which disallow setaffinity shouldn't be moved
7630 	 * to a new cpuset; we don't want to change their CPU
7631 	 * affinity and isolating such threads by their set of
7632 	 * allowed nodes is unnecessary. Thus, cpusets are not
7633 	 * applicable for such threads. This prevents checking for
7634 	 * success of set_cpus_allowed_ptr() on all attached tasks
7635 	 * before cpus_mask may be changed.
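 *
 * (Per-CPU kthreads such as workqueue worker threads are the typical
 * example of PF_NO_SETAFFINITY tasks.)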
7636  */
7637 	if (p->flags & PF_NO_SETAFFINITY)
7638 		ret = -EINVAL;
7639 
7640 	return ret;
7641 }
7642 
7643 bool sched_smp_initialized __read_mostly;
7644 
7645 #ifdef CONFIG_NUMA_BALANCING
7646 /* Migrate current task p to target_cpu */
7647 int migrate_task_to(struct task_struct *p, int target_cpu)
7648 {
7649 	struct migration_arg arg = { p, target_cpu };
7650 	int curr_cpu = task_cpu(p);
7651 
7652 	if (curr_cpu == target_cpu)
7653 		return 0;
7654 
7655 	if (!cpumask_test_cpu(target_cpu, p->cpus_ptr))
7656 		return -EINVAL;
7657 
7658 	/* TODO: This is not properly updating schedstats */
7659 
7660 	trace_sched_move_numa(p, curr_cpu, target_cpu);
7661 	return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
7662 }
7663 
7664 /*
7665  * Requeue a task on a given node and accurately track the number of NUMA
7666  * tasks on the runqueues.
7667  */
7668 void sched_setnuma(struct task_struct *p, int nid)
7669 {
7670 	bool queued, running;
7671 	struct rq_flags rf;
7672 	struct rq *rq;
7673 
7674 	rq = task_rq_lock(p, &rf);
7675 	queued = task_on_rq_queued(p);
7676 	running = task_current(rq, p);
7677 
7678 	if (queued)
7679 		dequeue_task(rq, p, DEQUEUE_SAVE);
7680 	if (running)
7681 		put_prev_task(rq, p);
7682 
7683 	p->numa_preferred_nid = nid;
7684 
7685 	if (queued)
7686 		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
7687 	if (running)
7688 		set_next_task(rq, p);
7689 	task_rq_unlock(rq, p, &rf);
7690 }
7691 #endif /* CONFIG_NUMA_BALANCING */
7692 
7693 #ifdef CONFIG_HOTPLUG_CPU
7694 /*
7695  * Ensure that the idle task is using init_mm right before its CPU goes
7696  * offline.
7697  */
7698 void idle_task_exit(void)
7699 {
7700 	struct mm_struct *mm = current->active_mm;
7701 
7702 	BUG_ON(cpu_online(smp_processor_id()));
7703 	BUG_ON(current != this_rq()->idle);
7704 
7705 	if (mm != &init_mm) {
7706 		switch_mm(mm, &init_mm, current);
7707 		finish_arch_post_lock_switch();
7708 	}
7709 
7710 	/* finish_cpu(), as run on the BP, will clean up the active_mm state */
7711 }
7712 
7713 static int __balance_push_cpu_stop(void *arg)
7714 {
7715 	struct task_struct *p = arg;
7716 	struct rq *rq = this_rq();
7717 	struct rq_flags rf;
7718 	int cpu;
7719 
7720 	raw_spin_lock_irq(&p->pi_lock);
7721 	rq_lock(rq, &rf);
7722 
7723 	update_rq_clock(rq);
7724 
7725 	if (task_rq(p) == rq && task_on_rq_queued(p)) {
7726 		cpu = select_fallback_rq(rq->cpu, p);
7727 		rq = __migrate_task(rq, &rf, p, cpu);
7728 	}
7729 
7730 	rq_unlock(rq, &rf);
7731 	raw_spin_unlock_irq(&p->pi_lock);
7732 
7733 	put_task_struct(p);
7734 
7735 	return 0;
7736 }
7737 
7738 static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
7739 
7740 /*
7741  * Ensure we only run per-cpu kthreads once the CPU goes !active.
7742  *
7743  * This is enabled below SCHED_AP_ACTIVE, i.e. when !cpu_active(), but it
7744  * only takes effect while the CPU is going down.
7745  */
7746 static void balance_push(struct rq *rq)
7747 {
7748 	struct task_struct *push_task = rq->curr;
7749 
7750 	lockdep_assert_rq_held(rq);
7751 
7752 	/*
7753 	 * Ensure the thing is persistent until balance_push_set(.on = false);
7754 	 */
7755 	rq->balance_callback = &balance_push_callback;
7756 
7757 	/*
7758 	 * Only active while going offline and when invoked on the outgoing
7759 	 * CPU.
7760 	 */
7761 	if (!cpu_dying(rq->cpu) || rq != this_rq())
7762 		return;
7763 
7764 	/*
7765 	 * Both the cpu-hotplug and stop task are in this case and are
7766 	 * required to complete the hotplug process.
	 */
	if (kthread_is_per_cpu(push_task) ||
	    is_migration_disabled(push_task)) {

		/*
		 * If this is the idle task on the outgoing CPU try to wake
		 * up the hotplug control thread which might wait for the
		 * last task to vanish. The rcuwait_active() check is
		 * accurate here because the waiter is pinned on this CPU
		 * and obviously can't be running in parallel.
		 *
		 * On RT kernels this also has to check whether there are
		 * pinned and scheduled out tasks on the runqueue. They
		 * need to leave the migrate disabled section first.
		 */
		if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
		    rcuwait_active(&rq->hotplug_wait)) {
			raw_spin_rq_unlock(rq);
			rcuwait_wake_up(&rq->hotplug_wait);
			raw_spin_rq_lock(rq);
		}
		return;
	}

	get_task_struct(push_task);
	/*
	 * Temporarily drop rq->lock such that we can wake up the stop task.
	 * Both preemption and IRQs are still disabled.
	 */
	preempt_disable();
	raw_spin_rq_unlock(rq);
	stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
			    this_cpu_ptr(&push_work));
	preempt_enable();
	/*
	 * At this point need_resched() is true and we'll take the loop in
	 * schedule(). The next pick is obviously going to be the stop task,
	 * which is kthread_is_per_cpu() and will push this task away.
	 */
	raw_spin_rq_lock(rq);
}

static void balance_push_set(int cpu, bool on)
{
	struct rq *rq = cpu_rq(cpu);
	struct rq_flags rf;

	rq_lock_irqsave(rq, &rf);
	if (on) {
		WARN_ON_ONCE(rq->balance_callback);
		rq->balance_callback = &balance_push_callback;
	} else if (rq->balance_callback == &balance_push_callback) {
		rq->balance_callback = NULL;
	}
	rq_unlock_irqrestore(rq, &rf);
}

/*
 * Invoked from a CPU's hotplug control thread after the CPU has been marked
 * inactive. All tasks which are not per-CPU kernel threads are either
 * pushed off this CPU now via balance_push() or placed on a different CPU
 * during wakeup. Wait until the CPU is quiescent.
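 *
 * Concretely (a sketch of the mechanism, see balance_hotplug_wait()
 * below): the hotplug thread sleeps on rq->hotplug_wait until only one
 * runnable task (itself) is left and no migrate-disabled tasks remain
 * pinned; balance_push() on the outgoing CPU issues the matching
 * rcuwait_wake_up().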
 */
static void balance_hotplug_wait(void)
{
	struct rq *rq = this_rq();

	rcuwait_wait_event(&rq->hotplug_wait,
			   rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
			   TASK_UNINTERRUPTIBLE);
}

#else

static inline void balance_push(struct rq *rq)
{
}

static inline void balance_push_set(int cpu, bool on)
{
}

static inline void balance_hotplug_wait(void)
{
}

#endif /* CONFIG_HOTPLUG_CPU */

void set_rq_online(struct rq *rq)
{
	if (!rq->online) {
		const struct sched_class *class;

		cpumask_set_cpu(rq->cpu, rq->rd->online);
		rq->online = 1;

		for_each_class(class) {
			if (class->rq_online)
				class->rq_online(rq);
		}
	}
}

void set_rq_offline(struct rq *rq)
{
	if (rq->online) {
		const struct sched_class *class;

		update_rq_clock(rq);
		for_each_class(class) {
			if (class->rq_offline)
				class->rq_offline(rq);
		}

		cpumask_clear_cpu(rq->cpu, rq->rd->online);
		rq->online = 0;
	}
}

static inline void sched_set_rq_online(struct rq *rq, int cpu)
{
	struct rq_flags rf;

	rq_lock_irqsave(rq, &rf);
	if (rq->rd) {
		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
		set_rq_online(rq);
	}
	rq_unlock_irqrestore(rq, &rf);
}

static inline void sched_set_rq_offline(struct rq *rq, int cpu)
{
	struct rq_flags rf;

	rq_lock_irqsave(rq, &rf);
	if (rq->rd) {
		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
		set_rq_offline(rq);
	}
	rq_unlock_irqrestore(rq, &rf);
}

/*
 * Used to mark the begin/end of a suspend/resume cycle:
 */
static int num_cpus_frozen;

/*
 * Update cpusets according to cpu_active mask. If cpusets are
 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
 * around partition_sched_domains().
 *
 * If we come here as part of a suspend/resume, don't touch cpusets because we
 * want to restore them back to their original state upon resume anyway.
 */
static void cpuset_cpu_active(void)
{
	if (cpuhp_tasks_frozen) {
		/*
		 * num_cpus_frozen tracks how many CPUs are involved in the
		 * suspend/resume sequence. As long as this is not the last
		 * online operation in the resume sequence, just build a
		 * single sched domain, ignoring cpusets.
		 */
		partition_sched_domains(1, NULL, NULL);
		if (--num_cpus_frozen)
			return;
		/*
		 * This is the last CPU online operation. So fall through and
		 * restore the original sched domains by considering the
		 * cpuset configurations.
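		 *
		 * Worked example (illustrative CPU counts): if suspend froze
		 * three secondary CPUs, num_cpus_frozen is 3 when resume
		 * starts onlining them again. The first two calls here
		 * decrement the counter to 2 and then 1 and return early;
		 * only the third call falls through to the rebuild below.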
		 */
		cpuset_force_rebuild();
	}
	cpuset_update_active_cpus();
}

static int cpuset_cpu_inactive(unsigned int cpu)
{
	if (!cpuhp_tasks_frozen) {
		int ret = dl_bw_check_overflow(cpu);

		if (ret)
			return ret;
		cpuset_update_active_cpus();
	} else {
		num_cpus_frozen++;
		partition_sched_domains(1, NULL, NULL);
	}
	return 0;
}

static inline void sched_smt_present_inc(int cpu)
{
#ifdef CONFIG_SCHED_SMT
	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
		static_branch_inc_cpuslocked(&sched_smt_present);
#endif
}

static inline void sched_smt_present_dec(int cpu)
{
#ifdef CONFIG_SCHED_SMT
	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
		static_branch_dec_cpuslocked(&sched_smt_present);
#endif
}

int sched_cpu_activate(unsigned int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	/*
	 * Clear the balance_push callback and prepare to schedule
	 * regular tasks.
	 */
	balance_push_set(cpu, false);

	/*
	 * When going up, increment the number of cores with SMT present.
	 */
	sched_smt_present_inc(cpu);
	set_cpu_active(cpu, true);

	if (sched_smp_initialized) {
		sched_update_numa(cpu, true);
		sched_domains_numa_masks_set(cpu);
		cpuset_cpu_active();
	}

	/*
	 * Put the rq online, if not already. This happens:
	 *
	 * 1) In the early boot process, because we build the real domains
	 *    after all CPUs have been brought up.
	 *
	 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
	 *    domains.
	 */
	sched_set_rq_online(rq, cpu);

	return 0;
}

int sched_cpu_deactivate(unsigned int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	int ret;

	/*
	 * Remove the CPU from nohz.idle_cpus_mask to prevent it from
	 * participating in load balancing while not active.
	 */
	nohz_balance_exit_idle(rq);

	set_cpu_active(cpu, false);

	/*
	 * From this point forward, this CPU will refuse to run any task that
	 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
	 * push those tasks away until this gets cleared, see
	 * sched_cpu_dying().
	 */
	balance_push_set(cpu, true);

	/*
	 * We've cleared cpu_active_mask / set balance_push, wait for all
	 * preempt-disabled and RCU users of this state to go away such that
	 * all new such users will observe it.
	 *
	 * Specifically, we rely on ttwu to no longer target this CPU, see
	 * ttwu_queue_cond() and is_cpu_allowed().
	 *
	 * Do the synchronization before parking the smpboot threads to take
	 * care of the RCU boost case.
	 */
	synchronize_rcu();

	sched_set_rq_offline(rq, cpu);

	/*
	 * When going down, decrement the number of cores with SMT present.
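	 *
	 * As with sched_smt_present_inc() above, this only flips the
	 * sched_smt_present static branch when the SMT mask of this core
	 * still has exactly two online CPUs, i.e. when the first sibling
	 * of a fully populated two-thread core goes away.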
	 */
	sched_smt_present_dec(cpu);

#ifdef CONFIG_SCHED_SMT
	sched_core_cpu_deactivate(cpu);
#endif

	if (!sched_smp_initialized)
		return 0;

	sched_update_numa(cpu, false);
	ret = cpuset_cpu_inactive(cpu);
	if (ret) {
		sched_smt_present_inc(cpu);
		sched_set_rq_online(rq, cpu);
		balance_push_set(cpu, false);
		set_cpu_active(cpu, true);
		sched_update_numa(cpu, true);
		return ret;
	}
	sched_domains_numa_masks_clear(cpu);
	return 0;
}

static void sched_rq_cpu_starting(unsigned int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	rq->calc_load_update = calc_load_update;
	update_max_interval();
}

int sched_cpu_starting(unsigned int cpu)
{
	sched_core_cpu_starting(cpu);
	sched_rq_cpu_starting(cpu);
	sched_tick_start(cpu);
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Invoked immediately before the stopper thread is invoked to bring the
 * CPU down completely. At this point all per-CPU kthreads except the
 * hotplug thread (current) and the stopper thread (inactive) have been
 * either parked or have been unbound from the outgoing CPU. Ensure that
 * any of those which might be on the way out are gone.
 *
 * If after this point a bound task is being woken on this CPU then the
 * responsible hotplug callback has failed to do its job.
 * sched_cpu_dying() will catch it with the appropriate fireworks.
 */
int sched_cpu_wait_empty(unsigned int cpu)
{
	balance_hotplug_wait();
	return 0;
}

/*
 * Since this CPU is going 'away' for a while, fold any nr_active delta we
 * might have. Called from the CPU stopper task after ensuring that the
 * stopper is the last running task on the CPU, so nr_active count is
 * stable. We need to take the tear-down thread which is calling this into
 * account, so we hand in adjust = 1 to the load calculation.
 *
 * Also see the comment "Global load-average calculations".
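 *
 * A worked instance (assuming calc_load_fold_active() keeps its current
 * semantics of nr_running - adjust + nr_uninterruptible): with only the
 * stopper runnable, nr_running == 1 and adjust == 1, so an otherwise
 * quiescent dying CPU folds a delta of exactly its nr_uninterruptible
 * count into calc_load_tasks rather than leaking the stopper itself.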
 */
static void calc_load_migrate(struct rq *rq)
{
	long delta = calc_load_fold_active(rq, 1);

	if (delta)
		atomic_long_add(delta, &calc_load_tasks);
}

static void dump_rq_tasks(struct rq *rq, const char *loglvl)
{
	struct task_struct *g, *p;
	int cpu = cpu_of(rq);

	lockdep_assert_rq_held(rq);

	printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
	for_each_process_thread(g, p) {
		if (task_cpu(p) != cpu)
			continue;

		if (!task_on_rq_queued(p))
			continue;

		printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
	}
}

int sched_cpu_dying(unsigned int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct rq_flags rf;

	/* Handle pending wakeups and then migrate everything off */
	sched_tick_stop(cpu);

	rq_lock_irqsave(rq, &rf);
	if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
		WARN(true, "Dying CPU not properly vacated!");
		dump_rq_tasks(rq, KERN_WARNING);
	}
	rq_unlock_irqrestore(rq, &rf);

	calc_load_migrate(rq);
	update_max_interval();
	hrtick_clear(rq);
	sched_core_cpu_dying(cpu);
	return 0;
}
#endif

void __init sched_init_smp(void)
{
	sched_init_numa(NUMA_NO_NODE);

	/*
	 * There's no userspace yet to cause hotplug operations; hence all the
	 * CPU masks are stable and all blatant races in the below code cannot
	 * happen.
	 */
	mutex_lock(&sched_domains_mutex);
	sched_init_domains(cpu_active_mask);
	mutex_unlock(&sched_domains_mutex);

	/* Move init over to a non-isolated CPU */
	if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0)
		BUG();
	current->flags &= ~PF_NO_SETAFFINITY;
	sched_init_granularity();

	init_sched_rt_class();
	init_sched_dl_class();

	sched_smp_initialized = true;
}

static int __init migration_init(void)
{
	sched_cpu_starting(smp_processor_id());
	return 0;
}
early_initcall(migration_init);

#else
void __init sched_init_smp(void)
{
	sched_init_granularity();
}
#endif /* CONFIG_SMP */

int in_sched_functions(unsigned long addr)
{
	return in_lock_functions(addr) ||
		(addr >= (unsigned long)__sched_text_start
		&& addr < (unsigned long)__sched_text_end);
}

#ifdef CONFIG_CGROUP_SCHED
/*
 * Default task group.
 * Every task in the system belongs to this group at bootup.
 */
struct task_group root_task_group;
LIST_HEAD(task_groups);

/* Cacheline-aligned slab cache for task_group */
static struct kmem_cache *task_group_cache __ro_after_init;
#endif

void __init sched_init(void)
{
	unsigned long ptr = 0;
	int i;

	/* Make sure the linker didn't screw up */
	BUG_ON(&idle_sched_class != &fair_sched_class + 1 ||
	       &fair_sched_class != &rt_sched_class + 1 ||
	       &rt_sched_class != &dl_sched_class + 1);
#ifdef CONFIG_SMP
	BUG_ON(&dl_sched_class != &stop_sched_class + 1);
#endif

	wait_bit_init();

#ifdef CONFIG_FAIR_GROUP_SCHED
	ptr += 2 * nr_cpu_ids * sizeof(void **);
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	ptr += 2 * nr_cpu_ids * sizeof(void **);
#endif
	if (ptr) {
		ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT);

#ifdef CONFIG_FAIR_GROUP_SCHED
		root_task_group.se = (struct sched_entity **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

		root_task_group.shares = ROOT_TASK_GROUP_LOAD;
		init_cfs_bandwidth(&root_task_group.cfs_bandwidth, NULL);
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
		root_task_group.rt_se = (struct sched_rt_entity **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

		root_task_group.rt_rq = (struct rt_rq **)ptr;
		ptr += nr_cpu_ids * sizeof(void **);

#endif /* CONFIG_RT_GROUP_SCHED */
	}

#ifdef CONFIG_SMP
	init_defrootdomain();
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	init_rt_bandwidth(&root_task_group.rt_bandwidth,
			  global_rt_period(), global_rt_runtime());
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_CGROUP_SCHED
	task_group_cache = KMEM_CACHE(task_group, 0);

	list_add(&root_task_group.list, &task_groups);
	INIT_LIST_HEAD(&root_task_group.children);
	INIT_LIST_HEAD(&root_task_group.siblings);
	autogroup_init(&init_task);
#endif /* CONFIG_CGROUP_SCHED */

	for_each_possible_cpu(i) {
		struct rq *rq;

		rq = cpu_rq(i);
		raw_spin_lock_init(&rq->__lock);
		rq->nr_running = 0;
		rq->calc_load_active = 0;
		rq->calc_load_update = jiffies + LOAD_FREQ;
		init_cfs_rq(&rq->cfs);
		init_rt_rq(&rq->rt);
		init_dl_rq(&rq->dl);
#ifdef CONFIG_FAIR_GROUP_SCHED
		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
		rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
		/*
		 * How much CPU bandwidth does root_task_group get?
		 *
		 * In case of task-groups formed through the cgroup filesystem,
		 * it gets 100% of the CPU resources in the system. This
		 * overall system CPU resource is divided among the tasks of
		 * root_task_group and its child task-groups in a fair manner,
		 * based on each entity's (task or task-group's) weight
		 * (se->load.weight).
		 *
		 * In other words, if root_task_group has 10 tasks of weight
		 * 1024 and two child groups A0 and A1 (of weight 1024 each),
		 * then A0's share of the CPU resource is:
		 *
		 *	A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
		 *
		 * We achieve this by letting root_task_group's tasks sit
		 * directly in rq->cfs (i.e. root_task_group->se[] = NULL).
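		 *
		 * Spelled out: the total weight is 10*1024 (tasks) + 1024
		 * (A0) + 1024 (A1) = 12288, so each child group gets
		 * 1024/12288 ~= 8.33%, while the ten root tasks share the
		 * remaining ~83.3% equally.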
		 */
		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
#endif /* CONFIG_FAIR_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED
		/*
		 * This is required for the init CPU because
		 * rt.c:__enable_runtime() only starts working once
		 * scheduler_running is set, which is not the case yet.
		 */
		rq->rt.rt_runtime = global_rt_runtime();
		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
#endif
#ifdef CONFIG_SMP
		rq->sd = NULL;
		rq->rd = NULL;
		rq->cpu_capacity = SCHED_CAPACITY_SCALE;
		rq->balance_callback = &balance_push_callback;
		rq->active_balance = 0;
		rq->next_balance = jiffies;
		rq->push_cpu = 0;
		rq->cpu = i;
		rq->online = 0;
		rq->idle_stamp = 0;
		rq->avg_idle = 2*sysctl_sched_migration_cost;
		rq->max_idle_balance_cost = sysctl_sched_migration_cost;

		INIT_LIST_HEAD(&rq->cfs_tasks);

		rq_attach_root(rq, &def_root_domain);
#ifdef CONFIG_NO_HZ_COMMON
		rq->last_blocked_load_update_tick = jiffies;
		atomic_set(&rq->nohz_flags, 0);

		INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
#endif
#ifdef CONFIG_HOTPLUG_CPU
		rcuwait_init(&rq->hotplug_wait);
#endif
#endif /* CONFIG_SMP */
		hrtick_rq_init(rq);
		atomic_set(&rq->nr_iowait, 0);
		fair_server_init(rq);

#ifdef CONFIG_SCHED_CORE
		rq->core = rq;
		rq->core_pick = NULL;
		rq->core_enabled = 0;
		rq->core_tree = RB_ROOT;
		rq->core_forceidle_count = 0;
		rq->core_forceidle_occupation = 0;
		rq->core_forceidle_start = 0;

		rq->core_cookie = 0UL;
#endif
		zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i));
	}

	set_load_weight(&init_task, false);

	/*
	 * The boot idle thread does lazy MMU switching as well:
	 */
	mmgrab_lazy_tlb(&init_mm);
	enter_lazy_tlb(&init_mm, current);

	/*
	 * The idle task doesn't need the kthread struct to function, but it
	 * is dressed up as a per-CPU kthread and thus needs to play the part
	 * if we want to avoid special-casing it in code that deals with per-CPU
	 * kthreads.
	 */
	WARN_ON(!set_kthread_struct(current));

	/*
	 * Make us the idle thread. Technically, schedule() should not be
	 * called from this thread, however somewhere below it might be,
	 * but because we are the idle thread, we just pick up running again
	 * when this runqueue becomes "idle".
	 */
	init_idle(current, smp_processor_id());

	calc_load_update = jiffies + LOAD_FREQ;

#ifdef CONFIG_SMP
	idle_thread_set_boot_cpu();
	balance_push_set(smp_processor_id(), false);
#endif
	init_sched_fair_class();

	psi_init();

	init_uclamp();

	preempt_dynamic_init();

	scheduler_running = 1;
}

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP

void __might_sleep(const char *file, int line)
{
	unsigned int state = get_current_state();
	/*
	 * Blocking primitives will set (and therefore destroy) current->state;
	 * since we will exit with TASK_RUNNING, make sure we enter with it,
	 * otherwise we would destroy the state.
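	 *
	 * The classic shape of the bug (an illustrative sketch, not code
	 * from this file; "condition" and "some_lock" are placeholders):
	 *
	 *	set_current_state(TASK_UNINTERRUPTIBLE);
	 *	if (!condition)
	 *		mutex_lock(&some_lock);	// triggers this warning
	 *
	 * because the blocking mutex_lock() returns with TASK_RUNNING,
	 * silently wiping out the state set just above it.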
	 */
	WARN_ONCE(state != TASK_RUNNING && current->task_state_change,
		  "do not call blocking ops when !TASK_RUNNING; "
		  "state=%x set at [<%p>] %pS\n", state,
		  (void *)current->task_state_change,
		  (void *)current->task_state_change);

	__might_resched(file, line, 0);
}
EXPORT_SYMBOL(__might_sleep);

static void print_preempt_disable_ip(int preempt_offset, unsigned long ip)
{
	if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		return;

	if (preempt_count() == preempt_offset)
		return;

	pr_err("Preemption disabled at:");
	print_ip_sym(KERN_ERR, ip);
}

static inline bool resched_offsets_ok(unsigned int offsets)
{
	unsigned int nested = preempt_count();

	nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT;

	return nested == offsets;
}

void __might_resched(const char *file, int line, unsigned int offsets)
{
	/* Ratelimiting timestamp: */
	static unsigned long prev_jiffy;

	unsigned long preempt_disable_ip;

	/* WARN_ON_ONCE() by default, no rate limit required: */
	rcu_sleep_check();

	if ((resched_offsets_ok(offsets) && !irqs_disabled() &&
	     !is_idle_task(current) && !current->non_block_count) ||
	    system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
	    oops_in_progress)
		return;

	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
		return;
	prev_jiffy = jiffies;

	/* Save this before calling printk(), since that will clobber it: */
	preempt_disable_ip = get_preempt_disable_ip(current);

	pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
	       file, line);
	pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
	       in_atomic(), irqs_disabled(), current->non_block_count,
	       current->pid, current->comm);
	pr_err("preempt_count: %x, expected: %x\n", preempt_count(),
	       offsets & MIGHT_RESCHED_PREEMPT_MASK);

	if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
		pr_err("RCU nest depth: %d, expected: %u\n",
		       rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT);
	}

	if (task_stack_end_corrupted(current))
		pr_emerg("Thread overran stack, or stack corrupted\n");

	debug_show_held_locks(current);
	if (irqs_disabled())
		print_irqtrace_events(current);

	print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK,
				 preempt_disable_ip);

	dump_stack();
	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}
EXPORT_SYMBOL(__might_resched);

void __cant_sleep(const char *file, int line, int preempt_offset)
{
	static unsigned long prev_jiffy;

	if (irqs_disabled())
		return;

	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
		return;

	if (preempt_count() > preempt_offset)
		return;

	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
		return;
	prev_jiffy = jiffies;

	printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
	printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
	       in_atomic(), irqs_disabled(),
	       current->pid, current->comm);

	debug_show_held_locks(current);
	dump_stack();
	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}
EXPORT_SYMBOL_GPL(__cant_sleep);

#ifdef CONFIG_SMP
void __cant_migrate(const char *file, int line)
{
	static unsigned long prev_jiffy;

	if (irqs_disabled())
		return;

	if (is_migration_disabled(current))
		return;

	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
		return;

	if (preempt_count() > 0)
		return;

	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
		return;
	prev_jiffy = jiffies;

	pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
	pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
	       in_atomic(), irqs_disabled(), is_migration_disabled(current),
	       current->pid, current->comm);

	debug_show_held_locks(current);
	dump_stack();
	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}
EXPORT_SYMBOL_GPL(__cant_migrate);
#endif
#endif

#ifdef CONFIG_MAGIC_SYSRQ
void normalize_rt_tasks(void)
{
	struct task_struct *g, *p;
	struct sched_attr attr = {
		.sched_policy = SCHED_NORMAL,
	};

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		/*
		 * Only normalize user tasks:
		 */
		if (p->flags & PF_KTHREAD)
			continue;

		p->se.exec_start = 0;
		schedstat_set(p->stats.wait_start, 0);
		schedstat_set(p->stats.sleep_start, 0);
		schedstat_set(p->stats.block_start, 0);

		if (!rt_or_dl_task(p)) {
			/*
			 * Renice negative nice level userspace
			 * tasks back to 0:
			 */
			if (task_nice(p) < 0)
				set_user_nice(p, 0);
			continue;
		}

		__sched_setscheduler(p, &attr, false, false);
	}
	read_unlock(&tasklist_lock);
}

#endif /* CONFIG_MAGIC_SYSRQ */

#if defined(CONFIG_KGDB_KDB)
/*
 * These functions are only useful for KDB.
 *
 * They can only be called when the whole system has been
 * stopped - every CPU needs to be quiescent, and no scheduling
 * activity can take place. Using them for anything else would
 * be a serious bug, and as a result, they aren't even visible
 * under any other configuration.
 */

/**
 * curr_task - return the current task for a given CPU.
 * @cpu: the processor in question.
 *
 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
 *
 * Return: The current task for @cpu.
 */
struct task_struct *curr_task(int cpu)
{
	return cpu_curr(cpu);
}

#endif /* defined(CONFIG_KGDB_KDB) */

#ifdef CONFIG_CGROUP_SCHED
/* task_group_lock serializes the addition/removal of task groups */
static DEFINE_SPINLOCK(task_group_lock);

static inline void alloc_uclamp_sched_group(struct task_group *tg,
					    struct task_group *parent)
{
#ifdef CONFIG_UCLAMP_TASK_GROUP
	enum uclamp_id clamp_id;

	for_each_clamp_id(clamp_id) {
		uclamp_se_set(&tg->uclamp_req[clamp_id],
			      uclamp_none(clamp_id), false);
		tg->uclamp[clamp_id] = parent->uclamp[clamp_id];
	}
#endif
}

static void sched_free_group(struct task_group *tg)
{
	free_fair_sched_group(tg);
	free_rt_sched_group(tg);
	autogroup_free(tg);
	kmem_cache_free(task_group_cache, tg);
}

static void sched_free_group_rcu(struct rcu_head *rcu)
{
	sched_free_group(container_of(rcu, struct task_group, rcu));
}

static void sched_unregister_group(struct task_group *tg)
{
	unregister_fair_sched_group(tg);
	unregister_rt_sched_group(tg);
	/*
	 * We have to wait for yet another RCU grace period to expire, as
	 * print_cfs_stats() might run concurrently.
	 */
	call_rcu(&tg->rcu, sched_free_group_rcu);
}

/* Allocate runqueue etc. for a new task group */
struct task_group *sched_create_group(struct task_group *parent)
{
	struct task_group *tg;

	tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
	if (!tg)
		return ERR_PTR(-ENOMEM);

	if (!alloc_fair_sched_group(tg, parent))
		goto err;

	if (!alloc_rt_sched_group(tg, parent))
		goto err;

	alloc_uclamp_sched_group(tg, parent);

	return tg;

err:
	sched_free_group(tg);
	return ERR_PTR(-ENOMEM);
}

void sched_online_group(struct task_group *tg, struct task_group *parent)
{
	unsigned long flags;

	spin_lock_irqsave(&task_group_lock, flags);
	list_add_rcu(&tg->list, &task_groups);

	/* Root should already exist: */
	WARN_ON(!parent);

	tg->parent = parent;
	INIT_LIST_HEAD(&tg->children);
	list_add_rcu(&tg->siblings, &parent->children);
	spin_unlock_irqrestore(&task_group_lock, flags);

	online_fair_sched_group(tg);
}

/* RCU callback to free various structures associated with a task group */
static void sched_unregister_group_rcu(struct rcu_head *rhp)
{
	/* Now it should be safe to free those cfs_rqs: */
	sched_unregister_group(container_of(rhp, struct task_group, rcu));
}

void sched_destroy_group(struct task_group *tg)
{
	/* Wait for possible concurrent references to cfs_rqs to complete: */
	call_rcu(&tg->rcu, sched_unregister_group_rcu);
}

void sched_release_group(struct task_group *tg)
{
	unsigned long flags;

	/*
	 * Unlink first, to avoid walk_tg_tree_from() from finding us (via
	 * sched_cfs_period_timer()).
	 *
	 * For this to be effective, we have to wait for all pending users of
	 * this task group to leave their RCU critical section to ensure no new
	 * user will see our dying task group any more. Specifically ensure
	 * that tg_unthrottle_up() won't add decayed cfs_rq's to it.
	 *
	 * We therefore defer calling unregister_fair_sched_group() to
	 * sched_unregister_group(), which is guaranteed to get called only
	 * after the current RCU grace period has expired.
	 */
	spin_lock_irqsave(&task_group_lock, flags);
	list_del_rcu(&tg->list);
	list_del_rcu(&tg->siblings);
	spin_unlock_irqrestore(&task_group_lock, flags);
}

static struct task_group *sched_get_task_group(struct task_struct *tsk)
{
	struct task_group *tg;

	/*
	 * All callers are synchronized by task_rq_lock(); we do not use RCU
	 * which is pointless here. Thus, we pass "true" to task_css_check()
	 * to prevent lockdep warnings.
	 */
	tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
			  struct task_group, css);
	tg = autogroup_task_group(tsk, tg);

	return tg;
}

static void sched_change_group(struct task_struct *tsk, struct task_group *group)
{
	tsk->sched_task_group = group;

#ifdef CONFIG_FAIR_GROUP_SCHED
	if (tsk->sched_class->task_change_group)
		tsk->sched_class->task_change_group(tsk);
	else
#endif
		set_task_rq(tsk, task_cpu(tsk));
}

/*
 * Change task's runqueue when it moves between groups.
 *
 * The caller of this function should have put the task in its new group by
 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
 * its new group.
 */
void sched_move_task(struct task_struct *tsk)
{
	int queued, running, queue_flags =
		DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
	struct task_group *group;
	struct rq *rq;

	CLASS(task_rq_lock, rq_guard)(tsk);
	rq = rq_guard.rq;

	/*
	 * Esp. with SCHED_AUTOGROUP enabled it is possible to get superfluous
	 * group changes.
	 */
	group = sched_get_task_group(tsk);
	if (group == tsk->sched_task_group)
		return;

	update_rq_clock(rq);

	running = task_current(rq, tsk);
	queued = task_on_rq_queued(tsk);

	if (queued)
		dequeue_task(rq, tsk, queue_flags);
	if (running)
		put_prev_task(rq, tsk);

	sched_change_group(tsk, group);

	if (queued)
		enqueue_task(rq, tsk, queue_flags);
	if (running) {
		set_next_task(rq, tsk);
		/*
		 * After changing group, the running task may have joined a
		 * throttled one but it's still the running task. Trigger a
		 * resched to make sure that task can still run.
		 */
		resched_curr(rq);
	}
}

static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct task_group, css) : NULL;
}

static struct cgroup_subsys_state *
cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct task_group *parent = css_tg(parent_css);
	struct task_group *tg;

	if (!parent) {
		/* This is early initialization for the top cgroup */
		return &root_task_group.css;
	}

	tg = sched_create_group(parent);
	if (IS_ERR(tg))
		return ERR_PTR(-ENOMEM);

	return &tg->css;
}

/* Expose task group only after completing cgroup initialization */
static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
{
	struct task_group *tg = css_tg(css);
	struct task_group *parent = css_tg(css->parent);

	if (parent)
		sched_online_group(tg, parent);

#ifdef CONFIG_UCLAMP_TASK_GROUP
	/* Propagate the effective uclamp value for the new group */
	guard(mutex)(&uclamp_mutex);
	guard(rcu)();
	cpu_util_update_eff(css);
#endif

	return 0;
}

static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
{
	struct task_group *tg = css_tg(css);

	sched_release_group(tg);
}

static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct task_group *tg = css_tg(css);

	/*
	 * Relies on the RCU grace period between css_released() and this.
	 */
	sched_unregister_group(tg);
}

#ifdef CONFIG_RT_GROUP_SCHED
static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct cgroup_subsys_state *css;

	cgroup_taskset_for_each(task, css, tset) {
		if (!sched_rt_can_attach(css_tg(css), task))
			return -EINVAL;
	}
	return 0;
}
#endif

static void cpu_cgroup_attach(struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct cgroup_subsys_state *css;

	cgroup_taskset_for_each(task, css, tset)
		sched_move_task(task);
}

#ifdef CONFIG_UCLAMP_TASK_GROUP
static void cpu_util_update_eff(struct cgroup_subsys_state *css)
{
	struct cgroup_subsys_state *top_css = css;
	struct uclamp_se *uc_parent = NULL;
	struct uclamp_se *uc_se = NULL;
	unsigned int eff[UCLAMP_CNT];
	enum uclamp_id clamp_id;
	unsigned int clamps;

	lockdep_assert_held(&uclamp_mutex);
	SCHED_WARN_ON(!rcu_read_lock_held());

	css_for_each_descendant_pre(css, top_css) {
		uc_parent = css_tg(css)->parent
			? css_tg(css)->parent->uclamp : NULL;

		for_each_clamp_id(clamp_id) {
			/* Assume effective clamps matches requested clamps */
			eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value;
			/* Cap effective clamps with parent's effective clamps */
			if (uc_parent &&
			    eff[clamp_id] > uc_parent[clamp_id].value) {
				eff[clamp_id] = uc_parent[clamp_id].value;
			}
		}
		/* Ensure protection is always capped by limit */
		eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]);

		/* Propagate most restrictive effective clamps */
		clamps = 0x0;
		uc_se = css_tg(css)->uclamp;
		for_each_clamp_id(clamp_id) {
			if (eff[clamp_id] == uc_se[clamp_id].value)
				continue;
			uc_se[clamp_id].value = eff[clamp_id];
			uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]);
			clamps |= (0x1 << clamp_id);
		}
		if (!clamps) {
			css = css_rightmost_descendant(css);
			continue;
		}

		/* Immediately update descendants RUNNABLE tasks */
		uclamp_update_active_tasks(css);
	}
}

/*
 * Integer 10^N with a given N exponent by casting to integer the literal "1eN"
 * C expression. Since there is no way to convert a macro argument (N) into a
 * character constant, use two levels of macros.
 */
#define _POW10(exp) ((unsigned int)1e##exp)
#define POW10(exp) _POW10(exp)

struct uclamp_request {
#define UCLAMP_PERCENT_SHIFT	2
#define UCLAMP_PERCENT_SCALE	(100 * POW10(UCLAMP_PERCENT_SHIFT))
	s64 percent;
	u64 util;
	int ret;
};

static inline struct uclamp_request
capacity_from_percent(char *buf)
{
	struct uclamp_request req = {
		.percent = UCLAMP_PERCENT_SCALE,
		.util = SCHED_CAPACITY_SCALE,
		.ret = 0,
	};

	buf = strim(buf);
	if (strcmp(buf, "max")) {
		req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT,
					     &req.percent);
		if (req.ret)
			return req;
		if ((u64)req.percent > UCLAMP_PERCENT_SCALE) {
			req.ret = -ERANGE;
			return req;
		}

		req.util = req.percent << SCHED_CAPACITY_SHIFT;
		req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE);
	}

	return req;
}

static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf,
				size_t nbytes, loff_t off,
				enum uclamp_id clamp_id)
{
	struct uclamp_request req;
	struct task_group *tg;

	req = capacity_from_percent(buf);
	if (req.ret)
		return req.ret;

	static_branch_enable(&sched_uclamp_used);

	guard(mutex)(&uclamp_mutex);
	guard(rcu)();

	tg = css_tg(of_css(of));
	if (tg->uclamp_req[clamp_id].value != req.util)
		uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false);

	/*
	 * Because the conversion rounding is not recoverable, keep track of
	 * the exact requested value.
	 */
	tg->uclamp_pct[clamp_id] = req.percent;

	/* Update effective clamps to track the most restrictive value */
	cpu_util_update_eff(of_css(of));

	return nbytes;
}

static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes,
				    loff_t off)
{
	return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MIN);
}

static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes,
				    loff_t off)
{
	return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MAX);
}
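
/*
 * Worked example for capacity_from_percent() above (the input string is
 * illustrative): writing "50" parses to req.percent = 5000 at
 * UCLAMP_PERCENT_SHIFT = 2, and req.util = (5000 << 10) / 10000 = 512,
 * i.e. half of SCHED_CAPACITY_SCALE; the literal "max" keeps the
 * defaults of 100% and SCHED_CAPACITY_SCALE.
 */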

static inline void cpu_uclamp_print(struct seq_file *sf,
				    enum uclamp_id clamp_id)
{
	struct task_group *tg;
	u64 util_clamp;
	u64 percent;
	u32 rem;

	scoped_guard (rcu) {
		tg = css_tg(seq_css(sf));
		util_clamp = tg->uclamp_req[clamp_id].value;
	}

	if (util_clamp == SCHED_CAPACITY_SCALE) {
		seq_puts(sf, "max\n");
		return;
	}

	percent = tg->uclamp_pct[clamp_id];
	percent = div_u64_rem(percent, POW10(UCLAMP_PERCENT_SHIFT), &rem);
	seq_printf(sf, "%llu.%0*u\n", percent, UCLAMP_PERCENT_SHIFT, rem);
}

static int cpu_uclamp_min_show(struct seq_file *sf, void *v)
{
	cpu_uclamp_print(sf, UCLAMP_MIN);
	return 0;
}

static int cpu_uclamp_max_show(struct seq_file *sf, void *v)
{
	cpu_uclamp_print(sf, UCLAMP_MAX);
	return 0;
}
#endif /* CONFIG_UCLAMP_TASK_GROUP */

#ifdef CONFIG_FAIR_GROUP_SCHED
static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
				struct cftype *cftype, u64 shareval)
{
	if (shareval > scale_load_down(ULONG_MAX))
		shareval = MAX_SHARES;
	return sched_group_set_shares(css_tg(css), scale_load(shareval));
}

static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
	struct task_group *tg = css_tg(css);

	return (u64) scale_load_down(tg->shares);
}

#ifdef CONFIG_CFS_BANDWIDTH
static DEFINE_MUTEX(cfs_constraints_mutex);

const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
/* More than 203 days if BW_SHIFT equals 20. */
static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC;

static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);

static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota,
				u64 burst)
{
	int i, ret = 0, runtime_enabled, runtime_was_enabled;
	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;

	if (tg == &root_task_group)
		return -EINVAL;

	/*
	 * Ensure we have some amount of bandwidth in every period. This is
	 * to prevent reaching a state of large arrears when throttled via
	 * entity_tick(), resulting in prolonged exit starvation.
	 */
	if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
		return -EINVAL;

	/*
	 * Likewise, bound things on the other side by preventing insane quota
	 * periods. This also allows us to normalize in computing quota
	 * feasibility.
	 */
	if (period > max_cfs_quota_period)
		return -EINVAL;

	/*
	 * Bound quota to defend quota against overflow during bandwidth shift.
	 */
	if (quota != RUNTIME_INF && quota > max_cfs_runtime)
		return -EINVAL;

	if (quota != RUNTIME_INF && (burst > quota ||
				     burst + quota > max_cfs_runtime))
		return -EINVAL;

	/*
	 * Prevent race between setting of cfs_rq->runtime_enabled and
	 * unthrottle_offline_cfs_rqs().
	 */
	guard(cpus_read_lock)();
	guard(mutex)(&cfs_constraints_mutex);

	ret = __cfs_schedulable(tg, period, quota);
	if (ret)
		return ret;

	runtime_enabled = quota != RUNTIME_INF;
	runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
	/*
	 * If we need to toggle cfs_bandwidth_used, off->on must occur
	 * before making related changes, and on->off must occur afterwards
	 */
	if (runtime_enabled && !runtime_was_enabled)
		cfs_bandwidth_usage_inc();

	scoped_guard (raw_spinlock_irq, &cfs_b->lock) {
		cfs_b->period = ns_to_ktime(period);
		cfs_b->quota = quota;
		cfs_b->burst = burst;

		__refill_cfs_bandwidth_runtime(cfs_b);

		/*
		 * Restart the period timer (if active) to handle new
		 * period expiry:
		 */
		if (runtime_enabled)
			start_cfs_bandwidth(cfs_b);
	}

	for_each_online_cpu(i) {
		struct cfs_rq *cfs_rq = tg->cfs_rq[i];
		struct rq *rq = cfs_rq->rq;

		guard(rq_lock_irq)(rq);
		cfs_rq->runtime_enabled = runtime_enabled;
		cfs_rq->runtime_remaining = 0;

		if (cfs_rq->throttled)
			unthrottle_cfs_rq(cfs_rq);
	}

	if (runtime_was_enabled && !runtime_enabled)
		cfs_bandwidth_usage_dec();

	return 0;
}

static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
{
	u64 quota, period, burst;

	period = ktime_to_ns(tg->cfs_bandwidth.period);
	burst = tg->cfs_bandwidth.burst;
	if (cfs_quota_us < 0)
		quota = RUNTIME_INF;
	else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC)
		quota = (u64)cfs_quota_us * NSEC_PER_USEC;
	else
		return -EINVAL;

	return tg_set_cfs_bandwidth(tg, period, quota, burst);
}

static long tg_get_cfs_quota(struct task_group *tg)
{
	u64 quota_us;

	if (tg->cfs_bandwidth.quota == RUNTIME_INF)
		return -1;

	quota_us = tg->cfs_bandwidth.quota;
	do_div(quota_us, NSEC_PER_USEC);

	return quota_us;
}

static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
{
	u64 quota, period, burst;

	if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC)
		return -EINVAL;

	period = (u64)cfs_period_us * NSEC_PER_USEC;
	quota = tg->cfs_bandwidth.quota;
	burst = tg->cfs_bandwidth.burst;

	return tg_set_cfs_bandwidth(tg, period, quota, burst);
}

static long tg_get_cfs_period(struct task_group *tg)
{
	u64 cfs_period_us;

	cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
	do_div(cfs_period_us, NSEC_PER_USEC);

	return cfs_period_us;
}

static int tg_set_cfs_burst(struct task_group *tg, long cfs_burst_us)
{
	u64 quota, period, burst;

	if ((u64)cfs_burst_us > U64_MAX / NSEC_PER_USEC)
		return -EINVAL;

	burst = (u64)cfs_burst_us * NSEC_PER_USEC;
	period = ktime_to_ns(tg->cfs_bandwidth.period);
	quota = tg->cfs_bandwidth.quota;

	return tg_set_cfs_bandwidth(tg, period, quota, burst);
}

static long tg_get_cfs_burst(struct task_group *tg)
{
	u64 burst_us;

	burst_us = tg->cfs_bandwidth.burst;
	do_div(burst_us, NSEC_PER_USEC);

	return burst_us;
}
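
/*
 * Worked example for the us-based setters above (values illustrative):
 * cfs_quota_us = 25000 with cfs_period_us = 100000 stores quota = 25ms
 * and period = 100ms in nanoseconds, allowing at most a quarter of one
 * CPU per period; a negative cfs_quota_us maps to RUNTIME_INF, which
 * tg_get_cfs_quota() reports back as -1.
 */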

static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
				  struct cftype *cft)
{
	return tg_get_cfs_quota(css_tg(css));
}

static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
				   struct cftype *cftype, s64 cfs_quota_us)
{
	return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
}

static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
				   struct cftype *cft)
{
	return tg_get_cfs_period(css_tg(css));
}

static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
				    struct cftype *cftype, u64 cfs_period_us)
{
	return tg_set_cfs_period(css_tg(css), cfs_period_us);
}

static u64 cpu_cfs_burst_read_u64(struct cgroup_subsys_state *css,
				  struct cftype *cft)
{
	return tg_get_cfs_burst(css_tg(css));
}

static int cpu_cfs_burst_write_u64(struct cgroup_subsys_state *css,
				   struct cftype *cftype, u64 cfs_burst_us)
{
	return tg_set_cfs_burst(css_tg(css), cfs_burst_us);
}

struct cfs_schedulable_data {
	struct task_group *tg;
	u64 period, quota;
};

/*
 * Normalize group quota/period to be quota/max_period.
 * Note: units are usecs.
 */
static u64 normalize_cfs_quota(struct task_group *tg,
			       struct cfs_schedulable_data *d)
{
	u64 quota, period;

	if (tg == d->tg) {
		period = d->period;
		quota = d->quota;
	} else {
		period = tg_get_cfs_period(tg);
		quota = tg_get_cfs_quota(tg);
	}

	/* Note: these should typically be equivalent */
	if (quota == RUNTIME_INF || quota == -1)
		return RUNTIME_INF;

	return to_ratio(period, quota);
}

static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
{
	struct cfs_schedulable_data *d = data;
	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
	s64 quota = 0, parent_quota = -1;

	if (!tg->parent) {
		quota = RUNTIME_INF;
	} else {
		struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;

		quota = normalize_cfs_quota(tg, d);
		parent_quota = parent_b->hierarchical_quota;

		/*
		 * Ensure max(child_quota) <= parent_quota. On cgroup2,
		 * always take the non-RUNTIME_INF min. On cgroup1, only
		 * inherit when no limit is set. In both cases this is used
		 * by the scheduler to determine if a given CFS task has a
		 * bandwidth constraint at some higher level.
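		 *
		 * For instance (illustrative numbers, in the normalized
		 * ratio units computed above): with an effective parent
		 * quota of 50ms per 100ms period, a cgroup2 child asking
		 * for 80ms is clamped to the parent's value via the min()
		 * below, whereas on cgroup1 the same request is rejected
		 * outright with -EINVAL.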
		 */
		if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) {
			if (quota == RUNTIME_INF)
				quota = parent_quota;
			else if (parent_quota != RUNTIME_INF)
				quota = min(quota, parent_quota);
		} else {
			if (quota == RUNTIME_INF)
				quota = parent_quota;
			else if (parent_quota != RUNTIME_INF && quota > parent_quota)
				return -EINVAL;
		}
	}
	cfs_b->hierarchical_quota = quota;

	return 0;
}

static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
{
	struct cfs_schedulable_data data = {
		.tg = tg,
		.period = period,
		.quota = quota,
	};

	if (quota != RUNTIME_INF) {
		do_div(data.period, NSEC_PER_USEC);
		do_div(data.quota, NSEC_PER_USEC);
	}

	guard(rcu)();
	return walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
}

static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
{
	struct task_group *tg = css_tg(seq_css(sf));
	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;

	seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
	seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
	seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);

	if (schedstat_enabled() && tg != &root_task_group) {
		struct sched_statistics *stats;
		u64 ws = 0;
		int i;

		for_each_possible_cpu(i) {
			stats = __schedstats_from_se(tg->se[i]);
			ws += schedstat_val(stats->wait_sum);
		}

		seq_printf(sf, "wait_sum %llu\n", ws);
	}

	seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst);
	seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time);

	return 0;
}

static u64 throttled_time_self(struct task_group *tg)
{
	int i;
	u64 total = 0;

	for_each_possible_cpu(i) {
		total += READ_ONCE(tg->cfs_rq[i]->throttled_clock_self_time);
	}

	return total;
}

static int cpu_cfs_local_stat_show(struct seq_file *sf, void *v)
{
	struct task_group *tg = css_tg(seq_css(sf));

	seq_printf(sf, "throttled_time %llu\n", throttled_time_self(tg));

	return 0;
}
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED
static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
				struct cftype *cft, s64 val)
{
	return sched_group_set_rt_runtime(css_tg(css), val);
}

static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
	return sched_group_rt_runtime(css_tg(css));
}

static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
				    struct cftype *cftype, u64 rt_period_us)
{
	return sched_group_set_rt_period(css_tg(css), rt_period_us);
}

static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
				   struct cftype *cft)
{
	return sched_group_rt_period(css_tg(css));
}
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_FAIR_GROUP_SCHED
static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css,
			     struct cftype *cft)
{
	return css_tg(css)->idle;
}

static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
			      struct cftype *cft, s64 idle)
{
	return sched_group_set_idle(css_tg(css), idle);
}
#endif

static struct cftype cpu_legacy_files[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
	{
		.name = "shares",
		.read_u64 = cpu_shares_read_u64,
		.write_u64 = cpu_shares_write_u64,
	},
	{
		.name = "idle",
		.read_s64 = cpu_idle_read_s64,
		.write_s64 = cpu_idle_write_s64,
	},
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	{
		.name = "cfs_quota_us",
		.read_s64 = cpu_cfs_quota_read_s64,
		.write_s64 = cpu_cfs_quota_write_s64,
	},
	{
		.name = "cfs_period_us",
		.read_u64 = cpu_cfs_period_read_u64,
		.write_u64 = cpu_cfs_period_write_u64,
	},
	{
		.name = "cfs_burst_us",
		.read_u64 = cpu_cfs_burst_read_u64,
		.write_u64 = cpu_cfs_burst_write_u64,
	},
	{
		.name = "stat",
		.seq_show = cpu_cfs_stat_show,
	},
	{
		.name = "stat.local",
		.seq_show = cpu_cfs_local_stat_show,
	},
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	{
		.name = "rt_runtime_us",
		.read_s64 = cpu_rt_runtime_read,
		.write_s64 = cpu_rt_runtime_write,
	},
	{
		.name = "rt_period_us",
		.read_u64 = cpu_rt_period_read_uint,
		.write_u64 = cpu_rt_period_write_uint,
	},
#endif
#ifdef CONFIG_UCLAMP_TASK_GROUP
	{
		.name = "uclamp.min",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = cpu_uclamp_min_show,
		.write = cpu_uclamp_min_write,
	},
	{
		.name = "uclamp.max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = cpu_uclamp_max_show,
		.write = cpu_uclamp_max_write,
	},
#endif
	{ }	/* Terminate */
};

static int cpu_extra_stat_show(struct seq_file *sf,
			       struct cgroup_subsys_state *css)
{
#ifdef CONFIG_CFS_BANDWIDTH
	{
		struct task_group *tg = css_tg(css);
		struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
		u64 throttled_usec, burst_usec;

		throttled_usec = cfs_b->throttled_time;
		do_div(throttled_usec, NSEC_PER_USEC);
		burst_usec = cfs_b->burst_time;
		do_div(burst_usec, NSEC_PER_USEC);

		seq_printf(sf, "nr_periods %d\n"
			   "nr_throttled %d\n"
			   "throttled_usec %llu\n"
			   "nr_bursts %d\n"
			   "burst_usec %llu\n",
			   cfs_b->nr_periods, cfs_b->nr_throttled,
			   throttled_usec, cfs_b->nr_burst, burst_usec);
	}
#endif
	return 0;
}

static int cpu_local_stat_show(struct seq_file *sf,
			       struct cgroup_subsys_state *css)
{
#ifdef CONFIG_CFS_BANDWIDTH
	{
		struct task_group *tg = css_tg(css);
		u64 throttled_self_usec;

		throttled_self_usec = throttled_time_self(tg);
		do_div(throttled_self_usec, NSEC_PER_USEC);

		seq_printf(sf, "throttled_usec %llu\n",
			   throttled_self_usec);
	}
#endif
	return 0;
}

#ifdef CONFIG_FAIR_GROUP_SCHED
static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
	struct task_group *tg = css_tg(css);
	u64 weight = scale_load_down(tg->shares);

	return DIV_ROUND_CLOSEST_ULL(weight * CGROUP_WEIGHT_DFL, 1024);
}

static int cpu_weight_write_u64(struct cgroup_subsys_state *css,
				struct cftype *cft, u64 weight)
{
	/*
	 * cgroup weight knobs should use the common MIN, DFL and MAX
	 * values which are 1, 100 and 10000 respectively. While it loses
	 * a bit of range on both ends, it maps pretty well onto the shares
	 * value used by the scheduler and the round-trip conversions
	 * preserve the original value over the entire range.
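	 *
	 * Example round trip: the default weight of 100 maps to
	 * 100 * 1024 / 100 = 1024 shares below, and cpu_weight_read_u64()
	 * above maps those 1024 shares back to 1024 * 100 / 1024 = 100.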
	 */
	if (weight < CGROUP_WEIGHT_MIN || weight > CGROUP_WEIGHT_MAX)
		return -ERANGE;

	weight = DIV_ROUND_CLOSEST_ULL(weight * 1024, CGROUP_WEIGHT_DFL);

	return sched_group_set_shares(css_tg(css), scale_load(weight));
}

static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css,
				    struct cftype *cft)
{
	unsigned long weight = scale_load_down(css_tg(css)->shares);
	int last_delta = INT_MAX;
	int prio, delta;

	/* Find the closest nice value to the current weight */
	for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) {
		delta = abs(sched_prio_to_weight[prio] - weight);
		if (delta >= last_delta)
			break;
		last_delta = delta;
	}

	return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO);
}

static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
				     struct cftype *cft, s64 nice)
{
	unsigned long weight;
	int idx;

	if (nice < MIN_NICE || nice > MAX_NICE)
		return -ERANGE;

	idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO;
	idx = array_index_nospec(idx, 40);
	weight = sched_prio_to_weight[idx];

	return sched_group_set_shares(css_tg(css), scale_load(weight));
}
#endif

static void __maybe_unused cpu_period_quota_print(struct seq_file *sf,
						  long period, long quota)
{
	if (quota < 0)
		seq_puts(sf, "max");
	else
		seq_printf(sf, "%ld", quota);

	seq_printf(sf, " %ld\n", period);
}

/* The caller should put the current value in *@periodp before calling */
static int __maybe_unused cpu_period_quota_parse(char *buf,
						 u64 *periodp, u64 *quotap)
{
	char tok[21];	/* U64_MAX */

	if (sscanf(buf, "%20s %llu", tok, periodp) < 1)
		return -EINVAL;

	*periodp *= NSEC_PER_USEC;

	if (sscanf(tok, "%llu", quotap))
		*quotap *= NSEC_PER_USEC;
	else if (!strcmp(tok, "max"))
		*quotap = RUNTIME_INF;
	else
		return -EINVAL;

	return 0;
}

#ifdef CONFIG_CFS_BANDWIDTH
static int cpu_max_show(struct seq_file *sf, void *v)
{
	struct task_group *tg = css_tg(seq_css(sf));

	cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg));
	return 0;
}

static ssize_t cpu_max_write(struct kernfs_open_file *of,
			     char *buf, size_t nbytes, loff_t off)
{
	struct task_group *tg = css_tg(of_css(of));
	u64 period = tg_get_cfs_period(tg);
	u64 burst = tg->cfs_bandwidth.burst;
	u64 quota;
	int ret;

	ret = cpu_period_quota_parse(buf, &period, &quota);
	if (!ret)
		ret = tg_set_cfs_bandwidth(tg, period, quota, burst);
	return ret ?: nbytes;
}
#endif

static struct cftype cpu_files[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
	{
		.name = "weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = cpu_weight_read_u64,
		.write_u64 = cpu_weight_write_u64,
	},
	{
		.name = "weight.nice",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_s64 = cpu_weight_nice_read_s64,
		.write_s64 = cpu_weight_nice_write_s64,
	},
	{
		.name = "idle",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_s64 = cpu_idle_read_s64,
		.write_s64 = cpu_idle_write_s64,
	},
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	{
		.name = "max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = cpu_max_show,
		.write = cpu_max_write,
	},
	{
		.name = "max.burst",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = cpu_cfs_burst_read_u64,
		.write_u64 = cpu_cfs_burst_write_u64,
	},
#endif
#ifdef CONFIG_UCLAMP_TASK_GROUP
	{
		.name = "uclamp.min",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = cpu_uclamp_min_show,
		.write = cpu_uclamp_min_write,
	},
	{
		.name = "uclamp.max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = cpu_uclamp_max_show,
		.write = cpu_uclamp_max_write,
	},
#endif
	{ }	/* terminate */
};

struct cgroup_subsys cpu_cgrp_subsys = {
	.css_alloc = cpu_cgroup_css_alloc,
	.css_online = cpu_cgroup_css_online,
	.css_released = cpu_cgroup_css_released,
	.css_free = cpu_cgroup_css_free,
	.css_extra_stat_show = cpu_extra_stat_show,
	.css_local_stat_show = cpu_local_stat_show,
#ifdef CONFIG_RT_GROUP_SCHED
	.can_attach = cpu_cgroup_can_attach,
#endif
	.attach = cpu_cgroup_attach,
	.legacy_cftypes = cpu_legacy_files,
	.dfl_cftypes = cpu_files,
	.early_init = true,
	.threaded = true,
};

#endif /* CONFIG_CGROUP_SCHED */

void dump_cpu_task(int cpu)
{
	if (cpu == smp_processor_id() && in_hardirq()) {
		struct pt_regs *regs;

		regs = get_irq_regs();
		if (regs) {
			show_regs(regs);
			return;
		}
	}

	if (trigger_single_cpu_backtrace(cpu))
		return;

	pr_info("Task dump for CPU %d:\n", cpu);
	sched_show_task(cpu_curr(cpu));
}

/*
 * Nice levels are multiplicative, with a gentle 10% change for every
 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
 * nice 1, it will get ~10% less CPU time than another CPU-bound task
 * that remained on nice 0.
 *
 * The "10% effect" is relative and cumulative: from _any_ nice level,
 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
 * it's +10% CPU usage. (To achieve that we use a multiplier of 1.25:
 * if a task goes up by ~10% and another task goes down by ~10%, then
 * the relative distance between them is ~25%.)
 */
const int sched_prio_to_weight[40] = {
 /* -20 */     88761,     71755,     56483,     46273,     36291,
 /* -15 */     29154,     23254,     18705,     14949,     11916,
 /* -10 */      9548,      7620,      6100,      4904,      3906,
 /*  -5 */      3121,      2501,      1991,      1586,      1277,
 /*   0 */      1024,       820,       655,       526,       423,
 /*   5 */       335,       272,       215,       172,       137,
 /*  10 */       110,        87,        70,        56,        45,
 /*  15 */        36,        29,        23,        18,        15,
};

/*
 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, pre-calculated.
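 *
 * Sanity check on one entry: for nice 0 the weight is 1024, and
 * 2^32 / 1024 = 4294967296 / 1024 = 4194304, which is exactly the
 * nice-0 slot of the table below.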

/*
 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, pre-calculated.
 *
 * In cases where the weight does not change often, we can use the
 * pre-calculated inverse to speed up arithmetic by turning divisions
 * into multiplications:
 */
const u32 sched_prio_to_wmult[40] = {
 /* -20 */     48388,     59856,     76040,     92818,    118348,
 /* -15 */    147320,    184698,    229616,    287308,    360437,
 /* -10 */    449829,    563644,    704093,    875809,   1099582,
 /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
 /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
 /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
 /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
 /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};
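
/*
 * Illustrative use of the inverse weights (a sketch of the pattern used
 * by __calc_delta() in fair.c): a division by the task weight becomes a
 * multiply and a shift,
 *
 *   delta / weight  ~=  (delta * wmult) >> 32,  with wmult = 2^32 / weight
 *
 * e.g. for nice 0: wmult = 4194304 == 2^32 / 1024, so
 * (delta * 4194304) >> 32 == delta / 1024 exactly.
 */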

void call_trace_sched_update_nr_running(struct rq *rq, int count)
{
	trace_sched_update_nr_running_tp(rq, count);
}

#ifdef CONFIG_SCHED_MM_CID

/*
 * @cid_lock: Guarantee forward-progress of cid allocation.
 *
 * Concurrency ID allocation within a bitmap is mostly lock-free. The cid_lock
 * is only used when contention is detected by the lock-free allocation so
 * forward progress can be guaranteed.
 */
DEFINE_RAW_SPINLOCK(cid_lock);

/*
 * @use_cid_lock: Select cid allocation behavior: lock-free vs spinlock.
 *
 * When @use_cid_lock is 0, the cid allocation is lock-free. When contention is
 * detected, it is set to 1 to ensure that all newly coming allocations are
 * serialized by @cid_lock until the allocation which detected contention
 * completes and sets @use_cid_lock back to 0. This guarantees forward progress
 * of a cid allocation.
 */
int use_cid_lock;

/*
 * mm_cid remote-clear implements a lock-free algorithm to clear per-mm/cpu cid
 * concurrently with respect to the execution of the source runqueue context
 * switch.
 *
 * There is one basic property we want to guarantee here:
 *
 * (1) Remote-clear should _never_ mark a per-cpu cid UNSET when it is actively
 * used by a task. That would lead to concurrent allocation of the cid and
 * userspace corruption.
 *
 * Provide this guarantee by introducing a Dekker memory ordering to guarantee
 * that a pair of loads observe at least one of a pair of stores, which can be
 * shown as:
 *
 *      X = Y = 0
 *
 *      w[X]=1          w[Y]=1
 *      MB              MB
 *      r[Y]=y          r[X]=x
 *
 * Which guarantees that x==0 && y==0 is impossible. But rather than using
 * values 0 and 1, this algorithm cares about specific state transitions of the
 * runqueue current task (as updated by the scheduler context switch), and the
 * per-mm/cpu cid value.
 *
 * Let's introduce task (Y) which has task->mm == mm and task (N) which has
 * task->mm != mm for the rest of the discussion. There are two scheduler state
 * transitions on context switch we care about:
 *
 * (TSA) Store to rq->curr with transition from (N) to (Y)
 *
 * (TSB) Store to rq->curr with transition from (Y) to (N)
 *
 * On the remote-clear side, there is one transition we care about:
 *
 * (TMA) cmpxchg to *pcpu_cid to set the LAZY flag
 *
 * There is also a transition to UNSET state which can be performed from all
 * sides (scheduler, remote-clear). It is always performed with a cmpxchg which
 * guarantees that only a single thread will succeed:
 *
 * (TMB) cmpxchg to *pcpu_cid to mark UNSET
 *
 * Just to be clear, what we do _not_ want to happen is a transition to UNSET
 * when a thread is actively using the cid (property (1)).
 *
 * Let's look at the relevant combinations of TSA/TSB, and TMA transitions.
 *
 * Scenario A) (TSA)+(TMA) (from next task perspective)
 *
 * CPU0                                      CPU1
 *
 * Context switch CS-1                       Remote-clear
 *   - store to rq->curr: (N)->(Y) (TSA)     - cmpxchg to *pcpu_cid to LAZY (TMA)
 *                                             (implied barrier after cmpxchg)
 *   - switch_mm_cid()
 *     - memory barrier (see switch_mm_cid()
 *       comment explaining how this barrier
 *       is combined with other scheduler
 *       barriers)
 *     - mm_cid_get (next)
 *       - READ_ONCE(*pcpu_cid)              - rcu_dereference(src_rq->curr)
 *
 * This Dekker ensures that either task (Y) is observed by the
 * rcu_dereference() or the LAZY flag is observed by READ_ONCE(), or both are
 * observed.
 *
 * If task (Y) store is observed by rcu_dereference(), it means that there is
 * still an active task on the cpu. Remote-clear will therefore not transition
 * to UNSET, which fulfills property (1).
 *
 * If task (Y) is not observed, but the lazy flag is observed by READ_ONCE(),
 * it will move its state to UNSET, which clears the percpu cid perhaps
 * uselessly (which is not an issue for correctness). Because task (Y) is not
 * observed, CPU1 can move ahead to set the state to UNSET. Because moving
 * state to UNSET is done with a cmpxchg expecting that the old state has the
 * LAZY flag set, only one thread will successfully UNSET.
 *
 * If both states (LAZY flag and task (Y)) are observed, the thread on CPU0
 * will observe the LAZY flag and transition to UNSET (perhaps uselessly), and
 * CPU1 will observe task (Y) and do nothing more, which is fine.
 *
 * What we are effectively preventing with this Dekker is a scenario where
 * neither LAZY flag nor store (Y) is observed, which would fail property (1)
 * because this would UNSET a cid which is actively used.
 */
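
/*
 * A minimal sketch of how Scenario A instantiates the store-buffering
 * pattern above (illustrative only; names and flag handling are
 * abridged from the real code):
 *
 *   // CPU0: context switch             // CPU1: remote-clear
 *   WRITE rq->curr = Y                  cmpxchg(&pcpu_cid->cid,
 *   smp_mb(); // via switch_mm_cid()            cid, cid | LAZY);
 *   x = READ_ONCE(pcpu_cid->cid);       // cmpxchg implies a full barrier
 *                                       y = rcu_dereference(rq->curr);
 *
 * At least one of "CPU0 observes LAZY" (via x) or "CPU1 observes task
 * (Y)" (via y) must hold, so a cid in active use is never moved to
 * UNSET.
 */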

void sched_mm_cid_migrate_from(struct task_struct *t)
{
	t->migrate_from_cpu = task_cpu(t);
}

static
int __sched_mm_cid_migrate_from_fetch_cid(struct rq *src_rq,
					  struct task_struct *t,
					  struct mm_cid *src_pcpu_cid)
{
	struct mm_struct *mm = t->mm;
	struct task_struct *src_task;
	int src_cid, last_mm_cid;

	if (!mm)
		return -1;

	last_mm_cid = t->last_mm_cid;
	/*
	 * If the migrated task has no last cid, or if the current
	 * task on src rq uses the cid, it means the source cid does not need
	 * to be moved to the destination cpu.
	 */
	if (last_mm_cid == -1)
		return -1;
	src_cid = READ_ONCE(src_pcpu_cid->cid);
	if (!mm_cid_is_valid(src_cid) || last_mm_cid != src_cid)
		return -1;

	/*
	 * If we observe an active task using the mm on this rq, it means we
	 * are not the last task to be migrated from this cpu for this mm, so
	 * there is no need to move src_cid to the destination cpu.
	 */
	guard(rcu)();
	src_task = rcu_dereference(src_rq->curr);
	if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
		t->last_mm_cid = -1;
		return -1;
	}

	return src_cid;
}

static
int __sched_mm_cid_migrate_from_try_steal_cid(struct rq *src_rq,
					      struct task_struct *t,
					      struct mm_cid *src_pcpu_cid,
					      int src_cid)
{
	struct task_struct *src_task;
	struct mm_struct *mm = t->mm;
	int lazy_cid;

	if (src_cid == -1)
		return -1;

	/*
	 * Attempt to clear the source cpu cid to move it to the destination
	 * cpu.
	 */
	lazy_cid = mm_cid_set_lazy_put(src_cid);
	if (!try_cmpxchg(&src_pcpu_cid->cid, &src_cid, lazy_cid))
		return -1;

	/*
	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
	 * rq->curr->mm matches the scheduler barrier in context_switch()
	 * between store to rq->curr and load of prev and next task's
	 * per-mm/cpu cid.
	 *
	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
	 * rq->curr->mm_cid_active matches the barrier in
	 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
	 * sched_mm_cid_after_execve() between store to t->mm_cid_active and
	 * load of per-mm/cpu cid.
	 */

	/*
	 * If we observe an active task using the mm on this rq after setting
	 * the lazy-put flag, this task will be responsible for transitioning
	 * from lazy-put flag set to MM_CID_UNSET.
	 */
	scoped_guard (rcu) {
		src_task = rcu_dereference(src_rq->curr);
		if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) {
			/*
			 * We observed an active task for this mm, there is
			 * therefore no point in moving this cid to the
			 * destination cpu.
			 */
			t->last_mm_cid = -1;
			return -1;
		}
	}

	/*
	 * The src_cid is unused, so it can be unset.
	 */
	if (!try_cmpxchg(&src_pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
		return -1;
	return src_cid;
}
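
/*
 * Illustrative sequence for the two halves of a migration (a sketch;
 * the actual call sites live in the task migration paths, e.g.
 * set_task_cpu() on the source side and the migrated-task activation
 * path on the destination side):
 *
 *   sched_mm_cid_migrate_from(t);        // source: remember task_cpu(t)
 *   ...task is moved to the destination cpu...
 *   // with dst_rq lock held and interrupts disabled:
 *   sched_mm_cid_migrate_to(dst_rq, t);  // dest: steal src cid if dst unset
 */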

/*
 * Migration to dst cpu. Called with dst_rq lock held.
 * Interrupts are disabled, which keeps the window of cid ownership without the
 * source rq lock held small.
 */
void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t)
{
	struct mm_cid *src_pcpu_cid, *dst_pcpu_cid;
	struct mm_struct *mm = t->mm;
	int src_cid, dst_cid, src_cpu;
	struct rq *src_rq;

	lockdep_assert_rq_held(dst_rq);

	if (!mm)
		return;
	src_cpu = t->migrate_from_cpu;
	if (src_cpu == -1) {
		t->last_mm_cid = -1;
		return;
	}
	/*
	 * Move the src cid if the dst cid is unset. This keeps id
	 * allocation closest to 0 in cases where few threads migrate around
	 * many CPUs.
	 *
	 * If destination cid is already set, we may have to just clear
	 * the src cid to ensure compactness in frequent migration
	 * scenarios.
	 *
	 * It is not useful to clear the src cid when the number of threads is
	 * greater than or equal to the number of allowed CPUs, because
	 * user-space can expect that the number of allowed cids can reach the
	 * number of allowed CPUs.
	 */
	dst_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(dst_rq));
	dst_cid = READ_ONCE(dst_pcpu_cid->cid);
	if (!mm_cid_is_unset(dst_cid) &&
	    atomic_read(&mm->mm_users) >= t->nr_cpus_allowed)
		return;
	src_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, src_cpu);
	src_rq = cpu_rq(src_cpu);
	src_cid = __sched_mm_cid_migrate_from_fetch_cid(src_rq, t, src_pcpu_cid);
	if (src_cid == -1)
		return;
	src_cid = __sched_mm_cid_migrate_from_try_steal_cid(src_rq, t, src_pcpu_cid,
							    src_cid);
	if (src_cid == -1)
		return;
	if (!mm_cid_is_unset(dst_cid)) {
		__mm_cid_put(mm, src_cid);
		return;
	}
	/* Move src_cid to dst cpu. */
	mm_cid_snapshot_time(dst_rq, mm);
	WRITE_ONCE(dst_pcpu_cid->cid, src_cid);
}

static void sched_mm_cid_remote_clear(struct mm_struct *mm, struct mm_cid *pcpu_cid,
				      int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct task_struct *t;
	int cid, lazy_cid;

	cid = READ_ONCE(pcpu_cid->cid);
	if (!mm_cid_is_valid(cid))
		return;

	/*
	 * Clear the cpu cid if it is set to keep cid allocation compact. If
	 * there happen to be other tasks left on the source cpu using this
	 * mm, the next task using this mm will reallocate its cid on context
	 * switch.
	 */
	lazy_cid = mm_cid_set_lazy_put(cid);
	if (!try_cmpxchg(&pcpu_cid->cid, &cid, lazy_cid))
		return;

	/*
	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
	 * rq->curr->mm matches the scheduler barrier in context_switch()
	 * between store to rq->curr and load of prev and next task's
	 * per-mm/cpu cid.
	 *
	 * The implicit barrier after cmpxchg per-mm/cpu cid before loading
	 * rq->curr->mm_cid_active matches the barrier in
	 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and
	 * sched_mm_cid_after_execve() between store to t->mm_cid_active and
	 * load of per-mm/cpu cid.
	 */

	/*
	 * If we observe an active task using the mm on this rq after setting
	 * the lazy-put flag, that task will be responsible for transitioning
	 * from lazy-put flag set to MM_CID_UNSET.
	 */
	scoped_guard (rcu) {
		t = rcu_dereference(rq->curr);
		if (READ_ONCE(t->mm_cid_active) && t->mm == mm)
			return;
	}

	/*
	 * The cid is unused, so it can be unset.
	 * Disable interrupts to keep the window of cid ownership without rq
	 * lock small.
	 */
	scoped_guard (irqsave) {
		if (try_cmpxchg(&pcpu_cid->cid, &lazy_cid, MM_CID_UNSET))
			__mm_cid_put(mm, cid);
	}
}

static void sched_mm_cid_remote_clear_old(struct mm_struct *mm, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct mm_cid *pcpu_cid;
	struct task_struct *curr;
	u64 rq_clock;

	/*
	 * rq->clock load is racy on 32-bit but one spurious clear once in a
	 * while is irrelevant.
	 */
	rq_clock = READ_ONCE(rq->clock);
	pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);

	/*
	 * In order to take care of infrequently scheduled tasks, bump the time
	 * snapshot associated with this cid if an active task using the mm is
	 * observed on this rq.
	 */
	scoped_guard (rcu) {
		curr = rcu_dereference(rq->curr);
		if (READ_ONCE(curr->mm_cid_active) && curr->mm == mm) {
			WRITE_ONCE(pcpu_cid->time, rq_clock);
			return;
		}
	}

	if (rq_clock < pcpu_cid->time + SCHED_MM_CID_PERIOD_NS)
		return;
	sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
}

static void sched_mm_cid_remote_clear_weight(struct mm_struct *mm, int cpu,
					     int weight)
{
	struct mm_cid *pcpu_cid;
	int cid;

	pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu);
	cid = READ_ONCE(pcpu_cid->cid);
	if (!mm_cid_is_valid(cid) || cid < weight)
		return;
	sched_mm_cid_remote_clear(mm, pcpu_cid, cpu);
}

static void task_mm_cid_work(struct callback_head *work)
{
	unsigned long now = jiffies, old_scan, next_scan;
	struct task_struct *t = current;
	struct cpumask *cidmask;
	struct mm_struct *mm;
	int weight, cpu;

	SCHED_WARN_ON(t != container_of(work, struct task_struct, cid_work));

	work->next = work;	/* Prevent double-add */
	if (t->flags & PF_EXITING)
		return;
	mm = t->mm;
	if (!mm)
		return;
	old_scan = READ_ONCE(mm->mm_cid_next_scan);
	next_scan = now + msecs_to_jiffies(MM_CID_SCAN_DELAY);
	if (!old_scan) {
		unsigned long res;

		res = cmpxchg(&mm->mm_cid_next_scan, old_scan, next_scan);
		if (res != old_scan)
			old_scan = res;
		else
			old_scan = next_scan;
	}
	if (time_before(now, old_scan))
		return;
	if (!try_cmpxchg(&mm->mm_cid_next_scan, &old_scan, next_scan))
		return;
	cidmask = mm_cidmask(mm);
	/* Clear cids that were not recently used. */
	for_each_possible_cpu(cpu)
		sched_mm_cid_remote_clear_old(mm, cpu);
	weight = cpumask_weight(cidmask);
	/*
	 * Clear cids that are greater than or equal to the cidmask weight to
	 * recompact it.
	 */
	for_each_possible_cpu(cpu)
		sched_mm_cid_remote_clear_weight(mm, cpu, weight);
}

void init_sched_mm_cid(struct task_struct *t)
{
	struct mm_struct *mm = t->mm;
	int mm_users = 0;

	if (mm) {
		mm_users = atomic_read(&mm->mm_users);
		if (mm_users == 1)
			mm->mm_cid_next_scan = jiffies + msecs_to_jiffies(MM_CID_SCAN_DELAY);
	}
	t->cid_work.next = &t->cid_work;	/* Protect against double add */
	init_task_work(&t->cid_work, task_mm_cid_work);
}

void task_tick_mm_cid(struct rq *rq, struct task_struct *curr)
{
	struct callback_head *work = &curr->cid_work;
	unsigned long now = jiffies;

	if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) ||
	    work->next != work)
		return;
	if (time_before(now, READ_ONCE(curr->mm->mm_cid_next_scan)))
		return;
	task_work_add(curr, work, TWA_RESUME);
}
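
/*
 * Sketch of the resulting compaction pipeline (illustrative; at most
 * one scan per mm per MM_CID_SCAN_DELAY interval):
 *
 *   scheduler tick
 *     task_tick_mm_cid()                      // scan due for this mm?
 *       task_work_add(curr, ..., TWA_RESUME)  // defer to return-to-user
 *         task_mm_cid_work()
 *           sched_mm_cid_remote_clear_old()    // drop cids unused too long
 *           sched_mm_cid_remote_clear_weight() // drop cids >= cids in use
 */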

void sched_mm_cid_exit_signals(struct task_struct *t)
{
	struct mm_struct *mm = t->mm;
	struct rq *rq;

	if (!mm)
		return;

	preempt_disable();
	rq = this_rq();
	guard(rq_lock_irqsave)(rq);
	preempt_enable_no_resched();	/* holding spinlock */
	WRITE_ONCE(t->mm_cid_active, 0);
	/*
	 * Store t->mm_cid_active before loading per-mm/cpu cid.
	 * Matches barrier in sched_mm_cid_remote_clear_old().
	 */
	smp_mb();
	mm_cid_put(mm);
	t->last_mm_cid = t->mm_cid = -1;
}

void sched_mm_cid_before_execve(struct task_struct *t)
{
	struct mm_struct *mm = t->mm;
	struct rq *rq;

	if (!mm)
		return;

	preempt_disable();
	rq = this_rq();
	guard(rq_lock_irqsave)(rq);
	preempt_enable_no_resched();	/* holding spinlock */
	WRITE_ONCE(t->mm_cid_active, 0);
	/*
	 * Store t->mm_cid_active before loading per-mm/cpu cid.
	 * Matches barrier in sched_mm_cid_remote_clear_old().
	 */
	smp_mb();
	mm_cid_put(mm);
	t->last_mm_cid = t->mm_cid = -1;
}

void sched_mm_cid_after_execve(struct task_struct *t)
{
	struct mm_struct *mm = t->mm;
	struct rq *rq;

	if (!mm)
		return;

	preempt_disable();
	rq = this_rq();
	scoped_guard (rq_lock_irqsave, rq) {
		preempt_enable_no_resched();	/* holding spinlock */
		WRITE_ONCE(t->mm_cid_active, 1);
		/*
		 * Store t->mm_cid_active before loading per-mm/cpu cid.
		 * Matches barrier in sched_mm_cid_remote_clear_old().
		 */
		smp_mb();
		t->last_mm_cid = t->mm_cid = mm_cid_get(rq, mm);
	}
	rseq_set_notify_resume(t);
}

void sched_mm_cid_fork(struct task_struct *t)
{
	WARN_ON_ONCE(!t->mm || t->mm_cid != -1);
	t->mm_cid_active = 1;
}
#endif