1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * kernel/sched/core.c 4 * 5 * Core kernel CPU scheduler code 6 * 7 * Copyright (C) 1991-2002 Linus Torvalds 8 * Copyright (C) 1998-2024 Ingo Molnar, Red Hat 9 */ 10 #include <linux/highmem.h> 11 #include <linux/hrtimer_api.h> 12 #include <linux/ktime_api.h> 13 #include <linux/sched/signal.h> 14 #include <linux/syscalls_api.h> 15 #include <linux/debug_locks.h> 16 #include <linux/prefetch.h> 17 #include <linux/capability.h> 18 #include <linux/pgtable_api.h> 19 #include <linux/wait_bit.h> 20 #include <linux/jiffies.h> 21 #include <linux/spinlock_api.h> 22 #include <linux/cpumask_api.h> 23 #include <linux/lockdep_api.h> 24 #include <linux/hardirq.h> 25 #include <linux/softirq.h> 26 #include <linux/refcount_api.h> 27 #include <linux/topology.h> 28 #include <linux/sched/clock.h> 29 #include <linux/sched/cond_resched.h> 30 #include <linux/sched/cputime.h> 31 #include <linux/sched/debug.h> 32 #include <linux/sched/hotplug.h> 33 #include <linux/sched/init.h> 34 #include <linux/sched/isolation.h> 35 #include <linux/sched/loadavg.h> 36 #include <linux/sched/mm.h> 37 #include <linux/sched/nohz.h> 38 #include <linux/sched/rseq_api.h> 39 #include <linux/sched/rt.h> 40 41 #include <linux/blkdev.h> 42 #include <linux/context_tracking.h> 43 #include <linux/cpuset.h> 44 #include <linux/delayacct.h> 45 #include <linux/init_task.h> 46 #include <linux/interrupt.h> 47 #include <linux/ioprio.h> 48 #include <linux/kallsyms.h> 49 #include <linux/kcov.h> 50 #include <linux/kprobes.h> 51 #include <linux/llist_api.h> 52 #include <linux/mmu_context.h> 53 #include <linux/mmzone.h> 54 #include <linux/mutex_api.h> 55 #include <linux/nmi.h> 56 #include <linux/nospec.h> 57 #include <linux/perf_event_api.h> 58 #include <linux/profile.h> 59 #include <linux/psi.h> 60 #include <linux/rcuwait_api.h> 61 #include <linux/rseq.h> 62 #include <linux/sched/wake_q.h> 63 #include <linux/scs.h> 64 #include <linux/slab.h> 65 #include <linux/syscalls.h> 66 #include <linux/vtime.h> 67 #include <linux/wait_api.h> 68 #include <linux/workqueue_api.h> 69 #include <linux/livepatch_sched.h> 70 71 #ifdef CONFIG_PREEMPT_DYNAMIC 72 # ifdef CONFIG_GENERIC_ENTRY 73 # include <linux/entry-common.h> 74 # endif 75 #endif 76 77 #include <uapi/linux/sched/types.h> 78 79 #include <asm/irq_regs.h> 80 #include <asm/switch_to.h> 81 #include <asm/tlb.h> 82 83 #define CREATE_TRACE_POINTS 84 #include <linux/sched/rseq_api.h> 85 #include <trace/events/sched.h> 86 #include <trace/events/ipi.h> 87 #undef CREATE_TRACE_POINTS 88 89 #include "sched.h" 90 #include "stats.h" 91 92 #include "autogroup.h" 93 #include "pelt.h" 94 #include "smp.h" 95 96 #include "../workqueue_internal.h" 97 #include "../../io_uring/io-wq.h" 98 #include "../smpboot.h" 99 100 EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu); 101 EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask); 102 103 /* 104 * Export tracepoints that act as a bare tracehook (ie: have no trace event 105 * associated with them) to allow external modules to probe them. 
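 *
 * Illustrative sketch only (not a complete module, and the exact probe
 * prototype must match the tracepoint's TP_PROTO): a module attaches a probe
 * through the register_trace_*() helper generated for each tracepoint, e.g.:
 *
 *	static void probe_pelt_cfs(void *data, struct cfs_rq *cfs_rq)
 *	{
 *		... inspect PELT state of @cfs_rq ...
 *	}
 *
 *	register_trace_pelt_cfs_tp(probe_pelt_cfs, NULL);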
106 */ 107 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp); 108 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp); 109 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp); 110 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp); 111 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp); 112 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_hw_tp); 113 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp); 114 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp); 115 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp); 116 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp); 117 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp); 118 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_compute_energy_tp); 119 120 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); 121 122 /* 123 * Debugging: various feature bits 124 * 125 * If SCHED_DEBUG is disabled, each compilation unit has its own copy of 126 * sysctl_sched_features, defined in sched.h, to allow constants propagation 127 * at compile time and compiler optimization based on features default. 128 */ 129 #define SCHED_FEAT(name, enabled) \ 130 (1UL << __SCHED_FEAT_##name) * enabled | 131 __read_mostly unsigned int sysctl_sched_features = 132 #include "features.h" 133 0; 134 #undef SCHED_FEAT 135 136 /* 137 * Print a warning if need_resched is set for the given duration (if 138 * LATENCY_WARN is enabled). 139 * 140 * If sysctl_resched_latency_warn_once is set, only one warning will be shown 141 * per boot. 142 */ 143 __read_mostly int sysctl_resched_latency_warn_ms = 100; 144 __read_mostly int sysctl_resched_latency_warn_once = 1; 145 146 /* 147 * Number of tasks to iterate in a single balance run. 148 * Limited because this is done with IRQs disabled. 149 */ 150 __read_mostly unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK; 151 152 __read_mostly int scheduler_running; 153 154 #ifdef CONFIG_SCHED_CORE 155 156 DEFINE_STATIC_KEY_FALSE(__sched_core_enabled); 157 158 /* kernel prio, less is more */ 159 static inline int __task_prio(const struct task_struct *p) 160 { 161 if (p->sched_class == &stop_sched_class) /* trumps deadline */ 162 return -2; 163 164 if (p->dl_server) 165 return -1; /* deadline */ 166 167 if (rt_or_dl_prio(p->prio)) 168 return p->prio; /* [-1, 99] */ 169 170 if (p->sched_class == &idle_sched_class) 171 return MAX_RT_PRIO + NICE_WIDTH; /* 140 */ 172 173 if (task_on_scx(p)) 174 return MAX_RT_PRIO + MAX_NICE + 1; /* 120, squash ext */ 175 176 return MAX_RT_PRIO + MAX_NICE; /* 119, squash fair */ 177 } 178 179 /* 180 * l(a,b) 181 * le(a,b) := !l(b,a) 182 * g(a,b) := l(b,a) 183 * ge(a,b) := !l(a,b) 184 */ 185 186 /* real prio, less is less */ 187 static inline bool prio_less(const struct task_struct *a, 188 const struct task_struct *b, bool in_fi) 189 { 190 191 int pa = __task_prio(a), pb = __task_prio(b); 192 193 if (-pa < -pb) 194 return true; 195 196 if (-pb < -pa) 197 return false; 198 199 if (pa == -1) { /* dl_prio() doesn't work because of stop_class above */ 200 const struct sched_dl_entity *a_dl, *b_dl; 201 202 a_dl = &a->dl; 203 /* 204 * Since,'a' and 'b' can be CFS tasks served by DL server, 205 * __task_prio() can return -1 (for DL) even for those. In that 206 * case, get to the dl_server's DL entity. 
207 */ 208 if (a->dl_server) 209 a_dl = a->dl_server; 210 211 b_dl = &b->dl; 212 if (b->dl_server) 213 b_dl = b->dl_server; 214 215 return !dl_time_before(a_dl->deadline, b_dl->deadline); 216 } 217 218 if (pa == MAX_RT_PRIO + MAX_NICE) /* fair */ 219 return cfs_prio_less(a, b, in_fi); 220 221 #ifdef CONFIG_SCHED_CLASS_EXT 222 if (pa == MAX_RT_PRIO + MAX_NICE + 1) /* ext */ 223 return scx_prio_less(a, b, in_fi); 224 #endif 225 226 return false; 227 } 228 229 static inline bool __sched_core_less(const struct task_struct *a, 230 const struct task_struct *b) 231 { 232 if (a->core_cookie < b->core_cookie) 233 return true; 234 235 if (a->core_cookie > b->core_cookie) 236 return false; 237 238 /* flip prio, so high prio is leftmost */ 239 if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count)) 240 return true; 241 242 return false; 243 } 244 245 #define __node_2_sc(node) rb_entry((node), struct task_struct, core_node) 246 247 static inline bool rb_sched_core_less(struct rb_node *a, const struct rb_node *b) 248 { 249 return __sched_core_less(__node_2_sc(a), __node_2_sc(b)); 250 } 251 252 static inline int rb_sched_core_cmp(const void *key, const struct rb_node *node) 253 { 254 const struct task_struct *p = __node_2_sc(node); 255 unsigned long cookie = (unsigned long)key; 256 257 if (cookie < p->core_cookie) 258 return -1; 259 260 if (cookie > p->core_cookie) 261 return 1; 262 263 return 0; 264 } 265 266 void sched_core_enqueue(struct rq *rq, struct task_struct *p) 267 { 268 if (p->se.sched_delayed) 269 return; 270 271 rq->core->core_task_seq++; 272 273 if (!p->core_cookie) 274 return; 275 276 rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less); 277 } 278 279 void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) 280 { 281 if (p->se.sched_delayed) 282 return; 283 284 rq->core->core_task_seq++; 285 286 if (sched_core_enqueued(p)) { 287 rb_erase(&p->core_node, &rq->core_tree); 288 RB_CLEAR_NODE(&p->core_node); 289 } 290 291 /* 292 * Migrating the last task off the cpu, with the cpu in forced idle 293 * state. Reschedule to create an accounting edge for forced idle, 294 * and re-examine whether the core is still in forced idle state. 295 */ 296 if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 && 297 rq->core->core_forceidle_count && rq->curr == rq->idle) 298 resched_curr(rq); 299 } 300 301 static int sched_task_is_throttled(struct task_struct *p, int cpu) 302 { 303 if (p->sched_class->task_is_throttled) 304 return p->sched_class->task_is_throttled(p, cpu); 305 306 return 0; 307 } 308 309 static struct task_struct *sched_core_next(struct task_struct *p, unsigned long cookie) 310 { 311 struct rb_node *node = &p->core_node; 312 int cpu = task_cpu(p); 313 314 do { 315 node = rb_next(node); 316 if (!node) 317 return NULL; 318 319 p = __node_2_sc(node); 320 if (p->core_cookie != cookie) 321 return NULL; 322 323 } while (sched_task_is_throttled(p, cpu)); 324 325 return p; 326 } 327 328 /* 329 * Find left-most (aka, highest priority) and unthrottled task matching @cookie. 330 * If no suitable task is found, NULL will be returned. 
331 */ 332 static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie) 333 { 334 struct task_struct *p; 335 struct rb_node *node; 336 337 node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp); 338 if (!node) 339 return NULL; 340 341 p = __node_2_sc(node); 342 if (!sched_task_is_throttled(p, rq->cpu)) 343 return p; 344 345 return sched_core_next(p, cookie); 346 } 347 348 /* 349 * Magic required such that: 350 * 351 * raw_spin_rq_lock(rq); 352 * ... 353 * raw_spin_rq_unlock(rq); 354 * 355 * ends up locking and unlocking the _same_ lock, and all CPUs 356 * always agree on what rq has what lock. 357 * 358 * XXX entirely possible to selectively enable cores, don't bother for now. 359 */ 360 361 static DEFINE_MUTEX(sched_core_mutex); 362 static atomic_t sched_core_count; 363 static struct cpumask sched_core_mask; 364 365 static void sched_core_lock(int cpu, unsigned long *flags) 366 { 367 const struct cpumask *smt_mask = cpu_smt_mask(cpu); 368 int t, i = 0; 369 370 local_irq_save(*flags); 371 for_each_cpu(t, smt_mask) 372 raw_spin_lock_nested(&cpu_rq(t)->__lock, i++); 373 } 374 375 static void sched_core_unlock(int cpu, unsigned long *flags) 376 { 377 const struct cpumask *smt_mask = cpu_smt_mask(cpu); 378 int t; 379 380 for_each_cpu(t, smt_mask) 381 raw_spin_unlock(&cpu_rq(t)->__lock); 382 local_irq_restore(*flags); 383 } 384 385 static void __sched_core_flip(bool enabled) 386 { 387 unsigned long flags; 388 int cpu, t; 389 390 cpus_read_lock(); 391 392 /* 393 * Toggle the online cores, one by one. 394 */ 395 cpumask_copy(&sched_core_mask, cpu_online_mask); 396 for_each_cpu(cpu, &sched_core_mask) { 397 const struct cpumask *smt_mask = cpu_smt_mask(cpu); 398 399 sched_core_lock(cpu, &flags); 400 401 for_each_cpu(t, smt_mask) 402 cpu_rq(t)->core_enabled = enabled; 403 404 cpu_rq(cpu)->core->core_forceidle_start = 0; 405 406 sched_core_unlock(cpu, &flags); 407 408 cpumask_andnot(&sched_core_mask, &sched_core_mask, smt_mask); 409 } 410 411 /* 412 * Toggle the offline CPUs. 413 */ 414 for_each_cpu_andnot(cpu, cpu_possible_mask, cpu_online_mask) 415 cpu_rq(cpu)->core_enabled = enabled; 416 417 cpus_read_unlock(); 418 } 419 420 static void sched_core_assert_empty(void) 421 { 422 int cpu; 423 424 for_each_possible_cpu(cpu) 425 WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree)); 426 } 427 428 static void __sched_core_enable(void) 429 { 430 static_branch_enable(&__sched_core_enabled); 431 /* 432 * Ensure all previous instances of raw_spin_rq_*lock() have finished 433 * and future ones will observe !sched_core_disabled(). 
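 * (The matching read-side sections are the preempt_disable() regions in
 * raw_spin_rq_lock_nested() and raw_spin_rq_trylock() below, which carry the
 * "Matches synchronize_rcu() in __sched_core_enable()" comments.)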
434 */ 435 synchronize_rcu(); 436 __sched_core_flip(true); 437 sched_core_assert_empty(); 438 } 439 440 static void __sched_core_disable(void) 441 { 442 sched_core_assert_empty(); 443 __sched_core_flip(false); 444 static_branch_disable(&__sched_core_enabled); 445 } 446 447 void sched_core_get(void) 448 { 449 if (atomic_inc_not_zero(&sched_core_count)) 450 return; 451 452 mutex_lock(&sched_core_mutex); 453 if (!atomic_read(&sched_core_count)) 454 __sched_core_enable(); 455 456 smp_mb__before_atomic(); 457 atomic_inc(&sched_core_count); 458 mutex_unlock(&sched_core_mutex); 459 } 460 461 static void __sched_core_put(struct work_struct *work) 462 { 463 if (atomic_dec_and_mutex_lock(&sched_core_count, &sched_core_mutex)) { 464 __sched_core_disable(); 465 mutex_unlock(&sched_core_mutex); 466 } 467 } 468 469 void sched_core_put(void) 470 { 471 static DECLARE_WORK(_work, __sched_core_put); 472 473 /* 474 * "There can be only one" 475 * 476 * Either this is the last one, or we don't actually need to do any 477 * 'work'. If it is the last *again*, we rely on 478 * WORK_STRUCT_PENDING_BIT. 479 */ 480 if (!atomic_add_unless(&sched_core_count, -1, 1)) 481 schedule_work(&_work); 482 } 483 484 #else /* !CONFIG_SCHED_CORE */ 485 486 static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { } 487 static inline void 488 sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { } 489 490 #endif /* CONFIG_SCHED_CORE */ 491 492 /* need a wrapper since we may need to trace from modules */ 493 EXPORT_TRACEPOINT_SYMBOL(sched_set_state_tp); 494 495 /* Call via the helper macro trace_set_current_state. */ 496 void __trace_set_current_state(int state_value) 497 { 498 trace_sched_set_state_tp(current, state_value); 499 } 500 EXPORT_SYMBOL(__trace_set_current_state); 501 502 /* 503 * Serialization rules: 504 * 505 * Lock order: 506 * 507 * p->pi_lock 508 * rq->lock 509 * hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls) 510 * 511 * rq1->lock 512 * rq2->lock where: rq1 < rq2 513 * 514 * Regular state: 515 * 516 * Normal scheduling state is serialized by rq->lock. __schedule() takes the 517 * local CPU's rq->lock, it optionally removes the task from the runqueue and 518 * always looks at the local rq data structures to find the most eligible task 519 * to run next. 520 * 521 * Task enqueue is also under rq->lock, possibly taken from another CPU. 522 * Wakeups from another LLC domain might use an IPI to transfer the enqueue to 523 * the local CPU to avoid bouncing the runqueue state around [ see 524 * ttwu_queue_wakelist() ] 525 * 526 * Task wakeup, specifically wakeups that involve migration, are horribly 527 * complicated to avoid having to take two rq->locks. 528 * 529 * Special state: 530 * 531 * System-calls and anything external will use task_rq_lock() which acquires 532 * both p->pi_lock and rq->lock. 
As a consequence the state they change is 533 * stable while holding either lock: 534 * 535 * - sched_setaffinity()/ 536 * set_cpus_allowed_ptr(): p->cpus_ptr, p->nr_cpus_allowed 537 * - set_user_nice(): p->se.load, p->*prio 538 * - __sched_setscheduler(): p->sched_class, p->policy, p->*prio, 539 * p->se.load, p->rt_priority, 540 * p->dl.dl_{runtime, deadline, period, flags, bw, density} 541 * - sched_setnuma(): p->numa_preferred_nid 542 * - sched_move_task(): p->sched_task_group 543 * - uclamp_update_active() p->uclamp* 544 * 545 * p->state <- TASK_*: 546 * 547 * is changed locklessly using set_current_state(), __set_current_state() or 548 * set_special_state(), see their respective comments, or by 549 * try_to_wake_up(). This latter uses p->pi_lock to serialize against 550 * concurrent self. 551 * 552 * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }: 553 * 554 * is set by activate_task() and cleared by deactivate_task(), under 555 * rq->lock. Non-zero indicates the task is runnable, the special 556 * ON_RQ_MIGRATING state is used for migration without holding both 557 * rq->locks. It indicates task_cpu() is not stable, see task_rq_lock(). 558 * 559 * Additionally it is possible to be ->on_rq but still be considered not 560 * runnable when p->se.sched_delayed is true. These tasks are on the runqueue 561 * but will be dequeued as soon as they get picked again. See the 562 * task_is_runnable() helper. 563 * 564 * p->on_cpu <- { 0, 1 }: 565 * 566 * is set by prepare_task() and cleared by finish_task() such that it will be 567 * set before p is scheduled-in and cleared after p is scheduled-out, both 568 * under rq->lock. Non-zero indicates the task is running on its CPU. 569 * 570 * [ The astute reader will observe that it is possible for two tasks on one 571 * CPU to have ->on_cpu = 1 at the same time. ] 572 * 573 * task_cpu(p): is changed by set_task_cpu(), the rules are: 574 * 575 * - Don't call set_task_cpu() on a blocked task: 576 * 577 * We don't care what CPU we're not running on, this simplifies hotplug, 578 * the CPU assignment of blocked tasks isn't required to be valid. 579 * 580 * - for try_to_wake_up(), called under p->pi_lock: 581 * 582 * This allows try_to_wake_up() to only take one rq->lock, see its comment. 
583 * 584 * - for migration called under rq->lock: 585 * [ see task_on_rq_migrating() in task_rq_lock() ] 586 * 587 * o move_queued_task() 588 * o detach_task() 589 * 590 * - for migration called under double_rq_lock(): 591 * 592 * o __migrate_swap_task() 593 * o push_rt_task() / pull_rt_task() 594 * o push_dl_task() / pull_dl_task() 595 * o dl_task_offline_migration() 596 * 597 */ 598 599 void raw_spin_rq_lock_nested(struct rq *rq, int subclass) 600 { 601 raw_spinlock_t *lock; 602 603 /* Matches synchronize_rcu() in __sched_core_enable() */ 604 preempt_disable(); 605 if (sched_core_disabled()) { 606 raw_spin_lock_nested(&rq->__lock, subclass); 607 /* preempt_count *MUST* be > 1 */ 608 preempt_enable_no_resched(); 609 return; 610 } 611 612 for (;;) { 613 lock = __rq_lockp(rq); 614 raw_spin_lock_nested(lock, subclass); 615 if (likely(lock == __rq_lockp(rq))) { 616 /* preempt_count *MUST* be > 1 */ 617 preempt_enable_no_resched(); 618 return; 619 } 620 raw_spin_unlock(lock); 621 } 622 } 623 624 bool raw_spin_rq_trylock(struct rq *rq) 625 { 626 raw_spinlock_t *lock; 627 bool ret; 628 629 /* Matches synchronize_rcu() in __sched_core_enable() */ 630 preempt_disable(); 631 if (sched_core_disabled()) { 632 ret = raw_spin_trylock(&rq->__lock); 633 preempt_enable(); 634 return ret; 635 } 636 637 for (;;) { 638 lock = __rq_lockp(rq); 639 ret = raw_spin_trylock(lock); 640 if (!ret || (likely(lock == __rq_lockp(rq)))) { 641 preempt_enable(); 642 return ret; 643 } 644 raw_spin_unlock(lock); 645 } 646 } 647 648 void raw_spin_rq_unlock(struct rq *rq) 649 { 650 raw_spin_unlock(rq_lockp(rq)); 651 } 652 653 #ifdef CONFIG_SMP 654 /* 655 * double_rq_lock - safely lock two runqueues 656 */ 657 void double_rq_lock(struct rq *rq1, struct rq *rq2) 658 { 659 lockdep_assert_irqs_disabled(); 660 661 if (rq_order_less(rq2, rq1)) 662 swap(rq1, rq2); 663 664 raw_spin_rq_lock(rq1); 665 if (__rq_lockp(rq1) != __rq_lockp(rq2)) 666 raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING); 667 668 double_rq_clock_clear_update(rq1, rq2); 669 } 670 #endif 671 672 /* 673 * __task_rq_lock - lock the rq @p resides on. 674 */ 675 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) 676 __acquires(rq->lock) 677 { 678 struct rq *rq; 679 680 lockdep_assert_held(&p->pi_lock); 681 682 for (;;) { 683 rq = task_rq(p); 684 raw_spin_rq_lock(rq); 685 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { 686 rq_pin_lock(rq, rf); 687 return rq; 688 } 689 raw_spin_rq_unlock(rq); 690 691 while (unlikely(task_on_rq_migrating(p))) 692 cpu_relax(); 693 } 694 } 695 696 /* 697 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on. 698 */ 699 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) 700 __acquires(p->pi_lock) 701 __acquires(rq->lock) 702 { 703 struct rq *rq; 704 705 for (;;) { 706 raw_spin_lock_irqsave(&p->pi_lock, rf->flags); 707 rq = task_rq(p); 708 raw_spin_rq_lock(rq); 709 /* 710 * move_queued_task() task_rq_lock() 711 * 712 * ACQUIRE (rq->lock) 713 * [S] ->on_rq = MIGRATING [L] rq = task_rq() 714 * WMB (__set_task_cpu()) ACQUIRE (rq->lock); 715 * [S] ->cpu = new_cpu [L] task_rq() 716 * [L] ->on_rq 717 * RELEASE (rq->lock) 718 * 719 * If we observe the old CPU in task_rq_lock(), the acquire of 720 * the old rq->lock will fully serialize against the stores. 721 * 722 * If we observe the new CPU in task_rq_lock(), the address 723 * dependency headed by '[L] rq = task_rq()' and the acquire 724 * will pair with the WMB to ensure we then also see migrating. 
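 *
 * For reference, the typical caller pattern this pairs with looks like
 * (illustrative sketch; see the helpers defined here and in sched.h):
 *
 *	struct rq_flags rf;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &rf);
 *	... p's scheduling state is stable here ...
 *	task_rq_unlock(rq, p, &rf);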
725 */ 726 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { 727 rq_pin_lock(rq, rf); 728 return rq; 729 } 730 raw_spin_rq_unlock(rq); 731 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); 732 733 while (unlikely(task_on_rq_migrating(p))) 734 cpu_relax(); 735 } 736 } 737 738 /* 739 * RQ-clock updating methods: 740 */ 741 742 static void update_rq_clock_task(struct rq *rq, s64 delta) 743 { 744 /* 745 * In theory, the compiler should just see 0 here, and optimize out the call 746 * to sched_rt_avg_update. But I don't trust it... 747 */ 748 s64 __maybe_unused steal = 0, irq_delta = 0; 749 750 #ifdef CONFIG_IRQ_TIME_ACCOUNTING 751 if (irqtime_enabled()) { 752 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; 753 754 /* 755 * Since irq_time is only updated on {soft,}irq_exit, we might run into 756 * this case when a previous update_rq_clock() happened inside a 757 * {soft,}IRQ region. 758 * 759 * When this happens, we stop ->clock_task and only update the 760 * prev_irq_time stamp to account for the part that fit, so that a next 761 * update will consume the rest. This ensures ->clock_task is 762 * monotonic. 763 * 764 * It does however cause some slight misattribution of {soft,}IRQ 765 * time; a more accurate solution would be to update the irq_time using 766 * the current rq->clock timestamp, except that would require using 767 * atomic ops. 768 */ 769 if (irq_delta > delta) 770 irq_delta = delta; 771 772 rq->prev_irq_time += irq_delta; 773 delta -= irq_delta; 774 delayacct_irq(rq->curr, irq_delta); 775 } 776 #endif 777 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING 778 if (static_key_false((&paravirt_steal_rq_enabled))) { 779 u64 prev_steal; 780 781 steal = prev_steal = paravirt_steal_clock(cpu_of(rq)); 782 steal -= rq->prev_steal_time_rq; 783 784 if (unlikely(steal > delta)) 785 steal = delta; 786 787 rq->prev_steal_time_rq = prev_steal; 788 delta -= steal; 789 } 790 #endif 791 792 rq->clock_task += delta; 793 794 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ 795 if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY)) 796 update_irq_load_avg(rq, irq_delta + steal); 797 #endif 798 update_rq_clock_pelt(rq, delta); 799 } 800 801 void update_rq_clock(struct rq *rq) 802 { 803 s64 delta; 804 u64 clock; 805 806 lockdep_assert_rq_held(rq); 807 808 if (rq->clock_update_flags & RQCF_ACT_SKIP) 809 return; 810 811 if (sched_feat(WARN_DOUBLE_CLOCK)) 812 WARN_ON_ONCE(rq->clock_update_flags & RQCF_UPDATED); 813 rq->clock_update_flags |= RQCF_UPDATED; 814 815 clock = sched_clock_cpu(cpu_of(rq)); 816 scx_rq_clock_update(rq, clock); 817 818 delta = clock - rq->clock; 819 if (delta < 0) 820 return; 821 rq->clock += delta; 822 823 update_rq_clock_task(rq, delta); 824 } 825 826 #ifdef CONFIG_SCHED_HRTICK 827 /* 828 * Use HR-timers to deliver accurate preemption points. 829 */ 830 831 static void hrtick_clear(struct rq *rq) 832 { 833 if (hrtimer_active(&rq->hrtick_timer)) 834 hrtimer_cancel(&rq->hrtick_timer); 835 } 836 837 /* 838 * High-resolution timer tick. 839 * Runs from hardirq context with interrupts disabled.
840 */ 841 static enum hrtimer_restart hrtick(struct hrtimer *timer) 842 { 843 struct rq *rq = container_of(timer, struct rq, hrtick_timer); 844 struct rq_flags rf; 845 846 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); 847 848 rq_lock(rq, &rf); 849 update_rq_clock(rq); 850 rq->donor->sched_class->task_tick(rq, rq->curr, 1); 851 rq_unlock(rq, &rf); 852 853 return HRTIMER_NORESTART; 854 } 855 856 #ifdef CONFIG_SMP 857 858 static void __hrtick_restart(struct rq *rq) 859 { 860 struct hrtimer *timer = &rq->hrtick_timer; 861 ktime_t time = rq->hrtick_time; 862 863 hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD); 864 } 865 866 /* 867 * called from hardirq (IPI) context 868 */ 869 static void __hrtick_start(void *arg) 870 { 871 struct rq *rq = arg; 872 struct rq_flags rf; 873 874 rq_lock(rq, &rf); 875 __hrtick_restart(rq); 876 rq_unlock(rq, &rf); 877 } 878 879 /* 880 * Called to set the hrtick timer state. 881 * 882 * called with rq->lock held and IRQs disabled 883 */ 884 void hrtick_start(struct rq *rq, u64 delay) 885 { 886 struct hrtimer *timer = &rq->hrtick_timer; 887 s64 delta; 888 889 /* 890 * Don't schedule slices shorter than 10000ns, that just 891 * doesn't make sense and can cause timer DoS. 892 */ 893 delta = max_t(s64, delay, 10000LL); 894 rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta); 895 896 if (rq == this_rq()) 897 __hrtick_restart(rq); 898 else 899 smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd); 900 } 901 902 #else 903 /* 904 * Called to set the hrtick timer state. 905 * 906 * called with rq->lock held and IRQs disabled 907 */ 908 void hrtick_start(struct rq *rq, u64 delay) 909 { 910 /* 911 * Don't schedule slices shorter than 10000ns, that just 912 * doesn't make sense. Rely on vruntime for fairness. 913 */ 914 delay = max_t(u64, delay, 10000LL); 915 hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), 916 HRTIMER_MODE_REL_PINNED_HARD); 917 } 918 919 #endif /* CONFIG_SMP */ 920 921 static void hrtick_rq_init(struct rq *rq) 922 { 923 #ifdef CONFIG_SMP 924 INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq); 925 #endif 926 hrtimer_setup(&rq->hrtick_timer, hrtick, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); 927 } 928 #else /* CONFIG_SCHED_HRTICK */ 929 static inline void hrtick_clear(struct rq *rq) 930 { 931 } 932 933 static inline void hrtick_rq_init(struct rq *rq) 934 { 935 } 936 #endif /* CONFIG_SCHED_HRTICK */ 937 938 /* 939 * try_cmpxchg based fetch_or() macro so it works for different integer types: 940 */ 941 #define fetch_or(ptr, mask) \ 942 ({ \ 943 typeof(ptr) _ptr = (ptr); \ 944 typeof(mask) _mask = (mask); \ 945 typeof(*_ptr) _val = *_ptr; \ 946 \ 947 do { \ 948 } while (!try_cmpxchg(_ptr, &_val, _val | _mask)); \ 949 _val; \ 950 }) 951 952 #if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG) 953 /* 954 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG, 955 * this avoids any races wrt polling state changes and thereby avoids 956 * spurious IPIs. 957 */ 958 static inline bool set_nr_and_not_polling(struct thread_info *ti, int tif) 959 { 960 return !(fetch_or(&ti->flags, 1 << tif) & _TIF_POLLING_NRFLAG); 961 } 962 963 /* 964 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set. 965 * 966 * If this returns true, then the idle task promises to call 967 * sched_ttwu_pending() and reschedule soon. 
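 *
 * Worked example of the IPI avoidance (illustrative): if the remote CPU's
 * idle task still has _TIF_POLLING_NRFLAG set, set_nr_and_not_polling()
 * sees that flag in the old value returned by fetch_or() and returns false,
 * so the caller skips the IPI; the polling idle loop notices
 * TIF_NEED_RESCHED by itself and reschedules.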
968 */ 969 static bool set_nr_if_polling(struct task_struct *p) 970 { 971 struct thread_info *ti = task_thread_info(p); 972 typeof(ti->flags) val = READ_ONCE(ti->flags); 973 974 do { 975 if (!(val & _TIF_POLLING_NRFLAG)) 976 return false; 977 if (val & _TIF_NEED_RESCHED) 978 return true; 979 } while (!try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED)); 980 981 return true; 982 } 983 984 #else 985 static inline bool set_nr_and_not_polling(struct thread_info *ti, int tif) 986 { 987 set_ti_thread_flag(ti, tif); 988 return true; 989 } 990 991 #ifdef CONFIG_SMP 992 static inline bool set_nr_if_polling(struct task_struct *p) 993 { 994 return false; 995 } 996 #endif 997 #endif 998 999 static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task) 1000 { 1001 struct wake_q_node *node = &task->wake_q; 1002 1003 /* 1004 * Atomically grab the task, if ->wake_q is !nil already it means 1005 * it's already queued (either by us or someone else) and will get the 1006 * wakeup due to that. 1007 * 1008 * In order to ensure that a pending wakeup will observe our pending 1009 * state, even in the failed case, an explicit smp_mb() must be used. 1010 */ 1011 smp_mb__before_atomic(); 1012 if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL))) 1013 return false; 1014 1015 /* 1016 * The head is context local, there can be no concurrency. 1017 */ 1018 *head->lastp = node; 1019 head->lastp = &node->next; 1020 return true; 1021 } 1022 1023 /** 1024 * wake_q_add() - queue a wakeup for 'later' waking. 1025 * @head: the wake_q_head to add @task to 1026 * @task: the task to queue for 'later' wakeup 1027 * 1028 * Queue a task for later wakeup, most likely by the wake_up_q() call in the 1029 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come 1030 * instantly. 1031 * 1032 * This function must be used as-if it were wake_up_process(); IOW the task 1033 * must be ready to be woken at this location. 1034 */ 1035 void wake_q_add(struct wake_q_head *head, struct task_struct *task) 1036 { 1037 if (__wake_q_add(head, task)) 1038 get_task_struct(task); 1039 } 1040 1041 /** 1042 * wake_q_add_safe() - safely queue a wakeup for 'later' waking. 1043 * @head: the wake_q_head to add @task to 1044 * @task: the task to queue for 'later' wakeup 1045 * 1046 * Queue a task for later wakeup, most likely by the wake_up_q() call in the 1047 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come 1048 * instantly. 1049 * 1050 * This function must be used as-if it were wake_up_process(); IOW the task 1051 * must be ready to be woken at this location. 1052 * 1053 * This function is essentially a task-safe equivalent to wake_q_add(). Callers 1054 * that already hold reference to @task can call the 'safe' version and trust 1055 * wake_q to do the right thing depending whether or not the @task is already 1056 * queued for wakeup. 1057 */ 1058 void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task) 1059 { 1060 if (!__wake_q_add(head, task)) 1061 put_task_struct(task); 1062 } 1063 1064 void wake_up_q(struct wake_q_head *head) 1065 { 1066 struct wake_q_node *node = head->first; 1067 1068 while (node != WAKE_Q_TAIL) { 1069 struct task_struct *task; 1070 1071 task = container_of(node, struct task_struct, wake_q); 1072 node = node->next; 1073 /* pairs with cmpxchg_relaxed() in __wake_q_add() */ 1074 WRITE_ONCE(task->wake_q.next, NULL); 1075 /* Task can safely be re-inserted now. 
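 *
 * For reference, the wake_q machinery as a whole is typically used like
 * this (an illustrative sketch; queueing is done under a lock and the
 * actual wakeups are issued after dropping it):
 *
 *	DEFINE_WAKE_Q(wake_q);
 *
 *	spin_lock(&lock);
 *	wake_q_add(&wake_q, task);
 *	spin_unlock(&lock);
 *	wake_up_q(&wake_q);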
*/ 1076 1077 /* 1078 * wake_up_process() executes a full barrier, which pairs with 1079 * the queueing in wake_q_add() so as not to miss wakeups. 1080 */ 1081 wake_up_process(task); 1082 put_task_struct(task); 1083 } 1084 } 1085 1086 /* 1087 * resched_curr - mark rq's current task 'to be rescheduled now'. 1088 * 1089 * On UP this means the setting of the need_resched flag, on SMP it 1090 * might also involve a cross-CPU call to trigger the scheduler on 1091 * the target CPU. 1092 */ 1093 static void __resched_curr(struct rq *rq, int tif) 1094 { 1095 struct task_struct *curr = rq->curr; 1096 struct thread_info *cti = task_thread_info(curr); 1097 int cpu; 1098 1099 lockdep_assert_rq_held(rq); 1100 1101 /* 1102 * Always immediately preempt the idle task; no point in delaying doing 1103 * actual work. 1104 */ 1105 if (is_idle_task(curr) && tif == TIF_NEED_RESCHED_LAZY) 1106 tif = TIF_NEED_RESCHED; 1107 1108 if (cti->flags & ((1 << tif) | _TIF_NEED_RESCHED)) 1109 return; 1110 1111 cpu = cpu_of(rq); 1112 1113 if (cpu == smp_processor_id()) { 1114 set_ti_thread_flag(cti, tif); 1115 if (tif == TIF_NEED_RESCHED) 1116 set_preempt_need_resched(); 1117 return; 1118 } 1119 1120 if (set_nr_and_not_polling(cti, tif)) { 1121 if (tif == TIF_NEED_RESCHED) 1122 smp_send_reschedule(cpu); 1123 } else { 1124 trace_sched_wake_idle_without_ipi(cpu); 1125 } 1126 } 1127 1128 void resched_curr(struct rq *rq) 1129 { 1130 __resched_curr(rq, TIF_NEED_RESCHED); 1131 } 1132 1133 #ifdef CONFIG_PREEMPT_DYNAMIC 1134 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_preempt_lazy); 1135 static __always_inline bool dynamic_preempt_lazy(void) 1136 { 1137 return static_branch_unlikely(&sk_dynamic_preempt_lazy); 1138 } 1139 #else 1140 static __always_inline bool dynamic_preempt_lazy(void) 1141 { 1142 return IS_ENABLED(CONFIG_PREEMPT_LAZY); 1143 } 1144 #endif 1145 1146 static __always_inline int get_lazy_tif_bit(void) 1147 { 1148 if (dynamic_preempt_lazy()) 1149 return TIF_NEED_RESCHED_LAZY; 1150 1151 return TIF_NEED_RESCHED; 1152 } 1153 1154 void resched_curr_lazy(struct rq *rq) 1155 { 1156 __resched_curr(rq, get_lazy_tif_bit()); 1157 } 1158 1159 void resched_cpu(int cpu) 1160 { 1161 struct rq *rq = cpu_rq(cpu); 1162 unsigned long flags; 1163 1164 raw_spin_rq_lock_irqsave(rq, flags); 1165 if (cpu_online(cpu) || cpu == smp_processor_id()) 1166 resched_curr(rq); 1167 raw_spin_rq_unlock_irqrestore(rq, flags); 1168 } 1169 1170 #ifdef CONFIG_SMP 1171 #ifdef CONFIG_NO_HZ_COMMON 1172 /* 1173 * In the semi idle case, use the nearest busy CPU for migrating timers 1174 * from an idle CPU. This is good for power-savings. 1175 * 1176 * We don't do similar optimization for completely idle system, as 1177 * selecting an idle CPU will add more delays to the timers than intended 1178 * (as that CPU's timer base may not be up to date wrt jiffies etc). 
1179 */ 1180 int get_nohz_timer_target(void) 1181 { 1182 int i, cpu = smp_processor_id(), default_cpu = -1; 1183 struct sched_domain *sd; 1184 const struct cpumask *hk_mask; 1185 1186 if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE)) { 1187 if (!idle_cpu(cpu)) 1188 return cpu; 1189 default_cpu = cpu; 1190 } 1191 1192 hk_mask = housekeeping_cpumask(HK_TYPE_KERNEL_NOISE); 1193 1194 guard(rcu)(); 1195 1196 for_each_domain(cpu, sd) { 1197 for_each_cpu_and(i, sched_domain_span(sd), hk_mask) { 1198 if (cpu == i) 1199 continue; 1200 1201 if (!idle_cpu(i)) 1202 return i; 1203 } 1204 } 1205 1206 if (default_cpu == -1) 1207 default_cpu = housekeeping_any_cpu(HK_TYPE_KERNEL_NOISE); 1208 1209 return default_cpu; 1210 } 1211 1212 /* 1213 * When add_timer_on() enqueues a timer into the timer wheel of an 1214 * idle CPU then this timer might expire before the next timer event 1215 * which is scheduled to wake up that CPU. In case of a completely 1216 * idle system the next event might even be infinite time into the 1217 * future. wake_up_idle_cpu() ensures that the CPU is woken up and 1218 * leaves the inner idle loop so the newly added timer is taken into 1219 * account when the CPU goes back to idle and evaluates the timer 1220 * wheel for the next timer event. 1221 */ 1222 static void wake_up_idle_cpu(int cpu) 1223 { 1224 struct rq *rq = cpu_rq(cpu); 1225 1226 if (cpu == smp_processor_id()) 1227 return; 1228 1229 /* 1230 * Set TIF_NEED_RESCHED and send an IPI if in the non-polling 1231 * part of the idle loop. This forces an exit from the idle loop 1232 * and a round trip to schedule(). Now this could be optimized 1233 * because a simple new idle loop iteration is enough to 1234 * re-evaluate the next tick. Provided some re-ordering of tick 1235 * nohz functions that would need to follow TIF_NR_POLLING 1236 * clearing: 1237 * 1238 * - On most architectures, a simple fetch_or on ti::flags with a 1239 * "0" value would be enough to know if an IPI needs to be sent. 1240 * 1241 * - x86 needs to perform a last need_resched() check between 1242 * monitor and mwait which doesn't take timers into account. 1243 * There a dedicated TIF_TIMER flag would be required to 1244 * fetch_or here and be checked along with TIF_NEED_RESCHED 1245 * before mwait(). 1246 * 1247 * However, remote timer enqueue is not such a frequent event 1248 * and testing of the above solutions didn't appear to report 1249 * much benefits. 1250 */ 1251 if (set_nr_and_not_polling(task_thread_info(rq->idle), TIF_NEED_RESCHED)) 1252 smp_send_reschedule(cpu); 1253 else 1254 trace_sched_wake_idle_without_ipi(cpu); 1255 } 1256 1257 static bool wake_up_full_nohz_cpu(int cpu) 1258 { 1259 /* 1260 * We just need the target to call irq_exit() and re-evaluate 1261 * the next tick. The nohz full kick at least implies that. 1262 * If needed we can still optimize that later with an 1263 * empty IRQ. 1264 */ 1265 if (cpu_is_offline(cpu)) 1266 return true; /* Don't try to wake offline CPUs. */ 1267 if (tick_nohz_full_cpu(cpu)) { 1268 if (cpu != smp_processor_id() || 1269 tick_nohz_tick_stopped()) 1270 tick_nohz_full_kick_cpu(cpu); 1271 return true; 1272 } 1273 1274 return false; 1275 } 1276 1277 /* 1278 * Wake up the specified CPU. If the CPU is going offline, it is the 1279 * caller's responsibility to deal with the lost wakeup, for example, 1280 * by hooking into the CPU_DEAD notifier like timers and hrtimers do. 
1281 */ 1282 void wake_up_nohz_cpu(int cpu) 1283 { 1284 if (!wake_up_full_nohz_cpu(cpu)) 1285 wake_up_idle_cpu(cpu); 1286 } 1287 1288 static void nohz_csd_func(void *info) 1289 { 1290 struct rq *rq = info; 1291 int cpu = cpu_of(rq); 1292 unsigned int flags; 1293 1294 /* 1295 * Release the rq::nohz_csd. 1296 */ 1297 flags = atomic_fetch_andnot(NOHZ_KICK_MASK | NOHZ_NEWILB_KICK, nohz_flags(cpu)); 1298 WARN_ON(!(flags & NOHZ_KICK_MASK)); 1299 1300 rq->idle_balance = idle_cpu(cpu); 1301 if (rq->idle_balance) { 1302 rq->nohz_idle_balance = flags; 1303 __raise_softirq_irqoff(SCHED_SOFTIRQ); 1304 } 1305 } 1306 1307 #endif /* CONFIG_NO_HZ_COMMON */ 1308 1309 #ifdef CONFIG_NO_HZ_FULL 1310 static inline bool __need_bw_check(struct rq *rq, struct task_struct *p) 1311 { 1312 if (rq->nr_running != 1) 1313 return false; 1314 1315 if (p->sched_class != &fair_sched_class) 1316 return false; 1317 1318 if (!task_on_rq_queued(p)) 1319 return false; 1320 1321 return true; 1322 } 1323 1324 bool sched_can_stop_tick(struct rq *rq) 1325 { 1326 int fifo_nr_running; 1327 1328 /* Deadline tasks, even if single, need the tick */ 1329 if (rq->dl.dl_nr_running) 1330 return false; 1331 1332 /* 1333 * If there are more than one RR tasks, we need the tick to affect the 1334 * actual RR behaviour. 1335 */ 1336 if (rq->rt.rr_nr_running) { 1337 if (rq->rt.rr_nr_running == 1) 1338 return true; 1339 else 1340 return false; 1341 } 1342 1343 /* 1344 * If there's no RR tasks, but FIFO tasks, we can skip the tick, no 1345 * forced preemption between FIFO tasks. 1346 */ 1347 fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running; 1348 if (fifo_nr_running) 1349 return true; 1350 1351 /* 1352 * If there are no DL,RR/FIFO tasks, there must only be CFS or SCX tasks 1353 * left. For CFS, if there's more than one we need the tick for 1354 * involuntary preemption. For SCX, ask. 1355 */ 1356 if (scx_enabled() && !scx_can_stop_tick(rq)) 1357 return false; 1358 1359 if (rq->cfs.h_nr_queued > 1) 1360 return false; 1361 1362 /* 1363 * If there is one task and it has CFS runtime bandwidth constraints 1364 * and it's on the cpu now we don't want to stop the tick. 1365 * This check prevents clearing the bit if a newly enqueued task here is 1366 * dequeued by migrating while the constrained task continues to run. 1367 * E.g. going from 2->1 without going through pick_next_task(). 1368 */ 1369 if (__need_bw_check(rq, rq->curr)) { 1370 if (cfs_task_bw_constrained(rq->curr)) 1371 return false; 1372 } 1373 1374 return true; 1375 } 1376 #endif /* CONFIG_NO_HZ_FULL */ 1377 #endif /* CONFIG_SMP */ 1378 1379 #if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \ 1380 (defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH))) 1381 /* 1382 * Iterate task_group tree rooted at *from, calling @down when first entering a 1383 * node and @up when leaving it for the final time. 1384 * 1385 * Caller must hold rcu_lock or sufficient equivalent. 
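 *
 * Illustrative usage (a sketch; my_down_visitor is a made-up tg_visitor,
 * and most walks only need @down, passing tg_nop for @up):
 *
 *	rcu_read_lock();
 *	walk_tg_tree_from(&root_task_group, my_down_visitor, tg_nop, data);
 *	rcu_read_unlock();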
1386 */ 1387 int walk_tg_tree_from(struct task_group *from, 1388 tg_visitor down, tg_visitor up, void *data) 1389 { 1390 struct task_group *parent, *child; 1391 int ret; 1392 1393 parent = from; 1394 1395 down: 1396 ret = (*down)(parent, data); 1397 if (ret) 1398 goto out; 1399 list_for_each_entry_rcu(child, &parent->children, siblings) { 1400 parent = child; 1401 goto down; 1402 1403 up: 1404 continue; 1405 } 1406 ret = (*up)(parent, data); 1407 if (ret || parent == from) 1408 goto out; 1409 1410 child = parent; 1411 parent = parent->parent; 1412 if (parent) 1413 goto up; 1414 out: 1415 return ret; 1416 } 1417 1418 int tg_nop(struct task_group *tg, void *data) 1419 { 1420 return 0; 1421 } 1422 #endif 1423 1424 void set_load_weight(struct task_struct *p, bool update_load) 1425 { 1426 int prio = p->static_prio - MAX_RT_PRIO; 1427 struct load_weight lw; 1428 1429 if (task_has_idle_policy(p)) { 1430 lw.weight = scale_load(WEIGHT_IDLEPRIO); 1431 lw.inv_weight = WMULT_IDLEPRIO; 1432 } else { 1433 lw.weight = scale_load(sched_prio_to_weight[prio]); 1434 lw.inv_weight = sched_prio_to_wmult[prio]; 1435 } 1436 1437 /* 1438 * SCHED_OTHER tasks have to update their load when changing their 1439 * weight 1440 */ 1441 if (update_load && p->sched_class->reweight_task) 1442 p->sched_class->reweight_task(task_rq(p), p, &lw); 1443 else 1444 p->se.load = lw; 1445 } 1446 1447 #ifdef CONFIG_UCLAMP_TASK 1448 /* 1449 * Serializes updates of utilization clamp values 1450 * 1451 * The (slow-path) user-space triggers utilization clamp value updates which 1452 * can require updates on (fast-path) scheduler's data structures used to 1453 * support enqueue/dequeue operations. 1454 * While the per-CPU rq lock protects fast-path update operations, user-space 1455 * requests are serialized using a mutex to reduce the risk of conflicting 1456 * updates or API abuses. 1457 */ 1458 static __maybe_unused DEFINE_MUTEX(uclamp_mutex); 1459 1460 /* Max allowed minimum utilization */ 1461 static unsigned int __maybe_unused sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE; 1462 1463 /* Max allowed maximum utilization */ 1464 static unsigned int __maybe_unused sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE; 1465 1466 /* 1467 * By default RT tasks run at the maximum performance point/capacity of the 1468 * system. Uclamp enforces this by always setting UCLAMP_MIN of RT tasks to 1469 * SCHED_CAPACITY_SCALE. 1470 * 1471 * This knob allows admins to change the default behavior when uclamp is being 1472 * used. In battery powered devices, particularly, running at the maximum 1473 * capacity and frequency will increase energy consumption and shorten the 1474 * battery life. 1475 * 1476 * This knob only affects RT tasks that their uclamp_se->user_defined == false. 1477 * 1478 * This knob will not override the system default sched_util_clamp_min defined 1479 * above. 1480 */ 1481 unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE; 1482 1483 /* All clamps are required to be less or equal than these values */ 1484 static struct uclamp_se uclamp_default[UCLAMP_CNT]; 1485 1486 /* 1487 * This static key is used to reduce the uclamp overhead in the fast path. It 1488 * primarily disables the call to uclamp_rq_{inc, dec}() in 1489 * enqueue/dequeue_task(). 1490 * 1491 * This allows users to continue to enable uclamp in their kernel config with 1492 * minimum uclamp overhead in the fast path. 
1493 * 1494 * As soon as userspace modifies any of the uclamp knobs, the static key is 1495 * enabled, since we have an actual users that make use of uclamp 1496 * functionality. 1497 * 1498 * The knobs that would enable this static key are: 1499 * 1500 * * A task modifying its uclamp value with sched_setattr(). 1501 * * An admin modifying the sysctl_sched_uclamp_{min, max} via procfs. 1502 * * An admin modifying the cgroup cpu.uclamp.{min, max} 1503 */ 1504 DEFINE_STATIC_KEY_FALSE(sched_uclamp_used); 1505 1506 static inline unsigned int 1507 uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id, 1508 unsigned int clamp_value) 1509 { 1510 /* 1511 * Avoid blocked utilization pushing up the frequency when we go 1512 * idle (which drops the max-clamp) by retaining the last known 1513 * max-clamp. 1514 */ 1515 if (clamp_id == UCLAMP_MAX) { 1516 rq->uclamp_flags |= UCLAMP_FLAG_IDLE; 1517 return clamp_value; 1518 } 1519 1520 return uclamp_none(UCLAMP_MIN); 1521 } 1522 1523 static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id, 1524 unsigned int clamp_value) 1525 { 1526 /* Reset max-clamp retention only on idle exit */ 1527 if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE)) 1528 return; 1529 1530 uclamp_rq_set(rq, clamp_id, clamp_value); 1531 } 1532 1533 static inline 1534 unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id, 1535 unsigned int clamp_value) 1536 { 1537 struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket; 1538 int bucket_id = UCLAMP_BUCKETS - 1; 1539 1540 /* 1541 * Since both min and max clamps are max aggregated, find the 1542 * top most bucket with tasks in. 1543 */ 1544 for ( ; bucket_id >= 0; bucket_id--) { 1545 if (!bucket[bucket_id].tasks) 1546 continue; 1547 return bucket[bucket_id].value; 1548 } 1549 1550 /* No tasks -- default clamp values */ 1551 return uclamp_idle_value(rq, clamp_id, clamp_value); 1552 } 1553 1554 static void __uclamp_update_util_min_rt_default(struct task_struct *p) 1555 { 1556 unsigned int default_util_min; 1557 struct uclamp_se *uc_se; 1558 1559 lockdep_assert_held(&p->pi_lock); 1560 1561 uc_se = &p->uclamp_req[UCLAMP_MIN]; 1562 1563 /* Only sync if user didn't override the default */ 1564 if (uc_se->user_defined) 1565 return; 1566 1567 default_util_min = sysctl_sched_uclamp_util_min_rt_default; 1568 uclamp_se_set(uc_se, default_util_min, false); 1569 } 1570 1571 static void uclamp_update_util_min_rt_default(struct task_struct *p) 1572 { 1573 if (!rt_task(p)) 1574 return; 1575 1576 /* Protect updates to p->uclamp_* */ 1577 guard(task_rq_lock)(p); 1578 __uclamp_update_util_min_rt_default(p); 1579 } 1580 1581 static inline struct uclamp_se 1582 uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id) 1583 { 1584 /* Copy by value as we could modify it */ 1585 struct uclamp_se uc_req = p->uclamp_req[clamp_id]; 1586 #ifdef CONFIG_UCLAMP_TASK_GROUP 1587 unsigned int tg_min, tg_max, value; 1588 1589 /* 1590 * Tasks in autogroups or root task group will be 1591 * restricted by system defaults. 
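 *
 * Other tasks get their request clamped into the group's effective range;
 * e.g. (illustrative values) a task requesting UCLAMP_MIN=800 in a group
 * whose effective uclamp range is [0, 512] ends up with an effective
 * request of 512.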
1592 */ 1593 if (task_group_is_autogroup(task_group(p))) 1594 return uc_req; 1595 if (task_group(p) == &root_task_group) 1596 return uc_req; 1597 1598 tg_min = task_group(p)->uclamp[UCLAMP_MIN].value; 1599 tg_max = task_group(p)->uclamp[UCLAMP_MAX].value; 1600 value = uc_req.value; 1601 value = clamp(value, tg_min, tg_max); 1602 uclamp_se_set(&uc_req, value, false); 1603 #endif 1604 1605 return uc_req; 1606 } 1607 1608 /* 1609 * The effective clamp bucket index of a task depends on, by increasing 1610 * priority: 1611 * - the task specific clamp value, when explicitly requested from userspace 1612 * - the task group effective clamp value, for tasks not either in the root 1613 * group or in an autogroup 1614 * - the system default clamp value, defined by the sysadmin 1615 */ 1616 static inline struct uclamp_se 1617 uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id) 1618 { 1619 struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id); 1620 struct uclamp_se uc_max = uclamp_default[clamp_id]; 1621 1622 /* System default restrictions always apply */ 1623 if (unlikely(uc_req.value > uc_max.value)) 1624 return uc_max; 1625 1626 return uc_req; 1627 } 1628 1629 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id) 1630 { 1631 struct uclamp_se uc_eff; 1632 1633 /* Task currently refcounted: use back-annotated (effective) value */ 1634 if (p->uclamp[clamp_id].active) 1635 return (unsigned long)p->uclamp[clamp_id].value; 1636 1637 uc_eff = uclamp_eff_get(p, clamp_id); 1638 1639 return (unsigned long)uc_eff.value; 1640 } 1641 1642 /* 1643 * When a task is enqueued on a rq, the clamp bucket currently defined by the 1644 * task's uclamp::bucket_id is refcounted on that rq. This also immediately 1645 * updates the rq's clamp value if required. 1646 * 1647 * Tasks can have a task-specific value requested from user-space, track 1648 * within each bucket the maximum value for tasks refcounted in it. 1649 * This "local max aggregation" allows to track the exact "requested" value 1650 * for each bucket when all its RUNNABLE tasks require the same clamp. 1651 */ 1652 static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p, 1653 enum uclamp_id clamp_id) 1654 { 1655 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id]; 1656 struct uclamp_se *uc_se = &p->uclamp[clamp_id]; 1657 struct uclamp_bucket *bucket; 1658 1659 lockdep_assert_rq_held(rq); 1660 1661 /* Update task effective clamp */ 1662 p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id); 1663 1664 bucket = &uc_rq->bucket[uc_se->bucket_id]; 1665 bucket->tasks++; 1666 uc_se->active = true; 1667 1668 uclamp_idle_reset(rq, clamp_id, uc_se->value); 1669 1670 /* 1671 * Local max aggregation: rq buckets always track the max 1672 * "requested" clamp value of its RUNNABLE tasks. 1673 */ 1674 if (bucket->tasks == 1 || uc_se->value > bucket->value) 1675 bucket->value = uc_se->value; 1676 1677 if (uc_se->value > uclamp_rq_get(rq, clamp_id)) 1678 uclamp_rq_set(rq, clamp_id, uc_se->value); 1679 } 1680 1681 /* 1682 * When a task is dequeued from a rq, the clamp bucket refcounted by the task 1683 * is released. If this is the last task reference counting the rq's max 1684 * active clamp value, then the rq's clamp value is updated. 1685 * 1686 * Both refcounted tasks and rq's cached clamp values are expected to be 1687 * always valid. If it's detected they are not, as defensive programming, 1688 * enforce the expected state and warn. 
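 *
 * Brief worked example of the "local max aggregation" (illustrative values):
 * with two RUNNABLE tasks holding UCLAMP_MIN requests of 200 and 512, the
 * rq clamp is 512; once the 512 task is dequeued and its bucket becomes
 * empty, uclamp_rq_max_value() recomputes the rq clamp from the remaining
 * buckets and it drops back towards 200.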
1689 */ 1690 static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p, 1691 enum uclamp_id clamp_id) 1692 { 1693 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id]; 1694 struct uclamp_se *uc_se = &p->uclamp[clamp_id]; 1695 struct uclamp_bucket *bucket; 1696 unsigned int bkt_clamp; 1697 unsigned int rq_clamp; 1698 1699 lockdep_assert_rq_held(rq); 1700 1701 /* 1702 * If sched_uclamp_used was enabled after task @p was enqueued, 1703 * we could end up with unbalanced call to uclamp_rq_dec_id(). 1704 * 1705 * In this case the uc_se->active flag should be false since no uclamp 1706 * accounting was performed at enqueue time and we can just return 1707 * here. 1708 * 1709 * Need to be careful of the following enqueue/dequeue ordering 1710 * problem too 1711 * 1712 * enqueue(taskA) 1713 * // sched_uclamp_used gets enabled 1714 * enqueue(taskB) 1715 * dequeue(taskA) 1716 * // Must not decrement bucket->tasks here 1717 * dequeue(taskB) 1718 * 1719 * where we could end up with stale data in uc_se and 1720 * bucket[uc_se->bucket_id]. 1721 * 1722 * The following check here eliminates the possibility of such race. 1723 */ 1724 if (unlikely(!uc_se->active)) 1725 return; 1726 1727 bucket = &uc_rq->bucket[uc_se->bucket_id]; 1728 1729 WARN_ON_ONCE(!bucket->tasks); 1730 if (likely(bucket->tasks)) 1731 bucket->tasks--; 1732 1733 uc_se->active = false; 1734 1735 /* 1736 * Keep "local max aggregation" simple and accept to (possibly) 1737 * overboost some RUNNABLE tasks in the same bucket. 1738 * The rq clamp bucket value is reset to its base value whenever 1739 * there are no more RUNNABLE tasks refcounting it. 1740 */ 1741 if (likely(bucket->tasks)) 1742 return; 1743 1744 rq_clamp = uclamp_rq_get(rq, clamp_id); 1745 /* 1746 * Defensive programming: this should never happen. If it happens, 1747 * e.g. due to future modification, warn and fix up the expected value. 1748 */ 1749 WARN_ON_ONCE(bucket->value > rq_clamp); 1750 if (bucket->value >= rq_clamp) { 1751 bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value); 1752 uclamp_rq_set(rq, clamp_id, bkt_clamp); 1753 } 1754 } 1755 1756 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p, int flags) 1757 { 1758 enum uclamp_id clamp_id; 1759 1760 /* 1761 * Avoid any overhead until uclamp is actually used by the userspace. 1762 * 1763 * The condition is constructed such that a NOP is generated when 1764 * sched_uclamp_used is disabled. 1765 */ 1766 if (!uclamp_is_used()) 1767 return; 1768 1769 if (unlikely(!p->sched_class->uclamp_enabled)) 1770 return; 1771 1772 /* Only inc the delayed task which being woken up. */ 1773 if (p->se.sched_delayed && !(flags & ENQUEUE_DELAYED)) 1774 return; 1775 1776 for_each_clamp_id(clamp_id) 1777 uclamp_rq_inc_id(rq, p, clamp_id); 1778 1779 /* Reset clamp idle holding when there is one RUNNABLE task */ 1780 if (rq->uclamp_flags & UCLAMP_FLAG_IDLE) 1781 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE; 1782 } 1783 1784 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) 1785 { 1786 enum uclamp_id clamp_id; 1787 1788 /* 1789 * Avoid any overhead until uclamp is actually used by the userspace. 1790 * 1791 * The condition is constructed such that a NOP is generated when 1792 * sched_uclamp_used is disabled. 
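 * (uclamp_is_used() is a static branch test on sched_uclamp_used, so the
 * disabled case compiles down to a patched-out jump/NOP.)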
1793 */ 1794 if (!uclamp_is_used()) 1795 return; 1796 1797 if (unlikely(!p->sched_class->uclamp_enabled)) 1798 return; 1799 1800 if (p->se.sched_delayed) 1801 return; 1802 1803 for_each_clamp_id(clamp_id) 1804 uclamp_rq_dec_id(rq, p, clamp_id); 1805 } 1806 1807 static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p, 1808 enum uclamp_id clamp_id) 1809 { 1810 if (!p->uclamp[clamp_id].active) 1811 return; 1812 1813 uclamp_rq_dec_id(rq, p, clamp_id); 1814 uclamp_rq_inc_id(rq, p, clamp_id); 1815 1816 /* 1817 * Make sure to clear the idle flag if we've transiently reached 0 1818 * active tasks on rq. 1819 */ 1820 if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE)) 1821 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE; 1822 } 1823 1824 static inline void 1825 uclamp_update_active(struct task_struct *p) 1826 { 1827 enum uclamp_id clamp_id; 1828 struct rq_flags rf; 1829 struct rq *rq; 1830 1831 /* 1832 * Lock the task and the rq where the task is (or was) queued. 1833 * 1834 * We might lock the (previous) rq of a !RUNNABLE task, but that's the 1835 * price to pay to safely serialize util_{min,max} updates with 1836 * enqueues, dequeues and migration operations. 1837 * This is the same locking schema used by __set_cpus_allowed_ptr(). 1838 */ 1839 rq = task_rq_lock(p, &rf); 1840 1841 /* 1842 * Setting the clamp bucket is serialized by task_rq_lock(). 1843 * If the task is not yet RUNNABLE and its task_struct is not 1844 * affecting a valid clamp bucket, the next time it's enqueued, 1845 * it will already see the updated clamp bucket value. 1846 */ 1847 for_each_clamp_id(clamp_id) 1848 uclamp_rq_reinc_id(rq, p, clamp_id); 1849 1850 task_rq_unlock(rq, p, &rf); 1851 } 1852 1853 #ifdef CONFIG_UCLAMP_TASK_GROUP 1854 static inline void 1855 uclamp_update_active_tasks(struct cgroup_subsys_state *css) 1856 { 1857 struct css_task_iter it; 1858 struct task_struct *p; 1859 1860 css_task_iter_start(css, 0, &it); 1861 while ((p = css_task_iter_next(&it))) 1862 uclamp_update_active(p); 1863 css_task_iter_end(&it); 1864 } 1865 1866 static void cpu_util_update_eff(struct cgroup_subsys_state *css); 1867 #endif 1868 1869 #ifdef CONFIG_SYSCTL 1870 #ifdef CONFIG_UCLAMP_TASK_GROUP 1871 static void uclamp_update_root_tg(void) 1872 { 1873 struct task_group *tg = &root_task_group; 1874 1875 uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN], 1876 sysctl_sched_uclamp_util_min, false); 1877 uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX], 1878 sysctl_sched_uclamp_util_max, false); 1879 1880 guard(rcu)(); 1881 cpu_util_update_eff(&root_task_group.css); 1882 } 1883 #else 1884 static void uclamp_update_root_tg(void) { } 1885 #endif 1886 1887 static void uclamp_sync_util_min_rt_default(void) 1888 { 1889 struct task_struct *g, *p; 1890 1891 /* 1892 * copy_process() sysctl_uclamp 1893 * uclamp_min_rt = X; 1894 * write_lock(&tasklist_lock) read_lock(&tasklist_lock) 1895 * // link thread smp_mb__after_spinlock() 1896 * write_unlock(&tasklist_lock) read_unlock(&tasklist_lock); 1897 * sched_post_fork() for_each_process_thread() 1898 * __uclamp_sync_rt() __uclamp_sync_rt() 1899 * 1900 * Ensures that either sched_post_fork() will observe the new 1901 * uclamp_min_rt or for_each_process_thread() will observe the new 1902 * task. 
1903 */ 1904 read_lock(&tasklist_lock); 1905 smp_mb__after_spinlock(); 1906 read_unlock(&tasklist_lock); 1907 1908 guard(rcu)(); 1909 for_each_process_thread(g, p) 1910 uclamp_update_util_min_rt_default(p); 1911 } 1912 1913 static int sysctl_sched_uclamp_handler(const struct ctl_table *table, int write, 1914 void *buffer, size_t *lenp, loff_t *ppos) 1915 { 1916 bool update_root_tg = false; 1917 int old_min, old_max, old_min_rt; 1918 int result; 1919 1920 guard(mutex)(&uclamp_mutex); 1921 1922 old_min = sysctl_sched_uclamp_util_min; 1923 old_max = sysctl_sched_uclamp_util_max; 1924 old_min_rt = sysctl_sched_uclamp_util_min_rt_default; 1925 1926 result = proc_dointvec(table, write, buffer, lenp, ppos); 1927 if (result) 1928 goto undo; 1929 if (!write) 1930 return 0; 1931 1932 if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max || 1933 sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE || 1934 sysctl_sched_uclamp_util_min_rt_default > SCHED_CAPACITY_SCALE) { 1935 1936 result = -EINVAL; 1937 goto undo; 1938 } 1939 1940 if (old_min != sysctl_sched_uclamp_util_min) { 1941 uclamp_se_set(&uclamp_default[UCLAMP_MIN], 1942 sysctl_sched_uclamp_util_min, false); 1943 update_root_tg = true; 1944 } 1945 if (old_max != sysctl_sched_uclamp_util_max) { 1946 uclamp_se_set(&uclamp_default[UCLAMP_MAX], 1947 sysctl_sched_uclamp_util_max, false); 1948 update_root_tg = true; 1949 } 1950 1951 if (update_root_tg) { 1952 sched_uclamp_enable(); 1953 uclamp_update_root_tg(); 1954 } 1955 1956 if (old_min_rt != sysctl_sched_uclamp_util_min_rt_default) { 1957 sched_uclamp_enable(); 1958 uclamp_sync_util_min_rt_default(); 1959 } 1960 1961 /* 1962 * We update all RUNNABLE tasks only when task groups are in use. 1963 * Otherwise, keep it simple and do just a lazy update at each next 1964 * task enqueue time. 1965 */ 1966 return 0; 1967 1968 undo: 1969 sysctl_sched_uclamp_util_min = old_min; 1970 sysctl_sched_uclamp_util_max = old_max; 1971 sysctl_sched_uclamp_util_min_rt_default = old_min_rt; 1972 return result; 1973 } 1974 #endif 1975 1976 static void uclamp_fork(struct task_struct *p) 1977 { 1978 enum uclamp_id clamp_id; 1979 1980 /* 1981 * We don't need to hold task_rq_lock() when updating p->uclamp_* here 1982 * as the task is still at its early fork stages. 
1983 */ 1984 for_each_clamp_id(clamp_id) 1985 p->uclamp[clamp_id].active = false; 1986 1987 if (likely(!p->sched_reset_on_fork)) 1988 return; 1989 1990 for_each_clamp_id(clamp_id) { 1991 uclamp_se_set(&p->uclamp_req[clamp_id], 1992 uclamp_none(clamp_id), false); 1993 } 1994 } 1995 1996 static void uclamp_post_fork(struct task_struct *p) 1997 { 1998 uclamp_update_util_min_rt_default(p); 1999 } 2000 2001 static void __init init_uclamp_rq(struct rq *rq) 2002 { 2003 enum uclamp_id clamp_id; 2004 struct uclamp_rq *uc_rq = rq->uclamp; 2005 2006 for_each_clamp_id(clamp_id) { 2007 uc_rq[clamp_id] = (struct uclamp_rq) { 2008 .value = uclamp_none(clamp_id) 2009 }; 2010 } 2011 2012 rq->uclamp_flags = UCLAMP_FLAG_IDLE; 2013 } 2014 2015 static void __init init_uclamp(void) 2016 { 2017 struct uclamp_se uc_max = {}; 2018 enum uclamp_id clamp_id; 2019 int cpu; 2020 2021 for_each_possible_cpu(cpu) 2022 init_uclamp_rq(cpu_rq(cpu)); 2023 2024 for_each_clamp_id(clamp_id) { 2025 uclamp_se_set(&init_task.uclamp_req[clamp_id], 2026 uclamp_none(clamp_id), false); 2027 } 2028 2029 /* System defaults allow max clamp values for both indexes */ 2030 uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false); 2031 for_each_clamp_id(clamp_id) { 2032 uclamp_default[clamp_id] = uc_max; 2033 #ifdef CONFIG_UCLAMP_TASK_GROUP 2034 root_task_group.uclamp_req[clamp_id] = uc_max; 2035 root_task_group.uclamp[clamp_id] = uc_max; 2036 #endif 2037 } 2038 } 2039 2040 #else /* !CONFIG_UCLAMP_TASK */ 2041 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p, int flags) { } 2042 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { } 2043 static inline void uclamp_fork(struct task_struct *p) { } 2044 static inline void uclamp_post_fork(struct task_struct *p) { } 2045 static inline void init_uclamp(void) { } 2046 #endif /* CONFIG_UCLAMP_TASK */ 2047 2048 bool sched_task_on_rq(struct task_struct *p) 2049 { 2050 return task_on_rq_queued(p); 2051 } 2052 2053 unsigned long get_wchan(struct task_struct *p) 2054 { 2055 unsigned long ip = 0; 2056 unsigned int state; 2057 2058 if (!p || p == current) 2059 return 0; 2060 2061 /* Only get wchan if task is blocked and we can keep it that way. */ 2062 raw_spin_lock_irq(&p->pi_lock); 2063 state = READ_ONCE(p->__state); 2064 smp_rmb(); /* see try_to_wake_up() */ 2065 if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq) 2066 ip = __get_wchan(p); 2067 raw_spin_unlock_irq(&p->pi_lock); 2068 2069 return ip; 2070 } 2071 2072 void enqueue_task(struct rq *rq, struct task_struct *p, int flags) 2073 { 2074 if (!(flags & ENQUEUE_NOCLOCK)) 2075 update_rq_clock(rq); 2076 2077 /* 2078 * Can be before ->enqueue_task() because uclamp considers the 2079 * ENQUEUE_DELAYED task before its ->sched_delayed gets cleared 2080 * in ->enqueue_task(). 2081 */ 2082 uclamp_rq_inc(rq, p, flags); 2083 2084 p->sched_class->enqueue_task(rq, p, flags); 2085 2086 psi_enqueue(p, flags); 2087 2088 if (!(flags & ENQUEUE_RESTORE)) 2089 sched_info_enqueue(rq, p); 2090 2091 if (sched_core_enabled(rq)) 2092 sched_core_enqueue(rq, p); 2093 } 2094 2095 /* 2096 * Must only return false when DEQUEUE_SLEEP. 
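 *
 * I.e. the sched class may defer a DEQUEUE_SLEEP dequeue and mark the
 * task ->sched_delayed instead of taking it off the runqueue; see the
 * comment above uclamp_rq_dec() below and block_task().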
2097 */ 2098 inline bool dequeue_task(struct rq *rq, struct task_struct *p, int flags) 2099 { 2100 if (sched_core_enabled(rq)) 2101 sched_core_dequeue(rq, p, flags); 2102 2103 if (!(flags & DEQUEUE_NOCLOCK)) 2104 update_rq_clock(rq); 2105 2106 if (!(flags & DEQUEUE_SAVE)) 2107 sched_info_dequeue(rq, p); 2108 2109 psi_dequeue(p, flags); 2110 2111 /* 2112 * Must be before ->dequeue_task() because ->dequeue_task() can 'fail' 2113 * and mark the task ->sched_delayed. 2114 */ 2115 uclamp_rq_dec(rq, p); 2116 return p->sched_class->dequeue_task(rq, p, flags); 2117 } 2118 2119 void activate_task(struct rq *rq, struct task_struct *p, int flags) 2120 { 2121 if (task_on_rq_migrating(p)) 2122 flags |= ENQUEUE_MIGRATED; 2123 if (flags & ENQUEUE_MIGRATED) 2124 sched_mm_cid_migrate_to(rq, p); 2125 2126 enqueue_task(rq, p, flags); 2127 2128 WRITE_ONCE(p->on_rq, TASK_ON_RQ_QUEUED); 2129 ASSERT_EXCLUSIVE_WRITER(p->on_rq); 2130 } 2131 2132 void deactivate_task(struct rq *rq, struct task_struct *p, int flags) 2133 { 2134 WARN_ON_ONCE(flags & DEQUEUE_SLEEP); 2135 2136 WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING); 2137 ASSERT_EXCLUSIVE_WRITER(p->on_rq); 2138 2139 /* 2140 * Code explicitly relies on TASK_ON_RQ_MIGRATING begin set *before* 2141 * dequeue_task() and cleared *after* enqueue_task(). 2142 */ 2143 2144 dequeue_task(rq, p, flags); 2145 } 2146 2147 static void block_task(struct rq *rq, struct task_struct *p, int flags) 2148 { 2149 if (dequeue_task(rq, p, DEQUEUE_SLEEP | flags)) 2150 __block_task(rq, p); 2151 } 2152 2153 /** 2154 * task_curr - is this task currently executing on a CPU? 2155 * @p: the task in question. 2156 * 2157 * Return: 1 if the task is currently executing. 0 otherwise. 2158 */ 2159 inline int task_curr(const struct task_struct *p) 2160 { 2161 return cpu_curr(task_cpu(p)) == p; 2162 } 2163 2164 /* 2165 * ->switching_to() is called with the pi_lock and rq_lock held and must not 2166 * mess with locking. 2167 */ 2168 void check_class_changing(struct rq *rq, struct task_struct *p, 2169 const struct sched_class *prev_class) 2170 { 2171 if (prev_class != p->sched_class && p->sched_class->switching_to) 2172 p->sched_class->switching_to(rq, p); 2173 } 2174 2175 /* 2176 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock, 2177 * use the balance_callback list if you want balancing. 2178 * 2179 * this means any call to check_class_changed() must be followed by a call to 2180 * balance_callback(). 2181 */ 2182 void check_class_changed(struct rq *rq, struct task_struct *p, 2183 const struct sched_class *prev_class, 2184 int oldprio) 2185 { 2186 if (prev_class != p->sched_class) { 2187 if (prev_class->switched_from) 2188 prev_class->switched_from(rq, p); 2189 2190 p->sched_class->switched_to(rq, p); 2191 } else if (oldprio != p->prio || dl_task(p)) 2192 p->sched_class->prio_changed(rq, p, oldprio); 2193 } 2194 2195 void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags) 2196 { 2197 struct task_struct *donor = rq->donor; 2198 2199 if (p->sched_class == donor->sched_class) 2200 donor->sched_class->wakeup_preempt(rq, p, flags); 2201 else if (sched_class_above(p->sched_class, donor->sched_class)) 2202 resched_curr(rq); 2203 2204 /* 2205 * A queue event has occurred, and we're going to schedule. In 2206 * this case, we can save a useless back to back clock update. 
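 * rq_clock_skip_update() only marks the rq clock; the redundant update
 * is then skipped once in the upcoming __schedule().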
2207 */ 2208 if (task_on_rq_queued(donor) && test_tsk_need_resched(rq->curr)) 2209 rq_clock_skip_update(rq); 2210 } 2211 2212 static __always_inline 2213 int __task_state_match(struct task_struct *p, unsigned int state) 2214 { 2215 if (READ_ONCE(p->__state) & state) 2216 return 1; 2217 2218 if (READ_ONCE(p->saved_state) & state) 2219 return -1; 2220 2221 return 0; 2222 } 2223 2224 static __always_inline 2225 int task_state_match(struct task_struct *p, unsigned int state) 2226 { 2227 /* 2228 * Serialize against current_save_and_set_rtlock_wait_state(), 2229 * current_restore_rtlock_saved_state(), and __refrigerator(). 2230 */ 2231 guard(raw_spinlock_irq)(&p->pi_lock); 2232 return __task_state_match(p, state); 2233 } 2234 2235 /* 2236 * wait_task_inactive - wait for a thread to unschedule. 2237 * 2238 * Wait for the thread to block in any of the states set in @match_state. 2239 * If it changes, i.e. @p might have woken up, then return zero. When we 2240 * succeed in waiting for @p to be off its CPU, we return a positive number 2241 * (its total switch count). If a second call a short while later returns the 2242 * same number, the caller can be sure that @p has remained unscheduled the 2243 * whole time. 2244 * 2245 * The caller must ensure that the task *will* unschedule sometime soon, 2246 * else this function might spin for a *long* time. This function can't 2247 * be called with interrupts off, or it may introduce deadlock with 2248 * smp_call_function() if an IPI is sent by the same process we are 2249 * waiting to become inactive. 2250 */ 2251 unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state) 2252 { 2253 int running, queued, match; 2254 struct rq_flags rf; 2255 unsigned long ncsw; 2256 struct rq *rq; 2257 2258 for (;;) { 2259 /* 2260 * We do the initial early heuristics without holding 2261 * any task-queue locks at all. We'll only try to get 2262 * the runqueue lock when things look like they will 2263 * work out! 2264 */ 2265 rq = task_rq(p); 2266 2267 /* 2268 * If the task is actively running on another CPU 2269 * still, just relax and busy-wait without holding 2270 * any locks. 2271 * 2272 * NOTE! Since we don't hold any locks, it's not 2273 * even sure that "rq" stays as the right runqueue! 2274 * But we don't care, since "task_on_cpu()" will 2275 * return false if the runqueue has changed and p 2276 * is actually now running somewhere else! 2277 */ 2278 while (task_on_cpu(rq, p)) { 2279 if (!task_state_match(p, match_state)) 2280 return 0; 2281 cpu_relax(); 2282 } 2283 2284 /* 2285 * Ok, time to look more closely! We need the rq 2286 * lock now, to be *sure*. If we're wrong, we'll 2287 * just go back and repeat. 2288 */ 2289 rq = task_rq_lock(p, &rf); 2290 /* 2291 * If task is sched_delayed, force dequeue it, to avoid always 2292 * hitting the tick timeout in the queued case 2293 */ 2294 if (p->se.sched_delayed) 2295 dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED); 2296 trace_sched_wait_task(p); 2297 running = task_on_cpu(rq, p); 2298 queued = task_on_rq_queued(p); 2299 ncsw = 0; 2300 if ((match = __task_state_match(p, match_state))) { 2301 /* 2302 * When matching on p->saved_state, consider this task 2303 * still queued so it will wait. 2304 */ 2305 if (match < 0) 2306 queued = 1; 2307 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ 2308 } 2309 task_rq_unlock(rq, p, &rf); 2310 2311 /* 2312 * If it changed from the expected state, bail out now. 
2313 */ 2314 if (unlikely(!ncsw)) 2315 break; 2316 2317 /* 2318 * Was it really running after all now that we 2319 * checked with the proper locks actually held? 2320 * 2321 * Oops. Go back and try again.. 2322 */ 2323 if (unlikely(running)) { 2324 cpu_relax(); 2325 continue; 2326 } 2327 2328 /* 2329 * It's not enough that it's not actively running, 2330 * it must be off the runqueue _entirely_, and not 2331 * preempted! 2332 * 2333 * So if it was still runnable (but just not actively 2334 * running right now), it's preempted, and we should 2335 * yield - it could be a while. 2336 */ 2337 if (unlikely(queued)) { 2338 ktime_t to = NSEC_PER_SEC / HZ; 2339 2340 set_current_state(TASK_UNINTERRUPTIBLE); 2341 schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD); 2342 continue; 2343 } 2344 2345 /* 2346 * Ahh, all good. It wasn't running, and it wasn't 2347 * runnable, which means that it will never become 2348 * running in the future either. We're all done! 2349 */ 2350 break; 2351 } 2352 2353 return ncsw; 2354 } 2355 2356 #ifdef CONFIG_SMP 2357 2358 static void 2359 __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx); 2360 2361 static void migrate_disable_switch(struct rq *rq, struct task_struct *p) 2362 { 2363 struct affinity_context ac = { 2364 .new_mask = cpumask_of(rq->cpu), 2365 .flags = SCA_MIGRATE_DISABLE, 2366 }; 2367 2368 if (likely(!p->migration_disabled)) 2369 return; 2370 2371 if (p->cpus_ptr != &p->cpus_mask) 2372 return; 2373 2374 /* 2375 * Violates locking rules! See comment in __do_set_cpus_allowed(). 2376 */ 2377 __do_set_cpus_allowed(p, &ac); 2378 } 2379 2380 void migrate_disable(void) 2381 { 2382 struct task_struct *p = current; 2383 2384 if (p->migration_disabled) { 2385 #ifdef CONFIG_DEBUG_PREEMPT 2386 /* 2387 *Warn about overflow half-way through the range. 2388 */ 2389 WARN_ON_ONCE((s16)p->migration_disabled < 0); 2390 #endif 2391 p->migration_disabled++; 2392 return; 2393 } 2394 2395 guard(preempt)(); 2396 this_rq()->nr_pinned++; 2397 p->migration_disabled = 1; 2398 } 2399 EXPORT_SYMBOL_GPL(migrate_disable); 2400 2401 void migrate_enable(void) 2402 { 2403 struct task_struct *p = current; 2404 struct affinity_context ac = { 2405 .new_mask = &p->cpus_mask, 2406 .flags = SCA_MIGRATE_ENABLE, 2407 }; 2408 2409 #ifdef CONFIG_DEBUG_PREEMPT 2410 /* 2411 * Check both overflow from migrate_disable() and superfluous 2412 * migrate_enable(). 2413 */ 2414 if (WARN_ON_ONCE((s16)p->migration_disabled <= 0)) 2415 return; 2416 #endif 2417 2418 if (p->migration_disabled > 1) { 2419 p->migration_disabled--; 2420 return; 2421 } 2422 2423 /* 2424 * Ensure stop_task runs either before or after this, and that 2425 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule(). 2426 */ 2427 guard(preempt)(); 2428 if (p->cpus_ptr != &p->cpus_mask) 2429 __set_cpus_allowed_ptr(p, &ac); 2430 /* 2431 * Mustn't clear migration_disabled() until cpus_ptr points back at the 2432 * regular cpus_mask, otherwise things that race (eg. 2433 * select_fallback_rq) get confused. 2434 */ 2435 barrier(); 2436 p->migration_disabled = 0; 2437 this_rq()->nr_pinned--; 2438 } 2439 EXPORT_SYMBOL_GPL(migrate_enable); 2440 2441 static inline bool rq_has_pinned_tasks(struct rq *rq) 2442 { 2443 return rq->nr_pinned; 2444 } 2445 2446 /* 2447 * Per-CPU kthreads are allowed to run on !active && online CPUs, see 2448 * __set_cpus_allowed_ptr() and select_fallback_rq(). 
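 *
 * In short, the checks below amount to:
 *  - tasks not in the CPU's allowed mask:	never
 *  - migration-disabled tasks:			cpu_online() is enough
 *  - userspace tasks:				need cpu_active()
 *  - per-CPU kthreads:				cpu_online() is enough
 *  - other kthreads:				cpu_online() && !cpu_dying()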
2449 */ 2450 static inline bool is_cpu_allowed(struct task_struct *p, int cpu) 2451 { 2452 /* When not in the task's cpumask, no point in looking further. */ 2453 if (!task_allowed_on_cpu(p, cpu)) 2454 return false; 2455 2456 /* migrate_disabled() must be allowed to finish. */ 2457 if (is_migration_disabled(p)) 2458 return cpu_online(cpu); 2459 2460 /* Non kernel threads are not allowed during either online or offline. */ 2461 if (!(p->flags & PF_KTHREAD)) 2462 return cpu_active(cpu); 2463 2464 /* KTHREAD_IS_PER_CPU is always allowed. */ 2465 if (kthread_is_per_cpu(p)) 2466 return cpu_online(cpu); 2467 2468 /* Regular kernel threads don't get to stay during offline. */ 2469 if (cpu_dying(cpu)) 2470 return false; 2471 2472 /* But are allowed during online. */ 2473 return cpu_online(cpu); 2474 } 2475 2476 /* 2477 * This is how migration works: 2478 * 2479 * 1) we invoke migration_cpu_stop() on the target CPU using 2480 * stop_one_cpu(). 2481 * 2) stopper starts to run (implicitly forcing the migrated thread 2482 * off the CPU) 2483 * 3) it checks whether the migrated task is still in the wrong runqueue. 2484 * 4) if it's in the wrong runqueue then the migration thread removes 2485 * it and puts it into the right queue. 2486 * 5) stopper completes and stop_one_cpu() returns and the migration 2487 * is done. 2488 */ 2489 2490 /* 2491 * move_queued_task - move a queued task to new rq. 2492 * 2493 * Returns (locked) new rq. Old rq's lock is released. 2494 */ 2495 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf, 2496 struct task_struct *p, int new_cpu) 2497 { 2498 lockdep_assert_rq_held(rq); 2499 2500 deactivate_task(rq, p, DEQUEUE_NOCLOCK); 2501 set_task_cpu(p, new_cpu); 2502 rq_unlock(rq, rf); 2503 2504 rq = cpu_rq(new_cpu); 2505 2506 rq_lock(rq, rf); 2507 WARN_ON_ONCE(task_cpu(p) != new_cpu); 2508 activate_task(rq, p, 0); 2509 wakeup_preempt(rq, p, 0); 2510 2511 return rq; 2512 } 2513 2514 struct migration_arg { 2515 struct task_struct *task; 2516 int dest_cpu; 2517 struct set_affinity_pending *pending; 2518 }; 2519 2520 /* 2521 * @refs: number of wait_for_completion() 2522 * @stop_pending: is @stop_work in use 2523 */ 2524 struct set_affinity_pending { 2525 refcount_t refs; 2526 unsigned int stop_pending; 2527 struct completion done; 2528 struct cpu_stop_work stop_work; 2529 struct migration_arg arg; 2530 }; 2531 2532 /* 2533 * Move (not current) task off this CPU, onto the destination CPU. We're doing 2534 * this because either it can't run here any more (set_cpus_allowed() 2535 * away from this CPU, or CPU going down), or because we're 2536 * attempting to rebalance this task on exec (sched_exec). 2537 * 2538 * So we race with normal scheduler movements, but that's OK, as long 2539 * as the task is no longer on this CPU. 2540 */ 2541 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf, 2542 struct task_struct *p, int dest_cpu) 2543 { 2544 /* Affinity changed (again). */ 2545 if (!is_cpu_allowed(p, dest_cpu)) 2546 return rq; 2547 2548 rq = move_queued_task(rq, rf, p, dest_cpu); 2549 2550 return rq; 2551 } 2552 2553 /* 2554 * migration_cpu_stop - this will be executed by a high-prio stopper thread 2555 * and performs thread migration by bumping thread off CPU then 2556 * 'pushing' onto another runqueue. 
2557 */ 2558 static int migration_cpu_stop(void *data) 2559 { 2560 struct migration_arg *arg = data; 2561 struct set_affinity_pending *pending = arg->pending; 2562 struct task_struct *p = arg->task; 2563 struct rq *rq = this_rq(); 2564 bool complete = false; 2565 struct rq_flags rf; 2566 2567 /* 2568 * The original target CPU might have gone down and we might 2569 * be on another CPU but it doesn't matter. 2570 */ 2571 local_irq_save(rf.flags); 2572 /* 2573 * We need to explicitly wake pending tasks before running 2574 * __migrate_task() such that we will not miss enforcing cpus_ptr 2575 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test. 2576 */ 2577 flush_smp_call_function_queue(); 2578 2579 raw_spin_lock(&p->pi_lock); 2580 rq_lock(rq, &rf); 2581 2582 /* 2583 * If we were passed a pending, then ->stop_pending was set, thus 2584 * p->migration_pending must have remained stable. 2585 */ 2586 WARN_ON_ONCE(pending && pending != p->migration_pending); 2587 2588 /* 2589 * If task_rq(p) != rq, it cannot be migrated here, because we're 2590 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because 2591 * we're holding p->pi_lock. 2592 */ 2593 if (task_rq(p) == rq) { 2594 if (is_migration_disabled(p)) 2595 goto out; 2596 2597 if (pending) { 2598 p->migration_pending = NULL; 2599 complete = true; 2600 2601 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) 2602 goto out; 2603 } 2604 2605 if (task_on_rq_queued(p)) { 2606 update_rq_clock(rq); 2607 rq = __migrate_task(rq, &rf, p, arg->dest_cpu); 2608 } else { 2609 p->wake_cpu = arg->dest_cpu; 2610 } 2611 2612 /* 2613 * XXX __migrate_task() can fail, at which point we might end 2614 * up running on a dodgy CPU, AFAICT this can only happen 2615 * during CPU hotplug, at which point we'll get pushed out 2616 * anyway, so it's probably not a big deal. 2617 */ 2618 2619 } else if (pending) { 2620 /* 2621 * This happens when we get migrated between migrate_enable()'s 2622 * preempt_enable() and scheduling the stopper task. At that 2623 * point we're a regular task again and not current anymore. 2624 * 2625 * A !PREEMPT kernel has a giant hole here, which makes it far 2626 * more likely. 2627 */ 2628 2629 /* 2630 * The task moved before the stopper got to run. We're holding 2631 * ->pi_lock, so the allowed mask is stable - if it got 2632 * somewhere allowed, we're done. 2633 */ 2634 if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) { 2635 p->migration_pending = NULL; 2636 complete = true; 2637 goto out; 2638 } 2639 2640 /* 2641 * When migrate_enable() hits a rq mis-match we can't reliably 2642 * determine is_migration_disabled() and so have to chase after 2643 * it. 
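 * "Chasing" here means re-arming the stopper on the task's current CPU
 * via the stop_one_cpu_nowait() call below.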
2644 */ 2645 WARN_ON_ONCE(!pending->stop_pending); 2646 preempt_disable(); 2647 task_rq_unlock(rq, p, &rf); 2648 stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop, 2649 &pending->arg, &pending->stop_work); 2650 preempt_enable(); 2651 return 0; 2652 } 2653 out: 2654 if (pending) 2655 pending->stop_pending = false; 2656 task_rq_unlock(rq, p, &rf); 2657 2658 if (complete) 2659 complete_all(&pending->done); 2660 2661 return 0; 2662 } 2663 2664 int push_cpu_stop(void *arg) 2665 { 2666 struct rq *lowest_rq = NULL, *rq = this_rq(); 2667 struct task_struct *p = arg; 2668 2669 raw_spin_lock_irq(&p->pi_lock); 2670 raw_spin_rq_lock(rq); 2671 2672 if (task_rq(p) != rq) 2673 goto out_unlock; 2674 2675 if (is_migration_disabled(p)) { 2676 p->migration_flags |= MDF_PUSH; 2677 goto out_unlock; 2678 } 2679 2680 p->migration_flags &= ~MDF_PUSH; 2681 2682 if (p->sched_class->find_lock_rq) 2683 lowest_rq = p->sched_class->find_lock_rq(p, rq); 2684 2685 if (!lowest_rq) 2686 goto out_unlock; 2687 2688 // XXX validate p is still the highest prio task 2689 if (task_rq(p) == rq) { 2690 move_queued_task_locked(rq, lowest_rq, p); 2691 resched_curr(lowest_rq); 2692 } 2693 2694 double_unlock_balance(rq, lowest_rq); 2695 2696 out_unlock: 2697 rq->push_busy = false; 2698 raw_spin_rq_unlock(rq); 2699 raw_spin_unlock_irq(&p->pi_lock); 2700 2701 put_task_struct(p); 2702 return 0; 2703 } 2704 2705 /* 2706 * sched_class::set_cpus_allowed must do the below, but is not required to 2707 * actually call this function. 2708 */ 2709 void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx) 2710 { 2711 if (ctx->flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) { 2712 p->cpus_ptr = ctx->new_mask; 2713 return; 2714 } 2715 2716 cpumask_copy(&p->cpus_mask, ctx->new_mask); 2717 p->nr_cpus_allowed = cpumask_weight(ctx->new_mask); 2718 2719 /* 2720 * Swap in a new user_cpus_ptr if SCA_USER flag set 2721 */ 2722 if (ctx->flags & SCA_USER) 2723 swap(p->user_cpus_ptr, ctx->user_mask); 2724 } 2725 2726 static void 2727 __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx) 2728 { 2729 struct rq *rq = task_rq(p); 2730 bool queued, running; 2731 2732 /* 2733 * This here violates the locking rules for affinity, since we're only 2734 * supposed to change these variables while holding both rq->lock and 2735 * p->pi_lock. 2736 * 2737 * HOWEVER, it magically works, because ttwu() is the only code that 2738 * accesses these variables under p->pi_lock and only does so after 2739 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule() 2740 * before finish_task(). 2741 * 2742 * XXX do further audits, this smells like something putrid. 2743 */ 2744 if (ctx->flags & SCA_MIGRATE_DISABLE) 2745 WARN_ON_ONCE(!p->on_cpu); 2746 else 2747 lockdep_assert_held(&p->pi_lock); 2748 2749 queued = task_on_rq_queued(p); 2750 running = task_current_donor(rq, p); 2751 2752 if (queued) { 2753 /* 2754 * Because __kthread_bind() calls this on blocked tasks without 2755 * holding rq->lock. 2756 */ 2757 lockdep_assert_rq_held(rq); 2758 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); 2759 } 2760 if (running) 2761 put_prev_task(rq, p); 2762 2763 p->sched_class->set_cpus_allowed(p, ctx); 2764 mm_set_cpus_allowed(p->mm, ctx->new_mask); 2765 2766 if (queued) 2767 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); 2768 if (running) 2769 set_next_task(rq, p); 2770 } 2771 2772 /* 2773 * Used for kthread_bind() and select_fallback_rq(), in both cases the user 2774 * affinity (if any) should be destroyed too. 
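 *
 * For instance, __kthread_bind_mask() ends up doing roughly (illustrative
 * sketch, see kernel/kthread.c):
 *
 *	wait_task_inactive(p, state);
 *	raw_spin_lock_irqsave(&p->pi_lock, flags);
 *	do_set_cpus_allowed(p, mask);
 *	p->flags |= PF_NO_SETAFFINITY;
 *	raw_spin_unlock_irqrestore(&p->pi_lock, flags);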
2775 */ 2776 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) 2777 { 2778 struct affinity_context ac = { 2779 .new_mask = new_mask, 2780 .user_mask = NULL, 2781 .flags = SCA_USER, /* clear the user requested mask */ 2782 }; 2783 union cpumask_rcuhead { 2784 cpumask_t cpumask; 2785 struct rcu_head rcu; 2786 }; 2787 2788 __do_set_cpus_allowed(p, &ac); 2789 2790 /* 2791 * Because this is called with p->pi_lock held, it is not possible 2792 * to use kfree() here (when PREEMPT_RT=y), therefore punt to using 2793 * kfree_rcu(). 2794 */ 2795 kfree_rcu((union cpumask_rcuhead *)ac.user_mask, rcu); 2796 } 2797 2798 int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, 2799 int node) 2800 { 2801 cpumask_t *user_mask; 2802 unsigned long flags; 2803 2804 /* 2805 * Always clear dst->user_cpus_ptr first as their user_cpus_ptr's 2806 * may differ by now due to racing. 2807 */ 2808 dst->user_cpus_ptr = NULL; 2809 2810 /* 2811 * This check is racy and losing the race is a valid situation. 2812 * It is not worth the extra overhead of taking the pi_lock on 2813 * every fork/clone. 2814 */ 2815 if (data_race(!src->user_cpus_ptr)) 2816 return 0; 2817 2818 user_mask = alloc_user_cpus_ptr(node); 2819 if (!user_mask) 2820 return -ENOMEM; 2821 2822 /* 2823 * Use pi_lock to protect content of user_cpus_ptr 2824 * 2825 * Though unlikely, user_cpus_ptr can be reset to NULL by a concurrent 2826 * do_set_cpus_allowed(). 2827 */ 2828 raw_spin_lock_irqsave(&src->pi_lock, flags); 2829 if (src->user_cpus_ptr) { 2830 swap(dst->user_cpus_ptr, user_mask); 2831 cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr); 2832 } 2833 raw_spin_unlock_irqrestore(&src->pi_lock, flags); 2834 2835 if (unlikely(user_mask)) 2836 kfree(user_mask); 2837 2838 return 0; 2839 } 2840 2841 static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p) 2842 { 2843 struct cpumask *user_mask = NULL; 2844 2845 swap(p->user_cpus_ptr, user_mask); 2846 2847 return user_mask; 2848 } 2849 2850 void release_user_cpus_ptr(struct task_struct *p) 2851 { 2852 kfree(clear_user_cpus_ptr(p)); 2853 } 2854 2855 /* 2856 * This function is wildly self concurrent; here be dragons. 2857 * 2858 * 2859 * When given a valid mask, __set_cpus_allowed_ptr() must block until the 2860 * designated task is enqueued on an allowed CPU. If that task is currently 2861 * running, we have to kick it out using the CPU stopper. 2862 * 2863 * Migrate-Disable comes along and tramples all over our nice sandcastle. 2864 * Consider: 2865 * 2866 * Initial conditions: P0->cpus_mask = [0, 1] 2867 * 2868 * P0@CPU0 P1 2869 * 2870 * migrate_disable(); 2871 * <preempted> 2872 * set_cpus_allowed_ptr(P0, [1]); 2873 * 2874 * P1 *cannot* return from this set_cpus_allowed_ptr() call until P0 executes 2875 * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region). 2876 * This means we need the following scheme: 2877 * 2878 * P0@CPU0 P1 2879 * 2880 * migrate_disable(); 2881 * <preempted> 2882 * set_cpus_allowed_ptr(P0, [1]); 2883 * <blocks> 2884 * <resumes> 2885 * migrate_enable(); 2886 * __set_cpus_allowed_ptr(); 2887 * <wakes local stopper> 2888 * `--> <woken on migration completion> 2889 * 2890 * Now the fun stuff: there may be several P1-like tasks, i.e. multiple 2891 * concurrent set_cpus_allowed_ptr(P0, [*]) calls. CPU affinity changes of any 2892 * task p are serialized by p->pi_lock, which we can leverage: the one that 2893 * should come into effect at the end of the Migrate-Disable region is the last 2894 * one. 
This means we only need to track a single cpumask (i.e. p->cpus_mask), 2895 * but we still need to properly signal those waiting tasks at the appropriate 2896 * moment. 2897 * 2898 * This is implemented using struct set_affinity_pending. The first 2899 * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will 2900 * setup an instance of that struct and install it on the targeted task_struct. 2901 * Any and all further callers will reuse that instance. Those then wait for 2902 * a completion signaled at the tail of the CPU stopper callback (1), triggered 2903 * on the end of the Migrate-Disable region (i.e. outermost migrate_enable()). 2904 * 2905 * 2906 * (1) In the cases covered above. There is one more where the completion is 2907 * signaled within affine_move_task() itself: when a subsequent affinity request 2908 * occurs after the stopper bailed out due to the targeted task still being 2909 * Migrate-Disable. Consider: 2910 * 2911 * Initial conditions: P0->cpus_mask = [0, 1] 2912 * 2913 * CPU0 P1 P2 2914 * <P0> 2915 * migrate_disable(); 2916 * <preempted> 2917 * set_cpus_allowed_ptr(P0, [1]); 2918 * <blocks> 2919 * <migration/0> 2920 * migration_cpu_stop() 2921 * is_migration_disabled() 2922 * <bails> 2923 * set_cpus_allowed_ptr(P0, [0, 1]); 2924 * <signal completion> 2925 * <awakes> 2926 * 2927 * Note that the above is safe vs a concurrent migrate_enable(), as any 2928 * pending affinity completion is preceded by an uninstallation of 2929 * p->migration_pending done with p->pi_lock held. 2930 */ 2931 static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf, 2932 int dest_cpu, unsigned int flags) 2933 __releases(rq->lock) 2934 __releases(p->pi_lock) 2935 { 2936 struct set_affinity_pending my_pending = { }, *pending = NULL; 2937 bool stop_pending, complete = false; 2938 2939 /* Can the task run on the task's current CPU? If so, we're done */ 2940 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) { 2941 struct task_struct *push_task = NULL; 2942 2943 if ((flags & SCA_MIGRATE_ENABLE) && 2944 (p->migration_flags & MDF_PUSH) && !rq->push_busy) { 2945 rq->push_busy = true; 2946 push_task = get_task_struct(p); 2947 } 2948 2949 /* 2950 * If there are pending waiters, but no pending stop_work, 2951 * then complete now. 2952 */ 2953 pending = p->migration_pending; 2954 if (pending && !pending->stop_pending) { 2955 p->migration_pending = NULL; 2956 complete = true; 2957 } 2958 2959 preempt_disable(); 2960 task_rq_unlock(rq, p, rf); 2961 if (push_task) { 2962 stop_one_cpu_nowait(rq->cpu, push_cpu_stop, 2963 p, &rq->push_work); 2964 } 2965 preempt_enable(); 2966 2967 if (complete) 2968 complete_all(&pending->done); 2969 2970 return 0; 2971 } 2972 2973 if (!(flags & SCA_MIGRATE_ENABLE)) { 2974 /* serialized by p->pi_lock */ 2975 if (!p->migration_pending) { 2976 /* Install the request */ 2977 refcount_set(&my_pending.refs, 1); 2978 init_completion(&my_pending.done); 2979 my_pending.arg = (struct migration_arg) { 2980 .task = p, 2981 .dest_cpu = dest_cpu, 2982 .pending = &my_pending, 2983 }; 2984 2985 p->migration_pending = &my_pending; 2986 } else { 2987 pending = p->migration_pending; 2988 refcount_inc(&pending->refs); 2989 /* 2990 * Affinity has changed, but we've already installed a 2991 * pending. migration_cpu_stop() *must* see this, else 2992 * we risk a completion of the pending despite having a 2993 * task on a disallowed CPU. 2994 * 2995 * Serialized by p->pi_lock, so this is safe. 
2996 */ 2997 pending->arg.dest_cpu = dest_cpu; 2998 } 2999 } 3000 pending = p->migration_pending; 3001 /* 3002 * - !MIGRATE_ENABLE: 3003 * we'll have installed a pending if there wasn't one already. 3004 * 3005 * - MIGRATE_ENABLE: 3006 * we're here because the current CPU isn't matching anymore, 3007 * the only way that can happen is because of a concurrent 3008 * set_cpus_allowed_ptr() call, which should then still be 3009 * pending completion. 3010 * 3011 * Either way, we really should have a @pending here. 3012 */ 3013 if (WARN_ON_ONCE(!pending)) { 3014 task_rq_unlock(rq, p, rf); 3015 return -EINVAL; 3016 } 3017 3018 if (task_on_cpu(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) { 3019 /* 3020 * MIGRATE_ENABLE gets here because 'p == current', but for 3021 * anything else we cannot do is_migration_disabled(), punt 3022 * and have the stopper function handle it all race-free. 3023 */ 3024 stop_pending = pending->stop_pending; 3025 if (!stop_pending) 3026 pending->stop_pending = true; 3027 3028 if (flags & SCA_MIGRATE_ENABLE) 3029 p->migration_flags &= ~MDF_PUSH; 3030 3031 preempt_disable(); 3032 task_rq_unlock(rq, p, rf); 3033 if (!stop_pending) { 3034 stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop, 3035 &pending->arg, &pending->stop_work); 3036 } 3037 preempt_enable(); 3038 3039 if (flags & SCA_MIGRATE_ENABLE) 3040 return 0; 3041 } else { 3042 3043 if (!is_migration_disabled(p)) { 3044 if (task_on_rq_queued(p)) 3045 rq = move_queued_task(rq, rf, p, dest_cpu); 3046 3047 if (!pending->stop_pending) { 3048 p->migration_pending = NULL; 3049 complete = true; 3050 } 3051 } 3052 task_rq_unlock(rq, p, rf); 3053 3054 if (complete) 3055 complete_all(&pending->done); 3056 } 3057 3058 wait_for_completion(&pending->done); 3059 3060 if (refcount_dec_and_test(&pending->refs)) 3061 wake_up_var(&pending->refs); /* No UaF, just an address */ 3062 3063 /* 3064 * Block the original owner of &pending until all subsequent callers 3065 * have seen the completion and decremented the refcount 3066 */ 3067 wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs)); 3068 3069 /* ARGH */ 3070 WARN_ON_ONCE(my_pending.stop_pending); 3071 3072 return 0; 3073 } 3074 3075 /* 3076 * Called with both p->pi_lock and rq->lock held; drops both before returning. 3077 */ 3078 static int __set_cpus_allowed_ptr_locked(struct task_struct *p, 3079 struct affinity_context *ctx, 3080 struct rq *rq, 3081 struct rq_flags *rf) 3082 __releases(rq->lock) 3083 __releases(p->pi_lock) 3084 { 3085 const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p); 3086 const struct cpumask *cpu_valid_mask = cpu_active_mask; 3087 bool kthread = p->flags & PF_KTHREAD; 3088 unsigned int dest_cpu; 3089 int ret = 0; 3090 3091 update_rq_clock(rq); 3092 3093 if (kthread || is_migration_disabled(p)) { 3094 /* 3095 * Kernel threads are allowed on online && !active CPUs, 3096 * however, during cpu-hot-unplug, even these might get pushed 3097 * away if not KTHREAD_IS_PER_CPU. 3098 * 3099 * Specifically, migration_disabled() tasks must not fail the 3100 * cpumask_any_and_distribute() pick below, esp. so on 3101 * SCA_MIGRATE_ENABLE, otherwise we'll not call 3102 * set_cpus_allowed_common() and actually reset p->cpus_ptr. 3103 */ 3104 cpu_valid_mask = cpu_online_mask; 3105 } 3106 3107 if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) { 3108 ret = -EINVAL; 3109 goto out; 3110 } 3111 3112 /* 3113 * Must re-check here, to close a race against __kthread_bind(), 3114 * sched_setaffinity() is not guaranteed to observe the flag. 
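 * PF_NO_SETAFFINITY tasks (per-CPU kthreads and the like) must never
 * have their affinity changed on behalf of userspace, hence the -EINVAL
 * for SCA_CHECK callers below.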
3115 */ 3116 if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) { 3117 ret = -EINVAL; 3118 goto out; 3119 } 3120 3121 if (!(ctx->flags & SCA_MIGRATE_ENABLE)) { 3122 if (cpumask_equal(&p->cpus_mask, ctx->new_mask)) { 3123 if (ctx->flags & SCA_USER) 3124 swap(p->user_cpus_ptr, ctx->user_mask); 3125 goto out; 3126 } 3127 3128 if (WARN_ON_ONCE(p == current && 3129 is_migration_disabled(p) && 3130 !cpumask_test_cpu(task_cpu(p), ctx->new_mask))) { 3131 ret = -EBUSY; 3132 goto out; 3133 } 3134 } 3135 3136 /* 3137 * Picking a ~random cpu helps in cases where we are changing affinity 3138 * for groups of tasks (ie. cpuset), so that load balancing is not 3139 * immediately required to distribute the tasks within their new mask. 3140 */ 3141 dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, ctx->new_mask); 3142 if (dest_cpu >= nr_cpu_ids) { 3143 ret = -EINVAL; 3144 goto out; 3145 } 3146 3147 __do_set_cpus_allowed(p, ctx); 3148 3149 return affine_move_task(rq, p, rf, dest_cpu, ctx->flags); 3150 3151 out: 3152 task_rq_unlock(rq, p, rf); 3153 3154 return ret; 3155 } 3156 3157 /* 3158 * Change a given task's CPU affinity. Migrate the thread to a 3159 * proper CPU and schedule it away if the CPU it's executing on 3160 * is removed from the allowed bitmask. 3161 * 3162 * NOTE: the caller must have a valid reference to the task, the 3163 * task must not exit() & deallocate itself prematurely. The 3164 * call is not atomic; no spinlocks may be held. 3165 */ 3166 int __set_cpus_allowed_ptr(struct task_struct *p, struct affinity_context *ctx) 3167 { 3168 struct rq_flags rf; 3169 struct rq *rq; 3170 3171 rq = task_rq_lock(p, &rf); 3172 /* 3173 * Masking should be skipped if SCA_USER or any of the SCA_MIGRATE_* 3174 * flags are set. 3175 */ 3176 if (p->user_cpus_ptr && 3177 !(ctx->flags & (SCA_USER | SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) && 3178 cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr)) 3179 ctx->new_mask = rq->scratch_mask; 3180 3181 return __set_cpus_allowed_ptr_locked(p, ctx, rq, &rf); 3182 } 3183 3184 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) 3185 { 3186 struct affinity_context ac = { 3187 .new_mask = new_mask, 3188 .flags = 0, 3189 }; 3190 3191 return __set_cpus_allowed_ptr(p, &ac); 3192 } 3193 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); 3194 3195 /* 3196 * Change a given task's CPU affinity to the intersection of its current 3197 * affinity mask and @subset_mask, writing the resulting mask to @new_mask. 3198 * If user_cpus_ptr is defined, use it as the basis for restricting CPU 3199 * affinity or use cpu_online_mask instead. 3200 * 3201 * If the resulting mask is empty, leave the affinity unchanged and return 3202 * -EINVAL. 3203 */ 3204 static int restrict_cpus_allowed_ptr(struct task_struct *p, 3205 struct cpumask *new_mask, 3206 const struct cpumask *subset_mask) 3207 { 3208 struct affinity_context ac = { 3209 .new_mask = new_mask, 3210 .flags = 0, 3211 }; 3212 struct rq_flags rf; 3213 struct rq *rq; 3214 int err; 3215 3216 rq = task_rq_lock(p, &rf); 3217 3218 /* 3219 * Forcefully restricting the affinity of a deadline task is 3220 * likely to cause problems, so fail and noisily override the 3221 * mask entirely. 
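 * The error return makes force_compatible_cpus_allowed_ptr() fall back
 * to the cpuset hierarchy and emit the "Overriding affinity" warning.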
3222 */ 3223 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) { 3224 err = -EPERM; 3225 goto err_unlock; 3226 } 3227 3228 if (!cpumask_and(new_mask, task_user_cpus(p), subset_mask)) { 3229 err = -EINVAL; 3230 goto err_unlock; 3231 } 3232 3233 return __set_cpus_allowed_ptr_locked(p, &ac, rq, &rf); 3234 3235 err_unlock: 3236 task_rq_unlock(rq, p, &rf); 3237 return err; 3238 } 3239 3240 /* 3241 * Restrict the CPU affinity of task @p so that it is a subset of 3242 * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the 3243 * old affinity mask. If the resulting mask is empty, we warn and walk 3244 * up the cpuset hierarchy until we find a suitable mask. 3245 */ 3246 void force_compatible_cpus_allowed_ptr(struct task_struct *p) 3247 { 3248 cpumask_var_t new_mask; 3249 const struct cpumask *override_mask = task_cpu_possible_mask(p); 3250 3251 alloc_cpumask_var(&new_mask, GFP_KERNEL); 3252 3253 /* 3254 * __migrate_task() can fail silently in the face of concurrent 3255 * offlining of the chosen destination CPU, so take the hotplug 3256 * lock to ensure that the migration succeeds. 3257 */ 3258 cpus_read_lock(); 3259 if (!cpumask_available(new_mask)) 3260 goto out_set_mask; 3261 3262 if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask)) 3263 goto out_free_mask; 3264 3265 /* 3266 * We failed to find a valid subset of the affinity mask for the 3267 * task, so override it based on its cpuset hierarchy. 3268 */ 3269 cpuset_cpus_allowed(p, new_mask); 3270 override_mask = new_mask; 3271 3272 out_set_mask: 3273 if (printk_ratelimit()) { 3274 printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n", 3275 task_pid_nr(p), p->comm, 3276 cpumask_pr_args(override_mask)); 3277 } 3278 3279 WARN_ON(set_cpus_allowed_ptr(p, override_mask)); 3280 out_free_mask: 3281 cpus_read_unlock(); 3282 free_cpumask_var(new_mask); 3283 } 3284 3285 /* 3286 * Restore the affinity of a task @p which was previously restricted by a 3287 * call to force_compatible_cpus_allowed_ptr(). 3288 * 3289 * It is the caller's responsibility to serialise this with any calls to 3290 * force_compatible_cpus_allowed_ptr(@p). 3291 */ 3292 void relax_compatible_cpus_allowed_ptr(struct task_struct *p) 3293 { 3294 struct affinity_context ac = { 3295 .new_mask = task_user_cpus(p), 3296 .flags = 0, 3297 }; 3298 int ret; 3299 3300 /* 3301 * Try to restore the old affinity mask with __sched_setaffinity(). 3302 * Cpuset masking will be done there too. 3303 */ 3304 ret = __sched_setaffinity(p, &ac); 3305 WARN_ON_ONCE(ret); 3306 } 3307 3308 void set_task_cpu(struct task_struct *p, unsigned int new_cpu) 3309 { 3310 unsigned int state = READ_ONCE(p->__state); 3311 3312 /* 3313 * We should never call set_task_cpu() on a blocked task, 3314 * ttwu() will sort out the placement. 3315 */ 3316 WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq); 3317 3318 /* 3319 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING, 3320 * because schedstat_wait_{start,end} rebase migrating task's wait_start 3321 * time relying on p->on_rq. 3322 */ 3323 WARN_ON_ONCE(state == TASK_RUNNING && 3324 p->sched_class == &fair_sched_class && 3325 (p->on_rq && !task_on_rq_migrating(p))); 3326 3327 #ifdef CONFIG_LOCKDEP 3328 /* 3329 * The caller should hold either p->pi_lock or rq->lock, when changing 3330 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks. 3331 * 3332 * sched_move_task() holds both and thus holding either pins the cgroup, 3333 * see task_group(). 
3334 * 3335 * Furthermore, all task_rq users should acquire both locks, see 3336 * task_rq_lock(). 3337 */ 3338 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) || 3339 lockdep_is_held(__rq_lockp(task_rq(p))))); 3340 #endif 3341 /* 3342 * Clearly, migrating tasks to offline CPUs is a fairly daft thing. 3343 */ 3344 WARN_ON_ONCE(!cpu_online(new_cpu)); 3345 3346 WARN_ON_ONCE(is_migration_disabled(p)); 3347 3348 trace_sched_migrate_task(p, new_cpu); 3349 3350 if (task_cpu(p) != new_cpu) { 3351 if (p->sched_class->migrate_task_rq) 3352 p->sched_class->migrate_task_rq(p, new_cpu); 3353 p->se.nr_migrations++; 3354 rseq_migrate(p); 3355 sched_mm_cid_migrate_from(p); 3356 perf_event_task_migrate(p); 3357 } 3358 3359 __set_task_cpu(p, new_cpu); 3360 } 3361 3362 #ifdef CONFIG_NUMA_BALANCING 3363 static void __migrate_swap_task(struct task_struct *p, int cpu) 3364 { 3365 if (task_on_rq_queued(p)) { 3366 struct rq *src_rq, *dst_rq; 3367 struct rq_flags srf, drf; 3368 3369 src_rq = task_rq(p); 3370 dst_rq = cpu_rq(cpu); 3371 3372 rq_pin_lock(src_rq, &srf); 3373 rq_pin_lock(dst_rq, &drf); 3374 3375 move_queued_task_locked(src_rq, dst_rq, p); 3376 wakeup_preempt(dst_rq, p, 0); 3377 3378 rq_unpin_lock(dst_rq, &drf); 3379 rq_unpin_lock(src_rq, &srf); 3380 3381 } else { 3382 /* 3383 * Task isn't running anymore; make it appear like we migrated 3384 * it before it went to sleep. This means on wakeup we make the 3385 * previous CPU our target instead of where it really is. 3386 */ 3387 p->wake_cpu = cpu; 3388 } 3389 } 3390 3391 struct migration_swap_arg { 3392 struct task_struct *src_task, *dst_task; 3393 int src_cpu, dst_cpu; 3394 }; 3395 3396 static int migrate_swap_stop(void *data) 3397 { 3398 struct migration_swap_arg *arg = data; 3399 struct rq *src_rq, *dst_rq; 3400 3401 if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu)) 3402 return -EAGAIN; 3403 3404 src_rq = cpu_rq(arg->src_cpu); 3405 dst_rq = cpu_rq(arg->dst_cpu); 3406 3407 guard(double_raw_spinlock)(&arg->src_task->pi_lock, &arg->dst_task->pi_lock); 3408 guard(double_rq_lock)(src_rq, dst_rq); 3409 3410 if (task_cpu(arg->dst_task) != arg->dst_cpu) 3411 return -EAGAIN; 3412 3413 if (task_cpu(arg->src_task) != arg->src_cpu) 3414 return -EAGAIN; 3415 3416 if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr)) 3417 return -EAGAIN; 3418 3419 if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr)) 3420 return -EAGAIN; 3421 3422 __migrate_swap_task(arg->src_task, arg->dst_cpu); 3423 __migrate_swap_task(arg->dst_task, arg->src_cpu); 3424 3425 return 0; 3426 } 3427 3428 /* 3429 * Cross migrate two tasks 3430 */ 3431 int migrate_swap(struct task_struct *cur, struct task_struct *p, 3432 int target_cpu, int curr_cpu) 3433 { 3434 struct migration_swap_arg arg; 3435 int ret = -EINVAL; 3436 3437 arg = (struct migration_swap_arg){ 3438 .src_task = cur, 3439 .src_cpu = curr_cpu, 3440 .dst_task = p, 3441 .dst_cpu = target_cpu, 3442 }; 3443 3444 if (arg.src_cpu == arg.dst_cpu) 3445 goto out; 3446 3447 /* 3448 * These three tests are all lockless; this is OK since all of them 3449 * will be re-checked with proper locks held further down the line. 
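 * (migrate_swap_stop() repeats them with both pi_locks and both rq
 * locks held)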
3450 */ 3451 if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu)) 3452 goto out; 3453 3454 if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr)) 3455 goto out; 3456 3457 if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr)) 3458 goto out; 3459 3460 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu); 3461 ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg); 3462 3463 out: 3464 return ret; 3465 } 3466 #endif /* CONFIG_NUMA_BALANCING */ 3467 3468 /*** 3469 * kick_process - kick a running thread to enter/exit the kernel 3470 * @p: the to-be-kicked thread 3471 * 3472 * Cause a process which is running on another CPU to enter 3473 * kernel-mode, without any delay. (to get signals handled.) 3474 * 3475 * NOTE: this function doesn't have to take the runqueue lock, 3476 * because all it wants to ensure is that the remote task enters 3477 * the kernel. If the IPI races and the task has been migrated 3478 * to another CPU then no harm is done and the purpose has been 3479 * achieved as well. 3480 */ 3481 void kick_process(struct task_struct *p) 3482 { 3483 guard(preempt)(); 3484 int cpu = task_cpu(p); 3485 3486 if ((cpu != smp_processor_id()) && task_curr(p)) 3487 smp_send_reschedule(cpu); 3488 } 3489 EXPORT_SYMBOL_GPL(kick_process); 3490 3491 /* 3492 * ->cpus_ptr is protected by both rq->lock and p->pi_lock 3493 * 3494 * A few notes on cpu_active vs cpu_online: 3495 * 3496 * - cpu_active must be a subset of cpu_online 3497 * 3498 * - on CPU-up we allow per-CPU kthreads on the online && !active CPU, 3499 * see __set_cpus_allowed_ptr(). At this point the newly online 3500 * CPU isn't yet part of the sched domains, and balancing will not 3501 * see it. 3502 * 3503 * - on CPU-down we clear cpu_active() to mask the sched domains and 3504 * avoid the load balancer to place new tasks on the to be removed 3505 * CPU. Existing tasks will remain running there and will be taken 3506 * off. 3507 * 3508 * This means that fallback selection must not select !active CPUs. 3509 * And can assume that any active CPU must be online. Conversely 3510 * select_task_rq() below may allow selection of !active CPUs in order 3511 * to satisfy the above rules. 3512 */ 3513 static int select_fallback_rq(int cpu, struct task_struct *p) 3514 { 3515 int nid = cpu_to_node(cpu); 3516 const struct cpumask *nodemask = NULL; 3517 enum { cpuset, possible, fail } state = cpuset; 3518 int dest_cpu; 3519 3520 /* 3521 * If the node that the CPU is on has been offlined, cpu_to_node() 3522 * will return -1. There is no CPU on the node, and we should 3523 * select the CPU on the other node. 3524 */ 3525 if (nid != -1) { 3526 nodemask = cpumask_of_node(nid); 3527 3528 /* Look for allowed, online CPU in same node. */ 3529 for_each_cpu(dest_cpu, nodemask) { 3530 if (is_cpu_allowed(p, dest_cpu)) 3531 return dest_cpu; 3532 } 3533 } 3534 3535 for (;;) { 3536 /* Any allowed, online CPU? */ 3537 for_each_cpu(dest_cpu, p->cpus_ptr) { 3538 if (!is_cpu_allowed(p, dest_cpu)) 3539 continue; 3540 3541 goto out; 3542 } 3543 3544 /* No more Mr. Nice Guy. */ 3545 switch (state) { 3546 case cpuset: 3547 if (cpuset_cpus_allowed_fallback(p)) { 3548 state = possible; 3549 break; 3550 } 3551 fallthrough; 3552 case possible: 3553 /* 3554 * XXX When called from select_task_rq() we only 3555 * hold p->pi_lock and again violate locking order. 3556 * 3557 * More yuck to audit. 
3558 */ 3559 do_set_cpus_allowed(p, task_cpu_fallback_mask(p)); 3560 state = fail; 3561 break; 3562 case fail: 3563 BUG(); 3564 break; 3565 } 3566 } 3567 3568 out: 3569 if (state != cpuset) { 3570 /* 3571 * Don't tell them about moving exiting tasks or 3572 * kernel threads (both mm NULL), since they never 3573 * leave kernel. 3574 */ 3575 if (p->mm && printk_ratelimit()) { 3576 printk_deferred("process %d (%s) no longer affine to cpu%d\n", 3577 task_pid_nr(p), p->comm, cpu); 3578 } 3579 } 3580 3581 return dest_cpu; 3582 } 3583 3584 /* 3585 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable. 3586 */ 3587 static inline 3588 int select_task_rq(struct task_struct *p, int cpu, int *wake_flags) 3589 { 3590 lockdep_assert_held(&p->pi_lock); 3591 3592 if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p)) { 3593 cpu = p->sched_class->select_task_rq(p, cpu, *wake_flags); 3594 *wake_flags |= WF_RQ_SELECTED; 3595 } else { 3596 cpu = cpumask_any(p->cpus_ptr); 3597 } 3598 3599 /* 3600 * In order not to call set_task_cpu() on a blocking task we need 3601 * to rely on ttwu() to place the task on a valid ->cpus_ptr 3602 * CPU. 3603 * 3604 * Since this is common to all placement strategies, this lives here. 3605 * 3606 * [ this allows ->select_task() to simply return task_cpu(p) and 3607 * not worry about this generic constraint ] 3608 */ 3609 if (unlikely(!is_cpu_allowed(p, cpu))) 3610 cpu = select_fallback_rq(task_cpu(p), p); 3611 3612 return cpu; 3613 } 3614 3615 void sched_set_stop_task(int cpu, struct task_struct *stop) 3616 { 3617 static struct lock_class_key stop_pi_lock; 3618 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; 3619 struct task_struct *old_stop = cpu_rq(cpu)->stop; 3620 3621 if (stop) { 3622 /* 3623 * Make it appear like a SCHED_FIFO task, its something 3624 * userspace knows about and won't get confused about. 3625 * 3626 * Also, it will make PI more or less work without too 3627 * much confusion -- but then, stop work should not 3628 * rely on PI working anyway. 3629 */ 3630 sched_setscheduler_nocheck(stop, SCHED_FIFO, ¶m); 3631 3632 stop->sched_class = &stop_sched_class; 3633 3634 /* 3635 * The PI code calls rt_mutex_setprio() with ->pi_lock held to 3636 * adjust the effective priority of a task. As a result, 3637 * rt_mutex_setprio() can trigger (RT) balancing operations, 3638 * which can then trigger wakeups of the stop thread to push 3639 * around the current task. 3640 * 3641 * The stop task itself will never be part of the PI-chain, it 3642 * never blocks, therefore that ->pi_lock recursion is safe. 3643 * Tell lockdep about this by placing the stop->pi_lock in its 3644 * own class. 3645 */ 3646 lockdep_set_class(&stop->pi_lock, &stop_pi_lock); 3647 } 3648 3649 cpu_rq(cpu)->stop = stop; 3650 3651 if (old_stop) { 3652 /* 3653 * Reset it back to a normal scheduling class so that 3654 * it can die in pieces. 
3655 */ 3656 old_stop->sched_class = &rt_sched_class; 3657 } 3658 } 3659 3660 #else /* CONFIG_SMP */ 3661 3662 static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { } 3663 3664 static inline bool rq_has_pinned_tasks(struct rq *rq) 3665 { 3666 return false; 3667 } 3668 3669 #endif /* !CONFIG_SMP */ 3670 3671 static void 3672 ttwu_stat(struct task_struct *p, int cpu, int wake_flags) 3673 { 3674 struct rq *rq; 3675 3676 if (!schedstat_enabled()) 3677 return; 3678 3679 rq = this_rq(); 3680 3681 #ifdef CONFIG_SMP 3682 if (cpu == rq->cpu) { 3683 __schedstat_inc(rq->ttwu_local); 3684 __schedstat_inc(p->stats.nr_wakeups_local); 3685 } else { 3686 struct sched_domain *sd; 3687 3688 __schedstat_inc(p->stats.nr_wakeups_remote); 3689 3690 guard(rcu)(); 3691 for_each_domain(rq->cpu, sd) { 3692 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { 3693 __schedstat_inc(sd->ttwu_wake_remote); 3694 break; 3695 } 3696 } 3697 } 3698 3699 if (wake_flags & WF_MIGRATED) 3700 __schedstat_inc(p->stats.nr_wakeups_migrate); 3701 #endif /* CONFIG_SMP */ 3702 3703 __schedstat_inc(rq->ttwu_count); 3704 __schedstat_inc(p->stats.nr_wakeups); 3705 3706 if (wake_flags & WF_SYNC) 3707 __schedstat_inc(p->stats.nr_wakeups_sync); 3708 } 3709 3710 /* 3711 * Mark the task runnable. 3712 */ 3713 static inline void ttwu_do_wakeup(struct task_struct *p) 3714 { 3715 WRITE_ONCE(p->__state, TASK_RUNNING); 3716 trace_sched_wakeup(p); 3717 } 3718 3719 static void 3720 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, 3721 struct rq_flags *rf) 3722 { 3723 int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK; 3724 3725 lockdep_assert_rq_held(rq); 3726 3727 if (p->sched_contributes_to_load) 3728 rq->nr_uninterruptible--; 3729 3730 #ifdef CONFIG_SMP 3731 if (wake_flags & WF_RQ_SELECTED) 3732 en_flags |= ENQUEUE_RQ_SELECTED; 3733 if (wake_flags & WF_MIGRATED) 3734 en_flags |= ENQUEUE_MIGRATED; 3735 else 3736 #endif 3737 if (p->in_iowait) { 3738 delayacct_blkio_end(p); 3739 atomic_dec(&task_rq(p)->nr_iowait); 3740 } 3741 3742 activate_task(rq, p, en_flags); 3743 wakeup_preempt(rq, p, wake_flags); 3744 3745 ttwu_do_wakeup(p); 3746 3747 #ifdef CONFIG_SMP 3748 if (p->sched_class->task_woken) { 3749 /* 3750 * Our task @p is fully woken up and running; so it's safe to 3751 * drop the rq->lock, hereafter rq is only used for statistics. 3752 */ 3753 rq_unpin_lock(rq, rf); 3754 p->sched_class->task_woken(rq, p); 3755 rq_repin_lock(rq, rf); 3756 } 3757 3758 if (rq->idle_stamp) { 3759 u64 delta = rq_clock(rq) - rq->idle_stamp; 3760 u64 max = 2*rq->max_idle_balance_cost; 3761 3762 update_avg(&rq->avg_idle, delta); 3763 3764 if (rq->avg_idle > max) 3765 rq->avg_idle = max; 3766 3767 rq->idle_stamp = 0; 3768 } 3769 #endif 3770 } 3771 3772 /* 3773 * Consider @p being inside a wait loop: 3774 * 3775 * for (;;) { 3776 * set_current_state(TASK_UNINTERRUPTIBLE); 3777 * 3778 * if (CONDITION) 3779 * break; 3780 * 3781 * schedule(); 3782 * } 3783 * __set_current_state(TASK_RUNNING); 3784 * 3785 * between set_current_state() and schedule(). In this case @p is still 3786 * runnable, so all that needs doing is change p->state back to TASK_RUNNING in 3787 * an atomic manner. 3788 * 3789 * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq 3790 * then schedule() must still happen and p->state can be changed to 3791 * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we 3792 * need to do a full wakeup with enqueue. 
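 *
 * A task blocked in a delayed dequeue (p->se.sched_delayed) is likewise
 * still on its runqueue; requeueing it with ENQUEUE_DELAYED below is all
 * the wakeup it needs.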
3793 * 3794 * Returns: %true when the wakeup is done, 3795 * %false otherwise. 3796 */ 3797 static int ttwu_runnable(struct task_struct *p, int wake_flags) 3798 { 3799 struct rq_flags rf; 3800 struct rq *rq; 3801 int ret = 0; 3802 3803 rq = __task_rq_lock(p, &rf); 3804 if (task_on_rq_queued(p)) { 3805 update_rq_clock(rq); 3806 if (p->se.sched_delayed) 3807 enqueue_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_DELAYED); 3808 if (!task_on_cpu(rq, p)) { 3809 /* 3810 * When on_rq && !on_cpu the task is preempted, see if 3811 * it should preempt the task that is current now. 3812 */ 3813 wakeup_preempt(rq, p, wake_flags); 3814 } 3815 ttwu_do_wakeup(p); 3816 ret = 1; 3817 } 3818 __task_rq_unlock(rq, &rf); 3819 3820 return ret; 3821 } 3822 3823 #ifdef CONFIG_SMP 3824 void sched_ttwu_pending(void *arg) 3825 { 3826 struct llist_node *llist = arg; 3827 struct rq *rq = this_rq(); 3828 struct task_struct *p, *t; 3829 struct rq_flags rf; 3830 3831 if (!llist) 3832 return; 3833 3834 rq_lock_irqsave(rq, &rf); 3835 update_rq_clock(rq); 3836 3837 llist_for_each_entry_safe(p, t, llist, wake_entry.llist) { 3838 if (WARN_ON_ONCE(p->on_cpu)) 3839 smp_cond_load_acquire(&p->on_cpu, !VAL); 3840 3841 if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq))) 3842 set_task_cpu(p, cpu_of(rq)); 3843 3844 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf); 3845 } 3846 3847 /* 3848 * Must be after enqueueing at least once task such that 3849 * idle_cpu() does not observe a false-negative -- if it does, 3850 * it is possible for select_idle_siblings() to stack a number 3851 * of tasks on this CPU during that window. 3852 * 3853 * It is OK to clear ttwu_pending when another task pending. 3854 * We will receive IPI after local IRQ enabled and then enqueue it. 3855 * Since now nr_running > 0, idle_cpu() will always get correct result. 3856 */ 3857 WRITE_ONCE(rq->ttwu_pending, 0); 3858 rq_unlock_irqrestore(rq, &rf); 3859 } 3860 3861 /* 3862 * Prepare the scene for sending an IPI for a remote smp_call 3863 * 3864 * Returns true if the caller can proceed with sending the IPI. 3865 * Returns false otherwise. 3866 */ 3867 bool call_function_single_prep_ipi(int cpu) 3868 { 3869 if (set_nr_if_polling(cpu_rq(cpu)->idle)) { 3870 trace_sched_wake_idle_without_ipi(cpu); 3871 return false; 3872 } 3873 3874 return true; 3875 } 3876 3877 /* 3878 * Queue a task on the target CPUs wake_list and wake the CPU via IPI if 3879 * necessary. The wakee CPU on receipt of the IPI will queue the task 3880 * via sched_ttwu_wakeup() for activation so the wakee incurs the cost 3881 * of the wakeup instead of the waker. 
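 *
 * rq->ttwu_pending is set before queueing so that idle_cpu() reports the
 * target CPU as busy until sched_ttwu_pending() has drained the list.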
3882 */ 3883 static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) 3884 { 3885 struct rq *rq = cpu_rq(cpu); 3886 3887 p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED); 3888 3889 WRITE_ONCE(rq->ttwu_pending, 1); 3890 __smp_call_single_queue(cpu, &p->wake_entry.llist); 3891 } 3892 3893 void wake_up_if_idle(int cpu) 3894 { 3895 struct rq *rq = cpu_rq(cpu); 3896 3897 guard(rcu)(); 3898 if (is_idle_task(rcu_dereference(rq->curr))) { 3899 guard(rq_lock_irqsave)(rq); 3900 if (is_idle_task(rq->curr)) 3901 resched_curr(rq); 3902 } 3903 } 3904 3905 bool cpus_equal_capacity(int this_cpu, int that_cpu) 3906 { 3907 if (!sched_asym_cpucap_active()) 3908 return true; 3909 3910 if (this_cpu == that_cpu) 3911 return true; 3912 3913 return arch_scale_cpu_capacity(this_cpu) == arch_scale_cpu_capacity(that_cpu); 3914 } 3915 3916 bool cpus_share_cache(int this_cpu, int that_cpu) 3917 { 3918 if (this_cpu == that_cpu) 3919 return true; 3920 3921 return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu); 3922 } 3923 3924 /* 3925 * Whether CPUs are share cache resources, which means LLC on non-cluster 3926 * machines and LLC tag or L2 on machines with clusters. 3927 */ 3928 bool cpus_share_resources(int this_cpu, int that_cpu) 3929 { 3930 if (this_cpu == that_cpu) 3931 return true; 3932 3933 return per_cpu(sd_share_id, this_cpu) == per_cpu(sd_share_id, that_cpu); 3934 } 3935 3936 static inline bool ttwu_queue_cond(struct task_struct *p, int cpu) 3937 { 3938 /* See SCX_OPS_ALLOW_QUEUED_WAKEUP. */ 3939 if (!scx_allow_ttwu_queue(p)) 3940 return false; 3941 3942 /* 3943 * Do not complicate things with the async wake_list while the CPU is 3944 * in hotplug state. 3945 */ 3946 if (!cpu_active(cpu)) 3947 return false; 3948 3949 /* Ensure the task will still be allowed to run on the CPU. */ 3950 if (!cpumask_test_cpu(cpu, p->cpus_ptr)) 3951 return false; 3952 3953 /* 3954 * If the CPU does not share cache, then queue the task on the 3955 * remote rqs wakelist to avoid accessing remote data. 3956 */ 3957 if (!cpus_share_cache(smp_processor_id(), cpu)) 3958 return true; 3959 3960 if (cpu == smp_processor_id()) 3961 return false; 3962 3963 /* 3964 * If the wakee cpu is idle, or the task is descheduling and the 3965 * only running task on the CPU, then use the wakelist to offload 3966 * the task activation to the idle (or soon-to-be-idle) CPU as 3967 * the current CPU is likely busy. nr_running is checked to 3968 * avoid unnecessary task stacking. 3969 * 3970 * Note that we can only get here with (wakee) p->on_rq=0, 3971 * p->on_cpu can be whatever, we've done the dequeue, so 3972 * the wakee has been accounted out of ->nr_running. 
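 * Hence a zero ->nr_running below really does mean the wakee CPU is
 * idle or about to become idle.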
3973 */ 3974 if (!cpu_rq(cpu)->nr_running) 3975 return true; 3976 3977 return false; 3978 } 3979 3980 static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) 3981 { 3982 if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) { 3983 sched_clock_cpu(cpu); /* Sync clocks across CPUs */ 3984 __ttwu_queue_wakelist(p, cpu, wake_flags); 3985 return true; 3986 } 3987 3988 return false; 3989 } 3990 3991 #else /* !CONFIG_SMP */ 3992 3993 static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) 3994 { 3995 return false; 3996 } 3997 3998 #endif /* CONFIG_SMP */ 3999 4000 static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags) 4001 { 4002 struct rq *rq = cpu_rq(cpu); 4003 struct rq_flags rf; 4004 4005 if (ttwu_queue_wakelist(p, cpu, wake_flags)) 4006 return; 4007 4008 rq_lock(rq, &rf); 4009 update_rq_clock(rq); 4010 ttwu_do_activate(rq, p, wake_flags, &rf); 4011 rq_unlock(rq, &rf); 4012 } 4013 4014 /* 4015 * Invoked from try_to_wake_up() to check whether the task can be woken up. 4016 * 4017 * The caller holds p::pi_lock if p != current or has preemption 4018 * disabled when p == current. 4019 * 4020 * The rules of saved_state: 4021 * 4022 * The related locking code always holds p::pi_lock when updating 4023 * p::saved_state, which means the code is fully serialized in both cases. 4024 * 4025 * For PREEMPT_RT, the lock wait and lock wakeups happen via TASK_RTLOCK_WAIT. 4026 * No other bits set. This allows to distinguish all wakeup scenarios. 4027 * 4028 * For FREEZER, the wakeup happens via TASK_FROZEN. No other bits set. This 4029 * allows us to prevent early wakeup of tasks before they can be run on 4030 * asymmetric ISA architectures (eg ARMv9). 4031 */ 4032 static __always_inline 4033 bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success) 4034 { 4035 int match; 4036 4037 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) { 4038 WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) && 4039 state != TASK_RTLOCK_WAIT); 4040 } 4041 4042 *success = !!(match = __task_state_match(p, state)); 4043 4044 /* 4045 * Saved state preserves the task state across blocking on 4046 * an RT lock or TASK_FREEZABLE tasks. If the state matches, 4047 * set p::saved_state to TASK_RUNNING, but do not wake the task 4048 * because it waits for a lock wakeup or __thaw_task(). Also 4049 * indicate success because from the regular waker's point of 4050 * view this has succeeded. 4051 * 4052 * After acquiring the lock the task will restore p::__state 4053 * from p::saved_state which ensures that the regular 4054 * wakeup is not lost. The restore will also set 4055 * p::saved_state to TASK_RUNNING so any further tests will 4056 * not result in false positives vs. @success 4057 */ 4058 if (match < 0) 4059 p->saved_state = TASK_RUNNING; 4060 4061 return match > 0; 4062 } 4063 4064 /* 4065 * Notes on Program-Order guarantees on SMP systems. 4066 * 4067 * MIGRATION 4068 * 4069 * The basic program-order guarantee on SMP systems is that when a task [t] 4070 * migrates, all its activity on its old CPU [c0] happens-before any subsequent 4071 * execution on its new CPU [c1]. 4072 * 4073 * For migration (of runnable tasks) this is provided by the following means: 4074 * 4075 * A) UNLOCK of the rq(c0)->lock scheduling out task t 4076 * B) migration for t is required to synchronize *both* rq(c0)->lock and 4077 * rq(c1)->lock (if not at the same time, then in that order). 
4078 * C) LOCK of the rq(c1)->lock scheduling in task 4079 * 4080 * Release/acquire chaining guarantees that B happens after A and C after B. 4081 * Note: the CPU doing B need not be c0 or c1 4082 * 4083 * Example: 4084 * 4085 * CPU0 CPU1 CPU2 4086 * 4087 * LOCK rq(0)->lock 4088 * sched-out X 4089 * sched-in Y 4090 * UNLOCK rq(0)->lock 4091 * 4092 * LOCK rq(0)->lock // orders against CPU0 4093 * dequeue X 4094 * UNLOCK rq(0)->lock 4095 * 4096 * LOCK rq(1)->lock 4097 * enqueue X 4098 * UNLOCK rq(1)->lock 4099 * 4100 * LOCK rq(1)->lock // orders against CPU2 4101 * sched-out Z 4102 * sched-in X 4103 * UNLOCK rq(1)->lock 4104 * 4105 * 4106 * BLOCKING -- aka. SLEEP + WAKEUP 4107 * 4108 * For blocking we (obviously) need to provide the same guarantee as for 4109 * migration. However the means are completely different as there is no lock 4110 * chain to provide order. Instead we do: 4111 * 4112 * 1) smp_store_release(X->on_cpu, 0) -- finish_task() 4113 * 2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up() 4114 * 4115 * Example: 4116 * 4117 * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule) 4118 * 4119 * LOCK rq(0)->lock LOCK X->pi_lock 4120 * dequeue X 4121 * sched-out X 4122 * smp_store_release(X->on_cpu, 0); 4123 * 4124 * smp_cond_load_acquire(&X->on_cpu, !VAL); 4125 * X->state = WAKING 4126 * set_task_cpu(X,2) 4127 * 4128 * LOCK rq(2)->lock 4129 * enqueue X 4130 * X->state = RUNNING 4131 * UNLOCK rq(2)->lock 4132 * 4133 * LOCK rq(2)->lock // orders against CPU1 4134 * sched-out Z 4135 * sched-in X 4136 * UNLOCK rq(2)->lock 4137 * 4138 * UNLOCK X->pi_lock 4139 * UNLOCK rq(0)->lock 4140 * 4141 * 4142 * However, for wakeups there is a second guarantee we must provide, namely we 4143 * must ensure that CONDITION=1 done by the caller can not be reordered with 4144 * accesses to the task state; see try_to_wake_up() and set_current_state(). 4145 */ 4146 4147 /** 4148 * try_to_wake_up - wake up a thread 4149 * @p: the thread to be awakened 4150 * @state: the mask of task states that can be woken 4151 * @wake_flags: wake modifier flags (WF_*) 4152 * 4153 * Conceptually does: 4154 * 4155 * If (@state & @p->state) @p->state = TASK_RUNNING. 4156 * 4157 * If the task was not queued/runnable, also place it back on a runqueue. 4158 * 4159 * This function is atomic against schedule() which would dequeue the task. 4160 * 4161 * It issues a full memory barrier before accessing @p->state, see the comment 4162 * with set_current_state(). 4163 * 4164 * Uses p->pi_lock to serialize against concurrent wake-ups. 4165 * 4166 * Relies on p->pi_lock stabilizing: 4167 * - p->sched_class 4168 * - p->cpus_ptr 4169 * - p->sched_task_group 4170 * in order to do migration, see its use of select_task_rq()/set_task_cpu(). 4171 * 4172 * Tries really hard to only take one task_rq(p)->lock for performance. 4173 * Takes rq->lock in: 4174 * - ttwu_runnable() -- old rq, unavoidable, see comment there; 4175 * - ttwu_queue() -- new rq, for enqueue of the task; 4176 * - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us. 4177 * 4178 * As a consequence we race really badly with just about everything. See the 4179 * many memory barriers and their comments for details. 4180 * 4181 * Return: %true if @p->state changes (an actual wakeup was done), 4182 * %false otherwise. 
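 *
 * Purely as an illustration of the CONDITION ordering discussed above, a
 * typical waiter/waker pairing looks like:
 *
 *   waiter:                                  waker:
 *     for (;;) {
 *       set_current_state(TASK_UNINTERRUPTIBLE);
 *       if (CONDITION)
 *         break;
 *       schedule();
 *     }                                        CONDITION = 1;
 *     __set_current_state(TASK_RUNNING);       wake_up_process(p);
 *
 * set_current_state() orders the state store against the CONDITION load on
 * the waiter side; try_to_wake_up() orders the CONDITION store against the
 * @p->state load on the waker side.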
4183 */ 4184 int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) 4185 { 4186 guard(preempt)(); 4187 int cpu, success = 0; 4188 4189 wake_flags |= WF_TTWU; 4190 4191 if (p == current) { 4192 /* 4193 * We're waking current, this means 'p->on_rq' and 'task_cpu(p) 4194 * == smp_processor_id()'. Together this means we can special 4195 * case the whole 'p->on_rq && ttwu_runnable()' case below 4196 * without taking any locks. 4197 * 4198 * Specifically, given current runs ttwu() we must be before 4199 * schedule()'s block_task(), as such this must not observe 4200 * sched_delayed. 4201 * 4202 * In particular: 4203 * - we rely on Program-Order guarantees for all the ordering, 4204 * - we're serialized against set_special_state() by virtue of 4205 * it disabling IRQs (this allows not taking ->pi_lock). 4206 */ 4207 WARN_ON_ONCE(p->se.sched_delayed); 4208 if (!ttwu_state_match(p, state, &success)) 4209 goto out; 4210 4211 trace_sched_waking(p); 4212 ttwu_do_wakeup(p); 4213 goto out; 4214 } 4215 4216 /* 4217 * If we are going to wake up a thread waiting for CONDITION we 4218 * need to ensure that CONDITION=1 done by the caller can not be 4219 * reordered with p->state check below. This pairs with smp_store_mb() 4220 * in set_current_state() that the waiting thread does. 4221 */ 4222 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) { 4223 smp_mb__after_spinlock(); 4224 if (!ttwu_state_match(p, state, &success)) 4225 break; 4226 4227 trace_sched_waking(p); 4228 4229 /* 4230 * Ensure we load p->on_rq _after_ p->state, otherwise it would 4231 * be possible to, falsely, observe p->on_rq == 0 and get stuck 4232 * in smp_cond_load_acquire() below. 4233 * 4234 * sched_ttwu_pending() try_to_wake_up() 4235 * STORE p->on_rq = 1 LOAD p->state 4236 * UNLOCK rq->lock 4237 * 4238 * __schedule() (switch to task 'p') 4239 * LOCK rq->lock smp_rmb(); 4240 * smp_mb__after_spinlock(); 4241 * UNLOCK rq->lock 4242 * 4243 * [task p] 4244 * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq 4245 * 4246 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in 4247 * __schedule(). See the comment for smp_mb__after_spinlock(). 4248 * 4249 * A similar smp_rmb() lives in __task_needs_rq_lock(). 4250 */ 4251 smp_rmb(); 4252 if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags)) 4253 break; 4254 4255 #ifdef CONFIG_SMP 4256 /* 4257 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be 4258 * possible to, falsely, observe p->on_cpu == 0. 4259 * 4260 * One must be running (->on_cpu == 1) in order to remove oneself 4261 * from the runqueue. 4262 * 4263 * __schedule() (switch to task 'p') try_to_wake_up() 4264 * STORE p->on_cpu = 1 LOAD p->on_rq 4265 * UNLOCK rq->lock 4266 * 4267 * __schedule() (put 'p' to sleep) 4268 * LOCK rq->lock smp_rmb(); 4269 * smp_mb__after_spinlock(); 4270 * STORE p->on_rq = 0 LOAD p->on_cpu 4271 * 4272 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in 4273 * __schedule(). See the comment for smp_mb__after_spinlock(). 4274 * 4275 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure 4276 * schedule()'s deactivate_task() has 'happened' and p will no longer 4277 * care about it's own p->state. See the comment in __schedule(). 4278 */ 4279 smp_acquire__after_ctrl_dep(); 4280 4281 /* 4282 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq 4283 * == 0), which means we need to do an enqueue, change p->state to 4284 * TASK_WAKING such that we can unlock p->pi_lock before doing the 4285 * enqueue, such as ttwu_queue_wakelist(). 
4286 */ 4287 WRITE_ONCE(p->__state, TASK_WAKING); 4288 4289 /* 4290 * If the owning (remote) CPU is still in the middle of schedule() with 4291 * this task as prev, consider queueing p on the remote CPU's wake_list 4292 * which potentially sends an IPI instead of spinning on p->on_cpu to 4293 * let the waker make forward progress. This is safe because IRQs are 4294 * disabled and the IPI will deliver after on_cpu is cleared. 4295 * 4296 * Ensure we load task_cpu(p) after p->on_cpu: 4297 * 4298 * set_task_cpu(p, cpu); 4299 * STORE p->cpu = @cpu 4300 * __schedule() (switch to task 'p') 4301 * LOCK rq->lock 4302 * smp_mb__after_spinlock() smp_cond_load_acquire(&p->on_cpu) 4303 * STORE p->on_cpu = 1 LOAD p->cpu 4304 * 4305 * to ensure we observe the correct CPU on which the task is currently 4306 * scheduling. 4307 */ 4308 if (smp_load_acquire(&p->on_cpu) && 4309 ttwu_queue_wakelist(p, task_cpu(p), wake_flags)) 4310 break; 4311 4312 /* 4313 * If the owning (remote) CPU is still in the middle of schedule() with 4314 * this task as prev, wait until it's done referencing the task. 4315 * 4316 * Pairs with the smp_store_release() in finish_task(). 4317 * 4318 * This ensures that tasks getting woken will be fully ordered against 4319 * their previous state and preserve Program Order. 4320 */ 4321 smp_cond_load_acquire(&p->on_cpu, !VAL); 4322 4323 cpu = select_task_rq(p, p->wake_cpu, &wake_flags); 4324 if (task_cpu(p) != cpu) { 4325 if (p->in_iowait) { 4326 delayacct_blkio_end(p); 4327 atomic_dec(&task_rq(p)->nr_iowait); 4328 } 4329 4330 wake_flags |= WF_MIGRATED; 4331 psi_ttwu_dequeue(p); 4332 set_task_cpu(p, cpu); 4333 } 4334 #else 4335 cpu = task_cpu(p); 4336 #endif /* CONFIG_SMP */ 4337 4338 ttwu_queue(p, cpu, wake_flags); 4339 } 4340 out: 4341 if (success) 4342 ttwu_stat(p, task_cpu(p), wake_flags); 4343 4344 return success; 4345 } 4346 4347 static bool __task_needs_rq_lock(struct task_struct *p) 4348 { 4349 unsigned int state = READ_ONCE(p->__state); 4350 4351 /* 4352 * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when 4353 * the task is blocked. Make sure to check @state since ttwu() can drop 4354 * locks at the end, see ttwu_queue_wakelist(). 4355 */ 4356 if (state == TASK_RUNNING || state == TASK_WAKING) 4357 return true; 4358 4359 /* 4360 * Ensure we load p->on_rq after p->__state, otherwise it would be 4361 * possible to, falsely, observe p->on_rq == 0. 4362 * 4363 * See try_to_wake_up() for a longer comment. 4364 */ 4365 smp_rmb(); 4366 if (p->on_rq) 4367 return true; 4368 4369 #ifdef CONFIG_SMP 4370 /* 4371 * Ensure the task has finished __schedule() and will not be referenced 4372 * anymore. Again, see try_to_wake_up() for a longer comment. 4373 */ 4374 smp_rmb(); 4375 smp_cond_load_acquire(&p->on_cpu, !VAL); 4376 #endif 4377 4378 return false; 4379 } 4380 4381 /** 4382 * task_call_func - Invoke a function on a task in a fixed state 4383 * @p: Process for which the function is to be invoked, can be @current. 4384 * @func: Function to invoke. 4385 * @arg: Argument to function. 4386 * 4387 * Fix the task in its current state by avoiding wakeups and/or rq operations 4388 * and call @func(@arg) on it. This function can use task_is_runnable() and 4389 * task_curr() to work out what the state is, if required. Given that @func 4390 * can be invoked with a runqueue lock held, it had better be quite 4391 * lightweight.
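 *
 * An illustrative (hypothetical) callback, kept lightweight as required:
 *
 *   static int probe_task_curr(struct task_struct *p, void *arg)
 *   {
 *           return task_curr(p);
 *   }
 *
 *   running = task_call_func(p, probe_task_curr, NULL);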
4392 * 4393 * Returns: 4394 * Whatever @func returns 4395 */ 4396 int task_call_func(struct task_struct *p, task_call_f func, void *arg) 4397 { 4398 struct rq *rq = NULL; 4399 struct rq_flags rf; 4400 int ret; 4401 4402 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); 4403 4404 if (__task_needs_rq_lock(p)) 4405 rq = __task_rq_lock(p, &rf); 4406 4407 /* 4408 * At this point the task is pinned; either: 4409 * - blocked and we're holding off wakeups (pi->lock) 4410 * - woken, and we're holding off enqueue (rq->lock) 4411 * - queued, and we're holding off schedule (rq->lock) 4412 * - running, and we're holding off de-schedule (rq->lock) 4413 * 4414 * The called function (@func) can use: task_curr(), p->on_rq and 4415 * p->__state to differentiate between these states. 4416 */ 4417 ret = func(p, arg); 4418 4419 if (rq) 4420 rq_unlock(rq, &rf); 4421 4422 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags); 4423 return ret; 4424 } 4425 4426 /** 4427 * cpu_curr_snapshot - Return a snapshot of the currently running task 4428 * @cpu: The CPU on which to snapshot the task. 4429 * 4430 * Returns the task_struct pointer of the task "currently" running on 4431 * the specified CPU. 4432 * 4433 * If the specified CPU was offline, the return value is whatever it 4434 * is, perhaps a pointer to the task_struct structure of that CPU's idle 4435 * task, but there is no guarantee. Callers wishing a useful return 4436 * value must take some action to ensure that the specified CPU remains 4437 * online throughout. 4438 * 4439 * This function executes full memory barriers before and after fetching 4440 * the pointer, which permits the caller to confine this function's fetch 4441 * with respect to the caller's accesses to other shared variables. 4442 */ 4443 struct task_struct *cpu_curr_snapshot(int cpu) 4444 { 4445 struct rq *rq = cpu_rq(cpu); 4446 struct task_struct *t; 4447 struct rq_flags rf; 4448 4449 rq_lock_irqsave(rq, &rf); 4450 smp_mb__after_spinlock(); /* Pairing determined by caller's synchronization design. */ 4451 t = rcu_dereference(cpu_curr(cpu)); 4452 rq_unlock_irqrestore(rq, &rf); 4453 smp_mb(); /* Pairing determined by caller's synchronization design. */ 4454 4455 return t; 4456 } 4457 4458 /** 4459 * wake_up_process - Wake up a specific process 4460 * @p: The process to be woken up. 4461 * 4462 * Attempt to wake up the nominated process and move it to the set of runnable 4463 * processes. 4464 * 4465 * Return: 1 if the process was woken up, 0 if it was already running. 4466 * 4467 * This function executes a full memory barrier before accessing the task state. 4468 */ 4469 int wake_up_process(struct task_struct *p) 4470 { 4471 return try_to_wake_up(p, TASK_NORMAL, 0); 4472 } 4473 EXPORT_SYMBOL(wake_up_process); 4474 4475 int wake_up_state(struct task_struct *p, unsigned int state) 4476 { 4477 return try_to_wake_up(p, state, 0); 4478 } 4479 4480 /* 4481 * Perform scheduler related setup for a newly forked process p. 4482 * p is forked by current. 4483 * 4484 * __sched_fork() is basic setup which is also used by sched_init() to 4485 * initialize the boot CPU's idle task. 4486 */ 4487 static void __sched_fork(unsigned long clone_flags, struct task_struct *p) 4488 { 4489 p->on_rq = 0; 4490 4491 p->se.on_rq = 0; 4492 p->se.exec_start = 0; 4493 p->se.sum_exec_runtime = 0; 4494 p->se.prev_sum_exec_runtime = 0; 4495 p->se.nr_migrations = 0; 4496 p->se.vruntime = 0; 4497 p->se.vlag = 0; 4498 INIT_LIST_HEAD(&p->se.group_node); 4499 4500 /* A delayed task cannot be in clone(). 
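 * (current is obviously running while it executes clone(), so the
 * sched_delayed flag the child inherited from it must already be clear.)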
*/ 4501 WARN_ON_ONCE(p->se.sched_delayed); 4502 4503 #ifdef CONFIG_FAIR_GROUP_SCHED 4504 p->se.cfs_rq = NULL; 4505 #endif 4506 4507 #ifdef CONFIG_SCHEDSTATS 4508 /* Even if schedstat is disabled, there should not be garbage */ 4509 memset(&p->stats, 0, sizeof(p->stats)); 4510 #endif 4511 4512 init_dl_entity(&p->dl); 4513 4514 INIT_LIST_HEAD(&p->rt.run_list); 4515 p->rt.timeout = 0; 4516 p->rt.time_slice = sched_rr_timeslice; 4517 p->rt.on_rq = 0; 4518 p->rt.on_list = 0; 4519 4520 #ifdef CONFIG_SCHED_CLASS_EXT 4521 init_scx_entity(&p->scx); 4522 #endif 4523 4524 #ifdef CONFIG_PREEMPT_NOTIFIERS 4525 INIT_HLIST_HEAD(&p->preempt_notifiers); 4526 #endif 4527 4528 #ifdef CONFIG_COMPACTION 4529 p->capture_control = NULL; 4530 #endif 4531 init_numa_balancing(clone_flags, p); 4532 #ifdef CONFIG_SMP 4533 p->wake_entry.u_flags = CSD_TYPE_TTWU; 4534 p->migration_pending = NULL; 4535 #endif 4536 init_sched_mm_cid(p); 4537 } 4538 4539 DEFINE_STATIC_KEY_FALSE(sched_numa_balancing); 4540 4541 #ifdef CONFIG_NUMA_BALANCING 4542 4543 int sysctl_numa_balancing_mode; 4544 4545 static void __set_numabalancing_state(bool enabled) 4546 { 4547 if (enabled) 4548 static_branch_enable(&sched_numa_balancing); 4549 else 4550 static_branch_disable(&sched_numa_balancing); 4551 } 4552 4553 void set_numabalancing_state(bool enabled) 4554 { 4555 if (enabled) 4556 sysctl_numa_balancing_mode = NUMA_BALANCING_NORMAL; 4557 else 4558 sysctl_numa_balancing_mode = NUMA_BALANCING_DISABLED; 4559 __set_numabalancing_state(enabled); 4560 } 4561 4562 #ifdef CONFIG_PROC_SYSCTL 4563 static void reset_memory_tiering(void) 4564 { 4565 struct pglist_data *pgdat; 4566 4567 for_each_online_pgdat(pgdat) { 4568 pgdat->nbp_threshold = 0; 4569 pgdat->nbp_th_nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE); 4570 pgdat->nbp_th_start = jiffies_to_msecs(jiffies); 4571 } 4572 } 4573 4574 static int sysctl_numa_balancing(const struct ctl_table *table, int write, 4575 void *buffer, size_t *lenp, loff_t *ppos) 4576 { 4577 struct ctl_table t; 4578 int err; 4579 int state = sysctl_numa_balancing_mode; 4580 4581 if (write && !capable(CAP_SYS_ADMIN)) 4582 return -EPERM; 4583 4584 t = *table; 4585 t.data = &state; 4586 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); 4587 if (err < 0) 4588 return err; 4589 if (write) { 4590 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) && 4591 (state & NUMA_BALANCING_MEMORY_TIERING)) 4592 reset_memory_tiering(); 4593 sysctl_numa_balancing_mode = state; 4594 __set_numabalancing_state(state); 4595 } 4596 return err; 4597 } 4598 #endif 4599 #endif 4600 4601 #ifdef CONFIG_SCHEDSTATS 4602 4603 DEFINE_STATIC_KEY_FALSE(sched_schedstats); 4604 4605 static void set_schedstats(bool enabled) 4606 { 4607 if (enabled) 4608 static_branch_enable(&sched_schedstats); 4609 else 4610 static_branch_disable(&sched_schedstats); 4611 } 4612 4613 void force_schedstat_enabled(void) 4614 { 4615 if (!schedstat_enabled()) { 4616 pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n"); 4617 static_branch_enable(&sched_schedstats); 4618 } 4619 } 4620 4621 static int __init setup_schedstats(char *str) 4622 { 4623 int ret = 0; 4624 if (!str) 4625 goto out; 4626 4627 if (!strcmp(str, "enable")) { 4628 set_schedstats(true); 4629 ret = 1; 4630 } else if (!strcmp(str, "disable")) { 4631 set_schedstats(false); 4632 ret = 1; 4633 } 4634 out: 4635 if (!ret) 4636 pr_warn("Unable to parse schedstats=\n"); 4637 4638 return ret; 4639 } 4640 __setup("schedstats=", setup_schedstats); 4641 4642 #ifdef 
CONFIG_PROC_SYSCTL 4643 static int sysctl_schedstats(const struct ctl_table *table, int write, void *buffer, 4644 size_t *lenp, loff_t *ppos) 4645 { 4646 struct ctl_table t; 4647 int err; 4648 int state = static_branch_likely(&sched_schedstats); 4649 4650 if (write && !capable(CAP_SYS_ADMIN)) 4651 return -EPERM; 4652 4653 t = *table; 4654 t.data = &state; 4655 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); 4656 if (err < 0) 4657 return err; 4658 if (write) 4659 set_schedstats(state); 4660 return err; 4661 } 4662 #endif /* CONFIG_PROC_SYSCTL */ 4663 #endif /* CONFIG_SCHEDSTATS */ 4664 4665 #ifdef CONFIG_SYSCTL 4666 static const struct ctl_table sched_core_sysctls[] = { 4667 #ifdef CONFIG_SCHEDSTATS 4668 { 4669 .procname = "sched_schedstats", 4670 .data = NULL, 4671 .maxlen = sizeof(unsigned int), 4672 .mode = 0644, 4673 .proc_handler = sysctl_schedstats, 4674 .extra1 = SYSCTL_ZERO, 4675 .extra2 = SYSCTL_ONE, 4676 }, 4677 #endif /* CONFIG_SCHEDSTATS */ 4678 #ifdef CONFIG_UCLAMP_TASK 4679 { 4680 .procname = "sched_util_clamp_min", 4681 .data = &sysctl_sched_uclamp_util_min, 4682 .maxlen = sizeof(unsigned int), 4683 .mode = 0644, 4684 .proc_handler = sysctl_sched_uclamp_handler, 4685 }, 4686 { 4687 .procname = "sched_util_clamp_max", 4688 .data = &sysctl_sched_uclamp_util_max, 4689 .maxlen = sizeof(unsigned int), 4690 .mode = 0644, 4691 .proc_handler = sysctl_sched_uclamp_handler, 4692 }, 4693 { 4694 .procname = "sched_util_clamp_min_rt_default", 4695 .data = &sysctl_sched_uclamp_util_min_rt_default, 4696 .maxlen = sizeof(unsigned int), 4697 .mode = 0644, 4698 .proc_handler = sysctl_sched_uclamp_handler, 4699 }, 4700 #endif /* CONFIG_UCLAMP_TASK */ 4701 #ifdef CONFIG_NUMA_BALANCING 4702 { 4703 .procname = "numa_balancing", 4704 .data = NULL, /* filled in by handler */ 4705 .maxlen = sizeof(unsigned int), 4706 .mode = 0644, 4707 .proc_handler = sysctl_numa_balancing, 4708 .extra1 = SYSCTL_ZERO, 4709 .extra2 = SYSCTL_FOUR, 4710 }, 4711 #endif /* CONFIG_NUMA_BALANCING */ 4712 }; 4713 static int __init sched_core_sysctl_init(void) 4714 { 4715 register_sysctl_init("kernel", sched_core_sysctls); 4716 return 0; 4717 } 4718 late_initcall(sched_core_sysctl_init); 4719 #endif /* CONFIG_SYSCTL */ 4720 4721 /* 4722 * fork()/clone()-time setup: 4723 */ 4724 int sched_fork(unsigned long clone_flags, struct task_struct *p) 4725 { 4726 __sched_fork(clone_flags, p); 4727 /* 4728 * We mark the process as NEW here. This guarantees that 4729 * nobody will actually run it, and a signal or other external 4730 * event cannot wake it up and insert it on the runqueue either. 4731 */ 4732 p->__state = TASK_NEW; 4733 4734 /* 4735 * Make sure we do not leak PI boosting priority to the child. 4736 */ 4737 p->prio = current->normal_prio; 4738 4739 uclamp_fork(p); 4740 4741 /* 4742 * Revert to default priority/policy on fork if requested. 4743 */ 4744 if (unlikely(p->sched_reset_on_fork)) { 4745 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { 4746 p->policy = SCHED_NORMAL; 4747 p->static_prio = NICE_TO_PRIO(0); 4748 p->rt_priority = 0; 4749 } else if (PRIO_TO_NICE(p->static_prio) < 0) 4750 p->static_prio = NICE_TO_PRIO(0); 4751 4752 p->prio = p->normal_prio = p->static_prio; 4753 set_load_weight(p, false); 4754 p->se.custom_slice = 0; 4755 p->se.slice = sysctl_sched_base_slice; 4756 4757 /* 4758 * We don't need the reset flag anymore after the fork. 
It has 4759 * fulfilled its duty: 4760 */ 4761 p->sched_reset_on_fork = 0; 4762 } 4763 4764 if (dl_prio(p->prio)) 4765 return -EAGAIN; 4766 4767 scx_pre_fork(p); 4768 4769 if (rt_prio(p->prio)) { 4770 p->sched_class = &rt_sched_class; 4771 #ifdef CONFIG_SCHED_CLASS_EXT 4772 } else if (task_should_scx(p->policy)) { 4773 p->sched_class = &ext_sched_class; 4774 #endif 4775 } else { 4776 p->sched_class = &fair_sched_class; 4777 } 4778 4779 init_entity_runnable_average(&p->se); 4780 4781 4782 #ifdef CONFIG_SCHED_INFO 4783 if (likely(sched_info_on())) 4784 memset(&p->sched_info, 0, sizeof(p->sched_info)); 4785 #endif 4786 #if defined(CONFIG_SMP) 4787 p->on_cpu = 0; 4788 #endif 4789 init_task_preempt_count(p); 4790 #ifdef CONFIG_SMP 4791 plist_node_init(&p->pushable_tasks, MAX_PRIO); 4792 RB_CLEAR_NODE(&p->pushable_dl_tasks); 4793 #endif 4794 return 0; 4795 } 4796 4797 int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs) 4798 { 4799 unsigned long flags; 4800 4801 /* 4802 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly 4803 * required yet, but lockdep gets upset if rules are violated. 4804 */ 4805 raw_spin_lock_irqsave(&p->pi_lock, flags); 4806 #ifdef CONFIG_CGROUP_SCHED 4807 if (1) { 4808 struct task_group *tg; 4809 tg = container_of(kargs->cset->subsys[cpu_cgrp_id], 4810 struct task_group, css); 4811 tg = autogroup_task_group(p, tg); 4812 p->sched_task_group = tg; 4813 } 4814 #endif 4815 rseq_migrate(p); 4816 /* 4817 * We're setting the CPU for the first time, we don't migrate, 4818 * so use __set_task_cpu(). 4819 */ 4820 __set_task_cpu(p, smp_processor_id()); 4821 if (p->sched_class->task_fork) 4822 p->sched_class->task_fork(p); 4823 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 4824 4825 return scx_fork(p); 4826 } 4827 4828 void sched_cancel_fork(struct task_struct *p) 4829 { 4830 scx_cancel_fork(p); 4831 } 4832 4833 void sched_post_fork(struct task_struct *p) 4834 { 4835 uclamp_post_fork(p); 4836 scx_post_fork(p); 4837 } 4838 4839 unsigned long to_ratio(u64 period, u64 runtime) 4840 { 4841 if (runtime == RUNTIME_INF) 4842 return BW_UNIT; 4843 4844 /* 4845 * Doing this here saves a lot of checks in all 4846 * the calling paths, and returning zero seems 4847 * safe for them anyway. 4848 */ 4849 if (period == 0) 4850 return 0; 4851 4852 return div64_u64(runtime << BW_SHIFT, period); 4853 } 4854 4855 /* 4856 * wake_up_new_task - wake up a newly created task for the first time. 4857 * 4858 * This function will do some initial scheduler statistics housekeeping 4859 * that must be done for every newly created context, then puts the task 4860 * on the runqueue and wakes it. 4861 */ 4862 void wake_up_new_task(struct task_struct *p) 4863 { 4864 struct rq_flags rf; 4865 struct rq *rq; 4866 int wake_flags = WF_FORK; 4867 4868 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); 4869 WRITE_ONCE(p->__state, TASK_RUNNING); 4870 #ifdef CONFIG_SMP 4871 /* 4872 * Fork balancing, do it here and not earlier because: 4873 * - cpus_ptr can change in the fork path 4874 * - any previously selected CPU might disappear through hotplug 4875 * 4876 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq, 4877 * as we're not fully set-up yet. 
4878 */ 4879 p->recent_used_cpu = task_cpu(p); 4880 rseq_migrate(p); 4881 __set_task_cpu(p, select_task_rq(p, task_cpu(p), &wake_flags)); 4882 #endif 4883 rq = __task_rq_lock(p, &rf); 4884 update_rq_clock(rq); 4885 post_init_entity_util_avg(p); 4886 4887 activate_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_INITIAL); 4888 trace_sched_wakeup_new(p); 4889 wakeup_preempt(rq, p, wake_flags); 4890 #ifdef CONFIG_SMP 4891 if (p->sched_class->task_woken) { 4892 /* 4893 * Nothing relies on rq->lock after this, so it's fine to 4894 * drop it. 4895 */ 4896 rq_unpin_lock(rq, &rf); 4897 p->sched_class->task_woken(rq, p); 4898 rq_repin_lock(rq, &rf); 4899 } 4900 #endif 4901 task_rq_unlock(rq, p, &rf); 4902 } 4903 4904 #ifdef CONFIG_PREEMPT_NOTIFIERS 4905 4906 static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key); 4907 4908 void preempt_notifier_inc(void) 4909 { 4910 static_branch_inc(&preempt_notifier_key); 4911 } 4912 EXPORT_SYMBOL_GPL(preempt_notifier_inc); 4913 4914 void preempt_notifier_dec(void) 4915 { 4916 static_branch_dec(&preempt_notifier_key); 4917 } 4918 EXPORT_SYMBOL_GPL(preempt_notifier_dec); 4919 4920 /** 4921 * preempt_notifier_register - tell me when current is being preempted & rescheduled 4922 * @notifier: notifier struct to register 4923 */ 4924 void preempt_notifier_register(struct preempt_notifier *notifier) 4925 { 4926 if (!static_branch_unlikely(&preempt_notifier_key)) 4927 WARN(1, "registering preempt_notifier while notifiers disabled\n"); 4928 4929 hlist_add_head(¬ifier->link, ¤t->preempt_notifiers); 4930 } 4931 EXPORT_SYMBOL_GPL(preempt_notifier_register); 4932 4933 /** 4934 * preempt_notifier_unregister - no longer interested in preemption notifications 4935 * @notifier: notifier struct to unregister 4936 * 4937 * This is *not* safe to call from within a preemption notifier. 
4938 */ 4939 void preempt_notifier_unregister(struct preempt_notifier *notifier) 4940 { 4941 hlist_del(¬ifier->link); 4942 } 4943 EXPORT_SYMBOL_GPL(preempt_notifier_unregister); 4944 4945 static void __fire_sched_in_preempt_notifiers(struct task_struct *curr) 4946 { 4947 struct preempt_notifier *notifier; 4948 4949 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) 4950 notifier->ops->sched_in(notifier, raw_smp_processor_id()); 4951 } 4952 4953 static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) 4954 { 4955 if (static_branch_unlikely(&preempt_notifier_key)) 4956 __fire_sched_in_preempt_notifiers(curr); 4957 } 4958 4959 static void 4960 __fire_sched_out_preempt_notifiers(struct task_struct *curr, 4961 struct task_struct *next) 4962 { 4963 struct preempt_notifier *notifier; 4964 4965 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) 4966 notifier->ops->sched_out(notifier, next); 4967 } 4968 4969 static __always_inline void 4970 fire_sched_out_preempt_notifiers(struct task_struct *curr, 4971 struct task_struct *next) 4972 { 4973 if (static_branch_unlikely(&preempt_notifier_key)) 4974 __fire_sched_out_preempt_notifiers(curr, next); 4975 } 4976 4977 #else /* !CONFIG_PREEMPT_NOTIFIERS */ 4978 4979 static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) 4980 { 4981 } 4982 4983 static inline void 4984 fire_sched_out_preempt_notifiers(struct task_struct *curr, 4985 struct task_struct *next) 4986 { 4987 } 4988 4989 #endif /* CONFIG_PREEMPT_NOTIFIERS */ 4990 4991 static inline void prepare_task(struct task_struct *next) 4992 { 4993 #ifdef CONFIG_SMP 4994 /* 4995 * Claim the task as running, we do this before switching to it 4996 * such that any running task will have this set. 4997 * 4998 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and 4999 * its ordering comment. 5000 */ 5001 WRITE_ONCE(next->on_cpu, 1); 5002 #endif 5003 } 5004 5005 static inline void finish_task(struct task_struct *prev) 5006 { 5007 #ifdef CONFIG_SMP 5008 /* 5009 * This must be the very last reference to @prev from this CPU. After 5010 * p->on_cpu is cleared, the task can be moved to a different CPU. We 5011 * must ensure this doesn't happen until the switch is completely 5012 * finished. 5013 * 5014 * In particular, the load of prev->state in finish_task_switch() must 5015 * happen before this. 5016 * 5017 * Pairs with the smp_cond_load_acquire() in try_to_wake_up(). 5018 */ 5019 smp_store_release(&prev->on_cpu, 0); 5020 #endif 5021 } 5022 5023 #ifdef CONFIG_SMP 5024 5025 static void do_balance_callbacks(struct rq *rq, struct balance_callback *head) 5026 { 5027 void (*func)(struct rq *rq); 5028 struct balance_callback *next; 5029 5030 lockdep_assert_rq_held(rq); 5031 5032 while (head) { 5033 func = (void (*)(struct rq *))head->func; 5034 next = head->next; 5035 head->next = NULL; 5036 head = next; 5037 5038 func(rq); 5039 } 5040 } 5041 5042 static void balance_push(struct rq *rq); 5043 5044 /* 5045 * balance_push_callback is a right abuse of the callback interface and plays 5046 * by significantly different rules. 5047 * 5048 * Where the normal balance_callback's purpose is to be ran in the same context 5049 * that queued it (only later, when it's safe to drop rq->lock again), 5050 * balance_push_callback is specifically targeted at __schedule(). 5051 * 5052 * This abuse is tolerated because it places all the unlikely/odd cases behind 5053 * a single test, namely: rq->balance_callback == NULL. 
5054 */ 5055 struct balance_callback balance_push_callback = { 5056 .next = NULL, 5057 .func = balance_push, 5058 }; 5059 5060 static inline struct balance_callback * 5061 __splice_balance_callbacks(struct rq *rq, bool split) 5062 { 5063 struct balance_callback *head = rq->balance_callback; 5064 5065 if (likely(!head)) 5066 return NULL; 5067 5068 lockdep_assert_rq_held(rq); 5069 /* 5070 * Must not take balance_push_callback off the list when 5071 * splice_balance_callbacks() and balance_callbacks() are not 5072 * in the same rq->lock section. 5073 * 5074 * In that case it would be possible for __schedule() to interleave 5075 * and observe the list empty. 5076 */ 5077 if (split && head == &balance_push_callback) 5078 head = NULL; 5079 else 5080 rq->balance_callback = NULL; 5081 5082 return head; 5083 } 5084 5085 struct balance_callback *splice_balance_callbacks(struct rq *rq) 5086 { 5087 return __splice_balance_callbacks(rq, true); 5088 } 5089 5090 static void __balance_callbacks(struct rq *rq) 5091 { 5092 do_balance_callbacks(rq, __splice_balance_callbacks(rq, false)); 5093 } 5094 5095 void balance_callbacks(struct rq *rq, struct balance_callback *head) 5096 { 5097 unsigned long flags; 5098 5099 if (unlikely(head)) { 5100 raw_spin_rq_lock_irqsave(rq, flags); 5101 do_balance_callbacks(rq, head); 5102 raw_spin_rq_unlock_irqrestore(rq, flags); 5103 } 5104 } 5105 5106 #else 5107 5108 static inline void __balance_callbacks(struct rq *rq) 5109 { 5110 } 5111 5112 #endif 5113 5114 static inline void 5115 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf) 5116 { 5117 /* 5118 * Since the runqueue lock will be released by the next 5119 * task (which is an invalid locking op but in the case 5120 * of the scheduler it's an obvious special-case), so we 5121 * do an early lockdep release here: 5122 */ 5123 rq_unpin_lock(rq, rf); 5124 spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_); 5125 #ifdef CONFIG_DEBUG_SPINLOCK 5126 /* this is a valid case when another task releases the spinlock */ 5127 rq_lockp(rq)->owner = next; 5128 #endif 5129 } 5130 5131 static inline void finish_lock_switch(struct rq *rq) 5132 { 5133 /* 5134 * If we are tracking spinlock dependencies then we have to 5135 * fix up the runqueue lock - which gets 'carried over' from 5136 * prev into current: 5137 */ 5138 spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_); 5139 __balance_callbacks(rq); 5140 raw_spin_rq_unlock_irq(rq); 5141 } 5142 5143 /* 5144 * NOP if the arch has not defined these: 5145 */ 5146 5147 #ifndef prepare_arch_switch 5148 # define prepare_arch_switch(next) do { } while (0) 5149 #endif 5150 5151 #ifndef finish_arch_post_lock_switch 5152 # define finish_arch_post_lock_switch() do { } while (0) 5153 #endif 5154 5155 static inline void kmap_local_sched_out(void) 5156 { 5157 #ifdef CONFIG_KMAP_LOCAL 5158 if (unlikely(current->kmap_ctrl.idx)) 5159 __kmap_local_sched_out(); 5160 #endif 5161 } 5162 5163 static inline void kmap_local_sched_in(void) 5164 { 5165 #ifdef CONFIG_KMAP_LOCAL 5166 if (unlikely(current->kmap_ctrl.idx)) 5167 __kmap_local_sched_in(); 5168 #endif 5169 } 5170 5171 /** 5172 * prepare_task_switch - prepare to switch tasks 5173 * @rq: the runqueue preparing to switch 5174 * @prev: the current task that is being switched out 5175 * @next: the task we are going to switch to. 5176 * 5177 * This is called with the rq lock held and interrupts off. It must 5178 * be paired with a subsequent finish_task_switch after the context 5179 * switch. 
5180 * 5181 * prepare_task_switch sets up locking and calls architecture specific 5182 * hooks. 5183 */ 5184 static inline void 5185 prepare_task_switch(struct rq *rq, struct task_struct *prev, 5186 struct task_struct *next) 5187 { 5188 kcov_prepare_switch(prev); 5189 sched_info_switch(rq, prev, next); 5190 perf_event_task_sched_out(prev, next); 5191 rseq_preempt(prev); 5192 fire_sched_out_preempt_notifiers(prev, next); 5193 kmap_local_sched_out(); 5194 prepare_task(next); 5195 prepare_arch_switch(next); 5196 } 5197 5198 /** 5199 * finish_task_switch - clean up after a task-switch 5200 * @prev: the thread we just switched away from. 5201 * 5202 * finish_task_switch must be called after the context switch, paired 5203 * with a prepare_task_switch call before the context switch. 5204 * finish_task_switch will reconcile locking set up by prepare_task_switch, 5205 * and do any other architecture-specific cleanup actions. 5206 * 5207 * Note that we may have delayed dropping an mm in context_switch(). If 5208 * so, we finish that here outside of the runqueue lock. (Doing it 5209 * with the lock held can cause deadlocks; see schedule() for 5210 * details.) 5211 * 5212 * The context switch has flipped the stack from under us and restored the 5213 * local variables which were saved when this task called schedule() in the 5214 * past. 'prev == current' is still correct but we need to recalculate this_rq 5215 * because prev may have moved to another CPU. 5216 */ 5217 static struct rq *finish_task_switch(struct task_struct *prev) 5218 __releases(rq->lock) 5219 { 5220 struct rq *rq = this_rq(); 5221 struct mm_struct *mm = rq->prev_mm; 5222 unsigned int prev_state; 5223 5224 /* 5225 * The previous task will have left us with a preempt_count of 2 5226 * because it left us after: 5227 * 5228 * schedule() 5229 * preempt_disable(); // 1 5230 * __schedule() 5231 * raw_spin_lock_irq(&rq->lock) // 2 5232 * 5233 * Also, see FORK_PREEMPT_COUNT. 5234 */ 5235 if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET, 5236 "corrupted preempt_count: %s/%d/0x%x\n", 5237 current->comm, current->pid, preempt_count())) 5238 preempt_count_set(FORK_PREEMPT_COUNT); 5239 5240 rq->prev_mm = NULL; 5241 5242 /* 5243 * A task struct has one reference for the use as "current". 5244 * If a task dies, then it sets TASK_DEAD in tsk->state and calls 5245 * schedule one last time. The schedule call will never return, and 5246 * the scheduled task must drop that reference. 5247 * 5248 * We must observe prev->state before clearing prev->on_cpu (in 5249 * finish_task), otherwise a concurrent wakeup can get prev 5250 * running on another CPU and we could race with its RUNNING -> DEAD 5251 * transition, resulting in a double drop. 5252 */ 5253 prev_state = READ_ONCE(prev->__state); 5254 vtime_task_switch(prev); 5255 perf_event_task_sched_in(prev, current); 5256 finish_task(prev); 5257 tick_nohz_task_switch(); 5258 finish_lock_switch(rq); 5259 finish_arch_post_lock_switch(); 5260 kcov_finish_switch(current); 5261 /* 5262 * kmap_local_sched_out() is invoked with rq::lock held and 5263 * interrupts disabled. There is no requirement for that, but the 5264 * sched out code does not have an interrupt enabled section. 5265 * Restoring the maps on sched in does not require interrupts being 5266 * disabled either.
5267 */ 5268 kmap_local_sched_in(); 5269 5270 fire_sched_in_preempt_notifiers(current); 5271 /* 5272 * When switching through a kernel thread, the loop in 5273 * membarrier_{private,global}_expedited() may have observed that 5274 * kernel thread and not issued an IPI. It is therefore possible to 5275 * schedule between user->kernel->user threads without passing though 5276 * switch_mm(). Membarrier requires a barrier after storing to 5277 * rq->curr, before returning to userspace, so provide them here: 5278 * 5279 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly 5280 * provided by mmdrop_lazy_tlb(), 5281 * - a sync_core for SYNC_CORE. 5282 */ 5283 if (mm) { 5284 membarrier_mm_sync_core_before_usermode(mm); 5285 mmdrop_lazy_tlb_sched(mm); 5286 } 5287 5288 if (unlikely(prev_state == TASK_DEAD)) { 5289 if (prev->sched_class->task_dead) 5290 prev->sched_class->task_dead(prev); 5291 5292 /* Task is done with its stack. */ 5293 put_task_stack(prev); 5294 5295 put_task_struct_rcu_user(prev); 5296 } 5297 5298 return rq; 5299 } 5300 5301 /** 5302 * schedule_tail - first thing a freshly forked thread must call. 5303 * @prev: the thread we just switched away from. 5304 */ 5305 asmlinkage __visible void schedule_tail(struct task_struct *prev) 5306 __releases(rq->lock) 5307 { 5308 /* 5309 * New tasks start with FORK_PREEMPT_COUNT, see there and 5310 * finish_task_switch() for details. 5311 * 5312 * finish_task_switch() will drop rq->lock() and lower preempt_count 5313 * and the preempt_enable() will end up enabling preemption (on 5314 * PREEMPT_COUNT kernels). 5315 */ 5316 5317 finish_task_switch(prev); 5318 /* 5319 * This is a special case: the newly created task has just 5320 * switched the context for the first time. It is returning from 5321 * schedule for the first time in this path. 5322 */ 5323 trace_sched_exit_tp(true, CALLER_ADDR0); 5324 preempt_enable(); 5325 5326 if (current->set_child_tid) 5327 put_user(task_pid_vnr(current), current->set_child_tid); 5328 5329 calculate_sigpending(); 5330 } 5331 5332 /* 5333 * context_switch - switch to the new MM and the new thread's register state. 5334 */ 5335 static __always_inline struct rq * 5336 context_switch(struct rq *rq, struct task_struct *prev, 5337 struct task_struct *next, struct rq_flags *rf) 5338 { 5339 prepare_task_switch(rq, prev, next); 5340 5341 /* 5342 * For paravirt, this is coupled with an exit in switch_to to 5343 * combine the page table reload and the switch backend into 5344 * one hypercall. 5345 */ 5346 arch_start_context_switch(prev); 5347 5348 /* 5349 * kernel -> kernel lazy + transfer active 5350 * user -> kernel lazy + mmgrab_lazy_tlb() active 5351 * 5352 * kernel -> user switch + mmdrop_lazy_tlb() active 5353 * user -> user switch 5354 * 5355 * switch_mm_cid() needs to be updated if the barriers provided 5356 * by context_switch() are modified. 5357 */ 5358 if (!next->mm) { // to kernel 5359 enter_lazy_tlb(prev->active_mm, next); 5360 5361 next->active_mm = prev->active_mm; 5362 if (prev->mm) // from user 5363 mmgrab_lazy_tlb(prev->active_mm); 5364 else 5365 prev->active_mm = NULL; 5366 } else { // to user 5367 membarrier_switch_mm(rq, prev->active_mm, next->mm); 5368 /* 5369 * sys_membarrier() requires an smp_mb() between setting 5370 * rq->curr / membarrier_switch_mm() and returning to userspace. 5371 * 5372 * The below provides this either through switch_mm(), or in 5373 * case 'prev->active_mm == next->mm' through 5374 * finish_task_switch()'s mmdrop(). 
5375 */ 5376 switch_mm_irqs_off(prev->active_mm, next->mm, next); 5377 lru_gen_use_mm(next->mm); 5378 5379 if (!prev->mm) { // from kernel 5380 /* will mmdrop_lazy_tlb() in finish_task_switch(). */ 5381 rq->prev_mm = prev->active_mm; 5382 prev->active_mm = NULL; 5383 } 5384 } 5385 5386 /* switch_mm_cid() requires the memory barriers above. */ 5387 switch_mm_cid(rq, prev, next); 5388 5389 prepare_lock_switch(rq, next, rf); 5390 5391 /* Here we just switch the register state and the stack. */ 5392 switch_to(prev, next, prev); 5393 barrier(); 5394 5395 return finish_task_switch(prev); 5396 } 5397 5398 /* 5399 * nr_running and nr_context_switches: 5400 * 5401 * externally visible scheduler statistics: current number of runnable 5402 * threads, total number of context switches performed since bootup. 5403 */ 5404 unsigned int nr_running(void) 5405 { 5406 unsigned int i, sum = 0; 5407 5408 for_each_online_cpu(i) 5409 sum += cpu_rq(i)->nr_running; 5410 5411 return sum; 5412 } 5413 5414 /* 5415 * Check if only the current task is running on the CPU. 5416 * 5417 * Caution: this function does not check that the caller has disabled 5418 * preemption, thus the result might have a time-of-check-to-time-of-use 5419 * race. The caller is responsible for using it correctly, for example: 5420 * 5421 * - from a non-preemptible section (of course) 5422 * 5423 * - from a thread that is bound to a single CPU 5424 * 5425 * - in a loop with very short iterations (e.g. a polling loop) 5426 */ 5427 bool single_task_running(void) 5428 { 5429 return raw_rq()->nr_running == 1; 5430 } 5431 EXPORT_SYMBOL(single_task_running); 5432 5433 unsigned long long nr_context_switches_cpu(int cpu) 5434 { 5435 return cpu_rq(cpu)->nr_switches; 5436 } 5437 5438 unsigned long long nr_context_switches(void) 5439 { 5440 int i; 5441 unsigned long long sum = 0; 5442 5443 for_each_possible_cpu(i) 5444 sum += cpu_rq(i)->nr_switches; 5445 5446 return sum; 5447 } 5448 5449 /* 5450 * Consumers of these two interfaces, like for example the cpuidle menu 5451 * governor, are using nonsensical data, preferring shallow idle state selection 5452 * for a CPU that has IO-wait even though it might not even end up running the 5453 * task when it does become runnable. 5454 */ 5455 5456 unsigned int nr_iowait_cpu(int cpu) 5457 { 5458 return atomic_read(&cpu_rq(cpu)->nr_iowait); 5459 } 5460 5461 /* 5462 * IO-wait accounting, and how it's mostly bollocks (on SMP). 5463 * 5464 * The idea behind IO-wait accounting is to account the idle time that we could 5465 * have spent running if it were not for IO. That is, if we were to improve the 5466 * storage performance, we'd have a proportional reduction in IO-wait time. 5467 * 5468 * This all works nicely on UP, where, when a task blocks on IO, we account 5469 * idle time as IO-wait, because if the storage were faster, it could've been 5470 * running and we'd not be idle. 5471 * 5472 * This has been extended to SMP, by doing the same for each CPU. This however 5473 * is broken. 5474 * 5475 * Imagine for instance the case where two tasks block on one CPU: only the one 5476 * CPU will have IO-wait accounted, while the other has regular idle. Even 5477 * though, if the storage were faster, both could've run at the same time, 5478 * utilising both CPUs. 5479 * 5480 * This means that, when looking globally, the current IO-wait accounting on 5481 * SMP is a lower bound, due to under-accounting. 5482 * 5483 * Worse, since the numbers are provided per CPU, they are sometimes 5484 * interpreted per CPU, and that is nonsensical.
A blocked task isn't strictly 5485 * associated with any one particular CPU, it can wake to another CPU than it 5486 * blocked on. This means the per CPU IO-wait number is meaningless. 5487 * 5488 * Task CPU affinities can make all that even more 'interesting'. 5489 */ 5490 5491 unsigned int nr_iowait(void) 5492 { 5493 unsigned int i, sum = 0; 5494 5495 for_each_possible_cpu(i) 5496 sum += nr_iowait_cpu(i); 5497 5498 return sum; 5499 } 5500 5501 #ifdef CONFIG_SMP 5502 5503 /* 5504 * sched_exec - execve() is a valuable balancing opportunity, because at 5505 * this point the task has the smallest effective memory and cache footprint. 5506 */ 5507 void sched_exec(void) 5508 { 5509 struct task_struct *p = current; 5510 struct migration_arg arg; 5511 int dest_cpu; 5512 5513 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) { 5514 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC); 5515 if (dest_cpu == smp_processor_id()) 5516 return; 5517 5518 if (unlikely(!cpu_active(dest_cpu))) 5519 return; 5520 5521 arg = (struct migration_arg){ p, dest_cpu }; 5522 } 5523 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); 5524 } 5525 5526 #endif 5527 5528 DEFINE_PER_CPU(struct kernel_stat, kstat); 5529 DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat); 5530 5531 EXPORT_PER_CPU_SYMBOL(kstat); 5532 EXPORT_PER_CPU_SYMBOL(kernel_cpustat); 5533 5534 /* 5535 * The function fair_sched_class.update_curr accesses the struct curr 5536 * and its field curr->exec_start; when called from task_sched_runtime(), 5537 * we observe a high rate of cache misses in practice. 5538 * Prefetching this data results in improved performance. 5539 */ 5540 static inline void prefetch_curr_exec_start(struct task_struct *p) 5541 { 5542 #ifdef CONFIG_FAIR_GROUP_SCHED 5543 struct sched_entity *curr = p->se.cfs_rq->curr; 5544 #else 5545 struct sched_entity *curr = task_rq(p)->cfs.curr; 5546 #endif 5547 prefetch(curr); 5548 prefetch(&curr->exec_start); 5549 } 5550 5551 /* 5552 * Return accounted runtime for the task. 5553 * In case the task is currently running, return the runtime plus current's 5554 * pending runtime that have not been accounted yet. 5555 */ 5556 unsigned long long task_sched_runtime(struct task_struct *p) 5557 { 5558 struct rq_flags rf; 5559 struct rq *rq; 5560 u64 ns; 5561 5562 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP) 5563 /* 5564 * 64-bit doesn't need locks to atomically read a 64-bit value. 5565 * So we have a optimization chance when the task's delta_exec is 0. 5566 * Reading ->on_cpu is racy, but this is OK. 5567 * 5568 * If we race with it leaving CPU, we'll take a lock. So we're correct. 5569 * If we race with it entering CPU, unaccounted time is 0. This is 5570 * indistinguishable from the read occurring a few cycles earlier. 5571 * If we see ->on_cpu without ->on_rq, the task is leaving, and has 5572 * been accounted, so we're correct here as well. 5573 */ 5574 if (!p->on_cpu || !task_on_rq_queued(p)) 5575 return p->se.sum_exec_runtime; 5576 #endif 5577 5578 rq = task_rq_lock(p, &rf); 5579 /* 5580 * Must be ->curr _and_ ->on_rq. If dequeued, we would 5581 * project cycles that may never be accounted to this 5582 * thread, breaking clock_gettime(). 
5583 */ 5584 if (task_current_donor(rq, p) && task_on_rq_queued(p)) { 5585 prefetch_curr_exec_start(p); 5586 update_rq_clock(rq); 5587 p->sched_class->update_curr(rq); 5588 } 5589 ns = p->se.sum_exec_runtime; 5590 task_rq_unlock(rq, p, &rf); 5591 5592 return ns; 5593 } 5594 5595 static u64 cpu_resched_latency(struct rq *rq) 5596 { 5597 int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms); 5598 u64 resched_latency, now = rq_clock(rq); 5599 static bool warned_once; 5600 5601 if (sysctl_resched_latency_warn_once && warned_once) 5602 return 0; 5603 5604 if (!need_resched() || !latency_warn_ms) 5605 return 0; 5606 5607 if (system_state == SYSTEM_BOOTING) 5608 return 0; 5609 5610 if (!rq->last_seen_need_resched_ns) { 5611 rq->last_seen_need_resched_ns = now; 5612 rq->ticks_without_resched = 0; 5613 return 0; 5614 } 5615 5616 rq->ticks_without_resched++; 5617 resched_latency = now - rq->last_seen_need_resched_ns; 5618 if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC) 5619 return 0; 5620 5621 warned_once = true; 5622 5623 return resched_latency; 5624 } 5625 5626 static int __init setup_resched_latency_warn_ms(char *str) 5627 { 5628 long val; 5629 5630 if ((kstrtol(str, 0, &val))) { 5631 pr_warn("Unable to set resched_latency_warn_ms\n"); 5632 return 1; 5633 } 5634 5635 sysctl_resched_latency_warn_ms = val; 5636 return 1; 5637 } 5638 __setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms); 5639 5640 /* 5641 * This function gets called by the timer code, with HZ frequency. 5642 * We call it with interrupts disabled. 5643 */ 5644 void sched_tick(void) 5645 { 5646 int cpu = smp_processor_id(); 5647 struct rq *rq = cpu_rq(cpu); 5648 /* accounting goes to the donor task */ 5649 struct task_struct *donor; 5650 struct rq_flags rf; 5651 unsigned long hw_pressure; 5652 u64 resched_latency; 5653 5654 if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE)) 5655 arch_scale_freq_tick(); 5656 5657 sched_clock_tick(); 5658 5659 rq_lock(rq, &rf); 5660 donor = rq->donor; 5661 5662 psi_account_irqtime(rq, donor, NULL); 5663 5664 update_rq_clock(rq); 5665 hw_pressure = arch_scale_hw_pressure(cpu_of(rq)); 5666 update_hw_load_avg(rq_clock_task(rq), rq, hw_pressure); 5667 5668 if (dynamic_preempt_lazy() && tif_test_bit(TIF_NEED_RESCHED_LAZY)) 5669 resched_curr(rq); 5670 5671 donor->sched_class->task_tick(rq, donor, 0); 5672 if (sched_feat(LATENCY_WARN)) 5673 resched_latency = cpu_resched_latency(rq); 5674 calc_global_load_tick(rq); 5675 sched_core_tick(rq); 5676 task_tick_mm_cid(rq, donor); 5677 scx_tick(rq); 5678 5679 rq_unlock(rq, &rf); 5680 5681 if (sched_feat(LATENCY_WARN) && resched_latency) 5682 resched_latency_warn(cpu, resched_latency); 5683 5684 perf_event_task_tick(); 5685 5686 if (donor->flags & PF_WQ_WORKER) 5687 wq_worker_tick(donor); 5688 5689 #ifdef CONFIG_SMP 5690 if (!scx_switched_all()) { 5691 rq->idle_balance = idle_cpu(cpu); 5692 sched_balance_trigger(rq); 5693 } 5694 #endif 5695 } 5696 5697 #ifdef CONFIG_NO_HZ_FULL 5698 5699 struct tick_work { 5700 int cpu; 5701 atomic_t state; 5702 struct delayed_work work; 5703 }; 5704 /* Values for ->state, see diagram below. 
*/ 5705 #define TICK_SCHED_REMOTE_OFFLINE 0 5706 #define TICK_SCHED_REMOTE_OFFLINING 1 5707 #define TICK_SCHED_REMOTE_RUNNING 2 5708 5709 /* 5710 * State diagram for ->state: 5711 * 5712 * 5713 * TICK_SCHED_REMOTE_OFFLINE 5714 * | ^ 5715 * | | 5716 * | | sched_tick_remote() 5717 * | | 5718 * | | 5719 * +--TICK_SCHED_REMOTE_OFFLINING 5720 * | ^ 5721 * | | 5722 * sched_tick_start() | | sched_tick_stop() 5723 * | | 5724 * V | 5725 * TICK_SCHED_REMOTE_RUNNING 5726 * 5727 * 5728 * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote() 5729 * and sched_tick_start() are happy to leave the state in RUNNING. 5730 */ 5731 5732 static struct tick_work __percpu *tick_work_cpu; 5733 5734 static void sched_tick_remote(struct work_struct *work) 5735 { 5736 struct delayed_work *dwork = to_delayed_work(work); 5737 struct tick_work *twork = container_of(dwork, struct tick_work, work); 5738 int cpu = twork->cpu; 5739 struct rq *rq = cpu_rq(cpu); 5740 int os; 5741 5742 /* 5743 * Handle the tick only if it appears the remote CPU is running in full 5744 * dynticks mode. The check is racy by nature, but missing a tick or 5745 * having one too much is no big deal because the scheduler tick updates 5746 * statistics and checks timeslices in a time-independent way, regardless 5747 * of when exactly it is running. 5748 */ 5749 if (tick_nohz_tick_stopped_cpu(cpu)) { 5750 guard(rq_lock_irq)(rq); 5751 struct task_struct *curr = rq->curr; 5752 5753 if (cpu_online(cpu)) { 5754 /* 5755 * Since this is a remote tick for full dynticks mode, 5756 * we are always sure that there is no proxy (only a 5757 * single task is running). 5758 */ 5759 WARN_ON_ONCE(rq->curr != rq->donor); 5760 update_rq_clock(rq); 5761 5762 if (!is_idle_task(curr)) { 5763 /* 5764 * Make sure the next tick runs within a 5765 * reasonable amount of time. 5766 */ 5767 u64 delta = rq_clock_task(rq) - curr->se.exec_start; 5768 WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3); 5769 } 5770 curr->sched_class->task_tick(rq, curr, 0); 5771 5772 calc_load_nohz_remote(rq); 5773 } 5774 } 5775 5776 /* 5777 * Run the remote tick once per second (1Hz). This arbitrary 5778 * frequency is large enough to avoid overload but short enough 5779 * to keep scheduler internal stats reasonably up to date. But 5780 * first update state to reflect hotplug activity if required. 5781 */ 5782 os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING); 5783 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE); 5784 if (os == TICK_SCHED_REMOTE_RUNNING) 5785 queue_delayed_work(system_unbound_wq, dwork, HZ); 5786 } 5787 5788 static void sched_tick_start(int cpu) 5789 { 5790 int os; 5791 struct tick_work *twork; 5792 5793 if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE)) 5794 return; 5795 5796 WARN_ON_ONCE(!tick_work_cpu); 5797 5798 twork = per_cpu_ptr(tick_work_cpu, cpu); 5799 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING); 5800 WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING); 5801 if (os == TICK_SCHED_REMOTE_OFFLINE) { 5802 twork->cpu = cpu; 5803 INIT_DELAYED_WORK(&twork->work, sched_tick_remote); 5804 queue_delayed_work(system_unbound_wq, &twork->work, HZ); 5805 } 5806 } 5807 5808 #ifdef CONFIG_HOTPLUG_CPU 5809 static void sched_tick_stop(int cpu) 5810 { 5811 struct tick_work *twork; 5812 int os; 5813 5814 if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE)) 5815 return; 5816 5817 WARN_ON_ONCE(!tick_work_cpu); 5818 5819 twork = per_cpu_ptr(tick_work_cpu, cpu); 5820 /* There cannot be competing actions, but don't rely on stop-machine. 
*/ 5821 os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING); 5822 WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING); 5823 /* Don't cancel, as this would mess up the state machine. */ 5824 } 5825 #endif /* CONFIG_HOTPLUG_CPU */ 5826 5827 int __init sched_tick_offload_init(void) 5828 { 5829 tick_work_cpu = alloc_percpu(struct tick_work); 5830 BUG_ON(!tick_work_cpu); 5831 return 0; 5832 } 5833 5834 #else /* !CONFIG_NO_HZ_FULL */ 5835 static inline void sched_tick_start(int cpu) { } 5836 static inline void sched_tick_stop(int cpu) { } 5837 #endif 5838 5839 #if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \ 5840 defined(CONFIG_TRACE_PREEMPT_TOGGLE)) 5841 /* 5842 * If the value passed in is equal to the current preempt count 5843 * then we just disabled preemption. Start timing the latency. 5844 */ 5845 static inline void preempt_latency_start(int val) 5846 { 5847 if (preempt_count() == val) { 5848 unsigned long ip = get_lock_parent_ip(); 5849 #ifdef CONFIG_DEBUG_PREEMPT 5850 current->preempt_disable_ip = ip; 5851 #endif 5852 trace_preempt_off(CALLER_ADDR0, ip); 5853 } 5854 } 5855 5856 void preempt_count_add(int val) 5857 { 5858 #ifdef CONFIG_DEBUG_PREEMPT 5859 /* 5860 * Underflow? 5861 */ 5862 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) 5863 return; 5864 #endif 5865 __preempt_count_add(val); 5866 #ifdef CONFIG_DEBUG_PREEMPT 5867 /* 5868 * Spinlock count overflowing soon? 5869 */ 5870 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= 5871 PREEMPT_MASK - 10); 5872 #endif 5873 preempt_latency_start(val); 5874 } 5875 EXPORT_SYMBOL(preempt_count_add); 5876 NOKPROBE_SYMBOL(preempt_count_add); 5877 5878 /* 5879 * If the value passed in equals to the current preempt count 5880 * then we just enabled preemption. Stop timing the latency. 5881 */ 5882 static inline void preempt_latency_stop(int val) 5883 { 5884 if (preempt_count() == val) 5885 trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip()); 5886 } 5887 5888 void preempt_count_sub(int val) 5889 { 5890 #ifdef CONFIG_DEBUG_PREEMPT 5891 /* 5892 * Underflow? 5893 */ 5894 if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) 5895 return; 5896 /* 5897 * Is the spinlock portion underflowing? 
5898 */ 5899 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) && 5900 !(preempt_count() & PREEMPT_MASK))) 5901 return; 5902 #endif 5903 5904 preempt_latency_stop(val); 5905 __preempt_count_sub(val); 5906 } 5907 EXPORT_SYMBOL(preempt_count_sub); 5908 NOKPROBE_SYMBOL(preempt_count_sub); 5909 5910 #else 5911 static inline void preempt_latency_start(int val) { } 5912 static inline void preempt_latency_stop(int val) { } 5913 #endif 5914 5915 static inline unsigned long get_preempt_disable_ip(struct task_struct *p) 5916 { 5917 #ifdef CONFIG_DEBUG_PREEMPT 5918 return p->preempt_disable_ip; 5919 #else 5920 return 0; 5921 #endif 5922 } 5923 5924 /* 5925 * Print scheduling while atomic bug: 5926 */ 5927 static noinline void __schedule_bug(struct task_struct *prev) 5928 { 5929 /* Save this before calling printk(), since that will clobber it */ 5930 unsigned long preempt_disable_ip = get_preempt_disable_ip(current); 5931 5932 if (oops_in_progress) 5933 return; 5934 5935 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", 5936 prev->comm, prev->pid, preempt_count()); 5937 5938 debug_show_held_locks(prev); 5939 print_modules(); 5940 if (irqs_disabled()) 5941 print_irqtrace_events(prev); 5942 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) { 5943 pr_err("Preemption disabled at:"); 5944 print_ip_sym(KERN_ERR, preempt_disable_ip); 5945 } 5946 check_panic_on_warn("scheduling while atomic"); 5947 5948 dump_stack(); 5949 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 5950 } 5951 5952 /* 5953 * Various schedule()-time debugging checks and statistics: 5954 */ 5955 static inline void schedule_debug(struct task_struct *prev, bool preempt) 5956 { 5957 #ifdef CONFIG_SCHED_STACK_END_CHECK 5958 if (task_stack_end_corrupted(prev)) 5959 panic("corrupted stack end detected inside scheduler\n"); 5960 5961 if (task_scs_end_corrupted(prev)) 5962 panic("corrupted shadow stack detected inside scheduler\n"); 5963 #endif 5964 5965 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 5966 if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) { 5967 printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n", 5968 prev->comm, prev->pid, prev->non_block_count); 5969 dump_stack(); 5970 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 5971 } 5972 #endif 5973 5974 if (unlikely(in_atomic_preempt_off())) { 5975 __schedule_bug(prev); 5976 preempt_count_set(PREEMPT_DISABLED); 5977 } 5978 rcu_sleep_check(); 5979 WARN_ON_ONCE(ct_state() == CT_STATE_USER); 5980 5981 profile_hit(SCHED_PROFILING, __builtin_return_address(0)); 5982 5983 schedstat_inc(this_rq()->sched_count); 5984 } 5985 5986 static void prev_balance(struct rq *rq, struct task_struct *prev, 5987 struct rq_flags *rf) 5988 { 5989 const struct sched_class *start_class = prev->sched_class; 5990 const struct sched_class *class; 5991 5992 #ifdef CONFIG_SCHED_CLASS_EXT 5993 /* 5994 * SCX requires a balance() call before every pick_task() including when 5995 * waking up from SCHED_IDLE. If @start_class is below SCX, start from 5996 * SCX instead. Also, set a flag to detect missing balance() call. 5997 */ 5998 if (scx_enabled()) { 5999 rq->scx.flags |= SCX_RQ_BAL_PENDING; 6000 if (sched_class_above(&ext_sched_class, start_class)) 6001 start_class = &ext_sched_class; 6002 } 6003 #endif 6004 6005 /* 6006 * We must do the balancing pass before put_prev_task(), such 6007 * that when we release the rq->lock the task is in the same 6008 * state as before we took rq->lock. 6009 * 6010 * We can terminate the balance pass as soon as we know there is 6011 * a runnable task of @class priority or higher. 
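	 *
	 * E.g. once the RT class' balance callback reports a runnable RT
	 * task, there is no point in also balancing the fair class: the RT
	 * task would be picked first anyway.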
6012 */ 6013 for_active_class_range(class, start_class, &idle_sched_class) { 6014 if (class->balance && class->balance(rq, prev, rf)) 6015 break; 6016 } 6017 } 6018 6019 /* 6020 * Pick up the highest-prio task: 6021 */ 6022 static inline struct task_struct * 6023 __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 6024 { 6025 const struct sched_class *class; 6026 struct task_struct *p; 6027 6028 rq->dl_server = NULL; 6029 6030 if (scx_enabled()) 6031 goto restart; 6032 6033 /* 6034 * Optimization: we know that if all tasks are in the fair class we can 6035 * call that function directly, but only if the @prev task wasn't of a 6036 * higher scheduling class, because otherwise those lose the 6037 * opportunity to pull in more work from other CPUs. 6038 */ 6039 if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) && 6040 rq->nr_running == rq->cfs.h_nr_queued)) { 6041 6042 p = pick_next_task_fair(rq, prev, rf); 6043 if (unlikely(p == RETRY_TASK)) 6044 goto restart; 6045 6046 /* Assume the next prioritized class is idle_sched_class */ 6047 if (!p) { 6048 p = pick_task_idle(rq); 6049 put_prev_set_next_task(rq, prev, p); 6050 } 6051 6052 return p; 6053 } 6054 6055 restart: 6056 prev_balance(rq, prev, rf); 6057 6058 for_each_active_class(class) { 6059 if (class->pick_next_task) { 6060 p = class->pick_next_task(rq, prev); 6061 if (p) 6062 return p; 6063 } else { 6064 p = class->pick_task(rq); 6065 if (p) { 6066 put_prev_set_next_task(rq, prev, p); 6067 return p; 6068 } 6069 } 6070 } 6071 6072 BUG(); /* The idle class should always have a runnable task. */ 6073 } 6074 6075 #ifdef CONFIG_SCHED_CORE 6076 static inline bool is_task_rq_idle(struct task_struct *t) 6077 { 6078 return (task_rq(t)->idle == t); 6079 } 6080 6081 static inline bool cookie_equals(struct task_struct *a, unsigned long cookie) 6082 { 6083 return is_task_rq_idle(a) || (a->core_cookie == cookie); 6084 } 6085 6086 static inline bool cookie_match(struct task_struct *a, struct task_struct *b) 6087 { 6088 if (is_task_rq_idle(a) || is_task_rq_idle(b)) 6089 return true; 6090 6091 return a->core_cookie == b->core_cookie; 6092 } 6093 6094 static inline struct task_struct *pick_task(struct rq *rq) 6095 { 6096 const struct sched_class *class; 6097 struct task_struct *p; 6098 6099 rq->dl_server = NULL; 6100 6101 for_each_active_class(class) { 6102 p = class->pick_task(rq); 6103 if (p) 6104 return p; 6105 } 6106 6107 BUG(); /* The idle class should always have a runnable task. */ 6108 } 6109 6110 extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi); 6111 6112 static void queue_core_balance(struct rq *rq); 6113 6114 static struct task_struct * 6115 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 6116 { 6117 struct task_struct *next, *p, *max = NULL; 6118 const struct cpumask *smt_mask; 6119 bool fi_before = false; 6120 bool core_clock_updated = (rq == rq->core); 6121 unsigned long cookie; 6122 int i, cpu, occ = 0; 6123 struct rq *rq_i; 6124 bool need_sync; 6125 6126 if (!sched_core_enabled(rq)) 6127 return __pick_next_task(rq, prev, rf); 6128 6129 cpu = cpu_of(rq); 6130 6131 /* Stopper task is switching into idle, no need core-wide selection. */ 6132 if (cpu_is_offline(cpu)) { 6133 /* 6134 * Reset core_pick so that we don't enter the fastpath when 6135 * coming online. core_pick would already be migrated to 6136 * another cpu during offline. 
6137 */ 6138 rq->core_pick = NULL; 6139 rq->core_dl_server = NULL; 6140 return __pick_next_task(rq, prev, rf); 6141 } 6142 6143 /* 6144 * If there were no {en,de}queues since we picked (IOW, the task 6145 * pointers are all still valid), and we haven't scheduled the last 6146 * pick yet, do so now. 6147 * 6148 * rq->core_pick can be NULL if no selection was made for a CPU because 6149 * it was either offline or went offline during a sibling's core-wide 6150 * selection. In this case, do a core-wide selection. 6151 */ 6152 if (rq->core->core_pick_seq == rq->core->core_task_seq && 6153 rq->core->core_pick_seq != rq->core_sched_seq && 6154 rq->core_pick) { 6155 WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq); 6156 6157 next = rq->core_pick; 6158 rq->dl_server = rq->core_dl_server; 6159 rq->core_pick = NULL; 6160 rq->core_dl_server = NULL; 6161 goto out_set_next; 6162 } 6163 6164 prev_balance(rq, prev, rf); 6165 6166 smt_mask = cpu_smt_mask(cpu); 6167 need_sync = !!rq->core->core_cookie; 6168 6169 /* reset state */ 6170 rq->core->core_cookie = 0UL; 6171 if (rq->core->core_forceidle_count) { 6172 if (!core_clock_updated) { 6173 update_rq_clock(rq->core); 6174 core_clock_updated = true; 6175 } 6176 sched_core_account_forceidle(rq); 6177 /* reset after accounting force idle */ 6178 rq->core->core_forceidle_start = 0; 6179 rq->core->core_forceidle_count = 0; 6180 rq->core->core_forceidle_occupation = 0; 6181 need_sync = true; 6182 fi_before = true; 6183 } 6184 6185 /* 6186 * core->core_task_seq, core->core_pick_seq, rq->core_sched_seq 6187 * 6188 * @task_seq guards the task state ({en,de}queues) 6189 * @pick_seq is the @task_seq we did a selection on 6190 * @sched_seq is the @pick_seq we scheduled 6191 * 6192 * However, preemptions can cause multiple picks on the same task set. 6193 * 'Fix' this by also increasing @task_seq for every pick. 6194 */ 6195 rq->core->core_task_seq++; 6196 6197 /* 6198 * Optimize for common case where this CPU has no cookies 6199 * and there are no cookied tasks running on siblings. 6200 */ 6201 if (!need_sync) { 6202 next = pick_task(rq); 6203 if (!next->core_cookie) { 6204 rq->core_pick = NULL; 6205 rq->core_dl_server = NULL; 6206 /* 6207 * For robustness, update the min_vruntime_fi for 6208 * unconstrained picks as well. 6209 */ 6210 WARN_ON_ONCE(fi_before); 6211 task_vruntime_update(rq, next, false); 6212 goto out_set_next; 6213 } 6214 } 6215 6216 /* 6217 * For each thread: do the regular task pick and find the max prio task 6218 * amongst them. 6219 * 6220 * Tie-break prio towards the current CPU 6221 */ 6222 for_each_cpu_wrap(i, smt_mask, cpu) { 6223 rq_i = cpu_rq(i); 6224 6225 /* 6226 * Current cpu always has its clock updated on entrance to 6227 * pick_next_task(). If the current cpu is not the core, 6228 * the core may also have been updated above. 6229 */ 6230 if (i != cpu && (rq_i != rq->core || !core_clock_updated)) 6231 update_rq_clock(rq_i); 6232 6233 rq_i->core_pick = p = pick_task(rq_i); 6234 rq_i->core_dl_server = rq_i->dl_server; 6235 6236 if (!max || prio_less(max, p, fi_before)) 6237 max = p; 6238 } 6239 6240 cookie = rq->core->core_cookie = max->core_cookie; 6241 6242 /* 6243 * For each thread: try and find a runnable task that matches @max or 6244 * force idle. 
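	 *
	 * Roughly: keep the sibling's own pick if it already carries @max's
	 * cookie, otherwise look up a cookie-matched task on that sibling,
	 * and failing that force the sibling idle.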
	 */
	for_each_cpu(i, smt_mask) {
		rq_i = cpu_rq(i);
		p = rq_i->core_pick;

		if (!cookie_equals(p, cookie)) {
			p = NULL;
			if (cookie)
				p = sched_core_find(rq_i, cookie);
			if (!p)
				p = idle_sched_class.pick_task(rq_i);
		}

		rq_i->core_pick = p;
		rq_i->core_dl_server = NULL;

		if (p == rq_i->idle) {
			if (rq_i->nr_running) {
				rq->core->core_forceidle_count++;
				if (!fi_before)
					rq->core->core_forceidle_seq++;
			}
		} else {
			occ++;
		}
	}

	if (schedstat_enabled() && rq->core->core_forceidle_count) {
		rq->core->core_forceidle_start = rq_clock(rq->core);
		rq->core->core_forceidle_occupation = occ;
	}

	rq->core->core_pick_seq = rq->core->core_task_seq;
	next = rq->core_pick;
	rq->core_sched_seq = rq->core->core_pick_seq;

	/* Something should have been selected for current CPU */
	WARN_ON_ONCE(!next);

	/*
	 * Reschedule siblings
	 *
	 * NOTE: L1TF -- at this point we're no longer running the old task and
	 * sending an IPI (below) ensures the sibling will no longer be running
	 * their task. This ensures there is no inter-sibling overlap between
	 * non-matching user state.
	 */
	for_each_cpu(i, smt_mask) {
		rq_i = cpu_rq(i);

		/*
		 * An online sibling might have gone offline before a task
		 * could be picked for it, or it might be offline but later
		 * happen to come online, but it's too late and nothing was
		 * picked for it. That's OK - it will pick tasks for itself,
		 * so ignore it.
		 */
		if (!rq_i->core_pick)
			continue;

		/*
		 * Update for new !FI->FI transitions, or if continuing to be in !FI:
		 *				fi_before	fi	update?
		 *				0		0	1
		 *				0		1	1
		 *				1		0	1
		 *				1		1	0
		 */
		if (!(fi_before && rq->core->core_forceidle_count))
			task_vruntime_update(rq_i, rq_i->core_pick, !!rq->core->core_forceidle_count);

		rq_i->core_pick->core_occupation = occ;

		if (i == cpu) {
			rq_i->core_pick = NULL;
			rq_i->core_dl_server = NULL;
			continue;
		}

		/* Did we break L1TF mitigation requirements? */
		WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick));

		if (rq_i->curr == rq_i->core_pick) {
			rq_i->core_pick = NULL;
			rq_i->core_dl_server = NULL;
			continue;
		}

		resched_curr(rq_i);
	}

out_set_next:
	put_prev_set_next_task(rq, prev, next);
	if (rq->core->core_forceidle_count && next == rq->idle)
		queue_core_balance(rq);

	return next;
}

static bool try_steal_cookie(int this, int that)
{
	struct rq *dst = cpu_rq(this), *src = cpu_rq(that);
	struct task_struct *p;
	unsigned long cookie;
	bool success = false;

	guard(irq)();
	guard(double_rq_lock)(dst, src);

	cookie = dst->core->core_cookie;
	if (!cookie)
		return false;

	if (dst->curr != dst->idle)
		return false;

	p = sched_core_find(src, cookie);
	if (!p)
		return false;

	do {
		if (p == src->core_pick || p == src->curr)
			goto next;

		if (!is_cpu_allowed(p, this))
			goto next;

		if (p->core_occupation > dst->idle->core_occupation)
			goto next;
		/*
		 * sched_core_find() and sched_core_next() will ensure
		 * that task @p is not throttled now, we also need to
		 * check whether the runqueue of the destination CPU is
		 * being throttled.
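		 * E.g. @p's group could be bandwidth-throttled on the
		 * destination CPU even though it is runnable on the source,
		 * in which case stealing it would buy nothing.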
6379 */ 6380 if (sched_task_is_throttled(p, this)) 6381 goto next; 6382 6383 move_queued_task_locked(src, dst, p); 6384 resched_curr(dst); 6385 6386 success = true; 6387 break; 6388 6389 next: 6390 p = sched_core_next(p, cookie); 6391 } while (p); 6392 6393 return success; 6394 } 6395 6396 static bool steal_cookie_task(int cpu, struct sched_domain *sd) 6397 { 6398 int i; 6399 6400 for_each_cpu_wrap(i, sched_domain_span(sd), cpu + 1) { 6401 if (i == cpu) 6402 continue; 6403 6404 if (need_resched()) 6405 break; 6406 6407 if (try_steal_cookie(cpu, i)) 6408 return true; 6409 } 6410 6411 return false; 6412 } 6413 6414 static void sched_core_balance(struct rq *rq) 6415 { 6416 struct sched_domain *sd; 6417 int cpu = cpu_of(rq); 6418 6419 guard(preempt)(); 6420 guard(rcu)(); 6421 6422 raw_spin_rq_unlock_irq(rq); 6423 for_each_domain(cpu, sd) { 6424 if (need_resched()) 6425 break; 6426 6427 if (steal_cookie_task(cpu, sd)) 6428 break; 6429 } 6430 raw_spin_rq_lock_irq(rq); 6431 } 6432 6433 static DEFINE_PER_CPU(struct balance_callback, core_balance_head); 6434 6435 static void queue_core_balance(struct rq *rq) 6436 { 6437 if (!sched_core_enabled(rq)) 6438 return; 6439 6440 if (!rq->core->core_cookie) 6441 return; 6442 6443 if (!rq->nr_running) /* not forced idle */ 6444 return; 6445 6446 queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance); 6447 } 6448 6449 DEFINE_LOCK_GUARD_1(core_lock, int, 6450 sched_core_lock(*_T->lock, &_T->flags), 6451 sched_core_unlock(*_T->lock, &_T->flags), 6452 unsigned long flags) 6453 6454 static void sched_core_cpu_starting(unsigned int cpu) 6455 { 6456 const struct cpumask *smt_mask = cpu_smt_mask(cpu); 6457 struct rq *rq = cpu_rq(cpu), *core_rq = NULL; 6458 int t; 6459 6460 guard(core_lock)(&cpu); 6461 6462 WARN_ON_ONCE(rq->core != rq); 6463 6464 /* if we're the first, we'll be our own leader */ 6465 if (cpumask_weight(smt_mask) == 1) 6466 return; 6467 6468 /* find the leader */ 6469 for_each_cpu(t, smt_mask) { 6470 if (t == cpu) 6471 continue; 6472 rq = cpu_rq(t); 6473 if (rq->core == rq) { 6474 core_rq = rq; 6475 break; 6476 } 6477 } 6478 6479 if (WARN_ON_ONCE(!core_rq)) /* whoopsie */ 6480 return; 6481 6482 /* install and validate core_rq */ 6483 for_each_cpu(t, smt_mask) { 6484 rq = cpu_rq(t); 6485 6486 if (t == cpu) 6487 rq->core = core_rq; 6488 6489 WARN_ON_ONCE(rq->core != core_rq); 6490 } 6491 } 6492 6493 static void sched_core_cpu_deactivate(unsigned int cpu) 6494 { 6495 const struct cpumask *smt_mask = cpu_smt_mask(cpu); 6496 struct rq *rq = cpu_rq(cpu), *core_rq = NULL; 6497 int t; 6498 6499 guard(core_lock)(&cpu); 6500 6501 /* if we're the last man standing, nothing to do */ 6502 if (cpumask_weight(smt_mask) == 1) { 6503 WARN_ON_ONCE(rq->core != rq); 6504 return; 6505 } 6506 6507 /* if we're not the leader, nothing to do */ 6508 if (rq->core != rq) 6509 return; 6510 6511 /* find a new leader */ 6512 for_each_cpu(t, smt_mask) { 6513 if (t == cpu) 6514 continue; 6515 core_rq = cpu_rq(t); 6516 break; 6517 } 6518 6519 if (WARN_ON_ONCE(!core_rq)) /* impossible */ 6520 return; 6521 6522 /* copy the shared state to the new leader */ 6523 core_rq->core_task_seq = rq->core_task_seq; 6524 core_rq->core_pick_seq = rq->core_pick_seq; 6525 core_rq->core_cookie = rq->core_cookie; 6526 core_rq->core_forceidle_count = rq->core_forceidle_count; 6527 core_rq->core_forceidle_seq = rq->core_forceidle_seq; 6528 core_rq->core_forceidle_occupation = rq->core_forceidle_occupation; 6529 6530 /* 6531 * Accounting edge for forced idle is handled in 
pick_next_task(). 6532 * Don't need another one here, since the hotplug thread shouldn't 6533 * have a cookie. 6534 */ 6535 core_rq->core_forceidle_start = 0; 6536 6537 /* install new leader */ 6538 for_each_cpu(t, smt_mask) { 6539 rq = cpu_rq(t); 6540 rq->core = core_rq; 6541 } 6542 } 6543 6544 static inline void sched_core_cpu_dying(unsigned int cpu) 6545 { 6546 struct rq *rq = cpu_rq(cpu); 6547 6548 if (rq->core != rq) 6549 rq->core = rq; 6550 } 6551 6552 #else /* !CONFIG_SCHED_CORE */ 6553 6554 static inline void sched_core_cpu_starting(unsigned int cpu) {} 6555 static inline void sched_core_cpu_deactivate(unsigned int cpu) {} 6556 static inline void sched_core_cpu_dying(unsigned int cpu) {} 6557 6558 static struct task_struct * 6559 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 6560 { 6561 return __pick_next_task(rq, prev, rf); 6562 } 6563 6564 #endif /* CONFIG_SCHED_CORE */ 6565 6566 /* 6567 * Constants for the sched_mode argument of __schedule(). 6568 * 6569 * The mode argument allows RT enabled kernels to differentiate a 6570 * preemption from blocking on an 'sleeping' spin/rwlock. 6571 */ 6572 #define SM_IDLE (-1) 6573 #define SM_NONE 0 6574 #define SM_PREEMPT 1 6575 #define SM_RTLOCK_WAIT 2 6576 6577 /* 6578 * Helper function for __schedule() 6579 * 6580 * If a task does not have signals pending, deactivate it 6581 * Otherwise marks the task's __state as RUNNING 6582 */ 6583 static bool try_to_block_task(struct rq *rq, struct task_struct *p, 6584 unsigned long *task_state_p) 6585 { 6586 unsigned long task_state = *task_state_p; 6587 int flags = DEQUEUE_NOCLOCK; 6588 6589 if (signal_pending_state(task_state, p)) { 6590 WRITE_ONCE(p->__state, TASK_RUNNING); 6591 *task_state_p = TASK_RUNNING; 6592 return false; 6593 } 6594 6595 p->sched_contributes_to_load = 6596 (task_state & TASK_UNINTERRUPTIBLE) && 6597 !(task_state & TASK_NOLOAD) && 6598 !(task_state & TASK_FROZEN); 6599 6600 if (unlikely(is_special_task_state(task_state))) 6601 flags |= DEQUEUE_SPECIAL; 6602 6603 /* 6604 * __schedule() ttwu() 6605 * prev_state = prev->state; if (p->on_rq && ...) 6606 * if (prev_state) goto out; 6607 * p->on_rq = 0; smp_acquire__after_ctrl_dep(); 6608 * p->state = TASK_WAKING 6609 * 6610 * Where __schedule() and ttwu() have matching control dependencies. 6611 * 6612 * After this, schedule() must not care about p->state any more. 6613 */ 6614 block_task(rq, p, flags); 6615 return true; 6616 } 6617 6618 /* 6619 * __schedule() is the main scheduler function. 6620 * 6621 * The main means of driving the scheduler and thus entering this function are: 6622 * 6623 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc. 6624 * 6625 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return 6626 * paths. For example, see arch/x86/entry_64.S. 6627 * 6628 * To drive preemption between tasks, the scheduler sets the flag in timer 6629 * interrupt handler sched_tick(). 6630 * 6631 * 3. Wakeups don't really cause entry into schedule(). They add a 6632 * task to the run-queue and that's it. 6633 * 6634 * Now, if the new task added to the run-queue preempts the current 6635 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets 6636 * called on the nearest possible occasion: 6637 * 6638 * - If the kernel is preemptible (CONFIG_PREEMPTION=y): 6639 * 6640 * - in syscall or exception context, at the next outmost 6641 * preempt_enable(). (this might be as soon as the wake_up()'s 6642 * spin_unlock()!) 
6643 * 6644 * - in IRQ context, return from interrupt-handler to 6645 * preemptible context 6646 * 6647 * - If the kernel is not preemptible (CONFIG_PREEMPTION is not set) 6648 * then at the next: 6649 * 6650 * - cond_resched() call 6651 * - explicit schedule() call 6652 * - return from syscall or exception to user-space 6653 * - return from interrupt-handler to user-space 6654 * 6655 * WARNING: must be called with preemption disabled! 6656 */ 6657 static void __sched notrace __schedule(int sched_mode) 6658 { 6659 struct task_struct *prev, *next; 6660 /* 6661 * On PREEMPT_RT kernel, SM_RTLOCK_WAIT is noted 6662 * as a preemption by schedule_debug() and RCU. 6663 */ 6664 bool preempt = sched_mode > SM_NONE; 6665 bool is_switch = false; 6666 unsigned long *switch_count; 6667 unsigned long prev_state; 6668 struct rq_flags rf; 6669 struct rq *rq; 6670 int cpu; 6671 6672 trace_sched_entry_tp(preempt, CALLER_ADDR0); 6673 6674 cpu = smp_processor_id(); 6675 rq = cpu_rq(cpu); 6676 prev = rq->curr; 6677 6678 schedule_debug(prev, preempt); 6679 6680 if (sched_feat(HRTICK) || sched_feat(HRTICK_DL)) 6681 hrtick_clear(rq); 6682 6683 klp_sched_try_switch(prev); 6684 6685 local_irq_disable(); 6686 rcu_note_context_switch(preempt); 6687 6688 /* 6689 * Make sure that signal_pending_state()->signal_pending() below 6690 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) 6691 * done by the caller to avoid the race with signal_wake_up(): 6692 * 6693 * __set_current_state(@state) signal_wake_up() 6694 * schedule() set_tsk_thread_flag(p, TIF_SIGPENDING) 6695 * wake_up_state(p, state) 6696 * LOCK rq->lock LOCK p->pi_state 6697 * smp_mb__after_spinlock() smp_mb__after_spinlock() 6698 * if (signal_pending_state()) if (p->state & @state) 6699 * 6700 * Also, the membarrier system call requires a full memory barrier 6701 * after coming from user-space, before storing to rq->curr; this 6702 * barrier matches a full barrier in the proximity of the membarrier 6703 * system call exit. 6704 */ 6705 rq_lock(rq, &rf); 6706 smp_mb__after_spinlock(); 6707 6708 /* Promote REQ to ACT */ 6709 rq->clock_update_flags <<= 1; 6710 update_rq_clock(rq); 6711 rq->clock_update_flags = RQCF_UPDATED; 6712 6713 switch_count = &prev->nivcsw; 6714 6715 /* Task state changes only considers SM_PREEMPT as preemption */ 6716 preempt = sched_mode == SM_PREEMPT; 6717 6718 /* 6719 * We must load prev->state once (task_struct::state is volatile), such 6720 * that we form a control dependency vs deactivate_task() below. 6721 */ 6722 prev_state = READ_ONCE(prev->__state); 6723 if (sched_mode == SM_IDLE) { 6724 /* SCX must consult the BPF scheduler to tell if rq is empty */ 6725 if (!rq->nr_running && !scx_enabled()) { 6726 next = prev; 6727 goto picked; 6728 } 6729 } else if (!preempt && prev_state) { 6730 try_to_block_task(rq, prev, &prev_state); 6731 switch_count = &prev->nvcsw; 6732 } 6733 6734 next = pick_next_task(rq, prev, &rf); 6735 rq_set_donor(rq, next); 6736 picked: 6737 clear_tsk_need_resched(prev); 6738 clear_preempt_need_resched(); 6739 rq->last_seen_need_resched_ns = 0; 6740 6741 is_switch = prev != next; 6742 if (likely(is_switch)) { 6743 rq->nr_switches++; 6744 /* 6745 * RCU users of rcu_dereference(rq->curr) may not see 6746 * changes to task_struct made by pick_next_task(). 6747 */ 6748 RCU_INIT_POINTER(rq->curr, next); 6749 /* 6750 * The membarrier system call requires each architecture 6751 * to have a full memory barrier after updating 6752 * rq->curr, before returning to user-space. 
6753 * 6754 * Here are the schemes providing that barrier on the 6755 * various architectures: 6756 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC, 6757 * RISC-V. switch_mm() relies on membarrier_arch_switch_mm() 6758 * on PowerPC and on RISC-V. 6759 * - finish_lock_switch() for weakly-ordered 6760 * architectures where spin_unlock is a full barrier, 6761 * - switch_to() for arm64 (weakly-ordered, spin_unlock 6762 * is a RELEASE barrier), 6763 * 6764 * The barrier matches a full barrier in the proximity of 6765 * the membarrier system call entry. 6766 * 6767 * On RISC-V, this barrier pairing is also needed for the 6768 * SYNC_CORE command when switching between processes, cf. 6769 * the inline comments in membarrier_arch_switch_mm(). 6770 */ 6771 ++*switch_count; 6772 6773 migrate_disable_switch(rq, prev); 6774 psi_account_irqtime(rq, prev, next); 6775 psi_sched_switch(prev, next, !task_on_rq_queued(prev) || 6776 prev->se.sched_delayed); 6777 6778 trace_sched_switch(preempt, prev, next, prev_state); 6779 6780 /* Also unlocks the rq: */ 6781 rq = context_switch(rq, prev, next, &rf); 6782 } else { 6783 rq_unpin_lock(rq, &rf); 6784 __balance_callbacks(rq); 6785 raw_spin_rq_unlock_irq(rq); 6786 } 6787 trace_sched_exit_tp(is_switch, CALLER_ADDR0); 6788 } 6789 6790 void __noreturn do_task_dead(void) 6791 { 6792 /* Causes final put_task_struct in finish_task_switch(): */ 6793 set_special_state(TASK_DEAD); 6794 6795 /* Tell freezer to ignore us: */ 6796 current->flags |= PF_NOFREEZE; 6797 6798 __schedule(SM_NONE); 6799 BUG(); 6800 6801 /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */ 6802 for (;;) 6803 cpu_relax(); 6804 } 6805 6806 static inline void sched_submit_work(struct task_struct *tsk) 6807 { 6808 static DEFINE_WAIT_OVERRIDE_MAP(sched_map, LD_WAIT_CONFIG); 6809 unsigned int task_flags; 6810 6811 /* 6812 * Establish LD_WAIT_CONFIG context to ensure none of the code called 6813 * will use a blocking primitive -- which would lead to recursion. 6814 */ 6815 lock_map_acquire_try(&sched_map); 6816 6817 task_flags = tsk->flags; 6818 /* 6819 * If a worker goes to sleep, notify and ask workqueue whether it 6820 * wants to wake up a task to maintain concurrency. 6821 */ 6822 if (task_flags & PF_WQ_WORKER) 6823 wq_worker_sleeping(tsk); 6824 else if (task_flags & PF_IO_WORKER) 6825 io_wq_worker_sleeping(tsk); 6826 6827 /* 6828 * spinlock and rwlock must not flush block requests. This will 6829 * deadlock if the callback attempts to acquire a lock which is 6830 * already acquired. 6831 */ 6832 WARN_ON_ONCE(current->__state & TASK_RTLOCK_WAIT); 6833 6834 /* 6835 * If we are going to sleep and we have plugged IO queued, 6836 * make sure to submit it to avoid deadlocks. 
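	 *
	 * E.g. (the classic case) sleeping on a page writeback that can only
	 * complete once a bio still sitting in our own plug list is
	 * submitted -- nobody else is going to flush it for us.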
	 */
	blk_flush_plug(tsk->plug, true);

	lock_map_release(&sched_map);
}

static void sched_update_worker(struct task_struct *tsk)
{
	if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER | PF_BLOCK_TS)) {
		if (tsk->flags & PF_BLOCK_TS)
			blk_plug_invalidate_ts(tsk);
		if (tsk->flags & PF_WQ_WORKER)
			wq_worker_running(tsk);
		else if (tsk->flags & PF_IO_WORKER)
			io_wq_worker_running(tsk);
	}
}

static __always_inline void __schedule_loop(int sched_mode)
{
	do {
		preempt_disable();
		__schedule(sched_mode);
		sched_preempt_enable_no_resched();
	} while (need_resched());
}

asmlinkage __visible void __sched schedule(void)
{
	struct task_struct *tsk = current;

#ifdef CONFIG_RT_MUTEXES
	lockdep_assert(!tsk->sched_rt_mutex);
#endif

	if (!task_is_running(tsk))
		sched_submit_work(tsk);
	__schedule_loop(SM_NONE);
	sched_update_worker(tsk);
}
EXPORT_SYMBOL(schedule);

/*
 * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
 * state (have scheduled out non-voluntarily) by making sure that all
 * tasks have either left the run queue or have gone into user space.
 * As idle tasks do not do either, they must not ever be preempted
 * (schedule out non-voluntarily).
 *
 * schedule_idle() is similar to schedule_preempt_disabled() except that it
 * never enables preemption because it does not call sched_submit_work().
 */
void __sched schedule_idle(void)
{
	/*
	 * As this skips calling sched_submit_work(), which the idle task does
	 * regardless because that function is a NOP when the task is in a
	 * TASK_RUNNING state, make sure this isn't used someplace that the
	 * current task can be in any other state. Note, idle is always in the
	 * TASK_RUNNING state.
	 */
	WARN_ON_ONCE(current->__state);
	do {
		__schedule(SM_IDLE);
	} while (need_resched());
}

#if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK)
asmlinkage __visible void __sched schedule_user(void)
{
	/*
	 * If we come here after a random call to set_need_resched(),
	 * or we have been woken up remotely but the IPI has not yet arrived,
	 * we haven't yet exited the RCU idle mode. Do it here manually until
	 * we find a better solution.
	 *
	 * NB: There are buggy callers of this function. Ideally we
	 * should warn if prev_state != CT_STATE_USER, but that will trigger
	 * too frequently to make sense yet.
	 */
	enum ctx_state prev_state = exception_enter();
	schedule();
	exception_exit(prev_state);
}
#endif

/**
 * schedule_preempt_disabled - called with preemption disabled
 *
 * Returns with preemption disabled.
Note: preempt_count must be 1 6927 */ 6928 void __sched schedule_preempt_disabled(void) 6929 { 6930 sched_preempt_enable_no_resched(); 6931 schedule(); 6932 preempt_disable(); 6933 } 6934 6935 #ifdef CONFIG_PREEMPT_RT 6936 void __sched notrace schedule_rtlock(void) 6937 { 6938 __schedule_loop(SM_RTLOCK_WAIT); 6939 } 6940 NOKPROBE_SYMBOL(schedule_rtlock); 6941 #endif 6942 6943 static void __sched notrace preempt_schedule_common(void) 6944 { 6945 do { 6946 /* 6947 * Because the function tracer can trace preempt_count_sub() 6948 * and it also uses preempt_enable/disable_notrace(), if 6949 * NEED_RESCHED is set, the preempt_enable_notrace() called 6950 * by the function tracer will call this function again and 6951 * cause infinite recursion. 6952 * 6953 * Preemption must be disabled here before the function 6954 * tracer can trace. Break up preempt_disable() into two 6955 * calls. One to disable preemption without fear of being 6956 * traced. The other to still record the preemption latency, 6957 * which can also be traced by the function tracer. 6958 */ 6959 preempt_disable_notrace(); 6960 preempt_latency_start(1); 6961 __schedule(SM_PREEMPT); 6962 preempt_latency_stop(1); 6963 preempt_enable_no_resched_notrace(); 6964 6965 /* 6966 * Check again in case we missed a preemption opportunity 6967 * between schedule and now. 6968 */ 6969 } while (need_resched()); 6970 } 6971 6972 #ifdef CONFIG_PREEMPTION 6973 /* 6974 * This is the entry point to schedule() from in-kernel preemption 6975 * off of preempt_enable. 6976 */ 6977 asmlinkage __visible void __sched notrace preempt_schedule(void) 6978 { 6979 /* 6980 * If there is a non-zero preempt_count or interrupts are disabled, 6981 * we do not want to preempt the current task. Just return.. 6982 */ 6983 if (likely(!preemptible())) 6984 return; 6985 preempt_schedule_common(); 6986 } 6987 NOKPROBE_SYMBOL(preempt_schedule); 6988 EXPORT_SYMBOL(preempt_schedule); 6989 6990 #ifdef CONFIG_PREEMPT_DYNAMIC 6991 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) 6992 #ifndef preempt_schedule_dynamic_enabled 6993 #define preempt_schedule_dynamic_enabled preempt_schedule 6994 #define preempt_schedule_dynamic_disabled NULL 6995 #endif 6996 DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled); 6997 EXPORT_STATIC_CALL_TRAMP(preempt_schedule); 6998 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) 6999 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule); 7000 void __sched notrace dynamic_preempt_schedule(void) 7001 { 7002 if (!static_branch_unlikely(&sk_dynamic_preempt_schedule)) 7003 return; 7004 preempt_schedule(); 7005 } 7006 NOKPROBE_SYMBOL(dynamic_preempt_schedule); 7007 EXPORT_SYMBOL(dynamic_preempt_schedule); 7008 #endif 7009 #endif 7010 7011 /** 7012 * preempt_schedule_notrace - preempt_schedule called by tracing 7013 * 7014 * The tracing infrastructure uses preempt_enable_notrace to prevent 7015 * recursion and tracing preempt enabling caused by the tracing 7016 * infrastructure itself. But as tracing can happen in areas coming 7017 * from userspace or just about to enter userspace, a preempt enable 7018 * can occur before user_exit() is called. This will cause the scheduler 7019 * to be called when the system is still in usermode. 7020 * 7021 * To prevent this, the preempt_enable_notrace will use this function 7022 * instead of preempt_schedule() to exit user context if needed before 7023 * calling the scheduler. 
 */
asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
{
	enum ctx_state prev_ctx;

	if (likely(!preemptible()))
		return;

	do {
		/*
		 * Because the function tracer can trace preempt_count_sub()
		 * and it also uses preempt_enable/disable_notrace(), if
		 * NEED_RESCHED is set, the preempt_enable_notrace() called
		 * by the function tracer will call this function again and
		 * cause infinite recursion.
		 *
		 * Preemption must be disabled here before the function
		 * tracer can trace. Break up preempt_disable() into two
		 * calls. One to disable preemption without fear of being
		 * traced. The other to still record the preemption latency,
		 * which can also be traced by the function tracer.
		 */
		preempt_disable_notrace();
		preempt_latency_start(1);
		/*
		 * Needs preempt disabled in case user_exit() is traced
		 * and the tracer calls preempt_enable_notrace() causing
		 * an infinite recursion.
		 */
		prev_ctx = exception_enter();
		__schedule(SM_PREEMPT);
		exception_exit(prev_ctx);

		preempt_latency_stop(1);
		preempt_enable_no_resched_notrace();
	} while (need_resched());
}
EXPORT_SYMBOL_GPL(preempt_schedule_notrace);

#ifdef CONFIG_PREEMPT_DYNAMIC
#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
#ifndef preempt_schedule_notrace_dynamic_enabled
#define preempt_schedule_notrace_dynamic_enabled	preempt_schedule_notrace
#define preempt_schedule_notrace_dynamic_disabled	NULL
#endif
DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace);
void __sched notrace dynamic_preempt_schedule_notrace(void)
{
	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace))
		return;
	preempt_schedule_notrace();
}
NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace);
EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
#endif
#endif

#endif /* CONFIG_PREEMPTION */

/*
 * This is the entry point to schedule() from kernel preemption
 * off of IRQ context.
 * Note that this is called and returns with IRQs disabled. This will
 * protect us against recursive calling from IRQ contexts.
 */
asmlinkage __visible void __sched preempt_schedule_irq(void)
{
	enum ctx_state prev_state;

	/* Catch callers which need to be fixed */
	BUG_ON(preempt_count() || !irqs_disabled());

	prev_state = exception_enter();

	do {
		preempt_disable();
		local_irq_enable();
		__schedule(SM_PREEMPT);
		local_irq_disable();
		sched_preempt_enable_no_resched();
	} while (need_resched());

	exception_exit(prev_state);
}

int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
			  void *key)
{
	WARN_ON_ONCE(wake_flags & ~(WF_SYNC|WF_CURRENT_CPU));
	return try_to_wake_up(curr->private, mode, wake_flags);
}
EXPORT_SYMBOL(default_wake_function);

const struct sched_class *__setscheduler_class(int policy, int prio)
{
	if (dl_prio(prio))
		return &dl_sched_class;

	if (rt_prio(prio))
		return &rt_sched_class;

#ifdef CONFIG_SCHED_CLASS_EXT
	if (task_should_scx(policy))
		return &ext_sched_class;
#endif

	return &fair_sched_class;
}

#ifdef CONFIG_RT_MUTEXES

/*
 * Would be more useful with typeof()/auto_type but they don't mix with
 * bit-fields. Since it's a local thing, use int. Keep the generic sounding
 * name such that if someone were to implement this function we get to compare
 * notes.
 */
#define fetch_and_set(x, v) ({ int _x = (x); (x) = (v); _x; })

void rt_mutex_pre_schedule(void)
{
	lockdep_assert(!fetch_and_set(current->sched_rt_mutex, 1));
	sched_submit_work(current);
}

void rt_mutex_schedule(void)
{
	lockdep_assert(current->sched_rt_mutex);
	__schedule_loop(SM_NONE);
}

void rt_mutex_post_schedule(void)
{
	sched_update_worker(current);
	lockdep_assert(fetch_and_set(current->sched_rt_mutex, 0));
}

/*
 * rt_mutex_setprio - set the current priority of a task
 * @p: task to boost
 * @pi_task: donor task
 *
 * This function changes the 'effective' priority of a task. It does
 * not touch ->normal_prio like __setscheduler().
 *
 * Used by the rt_mutex code to implement priority inheritance
 * logic. The call site only calls this if the priority of the task changed.
 */
void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
{
	int prio, oldprio, queued, running, queue_flag =
		DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
	const struct sched_class *prev_class, *next_class;
	struct rq_flags rf;
	struct rq *rq;

	/* XXX used to be waiter->prio, not waiter->task->prio */
	prio = __rt_effective_prio(pi_task, p->normal_prio);

	/*
	 * If nothing changed; bail early.
	 */
	if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio))
		return;

	rq = __task_rq_lock(p, &rf);
	update_rq_clock(rq);
	/*
	 * Set under pi_lock && rq->lock, such that the value can be used under
	 * either lock.
	 *
	 * Note that there is a lot of trickiness in making this pointer cache
	 * work right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together
	 * to ensure a task is de-boosted (pi_task is set to NULL) before the
	 * task is allowed to run again (and can exit). This ensures the pointer
	 * points to a blocked task -- which guarantees the task is present.
	 */
	p->pi_top_task = pi_task;

	/*
	 * For FIFO/RR we only need to set prio, if that matches we're done.
	 */
	if (prio == p->prio && !dl_prio(prio))
		goto out_unlock;

	/*
	 * Idle task boosting is a no-no in general. There is one
	 * exception, when PREEMPT_RT and NOHZ is active:
	 *
	 * The idle task calls get_next_timer_interrupt() and holds
	 * the timer wheel base->lock on the CPU and another CPU wants
	 * to access the timer (probably to cancel it). We can safely
	 * ignore the boosting request, as the idle CPU runs this code
	 * with interrupts disabled and will complete the lock
	 * protected section without being interrupted. So there is no
	 * real need to boost.
	 */
	if (unlikely(p == rq->idle)) {
		WARN_ON(p != rq->curr);
		WARN_ON(p->pi_blocked_on);
		goto out_unlock;
	}

	trace_sched_pi_setprio(p, pi_task);
	oldprio = p->prio;

	if (oldprio == prio)
		queue_flag &= ~DEQUEUE_MOVE;

	prev_class = p->sched_class;
	next_class = __setscheduler_class(p->policy, prio);

	if (prev_class != next_class && p->se.sched_delayed)
		dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED | DEQUEUE_NOCLOCK);

	queued = task_on_rq_queued(p);
	running = task_current_donor(rq, p);
	if (queued)
		dequeue_task(rq, p, queue_flag);
	if (running)
		put_prev_task(rq, p);

	/*
	 * The boosting conditions are:
	 * 1. An -rt task is running and holds mutex A
	 *      --> a -dl task blocks on mutex A
	 *
	 * 2. A -dl task is running and holds mutex A
	 *      --> a -dl task blocks on mutex A and could preempt the
	 *          running task
	 */
	if (dl_prio(prio)) {
		if (!dl_prio(p->normal_prio) ||
		    (pi_task && dl_prio(pi_task->prio) &&
		     dl_entity_preempt(&pi_task->dl, &p->dl))) {
			p->dl.pi_se = pi_task->dl.pi_se;
			queue_flag |= ENQUEUE_REPLENISH;
		} else {
			p->dl.pi_se = &p->dl;
		}
	} else if (rt_prio(prio)) {
		if (dl_prio(oldprio))
			p->dl.pi_se = &p->dl;
		if (oldprio < prio)
			queue_flag |= ENQUEUE_HEAD;
	} else {
		if (dl_prio(oldprio))
			p->dl.pi_se = &p->dl;
		if (rt_prio(oldprio))
			p->rt.timeout = 0;
	}

	p->sched_class = next_class;
	p->prio = prio;

	check_class_changing(rq, p, prev_class);

	if (queued)
		enqueue_task(rq, p, queue_flag);
	if (running)
		set_next_task(rq, p);

	check_class_changed(rq, p, prev_class, oldprio);
out_unlock:
	/* Prevent rq from going away on us: */
	preempt_disable();

	rq_unpin_lock(rq, &rf);
	__balance_callbacks(rq);
	raw_spin_rq_unlock(rq);

	preempt_enable();
}
#endif

#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
int __sched __cond_resched(void)
{
	if (should_resched(0) && !irqs_disabled()) {
		preempt_schedule_common();
		return 1;
	}
	/*
	 * In PREEMPT_RCU kernels, ->rcu_read_lock_nesting tells the tick
	 * whether the current CPU is in an RCU read-side critical section,
	 * so the tick can report quiescent states even for CPUs looping
	 * in kernel context. In contrast, in non-preemptible kernels,
	 * RCU readers leave no in-memory hints, which means that CPU-bound
	 * processes executing in kernel context might never report an
	 * RCU quiescent state.
Therefore, the following code causes 7317 * cond_resched() to report a quiescent state, but only when RCU 7318 * is in urgent need of one. 7319 * A third case, preemptible, but non-PREEMPT_RCU provides for 7320 * urgently needed quiescent states via rcu_flavor_sched_clock_irq(). 7321 */ 7322 #ifndef CONFIG_PREEMPT_RCU 7323 rcu_all_qs(); 7324 #endif 7325 return 0; 7326 } 7327 EXPORT_SYMBOL(__cond_resched); 7328 #endif 7329 7330 #ifdef CONFIG_PREEMPT_DYNAMIC 7331 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) 7332 #define cond_resched_dynamic_enabled __cond_resched 7333 #define cond_resched_dynamic_disabled ((void *)&__static_call_return0) 7334 DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched); 7335 EXPORT_STATIC_CALL_TRAMP(cond_resched); 7336 7337 #define might_resched_dynamic_enabled __cond_resched 7338 #define might_resched_dynamic_disabled ((void *)&__static_call_return0) 7339 DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched); 7340 EXPORT_STATIC_CALL_TRAMP(might_resched); 7341 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) 7342 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched); 7343 int __sched dynamic_cond_resched(void) 7344 { 7345 if (!static_branch_unlikely(&sk_dynamic_cond_resched)) 7346 return 0; 7347 return __cond_resched(); 7348 } 7349 EXPORT_SYMBOL(dynamic_cond_resched); 7350 7351 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched); 7352 int __sched dynamic_might_resched(void) 7353 { 7354 if (!static_branch_unlikely(&sk_dynamic_might_resched)) 7355 return 0; 7356 return __cond_resched(); 7357 } 7358 EXPORT_SYMBOL(dynamic_might_resched); 7359 #endif 7360 #endif 7361 7362 /* 7363 * __cond_resched_lock() - if a reschedule is pending, drop the given lock, 7364 * call schedule, and on return reacquire the lock. 7365 * 7366 * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level 7367 * operations here to prevent schedule() from being called twice (once via 7368 * spin_unlock(), once by hand). 
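 *
 * An illustrative caller, using the cond_resched_lock() wrapper from
 * <linux/sched.h>, might look like:
 *
 *	spin_lock(&lock);
 *	for_each_item(item) {
 *		process(item);
 *		cond_resched_lock(&lock);	(may drop and re-take 'lock')
 *	}
 *	spin_unlock(&lock);
 *
 * (for_each_item()/process() are placeholders, not real kernel APIs.)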
7369 */ 7370 int __cond_resched_lock(spinlock_t *lock) 7371 { 7372 int resched = should_resched(PREEMPT_LOCK_OFFSET); 7373 int ret = 0; 7374 7375 lockdep_assert_held(lock); 7376 7377 if (spin_needbreak(lock) || resched) { 7378 spin_unlock(lock); 7379 if (!_cond_resched()) 7380 cpu_relax(); 7381 ret = 1; 7382 spin_lock(lock); 7383 } 7384 return ret; 7385 } 7386 EXPORT_SYMBOL(__cond_resched_lock); 7387 7388 int __cond_resched_rwlock_read(rwlock_t *lock) 7389 { 7390 int resched = should_resched(PREEMPT_LOCK_OFFSET); 7391 int ret = 0; 7392 7393 lockdep_assert_held_read(lock); 7394 7395 if (rwlock_needbreak(lock) || resched) { 7396 read_unlock(lock); 7397 if (!_cond_resched()) 7398 cpu_relax(); 7399 ret = 1; 7400 read_lock(lock); 7401 } 7402 return ret; 7403 } 7404 EXPORT_SYMBOL(__cond_resched_rwlock_read); 7405 7406 int __cond_resched_rwlock_write(rwlock_t *lock) 7407 { 7408 int resched = should_resched(PREEMPT_LOCK_OFFSET); 7409 int ret = 0; 7410 7411 lockdep_assert_held_write(lock); 7412 7413 if (rwlock_needbreak(lock) || resched) { 7414 write_unlock(lock); 7415 if (!_cond_resched()) 7416 cpu_relax(); 7417 ret = 1; 7418 write_lock(lock); 7419 } 7420 return ret; 7421 } 7422 EXPORT_SYMBOL(__cond_resched_rwlock_write); 7423 7424 #ifdef CONFIG_PREEMPT_DYNAMIC 7425 7426 #ifdef CONFIG_GENERIC_ENTRY 7427 #include <linux/entry-common.h> 7428 #endif 7429 7430 /* 7431 * SC:cond_resched 7432 * SC:might_resched 7433 * SC:preempt_schedule 7434 * SC:preempt_schedule_notrace 7435 * SC:irqentry_exit_cond_resched 7436 * 7437 * 7438 * NONE: 7439 * cond_resched <- __cond_resched 7440 * might_resched <- RET0 7441 * preempt_schedule <- NOP 7442 * preempt_schedule_notrace <- NOP 7443 * irqentry_exit_cond_resched <- NOP 7444 * dynamic_preempt_lazy <- false 7445 * 7446 * VOLUNTARY: 7447 * cond_resched <- __cond_resched 7448 * might_resched <- __cond_resched 7449 * preempt_schedule <- NOP 7450 * preempt_schedule_notrace <- NOP 7451 * irqentry_exit_cond_resched <- NOP 7452 * dynamic_preempt_lazy <- false 7453 * 7454 * FULL: 7455 * cond_resched <- RET0 7456 * might_resched <- RET0 7457 * preempt_schedule <- preempt_schedule 7458 * preempt_schedule_notrace <- preempt_schedule_notrace 7459 * irqentry_exit_cond_resched <- irqentry_exit_cond_resched 7460 * dynamic_preempt_lazy <- false 7461 * 7462 * LAZY: 7463 * cond_resched <- RET0 7464 * might_resched <- RET0 7465 * preempt_schedule <- preempt_schedule 7466 * preempt_schedule_notrace <- preempt_schedule_notrace 7467 * irqentry_exit_cond_resched <- irqentry_exit_cond_resched 7468 * dynamic_preempt_lazy <- true 7469 */ 7470 7471 enum { 7472 preempt_dynamic_undefined = -1, 7473 preempt_dynamic_none, 7474 preempt_dynamic_voluntary, 7475 preempt_dynamic_full, 7476 preempt_dynamic_lazy, 7477 }; 7478 7479 int preempt_dynamic_mode = preempt_dynamic_undefined; 7480 7481 int sched_dynamic_mode(const char *str) 7482 { 7483 #ifndef CONFIG_PREEMPT_RT 7484 if (!strcmp(str, "none")) 7485 return preempt_dynamic_none; 7486 7487 if (!strcmp(str, "voluntary")) 7488 return preempt_dynamic_voluntary; 7489 #endif 7490 7491 if (!strcmp(str, "full")) 7492 return preempt_dynamic_full; 7493 7494 #ifdef CONFIG_ARCH_HAS_PREEMPT_LAZY 7495 if (!strcmp(str, "lazy")) 7496 return preempt_dynamic_lazy; 7497 #endif 7498 7499 return -EINVAL; 7500 } 7501 7502 #define preempt_dynamic_key_enable(f) static_key_enable(&sk_dynamic_##f.key) 7503 #define preempt_dynamic_key_disable(f) static_key_disable(&sk_dynamic_##f.key) 7504 7505 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) 7506 #define 
preempt_dynamic_enable(f) static_call_update(f, f##_dynamic_enabled) 7507 #define preempt_dynamic_disable(f) static_call_update(f, f##_dynamic_disabled) 7508 #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) 7509 #define preempt_dynamic_enable(f) preempt_dynamic_key_enable(f) 7510 #define preempt_dynamic_disable(f) preempt_dynamic_key_disable(f) 7511 #else 7512 #error "Unsupported PREEMPT_DYNAMIC mechanism" 7513 #endif 7514 7515 static DEFINE_MUTEX(sched_dynamic_mutex); 7516 7517 static void __sched_dynamic_update(int mode) 7518 { 7519 /* 7520 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in 7521 * the ZERO state, which is invalid. 7522 */ 7523 preempt_dynamic_enable(cond_resched); 7524 preempt_dynamic_enable(might_resched); 7525 preempt_dynamic_enable(preempt_schedule); 7526 preempt_dynamic_enable(preempt_schedule_notrace); 7527 preempt_dynamic_enable(irqentry_exit_cond_resched); 7528 preempt_dynamic_key_disable(preempt_lazy); 7529 7530 switch (mode) { 7531 case preempt_dynamic_none: 7532 preempt_dynamic_enable(cond_resched); 7533 preempt_dynamic_disable(might_resched); 7534 preempt_dynamic_disable(preempt_schedule); 7535 preempt_dynamic_disable(preempt_schedule_notrace); 7536 preempt_dynamic_disable(irqentry_exit_cond_resched); 7537 preempt_dynamic_key_disable(preempt_lazy); 7538 if (mode != preempt_dynamic_mode) 7539 pr_info("Dynamic Preempt: none\n"); 7540 break; 7541 7542 case preempt_dynamic_voluntary: 7543 preempt_dynamic_enable(cond_resched); 7544 preempt_dynamic_enable(might_resched); 7545 preempt_dynamic_disable(preempt_schedule); 7546 preempt_dynamic_disable(preempt_schedule_notrace); 7547 preempt_dynamic_disable(irqentry_exit_cond_resched); 7548 preempt_dynamic_key_disable(preempt_lazy); 7549 if (mode != preempt_dynamic_mode) 7550 pr_info("Dynamic Preempt: voluntary\n"); 7551 break; 7552 7553 case preempt_dynamic_full: 7554 preempt_dynamic_disable(cond_resched); 7555 preempt_dynamic_disable(might_resched); 7556 preempt_dynamic_enable(preempt_schedule); 7557 preempt_dynamic_enable(preempt_schedule_notrace); 7558 preempt_dynamic_enable(irqentry_exit_cond_resched); 7559 preempt_dynamic_key_disable(preempt_lazy); 7560 if (mode != preempt_dynamic_mode) 7561 pr_info("Dynamic Preempt: full\n"); 7562 break; 7563 7564 case preempt_dynamic_lazy: 7565 preempt_dynamic_disable(cond_resched); 7566 preempt_dynamic_disable(might_resched); 7567 preempt_dynamic_enable(preempt_schedule); 7568 preempt_dynamic_enable(preempt_schedule_notrace); 7569 preempt_dynamic_enable(irqentry_exit_cond_resched); 7570 preempt_dynamic_key_enable(preempt_lazy); 7571 if (mode != preempt_dynamic_mode) 7572 pr_info("Dynamic Preempt: lazy\n"); 7573 break; 7574 } 7575 7576 preempt_dynamic_mode = mode; 7577 } 7578 7579 void sched_dynamic_update(int mode) 7580 { 7581 mutex_lock(&sched_dynamic_mutex); 7582 __sched_dynamic_update(mode); 7583 mutex_unlock(&sched_dynamic_mutex); 7584 } 7585 7586 static int __init setup_preempt_mode(char *str) 7587 { 7588 int mode = sched_dynamic_mode(str); 7589 if (mode < 0) { 7590 pr_warn("Dynamic Preempt: unsupported mode: %s\n", str); 7591 return 0; 7592 } 7593 7594 sched_dynamic_update(mode); 7595 return 1; 7596 } 7597 __setup("preempt=", setup_preempt_mode); 7598 7599 static void __init preempt_dynamic_init(void) 7600 { 7601 if (preempt_dynamic_mode == preempt_dynamic_undefined) { 7602 if (IS_ENABLED(CONFIG_PREEMPT_NONE)) { 7603 sched_dynamic_update(preempt_dynamic_none); 7604 } else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) { 7605 
sched_dynamic_update(preempt_dynamic_voluntary); 7606 } else if (IS_ENABLED(CONFIG_PREEMPT_LAZY)) { 7607 sched_dynamic_update(preempt_dynamic_lazy); 7608 } else { 7609 /* Default static call setting, nothing to do */ 7610 WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT)); 7611 preempt_dynamic_mode = preempt_dynamic_full; 7612 pr_info("Dynamic Preempt: full\n"); 7613 } 7614 } 7615 } 7616 7617 #define PREEMPT_MODEL_ACCESSOR(mode) \ 7618 bool preempt_model_##mode(void) \ 7619 { \ 7620 WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \ 7621 return preempt_dynamic_mode == preempt_dynamic_##mode; \ 7622 } \ 7623 EXPORT_SYMBOL_GPL(preempt_model_##mode) 7624 7625 PREEMPT_MODEL_ACCESSOR(none); 7626 PREEMPT_MODEL_ACCESSOR(voluntary); 7627 PREEMPT_MODEL_ACCESSOR(full); 7628 PREEMPT_MODEL_ACCESSOR(lazy); 7629 7630 #else /* !CONFIG_PREEMPT_DYNAMIC: */ 7631 7632 #define preempt_dynamic_mode -1 7633 7634 static inline void preempt_dynamic_init(void) { } 7635 7636 #endif /* CONFIG_PREEMPT_DYNAMIC */ 7637 7638 const char *preempt_modes[] = { 7639 "none", "voluntary", "full", "lazy", NULL, 7640 }; 7641 7642 const char *preempt_model_str(void) 7643 { 7644 bool brace = IS_ENABLED(CONFIG_PREEMPT_RT) && 7645 (IS_ENABLED(CONFIG_PREEMPT_DYNAMIC) || 7646 IS_ENABLED(CONFIG_PREEMPT_LAZY)); 7647 static char buf[128]; 7648 7649 if (IS_ENABLED(CONFIG_PREEMPT_BUILD)) { 7650 struct seq_buf s; 7651 7652 seq_buf_init(&s, buf, sizeof(buf)); 7653 seq_buf_puts(&s, "PREEMPT"); 7654 7655 if (IS_ENABLED(CONFIG_PREEMPT_RT)) 7656 seq_buf_printf(&s, "%sRT%s", 7657 brace ? "_{" : "_", 7658 brace ? "," : ""); 7659 7660 if (IS_ENABLED(CONFIG_PREEMPT_DYNAMIC)) { 7661 seq_buf_printf(&s, "(%s)%s", 7662 preempt_dynamic_mode > 0 ? 7663 preempt_modes[preempt_dynamic_mode] : "undef", 7664 brace ? "}" : ""); 7665 return seq_buf_str(&s); 7666 } 7667 7668 if (IS_ENABLED(CONFIG_PREEMPT_LAZY)) { 7669 seq_buf_printf(&s, "LAZY%s", 7670 brace ? "}" : ""); 7671 return seq_buf_str(&s); 7672 } 7673 7674 return seq_buf_str(&s); 7675 } 7676 7677 if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY_BUILD)) 7678 return "VOLUNTARY"; 7679 7680 return "NONE"; 7681 } 7682 7683 int io_schedule_prepare(void) 7684 { 7685 int old_iowait = current->in_iowait; 7686 7687 current->in_iowait = 1; 7688 blk_flush_plug(current->plug, true); 7689 return old_iowait; 7690 } 7691 7692 void io_schedule_finish(int token) 7693 { 7694 current->in_iowait = token; 7695 } 7696 7697 /* 7698 * This task is about to go to sleep on IO. Increment rq->nr_iowait so 7699 * that process accounting knows that this is a task in IO wait state. 
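 *
 * The io_schedule_prepare()/io_schedule_finish() pair below exists so that
 * callers can wrap their own sleep primitive the same way, roughly:
 *
 *	token = io_schedule_prepare();
 *	ret = schedule_timeout(timeout);	(or any schedule() variant)
 *	io_schedule_finish(token);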
7700 */ 7701 long __sched io_schedule_timeout(long timeout) 7702 { 7703 int token; 7704 long ret; 7705 7706 token = io_schedule_prepare(); 7707 ret = schedule_timeout(timeout); 7708 io_schedule_finish(token); 7709 7710 return ret; 7711 } 7712 EXPORT_SYMBOL(io_schedule_timeout); 7713 7714 void __sched io_schedule(void) 7715 { 7716 int token; 7717 7718 token = io_schedule_prepare(); 7719 schedule(); 7720 io_schedule_finish(token); 7721 } 7722 EXPORT_SYMBOL(io_schedule); 7723 7724 void sched_show_task(struct task_struct *p) 7725 { 7726 unsigned long free; 7727 int ppid; 7728 7729 if (!try_get_task_stack(p)) 7730 return; 7731 7732 pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p)); 7733 7734 if (task_is_running(p)) 7735 pr_cont(" running task "); 7736 free = stack_not_used(p); 7737 ppid = 0; 7738 rcu_read_lock(); 7739 if (pid_alive(p)) 7740 ppid = task_pid_nr(rcu_dereference(p->real_parent)); 7741 rcu_read_unlock(); 7742 pr_cont(" stack:%-5lu pid:%-5d tgid:%-5d ppid:%-6d task_flags:0x%04x flags:0x%08lx\n", 7743 free, task_pid_nr(p), task_tgid_nr(p), 7744 ppid, p->flags, read_task_thread_flags(p)); 7745 7746 print_worker_info(KERN_INFO, p); 7747 print_stop_info(KERN_INFO, p); 7748 print_scx_info(KERN_INFO, p); 7749 show_stack(p, NULL, KERN_INFO); 7750 put_task_stack(p); 7751 } 7752 EXPORT_SYMBOL_GPL(sched_show_task); 7753 7754 static inline bool 7755 state_filter_match(unsigned long state_filter, struct task_struct *p) 7756 { 7757 unsigned int state = READ_ONCE(p->__state); 7758 7759 /* no filter, everything matches */ 7760 if (!state_filter) 7761 return true; 7762 7763 /* filter, but doesn't match */ 7764 if (!(state & state_filter)) 7765 return false; 7766 7767 /* 7768 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows 7769 * TASK_KILLABLE). 7770 */ 7771 if (state_filter == TASK_UNINTERRUPTIBLE && (state & TASK_NOLOAD)) 7772 return false; 7773 7774 return true; 7775 } 7776 7777 7778 void show_state_filter(unsigned int state_filter) 7779 { 7780 struct task_struct *g, *p; 7781 7782 rcu_read_lock(); 7783 for_each_process_thread(g, p) { 7784 /* 7785 * reset the NMI-timeout, listing all files on a slow 7786 * console might take a lot of time: 7787 * Also, reset softlockup watchdogs on all CPUs, because 7788 * another CPU might be blocked waiting for us to process 7789 * an IPI. 7790 */ 7791 touch_nmi_watchdog(); 7792 touch_all_softlockup_watchdogs(); 7793 if (state_filter_match(state_filter, p)) 7794 sched_show_task(p); 7795 } 7796 7797 if (!state_filter) 7798 sysrq_sched_debug_show(); 7799 7800 rcu_read_unlock(); 7801 /* 7802 * Only show locks if all tasks are dumped: 7803 */ 7804 if (!state_filter) 7805 debug_show_all_locks(); 7806 } 7807 7808 /** 7809 * init_idle - set up an idle thread for a given CPU 7810 * @idle: task in question 7811 * @cpu: CPU the idle task belongs to 7812 * 7813 * NOTE: this function does not set the idle thread's NEED_RESCHED 7814 * flag, to make booting more robust. 7815 */ 7816 void __init init_idle(struct task_struct *idle, int cpu) 7817 { 7818 #ifdef CONFIG_SMP 7819 struct affinity_context ac = (struct affinity_context) { 7820 .new_mask = cpumask_of(cpu), 7821 .flags = 0, 7822 }; 7823 #endif 7824 struct rq *rq = cpu_rq(cpu); 7825 unsigned long flags; 7826 7827 raw_spin_lock_irqsave(&idle->pi_lock, flags); 7828 raw_spin_rq_lock(rq); 7829 7830 idle->__state = TASK_RUNNING; 7831 idle->se.exec_start = sched_clock(); 7832 /* 7833 * PF_KTHREAD should already be set at this point; regardless, make it 7834 * look like a proper per-CPU kthread. 
7835 */ 7836 idle->flags |= PF_KTHREAD | PF_NO_SETAFFINITY; 7837 kthread_set_per_cpu(idle, cpu); 7838 7839 #ifdef CONFIG_SMP 7840 /* 7841 * No validation and serialization required at boot time and for 7842 * setting up the idle tasks of not yet online CPUs. 7843 */ 7844 set_cpus_allowed_common(idle, &ac); 7845 #endif 7846 /* 7847 * We're having a chicken and egg problem, even though we are 7848 * holding rq->lock, the CPU isn't yet set to this CPU so the 7849 * lockdep check in task_group() will fail. 7850 * 7851 * Similar case to sched_fork(). / Alternatively we could 7852 * use task_rq_lock() here and obtain the other rq->lock. 7853 * 7854 * Silence PROVE_RCU 7855 */ 7856 rcu_read_lock(); 7857 __set_task_cpu(idle, cpu); 7858 rcu_read_unlock(); 7859 7860 rq->idle = idle; 7861 rq_set_donor(rq, idle); 7862 rcu_assign_pointer(rq->curr, idle); 7863 idle->on_rq = TASK_ON_RQ_QUEUED; 7864 #ifdef CONFIG_SMP 7865 idle->on_cpu = 1; 7866 #endif 7867 raw_spin_rq_unlock(rq); 7868 raw_spin_unlock_irqrestore(&idle->pi_lock, flags); 7869 7870 /* Set the preempt count _outside_ the spinlocks! */ 7871 init_idle_preempt_count(idle, cpu); 7872 7873 /* 7874 * The idle tasks have their own, simple scheduling class: 7875 */ 7876 idle->sched_class = &idle_sched_class; 7877 ftrace_graph_init_idle_task(idle, cpu); 7878 vtime_init_idle(idle, cpu); 7879 #ifdef CONFIG_SMP 7880 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); 7881 #endif 7882 } 7883 7884 #ifdef CONFIG_SMP 7885 7886 int cpuset_cpumask_can_shrink(const struct cpumask *cur, 7887 const struct cpumask *trial) 7888 { 7889 int ret = 1; 7890 7891 if (cpumask_empty(cur)) 7892 return ret; 7893 7894 ret = dl_cpuset_cpumask_can_shrink(cur, trial); 7895 7896 return ret; 7897 } 7898 7899 int task_can_attach(struct task_struct *p) 7900 { 7901 int ret = 0; 7902 7903 /* 7904 * Kthreads which disallow setaffinity shouldn't be moved 7905 * to a new cpuset; we don't want to change their CPU 7906 * affinity and isolating such threads by their set of 7907 * allowed nodes is unnecessary. Thus, cpusets are not 7908 * applicable for such threads. This prevents checking for 7909 * success of set_cpus_allowed_ptr() on all attached tasks 7910 * before cpus_mask may be changed. 
7911 */ 7912 if (p->flags & PF_NO_SETAFFINITY) 7913 ret = -EINVAL; 7914 7915 return ret; 7916 } 7917 7918 bool sched_smp_initialized __read_mostly; 7919 7920 #ifdef CONFIG_NUMA_BALANCING 7921 /* Migrate current task p to target_cpu */ 7922 int migrate_task_to(struct task_struct *p, int target_cpu) 7923 { 7924 struct migration_arg arg = { p, target_cpu }; 7925 int curr_cpu = task_cpu(p); 7926 7927 if (curr_cpu == target_cpu) 7928 return 0; 7929 7930 if (!cpumask_test_cpu(target_cpu, p->cpus_ptr)) 7931 return -EINVAL; 7932 7933 /* TODO: This is not properly updating schedstats */ 7934 7935 trace_sched_move_numa(p, curr_cpu, target_cpu); 7936 return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg); 7937 } 7938 7939 /* 7940 * Requeue a task on a given node and accurately track the number of NUMA 7941 * tasks on the runqueues 7942 */ 7943 void sched_setnuma(struct task_struct *p, int nid) 7944 { 7945 bool queued, running; 7946 struct rq_flags rf; 7947 struct rq *rq; 7948 7949 rq = task_rq_lock(p, &rf); 7950 queued = task_on_rq_queued(p); 7951 running = task_current_donor(rq, p); 7952 7953 if (queued) 7954 dequeue_task(rq, p, DEQUEUE_SAVE); 7955 if (running) 7956 put_prev_task(rq, p); 7957 7958 p->numa_preferred_nid = nid; 7959 7960 if (queued) 7961 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); 7962 if (running) 7963 set_next_task(rq, p); 7964 task_rq_unlock(rq, p, &rf); 7965 } 7966 #endif /* CONFIG_NUMA_BALANCING */ 7967 7968 #ifdef CONFIG_HOTPLUG_CPU 7969 /* 7970 * Invoked on the outgoing CPU in context of the CPU hotplug thread 7971 * after ensuring that there are no user space tasks left on the CPU. 7972 * 7973 * If there is a lazy mm in use on the hotplug thread, drop it and 7974 * switch to init_mm. 7975 * 7976 * The reference count on init_mm is dropped in finish_cpu(). 7977 */ 7978 static void sched_force_init_mm(void) 7979 { 7980 struct mm_struct *mm = current->active_mm; 7981 7982 if (mm != &init_mm) { 7983 mmgrab_lazy_tlb(&init_mm); 7984 local_irq_disable(); 7985 current->active_mm = &init_mm; 7986 switch_mm_irqs_off(mm, &init_mm, current); 7987 local_irq_enable(); 7988 finish_arch_post_lock_switch(); 7989 mmdrop_lazy_tlb(mm); 7990 } 7991 7992 /* finish_cpu(), as ran on the BP, will clean up the active_mm state */ 7993 } 7994 7995 static int __balance_push_cpu_stop(void *arg) 7996 { 7997 struct task_struct *p = arg; 7998 struct rq *rq = this_rq(); 7999 struct rq_flags rf; 8000 int cpu; 8001 8002 raw_spin_lock_irq(&p->pi_lock); 8003 rq_lock(rq, &rf); 8004 8005 update_rq_clock(rq); 8006 8007 if (task_rq(p) == rq && task_on_rq_queued(p)) { 8008 cpu = select_fallback_rq(rq->cpu, p); 8009 rq = __migrate_task(rq, &rf, p, cpu); 8010 } 8011 8012 rq_unlock(rq, &rf); 8013 raw_spin_unlock_irq(&p->pi_lock); 8014 8015 put_task_struct(p); 8016 8017 return 0; 8018 } 8019 8020 static DEFINE_PER_CPU(struct cpu_stop_work, push_work); 8021 8022 /* 8023 * Ensure we only run per-cpu kthreads once the CPU goes !active. 8024 * 8025 * This is enabled below SCHED_AP_ACTIVE; when !cpu_active(), but only 8026 * effective when the hotplug motion is down. 8027 */ 8028 static void balance_push(struct rq *rq) 8029 { 8030 struct task_struct *push_task = rq->curr; 8031 8032 lockdep_assert_rq_held(rq); 8033 8034 /* 8035 * Ensure the thing is persistent until balance_push_set(.on = false); 8036 */ 8037 rq->balance_callback = &balance_push_callback; 8038 8039 /* 8040 * Only active while going offline and when invoked on the outgoing 8041 * CPU. 
8042 */ 8043 if (!cpu_dying(rq->cpu) || rq != this_rq()) 8044 return; 8045 8046 /* 8047 * Both the cpu-hotplug and stop task are in this case and are 8048 * required to complete the hotplug process. 8049 */ 8050 if (kthread_is_per_cpu(push_task) || 8051 is_migration_disabled(push_task)) { 8052 8053 /* 8054 * If this is the idle task on the outgoing CPU try to wake 8055 * up the hotplug control thread which might wait for the 8056 * last task to vanish. The rcuwait_active() check is 8057 * accurate here because the waiter is pinned on this CPU 8058 * and can't obviously be running in parallel. 8059 * 8060 * On RT kernels this also has to check whether there are 8061 * pinned and scheduled out tasks on the runqueue. They 8062 * need to leave the migrate disabled section first. 8063 */ 8064 if (!rq->nr_running && !rq_has_pinned_tasks(rq) && 8065 rcuwait_active(&rq->hotplug_wait)) { 8066 raw_spin_rq_unlock(rq); 8067 rcuwait_wake_up(&rq->hotplug_wait); 8068 raw_spin_rq_lock(rq); 8069 } 8070 return; 8071 } 8072 8073 get_task_struct(push_task); 8074 /* 8075 * Temporarily drop rq->lock such that we can wake-up the stop task. 8076 * Both preemption and IRQs are still disabled. 8077 */ 8078 preempt_disable(); 8079 raw_spin_rq_unlock(rq); 8080 stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task, 8081 this_cpu_ptr(&push_work)); 8082 preempt_enable(); 8083 /* 8084 * At this point need_resched() is true and we'll take the loop in 8085 * schedule(). The next pick is obviously going to be the stop task 8086 * which kthread_is_per_cpu() and will push this task away. 8087 */ 8088 raw_spin_rq_lock(rq); 8089 } 8090 8091 static void balance_push_set(int cpu, bool on) 8092 { 8093 struct rq *rq = cpu_rq(cpu); 8094 struct rq_flags rf; 8095 8096 rq_lock_irqsave(rq, &rf); 8097 if (on) { 8098 WARN_ON_ONCE(rq->balance_callback); 8099 rq->balance_callback = &balance_push_callback; 8100 } else if (rq->balance_callback == &balance_push_callback) { 8101 rq->balance_callback = NULL; 8102 } 8103 rq_unlock_irqrestore(rq, &rf); 8104 } 8105 8106 /* 8107 * Invoked from a CPUs hotplug control thread after the CPU has been marked 8108 * inactive. All tasks which are not per CPU kernel threads are either 8109 * pushed off this CPU now via balance_push() or placed on a different CPU 8110 * during wakeup. Wait until the CPU is quiescent. 
8111 */ 8112 static void balance_hotplug_wait(void) 8113 { 8114 struct rq *rq = this_rq(); 8115 8116 rcuwait_wait_event(&rq->hotplug_wait, 8117 rq->nr_running == 1 && !rq_has_pinned_tasks(rq), 8118 TASK_UNINTERRUPTIBLE); 8119 } 8120 8121 #else 8122 8123 static inline void balance_push(struct rq *rq) 8124 { 8125 } 8126 8127 static inline void balance_push_set(int cpu, bool on) 8128 { 8129 } 8130 8131 static inline void balance_hotplug_wait(void) 8132 { 8133 } 8134 8135 #endif /* CONFIG_HOTPLUG_CPU */ 8136 8137 void set_rq_online(struct rq *rq) 8138 { 8139 if (!rq->online) { 8140 const struct sched_class *class; 8141 8142 cpumask_set_cpu(rq->cpu, rq->rd->online); 8143 rq->online = 1; 8144 8145 for_each_class(class) { 8146 if (class->rq_online) 8147 class->rq_online(rq); 8148 } 8149 } 8150 } 8151 8152 void set_rq_offline(struct rq *rq) 8153 { 8154 if (rq->online) { 8155 const struct sched_class *class; 8156 8157 update_rq_clock(rq); 8158 for_each_class(class) { 8159 if (class->rq_offline) 8160 class->rq_offline(rq); 8161 } 8162 8163 cpumask_clear_cpu(rq->cpu, rq->rd->online); 8164 rq->online = 0; 8165 } 8166 } 8167 8168 static inline void sched_set_rq_online(struct rq *rq, int cpu) 8169 { 8170 struct rq_flags rf; 8171 8172 rq_lock_irqsave(rq, &rf); 8173 if (rq->rd) { 8174 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 8175 set_rq_online(rq); 8176 } 8177 rq_unlock_irqrestore(rq, &rf); 8178 } 8179 8180 static inline void sched_set_rq_offline(struct rq *rq, int cpu) 8181 { 8182 struct rq_flags rf; 8183 8184 rq_lock_irqsave(rq, &rf); 8185 if (rq->rd) { 8186 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 8187 set_rq_offline(rq); 8188 } 8189 rq_unlock_irqrestore(rq, &rf); 8190 } 8191 8192 /* 8193 * used to mark begin/end of suspend/resume: 8194 */ 8195 static int num_cpus_frozen; 8196 8197 /* 8198 * Update cpusets according to cpu_active mask. If cpusets are 8199 * disabled, cpuset_update_active_cpus() becomes a simple wrapper 8200 * around partition_sched_domains(). 8201 * 8202 * If we come here as part of a suspend/resume, don't touch cpusets because we 8203 * want to restore it back to its original state upon resume anyway. 8204 */ 8205 static void cpuset_cpu_active(void) 8206 { 8207 if (cpuhp_tasks_frozen) { 8208 /* 8209 * num_cpus_frozen tracks how many CPUs are involved in suspend 8210 * resume sequence. As long as this is not the last online 8211 * operation in the resume sequence, just build a single sched 8212 * domain, ignoring cpusets. 8213 */ 8214 cpuset_reset_sched_domains(); 8215 if (--num_cpus_frozen) 8216 return; 8217 /* 8218 * This is the last CPU online operation. So fall through and 8219 * restore the original sched domains by considering the 8220 * cpuset configurations. 
8221 */ 8222 cpuset_force_rebuild(); 8223 } 8224 cpuset_update_active_cpus(); 8225 } 8226 8227 static void cpuset_cpu_inactive(unsigned int cpu) 8228 { 8229 if (!cpuhp_tasks_frozen) { 8230 cpuset_update_active_cpus(); 8231 } else { 8232 num_cpus_frozen++; 8233 cpuset_reset_sched_domains(); 8234 } 8235 } 8236 8237 static inline void sched_smt_present_inc(int cpu) 8238 { 8239 #ifdef CONFIG_SCHED_SMT 8240 if (cpumask_weight(cpu_smt_mask(cpu)) == 2) 8241 static_branch_inc_cpuslocked(&sched_smt_present); 8242 #endif 8243 } 8244 8245 static inline void sched_smt_present_dec(int cpu) 8246 { 8247 #ifdef CONFIG_SCHED_SMT 8248 if (cpumask_weight(cpu_smt_mask(cpu)) == 2) 8249 static_branch_dec_cpuslocked(&sched_smt_present); 8250 #endif 8251 } 8252 8253 int sched_cpu_activate(unsigned int cpu) 8254 { 8255 struct rq *rq = cpu_rq(cpu); 8256 8257 /* 8258 * Clear the balance_push callback and prepare to schedule 8259 * regular tasks. 8260 */ 8261 balance_push_set(cpu, false); 8262 8263 /* 8264 * When going up, increment the number of cores with SMT present. 8265 */ 8266 sched_smt_present_inc(cpu); 8267 set_cpu_active(cpu, true); 8268 8269 if (sched_smp_initialized) { 8270 sched_update_numa(cpu, true); 8271 sched_domains_numa_masks_set(cpu); 8272 cpuset_cpu_active(); 8273 } 8274 8275 scx_rq_activate(rq); 8276 8277 /* 8278 * Put the rq online, if not already. This happens: 8279 * 8280 * 1) In the early boot process, because we build the real domains 8281 * after all CPUs have been brought up. 8282 * 8283 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the 8284 * domains. 8285 */ 8286 sched_set_rq_online(rq, cpu); 8287 8288 return 0; 8289 } 8290 8291 int sched_cpu_deactivate(unsigned int cpu) 8292 { 8293 struct rq *rq = cpu_rq(cpu); 8294 int ret; 8295 8296 ret = dl_bw_deactivate(cpu); 8297 8298 if (ret) 8299 return ret; 8300 8301 /* 8302 * Remove CPU from nohz.idle_cpus_mask to prevent participating in 8303 * load balancing when not active 8304 */ 8305 nohz_balance_exit_idle(rq); 8306 8307 set_cpu_active(cpu, false); 8308 8309 /* 8310 * From this point forward, this CPU will refuse to run any task that 8311 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively 8312 * push those tasks away until this gets cleared, see 8313 * sched_cpu_dying(). 8314 */ 8315 balance_push_set(cpu, true); 8316 8317 /* 8318 * We've cleared cpu_active_mask / set balance_push, wait for all 8319 * preempt-disabled and RCU users of this state to go away such that 8320 * all new such users will observe it. 8321 * 8322 * Specifically, we rely on ttwu to no longer target this CPU, see 8323 * ttwu_queue_cond() and is_cpu_allowed(). 8324 * 8325 * Do sync before park smpboot threads to take care the RCU boost case. 8326 */ 8327 synchronize_rcu(); 8328 8329 sched_set_rq_offline(rq, cpu); 8330 8331 scx_rq_deactivate(rq); 8332 8333 /* 8334 * When going down, decrement the number of cores with SMT present. 
8335 */ 8336 sched_smt_present_dec(cpu); 8337 8338 #ifdef CONFIG_SCHED_SMT 8339 sched_core_cpu_deactivate(cpu); 8340 #endif 8341 8342 if (!sched_smp_initialized) 8343 return 0; 8344 8345 sched_update_numa(cpu, false); 8346 cpuset_cpu_inactive(cpu); 8347 sched_domains_numa_masks_clear(cpu); 8348 return 0; 8349 } 8350 8351 static void sched_rq_cpu_starting(unsigned int cpu) 8352 { 8353 struct rq *rq = cpu_rq(cpu); 8354 8355 rq->calc_load_update = calc_load_update; 8356 update_max_interval(); 8357 } 8358 8359 int sched_cpu_starting(unsigned int cpu) 8360 { 8361 sched_core_cpu_starting(cpu); 8362 sched_rq_cpu_starting(cpu); 8363 sched_tick_start(cpu); 8364 return 0; 8365 } 8366 8367 #ifdef CONFIG_HOTPLUG_CPU 8368 8369 /* 8370 * Invoked immediately before the stopper thread is invoked to bring the 8371 * CPU down completely. At this point all per CPU kthreads except the 8372 * hotplug thread (current) and the stopper thread (inactive) have been 8373 * either parked or have been unbound from the outgoing CPU. Ensure that 8374 * any of those which might be on the way out are gone. 8375 * 8376 * If after this point a bound task is being woken on this CPU then the 8377 * responsible hotplug callback has failed to do it's job. 8378 * sched_cpu_dying() will catch it with the appropriate fireworks. 8379 */ 8380 int sched_cpu_wait_empty(unsigned int cpu) 8381 { 8382 balance_hotplug_wait(); 8383 sched_force_init_mm(); 8384 return 0; 8385 } 8386 8387 /* 8388 * Since this CPU is going 'away' for a while, fold any nr_active delta we 8389 * might have. Called from the CPU stopper task after ensuring that the 8390 * stopper is the last running task on the CPU, so nr_active count is 8391 * stable. We need to take the tear-down thread which is calling this into 8392 * account, so we hand in adjust = 1 to the load calculation. 8393 * 8394 * Also see the comment "Global load-average calculations". 8395 */ 8396 static void calc_load_migrate(struct rq *rq) 8397 { 8398 long delta = calc_load_fold_active(rq, 1); 8399 8400 if (delta) 8401 atomic_long_add(delta, &calc_load_tasks); 8402 } 8403 8404 static void dump_rq_tasks(struct rq *rq, const char *loglvl) 8405 { 8406 struct task_struct *g, *p; 8407 int cpu = cpu_of(rq); 8408 8409 lockdep_assert_rq_held(rq); 8410 8411 printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running); 8412 for_each_process_thread(g, p) { 8413 if (task_cpu(p) != cpu) 8414 continue; 8415 8416 if (!task_on_rq_queued(p)) 8417 continue; 8418 8419 printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm); 8420 } 8421 } 8422 8423 int sched_cpu_dying(unsigned int cpu) 8424 { 8425 struct rq *rq = cpu_rq(cpu); 8426 struct rq_flags rf; 8427 8428 /* Handle pending wakeups and then migrate everything off */ 8429 sched_tick_stop(cpu); 8430 8431 rq_lock_irqsave(rq, &rf); 8432 if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) { 8433 WARN(true, "Dying CPU not properly vacated!"); 8434 dump_rq_tasks(rq, KERN_WARNING); 8435 } 8436 rq_unlock_irqrestore(rq, &rf); 8437 8438 calc_load_migrate(rq); 8439 update_max_interval(); 8440 hrtick_clear(rq); 8441 sched_core_cpu_dying(cpu); 8442 return 0; 8443 } 8444 #endif 8445 8446 void __init sched_init_smp(void) 8447 { 8448 sched_init_numa(NUMA_NO_NODE); 8449 8450 /* 8451 * There's no userspace yet to cause hotplug operations; hence all the 8452 * CPU masks are stable and all blatant races in the below code cannot 8453 * happen. 
8454 */ 8455 sched_domains_mutex_lock(); 8456 sched_init_domains(cpu_active_mask); 8457 sched_domains_mutex_unlock(); 8458 8459 /* Move init over to a non-isolated CPU */ 8460 if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0) 8461 BUG(); 8462 current->flags &= ~PF_NO_SETAFFINITY; 8463 sched_init_granularity(); 8464 8465 init_sched_rt_class(); 8466 init_sched_dl_class(); 8467 8468 sched_smp_initialized = true; 8469 } 8470 8471 static int __init migration_init(void) 8472 { 8473 sched_cpu_starting(smp_processor_id()); 8474 return 0; 8475 } 8476 early_initcall(migration_init); 8477 8478 #else 8479 void __init sched_init_smp(void) 8480 { 8481 sched_init_granularity(); 8482 } 8483 #endif /* CONFIG_SMP */ 8484 8485 int in_sched_functions(unsigned long addr) 8486 { 8487 return in_lock_functions(addr) || 8488 (addr >= (unsigned long)__sched_text_start 8489 && addr < (unsigned long)__sched_text_end); 8490 } 8491 8492 #ifdef CONFIG_CGROUP_SCHED 8493 /* 8494 * Default task group. 8495 * Every task in system belongs to this group at bootup. 8496 */ 8497 struct task_group root_task_group; 8498 LIST_HEAD(task_groups); 8499 8500 /* Cacheline aligned slab cache for task_group */ 8501 static struct kmem_cache *task_group_cache __ro_after_init; 8502 #endif 8503 8504 void __init sched_init(void) 8505 { 8506 unsigned long ptr = 0; 8507 int i; 8508 8509 /* Make sure the linker didn't screw up */ 8510 #ifdef CONFIG_SMP 8511 BUG_ON(!sched_class_above(&stop_sched_class, &dl_sched_class)); 8512 #endif 8513 BUG_ON(!sched_class_above(&dl_sched_class, &rt_sched_class)); 8514 BUG_ON(!sched_class_above(&rt_sched_class, &fair_sched_class)); 8515 BUG_ON(!sched_class_above(&fair_sched_class, &idle_sched_class)); 8516 #ifdef CONFIG_SCHED_CLASS_EXT 8517 BUG_ON(!sched_class_above(&fair_sched_class, &ext_sched_class)); 8518 BUG_ON(!sched_class_above(&ext_sched_class, &idle_sched_class)); 8519 #endif 8520 8521 wait_bit_init(); 8522 8523 #ifdef CONFIG_FAIR_GROUP_SCHED 8524 ptr += 2 * nr_cpu_ids * sizeof(void **); 8525 #endif 8526 #ifdef CONFIG_RT_GROUP_SCHED 8527 ptr += 2 * nr_cpu_ids * sizeof(void **); 8528 #endif 8529 if (ptr) { 8530 ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT); 8531 8532 #ifdef CONFIG_FAIR_GROUP_SCHED 8533 root_task_group.se = (struct sched_entity **)ptr; 8534 ptr += nr_cpu_ids * sizeof(void **); 8535 8536 root_task_group.cfs_rq = (struct cfs_rq **)ptr; 8537 ptr += nr_cpu_ids * sizeof(void **); 8538 8539 root_task_group.shares = ROOT_TASK_GROUP_LOAD; 8540 init_cfs_bandwidth(&root_task_group.cfs_bandwidth, NULL); 8541 #endif /* CONFIG_FAIR_GROUP_SCHED */ 8542 #ifdef CONFIG_EXT_GROUP_SCHED 8543 root_task_group.scx_weight = CGROUP_WEIGHT_DFL; 8544 #endif /* CONFIG_EXT_GROUP_SCHED */ 8545 #ifdef CONFIG_RT_GROUP_SCHED 8546 root_task_group.rt_se = (struct sched_rt_entity **)ptr; 8547 ptr += nr_cpu_ids * sizeof(void **); 8548 8549 root_task_group.rt_rq = (struct rt_rq **)ptr; 8550 ptr += nr_cpu_ids * sizeof(void **); 8551 8552 #endif /* CONFIG_RT_GROUP_SCHED */ 8553 } 8554 8555 #ifdef CONFIG_SMP 8556 init_defrootdomain(); 8557 #endif 8558 8559 #ifdef CONFIG_RT_GROUP_SCHED 8560 init_rt_bandwidth(&root_task_group.rt_bandwidth, 8561 global_rt_period(), global_rt_runtime()); 8562 #endif /* CONFIG_RT_GROUP_SCHED */ 8563 8564 #ifdef CONFIG_CGROUP_SCHED 8565 task_group_cache = KMEM_CACHE(task_group, 0); 8566 8567 list_add(&root_task_group.list, &task_groups); 8568 INIT_LIST_HEAD(&root_task_group.children); 8569 INIT_LIST_HEAD(&root_task_group.siblings); 8570 autogroup_init(&init_task); 8571 
#endif /* CONFIG_CGROUP_SCHED */ 8572 8573 for_each_possible_cpu(i) { 8574 struct rq *rq; 8575 8576 rq = cpu_rq(i); 8577 raw_spin_lock_init(&rq->__lock); 8578 rq->nr_running = 0; 8579 rq->calc_load_active = 0; 8580 rq->calc_load_update = jiffies + LOAD_FREQ; 8581 init_cfs_rq(&rq->cfs); 8582 init_rt_rq(&rq->rt); 8583 init_dl_rq(&rq->dl); 8584 #ifdef CONFIG_FAIR_GROUP_SCHED 8585 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); 8586 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; 8587 /* 8588 * How much CPU bandwidth does root_task_group get? 8589 * 8590 * In case of task-groups formed through the cgroup filesystem, it 8591 * gets 100% of the CPU resources in the system. This overall 8592 * system CPU resource is divided among the tasks of 8593 * root_task_group and its child task-groups in a fair manner, 8594 * based on each entity's (task or task-group's) weight 8595 * (se->load.weight). 8596 * 8597 * In other words, if root_task_group has 10 tasks of weight 8598 * 1024) and two child groups A0 and A1 (of weight 1024 each), 8599 * then A0's share of the CPU resource is: 8600 * 8601 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33% 8602 * 8603 * We achieve this by letting root_task_group's tasks sit 8604 * directly in rq->cfs (i.e root_task_group->se[] = NULL). 8605 */ 8606 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL); 8607 #endif /* CONFIG_FAIR_GROUP_SCHED */ 8608 8609 #ifdef CONFIG_RT_GROUP_SCHED 8610 /* 8611 * This is required for init cpu because rt.c:__enable_runtime() 8612 * starts working after scheduler_running, which is not the case 8613 * yet. 8614 */ 8615 rq->rt.rt_runtime = global_rt_runtime(); 8616 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); 8617 #endif 8618 #ifdef CONFIG_SMP 8619 rq->sd = NULL; 8620 rq->rd = NULL; 8621 rq->cpu_capacity = SCHED_CAPACITY_SCALE; 8622 rq->balance_callback = &balance_push_callback; 8623 rq->active_balance = 0; 8624 rq->next_balance = jiffies; 8625 rq->push_cpu = 0; 8626 rq->cpu = i; 8627 rq->online = 0; 8628 rq->idle_stamp = 0; 8629 rq->avg_idle = 2*sysctl_sched_migration_cost; 8630 rq->max_idle_balance_cost = sysctl_sched_migration_cost; 8631 8632 INIT_LIST_HEAD(&rq->cfs_tasks); 8633 8634 rq_attach_root(rq, &def_root_domain); 8635 #ifdef CONFIG_NO_HZ_COMMON 8636 rq->last_blocked_load_update_tick = jiffies; 8637 atomic_set(&rq->nohz_flags, 0); 8638 8639 INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq); 8640 #endif 8641 #ifdef CONFIG_HOTPLUG_CPU 8642 rcuwait_init(&rq->hotplug_wait); 8643 #endif 8644 #endif /* CONFIG_SMP */ 8645 hrtick_rq_init(rq); 8646 atomic_set(&rq->nr_iowait, 0); 8647 fair_server_init(rq); 8648 8649 #ifdef CONFIG_SCHED_CORE 8650 rq->core = rq; 8651 rq->core_pick = NULL; 8652 rq->core_dl_server = NULL; 8653 rq->core_enabled = 0; 8654 rq->core_tree = RB_ROOT; 8655 rq->core_forceidle_count = 0; 8656 rq->core_forceidle_occupation = 0; 8657 rq->core_forceidle_start = 0; 8658 8659 rq->core_cookie = 0UL; 8660 #endif 8661 zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i)); 8662 } 8663 8664 set_load_weight(&init_task, false); 8665 init_task.se.slice = sysctl_sched_base_slice, 8666 8667 /* 8668 * The boot idle thread does lazy MMU switching as well: 8669 */ 8670 mmgrab_lazy_tlb(&init_mm); 8671 enter_lazy_tlb(&init_mm, current); 8672 8673 /* 8674 * The idle task doesn't need the kthread struct to function, but it 8675 * is dressed up as a per-CPU kthread and thus needs to play the part 8676 * if we want to avoid special-casing it in code that deals with per-CPU 8677 * kthreads. 
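	 *
	 * set_kthread_struct() below attaches the struct kthread that generic
	 * helpers such as kthread_is_per_cpu() look at; without it they would
	 * treat the boot idle task as a regular task.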
8678 */ 8679 WARN_ON(!set_kthread_struct(current)); 8680 8681 /* 8682 * Make us the idle thread. Technically, schedule() should not be 8683 * called from this thread, however somewhere below it might be, 8684 * but because we are the idle thread, we just pick up running again 8685 * when this runqueue becomes "idle". 8686 */ 8687 __sched_fork(0, current); 8688 init_idle(current, smp_processor_id()); 8689 8690 calc_load_update = jiffies + LOAD_FREQ; 8691 8692 #ifdef CONFIG_SMP 8693 idle_thread_set_boot_cpu(); 8694 balance_push_set(smp_processor_id(), false); 8695 #endif 8696 init_sched_fair_class(); 8697 init_sched_ext_class(); 8698 8699 psi_init(); 8700 8701 init_uclamp(); 8702 8703 preempt_dynamic_init(); 8704 8705 scheduler_running = 1; 8706 } 8707 8708 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 8709 8710 void __might_sleep(const char *file, int line) 8711 { 8712 unsigned int state = get_current_state(); 8713 /* 8714 * Blocking primitives will set (and therefore destroy) current->state, 8715 * since we will exit with TASK_RUNNING make sure we enter with it, 8716 * otherwise we will destroy state. 8717 */ 8718 WARN_ONCE(state != TASK_RUNNING && current->task_state_change, 8719 "do not call blocking ops when !TASK_RUNNING; " 8720 "state=%x set at [<%p>] %pS\n", state, 8721 (void *)current->task_state_change, 8722 (void *)current->task_state_change); 8723 8724 __might_resched(file, line, 0); 8725 } 8726 EXPORT_SYMBOL(__might_sleep); 8727 8728 static void print_preempt_disable_ip(int preempt_offset, unsigned long ip) 8729 { 8730 if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT)) 8731 return; 8732 8733 if (preempt_count() == preempt_offset) 8734 return; 8735 8736 pr_err("Preemption disabled at:"); 8737 print_ip_sym(KERN_ERR, ip); 8738 } 8739 8740 static inline bool resched_offsets_ok(unsigned int offsets) 8741 { 8742 unsigned int nested = preempt_count(); 8743 8744 nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT; 8745 8746 return nested == offsets; 8747 } 8748 8749 void __might_resched(const char *file, int line, unsigned int offsets) 8750 { 8751 /* Ratelimiting timestamp: */ 8752 static unsigned long prev_jiffy; 8753 8754 unsigned long preempt_disable_ip; 8755 8756 /* WARN_ON_ONCE() by default, no rate limit required: */ 8757 rcu_sleep_check(); 8758 8759 if ((resched_offsets_ok(offsets) && !irqs_disabled() && 8760 !is_idle_task(current) && !current->non_block_count) || 8761 system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING || 8762 oops_in_progress) 8763 return; 8764 8765 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 8766 return; 8767 prev_jiffy = jiffies; 8768 8769 /* Save this before calling printk(), since that will clobber it: */ 8770 preempt_disable_ip = get_preempt_disable_ip(current); 8771 8772 pr_err("BUG: sleeping function called from invalid context at %s:%d\n", 8773 file, line); 8774 pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n", 8775 in_atomic(), irqs_disabled(), current->non_block_count, 8776 current->pid, current->comm); 8777 pr_err("preempt_count: %x, expected: %x\n", preempt_count(), 8778 offsets & MIGHT_RESCHED_PREEMPT_MASK); 8779 8780 if (IS_ENABLED(CONFIG_PREEMPT_RCU)) { 8781 pr_err("RCU nest depth: %d, expected: %u\n", 8782 rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT); 8783 } 8784 8785 if (task_stack_end_corrupted(current)) 8786 pr_emerg("Thread overran stack, or stack corrupted\n"); 8787 8788 debug_show_held_locks(current); 8789 if (irqs_disabled()) 8790 print_irqtrace_events(current); 8791 8792 
print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK, 8793 preempt_disable_ip); 8794 8795 dump_stack(); 8796 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 8797 } 8798 EXPORT_SYMBOL(__might_resched); 8799 8800 void __cant_sleep(const char *file, int line, int preempt_offset) 8801 { 8802 static unsigned long prev_jiffy; 8803 8804 if (irqs_disabled()) 8805 return; 8806 8807 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT)) 8808 return; 8809 8810 if (preempt_count() > preempt_offset) 8811 return; 8812 8813 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 8814 return; 8815 prev_jiffy = jiffies; 8816 8817 printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line); 8818 printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", 8819 in_atomic(), irqs_disabled(), 8820 current->pid, current->comm); 8821 8822 debug_show_held_locks(current); 8823 dump_stack(); 8824 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 8825 } 8826 EXPORT_SYMBOL_GPL(__cant_sleep); 8827 8828 #ifdef CONFIG_SMP 8829 void __cant_migrate(const char *file, int line) 8830 { 8831 static unsigned long prev_jiffy; 8832 8833 if (irqs_disabled()) 8834 return; 8835 8836 if (is_migration_disabled(current)) 8837 return; 8838 8839 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT)) 8840 return; 8841 8842 if (preempt_count() > 0) 8843 return; 8844 8845 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 8846 return; 8847 prev_jiffy = jiffies; 8848 8849 pr_err("BUG: assuming non migratable context at %s:%d\n", file, line); 8850 pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n", 8851 in_atomic(), irqs_disabled(), is_migration_disabled(current), 8852 current->pid, current->comm); 8853 8854 debug_show_held_locks(current); 8855 dump_stack(); 8856 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 8857 } 8858 EXPORT_SYMBOL_GPL(__cant_migrate); 8859 #endif 8860 #endif 8861 8862 #ifdef CONFIG_MAGIC_SYSRQ 8863 void normalize_rt_tasks(void) 8864 { 8865 struct task_struct *g, *p; 8866 struct sched_attr attr = { 8867 .sched_policy = SCHED_NORMAL, 8868 }; 8869 8870 read_lock(&tasklist_lock); 8871 for_each_process_thread(g, p) { 8872 /* 8873 * Only normalize user tasks: 8874 */ 8875 if (p->flags & PF_KTHREAD) 8876 continue; 8877 8878 p->se.exec_start = 0; 8879 schedstat_set(p->stats.wait_start, 0); 8880 schedstat_set(p->stats.sleep_start, 0); 8881 schedstat_set(p->stats.block_start, 0); 8882 8883 if (!rt_or_dl_task(p)) { 8884 /* 8885 * Renice negative nice level userspace 8886 * tasks back to 0: 8887 */ 8888 if (task_nice(p) < 0) 8889 set_user_nice(p, 0); 8890 continue; 8891 } 8892 8893 __sched_setscheduler(p, &attr, false, false); 8894 } 8895 read_unlock(&tasklist_lock); 8896 } 8897 8898 #endif /* CONFIG_MAGIC_SYSRQ */ 8899 8900 #if defined(CONFIG_KGDB_KDB) 8901 /* 8902 * These functions are only useful for KDB. 8903 * 8904 * They can only be called when the whole system has been 8905 * stopped - every CPU needs to be quiescent, and no scheduling 8906 * activity can take place. Using them for anything else would 8907 * be a serious bug, and as a result, they aren't even visible 8908 * under any other configuration. 8909 */ 8910 8911 /** 8912 * curr_task - return the current task for a given CPU. 8913 * @cpu: the processor in question. 8914 * 8915 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 8916 * 8917 * Return: The current task for @cpu. 
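 *
 * Illustrative KDB-side use:
 *
 *	struct task_struct *p = curr_task(raw_smp_processor_id());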
8918 */ 8919 struct task_struct *curr_task(int cpu) 8920 { 8921 return cpu_curr(cpu); 8922 } 8923 8924 #endif /* defined(CONFIG_KGDB_KDB) */ 8925 8926 #ifdef CONFIG_CGROUP_SCHED 8927 /* task_group_lock serializes the addition/removal of task groups */ 8928 static DEFINE_SPINLOCK(task_group_lock); 8929 8930 static inline void alloc_uclamp_sched_group(struct task_group *tg, 8931 struct task_group *parent) 8932 { 8933 #ifdef CONFIG_UCLAMP_TASK_GROUP 8934 enum uclamp_id clamp_id; 8935 8936 for_each_clamp_id(clamp_id) { 8937 uclamp_se_set(&tg->uclamp_req[clamp_id], 8938 uclamp_none(clamp_id), false); 8939 tg->uclamp[clamp_id] = parent->uclamp[clamp_id]; 8940 } 8941 #endif 8942 } 8943 8944 static void sched_free_group(struct task_group *tg) 8945 { 8946 free_fair_sched_group(tg); 8947 free_rt_sched_group(tg); 8948 autogroup_free(tg); 8949 kmem_cache_free(task_group_cache, tg); 8950 } 8951 8952 static void sched_free_group_rcu(struct rcu_head *rcu) 8953 { 8954 sched_free_group(container_of(rcu, struct task_group, rcu)); 8955 } 8956 8957 static void sched_unregister_group(struct task_group *tg) 8958 { 8959 unregister_fair_sched_group(tg); 8960 unregister_rt_sched_group(tg); 8961 /* 8962 * We have to wait for yet another RCU grace period to expire, as 8963 * print_cfs_stats() might run concurrently. 8964 */ 8965 call_rcu(&tg->rcu, sched_free_group_rcu); 8966 } 8967 8968 /* allocate runqueue etc for a new task group */ 8969 struct task_group *sched_create_group(struct task_group *parent) 8970 { 8971 struct task_group *tg; 8972 8973 tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO); 8974 if (!tg) 8975 return ERR_PTR(-ENOMEM); 8976 8977 if (!alloc_fair_sched_group(tg, parent)) 8978 goto err; 8979 8980 if (!alloc_rt_sched_group(tg, parent)) 8981 goto err; 8982 8983 scx_group_set_weight(tg, CGROUP_WEIGHT_DFL); 8984 alloc_uclamp_sched_group(tg, parent); 8985 8986 return tg; 8987 8988 err: 8989 sched_free_group(tg); 8990 return ERR_PTR(-ENOMEM); 8991 } 8992 8993 void sched_online_group(struct task_group *tg, struct task_group *parent) 8994 { 8995 unsigned long flags; 8996 8997 spin_lock_irqsave(&task_group_lock, flags); 8998 list_add_tail_rcu(&tg->list, &task_groups); 8999 9000 /* Root should already exist: */ 9001 WARN_ON(!parent); 9002 9003 tg->parent = parent; 9004 INIT_LIST_HEAD(&tg->children); 9005 list_add_rcu(&tg->siblings, &parent->children); 9006 spin_unlock_irqrestore(&task_group_lock, flags); 9007 9008 online_fair_sched_group(tg); 9009 } 9010 9011 /* RCU callback to free various structures associated with a task group */ 9012 static void sched_unregister_group_rcu(struct rcu_head *rhp) 9013 { 9014 /* Now it should be safe to free those cfs_rqs: */ 9015 sched_unregister_group(container_of(rhp, struct task_group, rcu)); 9016 } 9017 9018 void sched_destroy_group(struct task_group *tg) 9019 { 9020 /* Wait for possible concurrent references to cfs_rqs complete: */ 9021 call_rcu(&tg->rcu, sched_unregister_group_rcu); 9022 } 9023 9024 void sched_release_group(struct task_group *tg) 9025 { 9026 unsigned long flags; 9027 9028 /* 9029 * Unlink first, to avoid walk_tg_tree_from() from finding us (via 9030 * sched_cfs_period_timer()). 9031 * 9032 * For this to be effective, we have to wait for all pending users of 9033 * this task group to leave their RCU critical section to ensure no new 9034 * user will see our dying task group any more. Specifically ensure 9035 * that tg_unthrottle_up() won't add decayed cfs_rq's to it. 
9036 * 9037 * We therefore defer calling unregister_fair_sched_group() to 9038 * sched_unregister_group() which is guarantied to get called only after the 9039 * current RCU grace period has expired. 9040 */ 9041 spin_lock_irqsave(&task_group_lock, flags); 9042 list_del_rcu(&tg->list); 9043 list_del_rcu(&tg->siblings); 9044 spin_unlock_irqrestore(&task_group_lock, flags); 9045 } 9046 9047 static void sched_change_group(struct task_struct *tsk) 9048 { 9049 struct task_group *tg; 9050 9051 /* 9052 * All callers are synchronized by task_rq_lock(); we do not use RCU 9053 * which is pointless here. Thus, we pass "true" to task_css_check() 9054 * to prevent lockdep warnings. 9055 */ 9056 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true), 9057 struct task_group, css); 9058 tg = autogroup_task_group(tsk, tg); 9059 tsk->sched_task_group = tg; 9060 9061 #ifdef CONFIG_FAIR_GROUP_SCHED 9062 if (tsk->sched_class->task_change_group) 9063 tsk->sched_class->task_change_group(tsk); 9064 else 9065 #endif 9066 set_task_rq(tsk, task_cpu(tsk)); 9067 } 9068 9069 /* 9070 * Change task's runqueue when it moves between groups. 9071 * 9072 * The caller of this function should have put the task in its new group by 9073 * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect 9074 * its new group. 9075 */ 9076 void sched_move_task(struct task_struct *tsk, bool for_autogroup) 9077 { 9078 int queued, running, queue_flags = 9079 DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; 9080 struct rq *rq; 9081 9082 CLASS(task_rq_lock, rq_guard)(tsk); 9083 rq = rq_guard.rq; 9084 9085 update_rq_clock(rq); 9086 9087 running = task_current_donor(rq, tsk); 9088 queued = task_on_rq_queued(tsk); 9089 9090 if (queued) 9091 dequeue_task(rq, tsk, queue_flags); 9092 if (running) 9093 put_prev_task(rq, tsk); 9094 9095 sched_change_group(tsk); 9096 if (!for_autogroup) 9097 scx_cgroup_move_task(tsk); 9098 9099 if (queued) 9100 enqueue_task(rq, tsk, queue_flags); 9101 if (running) { 9102 set_next_task(rq, tsk); 9103 /* 9104 * After changing group, the running task may have joined a 9105 * throttled one but it's still the running task. Trigger a 9106 * resched to make sure that task can still run. 
9107 */ 9108 resched_curr(rq); 9109 } 9110 } 9111 9112 static struct cgroup_subsys_state * 9113 cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 9114 { 9115 struct task_group *parent = css_tg(parent_css); 9116 struct task_group *tg; 9117 9118 if (!parent) { 9119 /* This is early initialization for the top cgroup */ 9120 return &root_task_group.css; 9121 } 9122 9123 tg = sched_create_group(parent); 9124 if (IS_ERR(tg)) 9125 return ERR_PTR(-ENOMEM); 9126 9127 return &tg->css; 9128 } 9129 9130 /* Expose task group only after completing cgroup initialization */ 9131 static int cpu_cgroup_css_online(struct cgroup_subsys_state *css) 9132 { 9133 struct task_group *tg = css_tg(css); 9134 struct task_group *parent = css_tg(css->parent); 9135 int ret; 9136 9137 ret = scx_tg_online(tg); 9138 if (ret) 9139 return ret; 9140 9141 if (parent) 9142 sched_online_group(tg, parent); 9143 9144 #ifdef CONFIG_UCLAMP_TASK_GROUP 9145 /* Propagate the effective uclamp value for the new group */ 9146 guard(mutex)(&uclamp_mutex); 9147 guard(rcu)(); 9148 cpu_util_update_eff(css); 9149 #endif 9150 9151 return 0; 9152 } 9153 9154 static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css) 9155 { 9156 struct task_group *tg = css_tg(css); 9157 9158 scx_tg_offline(tg); 9159 } 9160 9161 static void cpu_cgroup_css_released(struct cgroup_subsys_state *css) 9162 { 9163 struct task_group *tg = css_tg(css); 9164 9165 sched_release_group(tg); 9166 } 9167 9168 static void cpu_cgroup_css_free(struct cgroup_subsys_state *css) 9169 { 9170 struct task_group *tg = css_tg(css); 9171 9172 /* 9173 * Relies on the RCU grace period between css_released() and this. 9174 */ 9175 sched_unregister_group(tg); 9176 } 9177 9178 static int cpu_cgroup_can_attach(struct cgroup_taskset *tset) 9179 { 9180 #ifdef CONFIG_RT_GROUP_SCHED 9181 struct task_struct *task; 9182 struct cgroup_subsys_state *css; 9183 9184 if (!rt_group_sched_enabled()) 9185 goto scx_check; 9186 9187 cgroup_taskset_for_each(task, css, tset) { 9188 if (!sched_rt_can_attach(css_tg(css), task)) 9189 return -EINVAL; 9190 } 9191 scx_check: 9192 #endif /* CONFIG_RT_GROUP_SCHED */ 9193 return scx_cgroup_can_attach(tset); 9194 } 9195 9196 static void cpu_cgroup_attach(struct cgroup_taskset *tset) 9197 { 9198 struct task_struct *task; 9199 struct cgroup_subsys_state *css; 9200 9201 cgroup_taskset_for_each(task, css, tset) 9202 sched_move_task(task, false); 9203 9204 scx_cgroup_finish_attach(); 9205 } 9206 9207 static void cpu_cgroup_cancel_attach(struct cgroup_taskset *tset) 9208 { 9209 scx_cgroup_cancel_attach(tset); 9210 } 9211 9212 #ifdef CONFIG_UCLAMP_TASK_GROUP 9213 static void cpu_util_update_eff(struct cgroup_subsys_state *css) 9214 { 9215 struct cgroup_subsys_state *top_css = css; 9216 struct uclamp_se *uc_parent = NULL; 9217 struct uclamp_se *uc_se = NULL; 9218 unsigned int eff[UCLAMP_CNT]; 9219 enum uclamp_id clamp_id; 9220 unsigned int clamps; 9221 9222 lockdep_assert_held(&uclamp_mutex); 9223 WARN_ON_ONCE(!rcu_read_lock_held()); 9224 9225 css_for_each_descendant_pre(css, top_css) { 9226 uc_parent = css_tg(css)->parent 9227 ? 
css_tg(css)->parent->uclamp : NULL; 9228 9229 for_each_clamp_id(clamp_id) { 9230 /* Assume effective clamps matches requested clamps */ 9231 eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value; 9232 /* Cap effective clamps with parent's effective clamps */ 9233 if (uc_parent && 9234 eff[clamp_id] > uc_parent[clamp_id].value) { 9235 eff[clamp_id] = uc_parent[clamp_id].value; 9236 } 9237 } 9238 /* Ensure protection is always capped by limit */ 9239 eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]); 9240 9241 /* Propagate most restrictive effective clamps */ 9242 clamps = 0x0; 9243 uc_se = css_tg(css)->uclamp; 9244 for_each_clamp_id(clamp_id) { 9245 if (eff[clamp_id] == uc_se[clamp_id].value) 9246 continue; 9247 uc_se[clamp_id].value = eff[clamp_id]; 9248 uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]); 9249 clamps |= (0x1 << clamp_id); 9250 } 9251 if (!clamps) { 9252 css = css_rightmost_descendant(css); 9253 continue; 9254 } 9255 9256 /* Immediately update descendants RUNNABLE tasks */ 9257 uclamp_update_active_tasks(css); 9258 } 9259 } 9260 9261 /* 9262 * Integer 10^N with a given N exponent by casting to integer the literal "1eN" 9263 * C expression. Since there is no way to convert a macro argument (N) into a 9264 * character constant, use two levels of macros. 9265 */ 9266 #define _POW10(exp) ((unsigned int)1e##exp) 9267 #define POW10(exp) _POW10(exp) 9268 9269 struct uclamp_request { 9270 #define UCLAMP_PERCENT_SHIFT 2 9271 #define UCLAMP_PERCENT_SCALE (100 * POW10(UCLAMP_PERCENT_SHIFT)) 9272 s64 percent; 9273 u64 util; 9274 int ret; 9275 }; 9276 9277 static inline struct uclamp_request 9278 capacity_from_percent(char *buf) 9279 { 9280 struct uclamp_request req = { 9281 .percent = UCLAMP_PERCENT_SCALE, 9282 .util = SCHED_CAPACITY_SCALE, 9283 .ret = 0, 9284 }; 9285 9286 buf = strim(buf); 9287 if (strcmp(buf, "max")) { 9288 req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT, 9289 &req.percent); 9290 if (req.ret) 9291 return req; 9292 if ((u64)req.percent > UCLAMP_PERCENT_SCALE) { 9293 req.ret = -ERANGE; 9294 return req; 9295 } 9296 9297 req.util = req.percent << SCHED_CAPACITY_SHIFT; 9298 req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE); 9299 } 9300 9301 return req; 9302 } 9303 9304 static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf, 9305 size_t nbytes, loff_t off, 9306 enum uclamp_id clamp_id) 9307 { 9308 struct uclamp_request req; 9309 struct task_group *tg; 9310 9311 req = capacity_from_percent(buf); 9312 if (req.ret) 9313 return req.ret; 9314 9315 sched_uclamp_enable(); 9316 9317 guard(mutex)(&uclamp_mutex); 9318 guard(rcu)(); 9319 9320 tg = css_tg(of_css(of)); 9321 if (tg->uclamp_req[clamp_id].value != req.util) 9322 uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false); 9323 9324 /* 9325 * Because of not recoverable conversion rounding we keep track of the 9326 * exact requested value 9327 */ 9328 tg->uclamp_pct[clamp_id] = req.percent; 9329 9330 /* Update effective clamps to track the most restrictive value */ 9331 cpu_util_update_eff(of_css(of)); 9332 9333 return nbytes; 9334 } 9335 9336 static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of, 9337 char *buf, size_t nbytes, 9338 loff_t off) 9339 { 9340 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MIN); 9341 } 9342 9343 static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of, 9344 char *buf, size_t nbytes, 9345 loff_t off) 9346 { 9347 return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MAX); 9348 } 9349 9350 static inline void 
cpu_uclamp_print(struct seq_file *sf, 9351 enum uclamp_id clamp_id) 9352 { 9353 struct task_group *tg; 9354 u64 util_clamp; 9355 u64 percent; 9356 u32 rem; 9357 9358 scoped_guard (rcu) { 9359 tg = css_tg(seq_css(sf)); 9360 util_clamp = tg->uclamp_req[clamp_id].value; 9361 } 9362 9363 if (util_clamp == SCHED_CAPACITY_SCALE) { 9364 seq_puts(sf, "max\n"); 9365 return; 9366 } 9367 9368 percent = tg->uclamp_pct[clamp_id]; 9369 percent = div_u64_rem(percent, POW10(UCLAMP_PERCENT_SHIFT), &rem); 9370 seq_printf(sf, "%llu.%0*u\n", percent, UCLAMP_PERCENT_SHIFT, rem); 9371 } 9372 9373 static int cpu_uclamp_min_show(struct seq_file *sf, void *v) 9374 { 9375 cpu_uclamp_print(sf, UCLAMP_MIN); 9376 return 0; 9377 } 9378 9379 static int cpu_uclamp_max_show(struct seq_file *sf, void *v) 9380 { 9381 cpu_uclamp_print(sf, UCLAMP_MAX); 9382 return 0; 9383 } 9384 #endif /* CONFIG_UCLAMP_TASK_GROUP */ 9385 9386 #ifdef CONFIG_GROUP_SCHED_WEIGHT 9387 static unsigned long tg_weight(struct task_group *tg) 9388 { 9389 #ifdef CONFIG_FAIR_GROUP_SCHED 9390 return scale_load_down(tg->shares); 9391 #else 9392 return sched_weight_from_cgroup(tg->scx_weight); 9393 #endif 9394 } 9395 9396 static int cpu_shares_write_u64(struct cgroup_subsys_state *css, 9397 struct cftype *cftype, u64 shareval) 9398 { 9399 int ret; 9400 9401 if (shareval > scale_load_down(ULONG_MAX)) 9402 shareval = MAX_SHARES; 9403 ret = sched_group_set_shares(css_tg(css), scale_load(shareval)); 9404 if (!ret) 9405 scx_group_set_weight(css_tg(css), 9406 sched_weight_to_cgroup(shareval)); 9407 return ret; 9408 } 9409 9410 static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css, 9411 struct cftype *cft) 9412 { 9413 return tg_weight(css_tg(css)); 9414 } 9415 #endif /* CONFIG_GROUP_SCHED_WEIGHT */ 9416 9417 #ifdef CONFIG_CFS_BANDWIDTH 9418 static DEFINE_MUTEX(cfs_constraints_mutex); 9419 9420 const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */ 9421 static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */ 9422 /* More than 203 days if BW_SHIFT equals 20. */ 9423 static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC; 9424 9425 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime); 9426 9427 static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, 9428 u64 burst) 9429 { 9430 int i, ret = 0, runtime_enabled, runtime_was_enabled; 9431 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 9432 9433 if (tg == &root_task_group) 9434 return -EINVAL; 9435 9436 /* 9437 * Ensure we have at some amount of bandwidth every period. This is 9438 * to prevent reaching a state of large arrears when throttled via 9439 * entity_tick() resulting in prolonged exit starvation. 9440 */ 9441 if (quota < min_cfs_quota_period || period < min_cfs_quota_period) 9442 return -EINVAL; 9443 9444 /* 9445 * Likewise, bound things on the other side by preventing insane quota 9446 * periods. This also allows us to normalize in computing quota 9447 * feasibility. 9448 */ 9449 if (period > max_cfs_quota_period) 9450 return -EINVAL; 9451 9452 /* 9453 * Bound quota to defend quota against overflow during bandwidth shift. 9454 */ 9455 if (quota != RUNTIME_INF && quota > max_cfs_runtime) 9456 return -EINVAL; 9457 9458 if (quota != RUNTIME_INF && (burst > quota || 9459 burst + quota > max_cfs_runtime)) 9460 return -EINVAL; 9461 9462 /* 9463 * Prevent race between setting of cfs_rq->runtime_enabled and 9464 * unthrottle_offline_cfs_rqs(). 
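	 *
	 * The guards below hold cpus_read_lock and cfs_constraints_mutex for
	 * the remainder of this function, so the for_each_online_cpu() walk
	 * further down cannot race with CPUs coming or going.
	 *
	 * (Illustrative check of the limits above: period=100ms, quota=50ms,
	 * burst=20ms is accepted; a quota or period under 1ms, a period over
	 * 1s, or burst > quota is rejected with -EINVAL.)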
9465 */ 9466 guard(cpus_read_lock)(); 9467 guard(mutex)(&cfs_constraints_mutex); 9468 9469 ret = __cfs_schedulable(tg, period, quota); 9470 if (ret) 9471 return ret; 9472 9473 runtime_enabled = quota != RUNTIME_INF; 9474 runtime_was_enabled = cfs_b->quota != RUNTIME_INF; 9475 /* 9476 * If we need to toggle cfs_bandwidth_used, off->on must occur 9477 * before making related changes, and on->off must occur afterwards 9478 */ 9479 if (runtime_enabled && !runtime_was_enabled) 9480 cfs_bandwidth_usage_inc(); 9481 9482 scoped_guard (raw_spinlock_irq, &cfs_b->lock) { 9483 cfs_b->period = ns_to_ktime(period); 9484 cfs_b->quota = quota; 9485 cfs_b->burst = burst; 9486 9487 __refill_cfs_bandwidth_runtime(cfs_b); 9488 9489 /* 9490 * Restart the period timer (if active) to handle new 9491 * period expiry: 9492 */ 9493 if (runtime_enabled) 9494 start_cfs_bandwidth(cfs_b); 9495 } 9496 9497 for_each_online_cpu(i) { 9498 struct cfs_rq *cfs_rq = tg->cfs_rq[i]; 9499 struct rq *rq = cfs_rq->rq; 9500 9501 guard(rq_lock_irq)(rq); 9502 cfs_rq->runtime_enabled = runtime_enabled; 9503 cfs_rq->runtime_remaining = 0; 9504 9505 if (cfs_rq->throttled) 9506 unthrottle_cfs_rq(cfs_rq); 9507 } 9508 9509 if (runtime_was_enabled && !runtime_enabled) 9510 cfs_bandwidth_usage_dec(); 9511 9512 return 0; 9513 } 9514 9515 static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) 9516 { 9517 u64 quota, period, burst; 9518 9519 period = ktime_to_ns(tg->cfs_bandwidth.period); 9520 burst = tg->cfs_bandwidth.burst; 9521 if (cfs_quota_us < 0) 9522 quota = RUNTIME_INF; 9523 else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC) 9524 quota = (u64)cfs_quota_us * NSEC_PER_USEC; 9525 else 9526 return -EINVAL; 9527 9528 return tg_set_cfs_bandwidth(tg, period, quota, burst); 9529 } 9530 9531 static long tg_get_cfs_quota(struct task_group *tg) 9532 { 9533 u64 quota_us; 9534 9535 if (tg->cfs_bandwidth.quota == RUNTIME_INF) 9536 return -1; 9537 9538 quota_us = tg->cfs_bandwidth.quota; 9539 do_div(quota_us, NSEC_PER_USEC); 9540 9541 return quota_us; 9542 } 9543 9544 static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) 9545 { 9546 u64 quota, period, burst; 9547 9548 if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC) 9549 return -EINVAL; 9550 9551 period = (u64)cfs_period_us * NSEC_PER_USEC; 9552 quota = tg->cfs_bandwidth.quota; 9553 burst = tg->cfs_bandwidth.burst; 9554 9555 return tg_set_cfs_bandwidth(tg, period, quota, burst); 9556 } 9557 9558 static long tg_get_cfs_period(struct task_group *tg) 9559 { 9560 u64 cfs_period_us; 9561 9562 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period); 9563 do_div(cfs_period_us, NSEC_PER_USEC); 9564 9565 return cfs_period_us; 9566 } 9567 9568 static int tg_set_cfs_burst(struct task_group *tg, long cfs_burst_us) 9569 { 9570 u64 quota, period, burst; 9571 9572 if ((u64)cfs_burst_us > U64_MAX / NSEC_PER_USEC) 9573 return -EINVAL; 9574 9575 burst = (u64)cfs_burst_us * NSEC_PER_USEC; 9576 period = ktime_to_ns(tg->cfs_bandwidth.period); 9577 quota = tg->cfs_bandwidth.quota; 9578 9579 return tg_set_cfs_bandwidth(tg, period, quota, burst); 9580 } 9581 9582 static long tg_get_cfs_burst(struct task_group *tg) 9583 { 9584 u64 burst_us; 9585 9586 burst_us = tg->cfs_bandwidth.burst; 9587 do_div(burst_us, NSEC_PER_USEC); 9588 9589 return burst_us; 9590 } 9591 9592 static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css, 9593 struct cftype *cft) 9594 { 9595 return tg_get_cfs_quota(css_tg(css)); 9596 } 9597 9598 static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css, 
9599 struct cftype *cftype, s64 cfs_quota_us) 9600 { 9601 return tg_set_cfs_quota(css_tg(css), cfs_quota_us); 9602 } 9603 9604 static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css, 9605 struct cftype *cft) 9606 { 9607 return tg_get_cfs_period(css_tg(css)); 9608 } 9609 9610 static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css, 9611 struct cftype *cftype, u64 cfs_period_us) 9612 { 9613 return tg_set_cfs_period(css_tg(css), cfs_period_us); 9614 } 9615 9616 static u64 cpu_cfs_burst_read_u64(struct cgroup_subsys_state *css, 9617 struct cftype *cft) 9618 { 9619 return tg_get_cfs_burst(css_tg(css)); 9620 } 9621 9622 static int cpu_cfs_burst_write_u64(struct cgroup_subsys_state *css, 9623 struct cftype *cftype, u64 cfs_burst_us) 9624 { 9625 return tg_set_cfs_burst(css_tg(css), cfs_burst_us); 9626 } 9627 9628 struct cfs_schedulable_data { 9629 struct task_group *tg; 9630 u64 period, quota; 9631 }; 9632 9633 /* 9634 * normalize group quota/period to be quota/max_period 9635 * note: units are usecs 9636 */ 9637 static u64 normalize_cfs_quota(struct task_group *tg, 9638 struct cfs_schedulable_data *d) 9639 { 9640 u64 quota, period; 9641 9642 if (tg == d->tg) { 9643 period = d->period; 9644 quota = d->quota; 9645 } else { 9646 period = tg_get_cfs_period(tg); 9647 quota = tg_get_cfs_quota(tg); 9648 } 9649 9650 /* note: these should typically be equivalent */ 9651 if (quota == RUNTIME_INF || quota == -1) 9652 return RUNTIME_INF; 9653 9654 return to_ratio(period, quota); 9655 } 9656 9657 static int tg_cfs_schedulable_down(struct task_group *tg, void *data) 9658 { 9659 struct cfs_schedulable_data *d = data; 9660 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 9661 s64 quota = 0, parent_quota = -1; 9662 9663 if (!tg->parent) { 9664 quota = RUNTIME_INF; 9665 } else { 9666 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth; 9667 9668 quota = normalize_cfs_quota(tg, d); 9669 parent_quota = parent_b->hierarchical_quota; 9670 9671 /* 9672 * Ensure max(child_quota) <= parent_quota. On cgroup2, 9673 * always take the non-RUNTIME_INF min. On cgroup1, only 9674 * inherit when no limit is set. In both cases this is used 9675 * by the scheduler to determine if a given CFS task has a 9676 * bandwidth constraint at some higher level. 
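		 *
		 * Example: with the parent capped at 100ms per 100ms period,
		 * a child asking for 200ms per 100ms gets the parent's value
		 * as its hierarchical quota on cgroup2, while the same
		 * configuration is rejected with -EINVAL on cgroup1.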
9677 */ 9678 if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) { 9679 if (quota == RUNTIME_INF) 9680 quota = parent_quota; 9681 else if (parent_quota != RUNTIME_INF) 9682 quota = min(quota, parent_quota); 9683 } else { 9684 if (quota == RUNTIME_INF) 9685 quota = parent_quota; 9686 else if (parent_quota != RUNTIME_INF && quota > parent_quota) 9687 return -EINVAL; 9688 } 9689 } 9690 cfs_b->hierarchical_quota = quota; 9691 9692 return 0; 9693 } 9694 9695 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota) 9696 { 9697 struct cfs_schedulable_data data = { 9698 .tg = tg, 9699 .period = period, 9700 .quota = quota, 9701 }; 9702 9703 if (quota != RUNTIME_INF) { 9704 do_div(data.period, NSEC_PER_USEC); 9705 do_div(data.quota, NSEC_PER_USEC); 9706 } 9707 9708 guard(rcu)(); 9709 return walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data); 9710 } 9711 9712 static int cpu_cfs_stat_show(struct seq_file *sf, void *v) 9713 { 9714 struct task_group *tg = css_tg(seq_css(sf)); 9715 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 9716 9717 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods); 9718 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled); 9719 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time); 9720 9721 if (schedstat_enabled() && tg != &root_task_group) { 9722 struct sched_statistics *stats; 9723 u64 ws = 0; 9724 int i; 9725 9726 for_each_possible_cpu(i) { 9727 stats = __schedstats_from_se(tg->se[i]); 9728 ws += schedstat_val(stats->wait_sum); 9729 } 9730 9731 seq_printf(sf, "wait_sum %llu\n", ws); 9732 } 9733 9734 seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst); 9735 seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time); 9736 9737 return 0; 9738 } 9739 9740 static u64 throttled_time_self(struct task_group *tg) 9741 { 9742 int i; 9743 u64 total = 0; 9744 9745 for_each_possible_cpu(i) { 9746 total += READ_ONCE(tg->cfs_rq[i]->throttled_clock_self_time); 9747 } 9748 9749 return total; 9750 } 9751 9752 static int cpu_cfs_local_stat_show(struct seq_file *sf, void *v) 9753 { 9754 struct task_group *tg = css_tg(seq_css(sf)); 9755 9756 seq_printf(sf, "throttled_time %llu\n", throttled_time_self(tg)); 9757 9758 return 0; 9759 } 9760 #endif /* CONFIG_CFS_BANDWIDTH */ 9761 9762 #ifdef CONFIG_RT_GROUP_SCHED 9763 static int cpu_rt_runtime_write(struct cgroup_subsys_state *css, 9764 struct cftype *cft, s64 val) 9765 { 9766 return sched_group_set_rt_runtime(css_tg(css), val); 9767 } 9768 9769 static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css, 9770 struct cftype *cft) 9771 { 9772 return sched_group_rt_runtime(css_tg(css)); 9773 } 9774 9775 static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css, 9776 struct cftype *cftype, u64 rt_period_us) 9777 { 9778 return sched_group_set_rt_period(css_tg(css), rt_period_us); 9779 } 9780 9781 static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css, 9782 struct cftype *cft) 9783 { 9784 return sched_group_rt_period(css_tg(css)); 9785 } 9786 #endif /* CONFIG_RT_GROUP_SCHED */ 9787 9788 #ifdef CONFIG_GROUP_SCHED_WEIGHT 9789 static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css, 9790 struct cftype *cft) 9791 { 9792 return css_tg(css)->idle; 9793 } 9794 9795 static int cpu_idle_write_s64(struct cgroup_subsys_state *css, 9796 struct cftype *cft, s64 idle) 9797 { 9798 int ret; 9799 9800 ret = sched_group_set_idle(css_tg(css), idle); 9801 if (!ret) 9802 scx_group_set_idle(css_tg(css), idle); 9803 return ret; 9804 } 9805 #endif 9806 9807 static struct cftype cpu_legacy_files[] = { 9808 #ifdef 
CONFIG_GROUP_SCHED_WEIGHT 9809 { 9810 .name = "shares", 9811 .read_u64 = cpu_shares_read_u64, 9812 .write_u64 = cpu_shares_write_u64, 9813 }, 9814 { 9815 .name = "idle", 9816 .read_s64 = cpu_idle_read_s64, 9817 .write_s64 = cpu_idle_write_s64, 9818 }, 9819 #endif 9820 #ifdef CONFIG_CFS_BANDWIDTH 9821 { 9822 .name = "cfs_quota_us", 9823 .read_s64 = cpu_cfs_quota_read_s64, 9824 .write_s64 = cpu_cfs_quota_write_s64, 9825 }, 9826 { 9827 .name = "cfs_period_us", 9828 .read_u64 = cpu_cfs_period_read_u64, 9829 .write_u64 = cpu_cfs_period_write_u64, 9830 }, 9831 { 9832 .name = "cfs_burst_us", 9833 .read_u64 = cpu_cfs_burst_read_u64, 9834 .write_u64 = cpu_cfs_burst_write_u64, 9835 }, 9836 { 9837 .name = "stat", 9838 .seq_show = cpu_cfs_stat_show, 9839 }, 9840 { 9841 .name = "stat.local", 9842 .seq_show = cpu_cfs_local_stat_show, 9843 }, 9844 #endif 9845 #ifdef CONFIG_UCLAMP_TASK_GROUP 9846 { 9847 .name = "uclamp.min", 9848 .flags = CFTYPE_NOT_ON_ROOT, 9849 .seq_show = cpu_uclamp_min_show, 9850 .write = cpu_uclamp_min_write, 9851 }, 9852 { 9853 .name = "uclamp.max", 9854 .flags = CFTYPE_NOT_ON_ROOT, 9855 .seq_show = cpu_uclamp_max_show, 9856 .write = cpu_uclamp_max_write, 9857 }, 9858 #endif 9859 { } /* Terminate */ 9860 }; 9861 9862 #ifdef CONFIG_RT_GROUP_SCHED 9863 static struct cftype rt_group_files[] = { 9864 { 9865 .name = "rt_runtime_us", 9866 .read_s64 = cpu_rt_runtime_read, 9867 .write_s64 = cpu_rt_runtime_write, 9868 }, 9869 { 9870 .name = "rt_period_us", 9871 .read_u64 = cpu_rt_period_read_uint, 9872 .write_u64 = cpu_rt_period_write_uint, 9873 }, 9874 { } /* Terminate */ 9875 }; 9876 9877 # ifdef CONFIG_RT_GROUP_SCHED_DEFAULT_DISABLED 9878 DEFINE_STATIC_KEY_FALSE(rt_group_sched); 9879 # else 9880 DEFINE_STATIC_KEY_TRUE(rt_group_sched); 9881 # endif 9882 9883 static int __init setup_rt_group_sched(char *str) 9884 { 9885 long val; 9886 9887 if (kstrtol(str, 0, &val) || val < 0 || val > 1) { 9888 pr_warn("Unable to set rt_group_sched\n"); 9889 return 1; 9890 } 9891 if (val) 9892 static_branch_enable(&rt_group_sched); 9893 else 9894 static_branch_disable(&rt_group_sched); 9895 9896 return 1; 9897 } 9898 __setup("rt_group_sched=", setup_rt_group_sched); 9899 9900 static int __init cpu_rt_group_init(void) 9901 { 9902 if (!rt_group_sched_enabled()) 9903 return 0; 9904 9905 WARN_ON(cgroup_add_legacy_cftypes(&cpu_cgrp_subsys, rt_group_files)); 9906 return 0; 9907 } 9908 subsys_initcall(cpu_rt_group_init); 9909 #endif /* CONFIG_RT_GROUP_SCHED */ 9910 9911 static int cpu_extra_stat_show(struct seq_file *sf, 9912 struct cgroup_subsys_state *css) 9913 { 9914 #ifdef CONFIG_CFS_BANDWIDTH 9915 { 9916 struct task_group *tg = css_tg(css); 9917 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; 9918 u64 throttled_usec, burst_usec; 9919 9920 throttled_usec = cfs_b->throttled_time; 9921 do_div(throttled_usec, NSEC_PER_USEC); 9922 burst_usec = cfs_b->burst_time; 9923 do_div(burst_usec, NSEC_PER_USEC); 9924 9925 seq_printf(sf, "nr_periods %d\n" 9926 "nr_throttled %d\n" 9927 "throttled_usec %llu\n" 9928 "nr_bursts %d\n" 9929 "burst_usec %llu\n", 9930 cfs_b->nr_periods, cfs_b->nr_throttled, 9931 throttled_usec, cfs_b->nr_burst, burst_usec); 9932 } 9933 #endif 9934 return 0; 9935 } 9936 9937 static int cpu_local_stat_show(struct seq_file *sf, 9938 struct cgroup_subsys_state *css) 9939 { 9940 #ifdef CONFIG_CFS_BANDWIDTH 9941 { 9942 struct task_group *tg = css_tg(css); 9943 u64 throttled_self_usec; 9944 9945 throttled_self_usec = throttled_time_self(tg); 9946 do_div(throttled_self_usec, NSEC_PER_USEC); 9947 9948 
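		/* Reported as throttled_usec in cgroup2's cpu.stat.local: */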
seq_printf(sf, "throttled_usec %llu\n", 9949 throttled_self_usec); 9950 } 9951 #endif 9952 return 0; 9953 } 9954 9955 #ifdef CONFIG_GROUP_SCHED_WEIGHT 9956 9957 static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css, 9958 struct cftype *cft) 9959 { 9960 return sched_weight_to_cgroup(tg_weight(css_tg(css))); 9961 } 9962 9963 static int cpu_weight_write_u64(struct cgroup_subsys_state *css, 9964 struct cftype *cft, u64 cgrp_weight) 9965 { 9966 unsigned long weight; 9967 int ret; 9968 9969 if (cgrp_weight < CGROUP_WEIGHT_MIN || cgrp_weight > CGROUP_WEIGHT_MAX) 9970 return -ERANGE; 9971 9972 weight = sched_weight_from_cgroup(cgrp_weight); 9973 9974 ret = sched_group_set_shares(css_tg(css), scale_load(weight)); 9975 if (!ret) 9976 scx_group_set_weight(css_tg(css), cgrp_weight); 9977 return ret; 9978 } 9979 9980 static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css, 9981 struct cftype *cft) 9982 { 9983 unsigned long weight = tg_weight(css_tg(css)); 9984 int last_delta = INT_MAX; 9985 int prio, delta; 9986 9987 /* find the closest nice value to the current weight */ 9988 for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) { 9989 delta = abs(sched_prio_to_weight[prio] - weight); 9990 if (delta >= last_delta) 9991 break; 9992 last_delta = delta; 9993 } 9994 9995 return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO); 9996 } 9997 9998 static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css, 9999 struct cftype *cft, s64 nice) 10000 { 10001 unsigned long weight; 10002 int idx, ret; 10003 10004 if (nice < MIN_NICE || nice > MAX_NICE) 10005 return -ERANGE; 10006 10007 idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO; 10008 idx = array_index_nospec(idx, 40); 10009 weight = sched_prio_to_weight[idx]; 10010 10011 ret = sched_group_set_shares(css_tg(css), scale_load(weight)); 10012 if (!ret) 10013 scx_group_set_weight(css_tg(css), 10014 sched_weight_to_cgroup(weight)); 10015 return ret; 10016 } 10017 #endif /* CONFIG_GROUP_SCHED_WEIGHT */ 10018 10019 static void __maybe_unused cpu_period_quota_print(struct seq_file *sf, 10020 long period, long quota) 10021 { 10022 if (quota < 0) 10023 seq_puts(sf, "max"); 10024 else 10025 seq_printf(sf, "%ld", quota); 10026 10027 seq_printf(sf, " %ld\n", period); 10028 } 10029 10030 /* caller should put the current value in *@periodp before calling */ 10031 static int __maybe_unused cpu_period_quota_parse(char *buf, 10032 u64 *periodp, u64 *quotap) 10033 { 10034 char tok[21]; /* U64_MAX */ 10035 10036 if (sscanf(buf, "%20s %llu", tok, periodp) < 1) 10037 return -EINVAL; 10038 10039 *periodp *= NSEC_PER_USEC; 10040 10041 if (sscanf(tok, "%llu", quotap)) 10042 *quotap *= NSEC_PER_USEC; 10043 else if (!strcmp(tok, "max")) 10044 *quotap = RUNTIME_INF; 10045 else 10046 return -EINVAL; 10047 10048 return 0; 10049 } 10050 10051 #ifdef CONFIG_CFS_BANDWIDTH 10052 static int cpu_max_show(struct seq_file *sf, void *v) 10053 { 10054 struct task_group *tg = css_tg(seq_css(sf)); 10055 10056 cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg)); 10057 return 0; 10058 } 10059 10060 static ssize_t cpu_max_write(struct kernfs_open_file *of, 10061 char *buf, size_t nbytes, loff_t off) 10062 { 10063 struct task_group *tg = css_tg(of_css(of)); 10064 u64 period = tg_get_cfs_period(tg); 10065 u64 burst = tg->cfs_bandwidth.burst; 10066 u64 quota; 10067 int ret; 10068 10069 ret = cpu_period_quota_parse(buf, &period, "a); 10070 if (!ret) 10071 ret = tg_set_cfs_bandwidth(tg, period, quota, burst); 10072 return ret ?: nbytes; 10073 } 10074 #endif 10075 10076 
static struct cftype cpu_files[] = { 10077 #ifdef CONFIG_GROUP_SCHED_WEIGHT 10078 { 10079 .name = "weight", 10080 .flags = CFTYPE_NOT_ON_ROOT, 10081 .read_u64 = cpu_weight_read_u64, 10082 .write_u64 = cpu_weight_write_u64, 10083 }, 10084 { 10085 .name = "weight.nice", 10086 .flags = CFTYPE_NOT_ON_ROOT, 10087 .read_s64 = cpu_weight_nice_read_s64, 10088 .write_s64 = cpu_weight_nice_write_s64, 10089 }, 10090 { 10091 .name = "idle", 10092 .flags = CFTYPE_NOT_ON_ROOT, 10093 .read_s64 = cpu_idle_read_s64, 10094 .write_s64 = cpu_idle_write_s64, 10095 }, 10096 #endif 10097 #ifdef CONFIG_CFS_BANDWIDTH 10098 { 10099 .name = "max", 10100 .flags = CFTYPE_NOT_ON_ROOT, 10101 .seq_show = cpu_max_show, 10102 .write = cpu_max_write, 10103 }, 10104 { 10105 .name = "max.burst", 10106 .flags = CFTYPE_NOT_ON_ROOT, 10107 .read_u64 = cpu_cfs_burst_read_u64, 10108 .write_u64 = cpu_cfs_burst_write_u64, 10109 }, 10110 #endif 10111 #ifdef CONFIG_UCLAMP_TASK_GROUP 10112 { 10113 .name = "uclamp.min", 10114 .flags = CFTYPE_NOT_ON_ROOT, 10115 .seq_show = cpu_uclamp_min_show, 10116 .write = cpu_uclamp_min_write, 10117 }, 10118 { 10119 .name = "uclamp.max", 10120 .flags = CFTYPE_NOT_ON_ROOT, 10121 .seq_show = cpu_uclamp_max_show, 10122 .write = cpu_uclamp_max_write, 10123 }, 10124 #endif 10125 { } /* terminate */ 10126 }; 10127 10128 struct cgroup_subsys cpu_cgrp_subsys = { 10129 .css_alloc = cpu_cgroup_css_alloc, 10130 .css_online = cpu_cgroup_css_online, 10131 .css_offline = cpu_cgroup_css_offline, 10132 .css_released = cpu_cgroup_css_released, 10133 .css_free = cpu_cgroup_css_free, 10134 .css_extra_stat_show = cpu_extra_stat_show, 10135 .css_local_stat_show = cpu_local_stat_show, 10136 .can_attach = cpu_cgroup_can_attach, 10137 .attach = cpu_cgroup_attach, 10138 .cancel_attach = cpu_cgroup_cancel_attach, 10139 .legacy_cftypes = cpu_legacy_files, 10140 .dfl_cftypes = cpu_files, 10141 .early_init = true, 10142 .threaded = true, 10143 }; 10144 10145 #endif /* CONFIG_CGROUP_SCHED */ 10146 10147 void dump_cpu_task(int cpu) 10148 { 10149 if (in_hardirq() && cpu == smp_processor_id()) { 10150 struct pt_regs *regs; 10151 10152 regs = get_irq_regs(); 10153 if (regs) { 10154 show_regs(regs); 10155 return; 10156 } 10157 } 10158 10159 if (trigger_single_cpu_backtrace(cpu)) 10160 return; 10161 10162 pr_info("Task dump for CPU %d:\n", cpu); 10163 sched_show_task(cpu_curr(cpu)); 10164 } 10165 10166 /* 10167 * Nice levels are multiplicative, with a gentle 10% change for every 10168 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to 10169 * nice 1, it will get ~10% less CPU time than another CPU-bound task 10170 * that remained on nice 0. 10171 * 10172 * The "10% effect" is relative and cumulative: from _any_ nice level, 10173 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level 10174 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25. 10175 * If a task goes up by ~10% and another task goes down by ~10% then 10176 * the relative distance between them is ~25%.) 10177 */ 10178 const int sched_prio_to_weight[40] = { 10179 /* -20 */ 88761, 71755, 56483, 46273, 36291, 10180 /* -15 */ 29154, 23254, 18705, 14949, 11916, 10181 /* -10 */ 9548, 7620, 6100, 4904, 3906, 10182 /* -5 */ 3121, 2501, 1991, 1586, 1277, 10183 /* 0 */ 1024, 820, 655, 526, 423, 10184 /* 5 */ 335, 272, 215, 172, 137, 10185 /* 10 */ 110, 87, 70, 56, 45, 10186 /* 15 */ 36, 29, 23, 18, 15, 10187 }; 10188 10189 /* 10190 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, pre-calculated. 
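 *
 * For example, the nice-0 weight is 1024, so its pre-computed inverse
 * below is 2^32 / 1024 = 4194304, and a division x / 1024 can be
 * evaluated as (x * 4194304) >> 32.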
10191 * 10192 * In cases where the weight does not change often, we can use the 10193 * pre-calculated inverse to speed up arithmetic by turning divisions 10194 * into multiplications: 10195 */ 10196 const u32 sched_prio_to_wmult[40] = { 10197 /* -20 */ 48388, 59856, 76040, 92818, 118348, 10198 /* -15 */ 147320, 184698, 229616, 287308, 360437, 10199 /* -10 */ 449829, 563644, 704093, 875809, 1099582, 10200 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326, 10201 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587, 10202 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126, 10203 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717, 10204 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153, 10205 }; 10206 10207 void call_trace_sched_update_nr_running(struct rq *rq, int count) 10208 { 10209 trace_sched_update_nr_running_tp(rq, count); 10210 } 10211 10212 #ifdef CONFIG_SCHED_MM_CID 10213 10214 /* 10215 * @cid_lock: Guarantee forward-progress of cid allocation. 10216 * 10217 * Concurrency ID allocation within a bitmap is mostly lock-free. The cid_lock 10218 * is only used when contention is detected by the lock-free allocation so 10219 * forward progress can be guaranteed. 10220 */ 10221 DEFINE_RAW_SPINLOCK(cid_lock); 10222 10223 /* 10224 * @use_cid_lock: Select cid allocation behavior: lock-free vs spinlock. 10225 * 10226 * When @use_cid_lock is 0, the cid allocation is lock-free. When contention is 10227 * detected, it is set to 1 to ensure that all incoming allocations are 10228 * serialized by @cid_lock until the allocation which detected contention 10229 * completes and sets @use_cid_lock back to 0. This guarantees forward progress 10230 * of a cid allocation. 10231 */ 10232 int use_cid_lock; 10233 10234 /* 10235 * mm_cid remote-clear implements a lock-free algorithm to clear per-mm/cpu cid 10236 * concurrently with respect to the execution of the source runqueue context 10237 * switch. 10238 * 10239 * There is one basic property we want to guarantee here: 10240 * 10241 * (1) Remote-clear should _never_ mark a per-cpu cid UNSET when it is actively 10242 * used by a task. That would lead to concurrent allocation of the cid and 10243 * userspace corruption. 10244 * 10245 * Provide this guarantee by introducing a Dekker memory ordering to guarantee 10246 * that a pair of loads observe at least one of a pair of stores, which can be 10247 * shown as: 10248 * 10249 * X = Y = 0 10250 * 10251 * w[X]=1 w[Y]=1 10252 * MB MB 10253 * r[Y]=y r[X]=x 10254 * 10255 * This guarantees that x==0 && y==0 is impossible. But rather than using 10256 * values 0 and 1, this algorithm cares about specific state transitions of the 10257 * runqueue current task (as updated by the scheduler context switch), and the 10258 * per-mm/cpu cid value. 10259 * 10260 * Let's introduce task (Y) which has task->mm == mm and task (N) which has 10261 * task->mm != mm for the rest of the discussion. There are two scheduler state 10262 * transitions on context switch we care about: 10263 * 10264 * (TSA) Store to rq->curr with transition from (N) to (Y) 10265 * 10266 * (TSB) Store to rq->curr with transition from (Y) to (N) 10267 * 10268 * On the remote-clear side, there is one transition we care about: 10269 * 10270 * (TMA) cmpxchg to *pcpu_cid to set the LAZY flag 10271 * 10272 * There is also a transition to UNSET state which can be performed from all 10273 * sides (scheduler, remote-clear).
It is always performed with a cmpxchg which 10274 * guarantees that only a single thread will succeed: 10275 * 10276 * (TMB) cmpxchg to *pcpu_cid to mark UNSET 10277 * 10278 * Just to be clear, what we do _not_ want to happen is a transition to UNSET 10279 * when a thread is actively using the cid (property (1)). 10280 * 10281 * Let's look at the relevant combinations of TSA/TSB and TMA transitions. 10282 * 10283 * Scenario A) (TSA)+(TMA) (from next task perspective) 10284 * 10285 * CPU0 CPU1 10286 * 10287 * Context switch CS-1 Remote-clear 10288 * - store to rq->curr: (N)->(Y) (TSA) - cmpxchg to *pcpu_cid to LAZY (TMA) 10289 * (implied barrier after cmpxchg) 10290 * - switch_mm_cid() 10291 * - memory barrier (see switch_mm_cid() 10292 * comment explaining how this barrier 10293 * is combined with other scheduler 10294 * barriers) 10295 * - mm_cid_get (next) 10296 * - READ_ONCE(*pcpu_cid) - rcu_dereference(src_rq->curr) 10297 * 10298 * This Dekker ensures that either task (Y) is observed by the 10299 * rcu_dereference() or the LAZY flag is observed by READ_ONCE(), or both are 10300 * observed. 10301 * 10302 * If the task (Y) store is observed by rcu_dereference(), it means that there is 10303 * still an active task on the cpu. Remote-clear will therefore not transition 10304 * to UNSET, which fulfills property (1). 10305 * 10306 * If task (Y) is not observed, but the lazy flag is observed by READ_ONCE(), 10307 * CPU1 will move the state to UNSET, which clears the percpu cid perhaps 10308 * uselessly (which is not an issue for correctness). Because task (Y) is not 10309 * observed, CPU1 can move ahead to set the state to UNSET. Because moving 10310 * state to UNSET is done with a cmpxchg expecting that the old state has the 10311 * LAZY flag set, only one thread will successfully UNSET. 10312 * 10313 * If both states (LAZY flag and task (Y)) are observed, the thread on CPU0 10314 * will observe the LAZY flag and transition to UNSET (perhaps uselessly), and 10315 * CPU1 will observe task (Y) and do nothing more, which is fine. 10316 * 10317 * What we are effectively preventing with this Dekker is a scenario where 10318 * neither the LAZY flag nor the store (Y) is observed, which would fail property (1) 10319 * because this would UNSET a cid which is actively used. 10320 */ 10321 10322 void sched_mm_cid_migrate_from(struct task_struct *t) 10323 { 10324 t->migrate_from_cpu = task_cpu(t); 10325 } 10326 10327 static 10328 int __sched_mm_cid_migrate_from_fetch_cid(struct rq *src_rq, 10329 struct task_struct *t, 10330 struct mm_cid *src_pcpu_cid) 10331 { 10332 struct mm_struct *mm = t->mm; 10333 struct task_struct *src_task; 10334 int src_cid, last_mm_cid; 10335 10336 if (!mm) 10337 return -1; 10338 10339 last_mm_cid = t->last_mm_cid; 10340 /* 10341 * If the migrated task has no last cid, or if the current 10342 * task on src rq uses the cid, it means the source cid does not need 10343 * to be moved to the destination cpu. 10344 */ 10345 if (last_mm_cid == -1) 10346 return -1; 10347 src_cid = READ_ONCE(src_pcpu_cid->cid); 10348 if (!mm_cid_is_valid(src_cid) || last_mm_cid != src_cid) 10349 return -1; 10350 10351 /* 10352 * If we observe an active task using the mm on this rq, it means we 10353 * are not the last task to be migrated from this cpu for this mm, so 10354 * there is no need to move src_cid to the destination cpu.
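 *
 * (The RCU read-side critical section below is what makes the
 * rcu_dereference() of src_rq->curr safe.)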
10355 */ 10356 guard(rcu)(); 10357 src_task = rcu_dereference(src_rq->curr); 10358 if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) { 10359 t->last_mm_cid = -1; 10360 return -1; 10361 } 10362 10363 return src_cid; 10364 } 10365 10366 static 10367 int __sched_mm_cid_migrate_from_try_steal_cid(struct rq *src_rq, 10368 struct task_struct *t, 10369 struct mm_cid *src_pcpu_cid, 10370 int src_cid) 10371 { 10372 struct task_struct *src_task; 10373 struct mm_struct *mm = t->mm; 10374 int lazy_cid; 10375 10376 if (src_cid == -1) 10377 return -1; 10378 10379 /* 10380 * Attempt to clear the source cpu cid to move it to the destination 10381 * cpu. 10382 */ 10383 lazy_cid = mm_cid_set_lazy_put(src_cid); 10384 if (!try_cmpxchg(&src_pcpu_cid->cid, &src_cid, lazy_cid)) 10385 return -1; 10386 10387 /* 10388 * The implicit barrier after cmpxchg per-mm/cpu cid before loading 10389 * rq->curr->mm matches the scheduler barrier in context_switch() 10390 * between store to rq->curr and load of prev and next task's 10391 * per-mm/cpu cid. 10392 * 10393 * The implicit barrier after cmpxchg per-mm/cpu cid before loading 10394 * rq->curr->mm_cid_active matches the barrier in 10395 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and 10396 * sched_mm_cid_after_execve() between store to t->mm_cid_active and 10397 * load of per-mm/cpu cid. 10398 */ 10399 10400 /* 10401 * If we observe an active task using the mm on this rq after setting 10402 * the lazy-put flag, this task will be responsible for transitioning 10403 * from lazy-put flag set to MM_CID_UNSET. 10404 */ 10405 scoped_guard (rcu) { 10406 src_task = rcu_dereference(src_rq->curr); 10407 if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) { 10408 /* 10409 * We observed an active task for this mm, there is therefore 10410 * no point in moving this cid to the destination cpu. 10411 */ 10412 t->last_mm_cid = -1; 10413 return -1; 10414 } 10415 } 10416 10417 /* 10418 * The src_cid is unused, so it can be unset. 10419 */ 10420 if (!try_cmpxchg(&src_pcpu_cid->cid, &lazy_cid, MM_CID_UNSET)) 10421 return -1; 10422 WRITE_ONCE(src_pcpu_cid->recent_cid, MM_CID_UNSET); 10423 return src_cid; 10424 } 10425 10426 /* 10427 * Migration to dst cpu. Called with dst_rq lock held. 10428 * Interrupts are disabled, which keeps small the window during which the cid 10429 * is owned without the source rq lock held. 10430 */ 10431 void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t) 10432 { 10433 struct mm_cid *src_pcpu_cid, *dst_pcpu_cid; 10434 struct mm_struct *mm = t->mm; 10435 int src_cid, src_cpu; 10436 bool dst_cid_is_set; 10437 struct rq *src_rq; 10438 10439 lockdep_assert_rq_held(dst_rq); 10440 10441 if (!mm) 10442 return; 10443 src_cpu = t->migrate_from_cpu; 10444 if (src_cpu == -1) { 10445 t->last_mm_cid = -1; 10446 return; 10447 } 10448 /* 10449 * Move the src cid if the dst cid is unset. This keeps cid 10450 * allocation closest to 0 in cases where few threads migrate around 10451 * many CPUs. 10452 * 10453 * If the destination cid or recent cid is already set, we may have 10454 * to just clear the src cid to ensure compactness in frequent 10455 * migration scenarios. 10456 * 10457 * It is not useful to clear the src cid when the number of threads is 10458 * greater than or equal to the number of allowed CPUs, because user-space 10459 * can expect that the number of allowed cids can reach the number of 10460 * allowed CPUs.
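 *
 * For example, an mm with 8 threads allowed to run on 8 CPUs can
 * legitimately keep all cids 0..7 in use, so clearing the source cid on
 * each migration would only cause needless churn.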
10461 */ 10462 dst_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(dst_rq)); 10463 dst_cid_is_set = !mm_cid_is_unset(READ_ONCE(dst_pcpu_cid->cid)) || 10464 !mm_cid_is_unset(READ_ONCE(dst_pcpu_cid->recent_cid)); 10465 if (dst_cid_is_set && atomic_read(&mm->mm_users) >= READ_ONCE(mm->nr_cpus_allowed)) 10466 return; 10467 src_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, src_cpu); 10468 src_rq = cpu_rq(src_cpu); 10469 src_cid = __sched_mm_cid_migrate_from_fetch_cid(src_rq, t, src_pcpu_cid); 10470 if (src_cid == -1) 10471 return; 10472 src_cid = __sched_mm_cid_migrate_from_try_steal_cid(src_rq, t, src_pcpu_cid, 10473 src_cid); 10474 if (src_cid == -1) 10475 return; 10476 if (dst_cid_is_set) { 10477 __mm_cid_put(mm, src_cid); 10478 return; 10479 } 10480 /* Move src_cid to dst cpu. */ 10481 mm_cid_snapshot_time(dst_rq, mm); 10482 WRITE_ONCE(dst_pcpu_cid->cid, src_cid); 10483 WRITE_ONCE(dst_pcpu_cid->recent_cid, src_cid); 10484 } 10485 10486 static void sched_mm_cid_remote_clear(struct mm_struct *mm, struct mm_cid *pcpu_cid, 10487 int cpu) 10488 { 10489 struct rq *rq = cpu_rq(cpu); 10490 struct task_struct *t; 10491 int cid, lazy_cid; 10492 10493 cid = READ_ONCE(pcpu_cid->cid); 10494 if (!mm_cid_is_valid(cid)) 10495 return; 10496 10497 /* 10498 * Clear the cpu cid, if it is set, to keep cid allocation compact. If 10499 * there happen to be other tasks left on the source cpu using this 10500 * mm, the next task using this mm will reallocate its cid on context 10501 * switch. 10502 */ 10503 lazy_cid = mm_cid_set_lazy_put(cid); 10504 if (!try_cmpxchg(&pcpu_cid->cid, &cid, lazy_cid)) 10505 return; 10506 10507 /* 10508 * The implicit barrier after cmpxchg per-mm/cpu cid before loading 10509 * rq->curr->mm matches the scheduler barrier in context_switch() 10510 * between store to rq->curr and load of prev and next task's 10511 * per-mm/cpu cid. 10512 * 10513 * The implicit barrier after cmpxchg per-mm/cpu cid before loading 10514 * rq->curr->mm_cid_active matches the barrier in 10515 * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and 10516 * sched_mm_cid_after_execve() between store to t->mm_cid_active and 10517 * load of per-mm/cpu cid. 10518 */ 10519 10520 /* 10521 * If we observe an active task using the mm on this rq after setting 10522 * the lazy-put flag, that task will be responsible for transitioning 10523 * from lazy-put flag set to MM_CID_UNSET. 10524 */ 10525 scoped_guard (rcu) { 10526 t = rcu_dereference(rq->curr); 10527 if (READ_ONCE(t->mm_cid_active) && t->mm == mm) 10528 return; 10529 } 10530 10531 /* 10532 * The cid is unused, so it can be unset. 10533 * Disable interrupts to keep small the window during which the cid is 10534 * owned without the rq lock held. 10535 */ 10536 scoped_guard (irqsave) { 10537 if (try_cmpxchg(&pcpu_cid->cid, &lazy_cid, MM_CID_UNSET)) 10538 __mm_cid_put(mm, cid); 10539 } 10540 } 10541 10542 static void sched_mm_cid_remote_clear_old(struct mm_struct *mm, int cpu) 10543 { 10544 struct rq *rq = cpu_rq(cpu); 10545 struct mm_cid *pcpu_cid; 10546 struct task_struct *curr; 10547 u64 rq_clock; 10548 10549 /* 10550 * rq->clock load is racy on 32-bit but one spurious clear once in a 10551 * while is irrelevant. 10552 */ 10553 rq_clock = READ_ONCE(rq->clock); 10554 pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu); 10555 10556 /* 10557 * In order to take care of infrequently scheduled tasks, bump the time 10558 * snapshot associated with this cid if an active task using the mm is 10559 * observed on this rq.
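 *
 * (Without the bump, a task that runs only briefly every few seconds
 * could see its cid expire and get cleared between two activations.)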
10560 */ 10561 scoped_guard (rcu) { 10562 curr = rcu_dereference(rq->curr); 10563 if (READ_ONCE(curr->mm_cid_active) && curr->mm == mm) { 10564 WRITE_ONCE(pcpu_cid->time, rq_clock); 10565 return; 10566 } 10567 } 10568 10569 if (rq_clock < pcpu_cid->time + SCHED_MM_CID_PERIOD_NS) 10570 return; 10571 sched_mm_cid_remote_clear(mm, pcpu_cid, cpu); 10572 } 10573 10574 static void sched_mm_cid_remote_clear_weight(struct mm_struct *mm, int cpu, 10575 int weight) 10576 { 10577 struct mm_cid *pcpu_cid; 10578 int cid; 10579 10580 pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu); 10581 cid = READ_ONCE(pcpu_cid->cid); 10582 if (!mm_cid_is_valid(cid) || cid < weight) 10583 return; 10584 sched_mm_cid_remote_clear(mm, pcpu_cid, cpu); 10585 } 10586 10587 static void task_mm_cid_work(struct callback_head *work) 10588 { 10589 unsigned long now = jiffies, old_scan, next_scan; 10590 struct task_struct *t = current; 10591 struct cpumask *cidmask; 10592 struct mm_struct *mm; 10593 int weight, cpu; 10594 10595 WARN_ON_ONCE(t != container_of(work, struct task_struct, cid_work)); 10596 10597 work->next = work; /* Prevent double-add */ 10598 if (t->flags & PF_EXITING) 10599 return; 10600 mm = t->mm; 10601 if (!mm) 10602 return; 10603 old_scan = READ_ONCE(mm->mm_cid_next_scan); 10604 next_scan = now + msecs_to_jiffies(MM_CID_SCAN_DELAY); 10605 if (!old_scan) { 10606 unsigned long res; 10607 10608 res = cmpxchg(&mm->mm_cid_next_scan, old_scan, next_scan); 10609 if (res != old_scan) 10610 old_scan = res; 10611 else 10612 old_scan = next_scan; 10613 } 10614 if (time_before(now, old_scan)) 10615 return; 10616 if (!try_cmpxchg(&mm->mm_cid_next_scan, &old_scan, next_scan)) 10617 return; 10618 cidmask = mm_cidmask(mm); 10619 /* Clear cids that were not recently used. */ 10620 for_each_possible_cpu(cpu) 10621 sched_mm_cid_remote_clear_old(mm, cpu); 10622 weight = cpumask_weight(cidmask); 10623 /* 10624 * Clear cids that are greater than or equal to the cidmask weight to 10625 * recompact it. 10626 */ 10627 for_each_possible_cpu(cpu) 10628 sched_mm_cid_remote_clear_weight(mm, cpu, weight); 10629 } 10630 10631 void init_sched_mm_cid(struct task_struct *t) 10632 { 10633 struct mm_struct *mm = t->mm; 10634 int mm_users = 0; 10635 10636 if (mm) { 10637 mm_users = atomic_read(&mm->mm_users); 10638 if (mm_users == 1) 10639 mm->mm_cid_next_scan = jiffies + msecs_to_jiffies(MM_CID_SCAN_DELAY); 10640 } 10641 t->cid_work.next = &t->cid_work; /* Protect against double-add */ 10642 init_task_work(&t->cid_work, task_mm_cid_work); 10643 } 10644 10645 void task_tick_mm_cid(struct rq *rq, struct task_struct *curr) 10646 { 10647 struct callback_head *work = &curr->cid_work; 10648 unsigned long now = jiffies; 10649 10650 if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) || 10651 work->next != work) 10652 return; 10653 if (time_before(now, READ_ONCE(curr->mm->mm_cid_next_scan))) 10654 return; 10655 10656 /* No page allocation under rq lock */ 10657 task_work_add(curr, work, TWA_RESUME); 10658 } 10659 10660 void sched_mm_cid_exit_signals(struct task_struct *t) 10661 { 10662 struct mm_struct *mm = t->mm; 10663 struct rq *rq; 10664 10665 if (!mm) 10666 return; 10667 10668 preempt_disable(); 10669 rq = this_rq(); 10670 guard(rq_lock_irqsave)(rq); 10671 preempt_enable_no_resched(); /* holding spinlock */ 10672 WRITE_ONCE(t->mm_cid_active, 0); 10673 /* 10674 * Store t->mm_cid_active before loading per-mm/cpu cid. 10675 * Matches barrier in sched_mm_cid_remote_clear_old().
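 *
 * This store/smp_mb()/load sequence is the scheduler-side half of the
 * Dekker ordering documented above sched_mm_cid_migrate_from().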
10676 */ 10677 smp_mb(); 10678 mm_cid_put(mm); 10679 t->last_mm_cid = t->mm_cid = -1; 10680 } 10681 10682 void sched_mm_cid_before_execve(struct task_struct *t) 10683 { 10684 struct mm_struct *mm = t->mm; 10685 struct rq *rq; 10686 10687 if (!mm) 10688 return; 10689 10690 preempt_disable(); 10691 rq = this_rq(); 10692 guard(rq_lock_irqsave)(rq); 10693 preempt_enable_no_resched(); /* holding spinlock */ 10694 WRITE_ONCE(t->mm_cid_active, 0); 10695 /* 10696 * Store t->mm_cid_active before loading per-mm/cpu cid. 10697 * Matches barrier in sched_mm_cid_remote_clear_old(). 10698 */ 10699 smp_mb(); 10700 mm_cid_put(mm); 10701 t->last_mm_cid = t->mm_cid = -1; 10702 } 10703 10704 void sched_mm_cid_after_execve(struct task_struct *t) 10705 { 10706 struct mm_struct *mm = t->mm; 10707 struct rq *rq; 10708 10709 if (!mm) 10710 return; 10711 10712 preempt_disable(); 10713 rq = this_rq(); 10714 scoped_guard (rq_lock_irqsave, rq) { 10715 preempt_enable_no_resched(); /* holding spinlock */ 10716 WRITE_ONCE(t->mm_cid_active, 1); 10717 /* 10718 * Store t->mm_cid_active before loading per-mm/cpu cid. 10719 * Matches barrier in sched_mm_cid_remote_clear_old(). 10720 */ 10721 smp_mb(); 10722 t->last_mm_cid = t->mm_cid = mm_cid_get(rq, t, mm); 10723 } 10724 } 10725 10726 void sched_mm_cid_fork(struct task_struct *t) 10727 { 10728 WARN_ON_ONCE(!t->mm || t->mm_cid != -1); 10729 t->mm_cid_active = 1; 10730 } 10731 #endif 10732 10733 #ifdef CONFIG_SCHED_CLASS_EXT 10734 void sched_deq_and_put_task(struct task_struct *p, int queue_flags, 10735 struct sched_enq_and_set_ctx *ctx) 10736 { 10737 struct rq *rq = task_rq(p); 10738 10739 lockdep_assert_rq_held(rq); 10740 10741 *ctx = (struct sched_enq_and_set_ctx){ 10742 .p = p, 10743 .queue_flags = queue_flags, 10744 .queued = task_on_rq_queued(p), 10745 .running = task_current(rq, p), 10746 }; 10747 10748 update_rq_clock(rq); 10749 if (ctx->queued) 10750 dequeue_task(rq, p, queue_flags | DEQUEUE_NOCLOCK); 10751 if (ctx->running) 10752 put_prev_task(rq, p); 10753 } 10754 10755 void sched_enq_and_set_task(struct sched_enq_and_set_ctx *ctx) 10756 { 10757 struct rq *rq = task_rq(ctx->p); 10758 10759 lockdep_assert_rq_held(rq); 10760 10761 if (ctx->queued) 10762 enqueue_task(rq, ctx->p, ctx->queue_flags | ENQUEUE_NOCLOCK); 10763 if (ctx->running) 10764 set_next_task(rq, ctx->p); 10765 } 10766 #endif /* CONFIG_SCHED_CLASS_EXT */ 10767
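/*
 * Minimal usage sketch for the two helpers above (illustrative only;
 * the flag combination is an assumption, and the caller must already
 * hold the task's rq lock, e.g. via task_rq_lock()):
 *
 *	struct sched_enq_and_set_ctx ctx;
 *
 *	sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
 *	... change p's scheduling properties here ...
 *	sched_enq_and_set_task(&ctx);
 *
 * This dequeues/puts the task if needed, lets the caller change its
 * scheduling properties, then restores its queued/running state.
 */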