1 /* SPDX-License-Identifier: GPL-2.0 */ 2 /* 3 * Scheduler internal types and methods: 4 */ 5 #include <linux/sched.h> 6 7 #include <linux/sched/autogroup.h> 8 #include <linux/sched/clock.h> 9 #include <linux/sched/coredump.h> 10 #include <linux/sched/cpufreq.h> 11 #include <linux/sched/cputime.h> 12 #include <linux/sched/deadline.h> 13 #include <linux/sched/debug.h> 14 #include <linux/sched/hotplug.h> 15 #include <linux/sched/idle.h> 16 #include <linux/sched/init.h> 17 #include <linux/sched/isolation.h> 18 #include <linux/sched/jobctl.h> 19 #include <linux/sched/loadavg.h> 20 #include <linux/sched/mm.h> 21 #include <linux/sched/nohz.h> 22 #include <linux/sched/numa_balancing.h> 23 #include <linux/sched/prio.h> 24 #include <linux/sched/rt.h> 25 #include <linux/sched/signal.h> 26 #include <linux/sched/smt.h> 27 #include <linux/sched/stat.h> 28 #include <linux/sched/sysctl.h> 29 #include <linux/sched/task.h> 30 #include <linux/sched/task_stack.h> 31 #include <linux/sched/topology.h> 32 #include <linux/sched/user.h> 33 #include <linux/sched/wake_q.h> 34 #include <linux/sched/xacct.h> 35 36 #include <uapi/linux/sched/types.h> 37 38 #include <linux/binfmts.h> 39 #include <linux/blkdev.h> 40 #include <linux/compat.h> 41 #include <linux/context_tracking.h> 42 #include <linux/cpufreq.h> 43 #include <linux/cpuidle.h> 44 #include <linux/cpuset.h> 45 #include <linux/ctype.h> 46 #include <linux/debugfs.h> 47 #include <linux/delayacct.h> 48 #include <linux/energy_model.h> 49 #include <linux/init_task.h> 50 #include <linux/kprobes.h> 51 #include <linux/kthread.h> 52 #include <linux/membarrier.h> 53 #include <linux/migrate.h> 54 #include <linux/mmu_context.h> 55 #include <linux/nmi.h> 56 #include <linux/proc_fs.h> 57 #include <linux/prefetch.h> 58 #include <linux/profile.h> 59 #include <linux/psi.h> 60 #include <linux/rcupdate_wait.h> 61 #include <linux/security.h> 62 #include <linux/stop_machine.h> 63 #include <linux/suspend.h> 64 #include <linux/swait.h> 65 #include <linux/syscalls.h> 66 #include <linux/task_work.h> 67 #include <linux/tsacct_kern.h> 68 69 #include <asm/tlb.h> 70 71 #ifdef CONFIG_PARAVIRT 72 # include <asm/paravirt.h> 73 #endif 74 75 #include "cpupri.h" 76 #include "cpudeadline.h" 77 78 #ifdef CONFIG_SCHED_DEBUG 79 # define SCHED_WARN_ON(x) WARN_ONCE(x, #x) 80 #else 81 # define SCHED_WARN_ON(x) ({ (void)(x), 0; }) 82 #endif 83 84 struct rq; 85 struct cpuidle_state; 86 87 /* task_struct::on_rq states: */ 88 #define TASK_ON_RQ_QUEUED 1 89 #define TASK_ON_RQ_MIGRATING 2 90 91 extern __read_mostly int scheduler_running; 92 93 extern unsigned long calc_load_update; 94 extern atomic_long_t calc_load_tasks; 95 96 extern void calc_global_load_tick(struct rq *this_rq); 97 extern long calc_load_fold_active(struct rq *this_rq, long adjust); 98 99 /* 100 * Helpers for converting nanosecond timing to jiffy resolution 101 */ 102 #define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ)) 103 104 /* 105 * Increase resolution of nice-level calculations for 64-bit architectures. 106 * The extra resolution improves shares distribution and load balancing of 107 * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup 108 * hierarchies, especially on larger systems. This is not a user-visible change 109 * and does not change the user-interface for setting shares/weights. 110 * 111 * We increase resolution only if we have enough bits to allow this increased 112 * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit 113 * are pretty high and the returns do not justify the increased costs. 114 * 115 * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to 116 * increase coverage and consistency always enable it on 64-bit platforms. 117 */ 118 #ifdef CONFIG_64BIT 119 # define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT) 120 # define scale_load(w) ((w) << SCHED_FIXEDPOINT_SHIFT) 121 # define scale_load_down(w) ((w) >> SCHED_FIXEDPOINT_SHIFT) 122 #else 123 # define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT) 124 # define scale_load(w) (w) 125 # define scale_load_down(w) (w) 126 #endif 127 128 /* 129 * Task weight (visible to users) and its load (invisible to users) have 130 * independent resolution, but they should be well calibrated. We use 131 * scale_load() and scale_load_down(w) to convert between them. The 132 * following must be true: 133 * 134 * scale_load(sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]) == NICE_0_LOAD 135 * 136 */ 137 #define NICE_0_LOAD (1L << NICE_0_LOAD_SHIFT) 138 139 /* 140 * Single value that decides SCHED_DEADLINE internal math precision. 141 * 10 -> just above 1us 142 * 9 -> just above 0.5us 143 */ 144 #define DL_SCALE 10 145 146 /* 147 * Single value that denotes runtime == period, ie unlimited time. 148 */ 149 #define RUNTIME_INF ((u64)~0ULL) 150 151 static inline int idle_policy(int policy) 152 { 153 return policy == SCHED_IDLE; 154 } 155 static inline int fair_policy(int policy) 156 { 157 return policy == SCHED_NORMAL || policy == SCHED_BATCH; 158 } 159 160 static inline int rt_policy(int policy) 161 { 162 return policy == SCHED_FIFO || policy == SCHED_RR; 163 } 164 165 static inline int dl_policy(int policy) 166 { 167 return policy == SCHED_DEADLINE; 168 } 169 static inline bool valid_policy(int policy) 170 { 171 return idle_policy(policy) || fair_policy(policy) || 172 rt_policy(policy) || dl_policy(policy); 173 } 174 175 static inline int task_has_idle_policy(struct task_struct *p) 176 { 177 return idle_policy(p->policy); 178 } 179 180 static inline int task_has_rt_policy(struct task_struct *p) 181 { 182 return rt_policy(p->policy); 183 } 184 185 static inline int task_has_dl_policy(struct task_struct *p) 186 { 187 return dl_policy(p->policy); 188 } 189 190 #define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT) 191 192 /* 193 * !! For sched_setattr_nocheck() (kernel) only !! 194 * 195 * This is actually gross. :( 196 * 197 * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE 198 * tasks, but still be able to sleep. We need this on platforms that cannot 199 * atomically change clock frequency. Remove once fast switching will be 200 * available on such platforms. 201 * 202 * SUGOV stands for SchedUtil GOVernor. 203 */ 204 #define SCHED_FLAG_SUGOV 0x10000000 205 206 static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se) 207 { 208 #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL 209 return unlikely(dl_se->flags & SCHED_FLAG_SUGOV); 210 #else 211 return false; 212 #endif 213 } 214 215 /* 216 * Tells if entity @a should preempt entity @b. 217 */ 218 static inline bool 219 dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b) 220 { 221 return dl_entity_is_special(a) || 222 dl_time_before(a->deadline, b->deadline); 223 } 224 225 /* 226 * This is the priority-queue data structure of the RT scheduling class: 227 */ 228 struct rt_prio_array { 229 DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */ 230 struct list_head queue[MAX_RT_PRIO]; 231 }; 232 233 struct rt_bandwidth { 234 /* nests inside the rq lock: */ 235 raw_spinlock_t rt_runtime_lock; 236 ktime_t rt_period; 237 u64 rt_runtime; 238 struct hrtimer rt_period_timer; 239 unsigned int rt_period_active; 240 }; 241 242 void __dl_clear_params(struct task_struct *p); 243 244 /* 245 * To keep the bandwidth of -deadline tasks and groups under control 246 * we need some place where: 247 * - store the maximum -deadline bandwidth of the system (the group); 248 * - cache the fraction of that bandwidth that is currently allocated. 249 * 250 * This is all done in the data structure below. It is similar to the 251 * one used for RT-throttling (rt_bandwidth), with the main difference 252 * that, since here we are only interested in admission control, we 253 * do not decrease any runtime while the group "executes", neither we 254 * need a timer to replenish it. 255 * 256 * With respect to SMP, the bandwidth is given on a per-CPU basis, 257 * meaning that: 258 * - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU; 259 * - dl_total_bw array contains, in the i-eth element, the currently 260 * allocated bandwidth on the i-eth CPU. 261 * Moreover, groups consume bandwidth on each CPU, while tasks only 262 * consume bandwidth on the CPU they're running on. 263 * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw 264 * that will be shown the next time the proc or cgroup controls will 265 * be red. It on its turn can be changed by writing on its own 266 * control. 267 */ 268 struct dl_bandwidth { 269 raw_spinlock_t dl_runtime_lock; 270 u64 dl_runtime; 271 u64 dl_period; 272 }; 273 274 static inline int dl_bandwidth_enabled(void) 275 { 276 return sysctl_sched_rt_runtime >= 0; 277 } 278 279 struct dl_bw { 280 raw_spinlock_t lock; 281 u64 bw; 282 u64 total_bw; 283 }; 284 285 static inline void __dl_update(struct dl_bw *dl_b, s64 bw); 286 287 static inline 288 void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus) 289 { 290 dl_b->total_bw -= tsk_bw; 291 __dl_update(dl_b, (s32)tsk_bw / cpus); 292 } 293 294 static inline 295 void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus) 296 { 297 dl_b->total_bw += tsk_bw; 298 __dl_update(dl_b, -((s32)tsk_bw / cpus)); 299 } 300 301 static inline 302 bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw) 303 { 304 return dl_b->bw != -1 && 305 dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw; 306 } 307 308 extern void dl_change_utilization(struct task_struct *p, u64 new_bw); 309 extern void init_dl_bw(struct dl_bw *dl_b); 310 extern int sched_dl_global_validate(void); 311 extern void sched_dl_do_global(void); 312 extern int sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr); 313 extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr); 314 extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr); 315 extern bool __checkparam_dl(const struct sched_attr *attr); 316 extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr); 317 extern int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed); 318 extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial); 319 extern bool dl_cpu_busy(unsigned int cpu); 320 321 #ifdef CONFIG_CGROUP_SCHED 322 323 #include <linux/cgroup.h> 324 #include <linux/psi.h> 325 326 struct cfs_rq; 327 struct rt_rq; 328 329 extern struct list_head task_groups; 330 331 struct cfs_bandwidth { 332 #ifdef CONFIG_CFS_BANDWIDTH 333 raw_spinlock_t lock; 334 ktime_t period; 335 u64 quota; 336 u64 runtime; 337 s64 hierarchical_quota; 338 339 u8 idle; 340 u8 period_active; 341 u8 distribute_running; 342 u8 slack_started; 343 struct hrtimer period_timer; 344 struct hrtimer slack_timer; 345 struct list_head throttled_cfs_rq; 346 347 /* Statistics: */ 348 int nr_periods; 349 int nr_throttled; 350 u64 throttled_time; 351 #endif 352 }; 353 354 /* Task group related information */ 355 struct task_group { 356 struct cgroup_subsys_state css; 357 358 #ifdef CONFIG_FAIR_GROUP_SCHED 359 /* schedulable entities of this group on each CPU */ 360 struct sched_entity **se; 361 /* runqueue "owned" by this group on each CPU */ 362 struct cfs_rq **cfs_rq; 363 unsigned long shares; 364 365 #ifdef CONFIG_SMP 366 /* 367 * load_avg can be heavily contended at clock tick time, so put 368 * it in its own cacheline separated from the fields above which 369 * will also be accessed at each tick. 370 */ 371 atomic_long_t load_avg ____cacheline_aligned; 372 #endif 373 #endif 374 375 #ifdef CONFIG_RT_GROUP_SCHED 376 struct sched_rt_entity **rt_se; 377 struct rt_rq **rt_rq; 378 379 struct rt_bandwidth rt_bandwidth; 380 #endif 381 382 struct rcu_head rcu; 383 struct list_head list; 384 385 struct task_group *parent; 386 struct list_head siblings; 387 struct list_head children; 388 389 #ifdef CONFIG_SCHED_AUTOGROUP 390 struct autogroup *autogroup; 391 #endif 392 393 struct cfs_bandwidth cfs_bandwidth; 394 395 #ifdef CONFIG_UCLAMP_TASK_GROUP 396 /* The two decimal precision [%] value requested from user-space */ 397 unsigned int uclamp_pct[UCLAMP_CNT]; 398 /* Clamp values requested for a task group */ 399 struct uclamp_se uclamp_req[UCLAMP_CNT]; 400 /* Effective clamp values used for a task group */ 401 struct uclamp_se uclamp[UCLAMP_CNT]; 402 #endif 403 404 }; 405 406 #ifdef CONFIG_FAIR_GROUP_SCHED 407 #define ROOT_TASK_GROUP_LOAD NICE_0_LOAD 408 409 /* 410 * A weight of 0 or 1 can cause arithmetics problems. 411 * A weight of a cfs_rq is the sum of weights of which entities 412 * are queued on this cfs_rq, so a weight of a entity should not be 413 * too large, so as the shares value of a task group. 414 * (The default weight is 1024 - so there's no practical 415 * limitation from this.) 416 */ 417 #define MIN_SHARES (1UL << 1) 418 #define MAX_SHARES (1UL << 18) 419 #endif 420 421 typedef int (*tg_visitor)(struct task_group *, void *); 422 423 extern int walk_tg_tree_from(struct task_group *from, 424 tg_visitor down, tg_visitor up, void *data); 425 426 /* 427 * Iterate the full tree, calling @down when first entering a node and @up when 428 * leaving it for the final time. 429 * 430 * Caller must hold rcu_lock or sufficient equivalent. 431 */ 432 static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data) 433 { 434 return walk_tg_tree_from(&root_task_group, down, up, data); 435 } 436 437 extern int tg_nop(struct task_group *tg, void *data); 438 439 extern void free_fair_sched_group(struct task_group *tg); 440 extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent); 441 extern void online_fair_sched_group(struct task_group *tg); 442 extern void unregister_fair_sched_group(struct task_group *tg); 443 extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, 444 struct sched_entity *se, int cpu, 445 struct sched_entity *parent); 446 extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b); 447 448 extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b); 449 extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b); 450 extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq); 451 452 extern void free_rt_sched_group(struct task_group *tg); 453 extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent); 454 extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, 455 struct sched_rt_entity *rt_se, int cpu, 456 struct sched_rt_entity *parent); 457 extern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us); 458 extern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us); 459 extern long sched_group_rt_runtime(struct task_group *tg); 460 extern long sched_group_rt_period(struct task_group *tg); 461 extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk); 462 463 extern struct task_group *sched_create_group(struct task_group *parent); 464 extern void sched_online_group(struct task_group *tg, 465 struct task_group *parent); 466 extern void sched_destroy_group(struct task_group *tg); 467 extern void sched_offline_group(struct task_group *tg); 468 469 extern void sched_move_task(struct task_struct *tsk); 470 471 #ifdef CONFIG_FAIR_GROUP_SCHED 472 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares); 473 474 #ifdef CONFIG_SMP 475 extern void set_task_rq_fair(struct sched_entity *se, 476 struct cfs_rq *prev, struct cfs_rq *next); 477 #else /* !CONFIG_SMP */ 478 static inline void set_task_rq_fair(struct sched_entity *se, 479 struct cfs_rq *prev, struct cfs_rq *next) { } 480 #endif /* CONFIG_SMP */ 481 #endif /* CONFIG_FAIR_GROUP_SCHED */ 482 483 #else /* CONFIG_CGROUP_SCHED */ 484 485 struct cfs_bandwidth { }; 486 487 #endif /* CONFIG_CGROUP_SCHED */ 488 489 /* CFS-related fields in a runqueue */ 490 struct cfs_rq { 491 struct load_weight load; 492 unsigned long runnable_weight; 493 unsigned int nr_running; 494 unsigned int h_nr_running; /* SCHED_{NORMAL,BATCH,IDLE} */ 495 unsigned int idle_h_nr_running; /* SCHED_IDLE */ 496 497 u64 exec_clock; 498 u64 min_vruntime; 499 #ifndef CONFIG_64BIT 500 u64 min_vruntime_copy; 501 #endif 502 503 struct rb_root_cached tasks_timeline; 504 505 /* 506 * 'curr' points to currently running entity on this cfs_rq. 507 * It is set to NULL otherwise (i.e when none are currently running). 508 */ 509 struct sched_entity *curr; 510 struct sched_entity *next; 511 struct sched_entity *last; 512 struct sched_entity *skip; 513 514 #ifdef CONFIG_SCHED_DEBUG 515 unsigned int nr_spread_over; 516 #endif 517 518 #ifdef CONFIG_SMP 519 /* 520 * CFS load tracking 521 */ 522 struct sched_avg avg; 523 #ifndef CONFIG_64BIT 524 u64 load_last_update_time_copy; 525 #endif 526 struct { 527 raw_spinlock_t lock ____cacheline_aligned; 528 int nr; 529 unsigned long load_avg; 530 unsigned long util_avg; 531 unsigned long runnable_sum; 532 } removed; 533 534 #ifdef CONFIG_FAIR_GROUP_SCHED 535 unsigned long tg_load_avg_contrib; 536 long propagate; 537 long prop_runnable_sum; 538 539 /* 540 * h_load = weight * f(tg) 541 * 542 * Where f(tg) is the recursive weight fraction assigned to 543 * this group. 544 */ 545 unsigned long h_load; 546 u64 last_h_load_update; 547 struct sched_entity *h_load_next; 548 #endif /* CONFIG_FAIR_GROUP_SCHED */ 549 #endif /* CONFIG_SMP */ 550 551 #ifdef CONFIG_FAIR_GROUP_SCHED 552 struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */ 553 554 /* 555 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in 556 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities 557 * (like users, containers etc.) 558 * 559 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a CPU. 560 * This list is used during load balance. 561 */ 562 int on_list; 563 struct list_head leaf_cfs_rq_list; 564 struct task_group *tg; /* group that "owns" this runqueue */ 565 566 #ifdef CONFIG_CFS_BANDWIDTH 567 int runtime_enabled; 568 s64 runtime_remaining; 569 570 u64 throttled_clock; 571 u64 throttled_clock_task; 572 u64 throttled_clock_task_time; 573 int throttled; 574 int throttle_count; 575 struct list_head throttled_list; 576 #endif /* CONFIG_CFS_BANDWIDTH */ 577 #endif /* CONFIG_FAIR_GROUP_SCHED */ 578 }; 579 580 static inline int rt_bandwidth_enabled(void) 581 { 582 return sysctl_sched_rt_runtime >= 0; 583 } 584 585 /* RT IPI pull logic requires IRQ_WORK */ 586 #if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP) 587 # define HAVE_RT_PUSH_IPI 588 #endif 589 590 /* Real-Time classes' related field in a runqueue: */ 591 struct rt_rq { 592 struct rt_prio_array active; 593 unsigned int rt_nr_running; 594 unsigned int rr_nr_running; 595 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED 596 struct { 597 int curr; /* highest queued rt task prio */ 598 #ifdef CONFIG_SMP 599 int next; /* next highest */ 600 #endif 601 } highest_prio; 602 #endif 603 #ifdef CONFIG_SMP 604 unsigned long rt_nr_migratory; 605 unsigned long rt_nr_total; 606 int overloaded; 607 struct plist_head pushable_tasks; 608 609 #endif /* CONFIG_SMP */ 610 int rt_queued; 611 612 int rt_throttled; 613 u64 rt_time; 614 u64 rt_runtime; 615 /* Nests inside the rq lock: */ 616 raw_spinlock_t rt_runtime_lock; 617 618 #ifdef CONFIG_RT_GROUP_SCHED 619 unsigned long rt_nr_boosted; 620 621 struct rq *rq; 622 struct task_group *tg; 623 #endif 624 }; 625 626 static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq) 627 { 628 return rt_rq->rt_queued && rt_rq->rt_nr_running; 629 } 630 631 /* Deadline class' related fields in a runqueue */ 632 struct dl_rq { 633 /* runqueue is an rbtree, ordered by deadline */ 634 struct rb_root_cached root; 635 636 unsigned long dl_nr_running; 637 638 #ifdef CONFIG_SMP 639 /* 640 * Deadline values of the currently executing and the 641 * earliest ready task on this rq. Caching these facilitates 642 * the decision whether or not a ready but not running task 643 * should migrate somewhere else. 644 */ 645 struct { 646 u64 curr; 647 u64 next; 648 } earliest_dl; 649 650 unsigned long dl_nr_migratory; 651 int overloaded; 652 653 /* 654 * Tasks on this rq that can be pushed away. They are kept in 655 * an rb-tree, ordered by tasks' deadlines, with caching 656 * of the leftmost (earliest deadline) element. 657 */ 658 struct rb_root_cached pushable_dl_tasks_root; 659 #else 660 struct dl_bw dl_bw; 661 #endif 662 /* 663 * "Active utilization" for this runqueue: increased when a 664 * task wakes up (becomes TASK_RUNNING) and decreased when a 665 * task blocks 666 */ 667 u64 running_bw; 668 669 /* 670 * Utilization of the tasks "assigned" to this runqueue (including 671 * the tasks that are in runqueue and the tasks that executed on this 672 * CPU and blocked). Increased when a task moves to this runqueue, and 673 * decreased when the task moves away (migrates, changes scheduling 674 * policy, or terminates). 675 * This is needed to compute the "inactive utilization" for the 676 * runqueue (inactive utilization = this_bw - running_bw). 677 */ 678 u64 this_bw; 679 u64 extra_bw; 680 681 /* 682 * Inverse of the fraction of CPU utilization that can be reclaimed 683 * by the GRUB algorithm. 684 */ 685 u64 bw_ratio; 686 }; 687 688 #ifdef CONFIG_FAIR_GROUP_SCHED 689 /* An entity is a task if it doesn't "own" a runqueue */ 690 #define entity_is_task(se) (!se->my_q) 691 #else 692 #define entity_is_task(se) 1 693 #endif 694 695 #ifdef CONFIG_SMP 696 /* 697 * XXX we want to get rid of these helpers and use the full load resolution. 698 */ 699 static inline long se_weight(struct sched_entity *se) 700 { 701 return scale_load_down(se->load.weight); 702 } 703 704 static inline long se_runnable(struct sched_entity *se) 705 { 706 return scale_load_down(se->runnable_weight); 707 } 708 709 static inline bool sched_asym_prefer(int a, int b) 710 { 711 return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b); 712 } 713 714 struct perf_domain { 715 struct em_perf_domain *em_pd; 716 struct perf_domain *next; 717 struct rcu_head rcu; 718 }; 719 720 /* Scheduling group status flags */ 721 #define SG_OVERLOAD 0x1 /* More than one runnable task on a CPU. */ 722 #define SG_OVERUTILIZED 0x2 /* One or more CPUs are over-utilized. */ 723 724 /* 725 * We add the notion of a root-domain which will be used to define per-domain 726 * variables. Each exclusive cpuset essentially defines an island domain by 727 * fully partitioning the member CPUs from any other cpuset. Whenever a new 728 * exclusive cpuset is created, we also create and attach a new root-domain 729 * object. 730 * 731 */ 732 struct root_domain { 733 atomic_t refcount; 734 atomic_t rto_count; 735 struct rcu_head rcu; 736 cpumask_var_t span; 737 cpumask_var_t online; 738 739 /* 740 * Indicate pullable load on at least one CPU, e.g: 741 * - More than one runnable task 742 * - Running task is misfit 743 */ 744 int overload; 745 746 /* Indicate one or more cpus over-utilized (tipping point) */ 747 int overutilized; 748 749 /* 750 * The bit corresponding to a CPU gets set here if such CPU has more 751 * than one runnable -deadline task (as it is below for RT tasks). 752 */ 753 cpumask_var_t dlo_mask; 754 atomic_t dlo_count; 755 struct dl_bw dl_bw; 756 struct cpudl cpudl; 757 758 #ifdef HAVE_RT_PUSH_IPI 759 /* 760 * For IPI pull requests, loop across the rto_mask. 761 */ 762 struct irq_work rto_push_work; 763 raw_spinlock_t rto_lock; 764 /* These are only updated and read within rto_lock */ 765 int rto_loop; 766 int rto_cpu; 767 /* These atomics are updated outside of a lock */ 768 atomic_t rto_loop_next; 769 atomic_t rto_loop_start; 770 #endif 771 /* 772 * The "RT overload" flag: it gets set if a CPU has more than 773 * one runnable RT task. 774 */ 775 cpumask_var_t rto_mask; 776 struct cpupri cpupri; 777 778 unsigned long max_cpu_capacity; 779 780 /* 781 * NULL-terminated list of performance domains intersecting with the 782 * CPUs of the rd. Protected by RCU. 783 */ 784 struct perf_domain __rcu *pd; 785 }; 786 787 extern void init_defrootdomain(void); 788 extern int sched_init_domains(const struct cpumask *cpu_map); 789 extern void rq_attach_root(struct rq *rq, struct root_domain *rd); 790 extern void sched_get_rd(struct root_domain *rd); 791 extern void sched_put_rd(struct root_domain *rd); 792 793 #ifdef HAVE_RT_PUSH_IPI 794 extern void rto_push_irq_work_func(struct irq_work *work); 795 #endif 796 #endif /* CONFIG_SMP */ 797 798 #ifdef CONFIG_UCLAMP_TASK 799 /* 800 * struct uclamp_bucket - Utilization clamp bucket 801 * @value: utilization clamp value for tasks on this clamp bucket 802 * @tasks: number of RUNNABLE tasks on this clamp bucket 803 * 804 * Keep track of how many tasks are RUNNABLE for a given utilization 805 * clamp value. 806 */ 807 struct uclamp_bucket { 808 unsigned long value : bits_per(SCHED_CAPACITY_SCALE); 809 unsigned long tasks : BITS_PER_LONG - bits_per(SCHED_CAPACITY_SCALE); 810 }; 811 812 /* 813 * struct uclamp_rq - rq's utilization clamp 814 * @value: currently active clamp values for a rq 815 * @bucket: utilization clamp buckets affecting a rq 816 * 817 * Keep track of RUNNABLE tasks on a rq to aggregate their clamp values. 818 * A clamp value is affecting a rq when there is at least one task RUNNABLE 819 * (or actually running) with that value. 820 * 821 * There are up to UCLAMP_CNT possible different clamp values, currently there 822 * are only two: minimum utilization and maximum utilization. 823 * 824 * All utilization clamping values are MAX aggregated, since: 825 * - for util_min: we want to run the CPU at least at the max of the minimum 826 * utilization required by its currently RUNNABLE tasks. 827 * - for util_max: we want to allow the CPU to run up to the max of the 828 * maximum utilization allowed by its currently RUNNABLE tasks. 829 * 830 * Since on each system we expect only a limited number of different 831 * utilization clamp values (UCLAMP_BUCKETS), use a simple array to track 832 * the metrics required to compute all the per-rq utilization clamp values. 833 */ 834 struct uclamp_rq { 835 unsigned int value; 836 struct uclamp_bucket bucket[UCLAMP_BUCKETS]; 837 }; 838 #endif /* CONFIG_UCLAMP_TASK */ 839 840 /* 841 * This is the main, per-CPU runqueue data structure. 842 * 843 * Locking rule: those places that want to lock multiple runqueues 844 * (such as the load balancing or the thread migration code), lock 845 * acquire operations must be ordered by ascending &runqueue. 846 */ 847 struct rq { 848 /* runqueue lock: */ 849 raw_spinlock_t lock; 850 851 /* 852 * nr_running and cpu_load should be in the same cacheline because 853 * remote CPUs use both these fields when doing load calculation. 854 */ 855 unsigned int nr_running; 856 #ifdef CONFIG_NUMA_BALANCING 857 unsigned int nr_numa_running; 858 unsigned int nr_preferred_running; 859 unsigned int numa_migrate_on; 860 #endif 861 #ifdef CONFIG_NO_HZ_COMMON 862 #ifdef CONFIG_SMP 863 unsigned long last_load_update_tick; 864 unsigned long last_blocked_load_update_tick; 865 unsigned int has_blocked_load; 866 #endif /* CONFIG_SMP */ 867 unsigned int nohz_tick_stopped; 868 atomic_t nohz_flags; 869 #endif /* CONFIG_NO_HZ_COMMON */ 870 871 unsigned long nr_load_updates; 872 u64 nr_switches; 873 874 #ifdef CONFIG_UCLAMP_TASK 875 /* Utilization clamp values based on CPU's RUNNABLE tasks */ 876 struct uclamp_rq uclamp[UCLAMP_CNT] ____cacheline_aligned; 877 unsigned int uclamp_flags; 878 #define UCLAMP_FLAG_IDLE 0x01 879 #endif 880 881 struct cfs_rq cfs; 882 struct rt_rq rt; 883 struct dl_rq dl; 884 885 #ifdef CONFIG_FAIR_GROUP_SCHED 886 /* list of leaf cfs_rq on this CPU: */ 887 struct list_head leaf_cfs_rq_list; 888 struct list_head *tmp_alone_branch; 889 #endif /* CONFIG_FAIR_GROUP_SCHED */ 890 891 /* 892 * This is part of a global counter where only the total sum 893 * over all CPUs matters. A task can increase this counter on 894 * one CPU and if it got migrated afterwards it may decrease 895 * it on another CPU. Always updated under the runqueue lock: 896 */ 897 unsigned long nr_uninterruptible; 898 899 struct task_struct *curr; 900 struct task_struct *idle; 901 struct task_struct *stop; 902 unsigned long next_balance; 903 struct mm_struct *prev_mm; 904 905 unsigned int clock_update_flags; 906 u64 clock; 907 /* Ensure that all clocks are in the same cache line */ 908 u64 clock_task ____cacheline_aligned; 909 u64 clock_pelt; 910 unsigned long lost_idle_time; 911 912 atomic_t nr_iowait; 913 914 #ifdef CONFIG_SMP 915 struct root_domain *rd; 916 struct sched_domain __rcu *sd; 917 918 unsigned long cpu_capacity; 919 unsigned long cpu_capacity_orig; 920 921 struct callback_head *balance_callback; 922 923 unsigned char idle_balance; 924 925 unsigned long misfit_task_load; 926 927 /* For active balancing */ 928 int active_balance; 929 int push_cpu; 930 struct cpu_stop_work active_balance_work; 931 932 /* CPU of this runqueue: */ 933 int cpu; 934 int online; 935 936 struct list_head cfs_tasks; 937 938 struct sched_avg avg_rt; 939 struct sched_avg avg_dl; 940 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ 941 struct sched_avg avg_irq; 942 #endif 943 u64 idle_stamp; 944 u64 avg_idle; 945 946 /* This is used to determine avg_idle's max value */ 947 u64 max_idle_balance_cost; 948 #endif 949 950 #ifdef CONFIG_IRQ_TIME_ACCOUNTING 951 u64 prev_irq_time; 952 #endif 953 #ifdef CONFIG_PARAVIRT 954 u64 prev_steal_time; 955 #endif 956 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING 957 u64 prev_steal_time_rq; 958 #endif 959 960 /* calc_load related fields */ 961 unsigned long calc_load_update; 962 long calc_load_active; 963 964 #ifdef CONFIG_SCHED_HRTICK 965 #ifdef CONFIG_SMP 966 int hrtick_csd_pending; 967 call_single_data_t hrtick_csd; 968 #endif 969 struct hrtimer hrtick_timer; 970 #endif 971 972 #ifdef CONFIG_SCHEDSTATS 973 /* latency stats */ 974 struct sched_info rq_sched_info; 975 unsigned long long rq_cpu_time; 976 /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */ 977 978 /* sys_sched_yield() stats */ 979 unsigned int yld_count; 980 981 /* schedule() stats */ 982 unsigned int sched_count; 983 unsigned int sched_goidle; 984 985 /* try_to_wake_up() stats */ 986 unsigned int ttwu_count; 987 unsigned int ttwu_local; 988 #endif 989 990 #ifdef CONFIG_SMP 991 struct llist_head wake_list; 992 #endif 993 994 #ifdef CONFIG_CPU_IDLE 995 /* Must be inspected within a rcu lock section */ 996 struct cpuidle_state *idle_state; 997 #endif 998 }; 999 1000 #ifdef CONFIG_FAIR_GROUP_SCHED 1001 1002 /* CPU runqueue to which this cfs_rq is attached */ 1003 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) 1004 { 1005 return cfs_rq->rq; 1006 } 1007 1008 #else 1009 1010 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) 1011 { 1012 return container_of(cfs_rq, struct rq, cfs); 1013 } 1014 #endif 1015 1016 static inline int cpu_of(struct rq *rq) 1017 { 1018 #ifdef CONFIG_SMP 1019 return rq->cpu; 1020 #else 1021 return 0; 1022 #endif 1023 } 1024 1025 1026 #ifdef CONFIG_SCHED_SMT 1027 extern void __update_idle_core(struct rq *rq); 1028 1029 static inline void update_idle_core(struct rq *rq) 1030 { 1031 if (static_branch_unlikely(&sched_smt_present)) 1032 __update_idle_core(rq); 1033 } 1034 1035 #else 1036 static inline void update_idle_core(struct rq *rq) { } 1037 #endif 1038 1039 DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); 1040 1041 #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu))) 1042 #define this_rq() this_cpu_ptr(&runqueues) 1043 #define task_rq(p) cpu_rq(task_cpu(p)) 1044 #define cpu_curr(cpu) (cpu_rq(cpu)->curr) 1045 #define raw_rq() raw_cpu_ptr(&runqueues) 1046 1047 extern void update_rq_clock(struct rq *rq); 1048 1049 static inline u64 __rq_clock_broken(struct rq *rq) 1050 { 1051 return READ_ONCE(rq->clock); 1052 } 1053 1054 /* 1055 * rq::clock_update_flags bits 1056 * 1057 * %RQCF_REQ_SKIP - will request skipping of clock update on the next 1058 * call to __schedule(). This is an optimisation to avoid 1059 * neighbouring rq clock updates. 1060 * 1061 * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is 1062 * in effect and calls to update_rq_clock() are being ignored. 1063 * 1064 * %RQCF_UPDATED - is a debug flag that indicates whether a call has been 1065 * made to update_rq_clock() since the last time rq::lock was pinned. 1066 * 1067 * If inside of __schedule(), clock_update_flags will have been 1068 * shifted left (a left shift is a cheap operation for the fast path 1069 * to promote %RQCF_REQ_SKIP to %RQCF_ACT_SKIP), so you must use, 1070 * 1071 * if (rq-clock_update_flags >= RQCF_UPDATED) 1072 * 1073 * to check if %RQCF_UPADTED is set. It'll never be shifted more than 1074 * one position though, because the next rq_unpin_lock() will shift it 1075 * back. 1076 */ 1077 #define RQCF_REQ_SKIP 0x01 1078 #define RQCF_ACT_SKIP 0x02 1079 #define RQCF_UPDATED 0x04 1080 1081 static inline void assert_clock_updated(struct rq *rq) 1082 { 1083 /* 1084 * The only reason for not seeing a clock update since the 1085 * last rq_pin_lock() is if we're currently skipping updates. 1086 */ 1087 SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP); 1088 } 1089 1090 static inline u64 rq_clock(struct rq *rq) 1091 { 1092 lockdep_assert_held(&rq->lock); 1093 assert_clock_updated(rq); 1094 1095 return rq->clock; 1096 } 1097 1098 static inline u64 rq_clock_task(struct rq *rq) 1099 { 1100 lockdep_assert_held(&rq->lock); 1101 assert_clock_updated(rq); 1102 1103 return rq->clock_task; 1104 } 1105 1106 static inline void rq_clock_skip_update(struct rq *rq) 1107 { 1108 lockdep_assert_held(&rq->lock); 1109 rq->clock_update_flags |= RQCF_REQ_SKIP; 1110 } 1111 1112 /* 1113 * See rt task throttling, which is the only time a skip 1114 * request is cancelled. 1115 */ 1116 static inline void rq_clock_cancel_skipupdate(struct rq *rq) 1117 { 1118 lockdep_assert_held(&rq->lock); 1119 rq->clock_update_flags &= ~RQCF_REQ_SKIP; 1120 } 1121 1122 struct rq_flags { 1123 unsigned long flags; 1124 struct pin_cookie cookie; 1125 #ifdef CONFIG_SCHED_DEBUG 1126 /* 1127 * A copy of (rq::clock_update_flags & RQCF_UPDATED) for the 1128 * current pin context is stashed here in case it needs to be 1129 * restored in rq_repin_lock(). 1130 */ 1131 unsigned int clock_update_flags; 1132 #endif 1133 }; 1134 1135 static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf) 1136 { 1137 rf->cookie = lockdep_pin_lock(&rq->lock); 1138 1139 #ifdef CONFIG_SCHED_DEBUG 1140 rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP); 1141 rf->clock_update_flags = 0; 1142 #endif 1143 } 1144 1145 static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf) 1146 { 1147 #ifdef CONFIG_SCHED_DEBUG 1148 if (rq->clock_update_flags > RQCF_ACT_SKIP) 1149 rf->clock_update_flags = RQCF_UPDATED; 1150 #endif 1151 1152 lockdep_unpin_lock(&rq->lock, rf->cookie); 1153 } 1154 1155 static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf) 1156 { 1157 lockdep_repin_lock(&rq->lock, rf->cookie); 1158 1159 #ifdef CONFIG_SCHED_DEBUG 1160 /* 1161 * Restore the value we stashed in @rf for this pin context. 1162 */ 1163 rq->clock_update_flags |= rf->clock_update_flags; 1164 #endif 1165 } 1166 1167 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) 1168 __acquires(rq->lock); 1169 1170 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) 1171 __acquires(p->pi_lock) 1172 __acquires(rq->lock); 1173 1174 static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf) 1175 __releases(rq->lock) 1176 { 1177 rq_unpin_lock(rq, rf); 1178 raw_spin_unlock(&rq->lock); 1179 } 1180 1181 static inline void 1182 task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) 1183 __releases(rq->lock) 1184 __releases(p->pi_lock) 1185 { 1186 rq_unpin_lock(rq, rf); 1187 raw_spin_unlock(&rq->lock); 1188 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); 1189 } 1190 1191 static inline void 1192 rq_lock_irqsave(struct rq *rq, struct rq_flags *rf) 1193 __acquires(rq->lock) 1194 { 1195 raw_spin_lock_irqsave(&rq->lock, rf->flags); 1196 rq_pin_lock(rq, rf); 1197 } 1198 1199 static inline void 1200 rq_lock_irq(struct rq *rq, struct rq_flags *rf) 1201 __acquires(rq->lock) 1202 { 1203 raw_spin_lock_irq(&rq->lock); 1204 rq_pin_lock(rq, rf); 1205 } 1206 1207 static inline void 1208 rq_lock(struct rq *rq, struct rq_flags *rf) 1209 __acquires(rq->lock) 1210 { 1211 raw_spin_lock(&rq->lock); 1212 rq_pin_lock(rq, rf); 1213 } 1214 1215 static inline void 1216 rq_relock(struct rq *rq, struct rq_flags *rf) 1217 __acquires(rq->lock) 1218 { 1219 raw_spin_lock(&rq->lock); 1220 rq_repin_lock(rq, rf); 1221 } 1222 1223 static inline void 1224 rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf) 1225 __releases(rq->lock) 1226 { 1227 rq_unpin_lock(rq, rf); 1228 raw_spin_unlock_irqrestore(&rq->lock, rf->flags); 1229 } 1230 1231 static inline void 1232 rq_unlock_irq(struct rq *rq, struct rq_flags *rf) 1233 __releases(rq->lock) 1234 { 1235 rq_unpin_lock(rq, rf); 1236 raw_spin_unlock_irq(&rq->lock); 1237 } 1238 1239 static inline void 1240 rq_unlock(struct rq *rq, struct rq_flags *rf) 1241 __releases(rq->lock) 1242 { 1243 rq_unpin_lock(rq, rf); 1244 raw_spin_unlock(&rq->lock); 1245 } 1246 1247 static inline struct rq * 1248 this_rq_lock_irq(struct rq_flags *rf) 1249 __acquires(rq->lock) 1250 { 1251 struct rq *rq; 1252 1253 local_irq_disable(); 1254 rq = this_rq(); 1255 rq_lock(rq, rf); 1256 return rq; 1257 } 1258 1259 #ifdef CONFIG_NUMA 1260 enum numa_topology_type { 1261 NUMA_DIRECT, 1262 NUMA_GLUELESS_MESH, 1263 NUMA_BACKPLANE, 1264 }; 1265 extern enum numa_topology_type sched_numa_topology_type; 1266 extern int sched_max_numa_distance; 1267 extern bool find_numa_distance(int distance); 1268 extern void sched_init_numa(void); 1269 extern void sched_domains_numa_masks_set(unsigned int cpu); 1270 extern void sched_domains_numa_masks_clear(unsigned int cpu); 1271 extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu); 1272 #else 1273 static inline void sched_init_numa(void) { } 1274 static inline void sched_domains_numa_masks_set(unsigned int cpu) { } 1275 static inline void sched_domains_numa_masks_clear(unsigned int cpu) { } 1276 static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu) 1277 { 1278 return nr_cpu_ids; 1279 } 1280 #endif 1281 1282 #ifdef CONFIG_NUMA_BALANCING 1283 /* The regions in numa_faults array from task_struct */ 1284 enum numa_faults_stats { 1285 NUMA_MEM = 0, 1286 NUMA_CPU, 1287 NUMA_MEMBUF, 1288 NUMA_CPUBUF 1289 }; 1290 extern void sched_setnuma(struct task_struct *p, int node); 1291 extern int migrate_task_to(struct task_struct *p, int cpu); 1292 extern int migrate_swap(struct task_struct *p, struct task_struct *t, 1293 int cpu, int scpu); 1294 extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p); 1295 #else 1296 static inline void 1297 init_numa_balancing(unsigned long clone_flags, struct task_struct *p) 1298 { 1299 } 1300 #endif /* CONFIG_NUMA_BALANCING */ 1301 1302 #ifdef CONFIG_SMP 1303 1304 static inline void 1305 queue_balance_callback(struct rq *rq, 1306 struct callback_head *head, 1307 void (*func)(struct rq *rq)) 1308 { 1309 lockdep_assert_held(&rq->lock); 1310 1311 if (unlikely(head->next)) 1312 return; 1313 1314 head->func = (void (*)(struct callback_head *))func; 1315 head->next = rq->balance_callback; 1316 rq->balance_callback = head; 1317 } 1318 1319 extern void sched_ttwu_pending(void); 1320 1321 #define rcu_dereference_check_sched_domain(p) \ 1322 rcu_dereference_check((p), \ 1323 lockdep_is_held(&sched_domains_mutex)) 1324 1325 /* 1326 * The domain tree (rq->sd) is protected by RCU's quiescent state transition. 1327 * See destroy_sched_domains: call_rcu for details. 1328 * 1329 * The domain tree of any CPU may only be accessed from within 1330 * preempt-disabled sections. 1331 */ 1332 #define for_each_domain(cpu, __sd) \ 1333 for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \ 1334 __sd; __sd = __sd->parent) 1335 1336 #define for_each_lower_domain(sd) for (; sd; sd = sd->child) 1337 1338 /** 1339 * highest_flag_domain - Return highest sched_domain containing flag. 1340 * @cpu: The CPU whose highest level of sched domain is to 1341 * be returned. 1342 * @flag: The flag to check for the highest sched_domain 1343 * for the given CPU. 1344 * 1345 * Returns the highest sched_domain of a CPU which contains the given flag. 1346 */ 1347 static inline struct sched_domain *highest_flag_domain(int cpu, int flag) 1348 { 1349 struct sched_domain *sd, *hsd = NULL; 1350 1351 for_each_domain(cpu, sd) { 1352 if (!(sd->flags & flag)) 1353 break; 1354 hsd = sd; 1355 } 1356 1357 return hsd; 1358 } 1359 1360 static inline struct sched_domain *lowest_flag_domain(int cpu, int flag) 1361 { 1362 struct sched_domain *sd; 1363 1364 for_each_domain(cpu, sd) { 1365 if (sd->flags & flag) 1366 break; 1367 } 1368 1369 return sd; 1370 } 1371 1372 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc); 1373 DECLARE_PER_CPU(int, sd_llc_size); 1374 DECLARE_PER_CPU(int, sd_llc_id); 1375 DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared); 1376 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa); 1377 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing); 1378 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity); 1379 extern struct static_key_false sched_asym_cpucapacity; 1380 1381 struct sched_group_capacity { 1382 atomic_t ref; 1383 /* 1384 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity 1385 * for a single CPU. 1386 */ 1387 unsigned long capacity; 1388 unsigned long min_capacity; /* Min per-CPU capacity in group */ 1389 unsigned long max_capacity; /* Max per-CPU capacity in group */ 1390 unsigned long next_update; 1391 int imbalance; /* XXX unrelated to capacity but shared group state */ 1392 1393 #ifdef CONFIG_SCHED_DEBUG 1394 int id; 1395 #endif 1396 1397 unsigned long cpumask[0]; /* Balance mask */ 1398 }; 1399 1400 struct sched_group { 1401 struct sched_group *next; /* Must be a circular list */ 1402 atomic_t ref; 1403 1404 unsigned int group_weight; 1405 struct sched_group_capacity *sgc; 1406 int asym_prefer_cpu; /* CPU of highest priority in group */ 1407 1408 /* 1409 * The CPUs this group covers. 1410 * 1411 * NOTE: this field is variable length. (Allocated dynamically 1412 * by attaching extra space to the end of the structure, 1413 * depending on how many CPUs the kernel has booted up with) 1414 */ 1415 unsigned long cpumask[0]; 1416 }; 1417 1418 static inline struct cpumask *sched_group_span(struct sched_group *sg) 1419 { 1420 return to_cpumask(sg->cpumask); 1421 } 1422 1423 /* 1424 * See build_balance_mask(). 1425 */ 1426 static inline struct cpumask *group_balance_mask(struct sched_group *sg) 1427 { 1428 return to_cpumask(sg->sgc->cpumask); 1429 } 1430 1431 /** 1432 * group_first_cpu - Returns the first CPU in the cpumask of a sched_group. 1433 * @group: The group whose first CPU is to be returned. 1434 */ 1435 static inline unsigned int group_first_cpu(struct sched_group *group) 1436 { 1437 return cpumask_first(sched_group_span(group)); 1438 } 1439 1440 extern int group_balance_cpu(struct sched_group *sg); 1441 1442 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL) 1443 void register_sched_domain_sysctl(void); 1444 void dirty_sched_domain_sysctl(int cpu); 1445 void unregister_sched_domain_sysctl(void); 1446 #else 1447 static inline void register_sched_domain_sysctl(void) 1448 { 1449 } 1450 static inline void dirty_sched_domain_sysctl(int cpu) 1451 { 1452 } 1453 static inline void unregister_sched_domain_sysctl(void) 1454 { 1455 } 1456 #endif 1457 1458 extern int newidle_balance(struct rq *this_rq, struct rq_flags *rf); 1459 1460 #else 1461 1462 static inline void sched_ttwu_pending(void) { } 1463 1464 static inline int newidle_balance(struct rq *this_rq, struct rq_flags *rf) { return 0; } 1465 1466 #endif /* CONFIG_SMP */ 1467 1468 #include "stats.h" 1469 #include "autogroup.h" 1470 1471 #ifdef CONFIG_CGROUP_SCHED 1472 1473 /* 1474 * Return the group to which this tasks belongs. 1475 * 1476 * We cannot use task_css() and friends because the cgroup subsystem 1477 * changes that value before the cgroup_subsys::attach() method is called, 1478 * therefore we cannot pin it and might observe the wrong value. 1479 * 1480 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup 1481 * core changes this before calling sched_move_task(). 1482 * 1483 * Instead we use a 'copy' which is updated from sched_move_task() while 1484 * holding both task_struct::pi_lock and rq::lock. 1485 */ 1486 static inline struct task_group *task_group(struct task_struct *p) 1487 { 1488 return p->sched_task_group; 1489 } 1490 1491 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */ 1492 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) 1493 { 1494 #if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED) 1495 struct task_group *tg = task_group(p); 1496 #endif 1497 1498 #ifdef CONFIG_FAIR_GROUP_SCHED 1499 set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]); 1500 p->se.cfs_rq = tg->cfs_rq[cpu]; 1501 p->se.parent = tg->se[cpu]; 1502 #endif 1503 1504 #ifdef CONFIG_RT_GROUP_SCHED 1505 p->rt.rt_rq = tg->rt_rq[cpu]; 1506 p->rt.parent = tg->rt_se[cpu]; 1507 #endif 1508 } 1509 1510 #else /* CONFIG_CGROUP_SCHED */ 1511 1512 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { } 1513 static inline struct task_group *task_group(struct task_struct *p) 1514 { 1515 return NULL; 1516 } 1517 1518 #endif /* CONFIG_CGROUP_SCHED */ 1519 1520 static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) 1521 { 1522 set_task_rq(p, cpu); 1523 #ifdef CONFIG_SMP 1524 /* 1525 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be 1526 * successfully executed on another CPU. We must ensure that updates of 1527 * per-task data have been completed by this moment. 1528 */ 1529 smp_wmb(); 1530 #ifdef CONFIG_THREAD_INFO_IN_TASK 1531 WRITE_ONCE(p->cpu, cpu); 1532 #else 1533 WRITE_ONCE(task_thread_info(p)->cpu, cpu); 1534 #endif 1535 p->wake_cpu = cpu; 1536 #endif 1537 } 1538 1539 /* 1540 * Tunables that become constants when CONFIG_SCHED_DEBUG is off: 1541 */ 1542 #ifdef CONFIG_SCHED_DEBUG 1543 # include <linux/static_key.h> 1544 # define const_debug __read_mostly 1545 #else 1546 # define const_debug const 1547 #endif 1548 1549 #define SCHED_FEAT(name, enabled) \ 1550 __SCHED_FEAT_##name , 1551 1552 enum { 1553 #include "features.h" 1554 __SCHED_FEAT_NR, 1555 }; 1556 1557 #undef SCHED_FEAT 1558 1559 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL) 1560 1561 /* 1562 * To support run-time toggling of sched features, all the translation units 1563 * (but core.c) reference the sysctl_sched_features defined in core.c. 1564 */ 1565 extern const_debug unsigned int sysctl_sched_features; 1566 1567 #define SCHED_FEAT(name, enabled) \ 1568 static __always_inline bool static_branch_##name(struct static_key *key) \ 1569 { \ 1570 return static_key_##enabled(key); \ 1571 } 1572 1573 #include "features.h" 1574 #undef SCHED_FEAT 1575 1576 extern struct static_key sched_feat_keys[__SCHED_FEAT_NR]; 1577 #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x])) 1578 1579 #else /* !(SCHED_DEBUG && CONFIG_JUMP_LABEL) */ 1580 1581 /* 1582 * Each translation unit has its own copy of sysctl_sched_features to allow 1583 * constants propagation at compile time and compiler optimization based on 1584 * features default. 1585 */ 1586 #define SCHED_FEAT(name, enabled) \ 1587 (1UL << __SCHED_FEAT_##name) * enabled | 1588 static const_debug __maybe_unused unsigned int sysctl_sched_features = 1589 #include "features.h" 1590 0; 1591 #undef SCHED_FEAT 1592 1593 #define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) 1594 1595 #endif /* SCHED_DEBUG && CONFIG_JUMP_LABEL */ 1596 1597 extern struct static_key_false sched_numa_balancing; 1598 extern struct static_key_false sched_schedstats; 1599 1600 static inline u64 global_rt_period(void) 1601 { 1602 return (u64)sysctl_sched_rt_period * NSEC_PER_USEC; 1603 } 1604 1605 static inline u64 global_rt_runtime(void) 1606 { 1607 if (sysctl_sched_rt_runtime < 0) 1608 return RUNTIME_INF; 1609 1610 return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC; 1611 } 1612 1613 static inline int task_current(struct rq *rq, struct task_struct *p) 1614 { 1615 return rq->curr == p; 1616 } 1617 1618 static inline int task_running(struct rq *rq, struct task_struct *p) 1619 { 1620 #ifdef CONFIG_SMP 1621 return p->on_cpu; 1622 #else 1623 return task_current(rq, p); 1624 #endif 1625 } 1626 1627 static inline int task_on_rq_queued(struct task_struct *p) 1628 { 1629 return p->on_rq == TASK_ON_RQ_QUEUED; 1630 } 1631 1632 static inline int task_on_rq_migrating(struct task_struct *p) 1633 { 1634 return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING; 1635 } 1636 1637 /* 1638 * wake flags 1639 */ 1640 #define WF_SYNC 0x01 /* Waker goes to sleep after wakeup */ 1641 #define WF_FORK 0x02 /* Child wakeup after fork */ 1642 #define WF_MIGRATED 0x4 /* Internal use, task got migrated */ 1643 1644 /* 1645 * To aid in avoiding the subversion of "niceness" due to uneven distribution 1646 * of tasks with abnormal "nice" values across CPUs the contribution that 1647 * each task makes to its run queue's load is weighted according to its 1648 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a 1649 * scaled version of the new time slice allocation that they receive on time 1650 * slice expiry etc. 1651 */ 1652 1653 #define WEIGHT_IDLEPRIO 3 1654 #define WMULT_IDLEPRIO 1431655765 1655 1656 extern const int sched_prio_to_weight[40]; 1657 extern const u32 sched_prio_to_wmult[40]; 1658 1659 /* 1660 * {de,en}queue flags: 1661 * 1662 * DEQUEUE_SLEEP - task is no longer runnable 1663 * ENQUEUE_WAKEUP - task just became runnable 1664 * 1665 * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks 1666 * are in a known state which allows modification. Such pairs 1667 * should preserve as much state as possible. 1668 * 1669 * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location 1670 * in the runqueue. 1671 * 1672 * ENQUEUE_HEAD - place at front of runqueue (tail if not specified) 1673 * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline) 1674 * ENQUEUE_MIGRATED - the task was migrated during wakeup 1675 * 1676 */ 1677 1678 #define DEQUEUE_SLEEP 0x01 1679 #define DEQUEUE_SAVE 0x02 /* Matches ENQUEUE_RESTORE */ 1680 #define DEQUEUE_MOVE 0x04 /* Matches ENQUEUE_MOVE */ 1681 #define DEQUEUE_NOCLOCK 0x08 /* Matches ENQUEUE_NOCLOCK */ 1682 1683 #define ENQUEUE_WAKEUP 0x01 1684 #define ENQUEUE_RESTORE 0x02 1685 #define ENQUEUE_MOVE 0x04 1686 #define ENQUEUE_NOCLOCK 0x08 1687 1688 #define ENQUEUE_HEAD 0x10 1689 #define ENQUEUE_REPLENISH 0x20 1690 #ifdef CONFIG_SMP 1691 #define ENQUEUE_MIGRATED 0x40 1692 #else 1693 #define ENQUEUE_MIGRATED 0x00 1694 #endif 1695 1696 #define RETRY_TASK ((void *)-1UL) 1697 1698 struct sched_class { 1699 const struct sched_class *next; 1700 1701 #ifdef CONFIG_UCLAMP_TASK 1702 int uclamp_enabled; 1703 #endif 1704 1705 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags); 1706 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags); 1707 void (*yield_task) (struct rq *rq); 1708 bool (*yield_to_task)(struct rq *rq, struct task_struct *p, bool preempt); 1709 1710 void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags); 1711 1712 /* 1713 * Both @prev and @rf are optional and may be NULL, in which case the 1714 * caller must already have invoked put_prev_task(rq, prev, rf). 1715 * 1716 * Otherwise it is the responsibility of the pick_next_task() to call 1717 * put_prev_task() on the @prev task or something equivalent, IFF it 1718 * returns a next task. 1719 * 1720 * In that case (@rf != NULL) it may return RETRY_TASK when it finds a 1721 * higher prio class has runnable tasks. 1722 */ 1723 struct task_struct * (*pick_next_task)(struct rq *rq, 1724 struct task_struct *prev, 1725 struct rq_flags *rf); 1726 void (*put_prev_task)(struct rq *rq, struct task_struct *p, struct rq_flags *rf); 1727 void (*set_next_task)(struct rq *rq, struct task_struct *p); 1728 1729 #ifdef CONFIG_SMP 1730 int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags); 1731 void (*migrate_task_rq)(struct task_struct *p, int new_cpu); 1732 1733 void (*task_woken)(struct rq *this_rq, struct task_struct *task); 1734 1735 void (*set_cpus_allowed)(struct task_struct *p, 1736 const struct cpumask *newmask); 1737 1738 void (*rq_online)(struct rq *rq); 1739 void (*rq_offline)(struct rq *rq); 1740 #endif 1741 1742 void (*task_tick)(struct rq *rq, struct task_struct *p, int queued); 1743 void (*task_fork)(struct task_struct *p); 1744 void (*task_dead)(struct task_struct *p); 1745 1746 /* 1747 * The switched_from() call is allowed to drop rq->lock, therefore we 1748 * cannot assume the switched_from/switched_to pair is serliazed by 1749 * rq->lock. They are however serialized by p->pi_lock. 1750 */ 1751 void (*switched_from)(struct rq *this_rq, struct task_struct *task); 1752 void (*switched_to) (struct rq *this_rq, struct task_struct *task); 1753 void (*prio_changed) (struct rq *this_rq, struct task_struct *task, 1754 int oldprio); 1755 1756 unsigned int (*get_rr_interval)(struct rq *rq, 1757 struct task_struct *task); 1758 1759 void (*update_curr)(struct rq *rq); 1760 1761 #define TASK_SET_GROUP 0 1762 #define TASK_MOVE_GROUP 1 1763 1764 #ifdef CONFIG_FAIR_GROUP_SCHED 1765 void (*task_change_group)(struct task_struct *p, int type); 1766 #endif 1767 }; 1768 1769 static inline void put_prev_task(struct rq *rq, struct task_struct *prev) 1770 { 1771 WARN_ON_ONCE(rq->curr != prev); 1772 prev->sched_class->put_prev_task(rq, prev, NULL); 1773 } 1774 1775 static inline void set_next_task(struct rq *rq, struct task_struct *next) 1776 { 1777 WARN_ON_ONCE(rq->curr != next); 1778 next->sched_class->set_next_task(rq, next); 1779 } 1780 1781 #ifdef CONFIG_SMP 1782 #define sched_class_highest (&stop_sched_class) 1783 #else 1784 #define sched_class_highest (&dl_sched_class) 1785 #endif 1786 #define for_each_class(class) \ 1787 for (class = sched_class_highest; class; class = class->next) 1788 1789 extern const struct sched_class stop_sched_class; 1790 extern const struct sched_class dl_sched_class; 1791 extern const struct sched_class rt_sched_class; 1792 extern const struct sched_class fair_sched_class; 1793 extern const struct sched_class idle_sched_class; 1794 1795 1796 #ifdef CONFIG_SMP 1797 1798 extern void update_group_capacity(struct sched_domain *sd, int cpu); 1799 1800 extern void trigger_load_balance(struct rq *rq); 1801 1802 extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask); 1803 1804 #endif 1805 1806 #ifdef CONFIG_CPU_IDLE 1807 static inline void idle_set_state(struct rq *rq, 1808 struct cpuidle_state *idle_state) 1809 { 1810 rq->idle_state = idle_state; 1811 } 1812 1813 static inline struct cpuidle_state *idle_get_state(struct rq *rq) 1814 { 1815 SCHED_WARN_ON(!rcu_read_lock_held()); 1816 1817 return rq->idle_state; 1818 } 1819 #else 1820 static inline void idle_set_state(struct rq *rq, 1821 struct cpuidle_state *idle_state) 1822 { 1823 } 1824 1825 static inline struct cpuidle_state *idle_get_state(struct rq *rq) 1826 { 1827 return NULL; 1828 } 1829 #endif 1830 1831 extern void schedule_idle(void); 1832 1833 extern void sysrq_sched_debug_show(void); 1834 extern void sched_init_granularity(void); 1835 extern void update_max_interval(void); 1836 1837 extern void init_sched_dl_class(void); 1838 extern void init_sched_rt_class(void); 1839 extern void init_sched_fair_class(void); 1840 1841 extern void reweight_task(struct task_struct *p, int prio); 1842 1843 extern void resched_curr(struct rq *rq); 1844 extern void resched_cpu(int cpu); 1845 1846 extern struct rt_bandwidth def_rt_bandwidth; 1847 extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); 1848 1849 extern struct dl_bandwidth def_dl_bandwidth; 1850 extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime); 1851 extern void init_dl_task_timer(struct sched_dl_entity *dl_se); 1852 extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se); 1853 extern void init_dl_rq_bw_ratio(struct dl_rq *dl_rq); 1854 1855 #define BW_SHIFT 20 1856 #define BW_UNIT (1 << BW_SHIFT) 1857 #define RATIO_SHIFT 8 1858 unsigned long to_ratio(u64 period, u64 runtime); 1859 1860 extern void init_entity_runnable_average(struct sched_entity *se); 1861 extern void post_init_entity_util_avg(struct task_struct *p); 1862 1863 #ifdef CONFIG_NO_HZ_FULL 1864 extern bool sched_can_stop_tick(struct rq *rq); 1865 extern int __init sched_tick_offload_init(void); 1866 1867 /* 1868 * Tick may be needed by tasks in the runqueue depending on their policy and 1869 * requirements. If tick is needed, lets send the target an IPI to kick it out of 1870 * nohz mode if necessary. 1871 */ 1872 static inline void sched_update_tick_dependency(struct rq *rq) 1873 { 1874 int cpu; 1875 1876 if (!tick_nohz_full_enabled()) 1877 return; 1878 1879 cpu = cpu_of(rq); 1880 1881 if (!tick_nohz_full_cpu(cpu)) 1882 return; 1883 1884 if (sched_can_stop_tick(rq)) 1885 tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED); 1886 else 1887 tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED); 1888 } 1889 #else 1890 static inline int sched_tick_offload_init(void) { return 0; } 1891 static inline void sched_update_tick_dependency(struct rq *rq) { } 1892 #endif 1893 1894 static inline void add_nr_running(struct rq *rq, unsigned count) 1895 { 1896 unsigned prev_nr = rq->nr_running; 1897 1898 rq->nr_running = prev_nr + count; 1899 1900 #ifdef CONFIG_SMP 1901 if (prev_nr < 2 && rq->nr_running >= 2) { 1902 if (!READ_ONCE(rq->rd->overload)) 1903 WRITE_ONCE(rq->rd->overload, 1); 1904 } 1905 #endif 1906 1907 sched_update_tick_dependency(rq); 1908 } 1909 1910 static inline void sub_nr_running(struct rq *rq, unsigned count) 1911 { 1912 rq->nr_running -= count; 1913 /* Check if we still need preemption */ 1914 sched_update_tick_dependency(rq); 1915 } 1916 1917 extern void activate_task(struct rq *rq, struct task_struct *p, int flags); 1918 extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags); 1919 1920 extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags); 1921 1922 extern const_debug unsigned int sysctl_sched_nr_migrate; 1923 extern const_debug unsigned int sysctl_sched_migration_cost; 1924 1925 #ifdef CONFIG_SCHED_HRTICK 1926 1927 /* 1928 * Use hrtick when: 1929 * - enabled by features 1930 * - hrtimer is actually high res 1931 */ 1932 static inline int hrtick_enabled(struct rq *rq) 1933 { 1934 if (!sched_feat(HRTICK)) 1935 return 0; 1936 if (!cpu_active(cpu_of(rq))) 1937 return 0; 1938 return hrtimer_is_hres_active(&rq->hrtick_timer); 1939 } 1940 1941 void hrtick_start(struct rq *rq, u64 delay); 1942 1943 #else 1944 1945 static inline int hrtick_enabled(struct rq *rq) 1946 { 1947 return 0; 1948 } 1949 1950 #endif /* CONFIG_SCHED_HRTICK */ 1951 1952 #ifndef arch_scale_freq_capacity 1953 static __always_inline 1954 unsigned long arch_scale_freq_capacity(int cpu) 1955 { 1956 return SCHED_CAPACITY_SCALE; 1957 } 1958 #endif 1959 1960 #ifdef CONFIG_SMP 1961 #ifdef CONFIG_PREEMPTION 1962 1963 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2); 1964 1965 /* 1966 * fair double_lock_balance: Safely acquires both rq->locks in a fair 1967 * way at the expense of forcing extra atomic operations in all 1968 * invocations. This assures that the double_lock is acquired using the 1969 * same underlying policy as the spinlock_t on this architecture, which 1970 * reduces latency compared to the unfair variant below. However, it 1971 * also adds more overhead and therefore may reduce throughput. 1972 */ 1973 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) 1974 __releases(this_rq->lock) 1975 __acquires(busiest->lock) 1976 __acquires(this_rq->lock) 1977 { 1978 raw_spin_unlock(&this_rq->lock); 1979 double_rq_lock(this_rq, busiest); 1980 1981 return 1; 1982 } 1983 1984 #else 1985 /* 1986 * Unfair double_lock_balance: Optimizes throughput at the expense of 1987 * latency by eliminating extra atomic operations when the locks are 1988 * already in proper order on entry. This favors lower CPU-ids and will 1989 * grant the double lock to lower CPUs over higher ids under contention, 1990 * regardless of entry order into the function. 1991 */ 1992 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) 1993 __releases(this_rq->lock) 1994 __acquires(busiest->lock) 1995 __acquires(this_rq->lock) 1996 { 1997 int ret = 0; 1998 1999 if (unlikely(!raw_spin_trylock(&busiest->lock))) { 2000 if (busiest < this_rq) { 2001 raw_spin_unlock(&this_rq->lock); 2002 raw_spin_lock(&busiest->lock); 2003 raw_spin_lock_nested(&this_rq->lock, 2004 SINGLE_DEPTH_NESTING); 2005 ret = 1; 2006 } else 2007 raw_spin_lock_nested(&busiest->lock, 2008 SINGLE_DEPTH_NESTING); 2009 } 2010 return ret; 2011 } 2012 2013 #endif /* CONFIG_PREEMPTION */ 2014 2015 /* 2016 * double_lock_balance - lock the busiest runqueue, this_rq is locked already. 2017 */ 2018 static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest) 2019 { 2020 if (unlikely(!irqs_disabled())) { 2021 /* printk() doesn't work well under rq->lock */ 2022 raw_spin_unlock(&this_rq->lock); 2023 BUG_ON(1); 2024 } 2025 2026 return _double_lock_balance(this_rq, busiest); 2027 } 2028 2029 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) 2030 __releases(busiest->lock) 2031 { 2032 raw_spin_unlock(&busiest->lock); 2033 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); 2034 } 2035 2036 static inline void double_lock(spinlock_t *l1, spinlock_t *l2) 2037 { 2038 if (l1 > l2) 2039 swap(l1, l2); 2040 2041 spin_lock(l1); 2042 spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 2043 } 2044 2045 static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2) 2046 { 2047 if (l1 > l2) 2048 swap(l1, l2); 2049 2050 spin_lock_irq(l1); 2051 spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 2052 } 2053 2054 static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2) 2055 { 2056 if (l1 > l2) 2057 swap(l1, l2); 2058 2059 raw_spin_lock(l1); 2060 raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 2061 } 2062 2063 /* 2064 * double_rq_lock - safely lock two runqueues 2065 * 2066 * Note this does not disable interrupts like task_rq_lock, 2067 * you need to do so manually before calling. 2068 */ 2069 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) 2070 __acquires(rq1->lock) 2071 __acquires(rq2->lock) 2072 { 2073 BUG_ON(!irqs_disabled()); 2074 if (rq1 == rq2) { 2075 raw_spin_lock(&rq1->lock); 2076 __acquire(rq2->lock); /* Fake it out ;) */ 2077 } else { 2078 if (rq1 < rq2) { 2079 raw_spin_lock(&rq1->lock); 2080 raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); 2081 } else { 2082 raw_spin_lock(&rq2->lock); 2083 raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); 2084 } 2085 } 2086 } 2087 2088 /* 2089 * double_rq_unlock - safely unlock two runqueues 2090 * 2091 * Note this does not restore interrupts like task_rq_unlock, 2092 * you need to do so manually after calling. 2093 */ 2094 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) 2095 __releases(rq1->lock) 2096 __releases(rq2->lock) 2097 { 2098 raw_spin_unlock(&rq1->lock); 2099 if (rq1 != rq2) 2100 raw_spin_unlock(&rq2->lock); 2101 else 2102 __release(rq2->lock); 2103 } 2104 2105 extern void set_rq_online (struct rq *rq); 2106 extern void set_rq_offline(struct rq *rq); 2107 extern bool sched_smp_initialized; 2108 2109 #else /* CONFIG_SMP */ 2110 2111 /* 2112 * double_rq_lock - safely lock two runqueues 2113 * 2114 * Note this does not disable interrupts like task_rq_lock, 2115 * you need to do so manually before calling. 2116 */ 2117 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) 2118 __acquires(rq1->lock) 2119 __acquires(rq2->lock) 2120 { 2121 BUG_ON(!irqs_disabled()); 2122 BUG_ON(rq1 != rq2); 2123 raw_spin_lock(&rq1->lock); 2124 __acquire(rq2->lock); /* Fake it out ;) */ 2125 } 2126 2127 /* 2128 * double_rq_unlock - safely unlock two runqueues 2129 * 2130 * Note this does not restore interrupts like task_rq_unlock, 2131 * you need to do so manually after calling. 2132 */ 2133 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) 2134 __releases(rq1->lock) 2135 __releases(rq2->lock) 2136 { 2137 BUG_ON(rq1 != rq2); 2138 raw_spin_unlock(&rq1->lock); 2139 __release(rq2->lock); 2140 } 2141 2142 #endif 2143 2144 extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq); 2145 extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq); 2146 2147 #ifdef CONFIG_SCHED_DEBUG 2148 extern bool sched_debug_enabled; 2149 2150 extern void print_cfs_stats(struct seq_file *m, int cpu); 2151 extern void print_rt_stats(struct seq_file *m, int cpu); 2152 extern void print_dl_stats(struct seq_file *m, int cpu); 2153 extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq); 2154 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq); 2155 extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq); 2156 #ifdef CONFIG_NUMA_BALANCING 2157 extern void 2158 show_numa_stats(struct task_struct *p, struct seq_file *m); 2159 extern void 2160 print_numa_stats(struct seq_file *m, int node, unsigned long tsf, 2161 unsigned long tpf, unsigned long gsf, unsigned long gpf); 2162 #endif /* CONFIG_NUMA_BALANCING */ 2163 #endif /* CONFIG_SCHED_DEBUG */ 2164 2165 extern void init_cfs_rq(struct cfs_rq *cfs_rq); 2166 extern void init_rt_rq(struct rt_rq *rt_rq); 2167 extern void init_dl_rq(struct dl_rq *dl_rq); 2168 2169 extern void cfs_bandwidth_usage_inc(void); 2170 extern void cfs_bandwidth_usage_dec(void); 2171 2172 #ifdef CONFIG_NO_HZ_COMMON 2173 #define NOHZ_BALANCE_KICK_BIT 0 2174 #define NOHZ_STATS_KICK_BIT 1 2175 2176 #define NOHZ_BALANCE_KICK BIT(NOHZ_BALANCE_KICK_BIT) 2177 #define NOHZ_STATS_KICK BIT(NOHZ_STATS_KICK_BIT) 2178 2179 #define NOHZ_KICK_MASK (NOHZ_BALANCE_KICK | NOHZ_STATS_KICK) 2180 2181 #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags) 2182 2183 extern void nohz_balance_exit_idle(struct rq *rq); 2184 #else 2185 static inline void nohz_balance_exit_idle(struct rq *rq) { } 2186 #endif 2187 2188 2189 #ifdef CONFIG_SMP 2190 static inline 2191 void __dl_update(struct dl_bw *dl_b, s64 bw) 2192 { 2193 struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw); 2194 int i; 2195 2196 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(), 2197 "sched RCU must be held"); 2198 for_each_cpu_and(i, rd->span, cpu_active_mask) { 2199 struct rq *rq = cpu_rq(i); 2200 2201 rq->dl.extra_bw += bw; 2202 } 2203 } 2204 #else 2205 static inline 2206 void __dl_update(struct dl_bw *dl_b, s64 bw) 2207 { 2208 struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw); 2209 2210 dl->extra_bw += bw; 2211 } 2212 #endif 2213 2214 2215 #ifdef CONFIG_IRQ_TIME_ACCOUNTING 2216 struct irqtime { 2217 u64 total; 2218 u64 tick_delta; 2219 u64 irq_start_time; 2220 struct u64_stats_sync sync; 2221 }; 2222 2223 DECLARE_PER_CPU(struct irqtime, cpu_irqtime); 2224 2225 /* 2226 * Returns the irqtime minus the softirq time computed by ksoftirqd. 2227 * Otherwise ksoftirqd's sum_exec_runtime is substracted its own runtime 2228 * and never move forward. 2229 */ 2230 static inline u64 irq_time_read(int cpu) 2231 { 2232 struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu); 2233 unsigned int seq; 2234 u64 total; 2235 2236 do { 2237 seq = __u64_stats_fetch_begin(&irqtime->sync); 2238 total = irqtime->total; 2239 } while (__u64_stats_fetch_retry(&irqtime->sync, seq)); 2240 2241 return total; 2242 } 2243 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */ 2244 2245 #ifdef CONFIG_CPU_FREQ 2246 DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data); 2247 2248 /** 2249 * cpufreq_update_util - Take a note about CPU utilization changes. 2250 * @rq: Runqueue to carry out the update for. 2251 * @flags: Update reason flags. 2252 * 2253 * This function is called by the scheduler on the CPU whose utilization is 2254 * being updated. 2255 * 2256 * It can only be called from RCU-sched read-side critical sections. 2257 * 2258 * The way cpufreq is currently arranged requires it to evaluate the CPU 2259 * performance state (frequency/voltage) on a regular basis to prevent it from 2260 * being stuck in a completely inadequate performance level for too long. 2261 * That is not guaranteed to happen if the updates are only triggered from CFS 2262 * and DL, though, because they may not be coming in if only RT tasks are 2263 * active all the time (or there are RT tasks only). 2264 * 2265 * As a workaround for that issue, this function is called periodically by the 2266 * RT sched class to trigger extra cpufreq updates to prevent it from stalling, 2267 * but that really is a band-aid. Going forward it should be replaced with 2268 * solutions targeted more specifically at RT tasks. 2269 */ 2270 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) 2271 { 2272 struct update_util_data *data; 2273 2274 data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data, 2275 cpu_of(rq))); 2276 if (data) 2277 data->func(data, rq_clock(rq), flags); 2278 } 2279 #else 2280 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {} 2281 #endif /* CONFIG_CPU_FREQ */ 2282 2283 #ifdef CONFIG_UCLAMP_TASK 2284 enum uclamp_id uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id); 2285 2286 static __always_inline 2287 unsigned int uclamp_util_with(struct rq *rq, unsigned int util, 2288 struct task_struct *p) 2289 { 2290 unsigned int min_util = READ_ONCE(rq->uclamp[UCLAMP_MIN].value); 2291 unsigned int max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value); 2292 2293 if (p) { 2294 min_util = max(min_util, uclamp_eff_value(p, UCLAMP_MIN)); 2295 max_util = max(max_util, uclamp_eff_value(p, UCLAMP_MAX)); 2296 } 2297 2298 /* 2299 * Since CPU's {min,max}_util clamps are MAX aggregated considering 2300 * RUNNABLE tasks with _different_ clamps, we can end up with an 2301 * inversion. Fix it now when the clamps are applied. 2302 */ 2303 if (unlikely(min_util >= max_util)) 2304 return min_util; 2305 2306 return clamp(util, min_util, max_util); 2307 } 2308 2309 static inline unsigned int uclamp_util(struct rq *rq, unsigned int util) 2310 { 2311 return uclamp_util_with(rq, util, NULL); 2312 } 2313 #else /* CONFIG_UCLAMP_TASK */ 2314 static inline unsigned int uclamp_util_with(struct rq *rq, unsigned int util, 2315 struct task_struct *p) 2316 { 2317 return util; 2318 } 2319 static inline unsigned int uclamp_util(struct rq *rq, unsigned int util) 2320 { 2321 return util; 2322 } 2323 #endif /* CONFIG_UCLAMP_TASK */ 2324 2325 #ifdef arch_scale_freq_capacity 2326 # ifndef arch_scale_freq_invariant 2327 # define arch_scale_freq_invariant() true 2328 # endif 2329 #else 2330 # define arch_scale_freq_invariant() false 2331 #endif 2332 2333 #ifdef CONFIG_SMP 2334 static inline unsigned long capacity_orig_of(int cpu) 2335 { 2336 return cpu_rq(cpu)->cpu_capacity_orig; 2337 } 2338 #endif 2339 2340 /** 2341 * enum schedutil_type - CPU utilization type 2342 * @FREQUENCY_UTIL: Utilization used to select frequency 2343 * @ENERGY_UTIL: Utilization used during energy calculation 2344 * 2345 * The utilization signals of all scheduling classes (CFS/RT/DL) and IRQ time 2346 * need to be aggregated differently depending on the usage made of them. This 2347 * enum is used within schedutil_freq_util() to differentiate the types of 2348 * utilization expected by the callers, and adjust the aggregation accordingly. 2349 */ 2350 enum schedutil_type { 2351 FREQUENCY_UTIL, 2352 ENERGY_UTIL, 2353 }; 2354 2355 #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL 2356 2357 unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs, 2358 unsigned long max, enum schedutil_type type, 2359 struct task_struct *p); 2360 2361 static inline unsigned long cpu_bw_dl(struct rq *rq) 2362 { 2363 return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT; 2364 } 2365 2366 static inline unsigned long cpu_util_dl(struct rq *rq) 2367 { 2368 return READ_ONCE(rq->avg_dl.util_avg); 2369 } 2370 2371 static inline unsigned long cpu_util_cfs(struct rq *rq) 2372 { 2373 unsigned long util = READ_ONCE(rq->cfs.avg.util_avg); 2374 2375 if (sched_feat(UTIL_EST)) { 2376 util = max_t(unsigned long, util, 2377 READ_ONCE(rq->cfs.avg.util_est.enqueued)); 2378 } 2379 2380 return util; 2381 } 2382 2383 static inline unsigned long cpu_util_rt(struct rq *rq) 2384 { 2385 return READ_ONCE(rq->avg_rt.util_avg); 2386 } 2387 #else /* CONFIG_CPU_FREQ_GOV_SCHEDUTIL */ 2388 static inline unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs, 2389 unsigned long max, enum schedutil_type type, 2390 struct task_struct *p) 2391 { 2392 return 0; 2393 } 2394 #endif /* CONFIG_CPU_FREQ_GOV_SCHEDUTIL */ 2395 2396 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ 2397 static inline unsigned long cpu_util_irq(struct rq *rq) 2398 { 2399 return rq->avg_irq.util_avg; 2400 } 2401 2402 static inline 2403 unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max) 2404 { 2405 util *= (max - irq); 2406 util /= max; 2407 2408 return util; 2409 2410 } 2411 #else 2412 static inline unsigned long cpu_util_irq(struct rq *rq) 2413 { 2414 return 0; 2415 } 2416 2417 static inline 2418 unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max) 2419 { 2420 return util; 2421 } 2422 #endif 2423 2424 #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) 2425 2426 #define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus))) 2427 2428 DECLARE_STATIC_KEY_FALSE(sched_energy_present); 2429 2430 static inline bool sched_energy_enabled(void) 2431 { 2432 return static_branch_unlikely(&sched_energy_present); 2433 } 2434 2435 #else /* ! (CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */ 2436 2437 #define perf_domain_span(pd) NULL 2438 static inline bool sched_energy_enabled(void) { return false; } 2439 2440 #endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */ 2441