/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Scheduler internal types and methods:
 */
#ifndef _KERNEL_SCHED_SCHED_H
#define _KERNEL_SCHED_SCHED_H

#include <linux/sched/affinity.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/cpufreq.h>
#include <linux/sched/deadline.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/mm.h>
#include <linux/sched/rseq_api.h>
#include <linux/sched/signal.h>
#include <linux/sched/smt.h>
#include <linux/sched/stat.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/task_flags.h>
#include <linux/sched/task.h>
#include <linux/sched/topology.h>

#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/capability.h>
#include <linux/cgroup_api.h>
#include <linux/cgroup.h>
#include <linux/context_tracking.h>
#include <linux/cpufreq.h>
#include <linux/cpumask_api.h>
#include <linux/ctype.h>
#include <linux/file.h>
#include <linux/fs_api.h>
#include <linux/hrtimer_api.h>
#include <linux/interrupt.h>
#include <linux/irq_work.h>
#include <linux/jiffies.h>
#include <linux/kref_api.h>
#include <linux/kthread.h>
#include <linux/ktime_api.h>
#include <linux/lockdep_api.h>
#include <linux/lockdep.h>
#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex_api.h>
#include <linux/plist.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
#include <linux/psi.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include <linux/seqlock.h>
#include <linux/softirq.h>
#include <linux/spinlock_api.h>
#include <linux/static_key.h>
#include <linux/stop_machine.h>
#include <linux/syscalls_api.h>
#include <linux/syscalls.h>
#include <linux/tick.h>
#include <linux/topology.h>
#include <linux/types.h>
#include <linux/u64_stats_sync_api.h>
#include <linux/uaccess.h>
#include <linux/wait_api.h>
#include <linux/wait_bit.h>
#include <linux/workqueue_api.h>
#include <linux/delayacct.h>

#include <trace/events/power.h>
#include <trace/events/sched.h>

#include "../workqueue_internal.h"

struct rq;
struct cfs_rq;
struct rt_rq;
struct sched_group;
struct cpuidle_state;

#ifdef CONFIG_PARAVIRT
# include <asm/paravirt.h>
# include <asm/paravirt_api_clock.h>
#endif

#include <asm/barrier.h>

#include "cpupri.h"
#include "cpudeadline.h"

#ifdef CONFIG_SCHED_DEBUG
# define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
#else
# define SCHED_WARN_ON(x)	({ (void)(x), 0; })
#endif

/* task_struct::on_rq states: */
#define TASK_ON_RQ_QUEUED	1
#define TASK_ON_RQ_MIGRATING	2

extern __read_mostly int scheduler_running;

extern unsigned long calc_load_update;
extern atomic_long_t calc_load_tasks;

extern void calc_global_load_tick(struct rq *this_rq);
extern long calc_load_fold_active(struct rq *this_rq, long adjust);

extern void call_trace_sched_update_nr_running(struct rq *rq, int count);

extern int sysctl_sched_rt_period;
extern int sysctl_sched_rt_runtime;
extern int sched_rr_timeslice;

/*
 * Asymmetric CPU capacity bits
 */
struct asym_cap_data {
	struct list_head link;
	struct rcu_head rcu;
	unsigned long capacity;
	unsigned long cpus[];
};

extern struct list_head asym_cap_list;

#define cpu_capacity_span(asym_data) to_cpumask((asym_data)->cpus)

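/*
 * Illustrative sketch (not part of this header's API surface): entries on
 * asym_cap_list are RCU-protected, so a typical walk over the distinct
 * capacity values and their CPUs looks like the following; visit() is a
 * placeholder for whatever the caller wants to do per entry:
 *
 *	struct asym_cap_data *entry;
 *	int cpu;
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(entry, &asym_cap_list, link) {
 *		for_each_cpu(cpu, cpu_capacity_span(entry))
 *			visit(cpu, entry->capacity);
 *	}
 *	rcu_read_unlock();
 */
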
/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(time)	((unsigned long)(time) / (NSEC_PER_SEC/HZ))

/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (e.g. nice +19 on an autogroup) and of deeper
 * task-group hierarchies, especially on larger systems. This is not a
 * user-visible change and does not change the user interface for setting
 * shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit
 * are pretty high and the returns do not justify the increased costs.
 *
 * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to
 * increase coverage and consistency always enable it on 64-bit platforms.
 */
#ifdef CONFIG_64BIT
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
# define scale_load_down(w)					\
({								\
	unsigned long __w = (w);				\
								\
	if (__w)						\
		__w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT);	\
	__w;							\
})
#else
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		(w)
# define scale_load_down(w)	(w)
#endif

/*
 * Task weight (visible to users) and its load (invisible to users) have
 * independent resolution, but they should be well calibrated. We use
 * scale_load() and scale_load_down(w) to convert between them. The
 * following must be true:
 *
 *	scale_load(sched_prio_to_weight[NICE_TO_PRIO(0)-MAX_RT_PRIO]) == NICE_0_LOAD
 *
 */
#define NICE_0_LOAD		(1L << NICE_0_LOAD_SHIFT)

/*
 * Single value that decides SCHED_DEADLINE internal math precision.
 * 10 -> just above 1us
 * 9  -> just above 0.5us
 */
#define DL_SCALE		10

/*
 * Single value that denotes runtime == period, i.e. unlimited time.
 */
#define RUNTIME_INF		((u64)~0ULL)

static inline int idle_policy(int policy)
{
	return policy == SCHED_IDLE;
}

static inline int fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static inline int rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static inline int dl_policy(int policy)
{
	return policy == SCHED_DEADLINE;
}

static inline bool valid_policy(int policy)
{
	return idle_policy(policy) || fair_policy(policy) ||
		rt_policy(policy) || dl_policy(policy);
}

static inline int task_has_idle_policy(struct task_struct *p)
{
	return idle_policy(p->policy);
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

static inline int task_has_dl_policy(struct task_struct *p)
{
	return dl_policy(p->policy);
}

#define cap_scale(v, s)		((v)*(s) >> SCHED_CAPACITY_SHIFT)

static inline void update_avg(u64 *avg, u64 sample)
{
	s64 diff = sample - *avg;

	*avg += diff / 8;
}

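/*
 * update_avg() keeps an exponentially weighted moving average with a 1/8
 * weight for the newest sample. A quick worked example (illustrative numbers
 * only): with *avg == 1000 and sample == 1800, diff == 800, so *avg becomes
 * 1000 + 800/8 == 1100; the old average decays by 7/8 per update.
 */
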
/*
 * Shifting a value by an exponent greater than or equal to the width of said
 * value is UB; cap the shift at that width minus 1.
 */
#define shr_bound(val, shift)							\
	(val >> min_t(typeof(shift), shift, BITS_PER_TYPE(typeof(val)) - 1))

/*
 * !! For sched_setattr_nocheck() (kernel) only !!
 *
 * This is actually gross. :(
 *
 * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE
 * tasks, but still be able to sleep. We need this on platforms that cannot
 * atomically change clock frequency. Remove once fast switching is available
 * on such platforms.
 *
 * SUGOV stands for SchedUtil GOVernor.
 */
#define SCHED_FLAG_SUGOV	0x10000000

#define SCHED_DL_FLAGS		(SCHED_FLAG_RECLAIM | SCHED_FLAG_DL_OVERRUN | SCHED_FLAG_SUGOV)

static inline bool dl_entity_is_special(const struct sched_dl_entity *dl_se)
{
#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
	return unlikely(dl_se->flags & SCHED_FLAG_SUGOV);
#else
	return false;
#endif
}

/*
 * Tells if entity @a should preempt entity @b.
 */
static inline bool dl_entity_preempt(const struct sched_dl_entity *a,
				     const struct sched_dl_entity *b)
{
	return dl_entity_is_special(a) ||
	       dl_time_before(a->deadline, b->deadline);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

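/*
 * Illustrative sketch of how the RT class consumes this structure (see
 * pick_next_rt_entity() in rt.c for the real thing): the bitmap makes
 * finding the highest-priority non-empty queue O(1):
 *
 *	int idx = sched_find_first_bit(array->bitmap);
 *	struct sched_rt_entity *next;
 *
 *	BUG_ON(idx >= MAX_RT_PRIO);	// the delimiter bit is always set
 *	next = list_entry(array->queue[idx].next,
 *			  struct sched_rt_entity, run_list);
 */
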
struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;
	ktime_t rt_period;
	u64 rt_runtime;
	struct hrtimer rt_period_timer;
	unsigned int rt_period_active;
};

static inline int dl_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/*
 * To keep the bandwidth of -deadline tasks under control
 * we need some place where we can:
 *  - store the maximum -deadline bandwidth of each CPU;
 *  - cache the fraction of bandwidth that is currently allocated in
 *    each root domain;
 *
 * This is all done in the data structure below. It is similar to the
 * one used for RT-throttling (rt_bandwidth), with the main difference
 * that, since here we are only interested in admission control, we
 * do not decrease any runtime while the group "executes", nor do we
 * need a timer to replenish it.
 *
 * With respect to SMP, bandwidth is given on a per root domain basis,
 * meaning that:
 *  - bw (< 100%) is the deadline bandwidth of each CPU;
 *  - total_bw is the currently allocated bandwidth in each root domain;
 */
struct dl_bw {
	raw_spinlock_t lock;
	u64 bw;
	u64 total_bw;
};

extern void init_dl_bw(struct dl_bw *dl_b);
extern int sched_dl_global_validate(void);
extern void sched_dl_do_global(void);
extern int sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr);
extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr);
extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
extern bool __checkparam_dl(const struct sched_attr *attr);
extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern int dl_bw_check_overflow(int cpu);
extern s64 dl_scaled_delta_exec(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec);

/*
 * SCHED_DEADLINE supports servers (nested scheduling) with the following
 * interface:
 *
 *   dl_se::rq -- runqueue we belong to.
 *
 *   dl_se::server_has_tasks() -- used on bandwidth enforcement; we 'stop' the
 *                                server when it runs out of tasks to run.
 *
 *   dl_se::server_pick() -- nested pick_next_task(); we yield the period if
 *                           this returns NULL.
 *
 *   dl_server_update() -- called from update_curr_common(), propagates runtime
 *                         to the server.
 *
 *   dl_server_start()
 *   dl_server_stop()  -- start/stop the server when it has (no) tasks.
 *
 *   dl_server_init() -- initializes the server.
 */
extern void dl_server_update(struct sched_dl_entity *dl_se, s64 delta_exec);
extern void dl_server_start(struct sched_dl_entity *dl_se);
extern void dl_server_stop(struct sched_dl_entity *dl_se);
extern void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq,
			   dl_server_has_tasks_f has_tasks,
			   dl_server_pick_f pick_task);

extern void dl_server_update_idle_time(struct rq *rq,
				       struct task_struct *p);
extern void fair_server_init(struct rq *rq);
extern void __dl_server_attach_root(struct sched_dl_entity *dl_se, struct rq *rq);
extern int dl_server_apply_params(struct sched_dl_entity *dl_se,
				  u64 runtime, u64 period, bool init);

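/*
 * Illustrative sketch of how a server is wired up, modelled on the fair
 * server (see fair_server_init() in fair.c); my_server_has_tasks(),
 * my_server_pick_task() and my_server_init() are hypothetical names standing
 * in for a class's real implementations:
 *
 *	static bool my_server_has_tasks(struct sched_dl_entity *dl_se)
 *	{
 *		return !!dl_se->rq->cfs.h_nr_running;
 *	}
 *
 *	void my_server_init(struct rq *rq)
 *	{
 *		dl_server_init(&rq->fair_server, rq,
 *			       my_server_has_tasks, my_server_pick_task);
 *	}
 *
 * Thereafter dl_server_start()/dl_server_stop() are driven by enqueue/dequeue
 * of the served class, and dl_server_update() feeds consumed runtime back in.
 */
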
#ifdef CONFIG_CGROUP_SCHED

extern struct list_head task_groups;

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t lock;
	ktime_t period;
	u64 quota;
	u64 runtime;
	u64 burst;
	u64 runtime_snap;
	s64 hierarchical_quota;

	u8 idle;
	u8 period_active;
	u8 slack_started;
	struct hrtimer period_timer;
	struct hrtimer slack_timer;
	struct list_head throttled_cfs_rq;

	/* Statistics: */
	int nr_periods;
	int nr_throttled;
	int nr_burst;
	u64 throttled_time;
	u64 burst_time;
#endif
};

/* Task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each CPU */
	struct sched_entity **se;
	/* runqueue "owned" by this group on each CPU */
	struct cfs_rq **cfs_rq;
	unsigned long shares;

	/* A positive value indicates that this is a SCHED_IDLE group. */
	int idle;

#ifdef CONFIG_SMP
	/*
	 * load_avg can be heavily contended at clock tick time, so put
	 * it in its own cache-line separated from the fields above which
	 * will also be accessed at each tick.
	 */
	atomic_long_t load_avg ____cacheline_aligned;
#endif
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity **rt_se;
	struct rt_rq **rt_rq;

	struct rt_bandwidth rt_bandwidth;
#endif

	struct rcu_head rcu;
	struct list_head list;

	struct task_group *parent;
	struct list_head siblings;
	struct list_head children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif

	struct cfs_bandwidth cfs_bandwidth;

#ifdef CONFIG_UCLAMP_TASK_GROUP
	/* The two-decimal-precision [%] value requested from user-space */
	unsigned int uclamp_pct[UCLAMP_CNT];
	/* Clamp values requested for a task group */
	struct uclamp_se uclamp_req[UCLAMP_CNT];
	/* Effective clamp values used for a task group */
	struct uclamp_se uclamp[UCLAMP_CNT];
#endif

};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities queued
 * on it, so the weight of an entity should not be too large, and neither
 * should the shares value of a task group. (The default weight is 1024 -
 * so there's no practical limitation from this.)
 */
#define MIN_SHARES		(1UL << 1)
#define MAX_SHARES		(1UL << 18)
#endif

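/*
 * Illustrative: sched_group_set_shares() clamps user input to this range at
 * the scaled resolution, roughly:
 *
 *	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
 *
 * so on 64-bit a written value of 1 becomes an effective 2 and anything
 * above 2^18 is capped, keeping the per-entity weight arithmetic safe.
 */
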
typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data);

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}

extern int tg_nop(struct task_group *tg, void *data);

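/*
 * Illustrative usage, in the style of the RT-bandwidth checks in rt.c: visit
 * every group top-down with a real @down visitor and pass tg_nop as the @up
 * visitor when nothing needs doing on the way back up; a non-zero return
 * from a visitor aborts the walk and is propagated to the caller.
 * my_visit()/my_data are hypothetical stand-ins:
 *
 *	static int my_visit(struct task_group *tg, void *data)
 *	{
 *		return 0;	// keep walking
 *	}
 *
 *	rcu_read_lock();
 *	ret = walk_tg_tree(my_visit, tg_nop, &my_data);
 *	rcu_read_unlock();
 */
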
#ifdef CONFIG_FAIR_GROUP_SCHED
extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void online_fair_sched_group(struct task_group *tg);
extern void unregister_fair_sched_group(struct task_group *tg);
#else
static inline void free_fair_sched_group(struct task_group *tg) { }
static inline int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
	return 1;
}
static inline void online_fair_sched_group(struct task_group *tg) { }
static inline void unregister_fair_sched_group(struct task_group *tg) { }
#endif

extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			      struct sched_entity *se, int cpu,
			      struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b, struct cfs_bandwidth *parent);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
extern bool cfs_task_bw_constrained(struct task_struct *p);

extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
			     struct sched_rt_entity *rt_se, int cpu,
			     struct sched_rt_entity *parent);
extern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us);
extern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us);
extern long sched_group_rt_runtime(struct task_group *tg);
extern long sched_group_rt_period(struct task_group *tg);
extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_online_group(struct task_group *tg,
			       struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_release_group(struct task_group *tg);

extern void sched_move_task(struct task_struct *tsk);

#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

extern int sched_group_set_idle(struct task_group *tg, long idle);

#ifdef CONFIG_SMP
extern void set_task_rq_fair(struct sched_entity *se,
			     struct cfs_rq *prev, struct cfs_rq *next);
#else /* !CONFIG_SMP */
static inline void set_task_rq_fair(struct sched_entity *se,
				    struct cfs_rq *prev, struct cfs_rq *next) { }
#endif /* CONFIG_SMP */
#endif /* CONFIG_FAIR_GROUP_SCHED */

#else /* CONFIG_CGROUP_SCHED */

struct cfs_bandwidth { };

static inline bool cfs_task_bw_constrained(struct task_struct *p) { return false; }

#endif /* CONFIG_CGROUP_SCHED */

extern void unregister_rt_sched_group(struct task_group *tg);
extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);

/*
 * u64_u32_load/u64_u32_store
 *
 * Use a copy of a u64 value to protect against data races. This is only
 * applicable for 32-bit architectures.
 */
#ifdef CONFIG_64BIT
# define u64_u32_load_copy(var, copy)		var
# define u64_u32_store_copy(var, copy, val)	(var = val)
#else
# define u64_u32_load_copy(var, copy)					\
({									\
	u64 __val, __val_copy;						\
	do {								\
		__val_copy = copy;					\
		/*							\
		 * paired with u64_u32_store_copy(), ordering access	\
		 * to var and copy.					\
		 */							\
		smp_rmb();						\
		__val = var;						\
	} while (__val != __val_copy);					\
	__val;								\
})
# define u64_u32_store_copy(var, copy, val)				\
do {									\
	typeof(val) __val = (val);					\
	var = __val;							\
	/*								\
	 * paired with u64_u32_load_copy(), ordering access to var and	\
	 * copy.							\
	 */								\
	smp_wmb();							\
	copy = __val;							\
} while (0)
#endif
# define u64_u32_load(var)		u64_u32_load_copy(var, var##_copy)
# define u64_u32_store(var, val)	u64_u32_store_copy(var, var##_copy, val)

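/*
 * Illustrative usage: pair a u64 field with a companion "<name>_copy" field
 * (only needed on 32-bit) and always access it through these helpers, e.g.
 * the way fair.c reads PELT's last_update_time:
 *
 *	u64 last = u64_u32_load_copy(cfs_rq->avg.last_update_time,
 *				     cfs_rq->last_update_time_copy);
 *
 * On 64-bit this compiles to a plain load; on 32-bit the retry loop rejects
 * torn reads by re-reading until the value and its copy agree.
 */
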
/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight load;
	unsigned int nr_running;
	unsigned int h_nr_running;      /* SCHED_{NORMAL,BATCH,IDLE} */
	unsigned int idle_nr_running;   /* SCHED_IDLE */
	unsigned int idle_h_nr_running; /* SCHED_IDLE */

	s64 avg_vruntime;
	u64 avg_load;

	u64 min_vruntime;
#ifdef CONFIG_SCHED_CORE
	unsigned int forceidle_seq;
	u64 min_vruntime_fi;
#endif

	struct rb_root_cached tasks_timeline;

	/*
	 * 'curr' points to the currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e. when none are currently running).
	 */
	struct sched_entity *curr;
	struct sched_entity *next;

#ifdef CONFIG_SMP
	/*
	 * CFS load tracking
	 */
	struct sched_avg avg;
#ifndef CONFIG_64BIT
	u64 last_update_time_copy;
#endif
	struct {
		raw_spinlock_t lock ____cacheline_aligned;
		int nr;
		unsigned long load_avg;
		unsigned long util_avg;
		unsigned long runnable_avg;
	} removed;

#ifdef CONFIG_FAIR_GROUP_SCHED
	u64 last_update_tg_load_avg;
	unsigned long tg_load_avg_contrib;
	long propagate;
	long prop_runnable_sum;

	/*
	 * h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long h_load;
	u64 last_h_load_update;
	struct sched_entity *h_load_next;
#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together the list of leaf cfs_rq's in a CPU.
	 * This list is used during load balance.
	 */
	int on_list;
	struct list_head leaf_cfs_rq_list;
	struct task_group *tg; /* group that "owns" this runqueue */

	/* Locally cached copy of our task_group's idle value */
	int idle;

#ifdef CONFIG_CFS_BANDWIDTH
	int runtime_enabled;
	s64 runtime_remaining;

	u64 throttled_pelt_idle;
#ifndef CONFIG_64BIT
	u64 throttled_pelt_idle_copy;
#endif
	u64 throttled_clock;
	u64 throttled_clock_pelt;
	u64 throttled_clock_pelt_time;
	u64 throttled_clock_self;
	u64 throttled_clock_self_time;
	int throttled;
	int throttle_count;
	struct list_head throttled_list;
	struct list_head throttled_csd_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/* RT IPI pull logic requires IRQ_WORK */
#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP)
# define HAVE_RT_PUSH_IPI
#endif

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array active;
	unsigned int rt_nr_running;
	unsigned int rr_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	bool overloaded;
	struct plist_head pushable_tasks;

#endif /* CONFIG_SMP */
	int rt_queued;

#ifdef CONFIG_RT_GROUP_SCHED
	int rt_throttled;
	u64 rt_time;
	u64 rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;

	unsigned int rt_nr_boosted;

	struct rq *rq;
	struct task_group *tg;
#endif
};

static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq)
{
	return rt_rq->rt_queued && rt_rq->rt_nr_running;
}

/* Deadline class' related fields in a runqueue */
struct dl_rq {
	/* runqueue is an rbtree, ordered by deadline */
	struct rb_root_cached root;

	unsigned int dl_nr_running;

#ifdef CONFIG_SMP
	/*
	 * Deadline values of the currently executing and the
	 * earliest ready task on this rq. Caching these facilitates
	 * the decision whether or not a ready but not running task
	 * should migrate somewhere else.
	 */
	struct {
		u64 curr;
		u64 next;
	} earliest_dl;

	bool overloaded;

	/*
	 * Tasks on this rq that can be pushed away. They are kept in
	 * an rb-tree, ordered by tasks' deadlines, with caching
	 * of the leftmost (earliest deadline) element.
	 */
	struct rb_root_cached pushable_dl_tasks_root;
#else
	struct dl_bw dl_bw;
#endif
	/*
	 * "Active utilization" for this runqueue: increased when a
	 * task wakes up (becomes TASK_RUNNING) and decreased when a
	 * task blocks.
	 */
	u64 running_bw;

	/*
	 * Utilization of the tasks "assigned" to this runqueue (including
	 * the tasks that are in the runqueue and the tasks that executed on
	 * this CPU and blocked). Increased when a task moves to this runqueue,
	 * and decreased when the task moves away (migrates, changes scheduling
	 * policy, or terminates).
	 * This is needed to compute the "inactive utilization" for the
	 * runqueue (inactive utilization = this_bw - running_bw).
	 */
	u64 this_bw;
	u64 extra_bw;

	/*
	 * Maximum available bandwidth for reclaiming by SCHED_FLAG_RECLAIM
	 * tasks of this rq. Used in calculation of reclaimable bandwidth (GRUB).
	 */
	u64 max_bw;

	/*
	 * Inverse of the fraction of CPU utilization that can be reclaimed
	 * by the GRUB algorithm.
	 */
	u64 bw_ratio;
};

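/*
 * A worked example of the two utilization counters above (illustrative
 * numbers): with two 25%-utilization -deadline tasks assigned to this rq,
 * this_bw accounts 50% at all times, while running_bw accounts 50% only
 * while both are TASK_RUNNING and drops to 25% when one of them blocks.
 * The difference, this_bw - running_bw, is the "inactive utilization" that
 * GRUB-style reclaiming can hand out to SCHED_FLAG_RECLAIM tasks, capped
 * by max_bw.
 */
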
#ifdef CONFIG_FAIR_GROUP_SCHED

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)

static inline void se_update_runnable(struct sched_entity *se)
{
	if (!entity_is_task(se))
		se->runnable_weight = se->my_q->h_nr_running;
}

static inline long se_runnable(struct sched_entity *se)
{
	if (se->sched_delayed)
		return false;

	if (entity_is_task(se))
		return !!se->on_rq;
	else
		return se->runnable_weight;
}

#else /* !CONFIG_FAIR_GROUP_SCHED: */

#define entity_is_task(se)	1

static inline void se_update_runnable(struct sched_entity *se) { }

static inline long se_runnable(struct sched_entity *se)
{
	if (se->sched_delayed)
		return false;

	return !!se->on_rq;
}

#endif /* !CONFIG_FAIR_GROUP_SCHED */

#ifdef CONFIG_SMP
/*
 * XXX we want to get rid of these helpers and use the full load resolution.
 */
static inline long se_weight(struct sched_entity *se)
{
	return scale_load_down(se->load.weight);
}


static inline bool sched_asym_prefer(int a, int b)
{
	return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
}

struct perf_domain {
	struct em_perf_domain *em_pd;
	struct perf_domain *next;
	struct rcu_head rcu;
};

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member CPUs from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 */
struct root_domain {
	atomic_t refcount;
	atomic_t rto_count;
	struct rcu_head rcu;
	cpumask_var_t span;
	cpumask_var_t online;

	/*
	 * Indicate pullable load on at least one CPU, e.g:
	 * - More than one runnable task
	 * - Running task is misfit
	 */
	bool overloaded;

	/* Indicate one or more CPUs over-utilized (tipping point) */
	bool overutilized;

	/*
	 * The bit corresponding to a CPU gets set here if such CPU has more
	 * than one runnable -deadline task (as it is below for RT tasks).
	 */
	cpumask_var_t dlo_mask;
	atomic_t dlo_count;
	struct dl_bw dl_bw;
	struct cpudl cpudl;

	/*
	 * Indicate whether a root_domain's dl_bw has been checked or
	 * updated. It is a monotonically increasing value.
	 *
	 * Wrap-around is a theoretical concern, but since the counter is a
	 * u64 it is not a practical one.
	 */
	u64 visit_gen;

#ifdef HAVE_RT_PUSH_IPI
	/*
	 * For IPI pull requests, loop across the rto_mask.
	 */
	struct irq_work rto_push_work;
	raw_spinlock_t rto_lock;
	/* These are only updated and read within rto_lock */
	int rto_loop;
	int rto_cpu;
	/* These atomics are updated outside of a lock */
	atomic_t rto_loop_next;
	atomic_t rto_loop_start;
#endif
	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t rto_mask;
	struct cpupri cpupri;

	/*
	 * NULL-terminated list of performance domains intersecting with the
	 * CPUs of the rd. Protected by RCU.
	 */
	struct perf_domain __rcu *pd;
};

extern void init_defrootdomain(void);
extern int sched_init_domains(const struct cpumask *cpu_map);
extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
extern void sched_get_rd(struct root_domain *rd);
extern void sched_put_rd(struct root_domain *rd);

static inline int get_rd_overloaded(struct root_domain *rd)
{
	return READ_ONCE(rd->overloaded);
}

static inline void set_rd_overloaded(struct root_domain *rd, int status)
{
	if (get_rd_overloaded(rd) != status)
		WRITE_ONCE(rd->overloaded, status);
}

#ifdef HAVE_RT_PUSH_IPI
extern void rto_push_irq_work_func(struct irq_work *work);
#endif
#endif /* CONFIG_SMP */

#ifdef CONFIG_UCLAMP_TASK
/*
 * struct uclamp_bucket - Utilization clamp bucket
 * @value: utilization clamp value for tasks on this clamp bucket
 * @tasks: number of RUNNABLE tasks on this clamp bucket
 *
 * Keep track of how many tasks are RUNNABLE for a given utilization
 * clamp value.
 */
struct uclamp_bucket {
	unsigned long value : bits_per(SCHED_CAPACITY_SCALE);
	unsigned long tasks : BITS_PER_LONG - bits_per(SCHED_CAPACITY_SCALE);
};

/*
 * struct uclamp_rq - rq's utilization clamp
 * @value: currently active clamp values for a rq
 * @bucket: utilization clamp buckets affecting a rq
 *
 * Keep track of RUNNABLE tasks on a rq to aggregate their clamp values.
 * A clamp value is affecting a rq when there is at least one task RUNNABLE
 * (or actually running) with that value.
 *
 * There are up to UCLAMP_CNT possible different clamp values, currently there
 * are only two: minimum utilization and maximum utilization.
 *
 * All utilization clamping values are MAX aggregated, since:
 * - for util_min: we want to run the CPU at least at the max of the minimum
 *   utilization required by its currently RUNNABLE tasks.
 * - for util_max: we want to allow the CPU to run up to the max of the
 *   maximum utilization allowed by its currently RUNNABLE tasks.
 *
 * Since on each system we expect only a limited number of different
 * utilization clamp values (UCLAMP_BUCKETS), use a simple array to track
 * the metrics required to compute all the per-rq utilization clamp values.
 */
struct uclamp_rq {
	unsigned int value;
	struct uclamp_bucket bucket[UCLAMP_BUCKETS];
};

DECLARE_STATIC_KEY_FALSE(sched_uclamp_used);
#endif /* CONFIG_UCLAMP_TASK */

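/*
 * Worked example of the MAX aggregation above (illustrative numbers): with
 * three RUNNABLE tasks whose util_min clamps are 0, 200 and 512, the rq's
 * uclamp[UCLAMP_MIN].value is 512; when the 512 task dequeues, it decays to
 * 200. The bucket array exists precisely so this new maximum can be found
 * without scanning all runnable tasks: only UCLAMP_BUCKETS counters need to
 * be inspected.
 */
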
struct balance_callback {
	struct balance_callback *next;
	void (*func)(struct rq *rq);
};

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: code paths that need to lock multiple runqueues (such as
 * the load balancing or the thread migration code) must acquire the locks
 * in ascending &runqueue order.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t __lock;

	unsigned int nr_running;
#ifdef CONFIG_NUMA_BALANCING
	unsigned int nr_numa_running;
	unsigned int nr_preferred_running;
	unsigned int numa_migrate_on;
#endif
#ifdef CONFIG_NO_HZ_COMMON
#ifdef CONFIG_SMP
	unsigned long last_blocked_load_update_tick;
	unsigned int has_blocked_load;
	call_single_data_t nohz_csd;
#endif /* CONFIG_SMP */
	unsigned int nohz_tick_stopped;
	atomic_t nohz_flags;
#endif /* CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_SMP
	unsigned int ttwu_pending;
#endif
	u64 nr_switches;

#ifdef CONFIG_UCLAMP_TASK
	/* Utilization clamp values based on CPU's RUNNABLE tasks */
	struct uclamp_rq uclamp[UCLAMP_CNT] ____cacheline_aligned;
	unsigned int uclamp_flags;
#define UCLAMP_FLAG_IDLE 0x01
#endif

	struct cfs_rq cfs;
	struct rt_rq rt;
	struct dl_rq dl;

	struct sched_dl_entity fair_server;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this CPU: */
	struct list_head leaf_cfs_rq_list;
	struct list_head *tmp_alone_branch;
#endif /* CONFIG_FAIR_GROUP_SCHED */

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned int nr_uninterruptible;

	struct task_struct __rcu *curr;
	struct sched_dl_entity *dl_server;
	struct task_struct *idle;
	struct task_struct *stop;
	unsigned long next_balance;
	struct mm_struct *prev_mm;

	unsigned int clock_update_flags;
	u64 clock;
	/* Ensure that all clocks are in the same cache line */
	u64 clock_task ____cacheline_aligned;
	u64 clock_pelt;
	unsigned long lost_idle_time;
	u64 clock_pelt_idle;
	u64 clock_idle;
#ifndef CONFIG_64BIT
	u64 clock_pelt_idle_copy;
	u64 clock_idle_copy;
#endif

	atomic_t nr_iowait;

#ifdef CONFIG_SCHED_DEBUG
	u64 last_seen_need_resched_ns;
	int ticks_without_resched;
#endif

#ifdef CONFIG_MEMBARRIER
	int membarrier_state;
#endif

#ifdef CONFIG_SMP
	struct root_domain *rd;
	struct sched_domain __rcu *sd;

	unsigned long cpu_capacity;

	struct balance_callback *balance_callback;

	unsigned char nohz_idle_balance;
	unsigned char idle_balance;

	unsigned long misfit_task_load;

	/* For active balancing */
	int active_balance;
	int push_cpu;
	struct cpu_stop_work active_balance_work;

	/* CPU of this runqueue: */
	int cpu;
	int online;

	struct list_head cfs_tasks;

	struct sched_avg avg_rt;
	struct sched_avg avg_dl;
#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
	struct sched_avg avg_irq;
#endif
#ifdef CONFIG_SCHED_HW_PRESSURE
	struct sched_avg avg_hw;
#endif
	u64 idle_stamp;
	u64 avg_idle;

	/* This is used to determine avg_idle's max value */
	u64 max_idle_balance_cost;

#ifdef CONFIG_HOTPLUG_CPU
	struct rcuwait hotplug_wait;
#endif
#endif /* CONFIG_SMP */

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64 prev_irq_time;
	u64 psi_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64 prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64 prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long calc_load_update;
	long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	call_single_data_t hrtick_csd;
#endif
	struct hrtimer hrtick_timer;
	ktime_t hrtick_time;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info rq_sched_info;
	unsigned long long rq_cpu_time;

	/* sys_sched_yield() stats */
	unsigned int yld_count;

	/* schedule() stats */
	unsigned int sched_count;
	unsigned int sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;
#endif

#ifdef CONFIG_CPU_IDLE
	/* Must be inspected within an RCU read-side critical section */
	struct cpuidle_state *idle_state;
#endif

#ifdef CONFIG_SMP
	unsigned int nr_pinned;
#endif
	unsigned int push_busy;
	struct cpu_stop_work push_work;

#ifdef CONFIG_SCHED_CORE
	/* per rq */
	struct rq *core;
	struct task_struct *core_pick;
	struct sched_dl_entity *core_dl_server;
	unsigned int core_enabled;
	unsigned int core_sched_seq;
	struct rb_root core_tree;

	/* shared state -- careful with sched_core_cpu_deactivate() */
	unsigned int core_task_seq;
	unsigned int core_pick_seq;
	unsigned long core_cookie;
	unsigned int core_forceidle_count;
	unsigned int core_forceidle_seq;
	unsigned int core_forceidle_occupation;
	u64 core_forceidle_start;
#endif

	/* Scratch cpumask to be temporarily used under rq_lock */
	cpumask_var_t scratch_mask;

#if defined(CONFIG_CFS_BANDWIDTH) && defined(CONFIG_SMP)
	call_single_data_t cfsb_csd;
	struct list_head cfsb_csd_list;
#endif
};

#ifdef CONFIG_FAIR_GROUP_SCHED

/* CPU runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

#else

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}
#endif

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}

#define MDF_PUSH		0x01

static inline bool is_migration_disabled(struct task_struct *p)
{
#ifdef CONFIG_SMP
	return p->migration_disabled;
#else
	return false;
#endif
}

DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		this_cpu_ptr(&runqueues)
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		raw_cpu_ptr(&runqueues)

#ifdef CONFIG_SCHED_CORE
static inline struct cpumask *sched_group_span(struct sched_group *sg);

DECLARE_STATIC_KEY_FALSE(__sched_core_enabled);

static inline bool sched_core_enabled(struct rq *rq)
{
	return static_branch_unlikely(&__sched_core_enabled) && rq->core_enabled;
}

static inline bool sched_core_disabled(void)
{
	return !static_branch_unlikely(&__sched_core_enabled);
}

/*
 * Be careful with this function; not for general use. The return value isn't
 * stable unless you actually hold a relevant rq->__lock.
 */
static inline raw_spinlock_t *rq_lockp(struct rq *rq)
{
	if (sched_core_enabled(rq))
		return &rq->core->__lock;

	return &rq->__lock;
}

static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
{
	if (rq->core_enabled)
		return &rq->core->__lock;

	return &rq->__lock;
}

extern bool
cfs_prio_less(const struct task_struct *a, const struct task_struct *b, bool fi);

extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);

/*
 * Helpers to check if the CPU's core cookie matches with the task's cookie
 * when core scheduling is enabled.
 * A special case is that the task's cookie always matches with CPU's core
 * cookie if the CPU is in an idle core.
 */
static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p)
{
	/* Ignore cookie match if core scheduler is not enabled on the CPU. */
	if (!sched_core_enabled(rq))
		return true;

	return rq->core->core_cookie == p->core_cookie;
}

static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p)
{
	bool idle_core = true;
	int cpu;

	/* Ignore cookie match if core scheduler is not enabled on the CPU. */
	if (!sched_core_enabled(rq))
		return true;

	for_each_cpu(cpu, cpu_smt_mask(cpu_of(rq))) {
		if (!available_idle_cpu(cpu)) {
			idle_core = false;
			break;
		}
	}

	/*
	 * A CPU in an idle core is always the best choice for tasks with
	 * cookies.
	 */
	return idle_core || rq->core->core_cookie == p->core_cookie;
}

static inline bool sched_group_cookie_match(struct rq *rq,
					    struct task_struct *p,
					    struct sched_group *group)
{
	int cpu;

	/* Ignore cookie match if core scheduler is not enabled on the CPU. */
	if (!sched_core_enabled(rq))
		return true;

	for_each_cpu_and(cpu, sched_group_span(group), p->cpus_ptr) {
		if (sched_core_cookie_match(cpu_rq(cpu), p))
			return true;
	}
	return false;
}

static inline bool sched_core_enqueued(struct task_struct *p)
{
	return !RB_EMPTY_NODE(&p->core_node);
}

extern void sched_core_enqueue(struct rq *rq, struct task_struct *p);
extern void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags);

extern void sched_core_get(void);
extern void sched_core_put(void);

#else /* !CONFIG_SCHED_CORE: */

static inline bool sched_core_enabled(struct rq *rq)
{
	return false;
}

static inline bool sched_core_disabled(void)
{
	return true;
}

static inline raw_spinlock_t *rq_lockp(struct rq *rq)
{
	return &rq->__lock;
}

static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
{
	return &rq->__lock;
}

static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p)
{
	return true;
}

static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p)
{
	return true;
}

static inline bool sched_group_cookie_match(struct rq *rq,
					    struct task_struct *p,
					    struct sched_group *group)
{
	return true;
}

#endif /* !CONFIG_SCHED_CORE */

static inline void lockdep_assert_rq_held(struct rq *rq)
{
	lockdep_assert_held(__rq_lockp(rq));
}

extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass);
extern bool raw_spin_rq_trylock(struct rq *rq);
extern void raw_spin_rq_unlock(struct rq *rq);

static inline void raw_spin_rq_lock(struct rq *rq)
{
	raw_spin_rq_lock_nested(rq, 0);
}

static inline void raw_spin_rq_lock_irq(struct rq *rq)
{
	local_irq_disable();
	raw_spin_rq_lock(rq);
}

static inline void raw_spin_rq_unlock_irq(struct rq *rq)
{
	raw_spin_rq_unlock(rq);
	local_irq_enable();
}

static inline unsigned long _raw_spin_rq_lock_irqsave(struct rq *rq)
{
	unsigned long flags;

	local_irq_save(flags);
	raw_spin_rq_lock(rq);

	return flags;
}

static inline void raw_spin_rq_unlock_irqrestore(struct rq *rq, unsigned long flags)
{
	raw_spin_rq_unlock(rq);
	local_irq_restore(flags);
}

#define raw_spin_rq_lock_irqsave(rq, flags)	\
do {						\
	flags = _raw_spin_rq_lock_irqsave(rq);	\
} while (0)

#ifdef CONFIG_SCHED_SMT
extern void __update_idle_core(struct rq *rq);

static inline void update_idle_core(struct rq *rq)
{
	if (static_branch_unlikely(&sched_smt_present))
		__update_idle_core(rq);
}

#else
static inline void update_idle_core(struct rq *rq) { }
#endif

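/*
 * Illustrative usage of the irqsave variants above; note that @flags is
 * passed by name to the macro, matching the raw_spin_lock_irqsave() calling
 * convention:
 *
 *	struct rq *rq = cpu_rq(cpu);
 *	unsigned long flags;
 *
 *	raw_spin_rq_lock_irqsave(rq, flags);
 *	// ... rq is locked, with core-scheduling aware lock selection ...
 *	raw_spin_rq_unlock_irqrestore(rq, flags);
 */
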
#ifdef CONFIG_FAIR_GROUP_SCHED

static inline struct task_struct *task_of(struct sched_entity *se)
{
	SCHED_WARN_ON(!entity_is_task(se));
	return container_of(se, struct task_struct, se);
}

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(const struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

#else /* !CONFIG_FAIR_GROUP_SCHED: */

#define task_of(_se)	container_of(_se, struct task_struct, se)

static inline struct cfs_rq *task_cfs_rq(const struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(const struct sched_entity *se)
{
	const struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}

#endif /* !CONFIG_FAIR_GROUP_SCHED */

extern void update_rq_clock(struct rq *rq);

/*
 * rq::clock_update_flags bits
 *
 * %RQCF_REQ_SKIP - will request skipping of clock update on the next
 *   call to __schedule(). This is an optimisation to avoid
 *   neighbouring rq clock updates.
 *
 * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is
 *   in effect and calls to update_rq_clock() are being ignored.
 *
 * %RQCF_UPDATED - is a debug flag that indicates whether a call has been
 *   made to update_rq_clock() since the last time rq::lock was pinned.
 *
 * If inside of __schedule(), clock_update_flags will have been
 * shifted left (a left shift is a cheap operation for the fast path
 * to promote %RQCF_REQ_SKIP to %RQCF_ACT_SKIP), so you must use,
 *
 *	if (rq->clock_update_flags >= RQCF_UPDATED)
 *
 * to check if %RQCF_UPDATED is set. It'll never be shifted more than
 * one position though, because the next rq_unpin_lock() will shift it
 * back.
 */
#define RQCF_REQ_SKIP		0x01
#define RQCF_ACT_SKIP		0x02
#define RQCF_UPDATED		0x04

static inline void assert_clock_updated(struct rq *rq)
{
	/*
	 * The only reason for not seeing a clock update since the
	 * last rq_pin_lock() is if we're currently skipping updates.
	 */
	SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP);
}

static inline u64 rq_clock(struct rq *rq)
{
	lockdep_assert_rq_held(rq);
	assert_clock_updated(rq);

	return rq->clock;
}

static inline u64 rq_clock_task(struct rq *rq)
{
	lockdep_assert_rq_held(rq);
	assert_clock_updated(rq);

	return rq->clock_task;
}

static inline void rq_clock_skip_update(struct rq *rq)
{
	lockdep_assert_rq_held(rq);
	rq->clock_update_flags |= RQCF_REQ_SKIP;
}

/*
 * See rt task throttling, which is the only time a skip
 * request is canceled.
 */
static inline void rq_clock_cancel_skipupdate(struct rq *rq)
{
	lockdep_assert_rq_held(rq);
	rq->clock_update_flags &= ~RQCF_REQ_SKIP;
}

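/*
 * Illustrative pattern for reading the rq clocks: both accessors assert that
 * the rq lock is held and that the clock has actually been updated under the
 * current pin, so the canonical sequence is:
 *
 *	struct rq_flags rf;
 *	u64 now;
 *
 *	rq_lock(rq, &rf);
 *	update_rq_clock(rq);		// sets RQCF_UPDATED
 *	now = rq_clock_task(rq);	// would warn without the update
 *	...
 *	rq_unlock(rq, &rf);
 */
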
/*
 * During CPU offlining and rq-wide unthrottling, we can trigger an
 * update_rq_clock() for several cfs and rt runqueues (typically when using
 * list_for_each_entry_*).
 * rq_clock_start_loop_update() can be called after updating the clock once
 * and before iterating over the list to prevent multiple updates.
 * After the iterative traversal, we need to call rq_clock_stop_loop_update()
 * to clear RQCF_ACT_SKIP from rq->clock_update_flags.
 */
static inline void rq_clock_start_loop_update(struct rq *rq)
{
	lockdep_assert_rq_held(rq);
	SCHED_WARN_ON(rq->clock_update_flags & RQCF_ACT_SKIP);
	rq->clock_update_flags |= RQCF_ACT_SKIP;
}

static inline void rq_clock_stop_loop_update(struct rq *rq)
{
	lockdep_assert_rq_held(rq);
	rq->clock_update_flags &= ~RQCF_ACT_SKIP;
}

struct rq_flags {
	unsigned long flags;
	struct pin_cookie cookie;
#ifdef CONFIG_SCHED_DEBUG
	/*
	 * A copy of (rq::clock_update_flags & RQCF_UPDATED) for the
	 * current pin context is stashed here in case it needs to be
	 * restored in rq_repin_lock().
	 */
	unsigned int clock_update_flags;
#endif
};

extern struct balance_callback balance_push_callback;

/*
 * Lockdep annotation that avoids accidental unlocks; it's like a
 * sticky/continuous lockdep_assert_held().
 *
 * This prevents code that has access to 'struct rq *rq' (basically everything
 * in the scheduler) from accidentally unlocking the rq if it does not also
 * have a copy of the (on-stack) 'struct rq_flags rf'.
 *
 * Also see Documentation/locking/lockdep-design.rst.
 */
static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
{
	rf->cookie = lockdep_pin_lock(__rq_lockp(rq));

#ifdef CONFIG_SCHED_DEBUG
	rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
	rf->clock_update_flags = 0;
# ifdef CONFIG_SMP
	SCHED_WARN_ON(rq->balance_callback && rq->balance_callback != &balance_push_callback);
# endif
#endif
}

static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
{
#ifdef CONFIG_SCHED_DEBUG
	if (rq->clock_update_flags > RQCF_ACT_SKIP)
		rf->clock_update_flags = RQCF_UPDATED;
#endif

	lockdep_unpin_lock(__rq_lockp(rq), rf->cookie);
}

static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
{
	lockdep_repin_lock(__rq_lockp(rq), rf->cookie);

#ifdef CONFIG_SCHED_DEBUG
	/*
	 * Restore the value we stashed in @rf for this pin context.
	 */
	rq->clock_update_flags |= rf->clock_update_flags;
#endif
}

extern
struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(rq->lock);

extern
struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(p->pi_lock)
	__acquires(rq->lock);

static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
	__releases(rq->lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_rq_unlock(rq);
}

static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
	__releases(rq->lock)
	__releases(p->pi_lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_rq_unlock(rq);
	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
}

DEFINE_LOCK_GUARD_1(task_rq_lock, struct task_struct,
		    _T->rq = task_rq_lock(_T->lock, &_T->rf),
		    task_rq_unlock(_T->rq, _T->lock, &_T->rf),
		    struct rq *rq; struct rq_flags rf)

static inline void rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
	__acquires(rq->lock)
{
	raw_spin_rq_lock_irqsave(rq, rf->flags);
	rq_pin_lock(rq, rf);
}

static inline void rq_lock_irq(struct rq *rq, struct rq_flags *rf)
	__acquires(rq->lock)
{
	raw_spin_rq_lock_irq(rq);
	rq_pin_lock(rq, rf);
}

static inline void rq_lock(struct rq *rq, struct rq_flags *rf)
	__acquires(rq->lock)
{
	raw_spin_rq_lock(rq);
	rq_pin_lock(rq, rf);
}

static inline void rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
	__releases(rq->lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_rq_unlock_irqrestore(rq, rf->flags);
}

static inline void rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
	__releases(rq->lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_rq_unlock_irq(rq);
}

static inline void rq_unlock(struct rq *rq, struct rq_flags *rf)
	__releases(rq->lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_rq_unlock(rq);
}

DEFINE_LOCK_GUARD_1(rq_lock, struct rq,
		    rq_lock(_T->lock, &_T->rf),
		    rq_unlock(_T->lock, &_T->rf),
		    struct rq_flags rf)

DEFINE_LOCK_GUARD_1(rq_lock_irq, struct rq,
		    rq_lock_irq(_T->lock, &_T->rf),
		    rq_unlock_irq(_T->lock, &_T->rf),
		    struct rq_flags rf)

DEFINE_LOCK_GUARD_1(rq_lock_irqsave, struct rq,
		    rq_lock_irqsave(_T->lock, &_T->rf),
		    rq_unlock_irqrestore(_T->lock, &_T->rf),
		    struct rq_flags rf)

static inline struct rq *this_rq_lock_irq(struct rq_flags *rf)
	__acquires(rq->lock)
{
	struct rq *rq;

	local_irq_disable();
	rq = this_rq();
	rq_lock(rq, rf);

	return rq;
}

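/*
 * Illustrative use of the lock guards defined above (see linux/cleanup.h):
 * the guard acquires in its constructor and releases at scope exit, and the
 * extra members declared in DEFINE_LOCK_GUARD_1() are reachable through the
 * scope variable, e.g.:
 *
 *	scoped_guard (task_rq_lock, p) {
 *		struct rq *rq = scope.rq;
 *
 *		// p->pi_lock and rq's lock are held, rq is pinned
 *	}
 *	// both locks dropped here
 */
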
#ifdef CONFIG_NUMA

enum numa_topology_type {
	NUMA_DIRECT,
	NUMA_GLUELESS_MESH,
	NUMA_BACKPLANE,
};

extern enum numa_topology_type sched_numa_topology_type;
extern int sched_max_numa_distance;
extern bool find_numa_distance(int distance);
extern void sched_init_numa(int offline_node);
extern void sched_update_numa(int cpu, bool online);
extern void sched_domains_numa_masks_set(unsigned int cpu);
extern void sched_domains_numa_masks_clear(unsigned int cpu);
extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);

#else /* !CONFIG_NUMA: */

static inline void sched_init_numa(int offline_node) { }
static inline void sched_update_numa(int cpu, bool online) { }
static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }

static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
{
	return nr_cpu_ids;
}

#endif /* !CONFIG_NUMA */

#ifdef CONFIG_NUMA_BALANCING

/* The regions in numa_faults array from task_struct */
enum numa_faults_stats {
	NUMA_MEM = 0,
	NUMA_CPU,
	NUMA_MEMBUF,
	NUMA_CPUBUF
};

extern void sched_setnuma(struct task_struct *p, int node);
extern int migrate_task_to(struct task_struct *p, int cpu);
extern int migrate_swap(struct task_struct *p, struct task_struct *t,
			int cpu, int scpu);
extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p);

#else /* !CONFIG_NUMA_BALANCING: */

static inline void
init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
{
}

#endif /* !CONFIG_NUMA_BALANCING */

#ifdef CONFIG_SMP

static inline void
queue_balance_callback(struct rq *rq,
		       struct balance_callback *head,
		       void (*func)(struct rq *rq))
{
	lockdep_assert_rq_held(rq);

	/*
	 * Don't (re)queue an already queued item; nor queue anything when
	 * balance_push() is active, see the comment with
	 * balance_push_callback.
	 */
	if (unlikely(head->next || rq->balance_callback == &balance_push_callback))
		return;

	head->func = func;
	head->next = rq->balance_callback;
	rq->balance_callback = head;
}

#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See destroy_sched_domains: call_rcu for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
			__sd; __sd = __sd->parent)

/* A mask of all the SD flags that have the SDF_SHARED_CHILD metaflag */
#define SD_FLAG(name, mflags) (name * !!((mflags) & SDF_SHARED_CHILD)) |
static const unsigned int SD_SHARED_CHILD_MASK =
#include <linux/sched/sd_flags.h>
0;
#undef SD_FLAG

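/*
 * Illustrative walk of the domain tree, bottom (SMT/LLC) to top (NUMA);
 * preemption must stay disabled across the walk since the tree is
 * RCU-protected. @cpu and @target are placeholder variables for the two
 * CPUs of interest:
 *
 *	struct sched_domain *sd;
 *
 *	rcu_read_lock();
 *	for_each_domain(cpu, sd) {
 *		if (cpumask_test_cpu(target, sched_domain_span(sd)))
 *			break;	// lowest domain spanning both CPUs
 *	}
 *	rcu_read_unlock();
 */
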
/**
 * highest_flag_domain - Return highest sched_domain containing flag.
 * @cpu:	The CPU whose highest level of sched domain is to
 *		be returned.
 * @flag:	The flag to check for the highest sched_domain
 *		for the given CPU.
 *
 * Returns the highest sched_domain of a CPU which contains @flag. If @flag has
 * the SDF_SHARED_CHILD metaflag, all the children domains also have @flag.
 */
static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd, *hsd = NULL;

	for_each_domain(cpu, sd) {
		if (sd->flags & flag) {
			hsd = sd;
			continue;
		}

		/*
		 * Stop the search if @flag is known to be shared at lower
		 * levels. It will not be found further up.
		 */
		if (flag & SD_SHARED_CHILD_MASK)
			break;
	}

	return hsd;
}

static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd;

	for_each_domain(cpu, sd) {
		if (sd->flags & flag)
			break;
	}

	return sd;
}

DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_size);
DECLARE_PER_CPU(int, sd_llc_id);
DECLARE_PER_CPU(int, sd_share_id);
DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa);
DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);

extern struct static_key_false sched_asym_cpucapacity;
extern struct static_key_false sched_cluster_active;

static __always_inline bool sched_asym_cpucap_active(void)
{
	return static_branch_unlikely(&sched_asym_cpucapacity);
}

struct sched_group_capacity {
	atomic_t ref;
	/*
	 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
	 * for a single CPU.
	 */
	unsigned long capacity;
	unsigned long min_capacity;	/* Min per-CPU capacity in group */
	unsigned long max_capacity;	/* Max per-CPU capacity in group */
	unsigned long next_update;
	int imbalance;			/* XXX unrelated to capacity but shared group state */

#ifdef CONFIG_SCHED_DEBUG
	int id;
#endif

	unsigned long cpumask[];	/* Balance mask */
};

struct sched_group {
	struct sched_group *next;	/* Must be a circular list */
	atomic_t ref;

	unsigned int group_weight;
	unsigned int cores;
	struct sched_group_capacity *sgc;
	int asym_prefer_cpu;		/* CPU of highest priority in group */
	int flags;

	/*
	 * The CPUs this group covers.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long cpumask[];
};

static inline struct cpumask *sched_group_span(struct sched_group *sg)
{
	return to_cpumask(sg->cpumask);
}

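/*
 * Because sched_group::next is circular, the canonical way to visit all
 * groups of a domain (as the load balancer does) is a do/while starting at
 * sd->groups, illustratively; inspect() is a placeholder:
 *
 *	struct sched_group *sg = sd->groups;
 *
 *	do {
 *		int cpu;
 *
 *		for_each_cpu(cpu, sched_group_span(sg))
 *			inspect(cpu_rq(cpu));
 *		sg = sg->next;
 *	} while (sg != sd->groups);
 */
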
 */
static inline struct cpumask *group_balance_mask(struct sched_group *sg)
{
	return to_cpumask(sg->sgc->cpumask);
}

extern int group_balance_cpu(struct sched_group *sg);

#ifdef CONFIG_SCHED_DEBUG
extern void update_sched_domain_debugfs(void);
extern void dirty_sched_domain_sysctl(int cpu);
#else
static inline void update_sched_domain_debugfs(void) { }
static inline void dirty_sched_domain_sysctl(int cpu) { }
#endif

extern int sched_update_scaling(void);

static inline const struct cpumask *task_user_cpus(struct task_struct *p)
{
	if (!p->user_cpus_ptr)
		return cpu_possible_mask; /* &init_task.cpus_mask */
	return p->user_cpus_ptr;
}

#endif /* CONFIG_SMP */

#include "stats.h"

#if defined(CONFIG_SCHED_CORE) && defined(CONFIG_SCHEDSTATS)

extern void __sched_core_account_forceidle(struct rq *rq);

static inline void sched_core_account_forceidle(struct rq *rq)
{
	if (schedstat_enabled())
		__sched_core_account_forceidle(rq);
}

extern void __sched_core_tick(struct rq *rq);

static inline void sched_core_tick(struct rq *rq)
{
	if (sched_core_enabled(rq) && schedstat_enabled())
		__sched_core_tick(rq);
}

#else /* !(CONFIG_SCHED_CORE && CONFIG_SCHEDSTATS): */

static inline void sched_core_account_forceidle(struct rq *rq) { }

static inline void sched_core_tick(struct rq *rq) { }

#endif /* !(CONFIG_SCHED_CORE && CONFIG_SCHEDSTATS) */

#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We cannot use task_css() and friends because the cgroup subsystem
 * changes that value before the cgroup_subsys::attach() method is called,
 * therefore we cannot pin it and might observe the wrong value.
 *
 * The same is true for autogroup's p->signal->autogroup->tg: the autogroup
 * core changes this before calling sched_move_task().
 *
 * Instead we use a 'copy' which is updated from sched_move_task() while
 * holding both task_struct::pi_lock and rq::lock.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
	return p->sched_task_group;
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
	struct task_group *tg = task_group(p);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
	p->se.cfs_rq = tg->cfs_rq[cpu];
	p->se.parent = tg->se[cpu];
	p->se.depth = tg->se[cpu] ? tg->se[cpu]->depth + 1 : 0;
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq  = tg->rt_rq[cpu];
	p->rt.parent = tg->rt_se[cpu];
#endif
}

#else /* !CONFIG_CGROUP_SCHED: */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }

static inline struct task_group *task_group(struct task_struct *p)
{
	return NULL;
}

#endif /* !CONFIG_CGROUP_SCHED */

static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	set_task_rq(p, cpu);
#ifdef CONFIG_SMP
	/*
	 * After ->cpu is set up to a new value, task_rq_lock(p, ...)
can be 2091 * successfully executed on another CPU. We must ensure that updates of 2092 * per-task data have been completed by this moment. 2093 */ 2094 smp_wmb(); 2095 WRITE_ONCE(task_thread_info(p)->cpu, cpu); 2096 p->wake_cpu = cpu; 2097 #endif 2098 } 2099 2100 /* 2101 * Tunables that become constants when CONFIG_SCHED_DEBUG is off: 2102 */ 2103 #ifdef CONFIG_SCHED_DEBUG 2104 # define const_debug __read_mostly 2105 #else 2106 # define const_debug const 2107 #endif 2108 2109 #define SCHED_FEAT(name, enabled) \ 2110 __SCHED_FEAT_##name , 2111 2112 enum { 2113 #include "features.h" 2114 __SCHED_FEAT_NR, 2115 }; 2116 2117 #undef SCHED_FEAT 2118 2119 #ifdef CONFIG_SCHED_DEBUG 2120 2121 /* 2122 * To support run-time toggling of sched features, all the translation units 2123 * (but core.c) reference the sysctl_sched_features defined in core.c. 2124 */ 2125 extern const_debug unsigned int sysctl_sched_features; 2126 2127 #ifdef CONFIG_JUMP_LABEL 2128 2129 #define SCHED_FEAT(name, enabled) \ 2130 static __always_inline bool static_branch_##name(struct static_key *key) \ 2131 { \ 2132 return static_key_##enabled(key); \ 2133 } 2134 2135 #include "features.h" 2136 #undef SCHED_FEAT 2137 2138 extern struct static_key sched_feat_keys[__SCHED_FEAT_NR]; 2139 #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x])) 2140 2141 #else /* !CONFIG_JUMP_LABEL: */ 2142 2143 #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) 2144 2145 #endif /* !CONFIG_JUMP_LABEL */ 2146 2147 #else /* !SCHED_DEBUG: */ 2148 2149 /* 2150 * Each translation unit has its own copy of sysctl_sched_features to allow 2151 * constants propagation at compile time and compiler optimization based on 2152 * features default. 2153 */ 2154 #define SCHED_FEAT(name, enabled) \ 2155 (1UL << __SCHED_FEAT_##name) * enabled | 2156 static const_debug __maybe_unused unsigned int sysctl_sched_features = 2157 #include "features.h" 2158 0; 2159 #undef SCHED_FEAT 2160 2161 #define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) 2162 2163 #endif /* !SCHED_DEBUG */ 2164 2165 extern struct static_key_false sched_numa_balancing; 2166 extern struct static_key_false sched_schedstats; 2167 2168 static inline u64 global_rt_period(void) 2169 { 2170 return (u64)sysctl_sched_rt_period * NSEC_PER_USEC; 2171 } 2172 2173 static inline u64 global_rt_runtime(void) 2174 { 2175 if (sysctl_sched_rt_runtime < 0) 2176 return RUNTIME_INF; 2177 2178 return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC; 2179 } 2180 2181 static inline int task_current(struct rq *rq, struct task_struct *p) 2182 { 2183 return rq->curr == p; 2184 } 2185 2186 static inline int task_on_cpu(struct rq *rq, struct task_struct *p) 2187 { 2188 #ifdef CONFIG_SMP 2189 return p->on_cpu; 2190 #else 2191 return task_current(rq, p); 2192 #endif 2193 } 2194 2195 static inline int task_on_rq_queued(struct task_struct *p) 2196 { 2197 return p->on_rq == TASK_ON_RQ_QUEUED; 2198 } 2199 2200 static inline int task_on_rq_migrating(struct task_struct *p) 2201 { 2202 return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING; 2203 } 2204 2205 /* Wake flags. 
The first three directly map to some SD flag value */ 2206 #define WF_EXEC 0x02 /* Wakeup after exec; maps to SD_BALANCE_EXEC */ 2207 #define WF_FORK 0x04 /* Wakeup after fork; maps to SD_BALANCE_FORK */ 2208 #define WF_TTWU 0x08 /* Wakeup; maps to SD_BALANCE_WAKE */ 2209 2210 #define WF_SYNC 0x10 /* Waker goes to sleep after wakeup */ 2211 #define WF_MIGRATED 0x20 /* Internal use, task got migrated */ 2212 #define WF_CURRENT_CPU 0x40 /* Prefer to move the wakee to the current CPU. */ 2213 2214 #ifdef CONFIG_SMP 2215 static_assert(WF_EXEC == SD_BALANCE_EXEC); 2216 static_assert(WF_FORK == SD_BALANCE_FORK); 2217 static_assert(WF_TTWU == SD_BALANCE_WAKE); 2218 #endif 2219 2220 /* 2221 * To aid in avoiding the subversion of "niceness" due to uneven distribution 2222 * of tasks with abnormal "nice" values across CPUs the contribution that 2223 * each task makes to its run queue's load is weighted according to its 2224 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a 2225 * scaled version of the new time slice allocation that they receive on time 2226 * slice expiry etc. 2227 */ 2228 2229 #define WEIGHT_IDLEPRIO 3 2230 #define WMULT_IDLEPRIO 1431655765 2231 2232 extern const int sched_prio_to_weight[40]; 2233 extern const u32 sched_prio_to_wmult[40]; 2234 2235 /* 2236 * {de,en}queue flags: 2237 * 2238 * DEQUEUE_SLEEP - task is no longer runnable 2239 * ENQUEUE_WAKEUP - task just became runnable 2240 * 2241 * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks 2242 * are in a known state which allows modification. Such pairs 2243 * should preserve as much state as possible. 2244 * 2245 * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location 2246 * in the runqueue. 2247 * 2248 * NOCLOCK - skip the update_rq_clock() (avoids double updates) 2249 * 2250 * MIGRATION - p->on_rq == TASK_ON_RQ_MIGRATING (used for DEADLINE) 2251 * 2252 * ENQUEUE_HEAD - place at front of runqueue (tail if not specified) 2253 * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline) 2254 * ENQUEUE_MIGRATED - the task was migrated during wakeup 2255 * 2256 */ 2257 2258 #define DEQUEUE_SLEEP 0x01 /* Matches ENQUEUE_WAKEUP */ 2259 #define DEQUEUE_SAVE 0x02 /* Matches ENQUEUE_RESTORE */ 2260 #define DEQUEUE_MOVE 0x04 /* Matches ENQUEUE_MOVE */ 2261 #define DEQUEUE_NOCLOCK 0x08 /* Matches ENQUEUE_NOCLOCK */ 2262 #define DEQUEUE_SPECIAL 0x10 2263 #define DEQUEUE_MIGRATING 0x100 /* Matches ENQUEUE_MIGRATING */ 2264 #define DEQUEUE_DELAYED 0x200 /* Matches ENQUEUE_DELAYED */ 2265 2266 #define ENQUEUE_WAKEUP 0x01 2267 #define ENQUEUE_RESTORE 0x02 2268 #define ENQUEUE_MOVE 0x04 2269 #define ENQUEUE_NOCLOCK 0x08 2270 2271 #define ENQUEUE_HEAD 0x10 2272 #define ENQUEUE_REPLENISH 0x20 2273 #ifdef CONFIG_SMP 2274 #define ENQUEUE_MIGRATED 0x40 2275 #else 2276 #define ENQUEUE_MIGRATED 0x00 2277 #endif 2278 #define ENQUEUE_INITIAL 0x80 2279 #define ENQUEUE_MIGRATING 0x100 2280 #define ENQUEUE_DELAYED 0x200 2281 2282 #define RETRY_TASK ((void *)-1UL) 2283 2284 struct affinity_context { 2285 const struct cpumask *new_mask; 2286 struct cpumask *user_mask; 2287 unsigned int flags; 2288 }; 2289 2290 extern s64 update_curr_common(struct rq *rq); 2291 2292 struct sched_class { 2293 2294 #ifdef CONFIG_UCLAMP_TASK 2295 int uclamp_enabled; 2296 #endif 2297 2298 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags); 2299 bool (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags); 2300 void (*yield_task) (struct rq *rq); 2301 bool 
(*yield_to_task)(struct rq *rq, struct task_struct *p); 2302 2303 void (*wakeup_preempt)(struct rq *rq, struct task_struct *p, int flags); 2304 2305 struct task_struct *(*pick_task)(struct rq *rq); 2306 /* 2307 * Optional! When implemented pick_next_task() should be equivalent to: 2308 * 2309 * next = pick_task(); 2310 * if (next) { 2311 * put_prev_task(prev); 2312 * set_next_task_first(next); 2313 * } 2314 */ 2315 struct task_struct *(*pick_next_task)(struct rq *rq, struct task_struct *prev); 2316 2317 void (*put_prev_task)(struct rq *rq, struct task_struct *p, struct task_struct *next); 2318 void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first); 2319 2320 #ifdef CONFIG_SMP 2321 int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf); 2322 int (*select_task_rq)(struct task_struct *p, int task_cpu, int flags); 2323 2324 void (*migrate_task_rq)(struct task_struct *p, int new_cpu); 2325 2326 void (*task_woken)(struct rq *this_rq, struct task_struct *task); 2327 2328 void (*set_cpus_allowed)(struct task_struct *p, struct affinity_context *ctx); 2329 2330 void (*rq_online)(struct rq *rq); 2331 void (*rq_offline)(struct rq *rq); 2332 2333 struct rq *(*find_lock_rq)(struct task_struct *p, struct rq *rq); 2334 #endif 2335 2336 void (*task_tick)(struct rq *rq, struct task_struct *p, int queued); 2337 void (*task_fork)(struct task_struct *p); 2338 void (*task_dead)(struct task_struct *p); 2339 2340 /* 2341 * The switched_from() call is allowed to drop rq->lock, therefore we 2342 * cannot assume the switched_from/switched_to pair is serialized by 2343 * rq->lock. They are however serialized by p->pi_lock. 2344 */ 2345 void (*switched_from)(struct rq *this_rq, struct task_struct *task); 2346 void (*switched_to) (struct rq *this_rq, struct task_struct *task); 2347 void (*prio_changed) (struct rq *this_rq, struct task_struct *task, 2348 int oldprio); 2349 2350 unsigned int (*get_rr_interval)(struct rq *rq, 2351 struct task_struct *task); 2352 2353 void (*update_curr)(struct rq *rq); 2354 2355 #ifdef CONFIG_FAIR_GROUP_SCHED 2356 void (*task_change_group)(struct task_struct *p); 2357 #endif 2358 2359 #ifdef CONFIG_SCHED_CORE 2360 int (*task_is_throttled)(struct task_struct *p, int cpu); 2361 #endif 2362 }; 2363 2364 static inline void put_prev_task(struct rq *rq, struct task_struct *prev) 2365 { 2366 WARN_ON_ONCE(rq->curr != prev); 2367 prev->sched_class->put_prev_task(rq, prev, NULL); 2368 } 2369 2370 static inline void set_next_task(struct rq *rq, struct task_struct *next) 2371 { 2372 next->sched_class->set_next_task(rq, next, false); 2373 } 2374 2375 static inline void 2376 __put_prev_set_next_dl_server(struct rq *rq, 2377 struct task_struct *prev, 2378 struct task_struct *next) 2379 { 2380 prev->dl_server = NULL; 2381 next->dl_server = rq->dl_server; 2382 rq->dl_server = NULL; 2383 } 2384 2385 static inline void put_prev_set_next_task(struct rq *rq, 2386 struct task_struct *prev, 2387 struct task_struct *next) 2388 { 2389 WARN_ON_ONCE(rq->curr != prev); 2390 2391 __put_prev_set_next_dl_server(rq, prev, next); 2392 2393 if (next == prev) 2394 return; 2395 2396 prev->sched_class->put_prev_task(rq, prev, next); 2397 next->sched_class->set_next_task(rq, next, true); 2398 } 2399 2400 /* 2401 * Helper to define a sched_class instance; each one is placed in a separate 2402 * section which is ordered by the linker script: 2403 * 2404 * include/asm-generic/vmlinux.lds.h 2405 * 2406 * *CAREFUL* they are laid out in *REVERSE* order!!! 
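 *
 * That is, the highest-priority class (stop) sits at the lowest address,
 * __sched_class_highest, and for_each_class() walks upward through dl,
 * rt and fair until idle at __sched_class_lowest; hence the '<' in
 * sched_class_above(). Illustrative consequence:
 *
 *	sched_class_above(&rt_sched_class, &fair_sched_class); /* true */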
2407 * 2408 * Also enforce alignment on the instance, not the type, to guarantee layout. 2409 */ 2410 #define DEFINE_SCHED_CLASS(name) \ 2411 const struct sched_class name##_sched_class \ 2412 __aligned(__alignof__(struct sched_class)) \ 2413 __section("__" #name "_sched_class") 2414 2415 /* Defined in include/asm-generic/vmlinux.lds.h */ 2416 extern struct sched_class __sched_class_highest[]; 2417 extern struct sched_class __sched_class_lowest[]; 2418 2419 #define for_class_range(class, _from, _to) \ 2420 for (class = (_from); class < (_to); class++) 2421 2422 #define for_each_class(class) \ 2423 for_class_range(class, __sched_class_highest, __sched_class_lowest) 2424 2425 #define sched_class_above(_a, _b) ((_a) < (_b)) 2426 2427 extern const struct sched_class stop_sched_class; 2428 extern const struct sched_class dl_sched_class; 2429 extern const struct sched_class rt_sched_class; 2430 extern const struct sched_class fair_sched_class; 2431 extern const struct sched_class idle_sched_class; 2432 2433 static inline bool sched_stop_runnable(struct rq *rq) 2434 { 2435 return rq->stop && task_on_rq_queued(rq->stop); 2436 } 2437 2438 static inline bool sched_dl_runnable(struct rq *rq) 2439 { 2440 return rq->dl.dl_nr_running > 0; 2441 } 2442 2443 static inline bool sched_rt_runnable(struct rq *rq) 2444 { 2445 return rq->rt.rt_queued > 0; 2446 } 2447 2448 static inline bool sched_fair_runnable(struct rq *rq) 2449 { 2450 return rq->cfs.nr_running > 0; 2451 } 2452 2453 extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf); 2454 extern struct task_struct *pick_task_idle(struct rq *rq); 2455 2456 #define SCA_CHECK 0x01 2457 #define SCA_MIGRATE_DISABLE 0x02 2458 #define SCA_MIGRATE_ENABLE 0x04 2459 #define SCA_USER 0x08 2460 2461 #ifdef CONFIG_SMP 2462 2463 extern void update_group_capacity(struct sched_domain *sd, int cpu); 2464 2465 extern void sched_balance_trigger(struct rq *rq); 2466 2467 extern int __set_cpus_allowed_ptr(struct task_struct *p, struct affinity_context *ctx); 2468 extern void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx); 2469 2470 static inline cpumask_t *alloc_user_cpus_ptr(int node) 2471 { 2472 /* 2473 * See do_set_cpus_allowed() above for the rcu_head usage. 
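 *
 * The max_t() below makes the buffer large enough to be reinterpreted
 * as a struct rcu_head on release, so the old mask can be handed to
 * kfree_rcu() while concurrent readers may still hold a reference; a
 * sketch of the assumed release side (where 'mask' stands for the
 * retired user_cpus_ptr):
 *
 *	kfree_rcu((union cpumask_rcuhead *)mask, rcu);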
 */
	int size = max_t(int, cpumask_size(), sizeof(struct rcu_head));

	return kmalloc_node(size, GFP_KERNEL, node);
}

static inline struct task_struct *get_push_task(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	lockdep_assert_rq_held(rq);

	if (rq->push_busy)
		return NULL;

	if (p->nr_cpus_allowed == 1)
		return NULL;

	if (p->migration_disabled)
		return NULL;

	rq->push_busy = true;
	return get_task_struct(p);
}

extern int push_cpu_stop(void *arg);

#else /* !CONFIG_SMP: */

static inline int __set_cpus_allowed_ptr(struct task_struct *p,
					 struct affinity_context *ctx)
{
	return set_cpus_allowed_ptr(p, ctx->new_mask);
}

static inline cpumask_t *alloc_user_cpus_ptr(int node)
{
	return NULL;
}

#endif /* !CONFIG_SMP */

#ifdef CONFIG_CPU_IDLE

static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
	rq->idle_state = idle_state;
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	SCHED_WARN_ON(!rcu_read_lock_held());

	return rq->idle_state;
}

#else /* !CONFIG_CPU_IDLE: */

static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	return NULL;
}

#endif /* !CONFIG_CPU_IDLE */

extern void schedule_idle(void);
asmlinkage void schedule_user(void);

extern void sysrq_sched_debug_show(void);
extern void sched_init_granularity(void);
extern void update_max_interval(void);

extern void init_sched_dl_class(void);
extern void init_sched_rt_class(void);
extern void init_sched_fair_class(void);

extern void reweight_task(struct task_struct *p, const struct load_weight *lw);

extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);

extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);

extern void init_dl_entity(struct sched_dl_entity *dl_se);

#define BW_SHIFT		20
#define BW_UNIT			(1 << BW_SHIFT)
#define RATIO_SHIFT		8
#define MAX_BW_BITS		(64 - BW_SHIFT)
#define MAX_BW			((1ULL << MAX_BW_BITS) - 1)

extern unsigned long to_ratio(u64 period, u64 runtime);

extern void init_entity_runnable_average(struct sched_entity *se);
extern void post_init_entity_util_avg(struct task_struct *p);

#ifdef CONFIG_NO_HZ_FULL
extern bool sched_can_stop_tick(struct rq *rq);
extern int __init sched_tick_offload_init(void);

/*
 * Tick may be needed by tasks in the runqueue depending on their policy and
 * requirements. If tick is needed, let's send the target an IPI to kick it out
 * of nohz mode if necessary.
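 *
 * For instance (illustrative): with a single runnable SCHED_NORMAL task
 * the tick can be stopped, but enqueueing a second one makes the tick
 * necessary again for preemption, so sched_can_stop_tick() flips and
 * the update below re-arms the dependency:
 *
 *	if (!sched_can_stop_tick(rq))
 *		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);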
2585 */ 2586 static inline void sched_update_tick_dependency(struct rq *rq) 2587 { 2588 int cpu = cpu_of(rq); 2589 2590 if (!tick_nohz_full_cpu(cpu)) 2591 return; 2592 2593 if (sched_can_stop_tick(rq)) 2594 tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED); 2595 else 2596 tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED); 2597 } 2598 #else /* !CONFIG_NO_HZ_FULL: */ 2599 static inline int sched_tick_offload_init(void) { return 0; } 2600 static inline void sched_update_tick_dependency(struct rq *rq) { } 2601 #endif /* !CONFIG_NO_HZ_FULL */ 2602 2603 static inline void add_nr_running(struct rq *rq, unsigned count) 2604 { 2605 unsigned prev_nr = rq->nr_running; 2606 2607 rq->nr_running = prev_nr + count; 2608 if (trace_sched_update_nr_running_tp_enabled()) { 2609 call_trace_sched_update_nr_running(rq, count); 2610 } 2611 2612 #ifdef CONFIG_SMP 2613 if (prev_nr < 2 && rq->nr_running >= 2) 2614 set_rd_overloaded(rq->rd, 1); 2615 #endif 2616 2617 sched_update_tick_dependency(rq); 2618 } 2619 2620 static inline void sub_nr_running(struct rq *rq, unsigned count) 2621 { 2622 rq->nr_running -= count; 2623 if (trace_sched_update_nr_running_tp_enabled()) { 2624 call_trace_sched_update_nr_running(rq, -count); 2625 } 2626 2627 /* Check if we still need preemption */ 2628 sched_update_tick_dependency(rq); 2629 } 2630 2631 static inline void __block_task(struct rq *rq, struct task_struct *p) 2632 { 2633 WRITE_ONCE(p->on_rq, 0); 2634 ASSERT_EXCLUSIVE_WRITER(p->on_rq); 2635 if (p->sched_contributes_to_load) 2636 rq->nr_uninterruptible++; 2637 2638 if (p->in_iowait) { 2639 atomic_inc(&rq->nr_iowait); 2640 delayacct_blkio_start(); 2641 } 2642 } 2643 2644 extern void activate_task(struct rq *rq, struct task_struct *p, int flags); 2645 extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags); 2646 2647 extern void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags); 2648 2649 #ifdef CONFIG_PREEMPT_RT 2650 # define SCHED_NR_MIGRATE_BREAK 8 2651 #else 2652 # define SCHED_NR_MIGRATE_BREAK 32 2653 #endif 2654 2655 extern const_debug unsigned int sysctl_sched_nr_migrate; 2656 extern const_debug unsigned int sysctl_sched_migration_cost; 2657 2658 extern unsigned int sysctl_sched_base_slice; 2659 2660 #ifdef CONFIG_SCHED_DEBUG 2661 extern int sysctl_resched_latency_warn_ms; 2662 extern int sysctl_resched_latency_warn_once; 2663 2664 extern unsigned int sysctl_sched_tunable_scaling; 2665 2666 extern unsigned int sysctl_numa_balancing_scan_delay; 2667 extern unsigned int sysctl_numa_balancing_scan_period_min; 2668 extern unsigned int sysctl_numa_balancing_scan_period_max; 2669 extern unsigned int sysctl_numa_balancing_scan_size; 2670 extern unsigned int sysctl_numa_balancing_hot_threshold; 2671 #endif 2672 2673 #ifdef CONFIG_SCHED_HRTICK 2674 2675 /* 2676 * Use hrtick when: 2677 * - enabled by features 2678 * - hrtimer is actually high res 2679 */ 2680 static inline int hrtick_enabled(struct rq *rq) 2681 { 2682 if (!cpu_active(cpu_of(rq))) 2683 return 0; 2684 return hrtimer_is_hres_active(&rq->hrtick_timer); 2685 } 2686 2687 static inline int hrtick_enabled_fair(struct rq *rq) 2688 { 2689 if (!sched_feat(HRTICK)) 2690 return 0; 2691 return hrtick_enabled(rq); 2692 } 2693 2694 static inline int hrtick_enabled_dl(struct rq *rq) 2695 { 2696 if (!sched_feat(HRTICK_DL)) 2697 return 0; 2698 return hrtick_enabled(rq); 2699 } 2700 2701 extern void hrtick_start(struct rq *rq, u64 delay); 2702 2703 #else /* !CONFIG_SCHED_HRTICK: */ 2704 2705 static inline int hrtick_enabled_fair(struct rq *rq) 2706 { 
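	/* CONFIG_SCHED_HRTICK=n: report the high-res tick as always disabled. */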
	return 0;
}

static inline int hrtick_enabled_dl(struct rq *rq)
{
	return 0;
}

static inline int hrtick_enabled(struct rq *rq)
{
	return 0;
}

#endif /* !CONFIG_SCHED_HRTICK */

#ifndef arch_scale_freq_tick
static __always_inline void arch_scale_freq_tick(void) { }
#endif

#ifndef arch_scale_freq_capacity
/**
 * arch_scale_freq_capacity - get the frequency scale factor of a given CPU.
 * @cpu: the CPU in question.
 *
 * Return: the frequency scale factor normalized against SCHED_CAPACITY_SCALE, i.e.
 *
 *	f_curr
 *	------ * SCHED_CAPACITY_SCALE
 *	f_max
 */
static __always_inline
unsigned long arch_scale_freq_capacity(int cpu)
{
	return SCHED_CAPACITY_SCALE;
}
#endif

#ifdef CONFIG_SCHED_DEBUG
/*
 * In double_lock_balance()/double_rq_lock(), we use raw_spin_rq_lock() to
 * acquire rq lock instead of rq_lock(). So at the end of these two functions
 * we need to call double_rq_clock_clear_update() to clear RQCF_UPDATED of
 * rq->clock_update_flags to avoid the WARN_DOUBLE_CLOCK warning.
 */
static inline void double_rq_clock_clear_update(struct rq *rq1, struct rq *rq2)
{
	rq1->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
	/* rq1 == rq2 for !CONFIG_SMP, so just clear RQCF_UPDATED once. */
#ifdef CONFIG_SMP
	rq2->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
#endif
}
#else
static inline void double_rq_clock_clear_update(struct rq *rq1, struct rq *rq2) { }
#endif

#define DEFINE_LOCK_GUARD_2(name, type, _lock, _unlock, ...)		\
__DEFINE_UNLOCK_GUARD(name, type, _unlock, type *lock2; __VA_ARGS__)	\
static inline class_##name##_t class_##name##_constructor(type *lock, type *lock2) \
{ class_##name##_t _t = { .lock = lock, .lock2 = lock2 }, *_T = &_t;	\
  _lock; return _t; }

#ifdef CONFIG_SMP

static inline bool rq_order_less(struct rq *rq1, struct rq *rq2)
{
#ifdef CONFIG_SCHED_CORE
	/*
	 * In order to not have {0,2},{1,3} turn into an AB-BA,
	 * order by core-id first and cpu-id second.
	 *
	 * Notably:
	 *
	 *	double_rq_lock(0,3); will take core-0, core-1 lock
	 *	double_rq_lock(1,2); will take core-1, core-0 lock
	 *
	 * when only cpu-id is considered.
	 */
	if (rq1->core->cpu < rq2->core->cpu)
		return true;
	if (rq1->core->cpu > rq2->core->cpu)
		return false;

	/*
	 * __sched_core_flip() relies on SMT having cpu-id lock order.
	 */
#endif
	return rq1->cpu < rq2->cpu;
}

extern void double_rq_lock(struct rq *rq1, struct rq *rq2);

#ifdef CONFIG_PREEMPTION

/*
 * fair double_lock_balance: Safely acquires both rq->locks in a fair
 * way at the expense of forcing extra atomic operations in all
 * invocations. This assures that the double_lock is acquired using the
 * same underlying policy as the spinlock_t on this architecture, which
 * reduces latency compared to the unfair variant below. However, it
 * also adds more overhead and therefore may reduce throughput.
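 *
 * Either variant returns 1 if this_rq's lock was dropped and re-acquired,
 * after which any state previously read from this_rq must be re-validated
 * by the caller; an illustrative sketch:
 *
 *	if (double_lock_balance(this_rq, busiest))
 *		;	/* this_rq was unlocked: re-check rq state here */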
2808 */ 2809 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) 2810 __releases(this_rq->lock) 2811 __acquires(busiest->lock) 2812 __acquires(this_rq->lock) 2813 { 2814 raw_spin_rq_unlock(this_rq); 2815 double_rq_lock(this_rq, busiest); 2816 2817 return 1; 2818 } 2819 2820 #else /* !CONFIG_PREEMPTION: */ 2821 /* 2822 * Unfair double_lock_balance: Optimizes throughput at the expense of 2823 * latency by eliminating extra atomic operations when the locks are 2824 * already in proper order on entry. This favors lower CPU-ids and will 2825 * grant the double lock to lower CPUs over higher ids under contention, 2826 * regardless of entry order into the function. 2827 */ 2828 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) 2829 __releases(this_rq->lock) 2830 __acquires(busiest->lock) 2831 __acquires(this_rq->lock) 2832 { 2833 if (__rq_lockp(this_rq) == __rq_lockp(busiest) || 2834 likely(raw_spin_rq_trylock(busiest))) { 2835 double_rq_clock_clear_update(this_rq, busiest); 2836 return 0; 2837 } 2838 2839 if (rq_order_less(this_rq, busiest)) { 2840 raw_spin_rq_lock_nested(busiest, SINGLE_DEPTH_NESTING); 2841 double_rq_clock_clear_update(this_rq, busiest); 2842 return 0; 2843 } 2844 2845 raw_spin_rq_unlock(this_rq); 2846 double_rq_lock(this_rq, busiest); 2847 2848 return 1; 2849 } 2850 2851 #endif /* !CONFIG_PREEMPTION */ 2852 2853 /* 2854 * double_lock_balance - lock the busiest runqueue, this_rq is locked already. 2855 */ 2856 static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest) 2857 { 2858 lockdep_assert_irqs_disabled(); 2859 2860 return _double_lock_balance(this_rq, busiest); 2861 } 2862 2863 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) 2864 __releases(busiest->lock) 2865 { 2866 if (__rq_lockp(this_rq) != __rq_lockp(busiest)) 2867 raw_spin_rq_unlock(busiest); 2868 lock_set_subclass(&__rq_lockp(this_rq)->dep_map, 0, _RET_IP_); 2869 } 2870 2871 static inline void double_lock(spinlock_t *l1, spinlock_t *l2) 2872 { 2873 if (l1 > l2) 2874 swap(l1, l2); 2875 2876 spin_lock(l1); 2877 spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 2878 } 2879 2880 static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2) 2881 { 2882 if (l1 > l2) 2883 swap(l1, l2); 2884 2885 spin_lock_irq(l1); 2886 spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 2887 } 2888 2889 static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2) 2890 { 2891 if (l1 > l2) 2892 swap(l1, l2); 2893 2894 raw_spin_lock(l1); 2895 raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 2896 } 2897 2898 static inline void double_raw_unlock(raw_spinlock_t *l1, raw_spinlock_t *l2) 2899 { 2900 raw_spin_unlock(l1); 2901 raw_spin_unlock(l2); 2902 } 2903 2904 DEFINE_LOCK_GUARD_2(double_raw_spinlock, raw_spinlock_t, 2905 double_raw_lock(_T->lock, _T->lock2), 2906 double_raw_unlock(_T->lock, _T->lock2)) 2907 2908 /* 2909 * double_rq_unlock - safely unlock two runqueues 2910 * 2911 * Note this does not restore interrupts like task_rq_unlock, 2912 * you need to do so manually after calling. 
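 *
 * A typical call sequence therefore looks like this (illustrative):
 *
 *	local_irq_save(flags);
 *	double_rq_lock(rq1, rq2);
 *	...
 *	double_rq_unlock(rq1, rq2);
 *	local_irq_restore(flags);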
2913 */ 2914 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) 2915 __releases(rq1->lock) 2916 __releases(rq2->lock) 2917 { 2918 if (__rq_lockp(rq1) != __rq_lockp(rq2)) 2919 raw_spin_rq_unlock(rq2); 2920 else 2921 __release(rq2->lock); 2922 raw_spin_rq_unlock(rq1); 2923 } 2924 2925 extern void set_rq_online (struct rq *rq); 2926 extern void set_rq_offline(struct rq *rq); 2927 2928 extern bool sched_smp_initialized; 2929 2930 #else /* !CONFIG_SMP: */ 2931 2932 /* 2933 * double_rq_lock - safely lock two runqueues 2934 * 2935 * Note this does not disable interrupts like task_rq_lock, 2936 * you need to do so manually before calling. 2937 */ 2938 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) 2939 __acquires(rq1->lock) 2940 __acquires(rq2->lock) 2941 { 2942 WARN_ON_ONCE(!irqs_disabled()); 2943 WARN_ON_ONCE(rq1 != rq2); 2944 raw_spin_rq_lock(rq1); 2945 __acquire(rq2->lock); /* Fake it out ;) */ 2946 double_rq_clock_clear_update(rq1, rq2); 2947 } 2948 2949 /* 2950 * double_rq_unlock - safely unlock two runqueues 2951 * 2952 * Note this does not restore interrupts like task_rq_unlock, 2953 * you need to do so manually after calling. 2954 */ 2955 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) 2956 __releases(rq1->lock) 2957 __releases(rq2->lock) 2958 { 2959 WARN_ON_ONCE(rq1 != rq2); 2960 raw_spin_rq_unlock(rq1); 2961 __release(rq2->lock); 2962 } 2963 2964 #endif /* !CONFIG_SMP */ 2965 2966 DEFINE_LOCK_GUARD_2(double_rq_lock, struct rq, 2967 double_rq_lock(_T->lock, _T->lock2), 2968 double_rq_unlock(_T->lock, _T->lock2)) 2969 2970 extern struct sched_entity *__pick_root_entity(struct cfs_rq *cfs_rq); 2971 extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq); 2972 extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq); 2973 2974 #ifdef CONFIG_SCHED_DEBUG 2975 extern bool sched_debug_verbose; 2976 2977 extern void print_cfs_stats(struct seq_file *m, int cpu); 2978 extern void print_rt_stats(struct seq_file *m, int cpu); 2979 extern void print_dl_stats(struct seq_file *m, int cpu); 2980 extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq); 2981 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq); 2982 extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq); 2983 2984 extern void resched_latency_warn(int cpu, u64 latency); 2985 # ifdef CONFIG_NUMA_BALANCING 2986 extern void show_numa_stats(struct task_struct *p, struct seq_file *m); 2987 extern void 2988 print_numa_stats(struct seq_file *m, int node, unsigned long tsf, 2989 unsigned long tpf, unsigned long gsf, unsigned long gpf); 2990 # endif /* CONFIG_NUMA_BALANCING */ 2991 #else /* !CONFIG_SCHED_DEBUG: */ 2992 static inline void resched_latency_warn(int cpu, u64 latency) { } 2993 #endif /* !CONFIG_SCHED_DEBUG */ 2994 2995 extern void init_cfs_rq(struct cfs_rq *cfs_rq); 2996 extern void init_rt_rq(struct rt_rq *rt_rq); 2997 extern void init_dl_rq(struct dl_rq *dl_rq); 2998 2999 extern void cfs_bandwidth_usage_inc(void); 3000 extern void cfs_bandwidth_usage_dec(void); 3001 3002 #ifdef CONFIG_NO_HZ_COMMON 3003 3004 #define NOHZ_BALANCE_KICK_BIT 0 3005 #define NOHZ_STATS_KICK_BIT 1 3006 #define NOHZ_NEWILB_KICK_BIT 2 3007 #define NOHZ_NEXT_KICK_BIT 3 3008 3009 /* Run sched_balance_domains() */ 3010 #define NOHZ_BALANCE_KICK BIT(NOHZ_BALANCE_KICK_BIT) 3011 /* Update blocked load */ 3012 #define NOHZ_STATS_KICK BIT(NOHZ_STATS_KICK_BIT) 3013 /* Update blocked load when entering idle */ 3014 #define 
NOHZ_NEWILB_KICK	BIT(NOHZ_NEWILB_KICK_BIT)
/* Update nohz.next_balance */
#define NOHZ_NEXT_KICK		BIT(NOHZ_NEXT_KICK_BIT)

#define NOHZ_KICK_MASK		(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK | NOHZ_NEXT_KICK)

#define nohz_flags(cpu)		(&cpu_rq(cpu)->nohz_flags)

extern void nohz_balance_exit_idle(struct rq *rq);
#else /* !CONFIG_NO_HZ_COMMON: */
static inline void nohz_balance_exit_idle(struct rq *rq) { }
#endif /* !CONFIG_NO_HZ_COMMON */

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void nohz_run_idle_balance(int cpu);
#else
static inline void nohz_run_idle_balance(int cpu) { }
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING

struct irqtime {
	u64			total;
	u64			tick_delta;
	u64			irq_start_time;
	struct u64_stats_sync	sync;
};

DECLARE_PER_CPU(struct irqtime, cpu_irqtime);

/*
 * Returns the irqtime minus the softirq time computed by ksoftirqd.
 * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
 * subtracted from it and would never move forward.
 */
static inline u64 irq_time_read(int cpu)
{
	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
	unsigned int seq;
	u64 total;

	do {
		seq = __u64_stats_fetch_begin(&irqtime->sync);
		total = irqtime->total;
	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));

	return total;
}

#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

#ifdef CONFIG_CPU_FREQ

DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);

/**
 * cpufreq_update_util - Take a note about CPU utilization changes.
 * @rq: Runqueue to carry out the update for.
 * @flags: Update reason flags.
 *
 * This function is called by the scheduler on the CPU whose utilization is
 * being updated.
 *
 * It can only be called from RCU-sched read-side critical sections.
 *
 * The way cpufreq is currently arranged requires it to evaluate the CPU
 * performance state (frequency/voltage) on a regular basis to prevent it from
 * being stuck in a completely inadequate performance level for too long.
 * That is not guaranteed to happen if the updates are only triggered from CFS
 * and DL, though, because they may not be coming in if only RT tasks are
 * active all the time.
 *
 * As a workaround for that issue, this function is called periodically by the
 * RT sched class to trigger extra cpufreq updates to prevent it from stalling,
 * but that really is a band-aid. Going forward it should be replaced with
 * solutions targeted more specifically at RT tasks.
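 *
 * Callers pass SCHED_CPUFREQ_* hints in @flags; e.g. an I/O-wait wakeup
 * can be flagged so the governor may apply a frequency boost
 * (illustrative):
 *
 *	cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);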
3090 */ 3091 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) 3092 { 3093 struct update_util_data *data; 3094 3095 data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data, 3096 cpu_of(rq))); 3097 if (data) 3098 data->func(data, rq_clock(rq), flags); 3099 } 3100 #else /* !CONFIG_CPU_FREQ: */ 3101 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) { } 3102 #endif /* !CONFIG_CPU_FREQ */ 3103 3104 #ifdef arch_scale_freq_capacity 3105 # ifndef arch_scale_freq_invariant 3106 # define arch_scale_freq_invariant() true 3107 # endif 3108 #else 3109 # define arch_scale_freq_invariant() false 3110 #endif 3111 3112 #ifdef CONFIG_SMP 3113 3114 unsigned long effective_cpu_util(int cpu, unsigned long util_cfs, 3115 unsigned long *min, 3116 unsigned long *max); 3117 3118 unsigned long sugov_effective_cpu_perf(int cpu, unsigned long actual, 3119 unsigned long min, 3120 unsigned long max); 3121 3122 3123 /* 3124 * Verify the fitness of task @p to run on @cpu taking into account the 3125 * CPU original capacity and the runtime/deadline ratio of the task. 3126 * 3127 * The function will return true if the original capacity of @cpu is 3128 * greater than or equal to task's deadline density right shifted by 3129 * (BW_SHIFT - SCHED_CAPACITY_SHIFT) and false otherwise. 3130 */ 3131 static inline bool dl_task_fits_capacity(struct task_struct *p, int cpu) 3132 { 3133 unsigned long cap = arch_scale_cpu_capacity(cpu); 3134 3135 return cap >= p->dl.dl_density >> (BW_SHIFT - SCHED_CAPACITY_SHIFT); 3136 } 3137 3138 static inline unsigned long cpu_bw_dl(struct rq *rq) 3139 { 3140 return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT; 3141 } 3142 3143 static inline unsigned long cpu_util_dl(struct rq *rq) 3144 { 3145 return READ_ONCE(rq->avg_dl.util_avg); 3146 } 3147 3148 3149 extern unsigned long cpu_util_cfs(int cpu); 3150 extern unsigned long cpu_util_cfs_boost(int cpu); 3151 3152 static inline unsigned long cpu_util_rt(struct rq *rq) 3153 { 3154 return READ_ONCE(rq->avg_rt.util_avg); 3155 } 3156 3157 #endif /* CONFIG_SMP */ 3158 3159 #ifdef CONFIG_UCLAMP_TASK 3160 3161 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id); 3162 3163 static inline unsigned long uclamp_rq_get(struct rq *rq, 3164 enum uclamp_id clamp_id) 3165 { 3166 return READ_ONCE(rq->uclamp[clamp_id].value); 3167 } 3168 3169 static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id, 3170 unsigned int value) 3171 { 3172 WRITE_ONCE(rq->uclamp[clamp_id].value, value); 3173 } 3174 3175 static inline bool uclamp_rq_is_idle(struct rq *rq) 3176 { 3177 return rq->uclamp_flags & UCLAMP_FLAG_IDLE; 3178 } 3179 3180 /* Is the rq being capped/throttled by uclamp_max? */ 3181 static inline bool uclamp_rq_is_capped(struct rq *rq) 3182 { 3183 unsigned long rq_util; 3184 unsigned long max_util; 3185 3186 if (!static_branch_likely(&sched_uclamp_used)) 3187 return false; 3188 3189 rq_util = cpu_util_cfs(cpu_of(rq)) + cpu_util_rt(rq); 3190 max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value); 3191 3192 return max_util != SCHED_CAPACITY_SCALE && rq_util >= max_util; 3193 } 3194 3195 /* 3196 * When uclamp is compiled in, the aggregation at rq level is 'turned off' 3197 * by default in the fast path and only gets turned on once userspace performs 3198 * an operation that requires it. 3199 * 3200 * Returns true if userspace opted-in to use uclamp and aggregation at rq level 3201 * hence is active. 
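 *
 * Keeping the check behind a static key means the aggregation code is a
 * patched-out NOP until then; e.g. uclamp_rq_is_capped() above returns
 * false without ever touching rq->uclamp[] when uclamp was never used.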
3202 */ 3203 static inline bool uclamp_is_used(void) 3204 { 3205 return static_branch_likely(&sched_uclamp_used); 3206 } 3207 3208 #define for_each_clamp_id(clamp_id) \ 3209 for ((clamp_id) = 0; (clamp_id) < UCLAMP_CNT; (clamp_id)++) 3210 3211 extern unsigned int sysctl_sched_uclamp_util_min_rt_default; 3212 3213 3214 static inline unsigned int uclamp_none(enum uclamp_id clamp_id) 3215 { 3216 if (clamp_id == UCLAMP_MIN) 3217 return 0; 3218 return SCHED_CAPACITY_SCALE; 3219 } 3220 3221 /* Integer rounded range for each bucket */ 3222 #define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS) 3223 3224 static inline unsigned int uclamp_bucket_id(unsigned int clamp_value) 3225 { 3226 return min_t(unsigned int, clamp_value / UCLAMP_BUCKET_DELTA, UCLAMP_BUCKETS - 1); 3227 } 3228 3229 static inline void 3230 uclamp_se_set(struct uclamp_se *uc_se, unsigned int value, bool user_defined) 3231 { 3232 uc_se->value = value; 3233 uc_se->bucket_id = uclamp_bucket_id(value); 3234 uc_se->user_defined = user_defined; 3235 } 3236 3237 #else /* !CONFIG_UCLAMP_TASK: */ 3238 3239 static inline unsigned long 3240 uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id) 3241 { 3242 if (clamp_id == UCLAMP_MIN) 3243 return 0; 3244 3245 return SCHED_CAPACITY_SCALE; 3246 } 3247 3248 static inline bool uclamp_rq_is_capped(struct rq *rq) { return false; } 3249 3250 static inline bool uclamp_is_used(void) 3251 { 3252 return false; 3253 } 3254 3255 static inline unsigned long 3256 uclamp_rq_get(struct rq *rq, enum uclamp_id clamp_id) 3257 { 3258 if (clamp_id == UCLAMP_MIN) 3259 return 0; 3260 3261 return SCHED_CAPACITY_SCALE; 3262 } 3263 3264 static inline void 3265 uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id, unsigned int value) 3266 { 3267 } 3268 3269 static inline bool uclamp_rq_is_idle(struct rq *rq) 3270 { 3271 return false; 3272 } 3273 3274 #endif /* !CONFIG_UCLAMP_TASK */ 3275 3276 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ 3277 3278 static inline unsigned long cpu_util_irq(struct rq *rq) 3279 { 3280 return READ_ONCE(rq->avg_irq.util_avg); 3281 } 3282 3283 static inline 3284 unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max) 3285 { 3286 util *= (max - irq); 3287 util /= max; 3288 3289 return util; 3290 3291 } 3292 3293 #else /* !CONFIG_HAVE_SCHED_AVG_IRQ: */ 3294 3295 static inline unsigned long cpu_util_irq(struct rq *rq) 3296 { 3297 return 0; 3298 } 3299 3300 static inline 3301 unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max) 3302 { 3303 return util; 3304 } 3305 3306 #endif /* !CONFIG_HAVE_SCHED_AVG_IRQ */ 3307 3308 #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) 3309 3310 #define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus))) 3311 3312 DECLARE_STATIC_KEY_FALSE(sched_energy_present); 3313 3314 static inline bool sched_energy_enabled(void) 3315 { 3316 return static_branch_unlikely(&sched_energy_present); 3317 } 3318 3319 extern struct cpufreq_governor schedutil_gov; 3320 3321 #else /* ! 
(CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */ 3322 3323 #define perf_domain_span(pd) NULL 3324 3325 static inline bool sched_energy_enabled(void) { return false; } 3326 3327 #endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */ 3328 3329 #ifdef CONFIG_MEMBARRIER 3330 3331 /* 3332 * The scheduler provides memory barriers required by membarrier between: 3333 * - prior user-space memory accesses and store to rq->membarrier_state, 3334 * - store to rq->membarrier_state and following user-space memory accesses. 3335 * In the same way it provides those guarantees around store to rq->curr. 3336 */ 3337 static inline void membarrier_switch_mm(struct rq *rq, 3338 struct mm_struct *prev_mm, 3339 struct mm_struct *next_mm) 3340 { 3341 int membarrier_state; 3342 3343 if (prev_mm == next_mm) 3344 return; 3345 3346 membarrier_state = atomic_read(&next_mm->membarrier_state); 3347 if (READ_ONCE(rq->membarrier_state) == membarrier_state) 3348 return; 3349 3350 WRITE_ONCE(rq->membarrier_state, membarrier_state); 3351 } 3352 3353 #else /* !CONFIG_MEMBARRIER :*/ 3354 3355 static inline void membarrier_switch_mm(struct rq *rq, 3356 struct mm_struct *prev_mm, 3357 struct mm_struct *next_mm) 3358 { 3359 } 3360 3361 #endif /* !CONFIG_MEMBARRIER */ 3362 3363 #ifdef CONFIG_SMP 3364 static inline bool is_per_cpu_kthread(struct task_struct *p) 3365 { 3366 if (!(p->flags & PF_KTHREAD)) 3367 return false; 3368 3369 if (p->nr_cpus_allowed != 1) 3370 return false; 3371 3372 return true; 3373 } 3374 #endif 3375 3376 extern void swake_up_all_locked(struct swait_queue_head *q); 3377 extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); 3378 3379 extern int try_to_wake_up(struct task_struct *tsk, unsigned int state, int wake_flags); 3380 3381 #ifdef CONFIG_PREEMPT_DYNAMIC 3382 extern int preempt_dynamic_mode; 3383 extern int sched_dynamic_mode(const char *str); 3384 extern void sched_dynamic_update(int mode); 3385 #endif 3386 3387 #ifdef CONFIG_SCHED_MM_CID 3388 3389 #define SCHED_MM_CID_PERIOD_NS (100ULL * 1000000) /* 100ms */ 3390 #define MM_CID_SCAN_DELAY 100 /* 100ms */ 3391 3392 extern raw_spinlock_t cid_lock; 3393 extern int use_cid_lock; 3394 3395 extern void sched_mm_cid_migrate_from(struct task_struct *t); 3396 extern void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t); 3397 extern void task_tick_mm_cid(struct rq *rq, struct task_struct *curr); 3398 extern void init_sched_mm_cid(struct task_struct *t); 3399 3400 static inline void __mm_cid_put(struct mm_struct *mm, int cid) 3401 { 3402 if (cid < 0) 3403 return; 3404 cpumask_clear_cpu(cid, mm_cidmask(mm)); 3405 } 3406 3407 /* 3408 * The per-mm/cpu cid can have the MM_CID_LAZY_PUT flag set or transition to 3409 * the MM_CID_UNSET state without holding the rq lock, but the rq lock needs to 3410 * be held to transition to other states. 3411 * 3412 * State transitions synchronized with cmpxchg or try_cmpxchg need to be 3413 * consistent across CPUs, which prevents use of this_cpu_cmpxchg. 
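 *
 * E.g. (illustrative): a remote scan can mark a CPU's cid lazily dropped
 * with cmpxchg(); the owner later completes the release, mirroring what
 * mm_cid_put_lazy() below does:
 *
 *	if (mm_cid_is_lazy_put(cid) &&
 *	    try_cmpxchg(&this_cpu_ptr(pcpu_cid)->cid, &cid, MM_CID_UNSET))
 *		__mm_cid_put(mm, mm_cid_clear_lazy_put(cid));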
3414 */ 3415 static inline void mm_cid_put_lazy(struct task_struct *t) 3416 { 3417 struct mm_struct *mm = t->mm; 3418 struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid; 3419 int cid; 3420 3421 lockdep_assert_irqs_disabled(); 3422 cid = __this_cpu_read(pcpu_cid->cid); 3423 if (!mm_cid_is_lazy_put(cid) || 3424 !try_cmpxchg(&this_cpu_ptr(pcpu_cid)->cid, &cid, MM_CID_UNSET)) 3425 return; 3426 __mm_cid_put(mm, mm_cid_clear_lazy_put(cid)); 3427 } 3428 3429 static inline int mm_cid_pcpu_unset(struct mm_struct *mm) 3430 { 3431 struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid; 3432 int cid, res; 3433 3434 lockdep_assert_irqs_disabled(); 3435 cid = __this_cpu_read(pcpu_cid->cid); 3436 for (;;) { 3437 if (mm_cid_is_unset(cid)) 3438 return MM_CID_UNSET; 3439 /* 3440 * Attempt transition from valid or lazy-put to unset. 3441 */ 3442 res = cmpxchg(&this_cpu_ptr(pcpu_cid)->cid, cid, MM_CID_UNSET); 3443 if (res == cid) 3444 break; 3445 cid = res; 3446 } 3447 return cid; 3448 } 3449 3450 static inline void mm_cid_put(struct mm_struct *mm) 3451 { 3452 int cid; 3453 3454 lockdep_assert_irqs_disabled(); 3455 cid = mm_cid_pcpu_unset(mm); 3456 if (cid == MM_CID_UNSET) 3457 return; 3458 __mm_cid_put(mm, mm_cid_clear_lazy_put(cid)); 3459 } 3460 3461 static inline int __mm_cid_try_get(struct mm_struct *mm) 3462 { 3463 struct cpumask *cpumask; 3464 int cid; 3465 3466 cpumask = mm_cidmask(mm); 3467 /* 3468 * Retry finding first zero bit if the mask is temporarily 3469 * filled. This only happens during concurrent remote-clear 3470 * which owns a cid without holding a rq lock. 3471 */ 3472 for (;;) { 3473 cid = cpumask_first_zero(cpumask); 3474 if (cid < nr_cpu_ids) 3475 break; 3476 cpu_relax(); 3477 } 3478 if (cpumask_test_and_set_cpu(cid, cpumask)) 3479 return -1; 3480 3481 return cid; 3482 } 3483 3484 /* 3485 * Save a snapshot of the current runqueue time of this cpu 3486 * with the per-cpu cid value, allowing to estimate how recently it was used. 3487 */ 3488 static inline void mm_cid_snapshot_time(struct rq *rq, struct mm_struct *mm) 3489 { 3490 struct mm_cid *pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(rq)); 3491 3492 lockdep_assert_rq_held(rq); 3493 WRITE_ONCE(pcpu_cid->time, rq->clock); 3494 } 3495 3496 static inline int __mm_cid_get(struct rq *rq, struct mm_struct *mm) 3497 { 3498 int cid; 3499 3500 /* 3501 * All allocations (even those using the cid_lock) are lock-free. If 3502 * use_cid_lock is set, hold the cid_lock to perform cid allocation to 3503 * guarantee forward progress. 3504 */ 3505 if (!READ_ONCE(use_cid_lock)) { 3506 cid = __mm_cid_try_get(mm); 3507 if (cid >= 0) 3508 goto end; 3509 raw_spin_lock(&cid_lock); 3510 } else { 3511 raw_spin_lock(&cid_lock); 3512 cid = __mm_cid_try_get(mm); 3513 if (cid >= 0) 3514 goto unlock; 3515 } 3516 3517 /* 3518 * cid concurrently allocated. Retry while forcing following 3519 * allocations to use the cid_lock to ensure forward progress. 3520 */ 3521 WRITE_ONCE(use_cid_lock, 1); 3522 /* 3523 * Set use_cid_lock before allocation. Only care about program order 3524 * because this is only required for forward progress. 3525 */ 3526 barrier(); 3527 /* 3528 * Retry until it succeeds. It is guaranteed to eventually succeed once 3529 * all newcoming allocations observe the use_cid_lock flag set. 3530 */ 3531 do { 3532 cid = __mm_cid_try_get(mm); 3533 cpu_relax(); 3534 } while (cid < 0); 3535 /* 3536 * Allocate before clearing use_cid_lock. Only care about 3537 * program order because this is for forward progress. 
3538 */ 3539 barrier(); 3540 WRITE_ONCE(use_cid_lock, 0); 3541 unlock: 3542 raw_spin_unlock(&cid_lock); 3543 end: 3544 mm_cid_snapshot_time(rq, mm); 3545 3546 return cid; 3547 } 3548 3549 static inline int mm_cid_get(struct rq *rq, struct mm_struct *mm) 3550 { 3551 struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid; 3552 struct cpumask *cpumask; 3553 int cid; 3554 3555 lockdep_assert_rq_held(rq); 3556 cpumask = mm_cidmask(mm); 3557 cid = __this_cpu_read(pcpu_cid->cid); 3558 if (mm_cid_is_valid(cid)) { 3559 mm_cid_snapshot_time(rq, mm); 3560 return cid; 3561 } 3562 if (mm_cid_is_lazy_put(cid)) { 3563 if (try_cmpxchg(&this_cpu_ptr(pcpu_cid)->cid, &cid, MM_CID_UNSET)) 3564 __mm_cid_put(mm, mm_cid_clear_lazy_put(cid)); 3565 } 3566 cid = __mm_cid_get(rq, mm); 3567 __this_cpu_write(pcpu_cid->cid, cid); 3568 3569 return cid; 3570 } 3571 3572 static inline void switch_mm_cid(struct rq *rq, 3573 struct task_struct *prev, 3574 struct task_struct *next) 3575 { 3576 /* 3577 * Provide a memory barrier between rq->curr store and load of 3578 * {prev,next}->mm->pcpu_cid[cpu] on rq->curr->mm transition. 3579 * 3580 * Should be adapted if context_switch() is modified. 3581 */ 3582 if (!next->mm) { // to kernel 3583 /* 3584 * user -> kernel transition does not guarantee a barrier, but 3585 * we can use the fact that it performs an atomic operation in 3586 * mmgrab(). 3587 */ 3588 if (prev->mm) // from user 3589 smp_mb__after_mmgrab(); 3590 /* 3591 * kernel -> kernel transition does not change rq->curr->mm 3592 * state. It stays NULL. 3593 */ 3594 } else { // to user 3595 /* 3596 * kernel -> user transition does not provide a barrier 3597 * between rq->curr store and load of {prev,next}->mm->pcpu_cid[cpu]. 3598 * Provide it here. 3599 */ 3600 if (!prev->mm) { // from kernel 3601 smp_mb(); 3602 } else { // from user 3603 /* 3604 * user->user transition relies on an implicit 3605 * memory barrier in switch_mm() when 3606 * current->mm changes. If the architecture 3607 * switch_mm() does not have an implicit memory 3608 * barrier, it is emitted here. If current->mm 3609 * is unchanged, no barrier is needed. 
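 *
 * (An architecture whose switch_mm() already implies a full barrier can
 * define smp_mb__after_switch_mm() as a no-op; the generic fallback is
 * assumed to be a full smp_mb().)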
3610 */ 3611 smp_mb__after_switch_mm(); 3612 } 3613 } 3614 if (prev->mm_cid_active) { 3615 mm_cid_snapshot_time(rq, prev->mm); 3616 mm_cid_put_lazy(prev); 3617 prev->mm_cid = -1; 3618 } 3619 if (next->mm_cid_active) 3620 next->last_mm_cid = next->mm_cid = mm_cid_get(rq, next->mm); 3621 } 3622 3623 #else /* !CONFIG_SCHED_MM_CID: */ 3624 static inline void switch_mm_cid(struct rq *rq, struct task_struct *prev, struct task_struct *next) { } 3625 static inline void sched_mm_cid_migrate_from(struct task_struct *t) { } 3626 static inline void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t) { } 3627 static inline void task_tick_mm_cid(struct rq *rq, struct task_struct *curr) { } 3628 static inline void init_sched_mm_cid(struct task_struct *t) { } 3629 #endif /* !CONFIG_SCHED_MM_CID */ 3630 3631 extern u64 avg_vruntime(struct cfs_rq *cfs_rq); 3632 extern int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se); 3633 3634 #ifdef CONFIG_RT_MUTEXES 3635 3636 static inline int __rt_effective_prio(struct task_struct *pi_task, int prio) 3637 { 3638 if (pi_task) 3639 prio = min(prio, pi_task->prio); 3640 3641 return prio; 3642 } 3643 3644 static inline int rt_effective_prio(struct task_struct *p, int prio) 3645 { 3646 struct task_struct *pi_task = rt_mutex_get_top_task(p); 3647 3648 return __rt_effective_prio(pi_task, prio); 3649 } 3650 3651 #else /* !CONFIG_RT_MUTEXES: */ 3652 3653 static inline int rt_effective_prio(struct task_struct *p, int prio) 3654 { 3655 return prio; 3656 } 3657 3658 #endif /* !CONFIG_RT_MUTEXES */ 3659 3660 extern int __sched_setscheduler(struct task_struct *p, const struct sched_attr *attr, bool user, bool pi); 3661 extern int __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx); 3662 extern void __setscheduler_prio(struct task_struct *p, int prio); 3663 extern void set_load_weight(struct task_struct *p, bool update_load); 3664 extern void enqueue_task(struct rq *rq, struct task_struct *p, int flags); 3665 extern bool dequeue_task(struct rq *rq, struct task_struct *p, int flags); 3666 3667 extern void check_class_changed(struct rq *rq, struct task_struct *p, 3668 const struct sched_class *prev_class, 3669 int oldprio); 3670 3671 #ifdef CONFIG_SMP 3672 extern struct balance_callback *splice_balance_callbacks(struct rq *rq); 3673 extern void balance_callbacks(struct rq *rq, struct balance_callback *head); 3674 #else 3675 3676 static inline struct balance_callback *splice_balance_callbacks(struct rq *rq) 3677 { 3678 return NULL; 3679 } 3680 3681 static inline void balance_callbacks(struct rq *rq, struct balance_callback *head) 3682 { 3683 } 3684 3685 #endif 3686 3687 #endif /* _KERNEL_SCHED_SCHED_H */ 3688