/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Scheduler internal types and methods:
 */
#ifndef _KERNEL_SCHED_SCHED_H
#define _KERNEL_SCHED_SCHED_H

#include <linux/prandom.h>
#include <linux/sched/affinity.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/cpufreq.h>
#include <linux/sched/deadline.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/mm.h>
#include <linux/sched/rseq_api.h>
#include <linux/sched/signal.h>
#include <linux/sched/smt.h>
#include <linux/sched/stat.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/task_flags.h>
#include <linux/sched/task.h>
#include <linux/sched/topology.h>
#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/capability.h>
#include <linux/cgroup_api.h>
#include <linux/cgroup.h>
#include <linux/context_tracking.h>
#include <linux/cpufreq.h>
#include <linux/cpumask_api.h>
#include <linux/ctype.h>
#include <linux/file.h>
#include <linux/fs_api.h>
#include <linux/hrtimer_api.h>
#include <linux/interrupt.h>
#include <linux/irq_work.h>
#include <linux/jiffies.h>
#include <linux/kref_api.h>
#include <linux/kthread.h>
#include <linux/ktime_api.h>
#include <linux/lockdep_api.h>
#include <linux/lockdep.h>
#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex_api.h>
#include <linux/plist.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
#include <linux/psi.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include <linux/seqlock.h>
#include <linux/softirq.h>
#include <linux/spinlock_api.h>
#include <linux/static_key.h>
#include <linux/stop_machine.h>
#include <linux/syscalls_api.h>
#include <linux/syscalls.h>
#include <linux/tick.h>
#include <linux/topology.h>
#include <linux/types.h>
#include <linux/u64_stats_sync_api.h>
#include <linux/uaccess.h>
#include <linux/wait_api.h>
#include <linux/wait_bit.h>
#include <linux/workqueue_api.h>
#include <linux/delayacct.h>
#include <linux/mmu_context.h>

#include <trace/events/power.h>
#include <trace/events/sched.h>

#include "../workqueue_internal.h"

struct rq;
struct cfs_rq;
struct rt_rq;
struct sched_group;
struct cpuidle_state;

#ifdef CONFIG_PARAVIRT
# include <asm/paravirt.h>
# include <asm/paravirt_api_clock.h>
#endif

#include <asm/barrier.h>

#include "cpupri.h"
#include "cpudeadline.h"

/* task_struct::on_rq states: */
#define TASK_ON_RQ_QUEUED	1
#define TASK_ON_RQ_MIGRATING	2

extern __read_mostly int scheduler_running;

extern unsigned long calc_load_update;
extern atomic_long_t calc_load_tasks;

extern void calc_global_load_tick(struct rq *this_rq);
extern long calc_load_fold_active(struct rq *this_rq, long adjust);

extern void call_trace_sched_update_nr_running(struct rq *rq, int count);

extern int sysctl_sched_rt_period;
extern int sysctl_sched_rt_runtime;
extern int sched_rr_timeslice;

/*
 * Asymmetric CPU capacity bits
 */
struct asym_cap_data {
	struct list_head	link;
	struct rcu_head		rcu;
	unsigned long		capacity;
	unsigned long		cpus[];
};

extern struct list_head asym_cap_list;

#define cpu_capacity_span(asym_data)	to_cpumask((asym_data)->cpus)

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(time)	((unsigned long)(time) / (NSEC_PER_SEC/HZ))

/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (e.g. nice +19 on an autogroup), deeper task-group
 * hierarchies, especially on larger systems. This is not a user-visible change
 * and does not change the user-interface for setting shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit
 * are pretty high and the returns do not justify the increased costs.
 *
 * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to
 * increase coverage and consistency always enable it on 64-bit platforms.
 */
#ifdef CONFIG_64BIT
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
# define scale_load_down(w)					\
({								\
	unsigned long __w = (w);				\
								\
	if (__w)						\
		__w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT);	\
	__w;							\
})
#else
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		(w)
# define scale_load_down(w)	(w)
#endif

/*
 * Task weight (visible to users) and its load (invisible to users) have
 * independent resolution, but they should be well calibrated. We use
 * scale_load() and scale_load_down(w) to convert between them. The
 * following must be true:
 *
 *	scale_load(sched_prio_to_weight[NICE_TO_PRIO(0)-MAX_RT_PRIO]) == NICE_0_LOAD
 *
 */
#define NICE_0_LOAD		(1L << NICE_0_LOAD_SHIFT)
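
/*
 * Illustrative sketch (not used by the scheduler): how the nice-0 weight of
 * 1024 moves through the helpers above, assuming the usual
 * SCHED_FIXEDPOINT_SHIFT of 10.
 */
static inline bool scale_load_roundtrip_example(void)
{
	unsigned long w = 1024;			/* sched_prio_to_weight[] entry for nice 0 */
	unsigned long l = scale_load(w);	/* 1024 << 10 on 64-bit, 1024 on 32-bit */

	/* scale_load_down() undoes scale_load(), modulo the low-weight clamp: */
	return scale_load_down(l) == w && l == NICE_0_LOAD;
}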

/*
 * Single value that decides SCHED_DEADLINE internal math precision.
 * 10 -> just above 1us
 *  9 -> just above 0.5us
 */
#define DL_SCALE		10

/*
 * Single value that denotes runtime == period, i.e. unlimited time.
 */
#define RUNTIME_INF		((u64)~0ULL)

static inline int idle_policy(int policy)
{
	return policy == SCHED_IDLE;
}

static inline int normal_policy(int policy)
{
#ifdef CONFIG_SCHED_CLASS_EXT
	if (policy == SCHED_EXT)
		return true;
#endif
	return policy == SCHED_NORMAL;
}

static inline int fair_policy(int policy)
{
	return normal_policy(policy) || policy == SCHED_BATCH;
}

static inline int rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static inline int dl_policy(int policy)
{
	return policy == SCHED_DEADLINE;
}

static inline bool valid_policy(int policy)
{
	return idle_policy(policy) || fair_policy(policy) ||
		rt_policy(policy) || dl_policy(policy);
}

static inline int task_has_idle_policy(struct task_struct *p)
{
	return idle_policy(p->policy);
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

static inline int task_has_dl_policy(struct task_struct *p)
{
	return dl_policy(p->policy);
}

#define cap_scale(v, s)		((v)*(s) >> SCHED_CAPACITY_SHIFT)

static inline void update_avg(u64 *avg, u64 sample)
{
	s64 diff = sample - *avg;

	*avg += diff / 8;
}

/*
 * Shifting a value by an exponent greater than *or equal* to the size of said
 * value is UB; cap at size-1.
 */
#define shr_bound(val, shift)							\
	(val >> min_t(typeof(shift), shift, BITS_PER_TYPE(typeof(val)) - 1))

/*
 * cgroup weight knobs should use the common MIN, DFL and MAX values which are
 * 1, 100 and 10000 respectively. While it loses a bit of range on both ends,
 * it maps pretty well onto the shares value used by the scheduler and the
 * round-trip conversions preserve the original value over the entire range.
 */
static inline unsigned long sched_weight_from_cgroup(unsigned long cgrp_weight)
{
	return DIV_ROUND_CLOSEST_ULL(cgrp_weight * 1024, CGROUP_WEIGHT_DFL);
}

static inline unsigned long sched_weight_to_cgroup(unsigned long weight)
{
	return clamp_t(unsigned long,
		       DIV_ROUND_CLOSEST_ULL(weight * CGROUP_WEIGHT_DFL, 1024),
		       CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX);
}
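
/*
 * Illustrative sketch: the round-trip property claimed above. The default
 * cgroup weight (CGROUP_WEIGHT_DFL == 100) maps to the default scheduler
 * shares value of 1024 and converts back without loss.
 */
static inline bool cgroup_weight_roundtrip_example(void)
{
	unsigned long shares = sched_weight_from_cgroup(CGROUP_WEIGHT_DFL);	/* 1024 */

	return sched_weight_to_cgroup(shares) == CGROUP_WEIGHT_DFL;
}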

/*
 * !! For sched_setattr_nocheck() (kernel) only !!
 *
 * This is actually gross. :(
 *
 * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE
 * tasks, but still be able to sleep. We need this on platforms that cannot
 * atomically change clock frequency. Remove once fast switching is available
 * on such platforms.
 *
 * SUGOV stands for SchedUtil GOVernor.
 */
#define SCHED_FLAG_SUGOV	0x10000000

#define SCHED_DL_FLAGS		(SCHED_FLAG_RECLAIM | SCHED_FLAG_DL_OVERRUN | SCHED_FLAG_SUGOV)

static inline bool dl_entity_is_special(const struct sched_dl_entity *dl_se)
{
#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
	return unlikely(dl_se->flags & SCHED_FLAG_SUGOV);
#else
	return false;
#endif
}

/*
 * Tells whether entity @a should preempt entity @b.
 */
static inline bool dl_entity_preempt(const struct sched_dl_entity *a,
				     const struct sched_dl_entity *b)
{
	return dl_entity_is_special(a) ||
	       dl_time_before(a->deadline, b->deadline);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;
	ktime_t			rt_period;
	u64			rt_runtime;
	struct hrtimer		rt_period_timer;
	unsigned int		rt_period_active;
};

static inline int dl_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/*
 * To keep the bandwidth of -deadline tasks under control
 * we need some place where:
 *  - store the maximum -deadline bandwidth of each CPU;
 *  - cache the fraction of bandwidth that is currently allocated in
 *    each root domain;
 *
 * This is all done in the data structure below. It is similar to the
 * one used for RT-throttling (rt_bandwidth), with the main difference
 * that, since here we are only interested in admission control, we
 * do not decrease any runtime while the group "executes", nor do we
 * need a timer to replenish it.
 *
 * With respect to SMP, bandwidth is given on a per root domain basis,
 * meaning that:
 *  - bw (< 100%) is the deadline bandwidth of each CPU;
 *  - total_bw is the currently allocated bandwidth in each root domain;
 */
struct dl_bw {
	raw_spinlock_t		lock;
	u64			bw;
	u64			total_bw;
};

extern void init_dl_bw(struct dl_bw *dl_b);
extern int  sched_dl_global_validate(void);
extern void sched_dl_do_global(void);
extern int  sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr);
extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr);
extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
extern bool __checkparam_dl(const struct sched_attr *attr);
extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
extern int  dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern int  dl_bw_deactivate(int cpu);
extern s64 dl_scaled_delta_exec(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec);
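
/*
 * Illustrative sketch (hypothetical helper, not the kernel's admission path):
 * the essence of dl_bw based admission control. A new reservation of @new_bw
 * fits on a root domain spanning @cpus CPUs iff the allocated total stays
 * within cpus * bw. The real check also handles bandwidth being released.
 */
static inline bool dl_bw_fits_example(struct dl_bw *dl_b, int cpus, u64 new_bw)
{
	/* The caller would hold dl_b->lock. */
	return dl_b->bw != (u64)-1 &&
	       (u64)cpus * dl_b->bw >= dl_b->total_bw + new_bw;
}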

/*
 * SCHED_DEADLINE supports servers (nested scheduling) with the following
 * interface:
 *
 *   dl_se::rq -- runqueue we belong to.
 *
 *   dl_se::server_pick() -- nested pick_next_task(); we yield the period if
 *                           this returns NULL.
 *
 *   dl_server_update() -- called from update_curr_common(), propagates runtime
 *                         to the server.
 *
 *   dl_server_start() -- start the server when it has tasks; it will stop
 *                        automatically when there are no more tasks, per
 *                        dl_se::server_pick() returning NULL.
 *
 *   dl_server_stop() -- (force) stop the server; use when updating
 *                       parameters.
 *
 *   dl_server_init() -- initializes the server.
 *
 * When started, the dl_server will (per dl_defer) schedule a timer for its
 * zero-laxity point -- that is, unlike regular EDF tasks which run ASAP, a
 * server will run at the very end of its period.
 *
 * This is done such that any runtime from the target class can be accounted
 * against the server -- through dl_server_update() above -- such that when it
 * becomes time to run, it might already be out of runtime and get deferred
 * until the next period. In this case dl_server_timer() will alternate
 * between defer and replenish but never actually enqueue the server.
 *
 * Only when the target class does not manage to exhaust the server's runtime
 * (there's actually starvation in the given period), will the dl_server get
 * on the runqueue. Once queued it will pick tasks from the target class and
 * run them until either its runtime is exhausted, at which point it's back to
 * dl_server_timer, or until there are no more tasks to run, at which point
 * the dl_server stops itself.
 *
 * By stopping at this point the dl_server retains bandwidth, which, if a new
 * task wakes up imminently (starting the server again), can be used --
 * subject to CBS wakeup rules -- without having to wait for the next period.
 *
 * Additionally, because of the dl_defer behaviour the start/stop behaviour is
 * naturally throttled to once per period, which keeps high context-switch
 * workloads from spamming the hrtimer program/cancel paths.
 */
extern void dl_server_update_idle(struct sched_dl_entity *dl_se, s64 delta_exec);
extern void dl_server_update(struct sched_dl_entity *dl_se, s64 delta_exec);
extern void dl_server_start(struct sched_dl_entity *dl_se);
extern void dl_server_stop(struct sched_dl_entity *dl_se);
extern void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq,
			   dl_server_pick_f pick_task);
extern void sched_init_dl_servers(void);

extern void fair_server_init(struct rq *rq);
extern void __dl_server_attach_root(struct sched_dl_entity *dl_se, struct rq *rq);
extern int dl_server_apply_params(struct sched_dl_entity *dl_se,
				  u64 runtime, u64 period, bool init);

static inline bool dl_server_active(struct sched_dl_entity *dl_se)
{
	return dl_se->dl_server_active;
}
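
/*
 * Illustrative usage sketch (hypothetical names; fair_server_init() in
 * fair.c is the real instance of this pattern):
 *
 *	dl_server_init(&rq->my_server, rq, my_server_pick);	// boot time
 *	dl_server_start(&rq->my_server);			// first task shows up
 *	...
 *	dl_server_stop(&rq->my_server);				// before parameter updates
 *
 * where my_server_pick() returns the next task of the target class, or NULL
 * when there is none, which is what eventually stops the server again.
 */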

#ifdef CONFIG_CGROUP_SCHED

extern struct list_head task_groups;

#ifdef CONFIG_GROUP_SCHED_BANDWIDTH
extern const u64 max_bw_quota_period_us;

/*
 * default period for group bandwidth.
 * default: 0.1s, units: microseconds
 */
static inline u64 default_bw_period_us(void)
{
	return 100000ULL;
}
#endif /* CONFIG_GROUP_SCHED_BANDWIDTH */

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t		lock;
	ktime_t			period;
	u64			quota;
	u64			runtime;
	u64			burst;
	u64			runtime_snap;
	s64			hierarchical_quota;

	u8			idle;
	u8			period_active;
	u8			slack_started;
	struct hrtimer		period_timer;
	struct hrtimer		slack_timer;
	struct list_head	throttled_cfs_rq;

	/* Statistics: */
	int			nr_periods;
	int			nr_throttled;
	int			nr_burst;
	u64			throttled_time;
	u64			burst_time;
#endif /* CONFIG_CFS_BANDWIDTH */
};

/* Task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_GROUP_SCHED_WEIGHT
	/* A positive value indicates that this is a SCHED_IDLE group. */
	int			idle;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each CPU */
	struct sched_entity	**se;
	/* runqueue "owned" by this group on each CPU */
	struct cfs_rq		**cfs_rq;
	unsigned long		shares;
	/*
	 * load_avg can be heavily contended at clock tick time, so put
	 * it in its own cache-line separated from the fields above which
	 * will also be accessed at each tick.
	 */
	atomic_long_t		load_avg ____cacheline_aligned;
#endif /* CONFIG_FAIR_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity	**rt_se;
	struct rt_rq		**rt_rq;

	struct rt_bandwidth	rt_bandwidth;
#endif

	struct scx_task_group	scx;

	struct rcu_head		rcu;
	struct list_head	list;

	struct task_group	*parent;
	struct list_head	siblings;
	struct list_head	children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup	*autogroup;
#endif

	struct cfs_bandwidth	cfs_bandwidth;

#ifdef CONFIG_UCLAMP_TASK_GROUP
	/* The two decimal precision [%] value requested from user-space */
	unsigned int		uclamp_pct[UCLAMP_CNT];
	/* Clamp values requested for a task group */
	struct uclamp_se	uclamp_req[UCLAMP_CNT];
	/* Effective clamp values used for a task group */
	struct uclamp_se	uclamp[UCLAMP_CNT];
#endif

};

#ifdef CONFIG_GROUP_SCHED_WEIGHT
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities queued on
 * it, so neither an entity's weight nor a task group's shares value should be
 * too large.
 * (The default weight is 1024 - so there's no practical
 *  limitation from this.)
 */
#define MIN_SHARES		(1UL <<  1)
#define MAX_SHARES		(1UL << 18)
#endif

typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data);

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}
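
/*
 * Illustrative sketch (hypothetical visitor, not used anywhere): count all
 * task groups in the hierarchy. tg_nop() below is the stock no-op visitor
 * for walks that only need one direction:
 *
 *	int n = 0;
 *
 *	rcu_read_lock();
 *	walk_tg_tree(example_count_tg, tg_nop, &n);
 *	rcu_read_unlock();
 */
static inline int example_count_tg(struct task_group *tg, void *data)
{
	(*(int *)data)++;

	return 0;	/* non-zero would abort the walk */
}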

static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct task_group, css) : NULL;
}

extern int tg_nop(struct task_group *tg, void *data);

#ifdef CONFIG_FAIR_GROUP_SCHED
extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void online_fair_sched_group(struct task_group *tg);
extern void unregister_fair_sched_group(struct task_group *tg);
#else /* !CONFIG_FAIR_GROUP_SCHED: */
static inline void free_fair_sched_group(struct task_group *tg) { }
static inline int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
	return 1;
}
static inline void online_fair_sched_group(struct task_group *tg) { }
static inline void unregister_fair_sched_group(struct task_group *tg) { }
#endif /* !CONFIG_FAIR_GROUP_SCHED */

extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			      struct sched_entity *se, int cpu,
			      struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b, struct cfs_bandwidth *parent);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
extern bool cfs_task_bw_constrained(struct task_struct *p);

extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
			     struct sched_rt_entity *rt_se, int cpu,
			     struct sched_rt_entity *parent);
extern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us);
extern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us);
extern long sched_group_rt_runtime(struct task_group *tg);
extern long sched_group_rt_period(struct task_group *tg);
extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_online_group(struct task_group *tg,
			       struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_release_group(struct task_group *tg);

extern void sched_move_task(struct task_struct *tsk, bool for_autogroup);

#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

extern int sched_group_set_idle(struct task_group *tg, long idle);

extern void set_task_rq_fair(struct sched_entity *se,
			     struct cfs_rq *prev, struct cfs_rq *next);
#else /* !CONFIG_FAIR_GROUP_SCHED: */
static inline int sched_group_set_shares(struct task_group *tg, unsigned long shares) { return 0; }
static inline int sched_group_set_idle(struct task_group *tg, long idle) { return 0; }
#endif /* !CONFIG_FAIR_GROUP_SCHED */

#else /* !CONFIG_CGROUP_SCHED: */

struct cfs_bandwidth { };

static inline bool cfs_task_bw_constrained(struct task_struct *p) { return false; }

#endif /* !CONFIG_CGROUP_SCHED */

extern void unregister_rt_sched_group(struct task_group *tg);
extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);

/*
 * u64_u32_load/u64_u32_store
 *
 * Use a copy of a u64 value to protect against data races. This is only
 * applicable for 32-bit architectures.
 */
#ifdef CONFIG_64BIT
# define u64_u32_load_copy(var, copy)		var
# define u64_u32_store_copy(var, copy, val)	(var = val)
#else
# define u64_u32_load_copy(var, copy)					\
({									\
	u64 __val, __val_copy;						\
	do {								\
		__val_copy = copy;					\
		/*							\
		 * paired with u64_u32_store_copy(), ordering access	\
		 * to var and copy.					\
		 */							\
		smp_rmb();						\
		__val = var;						\
	} while (__val != __val_copy);					\
	__val;								\
})
# define u64_u32_store_copy(var, copy, val)				\
do {									\
	typeof(val) __val = (val);					\
	var = __val;							\
	/*								\
	 * paired with u64_u32_load_copy(), ordering access to var and	\
	 * copy.							\
	 */								\
	smp_wmb();							\
	copy = __val;							\
} while (0)
#endif
# define u64_u32_load(var)	u64_u32_load_copy(var, var##_copy)
# define u64_u32_store(var, val) u64_u32_store_copy(var, var##_copy, val)
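
/*
 * Illustrative sketch (hypothetical fields): a u64 timestamp that 32-bit
 * readers can load locklessly. The writer publishes through the shadow
 * *_copy field; a torn read makes the reader retry until both halves agree:
 *
 *	struct foo {
 *		u64	stamp;
 *		u64	stamp_copy;	// only needed for !CONFIG_64BIT
 *	};
 *
 *	u64_u32_store(foo->stamp, now);		// writer, serialized by a lock
 *	t = u64_u32_load(foo->stamp);		// lockless reader
 *
 * On 64-bit both macros compile down to a plain access of @var.
 */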

struct balance_callback {
	struct balance_callback *next;
	void (*func)(struct rq *rq);
};

/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight	load;
	unsigned int		nr_queued;
	unsigned int		h_nr_queued;	/* SCHED_{NORMAL,BATCH,IDLE} */
	unsigned int		h_nr_runnable;	/* SCHED_{NORMAL,BATCH,IDLE} */
	unsigned int		h_nr_idle;	/* SCHED_IDLE */

	s64			avg_vruntime;
	u64			avg_load;

	u64			zero_vruntime;
#ifdef CONFIG_SCHED_CORE
	unsigned int		forceidle_seq;
	u64			zero_vruntime_fi;
#endif

	struct rb_root_cached	tasks_timeline;

	/*
	 * 'curr' points to currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e. when none are currently running).
	 */
	struct sched_entity	*curr;
	struct sched_entity	*next;

	/*
	 * CFS load tracking
	 */
	struct sched_avg	avg;
#ifndef CONFIG_64BIT
	u64			last_update_time_copy;
#endif
	struct {
		raw_spinlock_t	lock ____cacheline_aligned;
		int		nr;
		unsigned long	load_avg;
		unsigned long	util_avg;
		unsigned long	runnable_avg;
	} removed;

#ifdef CONFIG_FAIR_GROUP_SCHED
	u64			last_update_tg_load_avg;
	unsigned long		tg_load_avg_contrib;
	long			propagate;
	long			prop_runnable_sum;

	/*
	 *   h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long		h_load;
	u64			last_h_load_update;
	struct sched_entity	*h_load_next;
#endif /* CONFIG_FAIR_GROUP_SCHED */

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq		*rq;	/* CPU runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a CPU.
	 * This list is used during load balance.
	 */
	int			on_list;
	struct list_head	leaf_cfs_rq_list;
	struct task_group	*tg;	/* group that "owns" this runqueue */

	/* Locally cached copy of our task_group's idle value */
	int			idle;

#ifdef CONFIG_CFS_BANDWIDTH
	int			runtime_enabled;
	s64			runtime_remaining;

	u64			throttled_pelt_idle;
#ifndef CONFIG_64BIT
	u64			throttled_pelt_idle_copy;
#endif
	u64			throttled_clock;
	u64			throttled_clock_pelt;
	u64			throttled_clock_pelt_time;
	u64			throttled_clock_self;
	u64			throttled_clock_self_time;
	bool			throttled:1;
	bool			pelt_clock_throttled:1;
	int			throttle_count;
	struct list_head	throttled_list;
	struct list_head	throttled_csd_list;
	struct list_head	throttled_limbo_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};

#ifdef CONFIG_SCHED_CLASS_EXT
/* scx_rq->flags, protected by the rq lock */
enum scx_rq_flags {
	/*
	 * A hotplugged CPU starts scheduling before rq_online_scx(). Track
	 * ops.cpu_on/offline() state so that ops.enqueue/dispatch() are called
	 * only while the BPF scheduler considers the CPU to be online.
	 */
	SCX_RQ_ONLINE		= 1 << 0,
	SCX_RQ_CAN_STOP_TICK	= 1 << 1,
	SCX_RQ_BAL_KEEP		= 1 << 3, /* balance decided to keep current */
	SCX_RQ_BYPASSING	= 1 << 4,
	SCX_RQ_CLK_VALID	= 1 << 5, /* RQ clock is fresh and valid */
	SCX_RQ_BAL_CB_PENDING	= 1 << 6, /* must queue a cb after dispatching */

	SCX_RQ_IN_WAKEUP	= 1 << 16,
	SCX_RQ_IN_BALANCE	= 1 << 17,
};

struct scx_rq {
	struct scx_dispatch_q	local_dsq;
	struct list_head	runnable_list;		/* runnable tasks on this rq */
	struct list_head	ddsp_deferred_locals;	/* deferred ddsps from enq */
	unsigned long		ops_qseq;
	u64			extra_enq_flags;	/* see move_task_to_local_dsq() */
	u32			nr_running;
	u32			cpuperf_target;		/* [0, SCHED_CAPACITY_SCALE] */
	bool			cpu_released;
	u32			flags;
	u64			clock;			/* current per-rq clock -- see scx_bpf_now() */
	cpumask_var_t		cpus_to_kick;
	cpumask_var_t		cpus_to_kick_if_idle;
	cpumask_var_t		cpus_to_preempt;
	cpumask_var_t		cpus_to_wait;
	unsigned long		kick_sync;
	local_t			reenq_local_deferred;
	struct balance_callback	deferred_bal_cb;
	struct irq_work		deferred_irq_work;
	struct irq_work		kick_cpus_irq_work;
	struct scx_dispatch_q	bypass_dsq;
};
#endif /* CONFIG_SCHED_CLASS_EXT */

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/* RT IPI pull logic requires IRQ_WORK */
#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP)
# define HAVE_RT_PUSH_IPI
#endif

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array	active;
	unsigned int		rt_nr_running;
	unsigned int		rr_nr_running;
	struct {
		int		curr; /* highest queued rt task prio */
		int		next; /* next highest */
	} highest_prio;
	bool			overloaded;
	struct plist_head	pushable_tasks;

	int			rt_queued;

#ifdef CONFIG_RT_GROUP_SCHED
	int			rt_throttled;
	u64			rt_time;    /* consumed RT time, goes up in update_curr_rt */
	u64			rt_runtime; /* allotted RT time, "slice" from rt_bandwidth, RT sharing/balancing */
	/* Nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;

	unsigned int		rt_nr_boosted;

	struct rq		*rq;	/* this is always top-level rq, cache? */
#endif
#ifdef CONFIG_CGROUP_SCHED
	struct task_group	*tg;	/* this tg has "this" rt_rq on given CPU for runnable entities */
#endif
};

static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq)
{
	return rt_rq->rt_queued && rt_rq->rt_nr_running;
}

/* Deadline class' related fields in a runqueue */
struct dl_rq {
	/* runqueue is an rbtree, ordered by deadline */
	struct rb_root_cached	root;

	unsigned int		dl_nr_running;

	/*
	 * Deadline values of the currently executing and the
	 * earliest ready task on this rq. Caching these facilitates
	 * the decision whether or not a ready but not running task
	 * should migrate somewhere else.
	 */
	struct {
		u64		curr;
		u64		next;
	} earliest_dl;

	bool			overloaded;

	/*
	 * Tasks on this rq that can be pushed away. They are kept in
	 * an rb-tree, ordered by tasks' deadlines, with caching
	 * of the leftmost (earliest deadline) element.
	 */
	struct rb_root_cached	pushable_dl_tasks_root;

	/*
	 * "Active utilization" for this runqueue: increased when a
	 * task wakes up (becomes TASK_RUNNING) and decreased when a
	 * task blocks
	 */
	u64			running_bw;

	/*
	 * Utilization of the tasks "assigned" to this runqueue (including
	 * the tasks that are in runqueue and the tasks that executed on this
	 * CPU and blocked). Increased when a task moves to this runqueue, and
	 * decreased when the task moves away (migrates, changes scheduling
	 * policy, or terminates).
	 * This is needed to compute the "inactive utilization" for the
	 * runqueue (inactive utilization = this_bw - running_bw).
	 */
	u64			this_bw;
	u64			extra_bw;

	/*
	 * Maximum available bandwidth for reclaiming by SCHED_FLAG_RECLAIM
	 * tasks of this rq. Used in calculation of reclaimable bandwidth (GRUB).
	 */
	u64			max_bw;

	/*
	 * Inverse of the fraction of CPU utilization that can be reclaimed
	 * by the GRUB algorithm.
	 */
	u64			bw_ratio;
};
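
/*
 * Illustrative sketch (hypothetical helper): the "inactive utilization"
 * described for dl_rq::this_bw above is the bandwidth of tasks assigned to
 * this runqueue that are currently blocked.
 */
static inline u64 dl_inactive_bw_example(struct dl_rq *dl_rq)
{
	return dl_rq->this_bw - dl_rq->running_bw;
}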

#ifdef CONFIG_FAIR_GROUP_SCHED

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)

static inline void se_update_runnable(struct sched_entity *se)
{
	if (!entity_is_task(se))
		se->runnable_weight = se->my_q->h_nr_runnable;
}

static inline long se_runnable(struct sched_entity *se)
{
	if (se->sched_delayed)
		return false;

	if (entity_is_task(se))
		return !!se->on_rq;
	else
		return se->runnable_weight;
}

#else /* !CONFIG_FAIR_GROUP_SCHED: */

#define entity_is_task(se)	1

static inline void se_update_runnable(struct sched_entity *se) { }

static inline long se_runnable(struct sched_entity *se)
{
	if (se->sched_delayed)
		return false;

	return !!se->on_rq;
}

#endif /* !CONFIG_FAIR_GROUP_SCHED */

/*
 * XXX we want to get rid of these helpers and use the full load resolution.
 */
static inline long se_weight(struct sched_entity *se)
{
	return scale_load_down(se->load.weight);
}


static inline bool sched_asym_prefer(int a, int b)
{
	return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
}

struct perf_domain {
	struct em_perf_domain *em_pd;
	struct perf_domain *next;
	struct rcu_head rcu;
};

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member CPUs from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 *
 */
struct root_domain {
	atomic_t		refcount;
	atomic_t		rto_count;
	struct rcu_head		rcu;
	cpumask_var_t		span;
	cpumask_var_t		online;

	/*
	 * Indicate pullable load on at least one CPU, e.g.:
	 * - More than one runnable task
	 * - Running task is misfit
	 */
	bool			overloaded;

	/* Indicate one or more CPUs over-utilized (tipping point) */
	bool			overutilized;

	/*
	 * The bit corresponding to a CPU gets set here if such CPU has more
	 * than one runnable -deadline task (as it is below for RT tasks).
	 */
	cpumask_var_t		dlo_mask;
	atomic_t		dlo_count;
	struct dl_bw		dl_bw;
	struct cpudl		cpudl;

	/*
	 * Indicate whether a root_domain's dl_bw has been checked or
	 * updated. It's a monotonically increasing value.
	 *
	 * Corner cases like wrap-around would in principle be a concern,
	 * but given that u64 is 'big enough' they are not in practice.
	 */
	u64			visit_cookie;

#ifdef HAVE_RT_PUSH_IPI
	/*
	 * For IPI pull requests, loop across the rto_mask.
	 */
	struct irq_work		rto_push_work;
	raw_spinlock_t		rto_lock;
	/* These are only updated and read within rto_lock */
	int			rto_loop;
	int			rto_cpu;
	/* These atomics are updated outside of a lock */
	atomic_t		rto_loop_next;
	atomic_t		rto_loop_start;
#endif /* HAVE_RT_PUSH_IPI */
	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t		rto_mask;
	struct cpupri		cpupri;

	/*
	 * NULL-terminated list of performance domains intersecting with the
	 * CPUs of the rd. Protected by RCU.
	 */
	struct perf_domain __rcu *pd;
};

extern void init_defrootdomain(void);
extern int sched_init_domains(const struct cpumask *cpu_map);
extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
extern void sched_get_rd(struct root_domain *rd);
extern void sched_put_rd(struct root_domain *rd);

static inline int get_rd_overloaded(struct root_domain *rd)
{
	return READ_ONCE(rd->overloaded);
}

static inline void set_rd_overloaded(struct root_domain *rd, int status)
{
	if (get_rd_overloaded(rd) != status)
		WRITE_ONCE(rd->overloaded, status);
}

#ifdef HAVE_RT_PUSH_IPI
extern void rto_push_irq_work_func(struct irq_work *work);
#endif

#ifdef CONFIG_UCLAMP_TASK
/*
 * struct uclamp_bucket - Utilization clamp bucket
 * @value: utilization clamp value for tasks on this clamp bucket
 * @tasks: number of RUNNABLE tasks on this clamp bucket
 *
 * Keep track of how many tasks are RUNNABLE for a given utilization
 * clamp value.
 */
struct uclamp_bucket {
	unsigned long value : bits_per(SCHED_CAPACITY_SCALE);
	unsigned long tasks : BITS_PER_LONG - bits_per(SCHED_CAPACITY_SCALE);
};

/*
 * struct uclamp_rq - rq's utilization clamp
 * @value: currently active clamp values for a rq
 * @bucket: utilization clamp buckets affecting a rq
 *
 * Keep track of RUNNABLE tasks on a rq to aggregate their clamp values.
 * A clamp value is affecting a rq when there is at least one task RUNNABLE
 * (or actually running) with that value.
 *
 * There are up to UCLAMP_CNT possible different clamp values, currently there
 * are only two: minimum utilization and maximum utilization.
 *
 * All utilization clamping values are MAX aggregated, since:
 * - for util_min: we want to run the CPU at least at the max of the minimum
 *   utilization required by its currently RUNNABLE tasks.
 * - for util_max: we want to allow the CPU to run up to the max of the
 *   maximum utilization allowed by its currently RUNNABLE tasks.
 *
 * Since on each system we expect only a limited number of different
 * utilization clamp values (UCLAMP_BUCKETS), use a simple array to track
 * the metrics required to compute all the per-rq utilization clamp values.
 */
struct uclamp_rq {
	unsigned int value;
	struct uclamp_bucket bucket[UCLAMP_BUCKETS];
};

DECLARE_STATIC_KEY_FALSE(sched_uclamp_used);
#endif /* CONFIG_UCLAMP_TASK */

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: code that wants to lock multiple runqueues (such as the
 * load balancing or thread migration code) must acquire the locks in
 * ascending &runqueue order.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t		__lock;

	/* Per class runqueue modification mask; bits in class order. */
	unsigned int		queue_mask;
	unsigned int		nr_running;
#ifdef CONFIG_NUMA_BALANCING
	unsigned int		nr_numa_running;
	unsigned int		nr_preferred_running;
	unsigned int		numa_migrate_on;
#endif
#ifdef CONFIG_NO_HZ_COMMON
	unsigned long		last_blocked_load_update_tick;
	unsigned int		has_blocked_load;
	call_single_data_t	nohz_csd;
	unsigned int		nohz_tick_stopped;
	atomic_t		nohz_flags;
#endif /* CONFIG_NO_HZ_COMMON */

	unsigned int		ttwu_pending;
	u64			nr_switches;

#ifdef CONFIG_UCLAMP_TASK
	/* Utilization clamp values based on CPU's RUNNABLE tasks */
	struct uclamp_rq	uclamp[UCLAMP_CNT] ____cacheline_aligned;
	unsigned int		uclamp_flags;
#define UCLAMP_FLAG_IDLE 0x01
#endif

	struct cfs_rq		cfs;
	struct rt_rq		rt;
	struct dl_rq		dl;
#ifdef CONFIG_SCHED_CLASS_EXT
	struct scx_rq		scx;
#endif

	struct sched_dl_entity	fair_server;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this CPU: */
	struct list_head	leaf_cfs_rq_list;
	struct list_head	*tmp_alone_branch;
#endif /* CONFIG_FAIR_GROUP_SCHED */

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long		nr_uninterruptible;

#ifdef CONFIG_SCHED_PROXY_EXEC
	struct task_struct __rcu	*donor;	/* Scheduling context */
	struct task_struct __rcu	*curr;	/* Execution context */
#else
	union {
		struct task_struct __rcu *donor; /* Scheduler context */
		struct task_struct __rcu *curr;  /* Execution context */
	};
#endif
	struct sched_dl_entity	*dl_server;
	struct task_struct	*idle;
	struct task_struct	*stop;
	unsigned long		next_balance;
	struct mm_struct	*prev_mm;

	unsigned int		clock_update_flags;
	u64			clock;
	/* Ensure that all clocks are in the same cache line */
	u64			clock_task ____cacheline_aligned;
	u64			clock_pelt;
	unsigned long		lost_idle_time;
	u64			clock_pelt_idle;
	u64			clock_idle;
#ifndef CONFIG_64BIT
	u64			clock_pelt_idle_copy;
	u64			clock_idle_copy;
#endif

	atomic_t		nr_iowait;

	u64			last_seen_need_resched_ns;
	int			ticks_without_resched;

#ifdef CONFIG_MEMBARRIER
	int membarrier_state;
#endif

	struct root_domain		*rd;
	struct sched_domain __rcu	*sd;

	unsigned long		cpu_capacity;

	struct balance_callback *balance_callback;

	unsigned char		nohz_idle_balance;
	unsigned char		idle_balance;

	unsigned long		misfit_task_load;

	/* For active balancing */
	int			active_balance;
	int			push_cpu;
	struct cpu_stop_work	active_balance_work;

	/* CPU of this runqueue: */
	int			cpu;
	int			online;

	struct list_head	cfs_tasks;

	struct sched_avg	avg_rt;
	struct sched_avg	avg_dl;
#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
	struct sched_avg	avg_irq;
#endif
#ifdef CONFIG_SCHED_HW_PRESSURE
	struct sched_avg	avg_hw;
#endif
	u64			idle_stamp;
	u64			avg_idle;

	/* This is used to determine avg_idle's max value */
	u64			max_idle_balance_cost;

#ifdef CONFIG_HOTPLUG_CPU
	struct rcuwait		hotplug_wait;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64			prev_irq_time;
	u64			psi_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64			prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64			prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long		calc_load_update;
	long			calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
	call_single_data_t	hrtick_csd;
	struct hrtimer		hrtick_timer;
	ktime_t			hrtick_time;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info	rq_sched_info;
	unsigned long long	rq_cpu_time;

	/* sys_sched_yield() stats */
	unsigned int		yld_count;

	/* schedule() stats */
	unsigned int		sched_count;
	unsigned int		sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int		ttwu_count;
	unsigned int		ttwu_local;
#endif

#ifdef CONFIG_CPU_IDLE
	/* Must be inspected within a RCU lock section */
	struct cpuidle_state	*idle_state;
#endif

	unsigned int		nr_pinned;
	unsigned int		push_busy;
	struct cpu_stop_work	push_work;

#ifdef CONFIG_SCHED_CORE
	/* per rq */
	struct rq		*core;
	struct task_struct	*core_pick;
	struct sched_dl_entity	*core_dl_server;
	unsigned int		core_enabled;
	unsigned int		core_sched_seq;
	struct rb_root		core_tree;

	/* shared state -- careful with sched_core_cpu_deactivate() */
	unsigned int		core_task_seq;
	unsigned int		core_pick_seq;
	unsigned long		core_cookie;
	unsigned int		core_forceidle_count;
	unsigned int		core_forceidle_seq;
	unsigned int		core_forceidle_occupation;
	u64			core_forceidle_start;
#endif /* CONFIG_SCHED_CORE */

	/* Scratch cpumask to be temporarily used under rq_lock */
	cpumask_var_t		scratch_mask;

#ifdef CONFIG_CFS_BANDWIDTH
	call_single_data_t	cfsb_csd;
	struct list_head	cfsb_csd_list;
#endif
};

#ifdef CONFIG_FAIR_GROUP_SCHED

/* CPU runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

#else /* !CONFIG_FAIR_GROUP_SCHED: */

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}
#endif /* !CONFIG_FAIR_GROUP_SCHED */

static inline int cpu_of(struct rq *rq)
{
	return rq->cpu;
}

#define MDF_PUSH		0x01

static inline bool is_migration_disabled(struct task_struct *p)
{
	return p->migration_disabled;
}

DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
DECLARE_PER_CPU(struct rnd_state, sched_rnd_state);

static inline u32 sched_rng(void)
{
	return prandom_u32_state(this_cpu_ptr(&sched_rnd_state));
}

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		this_cpu_ptr(&runqueues)
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		raw_cpu_ptr(&runqueues)

#ifdef CONFIG_SCHED_PROXY_EXEC
static inline void rq_set_donor(struct rq *rq, struct task_struct *t)
{
	rcu_assign_pointer(rq->donor, t);
}
#else
static inline void rq_set_donor(struct rq *rq, struct task_struct *t)
{
	/* Do nothing */
}
#endif

#ifdef CONFIG_SCHED_CORE
static inline struct cpumask *sched_group_span(struct sched_group *sg);

DECLARE_STATIC_KEY_FALSE(__sched_core_enabled);

static inline bool sched_core_enabled(struct rq *rq)
{
	return static_branch_unlikely(&__sched_core_enabled) && rq->core_enabled;
}

static inline bool sched_core_disabled(void)
{
	return !static_branch_unlikely(&__sched_core_enabled);
}

/*
 * Be careful with this function; not for general use. The return value isn't
 * stable unless you actually hold a relevant rq->__lock.
 */
static inline raw_spinlock_t *rq_lockp(struct rq *rq)
{
	if (sched_core_enabled(rq))
		return &rq->core->__lock;

	return &rq->__lock;
}

static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
{
	if (rq->core_enabled)
		return &rq->core->__lock;

	return &rq->__lock;
}
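
/*
 * Illustrative consequence of the above (hypothetical assertion): with core
 * scheduling enabled, every SMT sibling resolves to the core leader's lock,
 * so locking any sibling's rq locks the whole core:
 *
 *	WARN_ON_ONCE(__rq_lockp(rq) != __rq_lockp(rq->core));
 */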

extern bool
cfs_prio_less(const struct task_struct *a, const struct task_struct *b, bool fi);

extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);

/*
 * Helpers to check if the CPU's core cookie matches with the task's cookie
 * when core scheduling is enabled.
 * A special case is that the task's cookie always matches with CPU's core
 * cookie if the CPU is in an idle core.
 */
static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p)
{
	/* Ignore cookie match if core scheduler is not enabled on the CPU. */
	if (!sched_core_enabled(rq))
		return true;

	return rq->core->core_cookie == p->core_cookie;
}

static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p)
{
	bool idle_core = true;
	int cpu;

	/* Ignore cookie match if core scheduler is not enabled on the CPU. */
	if (!sched_core_enabled(rq))
		return true;

	if (rq->core->core_cookie == p->core_cookie)
		return true;

	for_each_cpu(cpu, cpu_smt_mask(cpu_of(rq))) {
		if (!available_idle_cpu(cpu)) {
			idle_core = false;
			break;
		}
	}

	/*
	 * A CPU in an idle core is always the best choice for tasks with
	 * cookies.
	 */
	return idle_core;
}

static inline bool sched_group_cookie_match(struct rq *rq,
					    struct task_struct *p,
					    struct sched_group *group)
{
	int cpu;

	/* Ignore cookie match if core scheduler is not enabled on the CPU. */
	if (!sched_core_enabled(rq))
		return true;

	for_each_cpu_and(cpu, sched_group_span(group), p->cpus_ptr) {
		if (sched_core_cookie_match(cpu_rq(cpu), p))
			return true;
	}
	return false;
}

static inline bool sched_core_enqueued(struct task_struct *p)
{
	return !RB_EMPTY_NODE(&p->core_node);
}

extern void sched_core_enqueue(struct rq *rq, struct task_struct *p);
extern void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags);

extern void sched_core_get(void);
extern void sched_core_put(void);

#else /* !CONFIG_SCHED_CORE: */

static inline bool sched_core_enabled(struct rq *rq)
{
	return false;
}

static inline bool sched_core_disabled(void)
{
	return true;
}

static inline raw_spinlock_t *rq_lockp(struct rq *rq)
{
	return &rq->__lock;
}

static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
{
	return &rq->__lock;
}

static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p)
{
	return true;
}

static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p)
{
	return true;
}

static inline bool sched_group_cookie_match(struct rq *rq,
					    struct task_struct *p,
					    struct sched_group *group)
{
	return true;
}

#endif /* !CONFIG_SCHED_CORE */

#ifdef CONFIG_RT_GROUP_SCHED
# ifdef CONFIG_RT_GROUP_SCHED_DEFAULT_DISABLED
DECLARE_STATIC_KEY_FALSE(rt_group_sched);
static inline bool rt_group_sched_enabled(void)
{
	return static_branch_unlikely(&rt_group_sched);
}
# else /* !CONFIG_RT_GROUP_SCHED_DEFAULT_DISABLED: */
DECLARE_STATIC_KEY_TRUE(rt_group_sched);
static inline bool rt_group_sched_enabled(void)
{
	return static_branch_likely(&rt_group_sched);
}
# endif /* !CONFIG_RT_GROUP_SCHED_DEFAULT_DISABLED */
#else /* !CONFIG_RT_GROUP_SCHED: */
# define rt_group_sched_enabled()	false
#endif /* !CONFIG_RT_GROUP_SCHED */

static inline void lockdep_assert_rq_held(struct rq *rq)
{
	lockdep_assert_held(__rq_lockp(rq));
}

extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass);
extern bool raw_spin_rq_trylock(struct rq *rq);
extern void raw_spin_rq_unlock(struct rq *rq);

static inline void raw_spin_rq_lock(struct rq *rq)
{
	raw_spin_rq_lock_nested(rq, 0);
}

static inline void raw_spin_rq_lock_irq(struct rq *rq)
{
	local_irq_disable();
	raw_spin_rq_lock(rq);
}

static inline void raw_spin_rq_unlock_irq(struct rq *rq)
{
	raw_spin_rq_unlock(rq);
	local_irq_enable();
}

static inline unsigned long _raw_spin_rq_lock_irqsave(struct rq *rq)
{
	unsigned long flags;

	local_irq_save(flags);
	raw_spin_rq_lock(rq);

	return flags;
}

static inline void raw_spin_rq_unlock_irqrestore(struct rq *rq, unsigned long flags)
{
	raw_spin_rq_unlock(rq);
	local_irq_restore(flags);
}

#define raw_spin_rq_lock_irqsave(rq, flags)	\
do {						\
	flags = _raw_spin_rq_lock_irqsave(rq);	\
} while (0)

#ifdef CONFIG_SCHED_SMT
extern void __update_idle_core(struct rq *rq);

static inline void update_idle_core(struct rq *rq)
{
	if (static_branch_unlikely(&sched_smt_present))
		__update_idle_core(rq);
}

#else /* !CONFIG_SCHED_SMT: */
static inline void update_idle_core(struct rq *rq) { }
#endif /* !CONFIG_SCHED_SMT */

#ifdef CONFIG_FAIR_GROUP_SCHED

static inline struct task_struct *task_of(struct sched_entity *se)
{
	WARN_ON_ONCE(!entity_is_task(se));
	return container_of(se, struct task_struct, se);
}

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(const struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

#else /* !CONFIG_FAIR_GROUP_SCHED: */

#define task_of(_se)	container_of(_se, struct task_struct, se)

static inline struct cfs_rq *task_cfs_rq(const struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(const struct sched_entity *se)
{
	const struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}

#endif /* !CONFIG_FAIR_GROUP_SCHED */

extern void update_rq_clock(struct rq *rq);

/*
 * rq::clock_update_flags bits
 *
 * %RQCF_REQ_SKIP - will request skipping of clock update on the next
 *   call to __schedule(). This is an optimisation to avoid
 *   neighbouring rq clock updates.
 *
 * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is
 *   in effect and calls to update_rq_clock() are being ignored.
 *
 * %RQCF_UPDATED - is a debug flag that indicates whether a call has been
 *   made to update_rq_clock() since the last time rq::lock was pinned.
 *
 * If inside of __schedule(), clock_update_flags will have been
 * shifted left (a left shift is a cheap operation for the fast path
 * to promote %RQCF_REQ_SKIP to %RQCF_ACT_SKIP), so you must use,
 *
 *	if (rq->clock_update_flags >= RQCF_UPDATED)
 *
 * to check if %RQCF_UPDATED is set. It'll never be shifted more than
 * one position though, because the next rq_unpin_lock() will shift it
 * back.
 */
#define RQCF_REQ_SKIP		0x01
#define RQCF_ACT_SKIP		0x02
#define RQCF_UPDATED		0x04

static inline void assert_clock_updated(struct rq *rq)
{
	/*
	 * The only reason for not seeing a clock update since the
	 * last rq_pin_lock() is if we're currently skipping updates.
	 */
	WARN_ON_ONCE(rq->clock_update_flags < RQCF_ACT_SKIP);
}

static inline u64 rq_clock(struct rq *rq)
{
	lockdep_assert_rq_held(rq);
	assert_clock_updated(rq);

	return rq->clock;
}

static inline u64 rq_clock_task(struct rq *rq)
{
	lockdep_assert_rq_held(rq);
	assert_clock_updated(rq);

	return rq->clock_task;
}

static inline void rq_clock_skip_update(struct rq *rq)
{
	lockdep_assert_rq_held(rq);
	rq->clock_update_flags |= RQCF_REQ_SKIP;
}

/*
 * See rt task throttling, which is the only time a skip
 * request is canceled.
 */
static inline void rq_clock_cancel_skipupdate(struct rq *rq)
{
	lockdep_assert_rq_held(rq);
	rq->clock_update_flags &= ~RQCF_REQ_SKIP;
}

/*
 * During CPU offlining and rq-wide unthrottling, we can trigger an
 * update_rq_clock() for several cfs and rt runqueues (typically when
 * using list_for_each_entry_*).
 * rq_clock_start_loop_update() can be called after updating the clock
 * once and before iterating over the list to prevent multiple updates.
 * After the iterative traversal, we need to call rq_clock_stop_loop_update()
 * to clear RQCF_ACT_SKIP of rq->clock_update_flags.
 */
static inline void rq_clock_start_loop_update(struct rq *rq)
{
	lockdep_assert_rq_held(rq);
	WARN_ON_ONCE(rq->clock_update_flags & RQCF_ACT_SKIP);
	rq->clock_update_flags |= RQCF_ACT_SKIP;
}

static inline void rq_clock_stop_loop_update(struct rq *rq)
{
	lockdep_assert_rq_held(rq);
	rq->clock_update_flags &= ~RQCF_ACT_SKIP;
}
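
/*
 * Illustrative sketch of the pattern described above (hypothetical loop):
 *
 *	rq_lock_irqsave(rq, &rf);
 *	update_rq_clock(rq);
 *	rq_clock_start_loop_update(rq);
 *	list_for_each_entry(...)
 *		...;	// nested update_rq_clock() calls are now no-ops
 *	rq_clock_stop_loop_update(rq);
 *	rq_unlock_irqrestore(rq, &rf);
 */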

struct rq_flags {
	unsigned long flags;
	struct pin_cookie cookie;
	/*
	 * A copy of (rq::clock_update_flags & RQCF_UPDATED) for the
	 * current pin context is stashed here in case it needs to be
	 * restored in rq_repin_lock().
	 */
	unsigned int clock_update_flags;
};

extern struct balance_callback balance_push_callback;

#ifdef CONFIG_SCHED_CLASS_EXT
extern const struct sched_class ext_sched_class;

DECLARE_STATIC_KEY_FALSE(__scx_enabled);	/* SCX BPF scheduler loaded */
DECLARE_STATIC_KEY_FALSE(__scx_switched_all);	/* all fair class tasks on SCX */

#define scx_enabled()		static_branch_unlikely(&__scx_enabled)
#define scx_switched_all()	static_branch_unlikely(&__scx_switched_all)

static inline void scx_rq_clock_update(struct rq *rq, u64 clock)
{
	if (!scx_enabled())
		return;
	WRITE_ONCE(rq->scx.clock, clock);
	smp_store_release(&rq->scx.flags, rq->scx.flags | SCX_RQ_CLK_VALID);
}

static inline void scx_rq_clock_invalidate(struct rq *rq)
{
	if (!scx_enabled())
		return;
	WRITE_ONCE(rq->scx.flags, rq->scx.flags & ~SCX_RQ_CLK_VALID);
}

#else /* !CONFIG_SCHED_CLASS_EXT: */
#define scx_enabled()		false
#define scx_switched_all()	false

static inline void scx_rq_clock_update(struct rq *rq, u64 clock) {}
static inline void scx_rq_clock_invalidate(struct rq *rq) {}
#endif /* !CONFIG_SCHED_CLASS_EXT */

/*
 * Lockdep annotation that avoids accidental unlocks; it's like a
 * sticky/continuous lockdep_assert_held().
 *
 * This avoids code that has access to 'struct rq *rq' (basically everything in
 * the scheduler) from accidentally unlocking the rq if they do not also have a
 * copy of the (on-stack) 'struct rq_flags rf'.
 *
 * Also see Documentation/locking/lockdep-design.rst.
 */
static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
{
	rf->cookie = lockdep_pin_lock(__rq_lockp(rq));

	rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
	rf->clock_update_flags = 0;
	WARN_ON_ONCE(rq->balance_callback && rq->balance_callback != &balance_push_callback);
}

static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
{
	if (rq->clock_update_flags > RQCF_ACT_SKIP)
		rf->clock_update_flags = RQCF_UPDATED;

	scx_rq_clock_invalidate(rq);
	lockdep_unpin_lock(__rq_lockp(rq), rf->cookie);
}

static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
{
	lockdep_repin_lock(__rq_lockp(rq), rf->cookie);

	/*
	 * Restore the value we stashed in @rf for this pin context.
	 */
	rq->clock_update_flags |= rf->clock_update_flags;
}

extern
struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(rq->lock);

extern
struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(p->pi_lock)
	__acquires(rq->lock);

static inline void
__task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
	__releases(rq->lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_rq_unlock(rq);
}

static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
	__releases(rq->lock)
	__releases(p->pi_lock)
{
	__task_rq_unlock(rq, p, rf);
	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
}

DEFINE_LOCK_GUARD_1(task_rq_lock, struct task_struct,
		    _T->rq = task_rq_lock(_T->lock, &_T->rf),
		    task_rq_unlock(_T->rq, _T->lock, &_T->rf),
		    struct rq *rq; struct rq_flags rf)

DEFINE_LOCK_GUARD_1(__task_rq_lock, struct task_struct,
		    _T->rq = __task_rq_lock(_T->lock, &_T->rf),
		    __task_rq_unlock(_T->rq, _T->lock, &_T->rf),
		    struct rq *rq; struct rq_flags rf)

static inline void rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
	__acquires(rq->lock)
{
	raw_spin_rq_lock_irqsave(rq, rf->flags);
	rq_pin_lock(rq, rf);
}

static inline void rq_lock_irq(struct rq *rq, struct rq_flags *rf)
	__acquires(rq->lock)
{
	raw_spin_rq_lock_irq(rq);
	rq_pin_lock(rq, rf);
}

static inline void rq_lock(struct rq *rq, struct rq_flags *rf)
	__acquires(rq->lock)
{
	raw_spin_rq_lock(rq);
	rq_pin_lock(rq, rf);
}

static inline void rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
	__releases(rq->lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_rq_unlock_irqrestore(rq, rf->flags);
}

static inline void rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
	__releases(rq->lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_rq_unlock_irq(rq);
}

static inline void rq_unlock(struct rq *rq, struct rq_flags *rf)
	__releases(rq->lock)
{
	rq_unpin_lock(rq, rf);
	raw_spin_rq_unlock(rq);
}

DEFINE_LOCK_GUARD_1(rq_lock, struct rq,
		    rq_lock(_T->lock, &_T->rf),
		    rq_unlock(_T->lock, &_T->rf),
		    struct rq_flags rf)

DEFINE_LOCK_GUARD_1(rq_lock_irq, struct rq,
		    rq_lock_irq(_T->lock, &_T->rf),
		    rq_unlock_irq(_T->lock, &_T->rf),
		    struct rq_flags rf)

DEFINE_LOCK_GUARD_1(rq_lock_irqsave, struct rq,
		    rq_lock_irqsave(_T->lock, &_T->rf),
		    rq_unlock_irqrestore(_T->lock, &_T->rf),
		    struct rq_flags rf)

static inline struct rq *this_rq_lock_irq(struct rq_flags *rf)
	__acquires(rq->lock)
{
	struct rq *rq;

	local_irq_disable();
	rq = this_rq();
	rq_lock(rq, rf);

	return rq;
}
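
/*
 * Illustrative sketch (hypothetical caller): the guards above remove the
 * explicit unlock. With the task_rq_lock guard, both p->pi_lock and the rq
 * lock are dropped automatically at scope exit:
 *
 *	scoped_guard (task_rq_lock, p) {
 *		update_rq_clock(scope.rq);	// p's rq is pinned and locked
 *	}
 */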
sched_numa_find_closest(const struct cpumask *cpus, int cpu); 1953 1954 #else /* !CONFIG_NUMA: */ 1955 1956 static inline void sched_init_numa(int offline_node) { } 1957 static inline void sched_update_numa(int cpu, bool online) { } 1958 static inline void sched_domains_numa_masks_set(unsigned int cpu) { } 1959 static inline void sched_domains_numa_masks_clear(unsigned int cpu) { } 1960 1961 static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu) 1962 { 1963 return nr_cpu_ids; 1964 } 1965 1966 #endif /* !CONFIG_NUMA */ 1967 1968 #ifdef CONFIG_NUMA_BALANCING 1969 1970 /* The regions in numa_faults array from task_struct */ 1971 enum numa_faults_stats { 1972 NUMA_MEM = 0, 1973 NUMA_CPU, 1974 NUMA_MEMBUF, 1975 NUMA_CPUBUF 1976 }; 1977 1978 extern void sched_setnuma(struct task_struct *p, int node); 1979 extern int migrate_task_to(struct task_struct *p, int cpu); 1980 extern int migrate_swap(struct task_struct *p, struct task_struct *t, 1981 int cpu, int scpu); 1982 extern void init_numa_balancing(u64 clone_flags, struct task_struct *p); 1983 1984 #else /* !CONFIG_NUMA_BALANCING: */ 1985 1986 static inline void 1987 init_numa_balancing(u64 clone_flags, struct task_struct *p) 1988 { 1989 } 1990 1991 #endif /* !CONFIG_NUMA_BALANCING */ 1992 1993 static inline void 1994 queue_balance_callback(struct rq *rq, 1995 struct balance_callback *head, 1996 void (*func)(struct rq *rq)) 1997 { 1998 lockdep_assert_rq_held(rq); 1999 2000 /* 2001 * Don't (re)queue an already queued item; nor queue anything when 2002 * balance_push() is active, see the comment with 2003 * balance_push_callback. 2004 */ 2005 if (unlikely(head->next || rq->balance_callback == &balance_push_callback)) 2006 return; 2007 2008 head->func = func; 2009 head->next = rq->balance_callback; 2010 rq->balance_callback = head; 2011 } 2012 2013 #define rcu_dereference_check_sched_domain(p) \ 2014 rcu_dereference_check((p), lockdep_is_held(&sched_domains_mutex)) 2015 2016 /* 2017 * The domain tree (rq->sd) is protected by RCU's quiescent state transition. 2018 * See destroy_sched_domains: call_rcu for details. 2019 * 2020 * The domain tree of any CPU may only be accessed from within 2021 * preempt-disabled sections. 2022 */ 2023 #define for_each_domain(cpu, __sd) \ 2024 for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \ 2025 __sd; __sd = __sd->parent) 2026 2027 /* A mask of all the SD flags that have the SDF_SHARED_CHILD metaflag */ 2028 #define SD_FLAG(name, mflags) (name * !!((mflags) & SDF_SHARED_CHILD)) | 2029 static const unsigned int SD_SHARED_CHILD_MASK = 2030 #include <linux/sched/sd_flags.h> 2031 0; 2032 #undef SD_FLAG 2033 2034 /** 2035 * highest_flag_domain - Return highest sched_domain containing flag. 2036 * @cpu: The CPU whose highest level of sched domain is to 2037 * be returned. 2038 * @flag: The flag to check for the highest sched_domain 2039 * for the given CPU. 2040 * 2041 * Returns the highest sched_domain of a CPU which contains @flag. If @flag has 2042 * the SDF_SHARED_CHILD metaflag, all the children domains also have @flag. 2043 */ 2044 static inline struct sched_domain *highest_flag_domain(int cpu, int flag) 2045 { 2046 struct sched_domain *sd, *hsd = NULL; 2047 2048 for_each_domain(cpu, sd) { 2049 if (sd->flags & flag) { 2050 hsd = sd; 2051 continue; 2052 } 2053 2054 /* 2055 * Stop the search if @flag is known to be shared at lower 2056 * levels. It will not be found further up. 
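 *
 * E.g. a flag such as SD_SHARE_LLC carries SDF_SHARED_CHILD: every
 * child of a domain that has it has it as well, so the first domain
 * seen without @flag ends the search.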
2057 */
2058 if (flag & SD_SHARED_CHILD_MASK)
2059 break;
2060 }
2061
2062 return hsd;
2063 }
2064
2065 static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
2066 {
2067 struct sched_domain *sd;
2068
2069 for_each_domain(cpu, sd) {
2070 if (sd->flags & flag)
2071 break;
2072 }
2073
2074 return sd;
2075 }
2076
2077 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc);
2078 DECLARE_PER_CPU(int, sd_llc_size);
2079 DECLARE_PER_CPU(int, sd_llc_id);
2080 DECLARE_PER_CPU(int, sd_share_id);
2081 DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
2082 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa);
2083 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
2084 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
2085
2086 extern struct static_key_false sched_asym_cpucapacity;
2087 extern struct static_key_false sched_cluster_active;
2088
2089 static __always_inline bool sched_asym_cpucap_active(void)
2090 {
2091 return static_branch_unlikely(&sched_asym_cpucapacity);
2092 }
2093
2094 struct sched_group_capacity {
2095 atomic_t ref;
2096 /*
2097 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
2098 * for a single CPU.
2099 */
2100 unsigned long capacity;
2101 unsigned long min_capacity; /* Min per-CPU capacity in group */
2102 unsigned long max_capacity; /* Max per-CPU capacity in group */
2103 unsigned long next_update;
2104 int imbalance; /* XXX unrelated to capacity but shared group state */
2105
2106 int id;
2107
2108 unsigned long cpumask[]; /* Balance mask */
2109 };
2110
2111 struct sched_group {
2112 struct sched_group *next; /* Must be a circular list */
2113 atomic_t ref;
2114
2115 unsigned int group_weight;
2116 unsigned int cores;
2117 struct sched_group_capacity *sgc;
2118 int asym_prefer_cpu; /* CPU of highest priority in group */
2119 int flags;
2120
2121 /*
2122 * The CPUs this group covers.
2123 *
2124 * NOTE: this field is variable length. (Allocated dynamically
2125 * by attaching extra space to the end of the structure,
2126 * depending on how many CPUs the kernel has booted up with)
2127 */
2128 unsigned long cpumask[];
2129 };
2130
2131 static inline struct cpumask *sched_group_span(struct sched_group *sg)
2132 {
2133 return to_cpumask(sg->cpumask);
2134 }
2135
2136 /*
2137 * See build_balance_mask().
2138 */
2139 static inline struct cpumask *group_balance_mask(struct sched_group *sg)
2140 {
2141 return to_cpumask(sg->sgc->cpumask);
2142 }
2143
2144 extern int group_balance_cpu(struct sched_group *sg);
2145
2146 extern void update_sched_domain_debugfs(void);
2147 extern void dirty_sched_domain_sysctl(int cpu);
2148
2149 extern int sched_update_scaling(void);
2150
2151 static inline const struct cpumask *task_user_cpus(struct task_struct *p)
2152 {
2153 if (!p->user_cpus_ptr)
2154 return cpu_possible_mask; /* &init_task.cpus_mask */
2155 return p->user_cpus_ptr;
2156 }
2157
2158 #ifdef CONFIG_CGROUP_SCHED
2159
2160 /*
2161 * Return the group to which this task belongs.
2162 *
2163 * We cannot use task_css() and friends because the cgroup subsystem
2164 * changes that value before the cgroup_subsys::attach() method is called,
2165 * so we cannot pin it and might observe the wrong value.
2166 *
2167 * The same is true for autogroup's p->signal->autogroup->tg: the autogroup
2168 * core changes this before calling sched_move_task().
2169 *
2170 * Instead we use a 'copy' which is updated from sched_move_task() while
2171 * holding both task_struct::pi_lock and rq::lock.
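 *
 * A reader therefore wants to hold one of those two locks; e.g. roughly
 * (illustrative sketch only):
 *
 *	struct rq_flags rf;
 *	struct rq *rq = task_rq_lock(p, &rf);	// p->pi_lock + rq->lock
 *	struct task_group *tg = task_group(p);	// stable while locked
 *
 *	task_rq_unlock(rq, p, &rf);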
2172 */ 2173 static inline struct task_group *task_group(struct task_struct *p) 2174 { 2175 return p->sched_task_group; 2176 } 2177 2178 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */ 2179 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) 2180 { 2181 #if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED) 2182 struct task_group *tg = task_group(p); 2183 #endif 2184 2185 #ifdef CONFIG_FAIR_GROUP_SCHED 2186 set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]); 2187 p->se.cfs_rq = tg->cfs_rq[cpu]; 2188 p->se.parent = tg->se[cpu]; 2189 p->se.depth = tg->se[cpu] ? tg->se[cpu]->depth + 1 : 0; 2190 #endif 2191 2192 #ifdef CONFIG_RT_GROUP_SCHED 2193 /* 2194 * p->rt.rt_rq is NULL initially and it is easier to assign 2195 * root_task_group's rt_rq than switching in rt_rq_of_se() 2196 * Clobbers tg(!) 2197 */ 2198 if (!rt_group_sched_enabled()) 2199 tg = &root_task_group; 2200 p->rt.rt_rq = tg->rt_rq[cpu]; 2201 p->rt.parent = tg->rt_se[cpu]; 2202 #endif /* CONFIG_RT_GROUP_SCHED */ 2203 } 2204 2205 #else /* !CONFIG_CGROUP_SCHED: */ 2206 2207 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { } 2208 2209 static inline struct task_group *task_group(struct task_struct *p) 2210 { 2211 return NULL; 2212 } 2213 2214 #endif /* !CONFIG_CGROUP_SCHED */ 2215 2216 static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) 2217 { 2218 set_task_rq(p, cpu); 2219 #ifdef CONFIG_SMP 2220 /* 2221 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be 2222 * successfully executed on another CPU. We must ensure that updates of 2223 * per-task data have been completed by this moment. 2224 */ 2225 smp_wmb(); 2226 WRITE_ONCE(task_thread_info(p)->cpu, cpu); 2227 p->wake_cpu = cpu; 2228 rseq_sched_set_ids_changed(p); 2229 #endif /* CONFIG_SMP */ 2230 } 2231 2232 /* 2233 * Tunables: 2234 */ 2235 2236 #define SCHED_FEAT(name, enabled) \ 2237 __SCHED_FEAT_##name , 2238 2239 enum { 2240 #include "features.h" 2241 __SCHED_FEAT_NR, 2242 }; 2243 2244 #undef SCHED_FEAT 2245 2246 /* 2247 * To support run-time toggling of sched features, all the translation units 2248 * (but core.c) reference the sysctl_sched_features defined in core.c. 2249 */ 2250 extern __read_mostly unsigned int sysctl_sched_features; 2251 2252 #ifdef CONFIG_JUMP_LABEL 2253 2254 #define SCHED_FEAT(name, enabled) \ 2255 static __always_inline bool static_branch_##name(struct static_key *key) \ 2256 { \ 2257 return static_key_##enabled(key); \ 2258 } 2259 2260 #include "features.h" 2261 #undef SCHED_FEAT 2262 2263 extern struct static_key sched_feat_keys[__SCHED_FEAT_NR]; 2264 #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x])) 2265 2266 #else /* !CONFIG_JUMP_LABEL: */ 2267 2268 #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) 2269 2270 #endif /* !CONFIG_JUMP_LABEL */ 2271 2272 extern struct static_key_false sched_numa_balancing; 2273 extern struct static_key_false sched_schedstats; 2274 2275 static inline u64 global_rt_period(void) 2276 { 2277 return (u64)sysctl_sched_rt_period * NSEC_PER_USEC; 2278 } 2279 2280 static inline u64 global_rt_runtime(void) 2281 { 2282 if (sysctl_sched_rt_runtime < 0) 2283 return RUNTIME_INF; 2284 2285 return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC; 2286 } 2287 2288 /* 2289 * Is p the current execution context? 
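 * (I.e. is p the task actually running on this CPU; under proxy
 * execution this can differ from the scheduling context, see
 * task_current_donor() below.)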
2290 */ 2291 static inline int task_current(struct rq *rq, struct task_struct *p) 2292 { 2293 return rq->curr == p; 2294 } 2295 2296 /* 2297 * Is p the current scheduling context? 2298 * 2299 * Note that it might be the current execution context at the same time if 2300 * rq->curr == rq->donor == p. 2301 */ 2302 static inline int task_current_donor(struct rq *rq, struct task_struct *p) 2303 { 2304 return rq->donor == p; 2305 } 2306 2307 static inline bool task_is_blocked(struct task_struct *p) 2308 { 2309 if (!sched_proxy_exec()) 2310 return false; 2311 2312 return !!p->blocked_on; 2313 } 2314 2315 static inline int task_on_cpu(struct rq *rq, struct task_struct *p) 2316 { 2317 return p->on_cpu; 2318 } 2319 2320 static inline int task_on_rq_queued(struct task_struct *p) 2321 { 2322 return READ_ONCE(p->on_rq) == TASK_ON_RQ_QUEUED; 2323 } 2324 2325 static inline int task_on_rq_migrating(struct task_struct *p) 2326 { 2327 return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING; 2328 } 2329 2330 /* Wake flags. The first three directly map to some SD flag value */ 2331 #define WF_EXEC 0x02 /* Wakeup after exec; maps to SD_BALANCE_EXEC */ 2332 #define WF_FORK 0x04 /* Wakeup after fork; maps to SD_BALANCE_FORK */ 2333 #define WF_TTWU 0x08 /* Wakeup; maps to SD_BALANCE_WAKE */ 2334 2335 #define WF_SYNC 0x10 /* Waker goes to sleep after wakeup */ 2336 #define WF_MIGRATED 0x20 /* Internal use, task got migrated */ 2337 #define WF_CURRENT_CPU 0x40 /* Prefer to move the wakee to the current CPU. */ 2338 #define WF_RQ_SELECTED 0x80 /* ->select_task_rq() was called */ 2339 2340 static_assert(WF_EXEC == SD_BALANCE_EXEC); 2341 static_assert(WF_FORK == SD_BALANCE_FORK); 2342 static_assert(WF_TTWU == SD_BALANCE_WAKE); 2343 2344 /* 2345 * To aid in avoiding the subversion of "niceness" due to uneven distribution 2346 * of tasks with abnormal "nice" values across CPUs the contribution that 2347 * each task makes to its run queue's load is weighted according to its 2348 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a 2349 * scaled version of the new time slice allocation that they receive on time 2350 * slice expiry etc. 2351 */ 2352 2353 #define WEIGHT_IDLEPRIO 3 2354 #define WMULT_IDLEPRIO 1431655765 2355 2356 extern const int sched_prio_to_weight[40]; 2357 extern const u32 sched_prio_to_wmult[40]; 2358 2359 /* 2360 * {de,en}queue flags: 2361 * 2362 * SLEEP/WAKEUP - task is no-longer/just-became runnable 2363 * 2364 * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks 2365 * are in a known state which allows modification. Such pairs 2366 * should preserve as much state as possible. 2367 * 2368 * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location 2369 * in the runqueue. 2370 * 2371 * NOCLOCK - skip the update_rq_clock() (avoids double updates) 2372 * 2373 * MIGRATION - p->on_rq == TASK_ON_RQ_MIGRATING (used for DEADLINE) 2374 * 2375 * DELAYED - de/re-queue a sched_delayed task 2376 * 2377 * CLASS - going to update p->sched_class; makes sched_change call the 2378 * various switch methods. 2379 * 2380 * ENQUEUE_HEAD - place at front of runqueue (tail if not specified) 2381 * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline) 2382 * ENQUEUE_MIGRATED - the task was migrated during wakeup 2383 * ENQUEUE_RQ_SELECTED - ->select_task_rq() was called 2384 * 2385 * XXX SAVE/RESTORE in combination with CLASS doesn't really make sense, but 2386 * SCHED_DEADLINE seems to rely on this for now. 
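 *
 * E.g. a matched SAVE/RESTORE modification cycle typically reads
 * roughly like this (illustrative sketch only):
 *
 *	queued = task_on_rq_queued(p);
 *	if (queued)
 *		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
 *	// ... modify p's scheduling properties ...
 *	if (queued)
 *		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);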
2387 */ 2388 2389 #define DEQUEUE_SLEEP 0x0001 /* Matches ENQUEUE_WAKEUP */ 2390 #define DEQUEUE_SAVE 0x0002 /* Matches ENQUEUE_RESTORE */ 2391 #define DEQUEUE_MOVE 0x0004 /* Matches ENQUEUE_MOVE */ 2392 #define DEQUEUE_NOCLOCK 0x0008 /* Matches ENQUEUE_NOCLOCK */ 2393 2394 #define DEQUEUE_MIGRATING 0x0010 /* Matches ENQUEUE_MIGRATING */ 2395 #define DEQUEUE_DELAYED 0x0020 /* Matches ENQUEUE_DELAYED */ 2396 #define DEQUEUE_CLASS 0x0040 /* Matches ENQUEUE_CLASS */ 2397 2398 #define DEQUEUE_SPECIAL 0x00010000 2399 #define DEQUEUE_THROTTLE 0x00020000 2400 2401 #define ENQUEUE_WAKEUP 0x0001 2402 #define ENQUEUE_RESTORE 0x0002 2403 #define ENQUEUE_MOVE 0x0004 2404 #define ENQUEUE_NOCLOCK 0x0008 2405 2406 #define ENQUEUE_MIGRATING 0x0010 2407 #define ENQUEUE_DELAYED 0x0020 2408 #define ENQUEUE_CLASS 0x0040 2409 2410 #define ENQUEUE_HEAD 0x00010000 2411 #define ENQUEUE_REPLENISH 0x00020000 2412 #define ENQUEUE_MIGRATED 0x00040000 2413 #define ENQUEUE_INITIAL 0x00080000 2414 #define ENQUEUE_RQ_SELECTED 0x00100000 2415 2416 #define RETRY_TASK ((void *)-1UL) 2417 2418 struct affinity_context { 2419 const struct cpumask *new_mask; 2420 struct cpumask *user_mask; 2421 unsigned int flags; 2422 }; 2423 2424 extern s64 update_curr_common(struct rq *rq); 2425 2426 struct sched_class { 2427 2428 #ifdef CONFIG_UCLAMP_TASK 2429 int uclamp_enabled; 2430 #endif 2431 /* 2432 * idle: 0 2433 * ext: 1 2434 * fair: 2 2435 * rt: 4 2436 * dl: 8 2437 * stop: 16 2438 */ 2439 unsigned int queue_mask; 2440 2441 /* 2442 * move_queued_task/activate_task/enqueue_task: rq->lock 2443 * ttwu_do_activate/activate_task/enqueue_task: rq->lock 2444 * wake_up_new_task/activate_task/enqueue_task: task_rq_lock 2445 * ttwu_runnable/enqueue_task: task_rq_lock 2446 * proxy_task_current: rq->lock 2447 * sched_change_end 2448 */ 2449 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags); 2450 /* 2451 * move_queued_task/deactivate_task/dequeue_task: rq->lock 2452 * __schedule/block_task/dequeue_task: rq->lock 2453 * proxy_task_current: rq->lock 2454 * wait_task_inactive: task_rq_lock 2455 * sched_change_begin 2456 */ 2457 bool (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags); 2458 2459 /* 2460 * do_sched_yield: rq->lock 2461 */ 2462 void (*yield_task) (struct rq *rq); 2463 /* 2464 * yield_to: rq->lock (double) 2465 */ 2466 bool (*yield_to_task)(struct rq *rq, struct task_struct *p); 2467 2468 /* 2469 * move_queued_task: rq->lock 2470 * __migrate_swap_task: rq->lock 2471 * ttwu_do_activate: rq->lock 2472 * ttwu_runnable: task_rq_lock 2473 * wake_up_new_task: task_rq_lock 2474 */ 2475 void (*wakeup_preempt)(struct rq *rq, struct task_struct *p, int flags); 2476 2477 /* 2478 * schedule/pick_next_task/prev_balance: rq->lock 2479 */ 2480 int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf); 2481 2482 /* 2483 * schedule/pick_next_task: rq->lock 2484 */ 2485 struct task_struct *(*pick_task)(struct rq *rq, struct rq_flags *rf); 2486 /* 2487 * Optional! 
When implemented pick_next_task() should be equivalent to: 2488 * 2489 * next = pick_task(); 2490 * if (next) { 2491 * put_prev_task(prev); 2492 * set_next_task_first(next); 2493 * } 2494 */ 2495 struct task_struct *(*pick_next_task)(struct rq *rq, struct task_struct *prev, 2496 struct rq_flags *rf); 2497 2498 /* 2499 * sched_change: 2500 * __schedule: rq->lock 2501 */ 2502 void (*put_prev_task)(struct rq *rq, struct task_struct *p, struct task_struct *next); 2503 void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first); 2504 2505 /* 2506 * select_task_rq: p->pi_lock 2507 * sched_exec: p->pi_lock 2508 */ 2509 int (*select_task_rq)(struct task_struct *p, int task_cpu, int flags); 2510 2511 /* 2512 * set_task_cpu: p->pi_lock || rq->lock (ttwu like) 2513 */ 2514 void (*migrate_task_rq)(struct task_struct *p, int new_cpu); 2515 2516 /* 2517 * ttwu_do_activate: rq->lock 2518 * wake_up_new_task: task_rq_lock 2519 */ 2520 void (*task_woken)(struct rq *this_rq, struct task_struct *task); 2521 2522 /* 2523 * do_set_cpus_allowed: task_rq_lock + sched_change 2524 */ 2525 void (*set_cpus_allowed)(struct task_struct *p, struct affinity_context *ctx); 2526 2527 /* 2528 * sched_set_rq_{on,off}line: rq->lock 2529 */ 2530 void (*rq_online)(struct rq *rq); 2531 void (*rq_offline)(struct rq *rq); 2532 2533 /* 2534 * push_cpu_stop: p->pi_lock && rq->lock 2535 */ 2536 struct rq *(*find_lock_rq)(struct task_struct *p, struct rq *rq); 2537 2538 /* 2539 * hrtick: rq->lock 2540 * sched_tick: rq->lock 2541 * sched_tick_remote: rq->lock 2542 */ 2543 void (*task_tick)(struct rq *rq, struct task_struct *p, int queued); 2544 /* 2545 * sched_cgroup_fork: p->pi_lock 2546 */ 2547 void (*task_fork)(struct task_struct *p); 2548 /* 2549 * finish_task_switch: no locks 2550 */ 2551 void (*task_dead)(struct task_struct *p); 2552 2553 /* 2554 * sched_change 2555 */ 2556 void (*switching_from)(struct rq *this_rq, struct task_struct *task); 2557 void (*switched_from) (struct rq *this_rq, struct task_struct *task); 2558 void (*switching_to) (struct rq *this_rq, struct task_struct *task); 2559 void (*switched_to) (struct rq *this_rq, struct task_struct *task); 2560 u64 (*get_prio) (struct rq *this_rq, struct task_struct *task); 2561 void (*prio_changed) (struct rq *this_rq, struct task_struct *task, 2562 u64 oldprio); 2563 2564 /* 2565 * set_load_weight: task_rq_lock + sched_change 2566 * __setscheduler_parms: task_rq_lock + sched_change 2567 */ 2568 void (*reweight_task)(struct rq *this_rq, struct task_struct *task, 2569 const struct load_weight *lw); 2570 2571 /* 2572 * sched_rr_get_interval: task_rq_lock 2573 */ 2574 unsigned int (*get_rr_interval)(struct rq *rq, 2575 struct task_struct *task); 2576 2577 /* 2578 * task_sched_runtime: task_rq_lock 2579 */ 2580 void (*update_curr)(struct rq *rq); 2581 2582 #ifdef CONFIG_FAIR_GROUP_SCHED 2583 /* 2584 * sched_change_group: task_rq_lock + sched_change 2585 */ 2586 void (*task_change_group)(struct task_struct *p); 2587 #endif 2588 2589 #ifdef CONFIG_SCHED_CORE 2590 /* 2591 * pick_next_task: rq->lock 2592 * try_steal_cookie: rq->lock (double) 2593 */ 2594 int (*task_is_throttled)(struct task_struct *p, int cpu); 2595 #endif 2596 }; 2597 2598 /* 2599 * Does not nest; only used around sched_class::pick_task() rq-lock-breaks. 
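 *
 * The intended pattern is roughly (illustrative sketch only):
 *
 *	rq_modified_clear(rq);
 *	next = class->pick_task(rq, rf);	// may drop/retake rq->lock
 *	if (rq_modified_above(rq, class))
 *		goto restart;	// a higher class became runnable meanwhile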
2600 */ 2601 static inline void rq_modified_clear(struct rq *rq) 2602 { 2603 rq->queue_mask = 0; 2604 } 2605 2606 static inline bool rq_modified_above(struct rq *rq, const struct sched_class * class) 2607 { 2608 unsigned int mask = class->queue_mask; 2609 return rq->queue_mask & ~((mask << 1) - 1); 2610 } 2611 2612 static inline void put_prev_task(struct rq *rq, struct task_struct *prev) 2613 { 2614 WARN_ON_ONCE(rq->donor != prev); 2615 prev->sched_class->put_prev_task(rq, prev, NULL); 2616 } 2617 2618 static inline void set_next_task(struct rq *rq, struct task_struct *next) 2619 { 2620 next->sched_class->set_next_task(rq, next, false); 2621 } 2622 2623 static inline void 2624 __put_prev_set_next_dl_server(struct rq *rq, 2625 struct task_struct *prev, 2626 struct task_struct *next) 2627 { 2628 prev->dl_server = NULL; 2629 next->dl_server = rq->dl_server; 2630 rq->dl_server = NULL; 2631 } 2632 2633 static inline void put_prev_set_next_task(struct rq *rq, 2634 struct task_struct *prev, 2635 struct task_struct *next) 2636 { 2637 WARN_ON_ONCE(rq->donor != prev); 2638 2639 __put_prev_set_next_dl_server(rq, prev, next); 2640 2641 if (next == prev) 2642 return; 2643 2644 prev->sched_class->put_prev_task(rq, prev, next); 2645 next->sched_class->set_next_task(rq, next, true); 2646 } 2647 2648 /* 2649 * Helper to define a sched_class instance; each one is placed in a separate 2650 * section which is ordered by the linker script: 2651 * 2652 * include/asm-generic/vmlinux.lds.h 2653 * 2654 * *CAREFUL* they are laid out in *REVERSE* order!!! 2655 * 2656 * Also enforce alignment on the instance, not the type, to guarantee layout. 2657 */ 2658 #define DEFINE_SCHED_CLASS(name) \ 2659 const struct sched_class name##_sched_class \ 2660 __aligned(__alignof__(struct sched_class)) \ 2661 __section("__" #name "_sched_class") 2662 2663 /* Defined in include/asm-generic/vmlinux.lds.h */ 2664 extern struct sched_class __sched_class_highest[]; 2665 extern struct sched_class __sched_class_lowest[]; 2666 2667 extern const struct sched_class stop_sched_class; 2668 extern const struct sched_class dl_sched_class; 2669 extern const struct sched_class rt_sched_class; 2670 extern const struct sched_class fair_sched_class; 2671 extern const struct sched_class idle_sched_class; 2672 2673 /* 2674 * Iterate only active classes. SCX can take over all fair tasks or be 2675 * completely disabled. If the former, skip fair. If the latter, skip SCX. 
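 *
 * E.g. with scx_switched_all() the walk is roughly
 * stop -> dl -> rt -> ext -> idle, while with SCX disabled it is
 * stop -> dl -> rt -> fair -> idle.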
2676 */ 2677 static inline const struct sched_class *next_active_class(const struct sched_class *class) 2678 { 2679 class++; 2680 #ifdef CONFIG_SCHED_CLASS_EXT 2681 if (scx_switched_all() && class == &fair_sched_class) 2682 class++; 2683 if (!scx_enabled() && class == &ext_sched_class) 2684 class++; 2685 #endif 2686 return class; 2687 } 2688 2689 #define for_class_range(class, _from, _to) \ 2690 for (class = (_from); class < (_to); class++) 2691 2692 #define for_each_class(class) \ 2693 for_class_range(class, __sched_class_highest, __sched_class_lowest) 2694 2695 #define for_active_class_range(class, _from, _to) \ 2696 for (class = (_from); class != (_to); class = next_active_class(class)) 2697 2698 #define for_each_active_class(class) \ 2699 for_active_class_range(class, __sched_class_highest, __sched_class_lowest) 2700 2701 #define sched_class_above(_a, _b) ((_a) < (_b)) 2702 2703 static inline bool sched_stop_runnable(struct rq *rq) 2704 { 2705 return rq->stop && task_on_rq_queued(rq->stop); 2706 } 2707 2708 static inline bool sched_dl_runnable(struct rq *rq) 2709 { 2710 return rq->dl.dl_nr_running > 0; 2711 } 2712 2713 static inline bool sched_rt_runnable(struct rq *rq) 2714 { 2715 return rq->rt.rt_queued > 0; 2716 } 2717 2718 static inline bool sched_fair_runnable(struct rq *rq) 2719 { 2720 return rq->cfs.nr_queued > 0; 2721 } 2722 2723 extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, 2724 struct rq_flags *rf); 2725 extern struct task_struct *pick_task_idle(struct rq *rq, struct rq_flags *rf); 2726 2727 #define SCA_CHECK 0x01 2728 #define SCA_MIGRATE_DISABLE 0x02 2729 #define SCA_MIGRATE_ENABLE 0x04 2730 #define SCA_USER 0x08 2731 2732 extern void update_group_capacity(struct sched_domain *sd, int cpu); 2733 2734 extern void sched_balance_trigger(struct rq *rq); 2735 2736 extern int __set_cpus_allowed_ptr(struct task_struct *p, struct affinity_context *ctx); 2737 extern void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx); 2738 2739 static inline bool task_allowed_on_cpu(struct task_struct *p, int cpu) 2740 { 2741 /* When not in the task's cpumask, no point in looking further. */ 2742 if (!cpumask_test_cpu(cpu, p->cpus_ptr)) 2743 return false; 2744 2745 /* Can @cpu run a user thread? */ 2746 if (!(p->flags & PF_KTHREAD) && !task_cpu_possible(cpu, p)) 2747 return false; 2748 2749 return true; 2750 } 2751 2752 static inline cpumask_t *alloc_user_cpus_ptr(int node) 2753 { 2754 /* 2755 * See set_cpus_allowed_force() above for the rcu_head usage. 
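 * (Hence the allocation below is sized to hold whichever of a cpumask
 * and an rcu_head is larger.)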
2756 */
2757 int size = max_t(int, cpumask_size(), sizeof(struct rcu_head));
2758
2759 return kmalloc_node(size, GFP_KERNEL, node);
2760 }
2761
2762 static inline struct task_struct *get_push_task(struct rq *rq)
2763 {
2764 struct task_struct *p = rq->donor;
2765
2766 lockdep_assert_rq_held(rq);
2767
2768 if (rq->push_busy)
2769 return NULL;
2770
2771 if (p->nr_cpus_allowed == 1)
2772 return NULL;
2773
2774 if (p->migration_disabled)
2775 return NULL;
2776
2777 rq->push_busy = true;
2778 return get_task_struct(p);
2779 }
2780
2781 extern int push_cpu_stop(void *arg);
2782
2783 #ifdef CONFIG_CPU_IDLE
2784
2785 static inline void idle_set_state(struct rq *rq,
2786 struct cpuidle_state *idle_state)
2787 {
2788 rq->idle_state = idle_state;
2789 }
2790
2791 static inline struct cpuidle_state *idle_get_state(struct rq *rq)
2792 {
2793 WARN_ON_ONCE(!rcu_read_lock_held());
2794
2795 return rq->idle_state;
2796 }
2797
2798 #else /* !CONFIG_CPU_IDLE: */
2799
2800 static inline void idle_set_state(struct rq *rq,
2801 struct cpuidle_state *idle_state)
2802 {
2803 }
2804
2805 static inline struct cpuidle_state *idle_get_state(struct rq *rq)
2806 {
2807 return NULL;
2808 }
2809
2810 #endif /* !CONFIG_CPU_IDLE */
2811
2812 extern void schedule_idle(void);
2813 asmlinkage void schedule_user(void);
2814
2815 extern void sysrq_sched_debug_show(void);
2816 extern void sched_init_granularity(void);
2817 extern void update_max_interval(void);
2818
2819 extern void init_sched_dl_class(void);
2820 extern void init_sched_rt_class(void);
2821 extern void init_sched_fair_class(void);
2822
2823 extern void resched_curr(struct rq *rq);
2824 extern void resched_curr_lazy(struct rq *rq);
2825 extern void resched_cpu(int cpu);
2826
2827 extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
2828 extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
2829
2830 extern void init_dl_entity(struct sched_dl_entity *dl_se);
2831
2832 extern void init_cfs_throttle_work(struct task_struct *p);
2833
2834 #define BW_SHIFT 20
2835 #define BW_UNIT (1 << BW_SHIFT)
2836 #define RATIO_SHIFT 8
2837 #define MAX_BW_BITS (64 - BW_SHIFT)
2838 #define MAX_BW ((1ULL << MAX_BW_BITS) - 1)
2839
2840 extern unsigned long to_ratio(u64 period, u64 runtime);
2841
2842 extern void init_entity_runnable_average(struct sched_entity *se);
2843 extern void post_init_entity_util_avg(struct task_struct *p);
2844
2845 #ifdef CONFIG_NO_HZ_FULL
2846 extern bool sched_can_stop_tick(struct rq *rq);
2847 extern int __init sched_tick_offload_init(void);
2848
2849 /*
2850 * The tick may be needed by tasks in the runqueue depending on their policy
2851 * and requirements. If the tick is needed, send the target an IPI to kick it
2852 * out of nohz mode if necessary.
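 * (E.g. when a second fair task becomes runnable on a nohz_full CPU,
 * sched_can_stop_tick() turns false and the TICK_DEP_BIT_SCHED
 * dependency set below brings the tick back for preemption.)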
2853 */ 2854 static inline void sched_update_tick_dependency(struct rq *rq) 2855 { 2856 int cpu = cpu_of(rq); 2857 2858 if (!tick_nohz_full_cpu(cpu)) 2859 return; 2860 2861 if (sched_can_stop_tick(rq)) 2862 tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED); 2863 else 2864 tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED); 2865 } 2866 #else /* !CONFIG_NO_HZ_FULL: */ 2867 static inline int sched_tick_offload_init(void) { return 0; } 2868 static inline void sched_update_tick_dependency(struct rq *rq) { } 2869 #endif /* !CONFIG_NO_HZ_FULL */ 2870 2871 static inline void add_nr_running(struct rq *rq, unsigned count) 2872 { 2873 unsigned prev_nr = rq->nr_running; 2874 2875 rq->nr_running = prev_nr + count; 2876 if (trace_sched_update_nr_running_tp_enabled()) { 2877 call_trace_sched_update_nr_running(rq, count); 2878 } 2879 2880 if (prev_nr < 2 && rq->nr_running >= 2) 2881 set_rd_overloaded(rq->rd, 1); 2882 2883 sched_update_tick_dependency(rq); 2884 } 2885 2886 static inline void sub_nr_running(struct rq *rq, unsigned count) 2887 { 2888 rq->nr_running -= count; 2889 if (trace_sched_update_nr_running_tp_enabled()) { 2890 call_trace_sched_update_nr_running(rq, -count); 2891 } 2892 2893 /* Check if we still need preemption */ 2894 sched_update_tick_dependency(rq); 2895 } 2896 2897 static inline void __block_task(struct rq *rq, struct task_struct *p) 2898 { 2899 if (p->sched_contributes_to_load) 2900 rq->nr_uninterruptible++; 2901 2902 if (p->in_iowait) { 2903 atomic_inc(&rq->nr_iowait); 2904 delayacct_blkio_start(); 2905 } 2906 2907 ASSERT_EXCLUSIVE_WRITER(p->on_rq); 2908 2909 /* 2910 * The moment this write goes through, ttwu() can swoop in and migrate 2911 * this task, rendering our rq->__lock ineffective. 2912 * 2913 * __schedule() try_to_wake_up() 2914 * LOCK rq->__lock LOCK p->pi_lock 2915 * pick_next_task() 2916 * pick_next_task_fair() 2917 * pick_next_entity() 2918 * dequeue_entities() 2919 * __block_task() 2920 * RELEASE p->on_rq = 0 if (p->on_rq && ...) 2921 * break; 2922 * 2923 * ACQUIRE (after ctrl-dep) 2924 * 2925 * cpu = select_task_rq(); 2926 * set_task_cpu(p, cpu); 2927 * ttwu_queue() 2928 * ttwu_do_activate() 2929 * LOCK rq->__lock 2930 * activate_task() 2931 * STORE p->on_rq = 1 2932 * UNLOCK rq->__lock 2933 * 2934 * Callers must ensure to not reference @p after this -- we no longer 2935 * own it. 
2936 */ 2937 smp_store_release(&p->on_rq, 0); 2938 } 2939 2940 extern void activate_task(struct rq *rq, struct task_struct *p, int flags); 2941 extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags); 2942 2943 extern void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags); 2944 2945 #ifdef CONFIG_PREEMPT_RT 2946 # define SCHED_NR_MIGRATE_BREAK 8 2947 #else 2948 # define SCHED_NR_MIGRATE_BREAK 32 2949 #endif 2950 2951 extern __read_mostly unsigned int sysctl_sched_nr_migrate; 2952 extern __read_mostly unsigned int sysctl_sched_migration_cost; 2953 2954 extern unsigned int sysctl_sched_base_slice; 2955 2956 extern int sysctl_resched_latency_warn_ms; 2957 extern int sysctl_resched_latency_warn_once; 2958 2959 extern unsigned int sysctl_sched_tunable_scaling; 2960 2961 extern unsigned int sysctl_numa_balancing_scan_delay; 2962 extern unsigned int sysctl_numa_balancing_scan_period_min; 2963 extern unsigned int sysctl_numa_balancing_scan_period_max; 2964 extern unsigned int sysctl_numa_balancing_scan_size; 2965 extern unsigned int sysctl_numa_balancing_hot_threshold; 2966 2967 #ifdef CONFIG_SCHED_HRTICK 2968 2969 /* 2970 * Use hrtick when: 2971 * - enabled by features 2972 * - hrtimer is actually high res 2973 */ 2974 static inline int hrtick_enabled(struct rq *rq) 2975 { 2976 if (!cpu_active(cpu_of(rq))) 2977 return 0; 2978 return hrtimer_is_hres_active(&rq->hrtick_timer); 2979 } 2980 2981 static inline int hrtick_enabled_fair(struct rq *rq) 2982 { 2983 if (!sched_feat(HRTICK)) 2984 return 0; 2985 return hrtick_enabled(rq); 2986 } 2987 2988 static inline int hrtick_enabled_dl(struct rq *rq) 2989 { 2990 if (!sched_feat(HRTICK_DL)) 2991 return 0; 2992 return hrtick_enabled(rq); 2993 } 2994 2995 extern void hrtick_start(struct rq *rq, u64 delay); 2996 2997 #else /* !CONFIG_SCHED_HRTICK: */ 2998 2999 static inline int hrtick_enabled_fair(struct rq *rq) 3000 { 3001 return 0; 3002 } 3003 3004 static inline int hrtick_enabled_dl(struct rq *rq) 3005 { 3006 return 0; 3007 } 3008 3009 static inline int hrtick_enabled(struct rq *rq) 3010 { 3011 return 0; 3012 } 3013 3014 #endif /* !CONFIG_SCHED_HRTICK */ 3015 3016 #ifndef arch_scale_freq_tick 3017 static __always_inline void arch_scale_freq_tick(void) { } 3018 #endif 3019 3020 #ifndef arch_scale_freq_capacity 3021 /** 3022 * arch_scale_freq_capacity - get the frequency scale factor of a given CPU. 3023 * @cpu: the CPU in question. 3024 * 3025 * Return: the frequency scale factor normalized against SCHED_CAPACITY_SCALE, i.e. 3026 * 3027 * f_curr 3028 * ------ * SCHED_CAPACITY_SCALE 3029 * f_max 3030 */ 3031 static __always_inline 3032 unsigned long arch_scale_freq_capacity(int cpu) 3033 { 3034 return SCHED_CAPACITY_SCALE; 3035 } 3036 #endif 3037 3038 /* 3039 * In double_lock_balance()/double_rq_lock(), we use raw_spin_rq_lock() to 3040 * acquire rq lock instead of rq_lock(). So at the end of these two functions 3041 * we need to call double_rq_clock_clear_update() to clear RQCF_UPDATED of 3042 * rq->clock_update_flags to avoid the WARN_DOUBLE_CLOCK warning. 3043 */ 3044 static inline void double_rq_clock_clear_update(struct rq *rq1, struct rq *rq2) 3045 { 3046 rq1->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP); 3047 rq2->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP); 3048 } 3049 3050 #define DEFINE_LOCK_GUARD_2(name, type, _lock, _unlock, ...) 
\
3051 __DEFINE_UNLOCK_GUARD(name, type, _unlock, type *lock2; __VA_ARGS__) \
3052 static inline class_##name##_t class_##name##_constructor(type *lock, type *lock2) \
3053 { class_##name##_t _t = { .lock = lock, .lock2 = lock2 }, *_T = &_t; \
3054 _lock; return _t; }
3055
3056 static inline bool rq_order_less(struct rq *rq1, struct rq *rq2)
3057 {
3058 #ifdef CONFIG_SCHED_CORE
3059 /*
3060 * In order to not have {0,2},{1,3} turn into an AB-BA,
3061 * order by core-id first and cpu-id second.
3062 *
3063 * Notably:
3064 *
3065 * double_rq_lock(0,3); will take core-0, core-1 lock
3066 * double_rq_lock(1,2); will take core-1, core-0 lock
3067 *
3068 * when only cpu-id is considered.
3069 */
3070 if (rq1->core->cpu < rq2->core->cpu)
3071 return true;
3072 if (rq1->core->cpu > rq2->core->cpu)
3073 return false;
3074
3075 /*
3076 * __sched_core_flip() relies on SMT having cpu-id lock order.
3077 */
3078 #endif /* CONFIG_SCHED_CORE */
3079 return rq1->cpu < rq2->cpu;
3080 }
3081
3082 extern void double_rq_lock(struct rq *rq1, struct rq *rq2);
3083
3084 #ifdef CONFIG_PREEMPTION
3085
3086 /*
3087 * fair double_lock_balance: Safely acquires both rq->locks in a fair
3088 * way at the expense of forcing extra atomic operations in all
3089 * invocations. This assures that the double_lock is acquired using the
3090 * same underlying policy as the spinlock_t on this architecture, which
3091 * reduces latency compared to the unfair variant below. However, it
3092 * also adds more overhead and therefore may reduce throughput.
3093 */
3094 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
3095 __releases(this_rq->lock)
3096 __acquires(busiest->lock)
3097 __acquires(this_rq->lock)
3098 {
3099 raw_spin_rq_unlock(this_rq);
3100 double_rq_lock(this_rq, busiest);
3101
3102 return 1;
3103 }
3104
3105 #else /* !CONFIG_PREEMPTION: */
3106 /*
3107 * Unfair double_lock_balance: Optimizes throughput at the expense of
3108 * latency by eliminating extra atomic operations when the locks are
3109 * already in proper order on entry. This favors lower CPU-ids and will
3110 * grant the double lock to lower CPUs over higher ids under contention,
3111 * regardless of entry order into the function.
3112 */
3113 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
3114 __releases(this_rq->lock)
3115 __acquires(busiest->lock)
3116 __acquires(this_rq->lock)
3117 {
3118 if (__rq_lockp(this_rq) == __rq_lockp(busiest) ||
3119 likely(raw_spin_rq_trylock(busiest))) {
3120 double_rq_clock_clear_update(this_rq, busiest);
3121 return 0;
3122 }
3123
3124 if (rq_order_less(this_rq, busiest)) {
3125 raw_spin_rq_lock_nested(busiest, SINGLE_DEPTH_NESTING);
3126 double_rq_clock_clear_update(this_rq, busiest);
3127 return 0;
3128 }
3129
3130 raw_spin_rq_unlock(this_rq);
3131 double_rq_lock(this_rq, busiest);
3132
3133 return 1;
3134 }
3135
3136 #endif /* !CONFIG_PREEMPTION */
3137
3138 /*
3139 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
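 *
 * Returns 1 if this_rq->lock was dropped and re-acquired (so cached rq
 * state must be revalidated), 0 otherwise. Typical usage is roughly
 * (illustrative sketch only):
 *
 *	// this_rq->lock held
 *	if (double_lock_balance(this_rq, busiest)) {
 *		// was dropped; re-check, tasks may have migrated
 *	}
 *	...
 *	double_unlock_balance(this_rq, busiest);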
3140 */ 3141 static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest) 3142 { 3143 lockdep_assert_irqs_disabled(); 3144 3145 return _double_lock_balance(this_rq, busiest); 3146 } 3147 3148 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) 3149 __releases(busiest->lock) 3150 { 3151 if (__rq_lockp(this_rq) != __rq_lockp(busiest)) 3152 raw_spin_rq_unlock(busiest); 3153 lock_set_subclass(&__rq_lockp(this_rq)->dep_map, 0, _RET_IP_); 3154 } 3155 3156 static inline void double_lock(spinlock_t *l1, spinlock_t *l2) 3157 { 3158 if (l1 > l2) 3159 swap(l1, l2); 3160 3161 spin_lock(l1); 3162 spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 3163 } 3164 3165 static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2) 3166 { 3167 if (l1 > l2) 3168 swap(l1, l2); 3169 3170 spin_lock_irq(l1); 3171 spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 3172 } 3173 3174 static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2) 3175 { 3176 if (l1 > l2) 3177 swap(l1, l2); 3178 3179 raw_spin_lock(l1); 3180 raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 3181 } 3182 3183 static inline void double_raw_unlock(raw_spinlock_t *l1, raw_spinlock_t *l2) 3184 { 3185 raw_spin_unlock(l1); 3186 raw_spin_unlock(l2); 3187 } 3188 3189 DEFINE_LOCK_GUARD_2(double_raw_spinlock, raw_spinlock_t, 3190 double_raw_lock(_T->lock, _T->lock2), 3191 double_raw_unlock(_T->lock, _T->lock2)) 3192 3193 /* 3194 * double_rq_unlock - safely unlock two runqueues 3195 * 3196 * Note this does not restore interrupts like task_rq_unlock, 3197 * you need to do so manually after calling. 3198 */ 3199 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) 3200 __releases(rq1->lock) 3201 __releases(rq2->lock) 3202 { 3203 if (__rq_lockp(rq1) != __rq_lockp(rq2)) 3204 raw_spin_rq_unlock(rq2); 3205 else 3206 __release(rq2->lock); 3207 raw_spin_rq_unlock(rq1); 3208 } 3209 3210 extern void set_rq_online (struct rq *rq); 3211 extern void set_rq_offline(struct rq *rq); 3212 3213 extern bool sched_smp_initialized; 3214 3215 DEFINE_LOCK_GUARD_2(double_rq_lock, struct rq, 3216 double_rq_lock(_T->lock, _T->lock2), 3217 double_rq_unlock(_T->lock, _T->lock2)) 3218 3219 extern struct sched_entity *__pick_root_entity(struct cfs_rq *cfs_rq); 3220 extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq); 3221 extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq); 3222 3223 extern bool sched_debug_verbose; 3224 3225 extern void print_cfs_stats(struct seq_file *m, int cpu); 3226 extern void print_rt_stats(struct seq_file *m, int cpu); 3227 extern void print_dl_stats(struct seq_file *m, int cpu); 3228 extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq); 3229 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq); 3230 extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq); 3231 3232 extern void resched_latency_warn(int cpu, u64 latency); 3233 3234 #ifdef CONFIG_NUMA_BALANCING 3235 extern void show_numa_stats(struct task_struct *p, struct seq_file *m); 3236 extern void 3237 print_numa_stats(struct seq_file *m, int node, unsigned long tsf, 3238 unsigned long tpf, unsigned long gsf, unsigned long gpf); 3239 #endif /* CONFIG_NUMA_BALANCING */ 3240 3241 extern void init_cfs_rq(struct cfs_rq *cfs_rq); 3242 extern void init_rt_rq(struct rt_rq *rt_rq); 3243 extern void init_dl_rq(struct dl_rq *dl_rq); 3244 3245 extern void cfs_bandwidth_usage_inc(void); 3246 extern void cfs_bandwidth_usage_dec(void); 3247 3248 #ifdef 
CONFIG_NO_HZ_COMMON
3249
3250 #define NOHZ_BALANCE_KICK_BIT 0
3251 #define NOHZ_STATS_KICK_BIT 1
3252 #define NOHZ_NEWILB_KICK_BIT 2
3253 #define NOHZ_NEXT_KICK_BIT 3
3254
3255 /* Run sched_balance_domains() */
3256 #define NOHZ_BALANCE_KICK BIT(NOHZ_BALANCE_KICK_BIT)
3257 /* Update blocked load */
3258 #define NOHZ_STATS_KICK BIT(NOHZ_STATS_KICK_BIT)
3259 /* Update blocked load when entering idle */
3260 #define NOHZ_NEWILB_KICK BIT(NOHZ_NEWILB_KICK_BIT)
3261 /* Update nohz.next_balance */
3262 #define NOHZ_NEXT_KICK BIT(NOHZ_NEXT_KICK_BIT)
3263
3264 #define NOHZ_KICK_MASK (NOHZ_BALANCE_KICK | NOHZ_STATS_KICK | NOHZ_NEXT_KICK)
3265
3266 #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags)
3267
3268 extern void nohz_balance_exit_idle(struct rq *rq);
3269 #else /* !CONFIG_NO_HZ_COMMON: */
3270 static inline void nohz_balance_exit_idle(struct rq *rq) { }
3271 #endif /* !CONFIG_NO_HZ_COMMON */
3272
3273 #ifdef CONFIG_NO_HZ_COMMON
3274 extern void nohz_run_idle_balance(int cpu);
3275 #else
3276 static inline void nohz_run_idle_balance(int cpu) { }
3277 #endif
3278
3279 #include "stats.h"
3280
3281 #if defined(CONFIG_SCHED_CORE) && defined(CONFIG_SCHEDSTATS)
3282
3283 extern void __sched_core_account_forceidle(struct rq *rq);
3284
3285 static inline void sched_core_account_forceidle(struct rq *rq)
3286 {
3287 if (schedstat_enabled())
3288 __sched_core_account_forceidle(rq);
3289 }
3290
3291 extern void __sched_core_tick(struct rq *rq);
3292
3293 static inline void sched_core_tick(struct rq *rq)
3294 {
3295 if (sched_core_enabled(rq) && schedstat_enabled())
3296 __sched_core_tick(rq);
3297 }
3298
3299 #else /* !(CONFIG_SCHED_CORE && CONFIG_SCHEDSTATS): */
3300
3301 static inline void sched_core_account_forceidle(struct rq *rq) { }
3302
3303 static inline void sched_core_tick(struct rq *rq) { }
3304
3305 #endif /* !(CONFIG_SCHED_CORE && CONFIG_SCHEDSTATS) */
3306
3307 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
3308
3309 struct irqtime {
3310 u64 total;
3311 u64 tick_delta;
3312 u64 irq_start_time;
3313 struct u64_stats_sync sync;
3314 };
3315
3316 DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
3317 extern int sched_clock_irqtime;
3318
3319 static inline int irqtime_enabled(void)
3320 {
3321 return sched_clock_irqtime;
3322 }
3323
3324 /*
3325 * Returns the irqtime minus the softirq time computed by ksoftirqd;
3326 * otherwise ksoftirqd's sum_exec_runtime would have its own runtime
3327 * subtracted from it and would never move forward.
3328 */
3329 static inline u64 irq_time_read(int cpu)
3330 {
3331 struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
3332 unsigned int seq;
3333 u64 total;
3334
3335 do {
3336 seq = __u64_stats_fetch_begin(&irqtime->sync);
3337 total = irqtime->total;
3338 } while (__u64_stats_fetch_retry(&irqtime->sync, seq));
3339
3340 return total;
3341 }
3342
3343 #else /* !CONFIG_IRQ_TIME_ACCOUNTING: */
3344
3345 static inline int irqtime_enabled(void)
3346 {
3347 return 0;
3348 }
3349
3350 #endif /* !CONFIG_IRQ_TIME_ACCOUNTING */
3351
3352 #ifdef CONFIG_CPU_FREQ
3353
3354 DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
3355
3356 /**
3357 * cpufreq_update_util - Take a note about CPU utilization changes.
3358 * @rq: Runqueue to carry out the update for.
3359 * @flags: Update reason flags.
3360 *
3361 * This function is called by the scheduler on the CPU whose utilization is
3362 * being updated.
3363 *
3364 * It can only be called from RCU-sched read-side critical sections.
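 * (E.g. with the rq lock held: holding a raw spinlock disables
 * preemption, which constitutes such a critical section.)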
3365 * 3366 * The way cpufreq is currently arranged requires it to evaluate the CPU 3367 * performance state (frequency/voltage) on a regular basis to prevent it from 3368 * being stuck in a completely inadequate performance level for too long. 3369 * That is not guaranteed to happen if the updates are only triggered from CFS 3370 * and DL, though, because they may not be coming in if only RT tasks are 3371 * active all the time (or there are RT tasks only). 3372 * 3373 * As a workaround for that issue, this function is called periodically by the 3374 * RT sched class to trigger extra cpufreq updates to prevent it from stalling, 3375 * but that really is a band-aid. Going forward it should be replaced with 3376 * solutions targeted more specifically at RT tasks. 3377 */ 3378 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) 3379 { 3380 struct update_util_data *data; 3381 3382 data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data, 3383 cpu_of(rq))); 3384 if (data) 3385 data->func(data, rq_clock(rq), flags); 3386 } 3387 #else /* !CONFIG_CPU_FREQ: */ 3388 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) { } 3389 #endif /* !CONFIG_CPU_FREQ */ 3390 3391 #ifdef arch_scale_freq_capacity 3392 # ifndef arch_scale_freq_invariant 3393 # define arch_scale_freq_invariant() true 3394 # endif 3395 #else 3396 # define arch_scale_freq_invariant() false 3397 #endif 3398 3399 unsigned long effective_cpu_util(int cpu, unsigned long util_cfs, 3400 unsigned long *min, 3401 unsigned long *max); 3402 3403 unsigned long sugov_effective_cpu_perf(int cpu, unsigned long actual, 3404 unsigned long min, 3405 unsigned long max); 3406 3407 3408 /* 3409 * Verify the fitness of task @p to run on @cpu taking into account the 3410 * CPU original capacity and the runtime/deadline ratio of the task. 3411 * 3412 * The function will return true if the original capacity of @cpu is 3413 * greater than or equal to task's deadline density right shifted by 3414 * (BW_SHIFT - SCHED_CAPACITY_SHIFT) and false otherwise. 3415 */ 3416 static inline bool dl_task_fits_capacity(struct task_struct *p, int cpu) 3417 { 3418 unsigned long cap = arch_scale_cpu_capacity(cpu); 3419 3420 return cap >= p->dl.dl_density >> (BW_SHIFT - SCHED_CAPACITY_SHIFT); 3421 } 3422 3423 static inline unsigned long cpu_bw_dl(struct rq *rq) 3424 { 3425 return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT; 3426 } 3427 3428 static inline unsigned long cpu_util_dl(struct rq *rq) 3429 { 3430 return READ_ONCE(rq->avg_dl.util_avg); 3431 } 3432 3433 3434 extern unsigned long cpu_util_cfs(int cpu); 3435 extern unsigned long cpu_util_cfs_boost(int cpu); 3436 3437 static inline unsigned long cpu_util_rt(struct rq *rq) 3438 { 3439 return READ_ONCE(rq->avg_rt.util_avg); 3440 } 3441 3442 #ifdef CONFIG_UCLAMP_TASK 3443 3444 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id); 3445 3446 /* 3447 * When uclamp is compiled in, the aggregation at rq level is 'turned off' 3448 * by default in the fast path and only gets turned on once userspace performs 3449 * an operation that requires it. 3450 * 3451 * Returns true if userspace opted-in to use uclamp and aggregation at rq level 3452 * hence is active. 
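 *
 * (Userspace opts in e.g. by setting a task's clamp values via
 * sched_setattr() or through the cpu cgroup controller's uclamp
 * attributes.)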
3453 */
3454 static inline bool uclamp_is_used(void)
3455 {
3456 return static_branch_likely(&sched_uclamp_used);
3457 }
3458
3459 /*
3460 * Enabling a static branch takes cpus_read_lock(), so check
3461 * uclamp_is_used() before enabling it, to avoid taking that lock
3462 * on every call. This is safe because the static key is never
3463 * disabled once it has been enabled.
3464 */
3465 static inline void sched_uclamp_enable(void)
3466 {
3467 if (!uclamp_is_used())
3468 static_branch_enable(&sched_uclamp_used);
3469 }
3470
3471 static inline unsigned long uclamp_rq_get(struct rq *rq,
3472 enum uclamp_id clamp_id)
3473 {
3474 return READ_ONCE(rq->uclamp[clamp_id].value);
3475 }
3476
3477 static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id,
3478 unsigned int value)
3479 {
3480 WRITE_ONCE(rq->uclamp[clamp_id].value, value);
3481 }
3482
3483 static inline bool uclamp_rq_is_idle(struct rq *rq)
3484 {
3485 return rq->uclamp_flags & UCLAMP_FLAG_IDLE;
3486 }
3487
3488 /* Is the rq being capped/throttled by uclamp_max? */
3489 static inline bool uclamp_rq_is_capped(struct rq *rq)
3490 {
3491 unsigned long rq_util;
3492 unsigned long max_util;
3493
3494 if (!uclamp_is_used())
3495 return false;
3496
3497 rq_util = cpu_util_cfs(cpu_of(rq)) + cpu_util_rt(rq);
3498 max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value);
3499
3500 return max_util != SCHED_CAPACITY_SCALE && rq_util >= max_util;
3501 }
3502
3503 #define for_each_clamp_id(clamp_id) \
3504 for ((clamp_id) = 0; (clamp_id) < UCLAMP_CNT; (clamp_id)++)
3505
3506 extern unsigned int sysctl_sched_uclamp_util_min_rt_default;
3507
3508
3509 static inline unsigned int uclamp_none(enum uclamp_id clamp_id)
3510 {
3511 if (clamp_id == UCLAMP_MIN)
3512 return 0;
3513 return SCHED_CAPACITY_SCALE;
3514 }
3515
3516 /* Integer rounded range for each bucket */
3517 #define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)
3518
3519 static inline unsigned int uclamp_bucket_id(unsigned int clamp_value)
3520 {
3521 return min_t(unsigned int, clamp_value / UCLAMP_BUCKET_DELTA, UCLAMP_BUCKETS - 1);
3522 }
3523
3524 static inline void
3525 uclamp_se_set(struct uclamp_se *uc_se, unsigned int value, bool user_defined)
3526 {
3527 uc_se->value = value;
3528 uc_se->bucket_id = uclamp_bucket_id(value);
3529 uc_se->user_defined = user_defined;
3530 }
3531
3532 #else /* !CONFIG_UCLAMP_TASK: */
3533
3534 static inline unsigned long
3535 uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
3536 {
3537 if (clamp_id == UCLAMP_MIN)
3538 return 0;
3539
3540 return SCHED_CAPACITY_SCALE;
3541 }
3542
3543 static inline bool uclamp_rq_is_capped(struct rq *rq) { return false; }
3544
3545 static inline bool uclamp_is_used(void)
3546 {
3547 return false;
3548 }
3549
3550 static inline void sched_uclamp_enable(void) {}
3551
3552 static inline unsigned long
3553 uclamp_rq_get(struct rq *rq, enum uclamp_id clamp_id)
3554 {
3555 if (clamp_id == UCLAMP_MIN)
3556 return 0;
3557
3558 return SCHED_CAPACITY_SCALE;
3559 }
3560
3561 static inline void
3562 uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id, unsigned int value)
3563 {
3564 }
3565
3566 static inline bool uclamp_rq_is_idle(struct rq *rq)
3567 {
3568 return false;
3569 }
3570
3571 #endif /* !CONFIG_UCLAMP_TASK */
3572
3573 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
3574
3575 static inline unsigned long cpu_util_irq(struct rq *rq)
3576 {
3577 return READ_ONCE(rq->avg_irq.util_avg);
3578 }
3579
3580 static inline
3581 unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned
long max) 3582 { 3583 util *= (max - irq); 3584 util /= max; 3585 3586 return util; 3587 3588 } 3589 3590 #else /* !CONFIG_HAVE_SCHED_AVG_IRQ: */ 3591 3592 static inline unsigned long cpu_util_irq(struct rq *rq) 3593 { 3594 return 0; 3595 } 3596 3597 static inline 3598 unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max) 3599 { 3600 return util; 3601 } 3602 3603 #endif /* !CONFIG_HAVE_SCHED_AVG_IRQ */ 3604 3605 extern void __setparam_fair(struct task_struct *p, const struct sched_attr *attr); 3606 3607 #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) 3608 3609 #define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus))) 3610 3611 DECLARE_STATIC_KEY_FALSE(sched_energy_present); 3612 3613 static inline bool sched_energy_enabled(void) 3614 { 3615 return static_branch_unlikely(&sched_energy_present); 3616 } 3617 3618 #else /* !(CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL): */ 3619 3620 #define perf_domain_span(pd) NULL 3621 3622 static inline bool sched_energy_enabled(void) { return false; } 3623 3624 #endif /* !(CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */ 3625 3626 #ifdef CONFIG_MEMBARRIER 3627 3628 /* 3629 * The scheduler provides memory barriers required by membarrier between: 3630 * - prior user-space memory accesses and store to rq->membarrier_state, 3631 * - store to rq->membarrier_state and following user-space memory accesses. 3632 * In the same way it provides those guarantees around store to rq->curr. 3633 */ 3634 static inline void membarrier_switch_mm(struct rq *rq, 3635 struct mm_struct *prev_mm, 3636 struct mm_struct *next_mm) 3637 { 3638 int membarrier_state; 3639 3640 if (prev_mm == next_mm) 3641 return; 3642 3643 membarrier_state = atomic_read(&next_mm->membarrier_state); 3644 if (READ_ONCE(rq->membarrier_state) == membarrier_state) 3645 return; 3646 3647 WRITE_ONCE(rq->membarrier_state, membarrier_state); 3648 } 3649 3650 #else /* !CONFIG_MEMBARRIER: */ 3651 3652 static inline void membarrier_switch_mm(struct rq *rq, 3653 struct mm_struct *prev_mm, 3654 struct mm_struct *next_mm) 3655 { 3656 } 3657 3658 #endif /* !CONFIG_MEMBARRIER */ 3659 3660 static inline bool is_per_cpu_kthread(struct task_struct *p) 3661 { 3662 if (!(p->flags & PF_KTHREAD)) 3663 return false; 3664 3665 if (p->nr_cpus_allowed != 1) 3666 return false; 3667 3668 return true; 3669 } 3670 3671 extern void swake_up_all_locked(struct swait_queue_head *q); 3672 extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); 3673 3674 extern int try_to_wake_up(struct task_struct *tsk, unsigned int state, int wake_flags); 3675 3676 #ifdef CONFIG_PREEMPT_DYNAMIC 3677 extern int preempt_dynamic_mode; 3678 extern int sched_dynamic_mode(const char *str); 3679 extern void sched_dynamic_update(int mode); 3680 #endif 3681 extern const char *preempt_modes[]; 3682 3683 #ifdef CONFIG_SCHED_MM_CID 3684 3685 static __always_inline bool cid_on_cpu(unsigned int cid) 3686 { 3687 return cid & MM_CID_ONCPU; 3688 } 3689 3690 static __always_inline bool cid_in_transit(unsigned int cid) 3691 { 3692 return cid & MM_CID_TRANSIT; 3693 } 3694 3695 static __always_inline unsigned int cpu_cid_to_cid(unsigned int cid) 3696 { 3697 return cid & ~MM_CID_ONCPU; 3698 } 3699 3700 static __always_inline unsigned int cid_to_cpu_cid(unsigned int cid) 3701 { 3702 return cid | MM_CID_ONCPU; 3703 } 3704 3705 static __always_inline unsigned int cid_to_transit_cid(unsigned int cid) 3706 { 3707 return cid | MM_CID_TRANSIT; 3708 } 3709 3710 static 
__always_inline unsigned int cid_from_transit_cid(unsigned int cid) 3711 { 3712 return cid & ~MM_CID_TRANSIT; 3713 } 3714 3715 static __always_inline bool cid_on_task(unsigned int cid) 3716 { 3717 /* True if none of the MM_CID_ONCPU, MM_CID_TRANSIT, MM_CID_UNSET bits is set */ 3718 return cid < MM_CID_TRANSIT; 3719 } 3720 3721 static __always_inline void mm_drop_cid(struct mm_struct *mm, unsigned int cid) 3722 { 3723 clear_bit(cid, mm_cidmask(mm)); 3724 } 3725 3726 static __always_inline void mm_unset_cid_on_task(struct task_struct *t) 3727 { 3728 unsigned int cid = t->mm_cid.cid; 3729 3730 t->mm_cid.cid = MM_CID_UNSET; 3731 if (cid_on_task(cid)) 3732 mm_drop_cid(t->mm, cid); 3733 } 3734 3735 static __always_inline void mm_drop_cid_on_cpu(struct mm_struct *mm, struct mm_cid_pcpu *pcp) 3736 { 3737 /* Clear the ONCPU bit, but do not set UNSET in the per CPU storage */ 3738 pcp->cid = cpu_cid_to_cid(pcp->cid); 3739 mm_drop_cid(mm, pcp->cid); 3740 } 3741 3742 static inline unsigned int __mm_get_cid(struct mm_struct *mm, unsigned int max_cids) 3743 { 3744 unsigned int cid = find_first_zero_bit(mm_cidmask(mm), max_cids); 3745 3746 if (cid >= max_cids) 3747 return MM_CID_UNSET; 3748 if (test_and_set_bit(cid, mm_cidmask(mm))) 3749 return MM_CID_UNSET; 3750 return cid; 3751 } 3752 3753 static inline unsigned int mm_get_cid(struct mm_struct *mm) 3754 { 3755 unsigned int cid = __mm_get_cid(mm, READ_ONCE(mm->mm_cid.max_cids)); 3756 3757 while (cid == MM_CID_UNSET) { 3758 cpu_relax(); 3759 cid = __mm_get_cid(mm, num_possible_cpus()); 3760 } 3761 return cid; 3762 } 3763 3764 static inline unsigned int mm_cid_converge(struct mm_struct *mm, unsigned int orig_cid, 3765 unsigned int max_cids) 3766 { 3767 unsigned int new_cid, cid = cpu_cid_to_cid(orig_cid); 3768 3769 /* Is it in the optimal CID space? */ 3770 if (likely(cid < max_cids)) 3771 return orig_cid; 3772 3773 /* Try to find one in the optimal space. Otherwise keep the provided. 
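 * E.g. if max_cids shrank from 8 to 4 while this CID is 6, try to
 * grab a CID below 4 and, on success, release 6 (illustrative).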
*/ 3774 new_cid = __mm_get_cid(mm, max_cids); 3775 if (new_cid != MM_CID_UNSET) { 3776 mm_drop_cid(mm, cid); 3777 /* Preserve the ONCPU mode of the original CID */ 3778 return new_cid | (orig_cid & MM_CID_ONCPU); 3779 } 3780 return orig_cid; 3781 } 3782 3783 static __always_inline void mm_cid_update_task_cid(struct task_struct *t, unsigned int cid) 3784 { 3785 if (t->mm_cid.cid != cid) { 3786 t->mm_cid.cid = cid; 3787 rseq_sched_set_ids_changed(t); 3788 } 3789 } 3790 3791 static __always_inline void mm_cid_update_pcpu_cid(struct mm_struct *mm, unsigned int cid) 3792 { 3793 __this_cpu_write(mm->mm_cid.pcpu->cid, cid); 3794 } 3795 3796 static __always_inline void mm_cid_from_cpu(struct task_struct *t, unsigned int cpu_cid) 3797 { 3798 unsigned int max_cids, tcid = t->mm_cid.cid; 3799 struct mm_struct *mm = t->mm; 3800 3801 max_cids = READ_ONCE(mm->mm_cid.max_cids); 3802 /* Optimize for the common case where both have the ONCPU bit set */ 3803 if (likely(cid_on_cpu(cpu_cid & tcid))) { 3804 if (likely(cpu_cid_to_cid(cpu_cid) < max_cids)) { 3805 mm_cid_update_task_cid(t, cpu_cid); 3806 return; 3807 } 3808 /* Try to converge into the optimal CID space */ 3809 cpu_cid = mm_cid_converge(mm, cpu_cid, max_cids); 3810 } else { 3811 /* Hand over or drop the task owned CID */ 3812 if (cid_on_task(tcid)) { 3813 if (cid_on_cpu(cpu_cid)) 3814 mm_unset_cid_on_task(t); 3815 else 3816 cpu_cid = cid_to_cpu_cid(tcid); 3817 } 3818 /* Still nothing, allocate a new one */ 3819 if (!cid_on_cpu(cpu_cid)) 3820 cpu_cid = cid_to_cpu_cid(mm_get_cid(mm)); 3821 } 3822 mm_cid_update_pcpu_cid(mm, cpu_cid); 3823 mm_cid_update_task_cid(t, cpu_cid); 3824 } 3825 3826 static __always_inline void mm_cid_from_task(struct task_struct *t, unsigned int cpu_cid) 3827 { 3828 unsigned int max_cids, tcid = t->mm_cid.cid; 3829 struct mm_struct *mm = t->mm; 3830 3831 max_cids = READ_ONCE(mm->mm_cid.max_cids); 3832 /* Optimize for the common case, where both have the ONCPU bit clear */ 3833 if (likely(cid_on_task(tcid | cpu_cid))) { 3834 if (likely(tcid < max_cids)) { 3835 mm_cid_update_pcpu_cid(mm, tcid); 3836 return; 3837 } 3838 /* Try to converge into the optimal CID space */ 3839 tcid = mm_cid_converge(mm, tcid, max_cids); 3840 } else { 3841 /* Hand over or drop the CPU owned CID */ 3842 if (cid_on_cpu(cpu_cid)) { 3843 if (cid_on_task(tcid)) 3844 mm_drop_cid_on_cpu(mm, this_cpu_ptr(mm->mm_cid.pcpu)); 3845 else 3846 tcid = cpu_cid_to_cid(cpu_cid); 3847 } 3848 /* Still nothing, allocate a new one */ 3849 if (!cid_on_task(tcid)) 3850 tcid = mm_get_cid(mm); 3851 /* Set the transition mode flag if required */ 3852 tcid |= READ_ONCE(mm->mm_cid.transit); 3853 } 3854 mm_cid_update_pcpu_cid(mm, tcid); 3855 mm_cid_update_task_cid(t, tcid); 3856 } 3857 3858 static __always_inline void mm_cid_schedin(struct task_struct *next) 3859 { 3860 struct mm_struct *mm = next->mm; 3861 unsigned int cpu_cid; 3862 3863 if (!next->mm_cid.active) 3864 return; 3865 3866 cpu_cid = __this_cpu_read(mm->mm_cid.pcpu->cid); 3867 if (likely(!READ_ONCE(mm->mm_cid.percpu))) 3868 mm_cid_from_task(next, cpu_cid); 3869 else 3870 mm_cid_from_cpu(next, cpu_cid); 3871 } 3872 3873 static __always_inline void mm_cid_schedout(struct task_struct *prev) 3874 { 3875 /* During mode transitions CIDs are temporary and need to be dropped */ 3876 if (likely(!cid_in_transit(prev->mm_cid.cid))) 3877 return; 3878 3879 mm_drop_cid(prev->mm, cid_from_transit_cid(prev->mm_cid.cid)); 3880 prev->mm_cid.cid = MM_CID_UNSET; 3881 } 3882 3883 static inline void mm_cid_switch_to(struct task_struct 
static inline void mm_cid_switch_to(struct task_struct *prev, struct task_struct *next)
{
        mm_cid_schedout(prev);
        mm_cid_schedin(next);
}

#else /* !CONFIG_SCHED_MM_CID: */
static inline void mm_cid_switch_to(struct task_struct *prev, struct task_struct *next) { }
#endif /* !CONFIG_SCHED_MM_CID */

extern u64 avg_vruntime(struct cfs_rq *cfs_rq);
extern int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se);

static inline
void move_queued_task_locked(struct rq *src_rq, struct rq *dst_rq, struct task_struct *task)
{
        lockdep_assert_rq_held(src_rq);
        lockdep_assert_rq_held(dst_rq);

        deactivate_task(src_rq, task, 0);
        set_task_cpu(task, dst_rq->cpu);
        activate_task(dst_rq, task, 0);
}

static inline
bool task_is_pushable(struct rq *rq, struct task_struct *p, int cpu)
{
        if (!task_on_cpu(rq, p) &&
            cpumask_test_cpu(cpu, &p->cpus_mask))
                return true;

        return false;
}

#ifdef CONFIG_RT_MUTEXES

static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
{
        if (pi_task)
                prio = min(prio, pi_task->prio);

        return prio;
}

static inline int rt_effective_prio(struct task_struct *p, int prio)
{
        struct task_struct *pi_task = rt_mutex_get_top_task(p);

        return __rt_effective_prio(pi_task, prio);
}

#else /* !CONFIG_RT_MUTEXES: */

static inline int rt_effective_prio(struct task_struct *p, int prio)
{
        return prio;
}

#endif /* !CONFIG_RT_MUTEXES */

extern int __sched_setscheduler(struct task_struct *p, const struct sched_attr *attr, bool user, bool pi);
extern int __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx);
extern const struct sched_class *__setscheduler_class(int policy, int prio);
extern void set_load_weight(struct task_struct *p, bool update_load);
extern void enqueue_task(struct rq *rq, struct task_struct *p, int flags);
extern bool dequeue_task(struct rq *rq, struct task_struct *p, int flags);

extern struct balance_callback *splice_balance_callbacks(struct rq *rq);
extern void balance_callbacks(struct rq *rq, struct balance_callback *head);

/*
 * The 'sched_change' pattern is the safe, easy and slow way of changing a
 * task's scheduling properties. It dequeues a task, such that the scheduler
 * is fully unaware of it; at which point its properties can be modified;
 * after which it is enqueued again.
 *
 * Typically this must be called while holding task_rq_lock, since most/all
 * properties are serialized under those locks. There is currently one
 * exception to this rule in sched/ext which only holds rq->lock.
 */
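/*
 * Typical usage (illustrative sketch only; "new_prio" stands in for the
 * caller's value and the flag choice depends on the call site, DEQUEUE_SAVE
 * is just an example):
 *
 *      struct sched_change_ctx *ctx = sched_change_begin(p, DEQUEUE_SAVE);
 *
 *      p->prio = new_prio;     // modify properties while dequeued
 *      sched_change_end(ctx);
 *
 * or, with the scope based guard defined below, which re-enqueues the task
 * automatically when the scope is left:
 *
 *      scoped_guard (sched_change, p, DEQUEUE_SAVE)
 *              p->prio = new_prio;
 */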
/*
 * This structure is a temporary, used to preserve/convey the queueing state
 * of the task between sched_change_begin() and sched_change_end(), ensuring
 * the task's queueing state is idempotent across the operation.
 */
struct sched_change_ctx {
        u64                     prio;
        struct task_struct      *p;
        int                     flags;
        bool                    queued;
        bool                    running;
};

struct sched_change_ctx *sched_change_begin(struct task_struct *p, unsigned int flags);
void sched_change_end(struct sched_change_ctx *ctx);

DEFINE_CLASS(sched_change, struct sched_change_ctx *,
             sched_change_end(_T),
             sched_change_begin(p, flags),
             struct task_struct *p, unsigned int flags)

DEFINE_CLASS_IS_UNCONDITIONAL(sched_change)

#include "ext.h"

#endif /* _KERNEL_SCHED_SCHED_H */