1 /* SPDX-License-Identifier: GPL-2.0 */ 2 /* 3 * Scheduler internal types and methods: 4 */ 5 #ifndef _KERNEL_SCHED_SCHED_H 6 #define _KERNEL_SCHED_SCHED_H 7 8 #include <linux/sched/affinity.h> 9 #include <linux/sched/autogroup.h> 10 #include <linux/sched/cpufreq.h> 11 #include <linux/sched/deadline.h> 12 #include <linux/sched.h> 13 #include <linux/sched/loadavg.h> 14 #include <linux/sched/mm.h> 15 #include <linux/sched/rseq_api.h> 16 #include <linux/sched/signal.h> 17 #include <linux/sched/smt.h> 18 #include <linux/sched/stat.h> 19 #include <linux/sched/sysctl.h> 20 #include <linux/sched/task_flags.h> 21 #include <linux/sched/task.h> 22 #include <linux/sched/topology.h> 23 24 #include <linux/atomic.h> 25 #include <linux/bitmap.h> 26 #include <linux/bug.h> 27 #include <linux/capability.h> 28 #include <linux/cgroup_api.h> 29 #include <linux/cgroup.h> 30 #include <linux/context_tracking.h> 31 #include <linux/cpufreq.h> 32 #include <linux/cpumask_api.h> 33 #include <linux/ctype.h> 34 #include <linux/file.h> 35 #include <linux/fs_api.h> 36 #include <linux/hrtimer_api.h> 37 #include <linux/interrupt.h> 38 #include <linux/irq_work.h> 39 #include <linux/jiffies.h> 40 #include <linux/kref_api.h> 41 #include <linux/kthread.h> 42 #include <linux/ktime_api.h> 43 #include <linux/lockdep_api.h> 44 #include <linux/lockdep.h> 45 #include <linux/minmax.h> 46 #include <linux/mm.h> 47 #include <linux/module.h> 48 #include <linux/mutex_api.h> 49 #include <linux/plist.h> 50 #include <linux/poll.h> 51 #include <linux/proc_fs.h> 52 #include <linux/profile.h> 53 #include <linux/psi.h> 54 #include <linux/rcupdate.h> 55 #include <linux/seq_file.h> 56 #include <linux/seqlock.h> 57 #include <linux/softirq.h> 58 #include <linux/spinlock_api.h> 59 #include <linux/static_key.h> 60 #include <linux/stop_machine.h> 61 #include <linux/syscalls_api.h> 62 #include <linux/syscalls.h> 63 #include <linux/tick.h> 64 #include <linux/topology.h> 65 #include <linux/types.h> 66 #include <linux/u64_stats_sync_api.h> 67 #include <linux/uaccess.h> 68 #include <linux/wait_api.h> 69 #include <linux/wait_bit.h> 70 #include <linux/workqueue_api.h> 71 72 #include <trace/events/power.h> 73 #include <trace/events/sched.h> 74 75 #include "../workqueue_internal.h" 76 77 #ifdef CONFIG_PARAVIRT 78 # include <asm/paravirt.h> 79 # include <asm/paravirt_api_clock.h> 80 #endif 81 82 #include <asm/barrier.h> 83 84 #include "cpupri.h" 85 #include "cpudeadline.h" 86 87 #ifdef CONFIG_SCHED_DEBUG 88 # define SCHED_WARN_ON(x) WARN_ONCE(x, #x) 89 #else 90 # define SCHED_WARN_ON(x) ({ (void)(x), 0; }) 91 #endif 92 93 struct rq; 94 struct cpuidle_state; 95 96 /* task_struct::on_rq states: */ 97 #define TASK_ON_RQ_QUEUED 1 98 #define TASK_ON_RQ_MIGRATING 2 99 100 extern __read_mostly int scheduler_running; 101 102 extern unsigned long calc_load_update; 103 extern atomic_long_t calc_load_tasks; 104 105 extern void calc_global_load_tick(struct rq *this_rq); 106 extern long calc_load_fold_active(struct rq *this_rq, long adjust); 107 108 extern void call_trace_sched_update_nr_running(struct rq *rq, int count); 109 110 extern int sysctl_sched_rt_period; 111 extern int sysctl_sched_rt_runtime; 112 extern int sched_rr_timeslice; 113 114 /* 115 * Asymmetric CPU capacity bits 116 */ 117 struct asym_cap_data { 118 struct list_head link; 119 struct rcu_head rcu; 120 unsigned long capacity; 121 unsigned long cpus[]; 122 }; 123 124 extern struct list_head asym_cap_list; 125 126 #define cpu_capacity_span(asym_data) to_cpumask((asym_data)->cpus) 127 128 /* 129 * Helpers for 
converting nanosecond timing to jiffy resolution 130 */ 131 #define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ)) 132 133 /* 134 * Increase resolution of nice-level calculations for 64-bit architectures. 135 * The extra resolution improves shares distribution and load balancing of 136 * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup 137 * hierarchies, especially on larger systems. This is not a user-visible change 138 * and does not change the user-interface for setting shares/weights. 139 * 140 * We increase resolution only if we have enough bits to allow this increased 141 * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit 142 * are pretty high and the returns do not justify the increased costs. 143 * 144 * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to 145 * increase coverage and consistency always enable it on 64-bit platforms. 146 */ 147 #ifdef CONFIG_64BIT 148 # define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT) 149 # define scale_load(w) ((w) << SCHED_FIXEDPOINT_SHIFT) 150 # define scale_load_down(w) \ 151 ({ \ 152 unsigned long __w = (w); \ 153 if (__w) \ 154 __w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \ 155 __w; \ 156 }) 157 #else 158 # define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT) 159 # define scale_load(w) (w) 160 # define scale_load_down(w) (w) 161 #endif 162 163 /* 164 * Task weight (visible to users) and its load (invisible to users) have 165 * independent resolution, but they should be well calibrated. We use 166 * scale_load() and scale_load_down(w) to convert between them. The 167 * following must be true: 168 * 169 * scale_load(sched_prio_to_weight[NICE_TO_PRIO(0)-MAX_RT_PRIO]) == NICE_0_LOAD 170 * 171 */ 172 #define NICE_0_LOAD (1L << NICE_0_LOAD_SHIFT) 173 174 /* 175 * Single value that decides SCHED_DEADLINE internal math precision. 176 * 10 -> just above 1us 177 * 9 -> just above 0.5us 178 */ 179 #define DL_SCALE 10 180 181 /* 182 * Single value that denotes runtime == period, ie unlimited time. 183 */ 184 #define RUNTIME_INF ((u64)~0ULL) 185 186 static inline int idle_policy(int policy) 187 { 188 return policy == SCHED_IDLE; 189 } 190 static inline int fair_policy(int policy) 191 { 192 return policy == SCHED_NORMAL || policy == SCHED_BATCH; 193 } 194 195 static inline int rt_policy(int policy) 196 { 197 return policy == SCHED_FIFO || policy == SCHED_RR; 198 } 199 200 static inline int dl_policy(int policy) 201 { 202 return policy == SCHED_DEADLINE; 203 } 204 static inline bool valid_policy(int policy) 205 { 206 return idle_policy(policy) || fair_policy(policy) || 207 rt_policy(policy) || dl_policy(policy); 208 } 209 210 static inline int task_has_idle_policy(struct task_struct *p) 211 { 212 return idle_policy(p->policy); 213 } 214 215 static inline int task_has_rt_policy(struct task_struct *p) 216 { 217 return rt_policy(p->policy); 218 } 219 220 static inline int task_has_dl_policy(struct task_struct *p) 221 { 222 return dl_policy(p->policy); 223 } 224 225 #define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT) 226 227 static inline void update_avg(u64 *avg, u64 sample) 228 { 229 s64 diff = sample - *avg; 230 *avg += diff / 8; 231 } 232 233 /* 234 * Shifting a value by an exponent greater *or equal* to the size of said value 235 * is UB; cap at size-1. 236 */ 237 #define shr_bound(val, shift) \ 238 (val >> min_t(typeof(shift), shift, BITS_PER_TYPE(typeof(val)) - 1)) 239 240 /* 241 * !! For sched_setattr_nocheck() (kernel) only !! 
242 * 243 * This is actually gross. :( 244 * 245 * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE 246 * tasks, but still be able to sleep. We need this on platforms that cannot 247 * atomically change clock frequency. Remove once fast switching will be 248 * available on such platforms. 249 * 250 * SUGOV stands for SchedUtil GOVernor. 251 */ 252 #define SCHED_FLAG_SUGOV 0x10000000 253 254 #define SCHED_DL_FLAGS (SCHED_FLAG_RECLAIM | SCHED_FLAG_DL_OVERRUN | SCHED_FLAG_SUGOV) 255 256 static inline bool dl_entity_is_special(const struct sched_dl_entity *dl_se) 257 { 258 #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL 259 return unlikely(dl_se->flags & SCHED_FLAG_SUGOV); 260 #else 261 return false; 262 #endif 263 } 264 265 /* 266 * Tells if entity @a should preempt entity @b. 267 */ 268 static inline bool dl_entity_preempt(const struct sched_dl_entity *a, 269 const struct sched_dl_entity *b) 270 { 271 return dl_entity_is_special(a) || 272 dl_time_before(a->deadline, b->deadline); 273 } 274 275 /* 276 * This is the priority-queue data structure of the RT scheduling class: 277 */ 278 struct rt_prio_array { 279 DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */ 280 struct list_head queue[MAX_RT_PRIO]; 281 }; 282 283 struct rt_bandwidth { 284 /* nests inside the rq lock: */ 285 raw_spinlock_t rt_runtime_lock; 286 ktime_t rt_period; 287 u64 rt_runtime; 288 struct hrtimer rt_period_timer; 289 unsigned int rt_period_active; 290 }; 291 292 static inline int dl_bandwidth_enabled(void) 293 { 294 return sysctl_sched_rt_runtime >= 0; 295 } 296 297 /* 298 * To keep the bandwidth of -deadline tasks under control 299 * we need some place where: 300 * - store the maximum -deadline bandwidth of each cpu; 301 * - cache the fraction of bandwidth that is currently allocated in 302 * each root domain; 303 * 304 * This is all done in the data structure below. It is similar to the 305 * one used for RT-throttling (rt_bandwidth), with the main difference 306 * that, since here we are only interested in admission control, we 307 * do not decrease any runtime while the group "executes", neither we 308 * need a timer to replenish it. 309 * 310 * With respect to SMP, bandwidth is given on a per root domain basis, 311 * meaning that: 312 * - bw (< 100%) is the deadline bandwidth of each CPU; 313 * - total_bw is the currently allocated bandwidth in each root domain; 314 */ 315 struct dl_bw { 316 raw_spinlock_t lock; 317 u64 bw; 318 u64 total_bw; 319 }; 320 321 extern void init_dl_bw(struct dl_bw *dl_b); 322 extern int sched_dl_global_validate(void); 323 extern void sched_dl_do_global(void); 324 extern int sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr); 325 extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr); 326 extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr); 327 extern bool __checkparam_dl(const struct sched_attr *attr); 328 extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr); 329 extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial); 330 extern int dl_bw_check_overflow(int cpu); 331 332 /* 333 * SCHED_DEADLINE supports servers (nested scheduling) with the following 334 * interface: 335 * 336 * dl_se::rq -- runqueue we belong to. 337 * 338 * dl_se::server_has_tasks() -- used on bandwidth enforcement; we 'stop' the 339 * server when it runs out of tasks to run. 
 *
 * dl_se::server_pick() -- nested pick_next_task(); we yield the period if this
 *                         returns NULL.
 *
 * dl_server_update() -- called from update_curr_common(), propagates runtime
 *                       to the server.
 *
 * dl_server_start()
 * dl_server_stop() -- start/stop the server when it has (no) tasks.
 *
 * dl_server_init() -- initializes the server.
 */
extern void dl_server_update(struct sched_dl_entity *dl_se, s64 delta_exec);
extern void dl_server_start(struct sched_dl_entity *dl_se);
extern void dl_server_stop(struct sched_dl_entity *dl_se);
extern void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq,
		    dl_server_has_tasks_f has_tasks,
		    dl_server_pick_f pick);

#ifdef CONFIG_CGROUP_SCHED

struct cfs_rq;
struct rt_rq;

extern struct list_head task_groups;

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t		lock;
	ktime_t			period;
	u64			quota;
	u64			runtime;
	u64			burst;
	u64			runtime_snap;
	s64			hierarchical_quota;

	u8			idle;
	u8			period_active;
	u8			slack_started;
	struct hrtimer		period_timer;
	struct hrtimer		slack_timer;
	struct list_head	throttled_cfs_rq;

	/* Statistics: */
	int			nr_periods;
	int			nr_throttled;
	int			nr_burst;
	u64			throttled_time;
	u64			burst_time;
#endif
};

/* Task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each CPU */
	struct sched_entity	**se;
	/* runqueue "owned" by this group on each CPU */
	struct cfs_rq		**cfs_rq;
	unsigned long		shares;

	/* A positive value indicates that this is a SCHED_IDLE group. */
	int			idle;

#ifdef CONFIG_SMP
	/*
	 * load_avg can be heavily contended at clock tick time, so put
	 * it in its own cacheline separated from the fields above which
	 * will also be accessed at each tick.
	 */
	atomic_long_t		load_avg ____cacheline_aligned;
#endif
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity	**rt_se;
	struct rt_rq		**rt_rq;

	struct rt_bandwidth	rt_bandwidth;
#endif

	struct rcu_head		rcu;
	struct list_head	list;

	struct task_group	*parent;
	struct list_head	siblings;
	struct list_head	children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup	*autogroup;
#endif

	struct cfs_bandwidth	cfs_bandwidth;

#ifdef CONFIG_UCLAMP_TASK_GROUP
	/* The two decimal precision [%] value requested from user-space */
	unsigned int		uclamp_pct[UCLAMP_CNT];
	/* Clamp values requested for a task group */
	struct uclamp_se	uclamp_req[UCLAMP_CNT];
	/* Effective clamp values used for a task group */
	struct uclamp_se	uclamp[UCLAMP_CNT];
#endif

};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so an entity's weight should not be too large, and
 * neither should the shares value of a task group.
 * (The default weight is 1024 - so there's no practical
 * limitation from this.)
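 *
 * For illustration only (a sketch, not a quote of the actual setter): a
 * shares update would be expected to clamp the user-supplied value into
 * the [MIN_SHARES, MAX_SHARES] range defined below before storing it,
 * roughly:
 *
 *	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
 *	tg->shares = shares;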
457 */ 458 #define MIN_SHARES (1UL << 1) 459 #define MAX_SHARES (1UL << 18) 460 #endif 461 462 typedef int (*tg_visitor)(struct task_group *, void *); 463 464 extern int walk_tg_tree_from(struct task_group *from, 465 tg_visitor down, tg_visitor up, void *data); 466 467 /* 468 * Iterate the full tree, calling @down when first entering a node and @up when 469 * leaving it for the final time. 470 * 471 * Caller must hold rcu_lock or sufficient equivalent. 472 */ 473 static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data) 474 { 475 return walk_tg_tree_from(&root_task_group, down, up, data); 476 } 477 478 extern int tg_nop(struct task_group *tg, void *data); 479 480 #ifdef CONFIG_FAIR_GROUP_SCHED 481 extern void free_fair_sched_group(struct task_group *tg); 482 extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent); 483 extern void online_fair_sched_group(struct task_group *tg); 484 extern void unregister_fair_sched_group(struct task_group *tg); 485 #else 486 static inline void free_fair_sched_group(struct task_group *tg) { } 487 static inline int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) 488 { 489 return 1; 490 } 491 static inline void online_fair_sched_group(struct task_group *tg) { } 492 static inline void unregister_fair_sched_group(struct task_group *tg) { } 493 #endif 494 495 extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, 496 struct sched_entity *se, int cpu, 497 struct sched_entity *parent); 498 extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b, struct cfs_bandwidth *parent); 499 500 extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b); 501 extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b); 502 extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq); 503 extern bool cfs_task_bw_constrained(struct task_struct *p); 504 505 extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, 506 struct sched_rt_entity *rt_se, int cpu, 507 struct sched_rt_entity *parent); 508 extern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us); 509 extern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us); 510 extern long sched_group_rt_runtime(struct task_group *tg); 511 extern long sched_group_rt_period(struct task_group *tg); 512 extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk); 513 514 extern struct task_group *sched_create_group(struct task_group *parent); 515 extern void sched_online_group(struct task_group *tg, 516 struct task_group *parent); 517 extern void sched_destroy_group(struct task_group *tg); 518 extern void sched_release_group(struct task_group *tg); 519 520 extern void sched_move_task(struct task_struct *tsk); 521 522 #ifdef CONFIG_FAIR_GROUP_SCHED 523 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares); 524 525 extern int sched_group_set_idle(struct task_group *tg, long idle); 526 527 #ifdef CONFIG_SMP 528 extern void set_task_rq_fair(struct sched_entity *se, 529 struct cfs_rq *prev, struct cfs_rq *next); 530 #else /* !CONFIG_SMP */ 531 static inline void set_task_rq_fair(struct sched_entity *se, 532 struct cfs_rq *prev, struct cfs_rq *next) { } 533 #endif /* CONFIG_SMP */ 534 #endif /* CONFIG_FAIR_GROUP_SCHED */ 535 536 #else /* CONFIG_CGROUP_SCHED */ 537 538 struct cfs_bandwidth { }; 539 static inline bool cfs_task_bw_constrained(struct task_struct *p) { return false; } 540 541 #endif /* CONFIG_CGROUP_SCHED */ 542 543 extern void 
unregister_rt_sched_group(struct task_group *tg); 544 extern void free_rt_sched_group(struct task_group *tg); 545 extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent); 546 547 /* 548 * u64_u32_load/u64_u32_store 549 * 550 * Use a copy of a u64 value to protect against data race. This is only 551 * applicable for 32-bits architectures. 552 */ 553 #ifdef CONFIG_64BIT 554 # define u64_u32_load_copy(var, copy) var 555 # define u64_u32_store_copy(var, copy, val) (var = val) 556 #else 557 # define u64_u32_load_copy(var, copy) \ 558 ({ \ 559 u64 __val, __val_copy; \ 560 do { \ 561 __val_copy = copy; \ 562 /* \ 563 * paired with u64_u32_store_copy(), ordering access \ 564 * to var and copy. \ 565 */ \ 566 smp_rmb(); \ 567 __val = var; \ 568 } while (__val != __val_copy); \ 569 __val; \ 570 }) 571 # define u64_u32_store_copy(var, copy, val) \ 572 do { \ 573 typeof(val) __val = (val); \ 574 var = __val; \ 575 /* \ 576 * paired with u64_u32_load_copy(), ordering access to var and \ 577 * copy. \ 578 */ \ 579 smp_wmb(); \ 580 copy = __val; \ 581 } while (0) 582 #endif 583 # define u64_u32_load(var) u64_u32_load_copy(var, var##_copy) 584 # define u64_u32_store(var, val) u64_u32_store_copy(var, var##_copy, val) 585 586 /* CFS-related fields in a runqueue */ 587 struct cfs_rq { 588 struct load_weight load; 589 unsigned int nr_running; 590 unsigned int h_nr_running; /* SCHED_{NORMAL,BATCH,IDLE} */ 591 unsigned int idle_nr_running; /* SCHED_IDLE */ 592 unsigned int idle_h_nr_running; /* SCHED_IDLE */ 593 594 s64 avg_vruntime; 595 u64 avg_load; 596 597 u64 exec_clock; 598 u64 min_vruntime; 599 #ifdef CONFIG_SCHED_CORE 600 unsigned int forceidle_seq; 601 u64 min_vruntime_fi; 602 #endif 603 604 #ifndef CONFIG_64BIT 605 u64 min_vruntime_copy; 606 #endif 607 608 struct rb_root_cached tasks_timeline; 609 610 /* 611 * 'curr' points to currently running entity on this cfs_rq. 612 * It is set to NULL otherwise (i.e when none are currently running). 613 */ 614 struct sched_entity *curr; 615 struct sched_entity *next; 616 617 #ifdef CONFIG_SCHED_DEBUG 618 unsigned int nr_spread_over; 619 #endif 620 621 #ifdef CONFIG_SMP 622 /* 623 * CFS load tracking 624 */ 625 struct sched_avg avg; 626 #ifndef CONFIG_64BIT 627 u64 last_update_time_copy; 628 #endif 629 struct { 630 raw_spinlock_t lock ____cacheline_aligned; 631 int nr; 632 unsigned long load_avg; 633 unsigned long util_avg; 634 unsigned long runnable_avg; 635 } removed; 636 637 #ifdef CONFIG_FAIR_GROUP_SCHED 638 u64 last_update_tg_load_avg; 639 unsigned long tg_load_avg_contrib; 640 long propagate; 641 long prop_runnable_sum; 642 643 /* 644 * h_load = weight * f(tg) 645 * 646 * Where f(tg) is the recursive weight fraction assigned to 647 * this group. 648 */ 649 unsigned long h_load; 650 u64 last_h_load_update; 651 struct sched_entity *h_load_next; 652 #endif /* CONFIG_FAIR_GROUP_SCHED */ 653 #endif /* CONFIG_SMP */ 654 655 #ifdef CONFIG_FAIR_GROUP_SCHED 656 struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */ 657 658 /* 659 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in 660 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities 661 * (like users, containers etc.) 662 * 663 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a CPU. 664 * This list is used during load balance. 
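 *
 * Illustrative sketch of a walker over this list (update_blocked_load()
 * is a hypothetical per-cfs_rq helper here, not necessarily the real one):
 *
 *	struct cfs_rq *cfs_rq;
 *
 *	list_for_each_entry(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
 *		update_blocked_load(cfs_rq);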
665 */ 666 int on_list; 667 struct list_head leaf_cfs_rq_list; 668 struct task_group *tg; /* group that "owns" this runqueue */ 669 670 /* Locally cached copy of our task_group's idle value */ 671 int idle; 672 673 #ifdef CONFIG_CFS_BANDWIDTH 674 int runtime_enabled; 675 s64 runtime_remaining; 676 677 u64 throttled_pelt_idle; 678 #ifndef CONFIG_64BIT 679 u64 throttled_pelt_idle_copy; 680 #endif 681 u64 throttled_clock; 682 u64 throttled_clock_pelt; 683 u64 throttled_clock_pelt_time; 684 u64 throttled_clock_self; 685 u64 throttled_clock_self_time; 686 int throttled; 687 int throttle_count; 688 struct list_head throttled_list; 689 struct list_head throttled_csd_list; 690 #endif /* CONFIG_CFS_BANDWIDTH */ 691 #endif /* CONFIG_FAIR_GROUP_SCHED */ 692 }; 693 694 static inline int rt_bandwidth_enabled(void) 695 { 696 return sysctl_sched_rt_runtime >= 0; 697 } 698 699 /* RT IPI pull logic requires IRQ_WORK */ 700 #if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP) 701 # define HAVE_RT_PUSH_IPI 702 #endif 703 704 /* Real-Time classes' related field in a runqueue: */ 705 struct rt_rq { 706 struct rt_prio_array active; 707 unsigned int rt_nr_running; 708 unsigned int rr_nr_running; 709 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED 710 struct { 711 int curr; /* highest queued rt task prio */ 712 #ifdef CONFIG_SMP 713 int next; /* next highest */ 714 #endif 715 } highest_prio; 716 #endif 717 #ifdef CONFIG_SMP 718 bool overloaded; 719 struct plist_head pushable_tasks; 720 721 #endif /* CONFIG_SMP */ 722 int rt_queued; 723 724 int rt_throttled; 725 u64 rt_time; 726 u64 rt_runtime; 727 /* Nests inside the rq lock: */ 728 raw_spinlock_t rt_runtime_lock; 729 730 #ifdef CONFIG_RT_GROUP_SCHED 731 unsigned int rt_nr_boosted; 732 733 struct rq *rq; 734 struct task_group *tg; 735 #endif 736 }; 737 738 static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq) 739 { 740 return rt_rq->rt_queued && rt_rq->rt_nr_running; 741 } 742 743 /* Deadline class' related fields in a runqueue */ 744 struct dl_rq { 745 /* runqueue is an rbtree, ordered by deadline */ 746 struct rb_root_cached root; 747 748 unsigned int dl_nr_running; 749 750 #ifdef CONFIG_SMP 751 /* 752 * Deadline values of the currently executing and the 753 * earliest ready task on this rq. Caching these facilitates 754 * the decision whether or not a ready but not running task 755 * should migrate somewhere else. 756 */ 757 struct { 758 u64 curr; 759 u64 next; 760 } earliest_dl; 761 762 bool overloaded; 763 764 /* 765 * Tasks on this rq that can be pushed away. They are kept in 766 * an rb-tree, ordered by tasks' deadlines, with caching 767 * of the leftmost (earliest deadline) element. 768 */ 769 struct rb_root_cached pushable_dl_tasks_root; 770 #else 771 struct dl_bw dl_bw; 772 #endif 773 /* 774 * "Active utilization" for this runqueue: increased when a 775 * task wakes up (becomes TASK_RUNNING) and decreased when a 776 * task blocks 777 */ 778 u64 running_bw; 779 780 /* 781 * Utilization of the tasks "assigned" to this runqueue (including 782 * the tasks that are in runqueue and the tasks that executed on this 783 * CPU and blocked). Increased when a task moves to this runqueue, and 784 * decreased when the task moves away (migrates, changes scheduling 785 * policy, or terminates). 786 * This is needed to compute the "inactive utilization" for the 787 * runqueue (inactive utilization = this_bw - running_bw). 
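 *
 * Purely numerical illustration: if the tasks assigned to this runqueue
 * amount to 50% of a CPU (this_bw) and the runnable ones among them
 * account for 30% (running_bw), the inactive utilization is
 * 50% - 30% = 20%.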
788 */ 789 u64 this_bw; 790 u64 extra_bw; 791 792 /* 793 * Maximum available bandwidth for reclaiming by SCHED_FLAG_RECLAIM 794 * tasks of this rq. Used in calculation of reclaimable bandwidth(GRUB). 795 */ 796 u64 max_bw; 797 798 /* 799 * Inverse of the fraction of CPU utilization that can be reclaimed 800 * by the GRUB algorithm. 801 */ 802 u64 bw_ratio; 803 }; 804 805 #ifdef CONFIG_FAIR_GROUP_SCHED 806 /* An entity is a task if it doesn't "own" a runqueue */ 807 #define entity_is_task(se) (!se->my_q) 808 809 static inline void se_update_runnable(struct sched_entity *se) 810 { 811 if (!entity_is_task(se)) 812 se->runnable_weight = se->my_q->h_nr_running; 813 } 814 815 static inline long se_runnable(struct sched_entity *se) 816 { 817 if (entity_is_task(se)) 818 return !!se->on_rq; 819 else 820 return se->runnable_weight; 821 } 822 823 #else 824 #define entity_is_task(se) 1 825 826 static inline void se_update_runnable(struct sched_entity *se) {} 827 828 static inline long se_runnable(struct sched_entity *se) 829 { 830 return !!se->on_rq; 831 } 832 #endif 833 834 #ifdef CONFIG_SMP 835 /* 836 * XXX we want to get rid of these helpers and use the full load resolution. 837 */ 838 static inline long se_weight(struct sched_entity *se) 839 { 840 return scale_load_down(se->load.weight); 841 } 842 843 844 static inline bool sched_asym_prefer(int a, int b) 845 { 846 return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b); 847 } 848 849 struct perf_domain { 850 struct em_perf_domain *em_pd; 851 struct perf_domain *next; 852 struct rcu_head rcu; 853 }; 854 855 /* 856 * We add the notion of a root-domain which will be used to define per-domain 857 * variables. Each exclusive cpuset essentially defines an island domain by 858 * fully partitioning the member CPUs from any other cpuset. Whenever a new 859 * exclusive cpuset is created, we also create and attach a new root-domain 860 * object. 861 * 862 */ 863 struct root_domain { 864 atomic_t refcount; 865 atomic_t rto_count; 866 struct rcu_head rcu; 867 cpumask_var_t span; 868 cpumask_var_t online; 869 870 /* 871 * Indicate pullable load on at least one CPU, e.g: 872 * - More than one runnable task 873 * - Running task is misfit 874 */ 875 bool overloaded; 876 877 /* Indicate one or more cpus over-utilized (tipping point) */ 878 bool overutilized; 879 880 /* 881 * The bit corresponding to a CPU gets set here if such CPU has more 882 * than one runnable -deadline task (as it is below for RT tasks). 883 */ 884 cpumask_var_t dlo_mask; 885 atomic_t dlo_count; 886 struct dl_bw dl_bw; 887 struct cpudl cpudl; 888 889 /* 890 * Indicate whether a root_domain's dl_bw has been checked or 891 * updated. It's monotonously increasing value. 892 * 893 * Also, some corner cases, like 'wrap around' is dangerous, but given 894 * that u64 is 'big enough'. So that shouldn't be a concern. 895 */ 896 u64 visit_gen; 897 898 #ifdef HAVE_RT_PUSH_IPI 899 /* 900 * For IPI pull requests, loop across the rto_mask. 901 */ 902 struct irq_work rto_push_work; 903 raw_spinlock_t rto_lock; 904 /* These are only updated and read within rto_lock */ 905 int rto_loop; 906 int rto_cpu; 907 /* These atomics are updated outside of a lock */ 908 atomic_t rto_loop_next; 909 atomic_t rto_loop_start; 910 #endif 911 /* 912 * The "RT overload" flag: it gets set if a CPU has more than 913 * one runnable RT task. 914 */ 915 cpumask_var_t rto_mask; 916 struct cpupri cpupri; 917 918 /* 919 * NULL-terminated list of performance domains intersecting with the 920 * CPUs of the rd. Protected by RCU. 
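 *
 * Illustrative reader-side sketch (consider() is a hypothetical consumer,
 * and the rcu_read_lock() may already be held by the caller):
 *
 *	rcu_read_lock();
 *	for (pd = rcu_dereference(rd->pd); pd; pd = pd->next)
 *		consider(pd->em_pd);
 *	rcu_read_unlock();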
921 */ 922 struct perf_domain __rcu *pd; 923 }; 924 925 extern void init_defrootdomain(void); 926 extern int sched_init_domains(const struct cpumask *cpu_map); 927 extern void rq_attach_root(struct rq *rq, struct root_domain *rd); 928 extern void sched_get_rd(struct root_domain *rd); 929 extern void sched_put_rd(struct root_domain *rd); 930 931 static inline int get_rd_overloaded(struct root_domain *rd) 932 { 933 return READ_ONCE(rd->overloaded); 934 } 935 936 static inline void set_rd_overloaded(struct root_domain *rd, int status) 937 { 938 if (get_rd_overloaded(rd) != status) 939 WRITE_ONCE(rd->overloaded, status); 940 } 941 942 #ifdef HAVE_RT_PUSH_IPI 943 extern void rto_push_irq_work_func(struct irq_work *work); 944 #endif 945 #endif /* CONFIG_SMP */ 946 947 #ifdef CONFIG_UCLAMP_TASK 948 /* 949 * struct uclamp_bucket - Utilization clamp bucket 950 * @value: utilization clamp value for tasks on this clamp bucket 951 * @tasks: number of RUNNABLE tasks on this clamp bucket 952 * 953 * Keep track of how many tasks are RUNNABLE for a given utilization 954 * clamp value. 955 */ 956 struct uclamp_bucket { 957 unsigned long value : bits_per(SCHED_CAPACITY_SCALE); 958 unsigned long tasks : BITS_PER_LONG - bits_per(SCHED_CAPACITY_SCALE); 959 }; 960 961 /* 962 * struct uclamp_rq - rq's utilization clamp 963 * @value: currently active clamp values for a rq 964 * @bucket: utilization clamp buckets affecting a rq 965 * 966 * Keep track of RUNNABLE tasks on a rq to aggregate their clamp values. 967 * A clamp value is affecting a rq when there is at least one task RUNNABLE 968 * (or actually running) with that value. 969 * 970 * There are up to UCLAMP_CNT possible different clamp values, currently there 971 * are only two: minimum utilization and maximum utilization. 972 * 973 * All utilization clamping values are MAX aggregated, since: 974 * - for util_min: we want to run the CPU at least at the max of the minimum 975 * utilization required by its currently RUNNABLE tasks. 976 * - for util_max: we want to allow the CPU to run up to the max of the 977 * maximum utilization allowed by its currently RUNNABLE tasks. 978 * 979 * Since on each system we expect only a limited number of different 980 * utilization clamp values (UCLAMP_BUCKETS), use a simple array to track 981 * the metrics required to compute all the per-rq utilization clamp values. 982 */ 983 struct uclamp_rq { 984 unsigned int value; 985 struct uclamp_bucket bucket[UCLAMP_BUCKETS]; 986 }; 987 988 DECLARE_STATIC_KEY_FALSE(sched_uclamp_used); 989 #endif /* CONFIG_UCLAMP_TASK */ 990 991 struct rq; 992 struct balance_callback { 993 struct balance_callback *next; 994 void (*func)(struct rq *rq); 995 }; 996 997 /* 998 * This is the main, per-CPU runqueue data structure. 999 * 1000 * Locking rule: those places that want to lock multiple runqueues 1001 * (such as the load balancing or the thread migration code), lock 1002 * acquire operations must be ordered by ascending &runqueue. 
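 *
 * An illustrative sketch of that ordering (double_rq_lock() in core.c is
 * the real helper; this is only meant to show the rule):
 *
 *	if (rq1 == rq2) {
 *		raw_spin_rq_lock(rq1);
 *	} else if (rq1 < rq2) {
 *		raw_spin_rq_lock(rq1);
 *		raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);
 *	} else {
 *		raw_spin_rq_lock(rq2);
 *		raw_spin_rq_lock_nested(rq1, SINGLE_DEPTH_NESTING);
 *	}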
1003 */ 1004 struct rq { 1005 /* runqueue lock: */ 1006 raw_spinlock_t __lock; 1007 1008 unsigned int nr_running; 1009 #ifdef CONFIG_NUMA_BALANCING 1010 unsigned int nr_numa_running; 1011 unsigned int nr_preferred_running; 1012 unsigned int numa_migrate_on; 1013 #endif 1014 #ifdef CONFIG_NO_HZ_COMMON 1015 #ifdef CONFIG_SMP 1016 unsigned long last_blocked_load_update_tick; 1017 unsigned int has_blocked_load; 1018 call_single_data_t nohz_csd; 1019 #endif /* CONFIG_SMP */ 1020 unsigned int nohz_tick_stopped; 1021 atomic_t nohz_flags; 1022 #endif /* CONFIG_NO_HZ_COMMON */ 1023 1024 #ifdef CONFIG_SMP 1025 unsigned int ttwu_pending; 1026 #endif 1027 u64 nr_switches; 1028 1029 #ifdef CONFIG_UCLAMP_TASK 1030 /* Utilization clamp values based on CPU's RUNNABLE tasks */ 1031 struct uclamp_rq uclamp[UCLAMP_CNT] ____cacheline_aligned; 1032 unsigned int uclamp_flags; 1033 #define UCLAMP_FLAG_IDLE 0x01 1034 #endif 1035 1036 struct cfs_rq cfs; 1037 struct rt_rq rt; 1038 struct dl_rq dl; 1039 1040 #ifdef CONFIG_FAIR_GROUP_SCHED 1041 /* list of leaf cfs_rq on this CPU: */ 1042 struct list_head leaf_cfs_rq_list; 1043 struct list_head *tmp_alone_branch; 1044 #endif /* CONFIG_FAIR_GROUP_SCHED */ 1045 1046 /* 1047 * This is part of a global counter where only the total sum 1048 * over all CPUs matters. A task can increase this counter on 1049 * one CPU and if it got migrated afterwards it may decrease 1050 * it on another CPU. Always updated under the runqueue lock: 1051 */ 1052 unsigned int nr_uninterruptible; 1053 1054 struct task_struct __rcu *curr; 1055 struct task_struct *idle; 1056 struct task_struct *stop; 1057 unsigned long next_balance; 1058 struct mm_struct *prev_mm; 1059 1060 unsigned int clock_update_flags; 1061 u64 clock; 1062 /* Ensure that all clocks are in the same cache line */ 1063 u64 clock_task ____cacheline_aligned; 1064 u64 clock_pelt; 1065 unsigned long lost_idle_time; 1066 u64 clock_pelt_idle; 1067 u64 clock_idle; 1068 #ifndef CONFIG_64BIT 1069 u64 clock_pelt_idle_copy; 1070 u64 clock_idle_copy; 1071 #endif 1072 1073 atomic_t nr_iowait; 1074 1075 #ifdef CONFIG_SCHED_DEBUG 1076 u64 last_seen_need_resched_ns; 1077 int ticks_without_resched; 1078 #endif 1079 1080 #ifdef CONFIG_MEMBARRIER 1081 int membarrier_state; 1082 #endif 1083 1084 #ifdef CONFIG_SMP 1085 struct root_domain *rd; 1086 struct sched_domain __rcu *sd; 1087 1088 unsigned long cpu_capacity; 1089 1090 struct balance_callback *balance_callback; 1091 1092 unsigned char nohz_idle_balance; 1093 unsigned char idle_balance; 1094 1095 unsigned long misfit_task_load; 1096 1097 /* For active balancing */ 1098 int active_balance; 1099 int push_cpu; 1100 struct cpu_stop_work active_balance_work; 1101 1102 /* CPU of this runqueue: */ 1103 int cpu; 1104 int online; 1105 1106 struct list_head cfs_tasks; 1107 1108 struct sched_avg avg_rt; 1109 struct sched_avg avg_dl; 1110 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ 1111 struct sched_avg avg_irq; 1112 #endif 1113 #ifdef CONFIG_SCHED_HW_PRESSURE 1114 struct sched_avg avg_hw; 1115 #endif 1116 u64 idle_stamp; 1117 u64 avg_idle; 1118 1119 /* This is used to determine avg_idle's max value */ 1120 u64 max_idle_balance_cost; 1121 1122 #ifdef CONFIG_HOTPLUG_CPU 1123 struct rcuwait hotplug_wait; 1124 #endif 1125 #endif /* CONFIG_SMP */ 1126 1127 #ifdef CONFIG_IRQ_TIME_ACCOUNTING 1128 u64 prev_irq_time; 1129 #endif 1130 #ifdef CONFIG_PARAVIRT 1131 u64 prev_steal_time; 1132 #endif 1133 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING 1134 u64 prev_steal_time_rq; 1135 #endif 1136 1137 /* calc_load related fields */ 1138 
unsigned long calc_load_update; 1139 long calc_load_active; 1140 1141 #ifdef CONFIG_SCHED_HRTICK 1142 #ifdef CONFIG_SMP 1143 call_single_data_t hrtick_csd; 1144 #endif 1145 struct hrtimer hrtick_timer; 1146 ktime_t hrtick_time; 1147 #endif 1148 1149 #ifdef CONFIG_SCHEDSTATS 1150 /* latency stats */ 1151 struct sched_info rq_sched_info; 1152 unsigned long long rq_cpu_time; 1153 /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */ 1154 1155 /* sys_sched_yield() stats */ 1156 unsigned int yld_count; 1157 1158 /* schedule() stats */ 1159 unsigned int sched_count; 1160 unsigned int sched_goidle; 1161 1162 /* try_to_wake_up() stats */ 1163 unsigned int ttwu_count; 1164 unsigned int ttwu_local; 1165 #endif 1166 1167 #ifdef CONFIG_CPU_IDLE 1168 /* Must be inspected within a rcu lock section */ 1169 struct cpuidle_state *idle_state; 1170 #endif 1171 1172 #ifdef CONFIG_SMP 1173 unsigned int nr_pinned; 1174 #endif 1175 unsigned int push_busy; 1176 struct cpu_stop_work push_work; 1177 1178 #ifdef CONFIG_SCHED_CORE 1179 /* per rq */ 1180 struct rq *core; 1181 struct task_struct *core_pick; 1182 unsigned int core_enabled; 1183 unsigned int core_sched_seq; 1184 struct rb_root core_tree; 1185 1186 /* shared state -- careful with sched_core_cpu_deactivate() */ 1187 unsigned int core_task_seq; 1188 unsigned int core_pick_seq; 1189 unsigned long core_cookie; 1190 unsigned int core_forceidle_count; 1191 unsigned int core_forceidle_seq; 1192 unsigned int core_forceidle_occupation; 1193 u64 core_forceidle_start; 1194 #endif 1195 1196 /* Scratch cpumask to be temporarily used under rq_lock */ 1197 cpumask_var_t scratch_mask; 1198 1199 #if defined(CONFIG_CFS_BANDWIDTH) && defined(CONFIG_SMP) 1200 call_single_data_t cfsb_csd; 1201 struct list_head cfsb_csd_list; 1202 #endif 1203 }; 1204 1205 #ifdef CONFIG_FAIR_GROUP_SCHED 1206 1207 /* CPU runqueue to which this cfs_rq is attached */ 1208 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) 1209 { 1210 return cfs_rq->rq; 1211 } 1212 1213 #else 1214 1215 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) 1216 { 1217 return container_of(cfs_rq, struct rq, cfs); 1218 } 1219 #endif 1220 1221 static inline int cpu_of(struct rq *rq) 1222 { 1223 #ifdef CONFIG_SMP 1224 return rq->cpu; 1225 #else 1226 return 0; 1227 #endif 1228 } 1229 1230 #define MDF_PUSH 0x01 1231 1232 static inline bool is_migration_disabled(struct task_struct *p) 1233 { 1234 #ifdef CONFIG_SMP 1235 return p->migration_disabled; 1236 #else 1237 return false; 1238 #endif 1239 } 1240 1241 DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); 1242 1243 #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu))) 1244 #define this_rq() this_cpu_ptr(&runqueues) 1245 #define task_rq(p) cpu_rq(task_cpu(p)) 1246 #define cpu_curr(cpu) (cpu_rq(cpu)->curr) 1247 #define raw_rq() raw_cpu_ptr(&runqueues) 1248 1249 struct sched_group; 1250 #ifdef CONFIG_SCHED_CORE 1251 static inline struct cpumask *sched_group_span(struct sched_group *sg); 1252 1253 DECLARE_STATIC_KEY_FALSE(__sched_core_enabled); 1254 1255 static inline bool sched_core_enabled(struct rq *rq) 1256 { 1257 return static_branch_unlikely(&__sched_core_enabled) && rq->core_enabled; 1258 } 1259 1260 static inline bool sched_core_disabled(void) 1261 { 1262 return !static_branch_unlikely(&__sched_core_enabled); 1263 } 1264 1265 /* 1266 * Be careful with this function; not for general use. The return value isn't 1267 * stable unless you actually hold a relevant rq->__lock. 
1268 */ 1269 static inline raw_spinlock_t *rq_lockp(struct rq *rq) 1270 { 1271 if (sched_core_enabled(rq)) 1272 return &rq->core->__lock; 1273 1274 return &rq->__lock; 1275 } 1276 1277 static inline raw_spinlock_t *__rq_lockp(struct rq *rq) 1278 { 1279 if (rq->core_enabled) 1280 return &rq->core->__lock; 1281 1282 return &rq->__lock; 1283 } 1284 1285 bool cfs_prio_less(const struct task_struct *a, const struct task_struct *b, 1286 bool fi); 1287 void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi); 1288 1289 /* 1290 * Helpers to check if the CPU's core cookie matches with the task's cookie 1291 * when core scheduling is enabled. 1292 * A special case is that the task's cookie always matches with CPU's core 1293 * cookie if the CPU is in an idle core. 1294 */ 1295 static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p) 1296 { 1297 /* Ignore cookie match if core scheduler is not enabled on the CPU. */ 1298 if (!sched_core_enabled(rq)) 1299 return true; 1300 1301 return rq->core->core_cookie == p->core_cookie; 1302 } 1303 1304 static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p) 1305 { 1306 bool idle_core = true; 1307 int cpu; 1308 1309 /* Ignore cookie match if core scheduler is not enabled on the CPU. */ 1310 if (!sched_core_enabled(rq)) 1311 return true; 1312 1313 for_each_cpu(cpu, cpu_smt_mask(cpu_of(rq))) { 1314 if (!available_idle_cpu(cpu)) { 1315 idle_core = false; 1316 break; 1317 } 1318 } 1319 1320 /* 1321 * A CPU in an idle core is always the best choice for tasks with 1322 * cookies. 1323 */ 1324 return idle_core || rq->core->core_cookie == p->core_cookie; 1325 } 1326 1327 static inline bool sched_group_cookie_match(struct rq *rq, 1328 struct task_struct *p, 1329 struct sched_group *group) 1330 { 1331 int cpu; 1332 1333 /* Ignore cookie match if core scheduler is not enabled on the CPU. 
*/ 1334 if (!sched_core_enabled(rq)) 1335 return true; 1336 1337 for_each_cpu_and(cpu, sched_group_span(group), p->cpus_ptr) { 1338 if (sched_core_cookie_match(cpu_rq(cpu), p)) 1339 return true; 1340 } 1341 return false; 1342 } 1343 1344 static inline bool sched_core_enqueued(struct task_struct *p) 1345 { 1346 return !RB_EMPTY_NODE(&p->core_node); 1347 } 1348 1349 extern void sched_core_enqueue(struct rq *rq, struct task_struct *p); 1350 extern void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags); 1351 1352 extern void sched_core_get(void); 1353 extern void sched_core_put(void); 1354 1355 #else /* !CONFIG_SCHED_CORE */ 1356 1357 static inline bool sched_core_enabled(struct rq *rq) 1358 { 1359 return false; 1360 } 1361 1362 static inline bool sched_core_disabled(void) 1363 { 1364 return true; 1365 } 1366 1367 static inline raw_spinlock_t *rq_lockp(struct rq *rq) 1368 { 1369 return &rq->__lock; 1370 } 1371 1372 static inline raw_spinlock_t *__rq_lockp(struct rq *rq) 1373 { 1374 return &rq->__lock; 1375 } 1376 1377 static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p) 1378 { 1379 return true; 1380 } 1381 1382 static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p) 1383 { 1384 return true; 1385 } 1386 1387 static inline bool sched_group_cookie_match(struct rq *rq, 1388 struct task_struct *p, 1389 struct sched_group *group) 1390 { 1391 return true; 1392 } 1393 #endif /* CONFIG_SCHED_CORE */ 1394 1395 static inline void lockdep_assert_rq_held(struct rq *rq) 1396 { 1397 lockdep_assert_held(__rq_lockp(rq)); 1398 } 1399 1400 extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass); 1401 extern bool raw_spin_rq_trylock(struct rq *rq); 1402 extern void raw_spin_rq_unlock(struct rq *rq); 1403 1404 static inline void raw_spin_rq_lock(struct rq *rq) 1405 { 1406 raw_spin_rq_lock_nested(rq, 0); 1407 } 1408 1409 static inline void raw_spin_rq_lock_irq(struct rq *rq) 1410 { 1411 local_irq_disable(); 1412 raw_spin_rq_lock(rq); 1413 } 1414 1415 static inline void raw_spin_rq_unlock_irq(struct rq *rq) 1416 { 1417 raw_spin_rq_unlock(rq); 1418 local_irq_enable(); 1419 } 1420 1421 static inline unsigned long _raw_spin_rq_lock_irqsave(struct rq *rq) 1422 { 1423 unsigned long flags; 1424 local_irq_save(flags); 1425 raw_spin_rq_lock(rq); 1426 return flags; 1427 } 1428 1429 static inline void raw_spin_rq_unlock_irqrestore(struct rq *rq, unsigned long flags) 1430 { 1431 raw_spin_rq_unlock(rq); 1432 local_irq_restore(flags); 1433 } 1434 1435 #define raw_spin_rq_lock_irqsave(rq, flags) \ 1436 do { \ 1437 flags = _raw_spin_rq_lock_irqsave(rq); \ 1438 } while (0) 1439 1440 #ifdef CONFIG_SCHED_SMT 1441 extern void __update_idle_core(struct rq *rq); 1442 1443 static inline void update_idle_core(struct rq *rq) 1444 { 1445 if (static_branch_unlikely(&sched_smt_present)) 1446 __update_idle_core(rq); 1447 } 1448 1449 #else 1450 static inline void update_idle_core(struct rq *rq) { } 1451 #endif 1452 1453 #ifdef CONFIG_FAIR_GROUP_SCHED 1454 static inline struct task_struct *task_of(struct sched_entity *se) 1455 { 1456 SCHED_WARN_ON(!entity_is_task(se)); 1457 return container_of(se, struct task_struct, se); 1458 } 1459 1460 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) 1461 { 1462 return p->se.cfs_rq; 1463 } 1464 1465 /* runqueue on which this entity is (to be) queued */ 1466 static inline struct cfs_rq *cfs_rq_of(const struct sched_entity *se) 1467 { 1468 return se->cfs_rq; 1469 } 1470 1471 /* runqueue "owned" by this group */ 1472 
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

#else

#define task_of(_se)	container_of(_se, struct task_struct, se)

static inline struct cfs_rq *task_cfs_rq(const struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(const struct sched_entity *se)
{
	const struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}
#endif

extern void update_rq_clock(struct rq *rq);

/*
 * rq::clock_update_flags bits
 *
 * %RQCF_REQ_SKIP - will request skipping of clock update on the next
 *  call to __schedule(). This is an optimisation to avoid
 *  neighbouring rq clock updates.
 *
 * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is
 *  in effect and calls to update_rq_clock() are being ignored.
 *
 * %RQCF_UPDATED - is a debug flag that indicates whether a call has been
 *  made to update_rq_clock() since the last time rq::lock was pinned.
 *
 * If inside of __schedule(), clock_update_flags will have been
 * shifted left (a left shift is a cheap operation for the fast path
 * to promote %RQCF_REQ_SKIP to %RQCF_ACT_SKIP), so you must use,
 *
 *	if (rq->clock_update_flags >= RQCF_UPDATED)
 *
 * to check if %RQCF_UPDATED is set. It'll never be shifted more than
 * one position though, because the next rq_unpin_lock() will shift it
 * back.
 */
#define RQCF_REQ_SKIP		0x01
#define RQCF_ACT_SKIP		0x02
#define RQCF_UPDATED		0x04

static inline void assert_clock_updated(struct rq *rq)
{
	/*
	 * The only reason for not seeing a clock update since the
	 * last rq_pin_lock() is if we're currently skipping updates.
	 */
	SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP);
}

static inline u64 rq_clock(struct rq *rq)
{
	lockdep_assert_rq_held(rq);
	assert_clock_updated(rq);

	return rq->clock;
}

static inline u64 rq_clock_task(struct rq *rq)
{
	lockdep_assert_rq_held(rq);
	assert_clock_updated(rq);

	return rq->clock_task;
}

static inline void rq_clock_skip_update(struct rq *rq)
{
	lockdep_assert_rq_held(rq);
	rq->clock_update_flags |= RQCF_REQ_SKIP;
}

/*
 * See rt task throttling, which is the only time a skip
 * request is canceled.
 */
static inline void rq_clock_cancel_skipupdate(struct rq *rq)
{
	lockdep_assert_rq_held(rq);
	rq->clock_update_flags &= ~RQCF_REQ_SKIP;
}

/*
 * During CPU offlining and rq-wide unthrottling, we can trigger
 * an update_rq_clock() for several cfs and rt runqueues (typically
 * when using list_for_each_entry_*()).
 * rq_clock_start_loop_update() can be called after updating the clock
 * once and before iterating over the list to prevent multiple updates.
 * After the iterative traversal, we need to call rq_clock_stop_loop_update()
 * to clear RQCF_ACT_SKIP of rq->clock_update_flags.
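 *
 * An illustrative usage pattern (sketch only; the list and the
 * unthrottle_one() helper are hypothetical):
 *
 *	update_rq_clock(rq);
 *	rq_clock_start_loop_update(rq);
 *	list_for_each_entry(cfs_rq, &list, throttled_list)
 *		unthrottle_one(cfs_rq);
 *	rq_clock_stop_loop_update(rq);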
1579 */ 1580 static inline void rq_clock_start_loop_update(struct rq *rq) 1581 { 1582 lockdep_assert_rq_held(rq); 1583 SCHED_WARN_ON(rq->clock_update_flags & RQCF_ACT_SKIP); 1584 rq->clock_update_flags |= RQCF_ACT_SKIP; 1585 } 1586 1587 static inline void rq_clock_stop_loop_update(struct rq *rq) 1588 { 1589 lockdep_assert_rq_held(rq); 1590 rq->clock_update_flags &= ~RQCF_ACT_SKIP; 1591 } 1592 1593 struct rq_flags { 1594 unsigned long flags; 1595 struct pin_cookie cookie; 1596 #ifdef CONFIG_SCHED_DEBUG 1597 /* 1598 * A copy of (rq::clock_update_flags & RQCF_UPDATED) for the 1599 * current pin context is stashed here in case it needs to be 1600 * restored in rq_repin_lock(). 1601 */ 1602 unsigned int clock_update_flags; 1603 #endif 1604 }; 1605 1606 extern struct balance_callback balance_push_callback; 1607 1608 /* 1609 * Lockdep annotation that avoids accidental unlocks; it's like a 1610 * sticky/continuous lockdep_assert_held(). 1611 * 1612 * This avoids code that has access to 'struct rq *rq' (basically everything in 1613 * the scheduler) from accidentally unlocking the rq if they do not also have a 1614 * copy of the (on-stack) 'struct rq_flags rf'. 1615 * 1616 * Also see Documentation/locking/lockdep-design.rst. 1617 */ 1618 static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf) 1619 { 1620 rf->cookie = lockdep_pin_lock(__rq_lockp(rq)); 1621 1622 #ifdef CONFIG_SCHED_DEBUG 1623 rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP); 1624 rf->clock_update_flags = 0; 1625 #ifdef CONFIG_SMP 1626 SCHED_WARN_ON(rq->balance_callback && rq->balance_callback != &balance_push_callback); 1627 #endif 1628 #endif 1629 } 1630 1631 static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf) 1632 { 1633 #ifdef CONFIG_SCHED_DEBUG 1634 if (rq->clock_update_flags > RQCF_ACT_SKIP) 1635 rf->clock_update_flags = RQCF_UPDATED; 1636 #endif 1637 1638 lockdep_unpin_lock(__rq_lockp(rq), rf->cookie); 1639 } 1640 1641 static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf) 1642 { 1643 lockdep_repin_lock(__rq_lockp(rq), rf->cookie); 1644 1645 #ifdef CONFIG_SCHED_DEBUG 1646 /* 1647 * Restore the value we stashed in @rf for this pin context. 
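 *
 * Caller-side sketch for context (illustrative only): code that must
 * drop and re-acquire the rq lock typically brackets that with
 *
 *	rq_unpin_lock(rq, &rf);
 *	raw_spin_rq_unlock(rq);
 *	...
 *	raw_spin_rq_lock(rq);
 *	rq_repin_lock(rq, &rf);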
1648 */ 1649 rq->clock_update_flags |= rf->clock_update_flags; 1650 #endif 1651 } 1652 1653 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) 1654 __acquires(rq->lock); 1655 1656 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) 1657 __acquires(p->pi_lock) 1658 __acquires(rq->lock); 1659 1660 static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf) 1661 __releases(rq->lock) 1662 { 1663 rq_unpin_lock(rq, rf); 1664 raw_spin_rq_unlock(rq); 1665 } 1666 1667 static inline void 1668 task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) 1669 __releases(rq->lock) 1670 __releases(p->pi_lock) 1671 { 1672 rq_unpin_lock(rq, rf); 1673 raw_spin_rq_unlock(rq); 1674 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); 1675 } 1676 1677 DEFINE_LOCK_GUARD_1(task_rq_lock, struct task_struct, 1678 _T->rq = task_rq_lock(_T->lock, &_T->rf), 1679 task_rq_unlock(_T->rq, _T->lock, &_T->rf), 1680 struct rq *rq; struct rq_flags rf) 1681 1682 static inline void 1683 rq_lock_irqsave(struct rq *rq, struct rq_flags *rf) 1684 __acquires(rq->lock) 1685 { 1686 raw_spin_rq_lock_irqsave(rq, rf->flags); 1687 rq_pin_lock(rq, rf); 1688 } 1689 1690 static inline void 1691 rq_lock_irq(struct rq *rq, struct rq_flags *rf) 1692 __acquires(rq->lock) 1693 { 1694 raw_spin_rq_lock_irq(rq); 1695 rq_pin_lock(rq, rf); 1696 } 1697 1698 static inline void 1699 rq_lock(struct rq *rq, struct rq_flags *rf) 1700 __acquires(rq->lock) 1701 { 1702 raw_spin_rq_lock(rq); 1703 rq_pin_lock(rq, rf); 1704 } 1705 1706 static inline void 1707 rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf) 1708 __releases(rq->lock) 1709 { 1710 rq_unpin_lock(rq, rf); 1711 raw_spin_rq_unlock_irqrestore(rq, rf->flags); 1712 } 1713 1714 static inline void 1715 rq_unlock_irq(struct rq *rq, struct rq_flags *rf) 1716 __releases(rq->lock) 1717 { 1718 rq_unpin_lock(rq, rf); 1719 raw_spin_rq_unlock_irq(rq); 1720 } 1721 1722 static inline void 1723 rq_unlock(struct rq *rq, struct rq_flags *rf) 1724 __releases(rq->lock) 1725 { 1726 rq_unpin_lock(rq, rf); 1727 raw_spin_rq_unlock(rq); 1728 } 1729 1730 DEFINE_LOCK_GUARD_1(rq_lock, struct rq, 1731 rq_lock(_T->lock, &_T->rf), 1732 rq_unlock(_T->lock, &_T->rf), 1733 struct rq_flags rf) 1734 1735 DEFINE_LOCK_GUARD_1(rq_lock_irq, struct rq, 1736 rq_lock_irq(_T->lock, &_T->rf), 1737 rq_unlock_irq(_T->lock, &_T->rf), 1738 struct rq_flags rf) 1739 1740 DEFINE_LOCK_GUARD_1(rq_lock_irqsave, struct rq, 1741 rq_lock_irqsave(_T->lock, &_T->rf), 1742 rq_unlock_irqrestore(_T->lock, &_T->rf), 1743 struct rq_flags rf) 1744 1745 static inline struct rq * 1746 this_rq_lock_irq(struct rq_flags *rf) 1747 __acquires(rq->lock) 1748 { 1749 struct rq *rq; 1750 1751 local_irq_disable(); 1752 rq = this_rq(); 1753 rq_lock(rq, rf); 1754 return rq; 1755 } 1756 1757 #ifdef CONFIG_NUMA 1758 enum numa_topology_type { 1759 NUMA_DIRECT, 1760 NUMA_GLUELESS_MESH, 1761 NUMA_BACKPLANE, 1762 }; 1763 extern enum numa_topology_type sched_numa_topology_type; 1764 extern int sched_max_numa_distance; 1765 extern bool find_numa_distance(int distance); 1766 extern void sched_init_numa(int offline_node); 1767 extern void sched_update_numa(int cpu, bool online); 1768 extern void sched_domains_numa_masks_set(unsigned int cpu); 1769 extern void sched_domains_numa_masks_clear(unsigned int cpu); 1770 extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu); 1771 #else 1772 static inline void sched_init_numa(int offline_node) { } 1773 static inline void sched_update_numa(int cpu, bool online) { } 
1774 static inline void sched_domains_numa_masks_set(unsigned int cpu) { } 1775 static inline void sched_domains_numa_masks_clear(unsigned int cpu) { } 1776 static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu) 1777 { 1778 return nr_cpu_ids; 1779 } 1780 #endif 1781 1782 #ifdef CONFIG_NUMA_BALANCING 1783 /* The regions in numa_faults array from task_struct */ 1784 enum numa_faults_stats { 1785 NUMA_MEM = 0, 1786 NUMA_CPU, 1787 NUMA_MEMBUF, 1788 NUMA_CPUBUF 1789 }; 1790 extern void sched_setnuma(struct task_struct *p, int node); 1791 extern int migrate_task_to(struct task_struct *p, int cpu); 1792 extern int migrate_swap(struct task_struct *p, struct task_struct *t, 1793 int cpu, int scpu); 1794 extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p); 1795 #else 1796 static inline void 1797 init_numa_balancing(unsigned long clone_flags, struct task_struct *p) 1798 { 1799 } 1800 #endif /* CONFIG_NUMA_BALANCING */ 1801 1802 #ifdef CONFIG_SMP 1803 1804 static inline void 1805 queue_balance_callback(struct rq *rq, 1806 struct balance_callback *head, 1807 void (*func)(struct rq *rq)) 1808 { 1809 lockdep_assert_rq_held(rq); 1810 1811 /* 1812 * Don't (re)queue an already queued item; nor queue anything when 1813 * balance_push() is active, see the comment with 1814 * balance_push_callback. 1815 */ 1816 if (unlikely(head->next || rq->balance_callback == &balance_push_callback)) 1817 return; 1818 1819 head->func = func; 1820 head->next = rq->balance_callback; 1821 rq->balance_callback = head; 1822 } 1823 1824 #define rcu_dereference_check_sched_domain(p) \ 1825 rcu_dereference_check((p), \ 1826 lockdep_is_held(&sched_domains_mutex)) 1827 1828 /* 1829 * The domain tree (rq->sd) is protected by RCU's quiescent state transition. 1830 * See destroy_sched_domains: call_rcu for details. 1831 * 1832 * The domain tree of any CPU may only be accessed from within 1833 * preempt-disabled sections. 1834 */ 1835 #define for_each_domain(cpu, __sd) \ 1836 for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \ 1837 __sd; __sd = __sd->parent) 1838 1839 /* A mask of all the SD flags that have the SDF_SHARED_CHILD metaflag */ 1840 #define SD_FLAG(name, mflags) (name * !!((mflags) & SDF_SHARED_CHILD)) | 1841 static const unsigned int SD_SHARED_CHILD_MASK = 1842 #include <linux/sched/sd_flags.h> 1843 0; 1844 #undef SD_FLAG 1845 1846 /** 1847 * highest_flag_domain - Return highest sched_domain containing flag. 1848 * @cpu: The CPU whose highest level of sched domain is to 1849 * be returned. 1850 * @flag: The flag to check for the highest sched_domain 1851 * for the given CPU. 1852 * 1853 * Returns the highest sched_domain of a CPU which contains @flag. If @flag has 1854 * the SDF_SHARED_CHILD metaflag, all the children domains also have @flag. 1855 */ 1856 static inline struct sched_domain *highest_flag_domain(int cpu, int flag) 1857 { 1858 struct sched_domain *sd, *hsd = NULL; 1859 1860 for_each_domain(cpu, sd) { 1861 if (sd->flags & flag) { 1862 hsd = sd; 1863 continue; 1864 } 1865 1866 /* 1867 * Stop the search if @flag is known to be shared at lower 1868 * levels. It will not be found further up. 
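 *
 * For example (illustrative): SD_SHARE_CPUCAPACITY carries
 * SDF_SHARED_CHILD, so once a domain without it is found, no parent
 * domain can have it either and the loop can stop here.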
1869 */ 1870 if (flag & SD_SHARED_CHILD_MASK) 1871 break; 1872 } 1873 1874 return hsd; 1875 } 1876 1877 static inline struct sched_domain *lowest_flag_domain(int cpu, int flag) 1878 { 1879 struct sched_domain *sd; 1880 1881 for_each_domain(cpu, sd) { 1882 if (sd->flags & flag) 1883 break; 1884 } 1885 1886 return sd; 1887 } 1888 1889 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc); 1890 DECLARE_PER_CPU(int, sd_llc_size); 1891 DECLARE_PER_CPU(int, sd_llc_id); 1892 DECLARE_PER_CPU(int, sd_share_id); 1893 DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared); 1894 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa); 1895 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing); 1896 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity); 1897 extern struct static_key_false sched_asym_cpucapacity; 1898 extern struct static_key_false sched_cluster_active; 1899 1900 static __always_inline bool sched_asym_cpucap_active(void) 1901 { 1902 return static_branch_unlikely(&sched_asym_cpucapacity); 1903 } 1904 1905 struct sched_group_capacity { 1906 atomic_t ref; 1907 /* 1908 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity 1909 * for a single CPU. 1910 */ 1911 unsigned long capacity; 1912 unsigned long min_capacity; /* Min per-CPU capacity in group */ 1913 unsigned long max_capacity; /* Max per-CPU capacity in group */ 1914 unsigned long next_update; 1915 int imbalance; /* XXX unrelated to capacity but shared group state */ 1916 1917 #ifdef CONFIG_SCHED_DEBUG 1918 int id; 1919 #endif 1920 1921 unsigned long cpumask[]; /* Balance mask */ 1922 }; 1923 1924 struct sched_group { 1925 struct sched_group *next; /* Must be a circular list */ 1926 atomic_t ref; 1927 1928 unsigned int group_weight; 1929 unsigned int cores; 1930 struct sched_group_capacity *sgc; 1931 int asym_prefer_cpu; /* CPU of highest priority in group */ 1932 int flags; 1933 1934 /* 1935 * The CPUs this group covers. 1936 * 1937 * NOTE: this field is variable length. (Allocated dynamically 1938 * by attaching extra space to the end of the structure, 1939 * depending on how many CPUs the kernel has booted up with) 1940 */ 1941 unsigned long cpumask[]; 1942 }; 1943 1944 static inline struct cpumask *sched_group_span(struct sched_group *sg) 1945 { 1946 return to_cpumask(sg->cpumask); 1947 } 1948 1949 /* 1950 * See build_balance_mask(). 
1951 */ 1952 static inline struct cpumask *group_balance_mask(struct sched_group *sg) 1953 { 1954 return to_cpumask(sg->sgc->cpumask); 1955 } 1956 1957 extern int group_balance_cpu(struct sched_group *sg); 1958 1959 #ifdef CONFIG_SCHED_DEBUG 1960 void update_sched_domain_debugfs(void); 1961 void dirty_sched_domain_sysctl(int cpu); 1962 #else 1963 static inline void update_sched_domain_debugfs(void) 1964 { 1965 } 1966 static inline void dirty_sched_domain_sysctl(int cpu) 1967 { 1968 } 1969 #endif 1970 1971 extern int sched_update_scaling(void); 1972 1973 static inline const struct cpumask *task_user_cpus(struct task_struct *p) 1974 { 1975 if (!p->user_cpus_ptr) 1976 return cpu_possible_mask; /* &init_task.cpus_mask */ 1977 return p->user_cpus_ptr; 1978 } 1979 #endif /* CONFIG_SMP */ 1980 1981 #include "stats.h" 1982 1983 #if defined(CONFIG_SCHED_CORE) && defined(CONFIG_SCHEDSTATS) 1984 1985 extern void __sched_core_account_forceidle(struct rq *rq); 1986 1987 static inline void sched_core_account_forceidle(struct rq *rq) 1988 { 1989 if (schedstat_enabled()) 1990 __sched_core_account_forceidle(rq); 1991 } 1992 1993 extern void __sched_core_tick(struct rq *rq); 1994 1995 static inline void sched_core_tick(struct rq *rq) 1996 { 1997 if (sched_core_enabled(rq) && schedstat_enabled()) 1998 __sched_core_tick(rq); 1999 } 2000 2001 #else 2002 2003 static inline void sched_core_account_forceidle(struct rq *rq) {} 2004 2005 static inline void sched_core_tick(struct rq *rq) {} 2006 2007 #endif /* CONFIG_SCHED_CORE && CONFIG_SCHEDSTATS */ 2008 2009 #ifdef CONFIG_CGROUP_SCHED 2010 2011 /* 2012 * Return the group to which this task belongs. 2013 * 2014 * We cannot use task_css() and friends because the cgroup subsystem 2015 * changes that value before the cgroup_subsys::attach() method is called, 2016 * therefore we cannot pin it and might observe the wrong value. 2017 * 2018 * The same is true for autogroup's p->signal->autogroup->tg; the autogroup 2019 * core changes this before calling sched_move_task(). 2020 * 2021 * Instead we use a 'copy' which is updated from sched_move_task() while 2022 * holding both task_struct::pi_lock and rq::lock. 2023 */ 2024 static inline struct task_group *task_group(struct task_struct *p) 2025 { 2026 return p->sched_task_group; 2027 } 2028 2029 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */ 2030 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) 2031 { 2032 #if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED) 2033 struct task_group *tg = task_group(p); 2034 #endif 2035 2036 #ifdef CONFIG_FAIR_GROUP_SCHED 2037 set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]); 2038 p->se.cfs_rq = tg->cfs_rq[cpu]; 2039 p->se.parent = tg->se[cpu]; 2040 p->se.depth = tg->se[cpu] ? tg->se[cpu]->depth + 1 : 0; 2041 #endif 2042 2043 #ifdef CONFIG_RT_GROUP_SCHED 2044 p->rt.rt_rq = tg->rt_rq[cpu]; 2045 p->rt.parent = tg->rt_se[cpu]; 2046 #endif 2047 } 2048 2049 #else /* CONFIG_CGROUP_SCHED */ 2050 2051 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { } 2052 static inline struct task_group *task_group(struct task_struct *p) 2053 { 2054 return NULL; 2055 } 2056 2057 #endif /* CONFIG_CGROUP_SCHED */ 2058 2059 static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) 2060 { 2061 set_task_rq(p, cpu); 2062 #ifdef CONFIG_SMP 2063 /* 2064 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be 2065 * successfully executed on another CPU.
We must ensure that updates of 2066 * per-task data have been completed by this moment. 2067 */ 2068 smp_wmb(); 2069 WRITE_ONCE(task_thread_info(p)->cpu, cpu); 2070 p->wake_cpu = cpu; 2071 #endif 2072 } 2073 2074 /* 2075 * Tunables that become constants when CONFIG_SCHED_DEBUG is off: 2076 */ 2077 #ifdef CONFIG_SCHED_DEBUG 2078 # define const_debug __read_mostly 2079 #else 2080 # define const_debug const 2081 #endif 2082 2083 #define SCHED_FEAT(name, enabled) \ 2084 __SCHED_FEAT_##name , 2085 2086 enum { 2087 #include "features.h" 2088 __SCHED_FEAT_NR, 2089 }; 2090 2091 #undef SCHED_FEAT 2092 2093 #ifdef CONFIG_SCHED_DEBUG 2094 2095 /* 2096 * To support run-time toggling of sched features, all the translation units 2097 * (but core.c) reference the sysctl_sched_features defined in core.c. 2098 */ 2099 extern const_debug unsigned int sysctl_sched_features; 2100 2101 #ifdef CONFIG_JUMP_LABEL 2102 #define SCHED_FEAT(name, enabled) \ 2103 static __always_inline bool static_branch_##name(struct static_key *key) \ 2104 { \ 2105 return static_key_##enabled(key); \ 2106 } 2107 2108 #include "features.h" 2109 #undef SCHED_FEAT 2110 2111 extern struct static_key sched_feat_keys[__SCHED_FEAT_NR]; 2112 #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x])) 2113 2114 #else /* !CONFIG_JUMP_LABEL */ 2115 2116 #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) 2117 2118 #endif /* CONFIG_JUMP_LABEL */ 2119 2120 #else /* !SCHED_DEBUG */ 2121 2122 /* 2123 * Each translation unit has its own copy of sysctl_sched_features to allow 2124 * constants propagation at compile time and compiler optimization based on 2125 * features default. 2126 */ 2127 #define SCHED_FEAT(name, enabled) \ 2128 (1UL << __SCHED_FEAT_##name) * enabled | 2129 static const_debug __maybe_unused unsigned int sysctl_sched_features = 2130 #include "features.h" 2131 0; 2132 #undef SCHED_FEAT 2133 2134 #define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) 2135 2136 #endif /* SCHED_DEBUG */ 2137 2138 extern struct static_key_false sched_numa_balancing; 2139 extern struct static_key_false sched_schedstats; 2140 2141 static inline u64 global_rt_period(void) 2142 { 2143 return (u64)sysctl_sched_rt_period * NSEC_PER_USEC; 2144 } 2145 2146 static inline u64 global_rt_runtime(void) 2147 { 2148 if (sysctl_sched_rt_runtime < 0) 2149 return RUNTIME_INF; 2150 2151 return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC; 2152 } 2153 2154 static inline int task_current(struct rq *rq, struct task_struct *p) 2155 { 2156 return rq->curr == p; 2157 } 2158 2159 static inline int task_on_cpu(struct rq *rq, struct task_struct *p) 2160 { 2161 #ifdef CONFIG_SMP 2162 return p->on_cpu; 2163 #else 2164 return task_current(rq, p); 2165 #endif 2166 } 2167 2168 static inline int task_on_rq_queued(struct task_struct *p) 2169 { 2170 return p->on_rq == TASK_ON_RQ_QUEUED; 2171 } 2172 2173 static inline int task_on_rq_migrating(struct task_struct *p) 2174 { 2175 return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING; 2176 } 2177 2178 /* Wake flags. 
The first three directly map to some SD flag value */ 2179 #define WF_EXEC 0x02 /* Wakeup after exec; maps to SD_BALANCE_EXEC */ 2180 #define WF_FORK 0x04 /* Wakeup after fork; maps to SD_BALANCE_FORK */ 2181 #define WF_TTWU 0x08 /* Wakeup; maps to SD_BALANCE_WAKE */ 2182 2183 #define WF_SYNC 0x10 /* Waker goes to sleep after wakeup */ 2184 #define WF_MIGRATED 0x20 /* Internal use, task got migrated */ 2185 #define WF_CURRENT_CPU 0x40 /* Prefer to move the wakee to the current CPU. */ 2186 2187 #ifdef CONFIG_SMP 2188 static_assert(WF_EXEC == SD_BALANCE_EXEC); 2189 static_assert(WF_FORK == SD_BALANCE_FORK); 2190 static_assert(WF_TTWU == SD_BALANCE_WAKE); 2191 #endif 2192 2193 /* 2194 * To aid in avoiding the subversion of "niceness" due to uneven distribution 2195 * of tasks with abnormal "nice" values across CPUs the contribution that 2196 * each task makes to its run queue's load is weighted according to its 2197 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a 2198 * scaled version of the new time slice allocation that they receive on time 2199 * slice expiry etc. 2200 */ 2201 2202 #define WEIGHT_IDLEPRIO 3 2203 #define WMULT_IDLEPRIO 1431655765 2204 2205 extern const int sched_prio_to_weight[40]; 2206 extern const u32 sched_prio_to_wmult[40]; 2207 2208 /* 2209 * {de,en}queue flags: 2210 * 2211 * DEQUEUE_SLEEP - task is no longer runnable 2212 * ENQUEUE_WAKEUP - task just became runnable 2213 * 2214 * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks 2215 * are in a known state which allows modification. Such pairs 2216 * should preserve as much state as possible. 2217 * 2218 * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location 2219 * in the runqueue. 2220 * 2221 * NOCLOCK - skip the update_rq_clock() (avoids double updates) 2222 * 2223 * MIGRATION - p->on_rq == TASK_ON_RQ_MIGRATING (used for DEADLINE) 2224 * 2225 * ENQUEUE_HEAD - place at front of runqueue (tail if not specified) 2226 * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline) 2227 * ENQUEUE_MIGRATED - the task was migrated during wakeup 2228 * 2229 */ 2230 2231 #define DEQUEUE_SLEEP 0x01 2232 #define DEQUEUE_SAVE 0x02 /* Matches ENQUEUE_RESTORE */ 2233 #define DEQUEUE_MOVE 0x04 /* Matches ENQUEUE_MOVE */ 2234 #define DEQUEUE_NOCLOCK 0x08 /* Matches ENQUEUE_NOCLOCK */ 2235 #define DEQUEUE_MIGRATING 0x100 /* Matches ENQUEUE_MIGRATING */ 2236 2237 #define ENQUEUE_WAKEUP 0x01 2238 #define ENQUEUE_RESTORE 0x02 2239 #define ENQUEUE_MOVE 0x04 2240 #define ENQUEUE_NOCLOCK 0x08 2241 2242 #define ENQUEUE_HEAD 0x10 2243 #define ENQUEUE_REPLENISH 0x20 2244 #ifdef CONFIG_SMP 2245 #define ENQUEUE_MIGRATED 0x40 2246 #else 2247 #define ENQUEUE_MIGRATED 0x00 2248 #endif 2249 #define ENQUEUE_INITIAL 0x80 2250 #define ENQUEUE_MIGRATING 0x100 2251 2252 #define RETRY_TASK ((void *)-1UL) 2253 2254 struct affinity_context { 2255 const struct cpumask *new_mask; 2256 struct cpumask *user_mask; 2257 unsigned int flags; 2258 }; 2259 2260 extern s64 update_curr_common(struct rq *rq); 2261 2262 struct sched_class { 2263 2264 #ifdef CONFIG_UCLAMP_TASK 2265 int uclamp_enabled; 2266 #endif 2267 2268 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags); 2269 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags); 2270 void (*yield_task) (struct rq *rq); 2271 bool (*yield_to_task)(struct rq *rq, struct task_struct *p); 2272 2273 void (*wakeup_preempt)(struct rq *rq, struct task_struct *p, int flags); 2274 2275 struct task_struct 
*(*pick_next_task)(struct rq *rq); 2276 2277 void (*put_prev_task)(struct rq *rq, struct task_struct *p); 2278 void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first); 2279 2280 #ifdef CONFIG_SMP 2281 int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf); 2282 int (*select_task_rq)(struct task_struct *p, int task_cpu, int flags); 2283 2284 struct task_struct * (*pick_task)(struct rq *rq); 2285 2286 void (*migrate_task_rq)(struct task_struct *p, int new_cpu); 2287 2288 void (*task_woken)(struct rq *this_rq, struct task_struct *task); 2289 2290 void (*set_cpus_allowed)(struct task_struct *p, struct affinity_context *ctx); 2291 2292 void (*rq_online)(struct rq *rq); 2293 void (*rq_offline)(struct rq *rq); 2294 2295 struct rq *(*find_lock_rq)(struct task_struct *p, struct rq *rq); 2296 #endif 2297 2298 void (*task_tick)(struct rq *rq, struct task_struct *p, int queued); 2299 void (*task_fork)(struct task_struct *p); 2300 void (*task_dead)(struct task_struct *p); 2301 2302 /* 2303 * The switched_from() call is allowed to drop rq->lock, therefore we 2304 * cannot assume the switched_from/switched_to pair is serialized by 2305 * rq->lock. They are however serialized by p->pi_lock. 2306 */ 2307 void (*switched_from)(struct rq *this_rq, struct task_struct *task); 2308 void (*switched_to) (struct rq *this_rq, struct task_struct *task); 2309 void (*prio_changed) (struct rq *this_rq, struct task_struct *task, 2310 int oldprio); 2311 2312 unsigned int (*get_rr_interval)(struct rq *rq, 2313 struct task_struct *task); 2314 2315 void (*update_curr)(struct rq *rq); 2316 2317 #ifdef CONFIG_FAIR_GROUP_SCHED 2318 void (*task_change_group)(struct task_struct *p); 2319 #endif 2320 2321 #ifdef CONFIG_SCHED_CORE 2322 int (*task_is_throttled)(struct task_struct *p, int cpu); 2323 #endif 2324 }; 2325 2326 static inline void put_prev_task(struct rq *rq, struct task_struct *prev) 2327 { 2328 WARN_ON_ONCE(rq->curr != prev); 2329 prev->sched_class->put_prev_task(rq, prev); 2330 } 2331 2332 static inline void set_next_task(struct rq *rq, struct task_struct *next) 2333 { 2334 next->sched_class->set_next_task(rq, next, false); 2335 } 2336 2337 2338 /* 2339 * Helper to define a sched_class instance; each one is placed in a separate 2340 * section which is ordered by the linker script: 2341 * 2342 * include/asm-generic/vmlinux.lds.h 2343 * 2344 * *CAREFUL* they are laid out in *REVERSE* order!!! 2345 * 2346 * Also enforce alignment on the instance, not the type, to guarantee layout. 
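 *
 * Usage sketch (modelled on kernel/sched/fair.c; the initializer list is
 * abridged):
 *
 *	DEFINE_SCHED_CLASS(fair) = {
 *		.enqueue_task	= enqueue_task_fair,
 *		.dequeue_task	= dequeue_task_fair,
 *		...
 *	};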
2347 */ 2348 #define DEFINE_SCHED_CLASS(name) \ 2349 const struct sched_class name##_sched_class \ 2350 __aligned(__alignof__(struct sched_class)) \ 2351 __section("__" #name "_sched_class") 2352 2353 /* Defined in include/asm-generic/vmlinux.lds.h */ 2354 extern struct sched_class __sched_class_highest[]; 2355 extern struct sched_class __sched_class_lowest[]; 2356 2357 #define for_class_range(class, _from, _to) \ 2358 for (class = (_from); class < (_to); class++) 2359 2360 #define for_each_class(class) \ 2361 for_class_range(class, __sched_class_highest, __sched_class_lowest) 2362 2363 #define sched_class_above(_a, _b) ((_a) < (_b)) 2364 2365 extern const struct sched_class stop_sched_class; 2366 extern const struct sched_class dl_sched_class; 2367 extern const struct sched_class rt_sched_class; 2368 extern const struct sched_class fair_sched_class; 2369 extern const struct sched_class idle_sched_class; 2370 2371 static inline bool sched_stop_runnable(struct rq *rq) 2372 { 2373 return rq->stop && task_on_rq_queued(rq->stop); 2374 } 2375 2376 static inline bool sched_dl_runnable(struct rq *rq) 2377 { 2378 return rq->dl.dl_nr_running > 0; 2379 } 2380 2381 static inline bool sched_rt_runnable(struct rq *rq) 2382 { 2383 return rq->rt.rt_queued > 0; 2384 } 2385 2386 static inline bool sched_fair_runnable(struct rq *rq) 2387 { 2388 return rq->cfs.nr_running > 0; 2389 } 2390 2391 extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf); 2392 extern struct task_struct *pick_next_task_idle(struct rq *rq); 2393 2394 #define SCA_CHECK 0x01 2395 #define SCA_MIGRATE_DISABLE 0x02 2396 #define SCA_MIGRATE_ENABLE 0x04 2397 #define SCA_USER 0x08 2398 2399 #ifdef CONFIG_SMP 2400 2401 extern void update_group_capacity(struct sched_domain *sd, int cpu); 2402 2403 extern void sched_balance_trigger(struct rq *rq); 2404 2405 extern void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx); 2406 2407 static inline struct task_struct *get_push_task(struct rq *rq) 2408 { 2409 struct task_struct *p = rq->curr; 2410 2411 lockdep_assert_rq_held(rq); 2412 2413 if (rq->push_busy) 2414 return NULL; 2415 2416 if (p->nr_cpus_allowed == 1) 2417 return NULL; 2418 2419 if (p->migration_disabled) 2420 return NULL; 2421 2422 rq->push_busy = true; 2423 return get_task_struct(p); 2424 } 2425 2426 extern int push_cpu_stop(void *arg); 2427 2428 #endif 2429 2430 #ifdef CONFIG_CPU_IDLE 2431 static inline void idle_set_state(struct rq *rq, 2432 struct cpuidle_state *idle_state) 2433 { 2434 rq->idle_state = idle_state; 2435 } 2436 2437 static inline struct cpuidle_state *idle_get_state(struct rq *rq) 2438 { 2439 SCHED_WARN_ON(!rcu_read_lock_held()); 2440 2441 return rq->idle_state; 2442 } 2443 #else 2444 static inline void idle_set_state(struct rq *rq, 2445 struct cpuidle_state *idle_state) 2446 { 2447 } 2448 2449 static inline struct cpuidle_state *idle_get_state(struct rq *rq) 2450 { 2451 return NULL; 2452 } 2453 #endif 2454 2455 extern void schedule_idle(void); 2456 asmlinkage void schedule_user(void); 2457 2458 extern void sysrq_sched_debug_show(void); 2459 extern void sched_init_granularity(void); 2460 extern void update_max_interval(void); 2461 2462 extern void init_sched_dl_class(void); 2463 extern void init_sched_rt_class(void); 2464 extern void init_sched_fair_class(void); 2465 2466 extern void reweight_task(struct task_struct *p, int prio); 2467 2468 extern void resched_curr(struct rq *rq); 2469 extern void resched_cpu(int cpu); 2470 2471 extern 
struct rt_bandwidth def_rt_bandwidth; 2472 extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); 2473 extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq); 2474 2475 extern void init_dl_entity(struct sched_dl_entity *dl_se); 2476 2477 #define BW_SHIFT 20 2478 #define BW_UNIT (1 << BW_SHIFT) 2479 #define RATIO_SHIFT 8 2480 #define MAX_BW_BITS (64 - BW_SHIFT) 2481 #define MAX_BW ((1ULL << MAX_BW_BITS) - 1) 2482 unsigned long to_ratio(u64 period, u64 runtime); 2483 2484 extern void init_entity_runnable_average(struct sched_entity *se); 2485 extern void post_init_entity_util_avg(struct task_struct *p); 2486 2487 #ifdef CONFIG_NO_HZ_FULL 2488 extern bool sched_can_stop_tick(struct rq *rq); 2489 extern int __init sched_tick_offload_init(void); 2490 2491 /* 2492 * The tick may be needed by tasks in the runqueue depending on their policy and 2493 * requirements. If the tick is needed, let's send the target an IPI to kick it out of 2494 * nohz mode if necessary. 2495 */ 2496 static inline void sched_update_tick_dependency(struct rq *rq) 2497 { 2498 int cpu = cpu_of(rq); 2499 2500 if (!tick_nohz_full_cpu(cpu)) 2501 return; 2502 2503 if (sched_can_stop_tick(rq)) 2504 tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED); 2505 else 2506 tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED); 2507 } 2508 #else 2509 static inline int sched_tick_offload_init(void) { return 0; } 2510 static inline void sched_update_tick_dependency(struct rq *rq) { } 2511 #endif 2512 2513 static inline void add_nr_running(struct rq *rq, unsigned count) 2514 { 2515 unsigned prev_nr = rq->nr_running; 2516 2517 rq->nr_running = prev_nr + count; 2518 if (trace_sched_update_nr_running_tp_enabled()) { 2519 call_trace_sched_update_nr_running(rq, count); 2520 } 2521 2522 #ifdef CONFIG_SMP 2523 if (prev_nr < 2 && rq->nr_running >= 2) 2524 set_rd_overloaded(rq->rd, 1); 2525 #endif 2526 2527 sched_update_tick_dependency(rq); 2528 } 2529 2530 static inline void sub_nr_running(struct rq *rq, unsigned count) 2531 { 2532 rq->nr_running -= count; 2533 if (trace_sched_update_nr_running_tp_enabled()) { 2534 call_trace_sched_update_nr_running(rq, -count); 2535 } 2536 2537 /* Check if we still need the tick */ 2538 sched_update_tick_dependency(rq); 2539 } 2540 2541 extern void activate_task(struct rq *rq, struct task_struct *p, int flags); 2542 extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags); 2543 2544 extern void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags); 2545 2546 #ifdef CONFIG_PREEMPT_RT 2547 #define SCHED_NR_MIGRATE_BREAK 8 2548 #else 2549 #define SCHED_NR_MIGRATE_BREAK 32 2550 #endif 2551 2552 extern const_debug unsigned int sysctl_sched_nr_migrate; 2553 extern const_debug unsigned int sysctl_sched_migration_cost; 2554 2555 extern unsigned int sysctl_sched_base_slice; 2556 2557 #ifdef CONFIG_SCHED_DEBUG 2558 extern int sysctl_resched_latency_warn_ms; 2559 extern int sysctl_resched_latency_warn_once; 2560 2561 extern unsigned int sysctl_sched_tunable_scaling; 2562 2563 extern unsigned int sysctl_numa_balancing_scan_delay; 2564 extern unsigned int sysctl_numa_balancing_scan_period_min; 2565 extern unsigned int sysctl_numa_balancing_scan_period_max; 2566 extern unsigned int sysctl_numa_balancing_scan_size; 2567 extern unsigned int sysctl_numa_balancing_hot_threshold; 2568 #endif 2569 2570 #ifdef CONFIG_SCHED_HRTICK 2571 2572 /* 2573 * Use hrtick when: 2574 * - enabled by features 2575 * - hrtimer is actually high res 2576 */ 2577 static inline int hrtick_enabled(struct rq
*rq) 2578 { 2579 if (!cpu_active(cpu_of(rq))) 2580 return 0; 2581 return hrtimer_is_hres_active(&rq->hrtick_timer); 2582 } 2583 2584 static inline int hrtick_enabled_fair(struct rq *rq) 2585 { 2586 if (!sched_feat(HRTICK)) 2587 return 0; 2588 return hrtick_enabled(rq); 2589 } 2590 2591 static inline int hrtick_enabled_dl(struct rq *rq) 2592 { 2593 if (!sched_feat(HRTICK_DL)) 2594 return 0; 2595 return hrtick_enabled(rq); 2596 } 2597 2598 void hrtick_start(struct rq *rq, u64 delay); 2599 2600 #else 2601 2602 static inline int hrtick_enabled_fair(struct rq *rq) 2603 { 2604 return 0; 2605 } 2606 2607 static inline int hrtick_enabled_dl(struct rq *rq) 2608 { 2609 return 0; 2610 } 2611 2612 static inline int hrtick_enabled(struct rq *rq) 2613 { 2614 return 0; 2615 } 2616 2617 #endif /* CONFIG_SCHED_HRTICK */ 2618 2619 #ifndef arch_scale_freq_tick 2620 static __always_inline 2621 void arch_scale_freq_tick(void) 2622 { 2623 } 2624 #endif 2625 2626 #ifndef arch_scale_freq_capacity 2627 /** 2628 * arch_scale_freq_capacity - get the frequency scale factor of a given CPU. 2629 * @cpu: the CPU in question. 2630 * 2631 * Return: the frequency scale factor normalized against SCHED_CAPACITY_SCALE, i.e. 2632 * 2633 * f_curr 2634 * ------ * SCHED_CAPACITY_SCALE 2635 * f_max 2636 */ 2637 static __always_inline 2638 unsigned long arch_scale_freq_capacity(int cpu) 2639 { 2640 return SCHED_CAPACITY_SCALE; 2641 } 2642 #endif 2643 2644 #ifdef CONFIG_SCHED_DEBUG 2645 /* 2646 * In double_lock_balance()/double_rq_lock(), we use raw_spin_rq_lock() to 2647 * acquire rq lock instead of rq_lock(). So at the end of these two functions 2648 * we need to call double_rq_clock_clear_update() to clear RQCF_UPDATED of 2649 * rq->clock_update_flags to avoid the WARN_DOUBLE_CLOCK warning. 2650 */ 2651 static inline void double_rq_clock_clear_update(struct rq *rq1, struct rq *rq2) 2652 { 2653 rq1->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP); 2654 /* rq1 == rq2 for !CONFIG_SMP, so just clear RQCF_UPDATED once. */ 2655 #ifdef CONFIG_SMP 2656 rq2->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP); 2657 #endif 2658 } 2659 #else 2660 static inline void double_rq_clock_clear_update(struct rq *rq1, struct rq *rq2) {} 2661 #endif 2662 2663 #define DEFINE_LOCK_GUARD_2(name, type, _lock, _unlock, ...) \ 2664 __DEFINE_UNLOCK_GUARD(name, type, _unlock, type *lock2; __VA_ARGS__) \ 2665 static inline class_##name##_t class_##name##_constructor(type *lock, type *lock2) \ 2666 { class_##name##_t _t = { .lock = lock, .lock2 = lock2 }, *_T = &_t; \ 2667 _lock; return _t; } 2668 2669 #ifdef CONFIG_SMP 2670 2671 static inline bool rq_order_less(struct rq *rq1, struct rq *rq2) 2672 { 2673 #ifdef CONFIG_SCHED_CORE 2674 /* 2675 * In order to not have {0,2},{1,3} turn into an AB-BA, 2676 * order by core-id first and cpu-id second. 2677 * 2678 * Notably: 2679 * 2680 * double_rq_lock(0,3); will take core-0, core-1 lock 2681 * double_rq_lock(1,2); will take core-1, core-0 lock 2682 * 2683 * when only cpu-id is considered. 2684 */ 2685 if (rq1->core->cpu < rq2->core->cpu) 2686 return true; 2687 if (rq1->core->cpu > rq2->core->cpu) 2688 return false; 2689 2690 /* 2691 * __sched_core_flip() relies on SMT having cpu-id lock order.
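 *
 * Within a single core (rq1->core == rq2->core) the code falls through to
 * the plain cpu-id comparison below.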
2692 */ 2693 #endif 2694 return rq1->cpu < rq2->cpu; 2695 } 2696 2697 extern void double_rq_lock(struct rq *rq1, struct rq *rq2); 2698 2699 #ifdef CONFIG_PREEMPTION 2700 2701 /* 2702 * fair double_lock_balance: Safely acquires both rq->locks in a fair 2703 * way at the expense of forcing extra atomic operations in all 2704 * invocations. This assures that the double_lock is acquired using the 2705 * same underlying policy as the spinlock_t on this architecture, which 2706 * reduces latency compared to the unfair variant below. However, it 2707 * also adds more overhead and therefore may reduce throughput. 2708 */ 2709 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) 2710 __releases(this_rq->lock) 2711 __acquires(busiest->lock) 2712 __acquires(this_rq->lock) 2713 { 2714 raw_spin_rq_unlock(this_rq); 2715 double_rq_lock(this_rq, busiest); 2716 2717 return 1; 2718 } 2719 2720 #else 2721 /* 2722 * Unfair double_lock_balance: Optimizes throughput at the expense of 2723 * latency by eliminating extra atomic operations when the locks are 2724 * already in proper order on entry. This favors lower CPU-ids and will 2725 * grant the double lock to lower CPUs over higher ids under contention, 2726 * regardless of entry order into the function. 2727 */ 2728 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) 2729 __releases(this_rq->lock) 2730 __acquires(busiest->lock) 2731 __acquires(this_rq->lock) 2732 { 2733 if (__rq_lockp(this_rq) == __rq_lockp(busiest) || 2734 likely(raw_spin_rq_trylock(busiest))) { 2735 double_rq_clock_clear_update(this_rq, busiest); 2736 return 0; 2737 } 2738 2739 if (rq_order_less(this_rq, busiest)) { 2740 raw_spin_rq_lock_nested(busiest, SINGLE_DEPTH_NESTING); 2741 double_rq_clock_clear_update(this_rq, busiest); 2742 return 0; 2743 } 2744 2745 raw_spin_rq_unlock(this_rq); 2746 double_rq_lock(this_rq, busiest); 2747 2748 return 1; 2749 } 2750 2751 #endif /* CONFIG_PREEMPTION */ 2752 2753 /* 2754 * double_lock_balance - lock the busiest runqueue, this_rq is locked already. 
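 *
 * Returns 1 if this_rq->lock had to be dropped to take busiest->lock (the
 * caller must then revalidate anything it read under the old lock), and 0 if
 * both locks were acquired without releasing this_rq->lock.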
2755 */ 2756 static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest) 2757 { 2758 lockdep_assert_irqs_disabled(); 2759 2760 return _double_lock_balance(this_rq, busiest); 2761 } 2762 2763 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) 2764 __releases(busiest->lock) 2765 { 2766 if (__rq_lockp(this_rq) != __rq_lockp(busiest)) 2767 raw_spin_rq_unlock(busiest); 2768 lock_set_subclass(&__rq_lockp(this_rq)->dep_map, 0, _RET_IP_); 2769 } 2770 2771 static inline void double_lock(spinlock_t *l1, spinlock_t *l2) 2772 { 2773 if (l1 > l2) 2774 swap(l1, l2); 2775 2776 spin_lock(l1); 2777 spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 2778 } 2779 2780 static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2) 2781 { 2782 if (l1 > l2) 2783 swap(l1, l2); 2784 2785 spin_lock_irq(l1); 2786 spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 2787 } 2788 2789 static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2) 2790 { 2791 if (l1 > l2) 2792 swap(l1, l2); 2793 2794 raw_spin_lock(l1); 2795 raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 2796 } 2797 2798 static inline void double_raw_unlock(raw_spinlock_t *l1, raw_spinlock_t *l2) 2799 { 2800 raw_spin_unlock(l1); 2801 raw_spin_unlock(l2); 2802 } 2803 2804 DEFINE_LOCK_GUARD_2(double_raw_spinlock, raw_spinlock_t, 2805 double_raw_lock(_T->lock, _T->lock2), 2806 double_raw_unlock(_T->lock, _T->lock2)) 2807 2808 /* 2809 * double_rq_unlock - safely unlock two runqueues 2810 * 2811 * Note this does not restore interrupts like task_rq_unlock, 2812 * you need to do so manually after calling. 2813 */ 2814 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) 2815 __releases(rq1->lock) 2816 __releases(rq2->lock) 2817 { 2818 if (__rq_lockp(rq1) != __rq_lockp(rq2)) 2819 raw_spin_rq_unlock(rq2); 2820 else 2821 __release(rq2->lock); 2822 raw_spin_rq_unlock(rq1); 2823 } 2824 2825 extern void set_rq_online (struct rq *rq); 2826 extern void set_rq_offline(struct rq *rq); 2827 extern bool sched_smp_initialized; 2828 2829 #else /* CONFIG_SMP */ 2830 2831 /* 2832 * double_rq_lock - safely lock two runqueues 2833 * 2834 * Note this does not disable interrupts like task_rq_lock, 2835 * you need to do so manually before calling. 2836 */ 2837 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) 2838 __acquires(rq1->lock) 2839 __acquires(rq2->lock) 2840 { 2841 WARN_ON_ONCE(!irqs_disabled()); 2842 WARN_ON_ONCE(rq1 != rq2); 2843 raw_spin_rq_lock(rq1); 2844 __acquire(rq2->lock); /* Fake it out ;) */ 2845 double_rq_clock_clear_update(rq1, rq2); 2846 } 2847 2848 /* 2849 * double_rq_unlock - safely unlock two runqueues 2850 * 2851 * Note this does not restore interrupts like task_rq_unlock, 2852 * you need to do so manually after calling. 
2853 */ 2854 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) 2855 __releases(rq1->lock) 2856 __releases(rq2->lock) 2857 { 2858 WARN_ON_ONCE(rq1 != rq2); 2859 raw_spin_rq_unlock(rq1); 2860 __release(rq2->lock); 2861 } 2862 2863 #endif 2864 2865 DEFINE_LOCK_GUARD_2(double_rq_lock, struct rq, 2866 double_rq_lock(_T->lock, _T->lock2), 2867 double_rq_unlock(_T->lock, _T->lock2)) 2868 2869 extern struct sched_entity *__pick_root_entity(struct cfs_rq *cfs_rq); 2870 extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq); 2871 extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq); 2872 2873 #ifdef CONFIG_SCHED_DEBUG 2874 extern bool sched_debug_verbose; 2875 2876 extern void print_cfs_stats(struct seq_file *m, int cpu); 2877 extern void print_rt_stats(struct seq_file *m, int cpu); 2878 extern void print_dl_stats(struct seq_file *m, int cpu); 2879 extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq); 2880 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq); 2881 extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq); 2882 2883 extern void resched_latency_warn(int cpu, u64 latency); 2884 #ifdef CONFIG_NUMA_BALANCING 2885 extern void 2886 show_numa_stats(struct task_struct *p, struct seq_file *m); 2887 extern void 2888 print_numa_stats(struct seq_file *m, int node, unsigned long tsf, 2889 unsigned long tpf, unsigned long gsf, unsigned long gpf); 2890 #endif /* CONFIG_NUMA_BALANCING */ 2891 #else 2892 static inline void resched_latency_warn(int cpu, u64 latency) {} 2893 #endif /* CONFIG_SCHED_DEBUG */ 2894 2895 extern void init_cfs_rq(struct cfs_rq *cfs_rq); 2896 extern void init_rt_rq(struct rt_rq *rt_rq); 2897 extern void init_dl_rq(struct dl_rq *dl_rq); 2898 2899 extern void cfs_bandwidth_usage_inc(void); 2900 extern void cfs_bandwidth_usage_dec(void); 2901 2902 #ifdef CONFIG_NO_HZ_COMMON 2903 #define NOHZ_BALANCE_KICK_BIT 0 2904 #define NOHZ_STATS_KICK_BIT 1 2905 #define NOHZ_NEWILB_KICK_BIT 2 2906 #define NOHZ_NEXT_KICK_BIT 3 2907 2908 /* Run sched_balance_domains() */ 2909 #define NOHZ_BALANCE_KICK BIT(NOHZ_BALANCE_KICK_BIT) 2910 /* Update blocked load */ 2911 #define NOHZ_STATS_KICK BIT(NOHZ_STATS_KICK_BIT) 2912 /* Update blocked load when entering idle */ 2913 #define NOHZ_NEWILB_KICK BIT(NOHZ_NEWILB_KICK_BIT) 2914 /* Update nohz.next_balance */ 2915 #define NOHZ_NEXT_KICK BIT(NOHZ_NEXT_KICK_BIT) 2916 2917 #define NOHZ_KICK_MASK (NOHZ_BALANCE_KICK | NOHZ_STATS_KICK | NOHZ_NEXT_KICK) 2918 2919 #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags) 2920 2921 extern void nohz_balance_exit_idle(struct rq *rq); 2922 #else 2923 static inline void nohz_balance_exit_idle(struct rq *rq) { } 2924 #endif 2925 2926 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) 2927 extern void nohz_run_idle_balance(int cpu); 2928 #else 2929 static inline void nohz_run_idle_balance(int cpu) { } 2930 #endif 2931 2932 #ifdef CONFIG_IRQ_TIME_ACCOUNTING 2933 struct irqtime { 2934 u64 total; 2935 u64 tick_delta; 2936 u64 irq_start_time; 2937 struct u64_stats_sync sync; 2938 }; 2939 2940 DECLARE_PER_CPU(struct irqtime, cpu_irqtime); 2941 2942 /* 2943 * Returns the irqtime minus the softirq time computed by ksoftirqd. 2944 * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime 2945 * subtracted from it and would never move forward.
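 *
 * The seqcount retry loop below pairs with the u64_stats_update_begin()/
 * u64_stats_update_end() section on the writer side (the IRQ time accounting
 * code in kernel/sched/cputime.c).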
2946 */ 2947 static inline u64 irq_time_read(int cpu) 2948 { 2949 struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu); 2950 unsigned int seq; 2951 u64 total; 2952 2953 do { 2954 seq = __u64_stats_fetch_begin(&irqtime->sync); 2955 total = irqtime->total; 2956 } while (__u64_stats_fetch_retry(&irqtime->sync, seq)); 2957 2958 return total; 2959 } 2960 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */ 2961 2962 #ifdef CONFIG_CPU_FREQ 2963 DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data); 2964 2965 /** 2966 * cpufreq_update_util - Take a note about CPU utilization changes. 2967 * @rq: Runqueue to carry out the update for. 2968 * @flags: Update reason flags. 2969 * 2970 * This function is called by the scheduler on the CPU whose utilization is 2971 * being updated. 2972 * 2973 * It can only be called from RCU-sched read-side critical sections. 2974 * 2975 * The way cpufreq is currently arranged requires it to evaluate the CPU 2976 * performance state (frequency/voltage) on a regular basis to prevent it from 2977 * being stuck in a completely inadequate performance level for too long. 2978 * That is not guaranteed to happen if the updates are only triggered from CFS 2979 * and DL, though, because they may not be coming in if only RT tasks are 2980 * active all the time (or there are RT tasks only). 2981 * 2982 * As a workaround for that issue, this function is called periodically by the 2983 * RT sched class to trigger extra cpufreq updates to prevent it from stalling, 2984 * but that really is a band-aid. Going forward it should be replaced with 2985 * solutions targeted more specifically at RT tasks. 2986 */ 2987 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) 2988 { 2989 struct update_util_data *data; 2990 2991 data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data, 2992 cpu_of(rq))); 2993 if (data) 2994 data->func(data, rq_clock(rq), flags); 2995 } 2996 #else 2997 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {} 2998 #endif /* CONFIG_CPU_FREQ */ 2999 3000 #ifdef arch_scale_freq_capacity 3001 # ifndef arch_scale_freq_invariant 3002 # define arch_scale_freq_invariant() true 3003 # endif 3004 #else 3005 # define arch_scale_freq_invariant() false 3006 #endif 3007 3008 #ifdef CONFIG_SMP 3009 unsigned long effective_cpu_util(int cpu, unsigned long util_cfs, 3010 unsigned long *min, 3011 unsigned long *max); 3012 3013 unsigned long sugov_effective_cpu_perf(int cpu, unsigned long actual, 3014 unsigned long min, 3015 unsigned long max); 3016 3017 3018 /* 3019 * Verify the fitness of task @p to run on @cpu taking into account the 3020 * CPU original capacity and the runtime/deadline ratio of the task. 3021 * 3022 * The function will return true if the original capacity of @cpu is 3023 * greater than or equal to task's deadline density right shifted by 3024 * (BW_SHIFT - SCHED_CAPACITY_SHIFT) and false otherwise. 
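 *
 * Since dl_density is (dl_runtime << BW_SHIFT) / dl_deadline, this amounts
 * (up to integer rounding) to checking:
 *
 *	arch_scale_cpu_capacity(cpu) >= (dl_runtime / dl_deadline) * SCHED_CAPACITY_SCALE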
3025 */ 3026 static inline bool dl_task_fits_capacity(struct task_struct *p, int cpu) 3027 { 3028 unsigned long cap = arch_scale_cpu_capacity(cpu); 3029 3030 return cap >= p->dl.dl_density >> (BW_SHIFT - SCHED_CAPACITY_SHIFT); 3031 } 3032 3033 static inline unsigned long cpu_bw_dl(struct rq *rq) 3034 { 3035 return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT; 3036 } 3037 3038 static inline unsigned long cpu_util_dl(struct rq *rq) 3039 { 3040 return READ_ONCE(rq->avg_dl.util_avg); 3041 } 3042 3043 3044 extern unsigned long cpu_util_cfs(int cpu); 3045 extern unsigned long cpu_util_cfs_boost(int cpu); 3046 3047 static inline unsigned long cpu_util_rt(struct rq *rq) 3048 { 3049 return READ_ONCE(rq->avg_rt.util_avg); 3050 } 3051 #endif 3052 3053 #ifdef CONFIG_UCLAMP_TASK 3054 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id); 3055 3056 static inline unsigned long uclamp_rq_get(struct rq *rq, 3057 enum uclamp_id clamp_id) 3058 { 3059 return READ_ONCE(rq->uclamp[clamp_id].value); 3060 } 3061 3062 static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id, 3063 unsigned int value) 3064 { 3065 WRITE_ONCE(rq->uclamp[clamp_id].value, value); 3066 } 3067 3068 static inline bool uclamp_rq_is_idle(struct rq *rq) 3069 { 3070 return rq->uclamp_flags & UCLAMP_FLAG_IDLE; 3071 } 3072 3073 /* Is the rq being capped/throttled by uclamp_max? */ 3074 static inline bool uclamp_rq_is_capped(struct rq *rq) 3075 { 3076 unsigned long rq_util; 3077 unsigned long max_util; 3078 3079 if (!static_branch_likely(&sched_uclamp_used)) 3080 return false; 3081 3082 rq_util = cpu_util_cfs(cpu_of(rq)) + cpu_util_rt(rq); 3083 max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value); 3084 3085 return max_util != SCHED_CAPACITY_SCALE && rq_util >= max_util; 3086 } 3087 3088 /* 3089 * When uclamp is compiled in, the aggregation at rq level is 'turned off' 3090 * by default in the fast path and only gets turned on once userspace performs 3091 * an operation that requires it. 3092 * 3093 * Returns true if userspace opted-in to use uclamp and aggregation at rq level 3094 * hence is active. 
3095 */ 3096 static inline bool uclamp_is_used(void) 3097 { 3098 return static_branch_likely(&sched_uclamp_used); 3099 } 3100 #else /* CONFIG_UCLAMP_TASK */ 3101 static inline unsigned long uclamp_eff_value(struct task_struct *p, 3102 enum uclamp_id clamp_id) 3103 { 3104 if (clamp_id == UCLAMP_MIN) 3105 return 0; 3106 3107 return SCHED_CAPACITY_SCALE; 3108 } 3109 3110 static inline bool uclamp_rq_is_capped(struct rq *rq) { return false; } 3111 3112 static inline bool uclamp_is_used(void) 3113 { 3114 return false; 3115 } 3116 3117 static inline unsigned long uclamp_rq_get(struct rq *rq, 3118 enum uclamp_id clamp_id) 3119 { 3120 if (clamp_id == UCLAMP_MIN) 3121 return 0; 3122 3123 return SCHED_CAPACITY_SCALE; 3124 } 3125 3126 static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id, 3127 unsigned int value) 3128 { 3129 } 3130 3131 static inline bool uclamp_rq_is_idle(struct rq *rq) 3132 { 3133 return false; 3134 } 3135 #endif /* CONFIG_UCLAMP_TASK */ 3136 3137 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ 3138 static inline unsigned long cpu_util_irq(struct rq *rq) 3139 { 3140 return READ_ONCE(rq->avg_irq.util_avg); 3141 } 3142 3143 static inline 3144 unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max) 3145 { 3146 util *= (max - irq); 3147 util /= max; 3148 3149 return util; 3150 3151 } 3152 #else 3153 static inline unsigned long cpu_util_irq(struct rq *rq) 3154 { 3155 return 0; 3156 } 3157 3158 static inline 3159 unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max) 3160 { 3161 return util; 3162 } 3163 #endif 3164 3165 #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) 3166 3167 #define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus))) 3168 3169 DECLARE_STATIC_KEY_FALSE(sched_energy_present); 3170 3171 static inline bool sched_energy_enabled(void) 3172 { 3173 return static_branch_unlikely(&sched_energy_present); 3174 } 3175 3176 extern struct cpufreq_governor schedutil_gov; 3177 3178 #else /* ! (CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */ 3179 3180 #define perf_domain_span(pd) NULL 3181 static inline bool sched_energy_enabled(void) { return false; } 3182 3183 #endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */ 3184 3185 #ifdef CONFIG_MEMBARRIER 3186 /* 3187 * The scheduler provides memory barriers required by membarrier between: 3188 * - prior user-space memory accesses and store to rq->membarrier_state, 3189 * - store to rq->membarrier_state and following user-space memory accesses. 3190 * In the same way it provides those guarantees around store to rq->curr. 
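 *
 * membarrier_switch_mm() below caches the incoming mm's membarrier_state in
 * rq->membarrier_state so that the membarrier syscall paths can inspect it
 * without having to dereference the remote task's mm.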
3191 */ 3192 static inline void membarrier_switch_mm(struct rq *rq, 3193 struct mm_struct *prev_mm, 3194 struct mm_struct *next_mm) 3195 { 3196 int membarrier_state; 3197 3198 if (prev_mm == next_mm) 3199 return; 3200 3201 membarrier_state = atomic_read(&next_mm->membarrier_state); 3202 if (READ_ONCE(rq->membarrier_state) == membarrier_state) 3203 return; 3204 3205 WRITE_ONCE(rq->membarrier_state, membarrier_state); 3206 } 3207 #else 3208 static inline void membarrier_switch_mm(struct rq *rq, 3209 struct mm_struct *prev_mm, 3210 struct mm_struct *next_mm) 3211 { 3212 } 3213 #endif 3214 3215 #ifdef CONFIG_SMP 3216 static inline bool is_per_cpu_kthread(struct task_struct *p) 3217 { 3218 if (!(p->flags & PF_KTHREAD)) 3219 return false; 3220 3221 if (p->nr_cpus_allowed != 1) 3222 return false; 3223 3224 return true; 3225 } 3226 #endif 3227 3228 extern void swake_up_all_locked(struct swait_queue_head *q); 3229 extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); 3230 3231 extern int try_to_wake_up(struct task_struct *tsk, unsigned int state, int wake_flags); 3232 3233 #ifdef CONFIG_PREEMPT_DYNAMIC 3234 extern int preempt_dynamic_mode; 3235 extern int sched_dynamic_mode(const char *str); 3236 extern void sched_dynamic_update(int mode); 3237 #endif 3238 3239 #ifdef CONFIG_SCHED_MM_CID 3240 3241 #define SCHED_MM_CID_PERIOD_NS (100ULL * 1000000) /* 100ms */ 3242 #define MM_CID_SCAN_DELAY 100 /* 100ms */ 3243 3244 extern raw_spinlock_t cid_lock; 3245 extern int use_cid_lock; 3246 3247 extern void sched_mm_cid_migrate_from(struct task_struct *t); 3248 extern void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t); 3249 extern void task_tick_mm_cid(struct rq *rq, struct task_struct *curr); 3250 extern void init_sched_mm_cid(struct task_struct *t); 3251 3252 static inline void __mm_cid_put(struct mm_struct *mm, int cid) 3253 { 3254 if (cid < 0) 3255 return; 3256 cpumask_clear_cpu(cid, mm_cidmask(mm)); 3257 } 3258 3259 /* 3260 * The per-mm/cpu cid can have the MM_CID_LAZY_PUT flag set or transition to 3261 * the MM_CID_UNSET state without holding the rq lock, but the rq lock needs to 3262 * be held to transition to other states. 3263 * 3264 * State transitions synchronized with cmpxchg or try_cmpxchg need to be 3265 * consistent across cpus, which prevents use of this_cpu_cmpxchg. 3266 */ 3267 static inline void mm_cid_put_lazy(struct task_struct *t) 3268 { 3269 struct mm_struct *mm = t->mm; 3270 struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid; 3271 int cid; 3272 3273 lockdep_assert_irqs_disabled(); 3274 cid = __this_cpu_read(pcpu_cid->cid); 3275 if (!mm_cid_is_lazy_put(cid) || 3276 !try_cmpxchg(&this_cpu_ptr(pcpu_cid)->cid, &cid, MM_CID_UNSET)) 3277 return; 3278 __mm_cid_put(mm, mm_cid_clear_lazy_put(cid)); 3279 } 3280 3281 static inline int mm_cid_pcpu_unset(struct mm_struct *mm) 3282 { 3283 struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid; 3284 int cid, res; 3285 3286 lockdep_assert_irqs_disabled(); 3287 cid = __this_cpu_read(pcpu_cid->cid); 3288 for (;;) { 3289 if (mm_cid_is_unset(cid)) 3290 return MM_CID_UNSET; 3291 /* 3292 * Attempt transition from valid or lazy-put to unset. 
3293 */ 3294 res = cmpxchg(&this_cpu_ptr(pcpu_cid)->cid, cid, MM_CID_UNSET); 3295 if (res == cid) 3296 break; 3297 cid = res; 3298 } 3299 return cid; 3300 } 3301 3302 static inline void mm_cid_put(struct mm_struct *mm) 3303 { 3304 int cid; 3305 3306 lockdep_assert_irqs_disabled(); 3307 cid = mm_cid_pcpu_unset(mm); 3308 if (cid == MM_CID_UNSET) 3309 return; 3310 __mm_cid_put(mm, mm_cid_clear_lazy_put(cid)); 3311 } 3312 3313 static inline int __mm_cid_try_get(struct mm_struct *mm) 3314 { 3315 struct cpumask *cpumask; 3316 int cid; 3317 3318 cpumask = mm_cidmask(mm); 3319 /* 3320 * Retry finding first zero bit if the mask is temporarily 3321 * filled. This only happens during concurrent remote-clear 3322 * which owns a cid without holding a rq lock. 3323 */ 3324 for (;;) { 3325 cid = cpumask_first_zero(cpumask); 3326 if (cid < nr_cpu_ids) 3327 break; 3328 cpu_relax(); 3329 } 3330 if (cpumask_test_and_set_cpu(cid, cpumask)) 3331 return -1; 3332 return cid; 3333 } 3334 3335 /* 3336 * Save a snapshot of the current runqueue time of this cpu 3337 * with the per-cpu cid value, allowing to estimate how recently it was used. 3338 */ 3339 static inline void mm_cid_snapshot_time(struct rq *rq, struct mm_struct *mm) 3340 { 3341 struct mm_cid *pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(rq)); 3342 3343 lockdep_assert_rq_held(rq); 3344 WRITE_ONCE(pcpu_cid->time, rq->clock); 3345 } 3346 3347 static inline int __mm_cid_get(struct rq *rq, struct mm_struct *mm) 3348 { 3349 int cid; 3350 3351 /* 3352 * All allocations (even those using the cid_lock) are lock-free. If 3353 * use_cid_lock is set, hold the cid_lock to perform cid allocation to 3354 * guarantee forward progress. 3355 */ 3356 if (!READ_ONCE(use_cid_lock)) { 3357 cid = __mm_cid_try_get(mm); 3358 if (cid >= 0) 3359 goto end; 3360 raw_spin_lock(&cid_lock); 3361 } else { 3362 raw_spin_lock(&cid_lock); 3363 cid = __mm_cid_try_get(mm); 3364 if (cid >= 0) 3365 goto unlock; 3366 } 3367 3368 /* 3369 * cid concurrently allocated. Retry while forcing following 3370 * allocations to use the cid_lock to ensure forward progress. 3371 */ 3372 WRITE_ONCE(use_cid_lock, 1); 3373 /* 3374 * Set use_cid_lock before allocation. Only care about program order 3375 * because this is only required for forward progress. 3376 */ 3377 barrier(); 3378 /* 3379 * Retry until it succeeds. It is guaranteed to eventually succeed once 3380 * all newcoming allocations observe the use_cid_lock flag set. 3381 */ 3382 do { 3383 cid = __mm_cid_try_get(mm); 3384 cpu_relax(); 3385 } while (cid < 0); 3386 /* 3387 * Allocate before clearing use_cid_lock. Only care about 3388 * program order because this is for forward progress. 
3389 */ 3390 barrier(); 3391 WRITE_ONCE(use_cid_lock, 0); 3392 unlock: 3393 raw_spin_unlock(&cid_lock); 3394 end: 3395 mm_cid_snapshot_time(rq, mm); 3396 return cid; 3397 } 3398 3399 static inline int mm_cid_get(struct rq *rq, struct mm_struct *mm) 3400 { 3401 struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid; 3402 struct cpumask *cpumask; 3403 int cid; 3404 3405 lockdep_assert_rq_held(rq); 3406 cpumask = mm_cidmask(mm); 3407 cid = __this_cpu_read(pcpu_cid->cid); 3408 if (mm_cid_is_valid(cid)) { 3409 mm_cid_snapshot_time(rq, mm); 3410 return cid; 3411 } 3412 if (mm_cid_is_lazy_put(cid)) { 3413 if (try_cmpxchg(&this_cpu_ptr(pcpu_cid)->cid, &cid, MM_CID_UNSET)) 3414 __mm_cid_put(mm, mm_cid_clear_lazy_put(cid)); 3415 } 3416 cid = __mm_cid_get(rq, mm); 3417 __this_cpu_write(pcpu_cid->cid, cid); 3418 return cid; 3419 } 3420 3421 static inline void switch_mm_cid(struct rq *rq, 3422 struct task_struct *prev, 3423 struct task_struct *next) 3424 { 3425 /* 3426 * Provide a memory barrier between rq->curr store and load of 3427 * {prev,next}->mm->pcpu_cid[cpu] on rq->curr->mm transition. 3428 * 3429 * Should be adapted if context_switch() is modified. 3430 */ 3431 if (!next->mm) { // to kernel 3432 /* 3433 * user -> kernel transition does not guarantee a barrier, but 3434 * we can use the fact that it performs an atomic operation in 3435 * mmgrab(). 3436 */ 3437 if (prev->mm) // from user 3438 smp_mb__after_mmgrab(); 3439 /* 3440 * kernel -> kernel transition does not change rq->curr->mm 3441 * state. It stays NULL. 3442 */ 3443 } else { // to user 3444 /* 3445 * kernel -> user transition does not provide a barrier 3446 * between rq->curr store and load of {prev,next}->mm->pcpu_cid[cpu]. 3447 * Provide it here. 3448 */ 3449 if (!prev->mm) { // from kernel 3450 smp_mb(); 3451 } else { // from user 3452 /* 3453 * user->user transition relies on an implicit 3454 * memory barrier in switch_mm() when 3455 * current->mm changes. If the architecture 3456 * switch_mm() does not have an implicit memory 3457 * barrier, it is emitted here. If current->mm 3458 * is unchanged, no barrier is needed. 3459 */ 3460 smp_mb__after_switch_mm(); 3461 } 3462 } 3463 if (prev->mm_cid_active) { 3464 mm_cid_snapshot_time(rq, prev->mm); 3465 mm_cid_put_lazy(prev); 3466 prev->mm_cid = -1; 3467 } 3468 if (next->mm_cid_active) 3469 next->last_mm_cid = next->mm_cid = mm_cid_get(rq, next->mm); 3470 } 3471 3472 #else 3473 static inline void switch_mm_cid(struct rq *rq, struct task_struct *prev, struct task_struct *next) { } 3474 static inline void sched_mm_cid_migrate_from(struct task_struct *t) { } 3475 static inline void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t) { } 3476 static inline void task_tick_mm_cid(struct rq *rq, struct task_struct *curr) { } 3477 static inline void init_sched_mm_cid(struct task_struct *t) { } 3478 #endif 3479 3480 extern u64 avg_vruntime(struct cfs_rq *cfs_rq); 3481 extern int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se); 3482 3483 #endif /* _KERNEL_SCHED_SCHED_H */ 3484