#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/irq_work.h>
#include <linux/tick.h>
#include <linux/slab.h>

#include "cpupri.h"
#include "cpudeadline.h"
#include "cpuacct.h"

struct rq;
struct cpuidle_state;

/* task_struct::on_rq states: */
#define TASK_ON_RQ_QUEUED	1
#define TASK_ON_RQ_MIGRATING	2

extern __read_mostly int scheduler_running;

extern unsigned long calc_load_update;
extern atomic_long_t calc_load_tasks;

extern void calc_global_load_tick(struct rq *this_rq);
extern long calc_load_fold_active(struct rq *this_rq);

#ifdef CONFIG_SMP
extern void update_cpu_load_active(struct rq *this_rq);
#else
static inline void update_cpu_load_active(struct rq *this_rq) { }
#endif

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))

/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (e.g. nice +19 on an autogroup), deeper taskgroup
 * hierarchies, especially on larger systems. This is not a user-visible change
 * and does not change the user-interface for setting shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution
 * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
 * increased costs.
 */
#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load */
# define SCHED_LOAD_RESOLUTION	10
# define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
# define scale_load_down(w)	((w) >> SCHED_LOAD_RESOLUTION)
#else
# define SCHED_LOAD_RESOLUTION	0
# define scale_load(w)		(w)
# define scale_load_down(w)	(w)
#endif

#define SCHED_LOAD_SHIFT	(10 + SCHED_LOAD_RESOLUTION)
#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)

#define NICE_0_LOAD		SCHED_LOAD_SCALE
#define NICE_0_SHIFT		SCHED_LOAD_SHIFT

/*
 * Single value that decides SCHED_DEADLINE internal math precision.
 * 10 -> just above 1us
 * 9  -> just above 0.5us
 */
#define DL_SCALE	(10)

/*
 * These are the 'tuning knobs' of the scheduler:
 */

/*
 * Single value that denotes runtime == period, i.e. unlimited time.
 */
#define RUNTIME_INF	((u64)~0ULL)

static inline int fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static inline int rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static inline int dl_policy(int policy)
{
	return policy == SCHED_DEADLINE;
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

static inline int task_has_dl_policy(struct task_struct *p)
{
	return dl_policy(p->policy);
}

static inline bool dl_time_before(u64 a, u64 b)
{
	return (s64)(a - b) < 0;
}

/*
 * Tells if entity @a should preempt entity @b.
 */
static inline bool
dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
{
	return dl_time_before(a->deadline, b->deadline);
}
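
/*
 * Illustrative sketch (compiled out; not part of this header) of why the
 * signed cast in dl_time_before() is robust against u64 wrap-around: the
 * subtraction is taken modulo 2^64, so as long as the two timestamps are
 * less than 2^63 apart, the sign of the difference gives the right
 * ordering even across a wrap. Standalone userspace code, assumed names.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static int time_before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;	/* same trick as dl_time_before() */
}

int main(void)
{
	uint64_t near_wrap = UINT64_MAX - 5;	/* just before the counter wraps */
	uint64_t wrapped   = 10;		/* 16 ticks later, after the wrap */

	/* A naive '<' gets this wrong; the signed-difference test does not. */
	printf("naive:  %d\n", near_wrap < wrapped);		/* 0: wrong */
	printf("signed: %d\n", time_before(near_wrap, wrapped));	/* 1: correct */
	printf("signed: %d\n", time_before(wrapped, near_wrap));	/* 0: correct */
	return 0;
}
#endif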

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;
	ktime_t			rt_period;
	u64			rt_runtime;
	struct hrtimer		rt_period_timer;
	unsigned int		rt_period_active;
};

void __dl_clear_params(struct task_struct *p);

/*
 * To keep the bandwidth of -deadline tasks and groups under control
 * we need some place where:
 *  - store the maximum -deadline bandwidth of the system (the group);
 *  - cache the fraction of that bandwidth that is currently allocated.
 *
 * This is all done in the data structure below. It is similar to the
 * one used for RT-throttling (rt_bandwidth), with the main difference
 * that, since here we are only interested in admission control, we
 * do not decrease any runtime while the group "executes", nor do we
 * need a timer to replenish it.
 *
 * With respect to SMP, the bandwidth is given on a per-CPU basis,
 * meaning that:
 *  - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
 *  - dl_total_bw array contains, in the i-th element, the currently
 *    allocated bandwidth on the i-th CPU.
 * Moreover, groups consume bandwidth on each CPU, while tasks only
 * consume bandwidth on the CPU they're running on.
 * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
 * that will be shown the next time the proc or cgroup controls will
 * be read. It in turn can be changed by writing on its own control.
 */
struct dl_bandwidth {
	raw_spinlock_t dl_runtime_lock;
	u64 dl_runtime;
	u64 dl_period;
};

static inline int dl_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

extern struct dl_bw *dl_bw_of(int i);

struct dl_bw {
	raw_spinlock_t lock;
	u64 bw, total_bw;
};

static inline
void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw)
{
	dl_b->total_bw -= tsk_bw;
}

static inline
void __dl_add(struct dl_bw *dl_b, u64 tsk_bw)
{
	dl_b->total_bw += tsk_bw;
}

static inline
bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
{
	return dl_b->bw != -1 &&
	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
}
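
/*
 * Illustrative sketch (compiled out; standalone userspace code) of the
 * admission test above. Bandwidths are fixed-point fractions of a CPU,
 * assumed here to use the 2^20 scale of to_ratio() declared later in
 * this header; a new -deadline task is admitted only if the sum of
 * allocated fractions, with its old bandwidth swapped for the new one,
 * stays below bw * cpus. All names below are made up for illustration.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define BW_SHIFT 20	/* assumed to match to_ratio()'s scale */

static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
	return (runtime << BW_SHIFT) / period;
}

int main(void)
{
	uint64_t sys_bw   = to_ratio(1000000, 950000);	/* 95% per CPU (default) */
	uint64_t total_bw = to_ratio(100000, 30000) * 2;	/* two 30% tasks */
	uint64_t new_bw   = to_ratio(100000, 50000);	/* candidate: 50% */
	int cpus = 1;

	/* mirrors __dl_overflow(): overflow if total - old + new exceeds bw * cpus */
	int overflow = sys_bw * cpus < total_bw - 0 + new_bw;

	printf("admit 50%% task: %s\n", overflow ? "no" : "yes");	/* "no" */
	return 0;
}
#endif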

extern struct mutex sched_domains_mutex;

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;
struct rt_rq;

extern struct list_head task_groups;

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t lock;
	ktime_t period;
	u64 quota, runtime;
	s64 hierarchical_quota;
	u64 runtime_expires;

	int idle, period_active;
	struct hrtimer period_timer, slack_timer;
	struct list_head throttled_cfs_rq;

	/* statistics */
	int nr_periods, nr_throttled;
	u64 throttled_time;
#endif
};

/* task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each cpu */
	struct sched_entity **se;
	/* runqueue "owned" by this group on each cpu */
	struct cfs_rq **cfs_rq;
	unsigned long shares;

#ifdef CONFIG_SMP
	atomic_long_t load_avg;
#endif
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity **rt_se;
	struct rt_rq **rt_rq;

	struct rt_bandwidth rt_bandwidth;
#endif

	struct rcu_head rcu;
	struct list_head list;

	struct task_group *parent;
	struct list_head siblings;
	struct list_head children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif

	struct cfs_bandwidth cfs_bandwidth;
};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so neither an entity's weight nor a task group's
 * shares value should be too large.
 * (The default weight is 1024 - so there's no practical
 *  limitation from this.)
 */
#define MIN_SHARES	(1UL <<  1)
#define MAX_SHARES	(1UL << 18)
#endif

typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data);

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}
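
/*
 * Illustrative sketch (compiled out; standalone userspace code) of the
 * visitor pattern walk_tg_tree() implements: @down runs when a node is
 * first entered (pre-order), @up when it is left for the last time
 * (post-order). The toy tree below stands in for the task_group
 * hierarchy; all names are made up for illustration. The real tg_nop()
 * is the no-op visitor used when only one direction matters.
 */
#if 0
#include <stdio.h>

struct node { const char *name; struct node *child[2]; };

typedef int (*visitor)(struct node *, void *);

static int walk(struct node *n, visitor down, visitor up, void *data)
{
	int i, ret = down(n, data);

	if (ret)
		return ret;	/* non-zero from @down aborts the walk */
	for (i = 0; i < 2 && n->child[i]; i++) {
		ret = walk(n->child[i], down, up, data);
		if (ret)
			return ret;
	}
	return up(n, data);
}

static int print_down(struct node *n, void *d) { printf("enter %s\n", n->name); return 0; }
static int print_up(struct node *n, void *d)   { printf("leave %s\n", n->name); return 0; }

int main(void)
{
	struct node a = { "leaf-a" }, b = { "leaf-b" };
	struct node root = { "root", { &a, &b } };

	return walk(&root, print_down, print_up, NULL);
}
#endif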

extern int tg_nop(struct task_group *tg, void *data);

extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void unregister_fair_sched_group(struct task_group *tg, int cpu);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent);

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_online_group(struct task_group *tg,
			       struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_offline_group(struct task_group *tg);

extern void sched_move_task(struct task_struct *tsk);

#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
#endif

#else /* CONFIG_CGROUP_SCHED */

struct cfs_bandwidth { };

#endif /* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight load;
	unsigned int nr_running, h_nr_running;

	u64 exec_clock;
	u64 min_vruntime;
#ifndef CONFIG_64BIT
	u64 min_vruntime_copy;
#endif

	struct rb_root tasks_timeline;
	struct rb_node *rb_leftmost;

	/*
	 * 'curr' points to currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e. when none are currently running).
	 */
	struct sched_entity *curr, *next, *last, *skip;

#ifdef CONFIG_SCHED_DEBUG
	unsigned int nr_spread_over;
#endif

#ifdef CONFIG_SMP
	/*
	 * CFS load tracking
	 */
	struct sched_avg avg;
	u64 runnable_load_sum;
	unsigned long runnable_load_avg;
#ifdef CONFIG_FAIR_GROUP_SCHED
	unsigned long tg_load_avg_contrib;
#endif
	atomic_long_t removed_load_avg, removed_util_avg;
#ifndef CONFIG_64BIT
	u64 load_last_update_time_copy;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	/*
	 *   h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long h_load;
	u64 last_h_load_update;
	struct sched_entity *h_load_next;
#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
	 * list is used during load balance.
	 */
	int on_list;
	struct list_head leaf_cfs_rq_list;
	struct task_group *tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_CFS_BANDWIDTH
	int runtime_enabled;
	u64 runtime_expires;
	s64 runtime_remaining;

	u64 throttled_clock, throttled_clock_task;
	u64 throttled_clock_task_time;
	int throttled, throttle_count;
	struct list_head throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};
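
/*
 * Illustrative sketch (compiled out), in the spirit of fair.c's
 * __enqueue_entity(), of how tasks_timeline/rb_leftmost above are meant
 * to be used: entities are keyed by vruntime, and the leftmost
 * (smallest-vruntime) node is cached so picking the next task is O(1)
 * instead of walking down the tree. Helper names here are assumptions.
 */
#if 0
static inline int entity_before(struct sched_entity *a,
				struct sched_entity *b)
{
	return (s64)(a->vruntime - b->vruntime) < 0;
}

static void timeline_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct rb_node *parent = NULL;
	int leftmost = 1;

	while (*link) {
		struct sched_entity *entry;

		parent = *link;
		entry = rb_entry(parent, struct sched_entity, run_node);
		if (entity_before(se, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;	/* went right at least once */
		}
	}

	/* Cache the new leftmost only if we never turned right. */
	if (leftmost)
		cfs_rq->rb_leftmost = &se->run_node;

	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}
#endif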

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/* RT IPI pull logic requires IRQ_WORK */
#ifdef CONFIG_IRQ_WORK
# define HAVE_RT_PUSH_IPI
#endif

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array active;
	unsigned int rt_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long rt_nr_migratory;
	unsigned long rt_nr_total;
	int overloaded;
	struct plist_head pushable_tasks;
#ifdef HAVE_RT_PUSH_IPI
	int push_flags;
	int push_cpu;
	struct irq_work push_work;
	raw_spinlock_t push_lock;
#endif
#endif /* CONFIG_SMP */
	int rt_queued;

	int rt_throttled;
	u64 rt_time;
	u64 rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long rt_nr_boosted;

	struct rq *rq;
	struct task_group *tg;
#endif
};
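
/*
 * Illustrative sketch (compiled out) of how the rt_prio_array inside
 * 'active' is consulted: each priority level has a FIFO queue, the
 * bitmap marks non-empty levels (plus one delimiter bit), and the
 * highest-priority runnable task is found by scanning for the first set
 * bit, much like rt.c's pick path does with sched_find_first_bit().
 * The function name below is an assumption, not the real kernel API.
 */
#if 0
static struct task_struct *pick_highest_rt(struct rt_prio_array *array)
{
	struct sched_rt_entity *rt_se;
	int idx;

	/* Lower index == higher RT priority; bit MAX_RT_PRIO is the delimiter. */
	idx = sched_find_first_bit(array->bitmap);
	if (idx >= MAX_RT_PRIO)
		return NULL;	/* only the delimiter bit is set: queue empty */

	rt_se = list_entry(array->queue[idx].next,
			   struct sched_rt_entity, run_list);
	return container_of(rt_se, struct task_struct, rt);
}
#endif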

/* Deadline class' related fields in a runqueue */
struct dl_rq {
	/* runqueue is an rbtree, ordered by deadline */
	struct rb_root rb_root;
	struct rb_node *rb_leftmost;

	unsigned long dl_nr_running;

#ifdef CONFIG_SMP
	/*
	 * Deadline values of the currently executing and the
	 * earliest ready task on this rq. Caching these facilitates
	 * the decision whether or not a ready but not running task
	 * should migrate somewhere else.
	 */
	struct {
		u64 curr;
		u64 next;
	} earliest_dl;

	unsigned long dl_nr_migratory;
	int overloaded;

	/*
	 * Tasks on this rq that can be pushed away. They are kept in
	 * an rb-tree, ordered by tasks' deadlines, with caching
	 * of the leftmost (earliest deadline) element.
	 */
	struct rb_root pushable_dl_tasks_root;
	struct rb_node *pushable_dl_tasks_leftmost;
#else
	struct dl_bw dl_bw;
#endif
};

#ifdef CONFIG_SMP

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member cpus from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 */
struct root_domain {
	atomic_t refcount;
	atomic_t rto_count;
	struct rcu_head rcu;
	cpumask_var_t span;
	cpumask_var_t online;

	/* Indicate more than one runnable task for any CPU */
	bool overload;

	/*
	 * The bit corresponding to a CPU gets set here if such CPU has more
	 * than one runnable -deadline task (as it is below for RT tasks).
	 */
	cpumask_var_t dlo_mask;
	atomic_t dlo_count;
	struct dl_bw dl_bw;
	struct cpudl cpudl;

	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t rto_mask;
	struct cpupri cpupri;
};

extern struct root_domain def_root_domain;

#endif /* CONFIG_SMP */

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: code that locks multiple runqueues (such as the load
 * balancing or the thread migration code) must acquire the locks in
 * ascending &runqueue order.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned int nr_running;
#ifdef CONFIG_NUMA_BALANCING
	unsigned int nr_numa_running;
	unsigned int nr_preferred_running;
#endif
	#define CPU_LOAD_IDX_MAX 5
	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
	unsigned long last_load_update_tick;
#ifdef CONFIG_NO_HZ_COMMON
	u64 nohz_stamp;
	unsigned long nohz_flags;
#endif
#ifdef CONFIG_NO_HZ_FULL
	unsigned long last_sched_tick;
#endif
	/* capture load from *all* tasks on this cpu: */
	struct load_weight load;
	unsigned long nr_load_updates;
	u64 nr_switches;

	struct cfs_rq cfs;
	struct rt_rq rt;
	struct dl_rq dl;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this cpu: */
	struct list_head leaf_cfs_rq_list;
#endif /* CONFIG_FAIR_GROUP_SCHED */

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long nr_uninterruptible;

	struct task_struct *curr, *idle, *stop;
	unsigned long next_balance;
	struct mm_struct *prev_mm;

	unsigned int clock_skip_update;
	u64 clock;
	u64 clock_task;

	atomic_t nr_iowait;

#ifdef CONFIG_SMP
	struct root_domain *rd;
	struct sched_domain *sd;

	unsigned long cpu_capacity;
	unsigned long cpu_capacity_orig;

	struct callback_head *balance_callback;

	unsigned char idle_balance;
	/* For active balancing */
	int active_balance;
	int push_cpu;
	struct cpu_stop_work active_balance_work;
	/* cpu of this runqueue: */
	int cpu;
	int online;

	struct list_head cfs_tasks;

	u64 rt_avg;
	u64 age_stamp;
	u64 idle_stamp;
	u64 avg_idle;

	/* This is used to determine avg_idle's max value */
	u64 max_idle_balance_cost;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64 prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64 prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64 prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long calc_load_update;
	long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	int hrtick_csd_pending;
	struct call_single_data hrtick_csd;
#endif
	struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info rq_sched_info;
	unsigned long long rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int yld_count;

	/* schedule() stats */
	unsigned int sched_count;
	unsigned int sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;
#endif

#ifdef CONFIG_SMP
	struct llist_head wake_list;
#endif

#ifdef CONFIG_CPU_IDLE
	/* Must be inspected within a rcu lock section */
	struct cpuidle_state *idle_state;
#endif
};

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}

DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		this_cpu_ptr(&runqueues)
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		raw_cpu_ptr(&runqueues)

static inline u64 __rq_clock_broken(struct rq *rq)
{
	return READ_ONCE(rq->clock);
}

static inline u64 rq_clock(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	return rq->clock;
}

static inline u64 rq_clock_task(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	return rq->clock_task;
}
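
/*
 * Illustrative usage sketch (compiled out) for the accessors above:
 * rq_clock()/rq_clock_task() may only be read under rq->lock (lockdep
 * asserts this), while __rq_clock_broken() is the escape hatch for
 * lockless, possibly-stale reads. The function name is an assumption.
 */
#if 0
static u64 snapshot_cpu_clock(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;
	u64 now;

	raw_spin_lock_irqsave(&rq->lock, flags);
	now = rq_clock(rq);		/* safe: rq->lock is held */
	raw_spin_unlock_irqrestore(&rq->lock, flags);

	return now;
}
#endif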

#define RQCF_REQ_SKIP	0x01
#define RQCF_ACT_SKIP	0x02

static inline void rq_clock_skip_update(struct rq *rq, bool skip)
{
	lockdep_assert_held(&rq->lock);
	if (skip)
		rq->clock_skip_update |= RQCF_REQ_SKIP;
	else
		rq->clock_skip_update &= ~RQCF_REQ_SKIP;
}

#ifdef CONFIG_NUMA
enum numa_topology_type {
	NUMA_DIRECT,
	NUMA_GLUELESS_MESH,
	NUMA_BACKPLANE,
};
extern enum numa_topology_type sched_numa_topology_type;
extern int sched_max_numa_distance;
extern bool find_numa_distance(int distance);
#endif

#ifdef CONFIG_NUMA_BALANCING
/* The regions in numa_faults array from task_struct */
enum numa_faults_stats {
	NUMA_MEM = 0,
	NUMA_CPU,
	NUMA_MEMBUF,
	NUMA_CPUBUF
};
extern void sched_setnuma(struct task_struct *p, int node);
extern int migrate_task_to(struct task_struct *p, int cpu);
extern int migrate_swap(struct task_struct *, struct task_struct *);
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_SMP

static inline void
queue_balance_callback(struct rq *rq,
		       struct callback_head *head,
		       void (*func)(struct rq *rq))
{
	lockdep_assert_held(&rq->lock);

	if (unlikely(head->next))
		return;

	head->func = (void (*)(struct callback_head *))func;
	head->next = rq->balance_callback;
	rq->balance_callback = head;
}

extern void sched_ttwu_pending(void);

#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
			__sd; __sd = __sd->parent)

#define for_each_lower_domain(sd) for (; sd; sd = sd->child)

/**
 * highest_flag_domain - Return highest sched_domain containing flag.
 * @cpu:	The cpu whose highest level of sched domain is to
 *		be returned.
 * @flag:	The flag to check for the highest sched_domain
 *		for the given cpu.
 *
 * Returns the highest sched_domain of a cpu which contains the given flag.
 */
static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd, *hsd = NULL;

	for_each_domain(cpu, sd) {
		if (!(sd->flags & flag))
			break;
		hsd = sd;
	}

	return hsd;
}

static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd;

	for_each_domain(cpu, sd) {
		if (sd->flags & flag)
			break;
	}

	return sd;
}
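
/*
 * Illustrative sketch (compiled out) of how these helpers are typically
 * used; this mirrors the shape of what update_top_cache_domain() does
 * to derive the per-cpu sd_llc pointers declared just below. The domain
 * tree is RCU-protected, so the walk must happen inside an RCU (or
 * preempt-disabled) section. The function name is an assumption.
 */
#if 0
static void report_llc_domain(int cpu)
{
	struct sched_domain *sd;
	int size = 1;

	rcu_read_lock();
	/* Highest domain whose CPUs still share a last-level cache: */
	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
	if (sd)
		size = cpumask_weight(sched_domain_span(sd));
	rcu_read_unlock();

	pr_info("cpu%d: LLC domain spans %d CPUs\n", cpu, size);
}
#endif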

DECLARE_PER_CPU(struct sched_domain *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_size);
DECLARE_PER_CPU(int, sd_llc_id);
DECLARE_PER_CPU(struct sched_domain *, sd_numa);
DECLARE_PER_CPU(struct sched_domain *, sd_busy);
DECLARE_PER_CPU(struct sched_domain *, sd_asym);

struct sched_group_capacity {
	atomic_t ref;
	/*
	 * CPU capacity of this group, SCHED_LOAD_SCALE being max capacity
	 * for a single CPU.
	 */
	unsigned int capacity;
	unsigned long next_update;
	int imbalance; /* XXX unrelated to capacity but shared group state */
	/*
	 * Number of busy cpus in this group.
	 */
	atomic_t nr_busy_cpus;

	unsigned long cpumask[0]; /* iteration mask */
};

struct sched_group {
	struct sched_group *next;	/* Must be a circular list */
	atomic_t ref;

	unsigned int group_weight;
	struct sched_group_capacity *sgc;

	/*
	 * The CPUs this group covers.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long cpumask[0];
};

static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
{
	return to_cpumask(sg->cpumask);
}

/*
 * cpumask masking which cpus in the group are allowed to iterate up the domain
 * tree.
 */
static inline struct cpumask *sched_group_mask(struct sched_group *sg)
{
	return to_cpumask(sg->sgc->cpumask);
}

/**
 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
 * @group: The group whose first cpu is to be returned.
 */
static inline unsigned int group_first_cpu(struct sched_group *group)
{
	return cpumask_first(sched_group_cpus(group));
}

extern int group_balance_cpu(struct sched_group *sg);

#else

static inline void sched_ttwu_pending(void) { }

#endif /* CONFIG_SMP */

#include "stats.h"
#include "auto_group.h"

#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We cannot use task_css() and friends because the cgroup subsystem
 * changes that value before the cgroup_subsys::attach() method is called,
 * therefore we cannot pin it and might observe the wrong value.
 *
 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
 * core changes this before calling sched_move_task().
 *
 * Instead we use a 'copy' which is updated from sched_move_task() while
 * holding both task_struct::pi_lock and rq::lock.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
	return p->sched_task_group;
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
	struct task_group *tg = task_group(p);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	p->se.cfs_rq = tg->cfs_rq[cpu];
	p->se.parent = tg->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq  = tg->rt_rq[cpu];
	p->rt.parent = tg->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
	return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */

static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	set_task_rq(p, cpu);
#ifdef CONFIG_SMP
	/*
	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
	 * successfully executed on another CPU. We must ensure that updates of
	 * per-task data have been completed by this moment.
	 */
	smp_wmb();
	task_thread_info(p)->cpu = cpu;
	p->wake_cpu = cpu;
#endif
}

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# include <linux/static_key.h>
# define const_debug __read_mostly
#else
# define const_debug const
#endif

extern const_debug unsigned int sysctl_sched_features;

#define SCHED_FEAT(name, enabled)	\
	__SCHED_FEAT_##name ,

enum {
#include "features.h"
	__SCHED_FEAT_NR,
};

#undef SCHED_FEAT

#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
#define SCHED_FEAT(name, enabled)					\
static __always_inline bool static_branch_##name(struct static_key *key) \
{									\
	return static_key_##enabled(key);				\
}

#include "features.h"

#undef SCHED_FEAT

extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
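
/*
 * Illustrative sketch (compiled out; standalone userspace code) of the
 * x-macro pattern used above: features.h is a list of
 * SCHED_FEAT(name, enabled) entries re-included under different
 * definitions of SCHED_FEAT to stamp out, in turn, enum constants and
 * per-feature helpers. The tiny feature list below is made up.
 */
#if 0
#include <stdio.h>

/* Stand-in for features.h: */
#define MY_FEATURES	\
	FEAT(GENTLE_FAIR_SLEEPERS, 1) \
	FEAT(START_DEBIT, 1) \
	FEAT(HRTICK, 0)

/* Expansion 1: an enum of feature indices. */
#define FEAT(name, enabled) __FEAT_##name,
enum { MY_FEATURES __FEAT_NR };
#undef FEAT

/* Expansion 2: a default bitmask built from the 'enabled' column. */
#define FEAT(name, enabled) | ((enabled) << __FEAT_##name)
static const unsigned int feat_mask = 0 MY_FEATURES;
#undef FEAT

#define feat(x) (feat_mask & (1U << __FEAT_##x))

int main(void)
{
	printf("HRTICK enabled: %d\n", !!feat(HRTICK));		/* 0 */
	printf("START_DEBIT enabled: %d\n", !!feat(START_DEBIT));	/* 1 */
	return 0;
}
#endif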

#ifdef CONFIG_NUMA_BALANCING
#define sched_feat_numa(x) sched_feat(x)
#ifdef CONFIG_SCHED_DEBUG
#define numabalancing_enabled sched_feat_numa(NUMA)
#else
extern bool numabalancing_enabled;
#endif /* CONFIG_SCHED_DEBUG */
#else
#define sched_feat_numa(x) (0)
#define numabalancing_enabled (0)
#endif /* CONFIG_NUMA_BALANCING */

static inline u64 global_rt_period(void)
{
	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
	if (sysctl_sched_rt_runtime < 0)
		return RUNTIME_INF;

	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}

static inline int task_current(struct rq *rq, struct task_struct *p)
{
	return rq->curr == p;
}

static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
	return p->on_cpu;
#else
	return task_current(rq, p);
#endif
}

static inline int task_on_rq_queued(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_QUEUED;
}

static inline int task_on_rq_migrating(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_MIGRATING;
}

#ifndef prepare_arch_switch
# define prepare_arch_switch(next)	do { } while (0)
#endif
#ifndef finish_arch_post_lock_switch
# define finish_arch_post_lock_switch()	do { } while (0)
#endif

static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * We can optimise this out completely for !SMP, because the
	 * SMP rebalancing from interrupt is the only thing that cares
	 * here.
	 */
	next->on_cpu = 1;
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
	 * We must ensure this doesn't happen until the switch is completely
	 * finished.
	 */
	smp_wmb();
	prev->on_cpu = 0;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	/* this is a valid case when another task releases the spinlock */
	rq->lock.owner = current;
#endif
	/*
	 * If we are tracking spinlock dependencies then we have to
	 * fix up the runqueue lock - which gets 'carried over' from
	 * prev into current:
	 */
	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);

	raw_spin_unlock_irq(&rq->lock);
}

/*
 * wake flags
 */
#define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
#define WF_FORK		0x02		/* child wakeup after fork */
#define WF_MIGRATED	0x4		/* internal use, task got migrated */

/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 * of tasks with abnormal "nice" values across CPUs the contribution that
 * each task makes to its run queue's load is weighted according to its
 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
 * scaled version of the new time slice allocation that they receive on time
 * slice expiry etc.
 */

#define WEIGHT_IDLEPRIO		3
#define WMULT_IDLEPRIO		1431655765

/*
 * Nice levels are multiplicative, with a gentle 10% change for every
 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
 * nice 1, it will get ~10% less CPU time than another CPU-bound task
 * that remained on nice 0.
 *
 * The "10% effect" is relative and cumulative: from _any_ nice level,
 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
 * If a task goes up by ~10% and another task goes down by ~10% then
 * the relative distance between them is ~25%.)
 */
static const int prio_to_weight[40] = {
 /* -20 */     88761,     71755,     56483,     46273,     36291,
 /* -15 */     29154,     23254,     18705,     14949,     11916,
 /* -10 */      9548,      7620,      6100,      4904,      3906,
 /*  -5 */      3121,      2501,      1991,      1586,      1277,
 /*   0 */      1024,       820,       655,       526,       423,
 /*   5 */       335,       272,       215,       172,       137,
 /*  10 */       110,        87,        70,        56,        45,
 /*  15 */        36,        29,        23,        18,        15,
};
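
/*
 * Illustrative sketch (compiled out; standalone userspace code)
 * checking the "multiplier of 1.25" claim against the table: each step
 * down divides the weight by ~1.25, so weight(nice) ~= 1024 / 1.25^nice.
 */
#if 0
#include <stdio.h>
#include <math.h>

int main(void)
{
	int nice;

	for (nice = -20; nice < 20; nice++) {
		double ideal = 1024.0 / pow(1.25, nice);

		printf("nice %3d: ideal %9.0f\n", nice, ideal);
	}
	/* e.g. nice 1 -> ~819, matching prio_to_weight[21] == 820 */
	return 0;
}
#endif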

/*
 * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
 *
 * In cases where the weight does not change often, we can use the
 * precalculated inverse to speed up arithmetic by turning divisions
 * into multiplications:
 */
static const u32 prio_to_wmult[40] = {
 /* -20 */     48388,     59856,     76040,     92818,    118348,
 /* -15 */    147320,    184698,    229616,    287308,    360437,
 /* -10 */    449829,    563644,    704093,    875809,   1099582,
 /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
 /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
 /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
 /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
 /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};
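
/*
 * Illustrative sketch (compiled out; standalone userspace code) of what
 * the inverse table buys us. CFS frequently needs delta * (1024/weight);
 * with wmult = 2^32 / weight precomputed, the division becomes a
 * multiply and a shift, in the spirit of fair.c's __calc_delta().
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t delta_exec = 3000000;	/* 3ms of runtime, in ns */
	uint32_t weight = 820;		/* nice 1: prio_to_weight[21] */
	uint32_t wmult = 5237765;	/* 2^32 / 820: prio_to_wmult[21] */

	/* division form: delta * NICE_0_LOAD / weight */
	uint64_t by_div  = delta_exec * 1024 / weight;
	/* multiplication form: (delta * NICE_0_LOAD * wmult) >> 32 */
	uint64_t by_mult = (delta_exec * 1024 * (uint64_t)wmult) >> 32;

	/* the two agree to within rounding */
	printf("div: %llu  mult: %llu\n",
	       (unsigned long long)by_div, (unsigned long long)by_mult);
	return 0;
}
#endif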

#define ENQUEUE_WAKEUP		1
#define ENQUEUE_HEAD		2
#ifdef CONFIG_SMP
#define ENQUEUE_WAKING		4	/* sched_class::task_waking was called */
#else
#define ENQUEUE_WAKING		0
#endif
#define ENQUEUE_REPLENISH	8

#define DEQUEUE_SLEEP		1

#define RETRY_TASK		((void *)-1UL)

struct sched_class {
	const struct sched_class *next;

	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*yield_task) (struct rq *rq);
	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);

	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);

	/*
	 * The pick_next_task() method that returns the next task is
	 * responsible for calling put_prev_task() on the @prev task, or
	 * something equivalent.
	 *
	 * May return RETRY_TASK when it finds a higher prio class has runnable
	 * tasks.
	 */
	struct task_struct * (*pick_next_task) (struct rq *rq,
						struct task_struct *prev);
	void (*put_prev_task) (struct rq *rq, struct task_struct *p);

#ifdef CONFIG_SMP
	int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
	void (*migrate_task_rq)(struct task_struct *p, int next_cpu);

	void (*task_waking) (struct task_struct *task);
	void (*task_woken) (struct rq *this_rq, struct task_struct *task);

	void (*set_cpus_allowed)(struct task_struct *p,
				 const struct cpumask *newmask);

	void (*rq_online)(struct rq *rq);
	void (*rq_offline)(struct rq *rq);
#endif

	void (*set_curr_task) (struct rq *rq);
	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
	void (*task_fork) (struct task_struct *p);
	void (*task_dead) (struct task_struct *p);

	/*
	 * The switched_from() call is allowed to drop rq->lock, therefore we
	 * cannot assume the switched_from/switched_to pair is serialized by
	 * rq->lock. They are however serialized by p->pi_lock.
	 */
	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
			     int oldprio);

	unsigned int (*get_rr_interval) (struct rq *rq,
					 struct task_struct *task);

	void (*update_curr) (struct rq *rq);

#ifdef CONFIG_FAIR_GROUP_SCHED
	void (*task_move_group) (struct task_struct *p, int on_rq);
#endif
};

static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
{
	prev->sched_class->put_prev_task(rq, prev);
}

#define sched_class_highest (&stop_sched_class)
#define for_each_class(class) \
   for (class = sched_class_highest; class; class = class->next)

extern const struct sched_class stop_sched_class;
extern const struct sched_class dl_sched_class;
extern const struct sched_class rt_sched_class;
extern const struct sched_class fair_sched_class;
extern const struct sched_class idle_sched_class;
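
/*
 * Illustrative sketch (compiled out) of how the class list above is
 * consumed; this mirrors the shape of core.c's pick_next_task() loop:
 * classes are linked stop -> dl -> rt -> fair -> idle, and the first
 * class with a runnable task wins. RETRY_TASK restarts the walk because
 * a higher-priority class became runnable meanwhile. The function name
 * is an assumption.
 */
#if 0
static struct task_struct *pick_next(struct rq *rq, struct task_struct *prev)
{
	const struct sched_class *class;
	struct task_struct *p;

again:
	for_each_class(class) {
		p = class->pick_next_task(rq, prev);
		if (p) {
			if (unlikely(p == RETRY_TASK))
				goto again;
			return p;
		}
	}
	BUG();	/* the idle class should always have a task */
}
#endif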

#ifdef CONFIG_SMP

extern void update_group_capacity(struct sched_domain *sd, int cpu);

extern void trigger_load_balance(struct rq *rq);

extern void idle_enter_fair(struct rq *this_rq);
extern void idle_exit_fair(struct rq *this_rq);

extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);

#else

static inline void idle_enter_fair(struct rq *rq) { }
static inline void idle_exit_fair(struct rq *rq) { }

#endif

#ifdef CONFIG_CPU_IDLE
static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
	rq->idle_state = idle_state;
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	WARN_ON(!rcu_read_lock_held());
	return rq->idle_state;
}
#else
static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	return NULL;
}
#endif

extern void sysrq_sched_debug_show(void);
extern void sched_init_granularity(void);
extern void update_max_interval(void);

extern void init_sched_dl_class(void);
extern void init_sched_rt_class(void);
extern void init_sched_fair_class(void);

extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);

extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);

extern struct dl_bandwidth def_dl_bandwidth;
extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
extern void init_dl_task_timer(struct sched_dl_entity *dl_se);

unsigned long to_ratio(u64 period, u64 runtime);

extern void init_entity_runnable_average(struct sched_entity *se);

static inline void add_nr_running(struct rq *rq, unsigned count)
{
	unsigned prev_nr = rq->nr_running;

	rq->nr_running = prev_nr + count;

	if (prev_nr < 2 && rq->nr_running >= 2) {
#ifdef CONFIG_SMP
		if (!rq->rd->overload)
			rq->rd->overload = true;
#endif

#ifdef CONFIG_NO_HZ_FULL
		if (tick_nohz_full_cpu(rq->cpu)) {
			/*
			 * Tick is needed if more than one task runs on a CPU.
			 * Send the target an IPI to kick it out of nohz mode.
			 *
			 * We assume that IPI implies full memory barrier and the
			 * new value of rq->nr_running is visible on reception
			 * from the target.
			 */
			tick_nohz_full_kick_cpu(rq->cpu);
		}
#endif
	}
}

static inline void sub_nr_running(struct rq *rq, unsigned count)
{
	rq->nr_running -= count;
}

static inline void rq_last_tick_reset(struct rq *rq)
{
#ifdef CONFIG_NO_HZ_FULL
	rq->last_sched_tick = jiffies;
#endif
}

extern void update_rq_clock(struct rq *rq);

extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);

extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);

extern const_debug unsigned int sysctl_sched_time_avg;
extern const_debug unsigned int sysctl_sched_nr_migrate;
extern const_debug unsigned int sysctl_sched_migration_cost;

static inline u64 sched_avg_period(void)
{
	return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
}

#ifdef CONFIG_SCHED_HRTICK

/*
 * Use hrtick when:
 *  - enabled by features
 *  - hrtimer is actually high res
 */
static inline int hrtick_enabled(struct rq *rq)
{
	if (!sched_feat(HRTICK))
		return 0;
	if (!cpu_active(cpu_of(rq)))
		return 0;
	return hrtimer_is_hres_active(&rq->hrtick_timer);
}

void hrtick_start(struct rq *rq, u64 delay);

#else

static inline int hrtick_enabled(struct rq *rq)
{
	return 0;
}

#endif /* CONFIG_SCHED_HRTICK */

#ifdef CONFIG_SMP
extern void sched_avg_update(struct rq *rq);

#ifndef arch_scale_freq_capacity
static __always_inline
unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
{
	return SCHED_CAPACITY_SCALE;
}
#endif

static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
	rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq));
	sched_avg_update(rq);
}
#else
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
static inline void sched_avg_update(struct rq *rq) { }
#endif

/*
 * __task_rq_lock - lock the rq @p resides on.
 */
static inline struct rq *__task_rq_lock(struct task_struct *p)
	__acquires(rq->lock)
{
	struct rq *rq;

	lockdep_assert_held(&p->pi_lock);

	for (;;) {
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			lockdep_pin_lock(&rq->lock);
			return rq;
		}
		raw_spin_unlock(&rq->lock);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}
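
/*
 * Illustrative usage sketch (compiled out): __task_rq_lock() may only
 * be called with p->pi_lock already held (lockdep asserts this); the
 * retry loop above is what makes the result stable against concurrent
 * migration. The full task_rq_lock() below wraps both locks. The
 * function name is an assumption.
 */
#if 0
static void frob_task_rq(struct task_struct *p)
{
	struct rq *rq;

	raw_spin_lock_irq(&p->pi_lock);
	rq = __task_rq_lock(p);	/* rq is now guaranteed to be p's rq */

	/* ... operate on p under both pi_lock and rq->lock ... */

	__task_rq_unlock(rq);
	raw_spin_unlock_irq(&p->pi_lock);
}
#endif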

/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 */
static inline struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
	__acquires(p->pi_lock)
	__acquires(rq->lock)
{
	struct rq *rq;

	for (;;) {
		raw_spin_lock_irqsave(&p->pi_lock, *flags);
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		/*
		 *	move_queued_task()		task_rq_lock()
		 *
		 *	ACQUIRE (rq->lock)
		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
		 *	[S] ->cpu = new_cpu		[L] task_rq()
		 *					[L] ->on_rq
		 *	RELEASE (rq->lock)
		 *
		 * If we observe the old cpu in task_rq_lock, the acquire of
		 * the old rq->lock will fully serialize against the stores.
		 *
		 * If we observe the new cpu in task_rq_lock, the acquire will
		 * pair with the WMB to ensure we must then also see migrating.
		 */
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			lockdep_pin_lock(&rq->lock);
			return rq;
		}
		raw_spin_unlock(&rq->lock);
		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}

static inline void __task_rq_unlock(struct rq *rq)
	__releases(rq->lock)
{
	lockdep_unpin_lock(&rq->lock);
	raw_spin_unlock(&rq->lock);
}

static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
	__releases(rq->lock)
	__releases(p->pi_lock)
{
	lockdep_unpin_lock(&rq->lock);
	raw_spin_unlock(&rq->lock);
	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
}

#ifdef CONFIG_SMP
#ifdef CONFIG_PREEMPT

static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);

/*
 * fair double_lock_balance: Safely acquires both rq->locks in a fair
 * way at the expense of forcing extra atomic operations in all
 * invocations. This assures that the double_lock is acquired using the
 * same underlying policy as the spinlock_t on this architecture, which
 * reduces latency compared to the unfair variant below. However, it
 * also adds more overhead and therefore may reduce throughput.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	raw_spin_unlock(&this_rq->lock);
	double_rq_lock(this_rq, busiest);

	return 1;
}

#else
/*
 * Unfair double_lock_balance: Optimizes throughput at the expense of
 * latency by eliminating extra atomic operations when the locks are
 * already in proper order on entry. This favors lower cpu-ids and will
 * grant the double lock to lower cpus over higher ids under contention,
 * regardless of entry order into the function.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	int ret = 0;

	if (unlikely(!raw_spin_trylock(&busiest->lock))) {
		if (busiest < this_rq) {
			raw_spin_unlock(&this_rq->lock);
			raw_spin_lock(&busiest->lock);
			raw_spin_lock_nested(&this_rq->lock,
					      SINGLE_DEPTH_NESTING);
			ret = 1;
		} else
			raw_spin_lock_nested(&busiest->lock,
					      SINGLE_DEPTH_NESTING);
	}
	return ret;
}

#endif /* CONFIG_PREEMPT */

/*
 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
 */
static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
	if (unlikely(!irqs_disabled())) {
		/* printk() doesn't work well under rq->lock */
		raw_spin_unlock(&this_rq->lock);
		BUG_ON(1);
	}

	return _double_lock_balance(this_rq, busiest);
}

static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(busiest->lock)
{
	raw_spin_unlock(&busiest->lock);
	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
}

static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	spin_lock(l1);
	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	spin_lock_irq(l1);
	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	raw_spin_lock(l1);
	raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}
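
/*
 * Illustrative sketch (compiled out; standalone userspace code, pthread
 * mutexes standing in for rq locks) of the address-ordering convention
 * used by double_lock() and friends: if every path takes the
 * lower-addressed lock first, two threads locking the same pair can
 * never each hold one lock while waiting for the other, so ABBA
 * deadlock is impossible. All names below are made up for illustration.
 */
#if 0
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ma = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t mb = PTHREAD_MUTEX_INITIALIZER;

static void ordered_double_lock(pthread_mutex_t *l1, pthread_mutex_t *l2)
{
	if (l1 > l2) {		/* order by address, as sched.h does */
		pthread_mutex_t *tmp = l1;
		l1 = l2;
		l2 = tmp;
	}
	pthread_mutex_lock(l1);
	pthread_mutex_lock(l2);
}

static void *worker(void *arg)
{
	/* The threads pass the locks in opposite order; neither deadlocks. */
	ordered_double_lock(arg ? &ma : &mb, arg ? &mb : &ma);
	printf("thread %ld got both locks\n", (long)arg);
	pthread_mutex_unlock(&ma);
	pthread_mutex_unlock(&mb);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, worker, (void *)0);
	pthread_create(&t2, NULL, worker, (void *)1);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}
#endif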

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	if (rq1 == rq2) {
		raw_spin_lock(&rq1->lock);
		__acquire(rq2->lock);	/* Fake it out ;) */
	} else {
		if (rq1 < rq2) {
			raw_spin_lock(&rq1->lock);
			raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
		} else {
			raw_spin_lock(&rq2->lock);
			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
		}
	}
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	raw_spin_unlock(&rq1->lock);
	if (rq1 != rq2)
		raw_spin_unlock(&rq2->lock);
	else
		__release(rq2->lock);
}

#else /* CONFIG_SMP */

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	BUG_ON(rq1 != rq2);
	raw_spin_lock(&rq1->lock);
	__acquire(rq2->lock);	/* Fake it out ;) */
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	BUG_ON(rq1 != rq2);
	raw_spin_unlock(&rq1->lock);
	__release(rq2->lock);
}

#endif

extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);

#ifdef CONFIG_SCHED_DEBUG
extern void print_cfs_stats(struct seq_file *m, int cpu);
extern void print_rt_stats(struct seq_file *m, int cpu);
extern void print_dl_stats(struct seq_file *m, int cpu);
extern void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);

#ifdef CONFIG_NUMA_BALANCING
extern void
show_numa_stats(struct task_struct *p, struct seq_file *m);
extern void
print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
	unsigned long tpf, unsigned long gsf, unsigned long gpf);
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_SCHED_DEBUG */

extern void init_cfs_rq(struct cfs_rq *cfs_rq);
extern void init_rt_rq(struct rt_rq *rt_rq);
extern void init_dl_rq(struct dl_rq *dl_rq);

extern void cfs_bandwidth_usage_inc(void);
extern void cfs_bandwidth_usage_dec(void);

#ifdef CONFIG_NO_HZ_COMMON
enum rq_nohz_flag_bits {
	NOHZ_TICK_STOPPED,
	NOHZ_BALANCE_KICK,
};

#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING

DECLARE_PER_CPU(u64, cpu_hardirq_time);
DECLARE_PER_CPU(u64, cpu_softirq_time);

#ifndef CONFIG_64BIT
DECLARE_PER_CPU(seqcount_t, irq_time_seq);

static inline void irq_time_write_begin(void)
{
	__this_cpu_inc(irq_time_seq.sequence);
	smp_wmb();
}

static inline void irq_time_write_end(void)
{
	smp_wmb();
	__this_cpu_inc(irq_time_seq.sequence);
}

static inline u64 irq_time_read(int cpu)
{
	u64 irq_time;
	unsigned seq;

	do {
		seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
		irq_time = per_cpu(cpu_softirq_time, cpu) +
			   per_cpu(cpu_hardirq_time, cpu);
	} while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));

	return irq_time;
}
#else /* CONFIG_64BIT */
static inline void irq_time_write_begin(void)
{
}

static inline void irq_time_write_end(void)
{
}

static inline u64 irq_time_read(int cpu)
{
	return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
}
#endif /* CONFIG_64BIT */
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
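
/*
 * Illustrative sketch (compiled out; standalone userspace code) of the
 * seqcount protocol irq_time_read() relies on above: the writer bumps
 * the counter to odd before and back to even after an update; a reader
 * retries whenever it saw an odd count or the count changed under it,
 * so it never returns a torn 64-bit sum on 32-bit machines. The real
 * primitives add the memory barriers this toy version omits.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static volatile unsigned seq;
static volatile uint32_t time_lo, time_hi;	/* a u64 split as on 32-bit */

static void write_time(uint64_t t)
{
	seq++;			/* odd: update in progress */
	time_lo = (uint32_t)t;
	time_hi = (uint32_t)(t >> 32);
	seq++;			/* even again: update complete */
}

static uint64_t read_time(void)
{
	unsigned start;
	uint64_t t;

	do {
		while ((start = seq) & 1)
			;	/* writer active: spin */
		t = ((uint64_t)time_hi << 32) | time_lo;
	} while (seq != start);	/* raced with a writer: retry */

	return t;
}

int main(void)
{
	write_time(0x0123456789abcdefULL);
	printf("%llx\n", (unsigned long long)read_time());
	return 0;
}
#endif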