#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>

#include "cpupri.h"

extern __read_mostly int scheduler_running;

/*
 * Convert user-nice values [ -20 ... 0 ... 19 ]
 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
 * and back.
 */
#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio)	((prio) - MAX_RT_PRIO - 20)
#define TASK_NICE(p)		PRIO_TO_NICE((p)->static_prio)

/*
 * 'User priority' is the nice value converted to something we
 * can work with better when scaling various scheduler parameters,
 * it's a [ 0 ... 39 ] range.
 */
#define USER_PRIO(p)		((p) - MAX_RT_PRIO)
#define TASK_USER_PRIO(p)	USER_PRIO((p)->static_prio)
#define MAX_USER_PRIO		(USER_PRIO(MAX_PRIO))

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))

#define NICE_0_LOAD		SCHED_LOAD_SCALE
#define NICE_0_SHIFT		SCHED_LOAD_SHIFT

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * default timeslice is 100 msecs (used only for SCHED_RR tasks).
 * Timeslices get refilled after they expire.
 */
#define DEF_TIMESLICE		(100 * HZ / 1000)

/*
 * single value that denotes runtime == period, ie unlimited time.
 */
#define RUNTIME_INF	((u64)~0ULL)

static inline int rt_policy(int policy)
{
	if (policy == SCHED_FIFO || policy == SCHED_RR)
		return 1;
	return 0;
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;
	ktime_t			rt_period;
	u64			rt_runtime;
	struct hrtimer		rt_period_timer;
};

extern struct mutex sched_domains_mutex;

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;
struct rt_rq;

static LIST_HEAD(task_groups);

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t lock;
	ktime_t period;
	u64 quota, runtime;
	s64 hierarchal_quota;
	u64 runtime_expires;

	int idle, timer_active;
	struct hrtimer period_timer, slack_timer;
	struct list_head throttled_cfs_rq;

	/* statistics */
	int nr_periods, nr_throttled;
	u64 throttled_time;
#endif
};

/* task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each cpu */
	struct sched_entity **se;
	/* runqueue "owned" by this group on each cpu */
	struct cfs_rq **cfs_rq;
	unsigned long shares;

	atomic_t load_weight;
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity **rt_se;
	struct rt_rq **rt_rq;

	struct rt_bandwidth rt_bandwidth;
#endif

	struct rcu_head rcu;
	struct list_head list;

	struct task_group *parent;
	struct list_head siblings;
	struct list_head children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif

	struct cfs_bandwidth cfs_bandwidth;
};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD
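
/*
 * Illustrative note (not from the original header): with the common
 * configuration of SCHED_LOAD_SHIFT == 10, NICE_0_LOAD and therefore
 * ROOT_TASK_GROUP_LOAD evaluate to 1024, i.e. a freshly created task
 * group competes with the weight of a single nice-0 task. Values
 * requested through sched_group_set_shares() are expected to be clamped
 * into the [MIN_SHARES, MAX_SHARES] range defined below.
 */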

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so the weight of an entity should not be too large,
 * and neither should the shares value of a task group.
 * (The default weight is 1024 - so there's no practical
 * limitation from this.)
 */
#define MIN_SHARES	(1UL << 1)
#define MAX_SHARES	(1UL << 18)
#endif

/* Default task group.
 * Every task in the system belongs to this group at bootup.
 */
extern struct task_group root_task_group;

typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data);

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 *
 * Caller must hold the RCU read lock or a sufficient equivalent.
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}

extern int tg_nop(struct task_group *tg, void *data);

extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void unregister_fair_sched_group(struct task_group *tg, int cpu);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent);

#else /* CONFIG_CGROUP_SCHED */

struct cfs_bandwidth { };

#endif /* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight load;
	unsigned long nr_running, h_nr_running;

	u64 exec_clock;
	u64 min_vruntime;
#ifndef CONFIG_64BIT
	u64 min_vruntime_copy;
#endif

	struct rb_root tasks_timeline;
	struct rb_node *rb_leftmost;

	struct list_head tasks;
	struct list_head *balance_iterator;

	/*
	 * 'curr' points to the currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e. when none are currently running).
	 */
	struct sched_entity *curr, *next, *last, *skip;

#ifdef CONFIG_SCHED_DEBUG
	unsigned int nr_spread_over;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf cfs_rqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together the list of leaf cfs_rqs on a cpu.
	 * This list is used during load balancing.
	 */
	int on_list;
	struct list_head leaf_cfs_rq_list;
	struct task_group *tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_SMP
	/*
	 * the part of load.weight contributed by tasks
	 */
	unsigned long task_weight;

	/*
	 *   h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long h_load;

	/*
	 * Maintaining per-cpu shares distribution for group scheduling
	 *
	 * load_stamp is the last time we updated the load average
	 * load_last is the last time we updated the load average and saw load
	 * load_unacc_exec_time is currently unaccounted execution time
	 */
	u64 load_avg;
	u64 load_period;
	u64 load_stamp, load_last, load_unacc_exec_time;

	unsigned long load_contribution;
#endif /* CONFIG_SMP */
#ifdef CONFIG_CFS_BANDWIDTH
	int runtime_enabled;
	u64 runtime_expires;
	s64 runtime_remaining;

	u64 throttled_timestamp;
	int throttled, throttle_count;
	struct list_head throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array active;
	unsigned long rt_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long rt_nr_migratory;
	unsigned long rt_nr_total;
	int overloaded;
	struct plist_head pushable_tasks;
#endif
	int rt_throttled;
	u64 rt_time;
	u64 rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long rt_nr_boosted;

	struct rq *rq;
	struct list_head leaf_rt_rq_list;
	struct task_group *tg;
#endif
};

#ifdef CONFIG_SMP

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member cpus from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 */
struct root_domain {
	atomic_t refcount;
	atomic_t rto_count;
	struct rcu_head rcu;
	cpumask_var_t span;
	cpumask_var_t online;

	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t rto_mask;
	struct cpupri cpupri;
};

extern struct root_domain def_root_domain;

#endif /* CONFIG_SMP */

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: code that wants to lock multiple runqueues (such as the
 * load balancing or thread migration code) must acquire the locks in
 * ascending &runqueue order.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned long nr_running;
	#define CPU_LOAD_IDX_MAX 5
	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
	unsigned long last_load_update_tick;
#ifdef CONFIG_NO_HZ
	u64 nohz_stamp;
	unsigned long nohz_flags;
#endif
	int skip_clock_update;

	/* capture load from *all* tasks on this cpu: */
	struct load_weight load;
	unsigned long nr_load_updates;
	u64 nr_switches;

	struct cfs_rq cfs;
	struct rt_rq rt;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this cpu: */
	struct list_head leaf_cfs_rq_list;
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	struct list_head leaf_rt_rq_list;
#endif

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long nr_uninterruptible;

	struct task_struct *curr, *idle, *stop;
	unsigned long next_balance;
	struct mm_struct *prev_mm;

	u64 clock;
	u64 clock_task;

	atomic_t nr_iowait;

#ifdef CONFIG_SMP
	struct root_domain *rd;
	struct sched_domain *sd;

	unsigned long cpu_power;

	unsigned char idle_balance;
	/* For active balancing */
	int post_schedule;
	int active_balance;
	int push_cpu;
	struct cpu_stop_work active_balance_work;
	/* cpu of this runqueue: */
	int cpu;
	int online;

	u64 rt_avg;
	u64 age_stamp;
	u64 idle_stamp;
	u64 avg_idle;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64 prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64 prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64 prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long calc_load_update;
	long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	int hrtick_csd_pending;
	struct call_single_data hrtick_csd;
#endif
	struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info rq_sched_info;
	unsigned long long rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int yld_count;

	/* schedule() stats */
	unsigned int sched_switch;
	unsigned int sched_count;
	unsigned int sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;
#endif

#ifdef CONFIG_SMP
	struct llist_head wake_list;
#endif
};

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}

DECLARE_PER_CPU(struct rq, runqueues);

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		(&__get_cpu_var(runqueues))
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		(&__raw_get_cpu_var(runqueues))

#ifdef CONFIG_SMP

#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
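
/*
 * Illustrative sketch (not part of the original header): a typical walk
 * of the domain tree with for_each_domain(), defined below, looks like
 *
 *	rcu_read_lock();
 *	for_each_domain(cpu, sd) {
 *		if (sd->flags & SD_LOAD_BALANCE)
 *			... examine this level ...;
 *	}
 *	rcu_read_unlock();
 *
 * where rcu_read_lock() (or any other preempt-disabled section) provides
 * the protection described above; SD_LOAD_BALANCE is just an example flag.
 */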
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
			__sd; __sd = __sd->parent)

#define for_each_lower_domain(sd) for (; sd; sd = sd->child)

/**
 * highest_flag_domain - Return highest sched_domain containing flag.
 * @cpu:	The cpu whose highest level of sched domain is to
 *		be returned.
 * @flag:	The flag to check for the highest sched_domain
 *		for the given cpu.
 *
 * Returns the highest sched_domain of a cpu which contains the given flag.
 */
static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd, *hsd = NULL;

	for_each_domain(cpu, sd) {
		if (!(sd->flags & flag))
			break;
		hsd = sd;
	}

	return hsd;
}

DECLARE_PER_CPU(struct sched_domain *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_id);

#endif /* CONFIG_SMP */

#include "stats.h"
#include "auto_group.h"

#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We use task_subsys_state_check() and extend the RCU verification with
 * pi->lock and rq->lock because cpu_cgroup_attach() holds those locks for each
 * task it moves into the cgroup. Therefore by holding either of those locks,
 * we pin the task to the current cgroup.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
	struct task_group *tg;
	struct cgroup_subsys_state *css;

	css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
			lockdep_is_held(&p->pi_lock) ||
			lockdep_is_held(&task_rq(p)->lock));
	tg = container_of(css, struct task_group, css);

	return autogroup_task_group(p, tg);
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
	struct task_group *tg = task_group(p);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	p->se.cfs_rq = tg->cfs_rq[cpu];
	p->se.parent = tg->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq  = tg->rt_rq[cpu];
	p->rt.parent = tg->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
	return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */

static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	set_task_rq(p, cpu);
#ifdef CONFIG_SMP
	/*
	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
	 * successfully executed on another CPU. We must ensure that updates of
	 * per-task data have been completed by this moment.
	 */
	smp_wmb();
	task_thread_info(p)->cpu = cpu;
#endif
}

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# include <linux/jump_label.h>
# define const_debug __read_mostly
#else
# define const_debug const
#endif

extern const_debug unsigned int sysctl_sched_features;

#define SCHED_FEAT(name, enabled)	\
	__SCHED_FEAT_##name ,

enum {
#include "features.h"
	__SCHED_FEAT_NR,
};

#undef SCHED_FEAT

#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
static __always_inline bool static_branch__true(struct jump_label_key *key)
{
	return likely(static_branch(key)); /* Not out of line branch. */
}

static __always_inline bool static_branch__false(struct jump_label_key *key)
{
	return unlikely(static_branch(key)); /* Out of line branch. */
}

#define SCHED_FEAT(name, enabled)					\
static __always_inline bool static_branch_##name(struct jump_label_key *key) \
{									\
	return static_branch__##enabled(key);				\
}

#include "features.h"

#undef SCHED_FEAT

extern struct jump_label_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */

static inline u64 global_rt_period(void)
{
	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
	if (sysctl_sched_rt_runtime < 0)
		return RUNTIME_INF;

	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}

static inline int task_current(struct rq *rq, struct task_struct *p)
{
	return rq->curr == p;
}

static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
	return p->on_cpu;
#else
	return task_current(rq, p);
#endif
}

#ifndef prepare_arch_switch
# define prepare_arch_switch(next)	do { } while (0)
#endif
#ifndef finish_arch_switch
# define finish_arch_switch(prev)	do { } while (0)
#endif

#ifndef __ARCH_WANT_UNLOCKED_CTXSW
static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * We can optimise this out completely for !SMP, because the
	 * SMP rebalancing from interrupt is the only thing that cares
	 * here.
	 */
	next->on_cpu = 1;
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
	 * We must ensure this doesn't happen until the switch is completely
	 * finished.
	 */
	smp_wmb();
	prev->on_cpu = 0;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	/* this is a valid case when another task releases the spinlock */
	rq->lock.owner = current;
#endif
	/*
	 * If we are tracking spinlock dependencies then we have to
	 * fix up the runqueue lock - which gets 'carried over' from
	 * prev into current:
	 */
	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);

	raw_spin_unlock_irq(&rq->lock);
}

#else /* __ARCH_WANT_UNLOCKED_CTXSW */
static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * We can optimise this out completely for !SMP, because the
	 * SMP rebalancing from interrupt is the only thing that cares
	 * here.
	 */
	next->on_cpu = 1;
#endif
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	raw_spin_unlock_irq(&rq->lock);
#else
	raw_spin_unlock(&rq->lock);
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
	 * We must ensure this doesn't happen until the switch is completely
	 * finished.
	 */
	smp_wmb();
	prev->on_cpu = 0;
#endif
#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	local_irq_enable();
#endif
}
#endif /* __ARCH_WANT_UNLOCKED_CTXSW */

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
	lw->inv_weight = 0;
}

static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
	lw->weight -= dec;
	lw->inv_weight = 0;
}

static inline void update_load_set(struct load_weight *lw, unsigned long w)
{
	lw->weight = w;
	lw->inv_weight = 0;
}

/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 * of tasks with abnormal "nice" values across CPUs, the contribution that
 * each task makes to its run queue's load is weighted according to its
 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
 * scaled version of the new time slice allocation that they receive on time
 * slice expiry etc.
 */

#define WEIGHT_IDLEPRIO                3
#define WMULT_IDLEPRIO         1431655765

/*
 * Nice levels are multiplicative, with a gentle 10% change for every
 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
 * nice 1, it will get ~10% less CPU time than another CPU-bound task
 * that remained on nice 0.
 *
 * The "10% effect" is relative and cumulative: from _any_ nice level,
 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
 * If a task goes up by ~10% and another task goes down by ~10% then
 * the relative distance between them is ~25%.)
 */
static const int prio_to_weight[40] = {
 /* -20 */     88761,     71755,     56483,     46273,     36291,
 /* -15 */     29154,     23254,     18705,     14949,     11916,
 /* -10 */      9548,      7620,      6100,      4904,      3906,
 /*  -5 */      3121,      2501,      1991,      1586,      1277,
 /*   0 */      1024,       820,       655,       526,       423,
 /*   5 */       335,       272,       215,       172,       137,
 /*  10 */       110,        87,        70,        56,        45,
 /*  15 */        36,        29,        23,        18,        15,
};
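
/*
 * Worked example (illustrative, not from the original source): adjacent
 * entries in prio_to_weight[] differ by a factor of roughly 1.25. Two
 * CPU-bound tasks at nice 0 and nice 1 have weights 1024 and 820, so the
 * nice-0 task gets 1024 / (1024 + 820) ~= 55% of the CPU and the nice-1
 * task ~= 45%, i.e. about the documented 10% advantage per nice level.
 */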
/*
 * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
 *
 * In cases where the weight does not change often, we can use the
 * precalculated inverse to speed up arithmetic by turning divisions
 * into multiplications:
 */
static const u32 prio_to_wmult[40] = {
 /* -20 */     48388,     59856,     76040,     92818,    118348,
 /* -15 */    147320,    184698,    229616,    287308,    360437,
 /* -10 */    449829,    563644,    704093,    875809,   1099582,
 /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
 /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
 /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
 /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
 /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};

/* Time spent by the tasks of the cpu accounting group executing in ... */
enum cpuacct_stat_index {
	CPUACCT_STAT_USER,	/* ... user mode */
	CPUACCT_STAT_SYSTEM,	/* ... kernel mode */

	CPUACCT_STAT_NSTATS,
};

#define sched_class_highest (&stop_sched_class)
#define for_each_class(class) \
   for (class = sched_class_highest; class; class = class->next)

extern const struct sched_class stop_sched_class;
extern const struct sched_class rt_sched_class;
extern const struct sched_class fair_sched_class;
extern const struct sched_class idle_sched_class;

#ifdef CONFIG_SMP

extern void trigger_load_balance(struct rq *rq, int cpu);
extern void idle_balance(int this_cpu, struct rq *this_rq);

#else /* CONFIG_SMP */

static inline void idle_balance(int cpu, struct rq *rq)
{
}

#endif

extern void sysrq_sched_debug_show(void);
extern void sched_init_granularity(void);
extern void update_max_interval(void);
extern void update_group_power(struct sched_domain *sd, int cpu);
extern int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu);
extern void init_sched_rt_class(void);
extern void init_sched_fair_class(void);

extern void resched_task(struct task_struct *p);
extern void resched_cpu(int cpu);

extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);

extern void update_cpu_load(struct rq *this_rq);

#ifdef CONFIG_CGROUP_CPUACCT
#include <linux/cgroup.h>
/* track cpu usage of a group of tasks and its child groups */
struct cpuacct {
	struct cgroup_subsys_state css;
	/* cpuusage holds a pointer to a per-cpu u64-type object */
	u64 __percpu *cpuusage;
	struct kernel_cpustat __percpu *cpustat;
};

/* return cpu accounting group corresponding to this container */
static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
{
	return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
			    struct cpuacct, css);
}

/* return cpu accounting group to which this task belongs */
static inline struct cpuacct *task_ca(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
			    struct cpuacct, css);
}

static inline struct cpuacct *parent_ca(struct cpuacct *ca)
{
	if (!ca || !ca->css.cgroup->parent)
		return NULL;
	return cgroup_ca(ca->css.cgroup->parent);
}

extern void cpuacct_charge(struct task_struct *tsk, u64 cputime);
#else
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
#endif
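
/*
 * Illustrative sketch (not the actual implementation, which lives in the
 * scheduler .c files): cpuacct_charge() conceptually walks up the cpuacct
 * hierarchy under RCU and accumulates the time at every level, roughly:
 *
 *	rcu_read_lock();
 *	for (ca = task_ca(tsk); ca; ca = parent_ca(ca))
 *		*per_cpu_ptr(ca->cpuusage, task_cpu(tsk)) += cputime;
 *	rcu_read_unlock();
 */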

static inline void inc_nr_running(struct rq *rq)
{
	rq->nr_running++;
}

static inline void dec_nr_running(struct rq *rq)
{
	rq->nr_running--;
}

extern void update_rq_clock(struct rq *rq);

extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);

extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);

extern const_debug unsigned int sysctl_sched_time_avg;
extern const_debug unsigned int sysctl_sched_nr_migrate;
extern const_debug unsigned int sysctl_sched_migration_cost;

static inline u64 sched_avg_period(void)
{
	return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
}

void calc_load_account_idle(struct rq *this_rq);

#ifdef CONFIG_SCHED_HRTICK

/*
 * Use hrtick when:
 *  - enabled by features
 *  - hrtimer is actually high res
 */
static inline int hrtick_enabled(struct rq *rq)
{
	if (!sched_feat(HRTICK))
		return 0;
	if (!cpu_active(cpu_of(rq)))
		return 0;
	return hrtimer_is_hres_active(&rq->hrtick_timer);
}

void hrtick_start(struct rq *rq, u64 delay);

#else

static inline int hrtick_enabled(struct rq *rq)
{
	return 0;
}

#endif /* CONFIG_SCHED_HRTICK */

#ifdef CONFIG_SMP
extern void sched_avg_update(struct rq *rq);
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
	rq->rt_avg += rt_delta;
	sched_avg_update(rq);
}
#else
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
static inline void sched_avg_update(struct rq *rq) { }
#endif

extern void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period);

#ifdef CONFIG_SMP
#ifdef CONFIG_PREEMPT

static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);

/*
 * fair double_lock_balance: Safely acquires both rq->locks in a fair
 * way at the expense of forcing extra atomic operations in all
 * invocations. This assures that the double_lock is acquired using the
 * same underlying policy as the spinlock_t on this architecture, which
 * reduces latency compared to the unfair variant below. However, it
 * also adds more overhead and therefore may reduce throughput.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	raw_spin_unlock(&this_rq->lock);
	double_rq_lock(this_rq, busiest);

	return 1;
}

#else
/*
 * Unfair double_lock_balance: Optimizes throughput at the expense of
 * latency by eliminating extra atomic operations when the locks are
 * already in proper order on entry. This favors lower cpu-ids and will
 * grant the double lock to lower cpus over higher ids under contention,
 * regardless of entry order into the function.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	int ret = 0;

	if (unlikely(!raw_spin_trylock(&busiest->lock))) {
		if (busiest < this_rq) {
			raw_spin_unlock(&this_rq->lock);
			raw_spin_lock(&busiest->lock);
			raw_spin_lock_nested(&this_rq->lock,
					      SINGLE_DEPTH_NESTING);
			ret = 1;
		} else
			raw_spin_lock_nested(&busiest->lock,
					      SINGLE_DEPTH_NESTING);
	}
	return ret;
}

#endif /* CONFIG_PREEMPT */

/*
 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
 */
static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
	if (unlikely(!irqs_disabled())) {
		/* printk() doesn't work well under rq->lock */
		raw_spin_unlock(&this_rq->lock);
		BUG_ON(1);
	}

	return _double_lock_balance(this_rq, busiest);
}

static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(busiest->lock)
{
	raw_spin_unlock(&busiest->lock);
	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
}

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	if (rq1 == rq2) {
		raw_spin_lock(&rq1->lock);
		__acquire(rq2->lock);	/* Fake it out ;) */
	} else {
		if (rq1 < rq2) {
			raw_spin_lock(&rq1->lock);
			raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
		} else {
			raw_spin_lock(&rq2->lock);
			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
		}
	}
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	raw_spin_unlock(&rq1->lock);
	if (rq1 != rq2)
		raw_spin_unlock(&rq2->lock);
	else
		__release(rq2->lock);
}
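
/*
 * Illustrative usage sketch (assumptions noted, not from the original
 * header): a balancing path that already holds this_rq->lock and also
 * needs busiest->lock typically does something like
 *
 *	if (double_lock_balance(this_rq, busiest)) {
 *		// this_rq->lock was dropped and re-taken: re-check any
 *		// state derived from it before proceeding
 *	}
 *	... move tasks from busiest to this_rq ...
 *	double_unlock_balance(this_rq, busiest);
 *
 * A nonzero return value from double_lock_balance() signals that
 * this_rq->lock was released while acquiring busiest->lock.
 */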

#else /* CONFIG_SMP */

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	BUG_ON(rq1 != rq2);
	raw_spin_lock(&rq1->lock);
	__acquire(rq2->lock);	/* Fake it out ;) */
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	BUG_ON(rq1 != rq2);
	raw_spin_unlock(&rq1->lock);
	__release(rq2->lock);
}

#endif

extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
extern void print_cfs_stats(struct seq_file *m, int cpu);
extern void print_rt_stats(struct seq_file *m, int cpu);

extern void init_cfs_rq(struct cfs_rq *cfs_rq);
extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);
extern void unthrottle_offline_cfs_rqs(struct rq *rq);

extern void account_cfs_bandwidth_used(int enabled, int was_enabled);

#ifdef CONFIG_NO_HZ
enum rq_nohz_flag_bits {
	NOHZ_TICK_STOPPED,
	NOHZ_BALANCE_KICK,
	NOHZ_IDLE,
};

#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
#endif
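
/*
 * Illustrative note (not from the original header): nohz_flags(cpu), defined
 * above, yields a pointer suitable for the atomic bit operations, e.g.
 *
 *	if (!test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
 *		set_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
 *
 * The exact flag protocol is implemented by the nohz idle-balancing code in
 * the scheduler .c files.
 */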