/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Scheduler internal types and methods:
 */
#include <linux/sched.h>

#include <linux/sched/autogroup.h>
#include <linux/sched/clock.h>
#include <linux/sched/coredump.h>
#include <linux/sched/cpufreq.h>
#include <linux/sched/cputime.h>
#include <linux/sched/deadline.h>
#include <linux/sched/debug.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/idle.h>
#include <linux/sched/init.h>
#include <linux/sched/isolation.h>
#include <linux/sched/jobctl.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/mm.h>
#include <linux/sched/nohz.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/prio.h>
#include <linux/sched/rt.h>
#include <linux/sched/signal.h>
#include <linux/sched/smt.h>
#include <linux/sched/stat.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/topology.h>
#include <linux/sched/user.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/xacct.h>

#include <uapi/linux/sched/types.h>

#include <linux/binfmts.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/context_tracking.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/cpuset.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/delayacct.h>
#include <linux/energy_model.h>
#include <linux/init_task.h>
#include <linux/kprobes.h>
#include <linux/kthread.h>
#include <linux/membarrier.h>
#include <linux/migrate.h>
#include <linux/mmu_context.h>
#include <linux/nmi.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/profile.h>
#include <linux/psi.h>
#include <linux/rcupdate_wait.h>
#include <linux/security.h>
#include <linux/stop_machine.h>
#include <linux/suspend.h>
#include <linux/swait.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/tsacct_kern.h>

#include <asm/tlb.h>
#include <asm-generic/vmlinux.lds.h>

#ifdef CONFIG_PARAVIRT
# include <asm/paravirt.h>
#endif

#include "cpupri.h"
#include "cpudeadline.h"

#ifdef CONFIG_SCHED_DEBUG
# define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
#else
# define SCHED_WARN_ON(x)	({ (void)(x), 0; })
#endif

struct rq;
struct cpuidle_state;

/* task_struct::on_rq states: */
#define TASK_ON_RQ_QUEUED	1
#define TASK_ON_RQ_MIGRATING	2

extern __read_mostly int scheduler_running;

extern unsigned long calc_load_update;
extern atomic_long_t calc_load_tasks;

extern void calc_global_load_tick(struct rq *this_rq);
extern long calc_load_fold_active(struct rq *this_rq, long adjust);

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
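
/*
 * Worked example (editor's addition): with the common HZ=250
 * configuration, one jiffy is NSEC_PER_SEC / HZ == 4,000,000 ns, so
 * NS_TO_JIFFIES(10 * NSEC_PER_MSEC) == 2. Note the division truncates
 * rather than rounds.
 */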

/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
 * hierarchies, especially on larger systems. This is not a user-visible change
 * and does not change the user-interface for setting shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit
 * are pretty high and the returns do not justify the increased costs.
 *
 * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to
 * increase coverage and consistency always enable it on 64-bit platforms.
 */
#ifdef CONFIG_64BIT
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
# define scale_load_down(w) \
({ \
	unsigned long __w = (w); \
	if (__w) \
		__w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \
	__w; \
})
#else
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		(w)
# define scale_load_down(w)	(w)
#endif

/*
 * Task weight (visible to users) and its load (invisible to users) have
 * independent resolution, but they should be well calibrated. We use
 * scale_load() and scale_load_down(w) to convert between them. The
 * following must be true:
 *
 *	scale_load(sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]) == NICE_0_LOAD
 *
 */
#define NICE_0_LOAD		(1L << NICE_0_LOAD_SHIFT)
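
/*
 * Worked example (editor's addition): SCHED_FIXEDPOINT_SHIFT is 10, and
 * a nice-0 task has weight sched_prio_to_weight[20] == 1024. On 64-bit,
 * scale_load(1024) == 1024 << 10 == 1048576 == NICE_0_LOAD, and
 * scale_load_down(NICE_0_LOAD) == 1024 again. The max(2UL, ...) clamp in
 * scale_load_down() only matters for very small weights that would
 * otherwise truncate to 0 or 1.
 */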

/*
 * Single value that decides SCHED_DEADLINE internal math precision.
 * 10 -> just above 1us
 * 9  -> just above 0.5us
 */
#define DL_SCALE		10

/*
 * Single value that denotes runtime == period, i.e. unlimited time.
 */
#define RUNTIME_INF		((u64)~0ULL)

static inline int idle_policy(int policy)
{
	return policy == SCHED_IDLE;
}
static inline int fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static inline int rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static inline int dl_policy(int policy)
{
	return policy == SCHED_DEADLINE;
}
static inline bool valid_policy(int policy)
{
	return idle_policy(policy) || fair_policy(policy) ||
		rt_policy(policy) || dl_policy(policy);
}

static inline int task_has_idle_policy(struct task_struct *p)
{
	return idle_policy(p->policy);
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

static inline int task_has_dl_policy(struct task_struct *p)
{
	return dl_policy(p->policy);
}

#define cap_scale(v, s)		((v)*(s) >> SCHED_CAPACITY_SHIFT)

static inline void update_avg(u64 *avg, u64 sample)
{
	s64 diff = sample - *avg;
	*avg += diff / 8;
}
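
/*
 * Illustrative sketch (editor's addition, not kernel code): update_avg()
 * is an exponential moving average with weight 1/8; each call moves *avg
 * one eighth of the remaining distance toward the sample, with the s64
 * division truncating toward zero.
 */
#if 0
static void update_avg_demo(void)
{
	u64 avg = 0;

	update_avg(&avg, 800);	/* avg == 100 */
	update_avg(&avg, 800);	/* avg == 187 */
	update_avg(&avg, 800);	/* avg == 263, converging toward 800 */
}
#endif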

/*
 * !! For sched_setattr_nocheck() (kernel) only !!
 *
 * This is actually gross. :(
 *
 * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE
 * tasks, but still be able to sleep. We need this on platforms that cannot
 * atomically change clock frequency. Remove once fast switching is
 * available on such platforms.
 *
 * SUGOV stands for SchedUtil GOVernor.
 */
#define SCHED_FLAG_SUGOV	0x10000000

static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
{
#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
	return unlikely(dl_se->flags & SCHED_FLAG_SUGOV);
#else
	return false;
#endif
}

/*
 * Tells if entity @a should preempt entity @b.
 */
static inline bool
dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
{
	return dl_entity_is_special(a) ||
	       dl_time_before(a->deadline, b->deadline);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};
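
/*
 * Illustrative sketch (editor's addition, not the kernel's code): the
 * bitmap enables O(1) dequeue -- find the first set bit (the highest
 * queued priority; the delimiter bit guarantees the search terminates)
 * and take the head of that priority's list. rt.c's pick path works
 * along these lines.
 */
#if 0
static struct sched_rt_entity *pick_highest_rt(struct rt_prio_array *array)
{
	int idx = sched_find_first_bit(array->bitmap);

	if (idx >= MAX_RT_PRIO)
		return NULL;	/* only the delimiter is set: array is empty */

	return list_first_entry(&array->queue[idx],
				struct sched_rt_entity, run_list);
}
#endif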

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;
	ktime_t			rt_period;
	u64			rt_runtime;
	struct hrtimer		rt_period_timer;
	unsigned int		rt_period_active;
};

void __dl_clear_params(struct task_struct *p);

/*
 * To keep the bandwidth of -deadline tasks and groups under control
 * we need some place where:
 *  - store the maximum -deadline bandwidth of the system (the group);
 *  - cache the fraction of that bandwidth that is currently allocated.
 *
 * This is all done in the data structure below. It is similar to the
 * one used for RT-throttling (rt_bandwidth), with the main difference
 * that, since here we are only interested in admission control, we
 * do not decrease any runtime while the group "executes", nor do we
 * need a timer to replenish it.
 *
 * With respect to SMP, the bandwidth is given on a per-CPU basis,
 * meaning that:
 *  - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
 *  - dl_total_bw array contains, in the i-th element, the currently
 *    allocated bandwidth on the i-th CPU.
 * Moreover, groups consume bandwidth on each CPU, while tasks only
 * consume bandwidth on the CPU they're running on.
 * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
 * that will be shown the next time the proc or cgroup controls are
 * read. In turn, it can be changed by writing to its own control.
 */
struct dl_bandwidth {
	raw_spinlock_t		dl_runtime_lock;
	u64			dl_runtime;
	u64			dl_period;
};

static inline int dl_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

struct dl_bw {
	raw_spinlock_t		lock;
	u64			bw;
	u64			total_bw;
};

static inline void __dl_update(struct dl_bw *dl_b, s64 bw);

static inline
void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
	dl_b->total_bw -= tsk_bw;
	__dl_update(dl_b, (s32)tsk_bw / cpus);
}

static inline
void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
	dl_b->total_bw += tsk_bw;
	__dl_update(dl_b, -((s32)tsk_bw / cpus));
}

static inline bool __dl_overflow(struct dl_bw *dl_b, unsigned long cap,
				 u64 old_bw, u64 new_bw)
{
	return dl_b->bw != -1 &&
	       cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw;
}

/*
 * Verify the fitness of task @p to run on @cpu taking into account the
 * CPU original capacity and the runtime/deadline ratio of the task.
 *
 * The function will return true if the CPU original capacity of the
 * @cpu scaled by SCHED_CAPACITY_SCALE >= runtime/deadline ratio of the
 * task and false otherwise.
 */
static inline bool dl_task_fits_capacity(struct task_struct *p, int cpu)
{
	unsigned long cap = arch_scale_cpu_capacity(cpu);

	return cap_scale(p->dl.dl_deadline, cap) >= p->dl.dl_runtime;
}
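
/*
 * Worked example (editor's addition): take a task with dl_runtime = 4ms
 * and dl_deadline = 10ms (a 40% bandwidth demand) on a CPU whose original
 * capacity is 512, i.e. half of SCHED_CAPACITY_SCALE. Then
 * cap_scale(10ms, 512) == 5ms >= 4ms, so the task fits; with
 * dl_runtime = 6ms it would not.
 */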

extern void init_dl_bw(struct dl_bw *dl_b);
extern int  sched_dl_global_validate(void);
extern void sched_dl_do_global(void);
extern int  sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr);
extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr);
extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
extern bool __checkparam_dl(const struct sched_attr *attr);
extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
extern int  dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
extern int  dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern bool dl_cpu_busy(unsigned int cpu);

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>
#include <linux/psi.h>

struct cfs_rq;
struct rt_rq;

extern struct list_head task_groups;

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t		lock;
	ktime_t			period;
	u64			quota;
	u64			runtime;
	s64			hierarchical_quota;

	u8			idle;
	u8			period_active;
	u8			slack_started;
	struct hrtimer		period_timer;
	struct hrtimer		slack_timer;
	struct list_head	throttled_cfs_rq;

	/* Statistics: */
	int			nr_periods;
	int			nr_throttled;
	u64			throttled_time;
#endif
};
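
/*
 * Worked example (editor's addition): these fields back the cgroup
 * cpu.cfs_quota_us / cpu.cfs_period_us interface. quota = 50ms with
 * period = 100ms caps the group at half a CPU; quota = 200ms with the
 * same period allows up to two CPUs' worth of runtime per period. A
 * quota of RUNTIME_INF (-1 from user space) disables the cap, and
 * runtime holds what is left of the current period's quota not yet
 * handed out to per-CPU cfs_rq's.
 */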

/* Task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each CPU */
	struct sched_entity	**se;
	/* runqueue "owned" by this group on each CPU */
	struct cfs_rq		**cfs_rq;
	unsigned long		shares;

#ifdef	CONFIG_SMP
	/*
	 * load_avg can be heavily contended at clock tick time, so put
	 * it in its own cacheline separated from the fields above which
	 * will also be accessed at each tick.
	 */
	atomic_long_t		load_avg ____cacheline_aligned;
#endif
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity	**rt_se;
	struct rt_rq		**rt_rq;

	struct rt_bandwidth	rt_bandwidth;
#endif

	struct rcu_head		rcu;
	struct list_head	list;

	struct task_group	*parent;
	struct list_head	siblings;
	struct list_head	children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup	*autogroup;
#endif

	struct cfs_bandwidth	cfs_bandwidth;

#ifdef CONFIG_UCLAMP_TASK_GROUP
	/* The two decimal precision [%] value requested from user-space */
	unsigned int		uclamp_pct[UCLAMP_CNT];
	/* Clamp values requested for a task group */
	struct uclamp_se	uclamp_req[UCLAMP_CNT];
	/* Effective clamp values used for a task group */
	struct uclamp_se	uclamp[UCLAMP_CNT];
#endif

};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so the weight of an entity should not be too large;
 * the same goes for the shares value of a task group.
 * (The default weight is 1024 - so there's no practical
 *  limitation from this.)
 */
#define MIN_SHARES		(1UL << 1)
#define MAX_SHARES		(1UL << 18)
#endif

typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data);

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}
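
/*
 * Usage sketch (editor's addition, hypothetical visitor): a tg_visitor
 * returns 0 to continue the walk and non-zero to abort it; tg_nop()
 * (declared below) is the stock no-op for walks that only need one
 * direction.
 */
#if 0
static int count_tg(struct task_group *tg, void *data)
{
	(*(int *)data)++;
	return 0;
}

static int nr_task_groups(void)
{
	int count = 0;

	rcu_read_lock();
	walk_tg_tree(count_tg, tg_nop, &count);
	rcu_read_unlock();

	return count;
}
#endif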

extern int tg_nop(struct task_group *tg, void *data);

extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void online_fair_sched_group(struct task_group *tg);
extern void unregister_fair_sched_group(struct task_group *tg);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent);
extern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us);
extern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us);
extern long sched_group_rt_runtime(struct task_group *tg);
extern long sched_group_rt_period(struct task_group *tg);
extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_online_group(struct task_group *tg,
			       struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_offline_group(struct task_group *tg);

extern void sched_move_task(struct task_struct *tsk);

#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

#ifdef CONFIG_SMP
extern void set_task_rq_fair(struct sched_entity *se,
			     struct cfs_rq *prev, struct cfs_rq *next);
#else /* !CONFIG_SMP */
static inline void set_task_rq_fair(struct sched_entity *se,
				    struct cfs_rq *prev, struct cfs_rq *next) { }
#endif /* CONFIG_SMP */
#endif /* CONFIG_FAIR_GROUP_SCHED */

#else /* CONFIG_CGROUP_SCHED */

struct cfs_bandwidth { };

#endif	/* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight	load;
	unsigned int		nr_running;
	unsigned int		h_nr_running;      /* SCHED_{NORMAL,BATCH,IDLE} */
	unsigned int		idle_h_nr_running; /* SCHED_IDLE */

	u64			exec_clock;
	u64			min_vruntime;
#ifndef CONFIG_64BIT
	u64			min_vruntime_copy;
#endif

	struct rb_root_cached	tasks_timeline;

	/*
	 * 'curr' points to currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e. when none are currently running).
	 */
	struct sched_entity	*curr;
	struct sched_entity	*next;
	struct sched_entity	*last;
	struct sched_entity	*skip;

#ifdef	CONFIG_SCHED_DEBUG
	unsigned int		nr_spread_over;
#endif

#ifdef CONFIG_SMP
	/*
	 * CFS load tracking
	 */
	struct sched_avg	avg;
#ifndef CONFIG_64BIT
	u64			load_last_update_time_copy;
#endif
	struct {
		raw_spinlock_t	lock ____cacheline_aligned;
		int		nr;
		unsigned long	load_avg;
		unsigned long	util_avg;
		unsigned long	runnable_avg;
	} removed;

#ifdef CONFIG_FAIR_GROUP_SCHED
	unsigned long		tg_load_avg_contrib;
	long			propagate;
	long			prop_runnable_sum;

	/*
	 *   h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long		h_load;
	u64			last_h_load_update;
	struct sched_entity	*h_load_next;
#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq		*rq;	/* CPU runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf runqueues hold other higher schedulable
	 * entities (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a CPU.
	 * This list is used during load balance.
	 */
	int			on_list;
	struct list_head	leaf_cfs_rq_list;
	struct task_group	*tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_CFS_BANDWIDTH
	int			runtime_enabled;
	s64			runtime_remaining;

	u64			throttled_clock;
	u64			throttled_clock_task;
	u64			throttled_clock_task_time;
	int			throttled;
	int			throttle_count;
	struct list_head	throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/* RT IPI pull logic requires IRQ_WORK */
#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP)
# define HAVE_RT_PUSH_IPI
#endif

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array	active;
	unsigned int		rt_nr_running;
	unsigned int		rr_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int		curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int		next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long		rt_nr_migratory;
	unsigned long		rt_nr_total;
	int			overloaded;
	struct plist_head	pushable_tasks;

#endif /* CONFIG_SMP */
	int			rt_queued;

	int			rt_throttled;
	u64			rt_time;
	u64			rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long		rt_nr_boosted;

	struct rq		*rq;
	struct task_group	*tg;
#endif
};

static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq)
{
	return rt_rq->rt_queued && rt_rq->rt_nr_running;
}
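
/*
 * Worked example (editor's addition): with the default sysctls
 * (sched_rt_period_us = 1000000, sched_rt_runtime_us = 950000), an rt_rq
 * accumulates execution time in rt_time and is marked rt_throttled once
 * rt_time exceeds rt_runtime within a period, reserving at least 5% of
 * each second for non-RT tasks. An rt_runtime of RUNTIME_INF disables
 * the throttle.
 */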

/* Deadline class' related fields in a runqueue */
struct dl_rq {
	/* runqueue is an rbtree, ordered by deadline */
	struct rb_root_cached	root;

	unsigned long		dl_nr_running;

#ifdef CONFIG_SMP
	/*
	 * Deadline values of the currently executing and the
	 * earliest ready task on this rq. Caching these facilitates
	 * the decision whether or not a ready but not running task
	 * should migrate somewhere else.
	 */
	struct {
		u64		curr;
		u64		next;
	} earliest_dl;

	unsigned long		dl_nr_migratory;
	int			overloaded;

	/*
	 * Tasks on this rq that can be pushed away. They are kept in
	 * an rb-tree, ordered by tasks' deadlines, with caching
	 * of the leftmost (earliest deadline) element.
	 */
	struct rb_root_cached	pushable_dl_tasks_root;
#else
	struct dl_bw		dl_bw;
#endif
	/*
	 * "Active utilization" for this runqueue: increased when a
	 * task wakes up (becomes TASK_RUNNING) and decreased when a
	 * task blocks
	 */
	u64			running_bw;

	/*
	 * Utilization of the tasks "assigned" to this runqueue (including
	 * the tasks that are in runqueue and the tasks that executed on this
	 * CPU and blocked). Increased when a task moves to this runqueue, and
	 * decreased when the task moves away (migrates, changes scheduling
	 * policy, or terminates).
	 * This is needed to compute the "inactive utilization" for the
	 * runqueue (inactive utilization = this_bw - running_bw).
	 */
	u64			this_bw;
	u64			extra_bw;

	/*
	 * Inverse of the fraction of CPU utilization that can be reclaimed
	 * by the GRUB algorithm.
	 */
	u64			bw_ratio;
};
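
/*
 * Worked example (editor's addition): if tasks with a combined bandwidth
 * of 50% of a CPU are assigned to this runqueue (this_bw) but only tasks
 * worth 30% are currently enqueued (running_bw), the inactive
 * utilization is this_bw - running_bw == 20%, which bounds how much
 * bandwidth GRUB lets the running tasks reclaim from blocked ones.
 */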

#ifdef CONFIG_FAIR_GROUP_SCHED
/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)

static inline void se_update_runnable(struct sched_entity *se)
{
	if (!entity_is_task(se))
		se->runnable_weight = se->my_q->h_nr_running;
}

static inline long se_runnable(struct sched_entity *se)
{
	if (entity_is_task(se))
		return !!se->on_rq;
	else
		return se->runnable_weight;
}

#else
#define entity_is_task(se)	1

static inline void se_update_runnable(struct sched_entity *se) {}

static inline long se_runnable(struct sched_entity *se)
{
	return !!se->on_rq;
}
#endif

#ifdef CONFIG_SMP
/*
 * XXX we want to get rid of these helpers and use the full load resolution.
 */
static inline long se_weight(struct sched_entity *se)
{
	return scale_load_down(se->load.weight);
}


static inline bool sched_asym_prefer(int a, int b)
{
	return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
}

struct perf_domain {
	struct em_perf_domain *em_pd;
	struct perf_domain *next;
	struct rcu_head rcu;
};

/* Scheduling group status flags */
#define SG_OVERLOAD		0x1 /* More than one runnable task on a CPU. */
#define SG_OVERUTILIZED		0x2 /* One or more CPUs are over-utilized. */

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member CPUs from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 */
struct root_domain {
	atomic_t		refcount;
	atomic_t		rto_count;
	struct rcu_head		rcu;
	cpumask_var_t		span;
	cpumask_var_t		online;

	/*
	 * Indicate pullable load on at least one CPU, e.g:
	 * - More than one runnable task
	 * - Running task is misfit
	 */
	int			overload;

	/* Indicate one or more CPUs over-utilized (tipping point) */
	int			overutilized;

	/*
	 * The bit corresponding to a CPU gets set here if such CPU has more
	 * than one runnable -deadline task (as it is below for RT tasks).
	 */
	cpumask_var_t		dlo_mask;
	atomic_t		dlo_count;
	struct dl_bw		dl_bw;
	struct cpudl		cpudl;

#ifdef HAVE_RT_PUSH_IPI
	/*
	 * For IPI pull requests, loop across the rto_mask.
	 */
	struct irq_work		rto_push_work;
	raw_spinlock_t		rto_lock;
	/* These are only updated and read within rto_lock */
	int			rto_loop;
	int			rto_cpu;
	/* These atomics are updated outside of a lock */
	atomic_t		rto_loop_next;
	atomic_t		rto_loop_start;
#endif
	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t		rto_mask;
	struct cpupri		cpupri;

	unsigned long		max_cpu_capacity;

	/*
	 * NULL-terminated list of performance domains intersecting with the
	 * CPUs of the rd. Protected by RCU.
	 */
	struct perf_domain __rcu *pd;
};

extern void init_defrootdomain(void);
extern int sched_init_domains(const struct cpumask *cpu_map);
extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
extern void sched_get_rd(struct root_domain *rd);
extern void sched_put_rd(struct root_domain *rd);

#ifdef HAVE_RT_PUSH_IPI
extern void rto_push_irq_work_func(struct irq_work *work);
#endif
#endif /* CONFIG_SMP */

#ifdef CONFIG_UCLAMP_TASK
/*
 * struct uclamp_bucket - Utilization clamp bucket
 * @value: utilization clamp value for tasks on this clamp bucket
 * @tasks: number of RUNNABLE tasks on this clamp bucket
 *
 * Keep track of how many tasks are RUNNABLE for a given utilization
 * clamp value.
 */
struct uclamp_bucket {
	unsigned long value : bits_per(SCHED_CAPACITY_SCALE);
	unsigned long tasks : BITS_PER_LONG - bits_per(SCHED_CAPACITY_SCALE);
};

/*
 * struct uclamp_rq - rq's utilization clamp
 * @value: currently active clamp values for a rq
 * @bucket: utilization clamp buckets affecting a rq
 *
 * Keep track of RUNNABLE tasks on a rq to aggregate their clamp values.
 * A clamp value is affecting a rq when there is at least one task RUNNABLE
 * (or actually running) with that value.
 *
 * There are up to UCLAMP_CNT possible different clamp values, currently there
 * are only two: minimum utilization and maximum utilization.
 *
 * All utilization clamping values are MAX aggregated, since:
 * - for util_min: we want to run the CPU at least at the max of the minimum
 *   utilization required by its currently RUNNABLE tasks.
 * - for util_max: we want to allow the CPU to run up to the max of the
 *   maximum utilization allowed by its currently RUNNABLE tasks.
 *
 * Since on each system we expect only a limited number of different
 * utilization clamp values (UCLAMP_BUCKETS), use a simple array to track
 * the metrics required to compute all the per-rq utilization clamp values.
 */
struct uclamp_rq {
	unsigned int value;
	struct uclamp_bucket bucket[UCLAMP_BUCKETS];
};
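
/*
 * Illustrative sketch (editor's addition): clamp values in
 * [0..SCHED_CAPACITY_SCALE] map onto UCLAMP_BUCKETS buckets of roughly
 * equal width; core.c does this with a helper along these lines. With
 * the default UCLAMP_BUCKETS == 5 and SCHED_CAPACITY_SCALE == 1024, the
 * bucket width is about 205, so a clamp value of 300 lands in bucket 1.
 */
#if 0
#define UCLAMP_BUCKET_DELTA	DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)

static inline unsigned int uclamp_bucket_id(unsigned int clamp_value)
{
	return min_t(unsigned int,
		     clamp_value / UCLAMP_BUCKET_DELTA, UCLAMP_BUCKETS - 1);
}
#endif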

DECLARE_STATIC_KEY_FALSE(sched_uclamp_used);
#endif /* CONFIG_UCLAMP_TASK */

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: code that wants to lock multiple runqueues (such as
 * the load balancing or the thread migration code) must acquire the
 * locks in ascending &runqueue order.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t		lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned int		nr_running;
#ifdef CONFIG_NUMA_BALANCING
	unsigned int		nr_numa_running;
	unsigned int		nr_preferred_running;
	unsigned int		numa_migrate_on;
#endif
#ifdef CONFIG_NO_HZ_COMMON
#ifdef CONFIG_SMP
	unsigned long		last_blocked_load_update_tick;
	unsigned int		has_blocked_load;
	call_single_data_t	nohz_csd;
#endif /* CONFIG_SMP */
	unsigned int		nohz_tick_stopped;
	atomic_t		nohz_flags;
#endif /* CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_SMP
	unsigned int		ttwu_pending;
#endif
	u64			nr_switches;

#ifdef CONFIG_UCLAMP_TASK
	/* Utilization clamp values based on CPU's RUNNABLE tasks */
	struct uclamp_rq	uclamp[UCLAMP_CNT] ____cacheline_aligned;
	unsigned int		uclamp_flags;
#define UCLAMP_FLAG_IDLE 0x01
#endif

	struct cfs_rq		cfs;
	struct rt_rq		rt;
	struct dl_rq		dl;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this CPU: */
	struct list_head	leaf_cfs_rq_list;
	struct list_head	*tmp_alone_branch;
#endif /* CONFIG_FAIR_GROUP_SCHED */

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long		nr_uninterruptible;

	struct task_struct __rcu	*curr;
	struct task_struct	*idle;
	struct task_struct	*stop;
	unsigned long		next_balance;
	struct mm_struct	*prev_mm;

	unsigned int		clock_update_flags;
	u64			clock;
	/* Ensure that all clocks are in the same cache line */
	u64			clock_task ____cacheline_aligned;
	u64			clock_pelt;
	unsigned long		lost_idle_time;

	atomic_t		nr_iowait;

#ifdef CONFIG_MEMBARRIER
	int membarrier_state;
#endif

#ifdef CONFIG_SMP
	struct root_domain		*rd;
	struct sched_domain __rcu	*sd;

	unsigned long		cpu_capacity;
	unsigned long		cpu_capacity_orig;

	struct callback_head	*balance_callback;

	unsigned char		nohz_idle_balance;
	unsigned char		idle_balance;

	unsigned long		misfit_task_load;

	/* For active balancing */
	int			active_balance;
	int			push_cpu;
	struct cpu_stop_work	active_balance_work;

	/* CPU of this runqueue: */
	int			cpu;
	int			online;

	struct list_head cfs_tasks;

	struct sched_avg	avg_rt;
	struct sched_avg	avg_dl;
#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
	struct sched_avg	avg_irq;
#endif
#ifdef CONFIG_SCHED_THERMAL_PRESSURE
	struct sched_avg	avg_thermal;
#endif
	u64			idle_stamp;
	u64			avg_idle;

	/* This is used to determine avg_idle's max value */
	u64			max_idle_balance_cost;
#endif /* CONFIG_SMP */

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64			prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64			prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64			prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long		calc_load_update;
	long			calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	call_single_data_t	hrtick_csd;
#endif
	struct hrtimer		hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info	rq_sched_info;
	unsigned long long	rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int		yld_count;

	/* schedule() stats */
	unsigned int		sched_count;
	unsigned int		sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int		ttwu_count;
	unsigned int		ttwu_local;
#endif

#ifdef CONFIG_CPU_IDLE
	/* Must be inspected within a rcu lock section */
	struct cpuidle_state	*idle_state;
#endif
};

#ifdef CONFIG_FAIR_GROUP_SCHED

/* CPU runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

#else

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}
#endif

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}


#ifdef CONFIG_SCHED_SMT
extern void __update_idle_core(struct rq *rq);

static inline void update_idle_core(struct rq *rq)
{
	if (static_branch_unlikely(&sched_smt_present))
		__update_idle_core(rq);
}

#else
static inline void update_idle_core(struct rq *rq) { }
#endif

DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
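
/*
 * Illustrative sketch (editor's addition): the "ascending &runqueue"
 * locking rule documented on struct rq above is, in spirit, what the
 * kernel's double-lock helpers implement -- always take the lower
 * address first so that two CPUs locking the same pair cannot deadlock:
 */
#if 0
static inline void double_rq_lock_sketch(struct rq *rq1, struct rq *rq2)
{
	if (rq1 == rq2) {
		raw_spin_lock(&rq1->lock);
	} else if (rq1 < rq2) {
		raw_spin_lock(&rq1->lock);
		raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
	} else {
		raw_spin_lock(&rq2->lock);
		raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
	}
}
#endif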
1090391e43daSPeter Zijlstra 1091518cd623SPeter Zijlstra #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu))) 10924a32fea9SChristoph Lameter #define this_rq() this_cpu_ptr(&runqueues) 1093518cd623SPeter Zijlstra #define task_rq(p) cpu_rq(task_cpu(p)) 1094518cd623SPeter Zijlstra #define cpu_curr(cpu) (cpu_rq(cpu)->curr) 10954a32fea9SChristoph Lameter #define raw_rq() raw_cpu_ptr(&runqueues) 1096518cd623SPeter Zijlstra 10971f351d7fSJohannes Weiner extern void update_rq_clock(struct rq *rq); 10981f351d7fSJohannes Weiner 1099cebde6d6SPeter Zijlstra static inline u64 __rq_clock_broken(struct rq *rq) 1100cebde6d6SPeter Zijlstra { 1101316c1608SJason Low return READ_ONCE(rq->clock); 1102cebde6d6SPeter Zijlstra } 1103cebde6d6SPeter Zijlstra 1104cb42c9a3SMatt Fleming /* 1105cb42c9a3SMatt Fleming * rq::clock_update_flags bits 1106cb42c9a3SMatt Fleming * 1107cb42c9a3SMatt Fleming * %RQCF_REQ_SKIP - will request skipping of clock update on the next 1108cb42c9a3SMatt Fleming * call to __schedule(). This is an optimisation to avoid 1109cb42c9a3SMatt Fleming * neighbouring rq clock updates. 1110cb42c9a3SMatt Fleming * 1111cb42c9a3SMatt Fleming * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is 1112cb42c9a3SMatt Fleming * in effect and calls to update_rq_clock() are being ignored. 1113cb42c9a3SMatt Fleming * 1114cb42c9a3SMatt Fleming * %RQCF_UPDATED - is a debug flag that indicates whether a call has been 1115cb42c9a3SMatt Fleming * made to update_rq_clock() since the last time rq::lock was pinned. 1116cb42c9a3SMatt Fleming * 1117cb42c9a3SMatt Fleming * If inside of __schedule(), clock_update_flags will have been 1118cb42c9a3SMatt Fleming * shifted left (a left shift is a cheap operation for the fast path 1119cb42c9a3SMatt Fleming * to promote %RQCF_REQ_SKIP to %RQCF_ACT_SKIP), so you must use, 1120cb42c9a3SMatt Fleming * 1121cb42c9a3SMatt Fleming * if (rq->clock_update_flags >= RQCF_UPDATED) 1122cb42c9a3SMatt Fleming * 1123cb42c9a3SMatt Fleming * to check if %RQCF_UPDATED is set. It'll never be shifted more than 1124cb42c9a3SMatt Fleming * one position though, because the next rq_unpin_lock() will shift it 1125cb42c9a3SMatt Fleming * back. 1126cb42c9a3SMatt Fleming */ 1127cb42c9a3SMatt Fleming #define RQCF_REQ_SKIP 0x01 1128cb42c9a3SMatt Fleming #define RQCF_ACT_SKIP 0x02 1129cb42c9a3SMatt Fleming #define RQCF_UPDATED 0x04 1130cb42c9a3SMatt Fleming 1131cb42c9a3SMatt Fleming static inline void assert_clock_updated(struct rq *rq) 1132cb42c9a3SMatt Fleming { 1133cb42c9a3SMatt Fleming /* 1134cb42c9a3SMatt Fleming * The only reason for not seeing a clock update since the 1135cb42c9a3SMatt Fleming * last rq_pin_lock() is if we're currently skipping updates.
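 * (Added clarification: after the left shift in __schedule(), a pending
 * RQCF_REQ_SKIP (0x01) reads as RQCF_ACT_SKIP (0x02), so the assertion
 * below only fires when the flags are still below RQCF_ACT_SKIP, i.e.
 * when neither an active skip nor a clock update has been recorded
 * since the last rq_pin_lock().)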
1136cb42c9a3SMatt Fleming */ 1137cb42c9a3SMatt Fleming SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP); 1138cb42c9a3SMatt Fleming } 1139cb42c9a3SMatt Fleming 114078becc27SFrederic Weisbecker static inline u64 rq_clock(struct rq *rq) 114178becc27SFrederic Weisbecker { 1142cebde6d6SPeter Zijlstra lockdep_assert_held(&rq->lock); 1143cb42c9a3SMatt Fleming assert_clock_updated(rq); 1144cb42c9a3SMatt Fleming 114578becc27SFrederic Weisbecker return rq->clock; 114678becc27SFrederic Weisbecker } 114778becc27SFrederic Weisbecker 114878becc27SFrederic Weisbecker static inline u64 rq_clock_task(struct rq *rq) 114978becc27SFrederic Weisbecker { 1150cebde6d6SPeter Zijlstra lockdep_assert_held(&rq->lock); 1151cb42c9a3SMatt Fleming assert_clock_updated(rq); 1152cb42c9a3SMatt Fleming 115378becc27SFrederic Weisbecker return rq->clock_task; 115478becc27SFrederic Weisbecker } 115578becc27SFrederic Weisbecker 115605289b90SThara Gopinath /** 115705289b90SThara Gopinath * By default the decay is the default pelt decay period. 115805289b90SThara Gopinath * The decay shift can change the decay period in 115905289b90SThara Gopinath * multiples of 32. 116005289b90SThara Gopinath * Decay shift Decay period(ms) 116105289b90SThara Gopinath * 0 32 116205289b90SThara Gopinath * 1 64 116305289b90SThara Gopinath * 2 128 116405289b90SThara Gopinath * 3 256 116505289b90SThara Gopinath * 4 512 116605289b90SThara Gopinath */ 116705289b90SThara Gopinath extern int sched_thermal_decay_shift; 116805289b90SThara Gopinath 116905289b90SThara Gopinath static inline u64 rq_clock_thermal(struct rq *rq) 117005289b90SThara Gopinath { 117105289b90SThara Gopinath return rq_clock_task(rq) >> sched_thermal_decay_shift; 117205289b90SThara Gopinath } 117305289b90SThara Gopinath 1174adcc8da8SDavidlohr Bueso static inline void rq_clock_skip_update(struct rq *rq) 11759edfbfedSPeter Zijlstra { 11769edfbfedSPeter Zijlstra lockdep_assert_held(&rq->lock); 1177cb42c9a3SMatt Fleming rq->clock_update_flags |= RQCF_REQ_SKIP; 1178adcc8da8SDavidlohr Bueso } 1179adcc8da8SDavidlohr Bueso 1180adcc8da8SDavidlohr Bueso /* 1181595058b6SDavidlohr Bueso * See rt task throttling, which is the only time a skip 1182adcc8da8SDavidlohr Bueso * request is cancelled. 1183adcc8da8SDavidlohr Bueso */ 1184adcc8da8SDavidlohr Bueso static inline void rq_clock_cancel_skipupdate(struct rq *rq) 1185adcc8da8SDavidlohr Bueso { 1186adcc8da8SDavidlohr Bueso lockdep_assert_held(&rq->lock); 1187cb42c9a3SMatt Fleming rq->clock_update_flags &= ~RQCF_REQ_SKIP; 11889edfbfedSPeter Zijlstra } 11899edfbfedSPeter Zijlstra 1190d8ac8971SMatt Fleming struct rq_flags { 1191d8ac8971SMatt Fleming unsigned long flags; 1192d8ac8971SMatt Fleming struct pin_cookie cookie; 1193cb42c9a3SMatt Fleming #ifdef CONFIG_SCHED_DEBUG 1194cb42c9a3SMatt Fleming /* 1195cb42c9a3SMatt Fleming * A copy of (rq::clock_update_flags & RQCF_UPDATED) for the 1196cb42c9a3SMatt Fleming * current pin context is stashed here in case it needs to be 1197cb42c9a3SMatt Fleming * restored in rq_repin_lock(). 
1198cb42c9a3SMatt Fleming */ 1199cb42c9a3SMatt Fleming unsigned int clock_update_flags; 1200cb42c9a3SMatt Fleming #endif 1201d8ac8971SMatt Fleming }; 1202d8ac8971SMatt Fleming 1203d8ac8971SMatt Fleming static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf) 1204d8ac8971SMatt Fleming { 1205d8ac8971SMatt Fleming rf->cookie = lockdep_pin_lock(&rq->lock); 1206cb42c9a3SMatt Fleming 1207cb42c9a3SMatt Fleming #ifdef CONFIG_SCHED_DEBUG 1208cb42c9a3SMatt Fleming rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP); 1209cb42c9a3SMatt Fleming rf->clock_update_flags = 0; 1210cb42c9a3SMatt Fleming #endif 1211d8ac8971SMatt Fleming } 1212d8ac8971SMatt Fleming 1213d8ac8971SMatt Fleming static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf) 1214d8ac8971SMatt Fleming { 1215cb42c9a3SMatt Fleming #ifdef CONFIG_SCHED_DEBUG 1216cb42c9a3SMatt Fleming if (rq->clock_update_flags > RQCF_ACT_SKIP) 1217cb42c9a3SMatt Fleming rf->clock_update_flags = RQCF_UPDATED; 1218cb42c9a3SMatt Fleming #endif 1219cb42c9a3SMatt Fleming 1220d8ac8971SMatt Fleming lockdep_unpin_lock(&rq->lock, rf->cookie); 1221d8ac8971SMatt Fleming } 1222d8ac8971SMatt Fleming 1223d8ac8971SMatt Fleming static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf) 1224d8ac8971SMatt Fleming { 1225d8ac8971SMatt Fleming lockdep_repin_lock(&rq->lock, rf->cookie); 1226cb42c9a3SMatt Fleming 1227cb42c9a3SMatt Fleming #ifdef CONFIG_SCHED_DEBUG 1228cb42c9a3SMatt Fleming /* 1229cb42c9a3SMatt Fleming * Restore the value we stashed in @rf for this pin context. 1230cb42c9a3SMatt Fleming */ 1231cb42c9a3SMatt Fleming rq->clock_update_flags |= rf->clock_update_flags; 1232cb42c9a3SMatt Fleming #endif 1233d8ac8971SMatt Fleming } 1234d8ac8971SMatt Fleming 12351f351d7fSJohannes Weiner struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) 12361f351d7fSJohannes Weiner __acquires(rq->lock); 12371f351d7fSJohannes Weiner 12381f351d7fSJohannes Weiner struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) 12391f351d7fSJohannes Weiner __acquires(p->pi_lock) 12401f351d7fSJohannes Weiner __acquires(rq->lock); 12411f351d7fSJohannes Weiner 12421f351d7fSJohannes Weiner static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf) 12431f351d7fSJohannes Weiner __releases(rq->lock) 12441f351d7fSJohannes Weiner { 12451f351d7fSJohannes Weiner rq_unpin_lock(rq, rf); 12461f351d7fSJohannes Weiner raw_spin_unlock(&rq->lock); 12471f351d7fSJohannes Weiner } 12481f351d7fSJohannes Weiner 12491f351d7fSJohannes Weiner static inline void 12501f351d7fSJohannes Weiner task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) 12511f351d7fSJohannes Weiner __releases(rq->lock) 12521f351d7fSJohannes Weiner __releases(p->pi_lock) 12531f351d7fSJohannes Weiner { 12541f351d7fSJohannes Weiner rq_unpin_lock(rq, rf); 12551f351d7fSJohannes Weiner raw_spin_unlock(&rq->lock); 12561f351d7fSJohannes Weiner raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); 12571f351d7fSJohannes Weiner } 12581f351d7fSJohannes Weiner 12591f351d7fSJohannes Weiner static inline void 12601f351d7fSJohannes Weiner rq_lock_irqsave(struct rq *rq, struct rq_flags *rf) 12611f351d7fSJohannes Weiner __acquires(rq->lock) 12621f351d7fSJohannes Weiner { 12631f351d7fSJohannes Weiner raw_spin_lock_irqsave(&rq->lock, rf->flags); 12641f351d7fSJohannes Weiner rq_pin_lock(rq, rf); 12651f351d7fSJohannes Weiner } 12661f351d7fSJohannes Weiner 12671f351d7fSJohannes Weiner static inline void 12681f351d7fSJohannes Weiner rq_lock_irq(struct rq *rq, struct rq_flags *rf) 
12691f351d7fSJohannes Weiner __acquires(rq->lock) 12701f351d7fSJohannes Weiner { 12711f351d7fSJohannes Weiner raw_spin_lock_irq(&rq->lock); 12721f351d7fSJohannes Weiner rq_pin_lock(rq, rf); 12731f351d7fSJohannes Weiner } 12741f351d7fSJohannes Weiner 12751f351d7fSJohannes Weiner static inline void 12761f351d7fSJohannes Weiner rq_lock(struct rq *rq, struct rq_flags *rf) 12771f351d7fSJohannes Weiner __acquires(rq->lock) 12781f351d7fSJohannes Weiner { 12791f351d7fSJohannes Weiner raw_spin_lock(&rq->lock); 12801f351d7fSJohannes Weiner rq_pin_lock(rq, rf); 12811f351d7fSJohannes Weiner } 12821f351d7fSJohannes Weiner 12831f351d7fSJohannes Weiner static inline void 12841f351d7fSJohannes Weiner rq_relock(struct rq *rq, struct rq_flags *rf) 12851f351d7fSJohannes Weiner __acquires(rq->lock) 12861f351d7fSJohannes Weiner { 12871f351d7fSJohannes Weiner raw_spin_lock(&rq->lock); 12881f351d7fSJohannes Weiner rq_repin_lock(rq, rf); 12891f351d7fSJohannes Weiner } 12901f351d7fSJohannes Weiner 12911f351d7fSJohannes Weiner static inline void 12921f351d7fSJohannes Weiner rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf) 12931f351d7fSJohannes Weiner __releases(rq->lock) 12941f351d7fSJohannes Weiner { 12951f351d7fSJohannes Weiner rq_unpin_lock(rq, rf); 12961f351d7fSJohannes Weiner raw_spin_unlock_irqrestore(&rq->lock, rf->flags); 12971f351d7fSJohannes Weiner } 12981f351d7fSJohannes Weiner 12991f351d7fSJohannes Weiner static inline void 13001f351d7fSJohannes Weiner rq_unlock_irq(struct rq *rq, struct rq_flags *rf) 13011f351d7fSJohannes Weiner __releases(rq->lock) 13021f351d7fSJohannes Weiner { 13031f351d7fSJohannes Weiner rq_unpin_lock(rq, rf); 13041f351d7fSJohannes Weiner raw_spin_unlock_irq(&rq->lock); 13051f351d7fSJohannes Weiner } 13061f351d7fSJohannes Weiner 13071f351d7fSJohannes Weiner static inline void 13081f351d7fSJohannes Weiner rq_unlock(struct rq *rq, struct rq_flags *rf) 13091f351d7fSJohannes Weiner __releases(rq->lock) 13101f351d7fSJohannes Weiner { 13111f351d7fSJohannes Weiner rq_unpin_lock(rq, rf); 13121f351d7fSJohannes Weiner raw_spin_unlock(&rq->lock); 13131f351d7fSJohannes Weiner } 13141f351d7fSJohannes Weiner 1315246b3b33SJohannes Weiner static inline struct rq * 1316246b3b33SJohannes Weiner this_rq_lock_irq(struct rq_flags *rf) 1317246b3b33SJohannes Weiner __acquires(rq->lock) 1318246b3b33SJohannes Weiner { 1319246b3b33SJohannes Weiner struct rq *rq; 1320246b3b33SJohannes Weiner 1321246b3b33SJohannes Weiner local_irq_disable(); 1322246b3b33SJohannes Weiner rq = this_rq(); 1323246b3b33SJohannes Weiner rq_lock(rq, rf); 1324246b3b33SJohannes Weiner return rq; 1325246b3b33SJohannes Weiner } 1326246b3b33SJohannes Weiner 13279942f79bSRik van Riel #ifdef CONFIG_NUMA 1328e3fe70b1SRik van Riel enum numa_topology_type { 1329e3fe70b1SRik van Riel NUMA_DIRECT, 1330e3fe70b1SRik van Riel NUMA_GLUELESS_MESH, 1331e3fe70b1SRik van Riel NUMA_BACKPLANE, 1332e3fe70b1SRik van Riel }; 1333e3fe70b1SRik van Riel extern enum numa_topology_type sched_numa_topology_type; 13349942f79bSRik van Riel extern int sched_max_numa_distance; 13359942f79bSRik van Riel extern bool find_numa_distance(int distance); 1336f2cb1360SIngo Molnar extern void sched_init_numa(void); 1337f2cb1360SIngo Molnar extern void sched_domains_numa_masks_set(unsigned int cpu); 1338f2cb1360SIngo Molnar extern void sched_domains_numa_masks_clear(unsigned int cpu); 1339e0e8d491SWanpeng Li extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu); 1340f2cb1360SIngo Molnar #else 1341f2cb1360SIngo Molnar static inline void 
sched_init_numa(void) { } 1342f2cb1360SIngo Molnar static inline void sched_domains_numa_masks_set(unsigned int cpu) { } 1343f2cb1360SIngo Molnar static inline void sched_domains_numa_masks_clear(unsigned int cpu) { } 1344e0e8d491SWanpeng Li static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu) 1345e0e8d491SWanpeng Li { 1346e0e8d491SWanpeng Li return nr_cpu_ids; 1347e0e8d491SWanpeng Li } 1348f2cb1360SIngo Molnar #endif 1349f2cb1360SIngo Molnar 1350f809ca9aSMel Gorman #ifdef CONFIG_NUMA_BALANCING 135144dba3d5SIulia Manda /* The regions in numa_faults array from task_struct */ 135244dba3d5SIulia Manda enum numa_faults_stats { 135344dba3d5SIulia Manda NUMA_MEM = 0, 135444dba3d5SIulia Manda NUMA_CPU, 135544dba3d5SIulia Manda NUMA_MEMBUF, 135644dba3d5SIulia Manda NUMA_CPUBUF 135744dba3d5SIulia Manda }; 13580ec8aa00SPeter Zijlstra extern void sched_setnuma(struct task_struct *p, int node); 1359e6628d5bSMel Gorman extern int migrate_task_to(struct task_struct *p, int cpu); 13600ad4e3dfSSrikar Dronamraju extern int migrate_swap(struct task_struct *p, struct task_struct *t, 13610ad4e3dfSSrikar Dronamraju int cpu, int scpu); 136213784475SMel Gorman extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p); 136313784475SMel Gorman #else 136413784475SMel Gorman static inline void 136513784475SMel Gorman init_numa_balancing(unsigned long clone_flags, struct task_struct *p) 136613784475SMel Gorman { 136713784475SMel Gorman } 1368f809ca9aSMel Gorman #endif /* CONFIG_NUMA_BALANCING */ 1369f809ca9aSMel Gorman 1370518cd623SPeter Zijlstra #ifdef CONFIG_SMP 1371518cd623SPeter Zijlstra 1372e3fca9e7SPeter Zijlstra static inline void 1373e3fca9e7SPeter Zijlstra queue_balance_callback(struct rq *rq, 1374e3fca9e7SPeter Zijlstra struct callback_head *head, 1375e3fca9e7SPeter Zijlstra void (*func)(struct rq *rq)) 1376e3fca9e7SPeter Zijlstra { 1377e3fca9e7SPeter Zijlstra lockdep_assert_held(&rq->lock); 1378e3fca9e7SPeter Zijlstra 1379e3fca9e7SPeter Zijlstra if (unlikely(head->next)) 1380e3fca9e7SPeter Zijlstra return; 1381e3fca9e7SPeter Zijlstra 1382e3fca9e7SPeter Zijlstra head->func = (void (*)(struct callback_head *))func; 1383e3fca9e7SPeter Zijlstra head->next = rq->balance_callback; 1384e3fca9e7SPeter Zijlstra rq->balance_callback = head; 1385e3fca9e7SPeter Zijlstra } 1386e3fca9e7SPeter Zijlstra 1387391e43daSPeter Zijlstra #define rcu_dereference_check_sched_domain(p) \ 1388391e43daSPeter Zijlstra rcu_dereference_check((p), \ 1389391e43daSPeter Zijlstra lockdep_is_held(&sched_domains_mutex)) 1390391e43daSPeter Zijlstra 1391391e43daSPeter Zijlstra /* 1392391e43daSPeter Zijlstra * The domain tree (rq->sd) is protected by RCU's quiescent state transition. 1393337e9b07SPaul E. McKenney * See destroy_sched_domains: call_rcu for details. 1394391e43daSPeter Zijlstra * 1395391e43daSPeter Zijlstra * The domain tree of any CPU may only be accessed from within 1396391e43daSPeter Zijlstra * preempt-disabled sections. 1397391e43daSPeter Zijlstra */ 1398391e43daSPeter Zijlstra #define for_each_domain(cpu, __sd) \ 1399518cd623SPeter Zijlstra for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \ 1400518cd623SPeter Zijlstra __sd; __sd = __sd->parent) 1401391e43daSPeter Zijlstra 1402518cd623SPeter Zijlstra /** 1403518cd623SPeter Zijlstra * highest_flag_domain - Return highest sched_domain containing flag. 140497fb7a0aSIngo Molnar * @cpu: The CPU whose highest level of sched domain is to 1405518cd623SPeter Zijlstra * be returned. 
1406518cd623SPeter Zijlstra * @flag: The flag to check for the highest sched_domain 140797fb7a0aSIngo Molnar * for the given CPU. 1408518cd623SPeter Zijlstra * 140997fb7a0aSIngo Molnar * Returns the highest sched_domain of a CPU which contains the given flag. 1410518cd623SPeter Zijlstra */ 1411518cd623SPeter Zijlstra static inline struct sched_domain *highest_flag_domain(int cpu, int flag) 1412518cd623SPeter Zijlstra { 1413518cd623SPeter Zijlstra struct sched_domain *sd, *hsd = NULL; 1414518cd623SPeter Zijlstra 1415518cd623SPeter Zijlstra for_each_domain(cpu, sd) { 1416518cd623SPeter Zijlstra if (!(sd->flags & flag)) 1417518cd623SPeter Zijlstra break; 1418518cd623SPeter Zijlstra hsd = sd; 1419518cd623SPeter Zijlstra } 1420518cd623SPeter Zijlstra 1421518cd623SPeter Zijlstra return hsd; 1422518cd623SPeter Zijlstra } 1423518cd623SPeter Zijlstra 1424fb13c7eeSMel Gorman static inline struct sched_domain *lowest_flag_domain(int cpu, int flag) 1425fb13c7eeSMel Gorman { 1426fb13c7eeSMel Gorman struct sched_domain *sd; 1427fb13c7eeSMel Gorman 1428fb13c7eeSMel Gorman for_each_domain(cpu, sd) { 1429fb13c7eeSMel Gorman if (sd->flags & flag) 1430fb13c7eeSMel Gorman break; 1431fb13c7eeSMel Gorman } 1432fb13c7eeSMel Gorman 1433fb13c7eeSMel Gorman return sd; 1434fb13c7eeSMel Gorman } 1435fb13c7eeSMel Gorman 1436994aeb7aSJoel Fernandes (Google) DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc); 14377d9ffa89SPeter Zijlstra DECLARE_PER_CPU(int, sd_llc_size); 1438518cd623SPeter Zijlstra DECLARE_PER_CPU(int, sd_llc_id); 1439994aeb7aSJoel Fernandes (Google) DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared); 1440994aeb7aSJoel Fernandes (Google) DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa); 1441994aeb7aSJoel Fernandes (Google) DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing); 1442994aeb7aSJoel Fernandes (Google) DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity); 1443df054e84SMorten Rasmussen extern struct static_key_false sched_asym_cpucapacity; 1444518cd623SPeter Zijlstra 144563b2ca30SNicolas Pitre struct sched_group_capacity { 14465e6521eaSLi Zefan atomic_t ref; 14475e6521eaSLi Zefan /* 1448172895e6SYuyang Du * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity 144963b2ca30SNicolas Pitre * for a single CPU. 14505e6521eaSLi Zefan */ 1451bf475ce0SMorten Rasmussen unsigned long capacity; 1452bf475ce0SMorten Rasmussen unsigned long min_capacity; /* Min per-CPU capacity in group */ 1453e3d6d0cbSMorten Rasmussen unsigned long max_capacity; /* Max per-CPU capacity in group */ 14545e6521eaSLi Zefan unsigned long next_update; 145563b2ca30SNicolas Pitre int imbalance; /* XXX unrelated to capacity but shared group state */ 14565e6521eaSLi Zefan 1457005f874dSPeter Zijlstra #ifdef CONFIG_SCHED_DEBUG 1458005f874dSPeter Zijlstra int id; 1459005f874dSPeter Zijlstra #endif 1460005f874dSPeter Zijlstra 146197fb7a0aSIngo Molnar unsigned long cpumask[0]; /* Balance mask */ 14625e6521eaSLi Zefan }; 14635e6521eaSLi Zefan 14645e6521eaSLi Zefan struct sched_group { 14655e6521eaSLi Zefan struct sched_group *next; /* Must be a circular list */ 14665e6521eaSLi Zefan atomic_t ref; 14675e6521eaSLi Zefan 14685e6521eaSLi Zefan unsigned int group_weight; 146963b2ca30SNicolas Pitre struct sched_group_capacity *sgc; 147097fb7a0aSIngo Molnar int asym_prefer_cpu; /* CPU of highest priority in group */ 14715e6521eaSLi Zefan 14725e6521eaSLi Zefan /* 14735e6521eaSLi Zefan * The CPUs this group covers. 
14745e6521eaSLi Zefan * 14755e6521eaSLi Zefan * NOTE: this field is variable length. (Allocated dynamically 14765e6521eaSLi Zefan * by attaching extra space to the end of the structure, 14775e6521eaSLi Zefan * depending on how many CPUs the kernel has booted up with) 14785e6521eaSLi Zefan */ 147904f5c362SGustavo A. R. Silva unsigned long cpumask[]; 14805e6521eaSLi Zefan }; 14815e6521eaSLi Zefan 1482ae4df9d6SPeter Zijlstra static inline struct cpumask *sched_group_span(struct sched_group *sg) 14835e6521eaSLi Zefan { 14845e6521eaSLi Zefan return to_cpumask(sg->cpumask); 14855e6521eaSLi Zefan } 14865e6521eaSLi Zefan 14875e6521eaSLi Zefan /* 1488e5c14b1fSPeter Zijlstra * See build_balance_mask(). 14895e6521eaSLi Zefan */ 1490e5c14b1fSPeter Zijlstra static inline struct cpumask *group_balance_mask(struct sched_group *sg) 14915e6521eaSLi Zefan { 149263b2ca30SNicolas Pitre return to_cpumask(sg->sgc->cpumask); 14935e6521eaSLi Zefan } 14945e6521eaSLi Zefan 14955e6521eaSLi Zefan /** 149697fb7a0aSIngo Molnar * group_first_cpu - Returns the first CPU in the cpumask of a sched_group. 149797fb7a0aSIngo Molnar * @group: The group whose first CPU is to be returned. 14985e6521eaSLi Zefan */ 14995e6521eaSLi Zefan static inline unsigned int group_first_cpu(struct sched_group *group) 15005e6521eaSLi Zefan { 1501ae4df9d6SPeter Zijlstra return cpumask_first(sched_group_span(group)); 15025e6521eaSLi Zefan } 15035e6521eaSLi Zefan 1504c1174876SPeter Zijlstra extern int group_balance_cpu(struct sched_group *sg); 1505c1174876SPeter Zijlstra 15063866e845SSteven Rostedt (Red Hat) #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL) 15073866e845SSteven Rostedt (Red Hat) void register_sched_domain_sysctl(void); 1508bbdacdfeSPeter Zijlstra void dirty_sched_domain_sysctl(int cpu); 15093866e845SSteven Rostedt (Red Hat) void unregister_sched_domain_sysctl(void); 15103866e845SSteven Rostedt (Red Hat) #else 15113866e845SSteven Rostedt (Red Hat) static inline void register_sched_domain_sysctl(void) 15123866e845SSteven Rostedt (Red Hat) { 15133866e845SSteven Rostedt (Red Hat) } 1514bbdacdfeSPeter Zijlstra static inline void dirty_sched_domain_sysctl(int cpu) 1515bbdacdfeSPeter Zijlstra { 1516bbdacdfeSPeter Zijlstra } 15173866e845SSteven Rostedt (Red Hat) static inline void unregister_sched_domain_sysctl(void) 15183866e845SSteven Rostedt (Red Hat) { 15193866e845SSteven Rostedt (Red Hat) } 15203866e845SSteven Rostedt (Red Hat) #endif 15213866e845SSteven Rostedt (Red Hat) 1522b2a02fc4SPeter Zijlstra extern void flush_smp_call_function_from_idle(void); 1523e3baac47SPeter Zijlstra 1524b2a02fc4SPeter Zijlstra #else /* !CONFIG_SMP: */ 1525b2a02fc4SPeter Zijlstra static inline void flush_smp_call_function_from_idle(void) { } 1526b2a02fc4SPeter Zijlstra #endif 1527391e43daSPeter Zijlstra 1528391e43daSPeter Zijlstra #include "stats.h" 15291051408fSIngo Molnar #include "autogroup.h" 1530391e43daSPeter Zijlstra 1531391e43daSPeter Zijlstra #ifdef CONFIG_CGROUP_SCHED 1532391e43daSPeter Zijlstra 1533391e43daSPeter Zijlstra /* 1534391e43daSPeter Zijlstra * Return the group to which this task belongs. 1535391e43daSPeter Zijlstra * 15368af01f56STejun Heo * We cannot use task_css() and friends because the cgroup subsystem 15378af01f56STejun Heo * changes that value before the cgroup_subsys::attach() method is called, 15388af01f56STejun Heo * therefore we cannot pin it and might observe the wrong value.
15398323f26cSPeter Zijlstra * 15408323f26cSPeter Zijlstra * The same is true for autogroup's p->signal->autogroup->tg, the autogroup 15418323f26cSPeter Zijlstra * core changes this before calling sched_move_task(). 15428323f26cSPeter Zijlstra * 15438323f26cSPeter Zijlstra * Instead we use a 'copy' which is updated from sched_move_task() while 15448323f26cSPeter Zijlstra * holding both task_struct::pi_lock and rq::lock. 1545391e43daSPeter Zijlstra */ 1546391e43daSPeter Zijlstra static inline struct task_group *task_group(struct task_struct *p) 1547391e43daSPeter Zijlstra { 15488323f26cSPeter Zijlstra return p->sched_task_group; 1549391e43daSPeter Zijlstra } 1550391e43daSPeter Zijlstra 1551391e43daSPeter Zijlstra /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */ 1552391e43daSPeter Zijlstra static inline void set_task_rq(struct task_struct *p, unsigned int cpu) 1553391e43daSPeter Zijlstra { 1554391e43daSPeter Zijlstra #if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED) 1555391e43daSPeter Zijlstra struct task_group *tg = task_group(p); 1556391e43daSPeter Zijlstra #endif 1557391e43daSPeter Zijlstra 1558391e43daSPeter Zijlstra #ifdef CONFIG_FAIR_GROUP_SCHED 1559ad936d86SByungchul Park set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]); 1560391e43daSPeter Zijlstra p->se.cfs_rq = tg->cfs_rq[cpu]; 1561391e43daSPeter Zijlstra p->se.parent = tg->se[cpu]; 1562391e43daSPeter Zijlstra #endif 1563391e43daSPeter Zijlstra 1564391e43daSPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED 1565391e43daSPeter Zijlstra p->rt.rt_rq = tg->rt_rq[cpu]; 1566391e43daSPeter Zijlstra p->rt.parent = tg->rt_se[cpu]; 1567391e43daSPeter Zijlstra #endif 1568391e43daSPeter Zijlstra } 1569391e43daSPeter Zijlstra 1570391e43daSPeter Zijlstra #else /* CONFIG_CGROUP_SCHED */ 1571391e43daSPeter Zijlstra 1572391e43daSPeter Zijlstra static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { } 1573391e43daSPeter Zijlstra static inline struct task_group *task_group(struct task_struct *p) 1574391e43daSPeter Zijlstra { 1575391e43daSPeter Zijlstra return NULL; 1576391e43daSPeter Zijlstra } 1577391e43daSPeter Zijlstra 1578391e43daSPeter Zijlstra #endif /* CONFIG_CGROUP_SCHED */ 1579391e43daSPeter Zijlstra 1580391e43daSPeter Zijlstra static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) 1581391e43daSPeter Zijlstra { 1582391e43daSPeter Zijlstra set_task_rq(p, cpu); 1583391e43daSPeter Zijlstra #ifdef CONFIG_SMP 1584391e43daSPeter Zijlstra /* 1585391e43daSPeter Zijlstra * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be 1586dfcb245eSIngo Molnar * successfully executed on another CPU. We must ensure that updates of 1587391e43daSPeter Zijlstra * per-task data have been completed by this moment. 
1588391e43daSPeter Zijlstra */ 1589391e43daSPeter Zijlstra smp_wmb(); 1590c65eacbeSAndy Lutomirski #ifdef CONFIG_THREAD_INFO_IN_TASK 1591c546951dSAndrea Parri WRITE_ONCE(p->cpu, cpu); 1592c65eacbeSAndy Lutomirski #else 1593c546951dSAndrea Parri WRITE_ONCE(task_thread_info(p)->cpu, cpu); 1594c65eacbeSAndy Lutomirski #endif 1595ac66f547SPeter Zijlstra p->wake_cpu = cpu; 1596391e43daSPeter Zijlstra #endif 1597391e43daSPeter Zijlstra } 1598391e43daSPeter Zijlstra 1599391e43daSPeter Zijlstra /* 1600391e43daSPeter Zijlstra * Tunables that become constants when CONFIG_SCHED_DEBUG is off: 1601391e43daSPeter Zijlstra */ 1602391e43daSPeter Zijlstra #ifdef CONFIG_SCHED_DEBUG 1603c5905afbSIngo Molnar # include <linux/static_key.h> 1604391e43daSPeter Zijlstra # define const_debug __read_mostly 1605391e43daSPeter Zijlstra #else 1606391e43daSPeter Zijlstra # define const_debug const 1607391e43daSPeter Zijlstra #endif 1608391e43daSPeter Zijlstra 1609391e43daSPeter Zijlstra #define SCHED_FEAT(name, enabled) \ 1610391e43daSPeter Zijlstra __SCHED_FEAT_##name , 1611391e43daSPeter Zijlstra 1612391e43daSPeter Zijlstra enum { 1613391e43daSPeter Zijlstra #include "features.h" 1614f8b6d1ccSPeter Zijlstra __SCHED_FEAT_NR, 1615391e43daSPeter Zijlstra }; 1616391e43daSPeter Zijlstra 1617391e43daSPeter Zijlstra #undef SCHED_FEAT 1618391e43daSPeter Zijlstra 1619e9666d10SMasahiro Yamada #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL) 1620765cc3a4SPatrick Bellasi 1621765cc3a4SPatrick Bellasi /* 1622765cc3a4SPatrick Bellasi * To support run-time toggling of sched features, all the translation units 1623765cc3a4SPatrick Bellasi * (but core.c) reference the sysctl_sched_features defined in core.c. 1624765cc3a4SPatrick Bellasi */ 1625765cc3a4SPatrick Bellasi extern const_debug unsigned int sysctl_sched_features; 1626765cc3a4SPatrick Bellasi 1627f8b6d1ccSPeter Zijlstra #define SCHED_FEAT(name, enabled) \ 1628c5905afbSIngo Molnar static __always_inline bool static_branch_##name(struct static_key *key) \ 1629f8b6d1ccSPeter Zijlstra { \ 16306e76ea8aSJason Baron return static_key_##enabled(key); \ 1631f8b6d1ccSPeter Zijlstra } 1632f8b6d1ccSPeter Zijlstra 1633f8b6d1ccSPeter Zijlstra #include "features.h" 1634f8b6d1ccSPeter Zijlstra #undef SCHED_FEAT 1635f8b6d1ccSPeter Zijlstra 1636c5905afbSIngo Molnar extern struct static_key sched_feat_keys[__SCHED_FEAT_NR]; 1637f8b6d1ccSPeter Zijlstra #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x])) 1638765cc3a4SPatrick Bellasi 1639e9666d10SMasahiro Yamada #else /* !(SCHED_DEBUG && CONFIG_JUMP_LABEL) */ 1640765cc3a4SPatrick Bellasi 1641765cc3a4SPatrick Bellasi /* 1642765cc3a4SPatrick Bellasi * Each translation unit has its own copy of sysctl_sched_features to allow 1643765cc3a4SPatrick Bellasi * constants propagation at compile time and compiler optimization based on 1644765cc3a4SPatrick Bellasi * features default. 
1645765cc3a4SPatrick Bellasi */ 1646765cc3a4SPatrick Bellasi #define SCHED_FEAT(name, enabled) \ 1647765cc3a4SPatrick Bellasi (1UL << __SCHED_FEAT_##name) * enabled | 1648765cc3a4SPatrick Bellasi static const_debug __maybe_unused unsigned int sysctl_sched_features = 1649765cc3a4SPatrick Bellasi #include "features.h" 1650765cc3a4SPatrick Bellasi 0; 1651765cc3a4SPatrick Bellasi #undef SCHED_FEAT 1652765cc3a4SPatrick Bellasi 16537e6f4c5dSPeter Zijlstra #define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) 1654765cc3a4SPatrick Bellasi 1655e9666d10SMasahiro Yamada #endif /* SCHED_DEBUG && CONFIG_JUMP_LABEL */ 1656391e43daSPeter Zijlstra 16572a595721SSrikar Dronamraju extern struct static_key_false sched_numa_balancing; 1658cb251765SMel Gorman extern struct static_key_false sched_schedstats; 1659cbee9f88SPeter Zijlstra 1660391e43daSPeter Zijlstra static inline u64 global_rt_period(void) 1661391e43daSPeter Zijlstra { 1662391e43daSPeter Zijlstra return (u64)sysctl_sched_rt_period * NSEC_PER_USEC; 1663391e43daSPeter Zijlstra } 1664391e43daSPeter Zijlstra 1665391e43daSPeter Zijlstra static inline u64 global_rt_runtime(void) 1666391e43daSPeter Zijlstra { 1667391e43daSPeter Zijlstra if (sysctl_sched_rt_runtime < 0) 1668391e43daSPeter Zijlstra return RUNTIME_INF; 1669391e43daSPeter Zijlstra 1670391e43daSPeter Zijlstra return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC; 1671391e43daSPeter Zijlstra } 1672391e43daSPeter Zijlstra 1673391e43daSPeter Zijlstra static inline int task_current(struct rq *rq, struct task_struct *p) 1674391e43daSPeter Zijlstra { 1675391e43daSPeter Zijlstra return rq->curr == p; 1676391e43daSPeter Zijlstra } 1677391e43daSPeter Zijlstra 1678391e43daSPeter Zijlstra static inline int task_running(struct rq *rq, struct task_struct *p) 1679391e43daSPeter Zijlstra { 1680391e43daSPeter Zijlstra #ifdef CONFIG_SMP 1681391e43daSPeter Zijlstra return p->on_cpu; 1682391e43daSPeter Zijlstra #else 1683391e43daSPeter Zijlstra return task_current(rq, p); 1684391e43daSPeter Zijlstra #endif 1685391e43daSPeter Zijlstra } 1686391e43daSPeter Zijlstra 1687da0c1e65SKirill Tkhai static inline int task_on_rq_queued(struct task_struct *p) 1688da0c1e65SKirill Tkhai { 1689da0c1e65SKirill Tkhai return p->on_rq == TASK_ON_RQ_QUEUED; 1690da0c1e65SKirill Tkhai } 1691391e43daSPeter Zijlstra 1692cca26e80SKirill Tkhai static inline int task_on_rq_migrating(struct task_struct *p) 1693cca26e80SKirill Tkhai { 1694c546951dSAndrea Parri return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING; 1695cca26e80SKirill Tkhai } 1696cca26e80SKirill Tkhai 1697b13095f0SLi Zefan /* 1698b13095f0SLi Zefan * wake flags 1699b13095f0SLi Zefan */ 170097fb7a0aSIngo Molnar #define WF_SYNC 0x01 /* Waker goes to sleep after wakeup */ 170197fb7a0aSIngo Molnar #define WF_FORK 0x02 /* Child wakeup after fork */ 17022ebb1771SMel Gorman #define WF_MIGRATED 0x04 /* Internal use, task got migrated */ 1703739f70b4SPeter Zijlstra #define WF_ON_CPU 0x08 /* Wakee is on_cpu */ 1704b13095f0SLi Zefan 1705391e43daSPeter Zijlstra /* 1706391e43daSPeter Zijlstra * To aid in avoiding the subversion of "niceness" due to uneven distribution 1707391e43daSPeter Zijlstra * of tasks with abnormal "nice" values across CPUs the contribution that 1708391e43daSPeter Zijlstra * each task makes to its run queue's load is weighted according to its 1709391e43daSPeter Zijlstra * scheduling class and "nice" value. 
For SCHED_NORMAL tasks this is just a 1710391e43daSPeter Zijlstra * scaled version of the new time slice allocation that they receive on time 1711391e43daSPeter Zijlstra * slice expiry etc. 1712391e43daSPeter Zijlstra */ 1713391e43daSPeter Zijlstra 1714391e43daSPeter Zijlstra #define WEIGHT_IDLEPRIO 3 1715391e43daSPeter Zijlstra #define WMULT_IDLEPRIO 1431655765 1716391e43daSPeter Zijlstra 1717ed82b8a1SAndi Kleen extern const int sched_prio_to_weight[40]; 1718ed82b8a1SAndi Kleen extern const u32 sched_prio_to_wmult[40]; 1719391e43daSPeter Zijlstra 1720ff77e468SPeter Zijlstra /* 1721ff77e468SPeter Zijlstra * {de,en}queue flags: 1722ff77e468SPeter Zijlstra * 1723ff77e468SPeter Zijlstra * DEQUEUE_SLEEP - task is no longer runnable 1724ff77e468SPeter Zijlstra * ENQUEUE_WAKEUP - task just became runnable 1725ff77e468SPeter Zijlstra * 1726ff77e468SPeter Zijlstra * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks 1727ff77e468SPeter Zijlstra * are in a known state which allows modification. Such pairs 1728ff77e468SPeter Zijlstra * should preserve as much state as possible. 1729ff77e468SPeter Zijlstra * 1730ff77e468SPeter Zijlstra * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location 1731ff77e468SPeter Zijlstra * in the runqueue. 1732ff77e468SPeter Zijlstra * 1733ff77e468SPeter Zijlstra * ENQUEUE_HEAD - place at front of runqueue (tail if not specified) 1734ff77e468SPeter Zijlstra * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline) 173559efa0baSPeter Zijlstra * ENQUEUE_MIGRATED - the task was migrated during wakeup 1736ff77e468SPeter Zijlstra * 1737ff77e468SPeter Zijlstra */ 1738ff77e468SPeter Zijlstra 1739ff77e468SPeter Zijlstra #define DEQUEUE_SLEEP 0x01 174097fb7a0aSIngo Molnar #define DEQUEUE_SAVE 0x02 /* Matches ENQUEUE_RESTORE */ 174197fb7a0aSIngo Molnar #define DEQUEUE_MOVE 0x04 /* Matches ENQUEUE_MOVE */ 174297fb7a0aSIngo Molnar #define DEQUEUE_NOCLOCK 0x08 /* Matches ENQUEUE_NOCLOCK */ 1743ff77e468SPeter Zijlstra 17441de64443SPeter Zijlstra #define ENQUEUE_WAKEUP 0x01 1745ff77e468SPeter Zijlstra #define ENQUEUE_RESTORE 0x02 1746ff77e468SPeter Zijlstra #define ENQUEUE_MOVE 0x04 17470a67d1eeSPeter Zijlstra #define ENQUEUE_NOCLOCK 0x08 1748ff77e468SPeter Zijlstra 17490a67d1eeSPeter Zijlstra #define ENQUEUE_HEAD 0x10 17500a67d1eeSPeter Zijlstra #define ENQUEUE_REPLENISH 0x20 1751c82ba9faSLi Zefan #ifdef CONFIG_SMP 17520a67d1eeSPeter Zijlstra #define ENQUEUE_MIGRATED 0x40 1753c82ba9faSLi Zefan #else 175459efa0baSPeter Zijlstra #define ENQUEUE_MIGRATED 0x00 1755c82ba9faSLi Zefan #endif 1756c82ba9faSLi Zefan 175737e117c0SPeter Zijlstra #define RETRY_TASK ((void *)-1UL) 175837e117c0SPeter Zijlstra 1759c82ba9faSLi Zefan struct sched_class { 1760c82ba9faSLi Zefan 176169842cbaSPatrick Bellasi #ifdef CONFIG_UCLAMP_TASK 176269842cbaSPatrick Bellasi int uclamp_enabled; 176369842cbaSPatrick Bellasi #endif 176469842cbaSPatrick Bellasi 1765c82ba9faSLi Zefan void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags); 1766c82ba9faSLi Zefan void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags); 1767c82ba9faSLi Zefan void (*yield_task) (struct rq *rq); 17680900acf2SDietmar Eggemann bool (*yield_to_task)(struct rq *rq, struct task_struct *p); 1769c82ba9faSLi Zefan 1770c82ba9faSLi Zefan void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags); 1771c82ba9faSLi Zefan 177298c2f700SPeter Zijlstra struct task_struct *(*pick_next_task)(struct rq *rq); 177398c2f700SPeter Zijlstra 17746e2df058SPeter Zijlstra 
void (*put_prev_task)(struct rq *rq, struct task_struct *p); 1775a0e813f2SPeter Zijlstra void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first); 1776c82ba9faSLi Zefan 1777c82ba9faSLi Zefan #ifdef CONFIG_SMP 17786e2df058SPeter Zijlstra int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf); 1779ac66f547SPeter Zijlstra int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags); 17801327237aSSrikar Dronamraju void (*migrate_task_rq)(struct task_struct *p, int new_cpu); 1781c82ba9faSLi Zefan 1782c82ba9faSLi Zefan void (*task_woken)(struct rq *this_rq, struct task_struct *task); 1783c82ba9faSLi Zefan 1784c82ba9faSLi Zefan void (*set_cpus_allowed)(struct task_struct *p, 1785c82ba9faSLi Zefan const struct cpumask *newmask); 1786c82ba9faSLi Zefan 1787c82ba9faSLi Zefan void (*rq_online)(struct rq *rq); 1788c82ba9faSLi Zefan void (*rq_offline)(struct rq *rq); 1789c82ba9faSLi Zefan #endif 1790c82ba9faSLi Zefan 1791c82ba9faSLi Zefan void (*task_tick)(struct rq *rq, struct task_struct *p, int queued); 1792c82ba9faSLi Zefan void (*task_fork)(struct task_struct *p); 1793e6c390f2SDario Faggioli void (*task_dead)(struct task_struct *p); 1794c82ba9faSLi Zefan 179567dfa1b7SKirill Tkhai /* 179667dfa1b7SKirill Tkhai * The switched_from() call is allowed to drop rq->lock, therefore we 179767dfa1b7SKirill Tkhai * cannot assume the switched_from/switched_to pair is serialized by 179867dfa1b7SKirill Tkhai * rq->lock. They are however serialized by p->pi_lock. 179967dfa1b7SKirill Tkhai */ 1800c82ba9faSLi Zefan void (*switched_from)(struct rq *this_rq, struct task_struct *task); 1801c82ba9faSLi Zefan void (*switched_to) (struct rq *this_rq, struct task_struct *task); 1802c82ba9faSLi Zefan void (*prio_changed) (struct rq *this_rq, struct task_struct *task, 1803c82ba9faSLi Zefan int oldprio); 1804c82ba9faSLi Zefan 1805c82ba9faSLi Zefan unsigned int (*get_rr_interval)(struct rq *rq, 1806c82ba9faSLi Zefan struct task_struct *task); 1807c82ba9faSLi Zefan 18086e998916SStanislaw Gruszka void (*update_curr)(struct rq *rq); 18096e998916SStanislaw Gruszka 1810ea86cb4bSVincent Guittot #define TASK_SET_GROUP 0 1811ea86cb4bSVincent Guittot #define TASK_MOVE_GROUP 1 1812ea86cb4bSVincent Guittot 1813c82ba9faSLi Zefan #ifdef CONFIG_FAIR_GROUP_SCHED 1814ea86cb4bSVincent Guittot void (*task_change_group)(struct task_struct *p, int type); 1815c82ba9faSLi Zefan #endif 181685c2ce91SPeter Zijlstra } __aligned(STRUCT_ALIGNMENT); /* STRUCT_ALIGN(), vmlinux.lds.h */ 1817391e43daSPeter Zijlstra 18183f1d2a31SPeter Zijlstra static inline void put_prev_task(struct rq *rq, struct task_struct *prev) 18193f1d2a31SPeter Zijlstra { 182010e7071bSPeter Zijlstra WARN_ON_ONCE(rq->curr != prev); 18216e2df058SPeter Zijlstra prev->sched_class->put_prev_task(rq, prev); 18223f1d2a31SPeter Zijlstra } 18233f1d2a31SPeter Zijlstra 182403b7fad1SPeter Zijlstra static inline void set_next_task(struct rq *rq, struct task_struct *next) 1825b2bf6c31SPeter Zijlstra { 182603b7fad1SPeter Zijlstra WARN_ON_ONCE(rq->curr != next); 1827a0e813f2SPeter Zijlstra next->sched_class->set_next_task(rq, next, false); 1828b2bf6c31SPeter Zijlstra } 1829b2bf6c31SPeter Zijlstra 1830c3a340f7SSteven Rostedt (VMware) /* Defined in include/asm-generic/vmlinux.lds.h */ 1831c3a340f7SSteven Rostedt (VMware) extern struct sched_class __begin_sched_classes[]; 1832c3a340f7SSteven Rostedt (VMware) extern struct sched_class __end_sched_classes[]; 1833c3a340f7SSteven Rostedt (VMware) 1834c3a340f7SSteven Rostedt (VMware) #define
sched_class_highest (__end_sched_classes - 1) 1835c3a340f7SSteven Rostedt (VMware) #define sched_class_lowest (__begin_sched_classes - 1) 18366e2df058SPeter Zijlstra 18376e2df058SPeter Zijlstra #define for_class_range(class, _from, _to) \ 1838c3a340f7SSteven Rostedt (VMware) for (class = (_from); class != (_to); class--) 18396e2df058SPeter Zijlstra 1840391e43daSPeter Zijlstra #define for_each_class(class) \ 1841c3a340f7SSteven Rostedt (VMware) for_class_range(class, sched_class_highest, sched_class_lowest) 1842391e43daSPeter Zijlstra 1843391e43daSPeter Zijlstra extern const struct sched_class stop_sched_class; 1844aab03e05SDario Faggioli extern const struct sched_class dl_sched_class; 1845391e43daSPeter Zijlstra extern const struct sched_class rt_sched_class; 1846391e43daSPeter Zijlstra extern const struct sched_class fair_sched_class; 1847391e43daSPeter Zijlstra extern const struct sched_class idle_sched_class; 1848391e43daSPeter Zijlstra 18496e2df058SPeter Zijlstra static inline bool sched_stop_runnable(struct rq *rq) 18506e2df058SPeter Zijlstra { 18516e2df058SPeter Zijlstra return rq->stop && task_on_rq_queued(rq->stop); 18526e2df058SPeter Zijlstra } 18536e2df058SPeter Zijlstra 18546e2df058SPeter Zijlstra static inline bool sched_dl_runnable(struct rq *rq) 18556e2df058SPeter Zijlstra { 18566e2df058SPeter Zijlstra return rq->dl.dl_nr_running > 0; 18576e2df058SPeter Zijlstra } 18586e2df058SPeter Zijlstra 18596e2df058SPeter Zijlstra static inline bool sched_rt_runnable(struct rq *rq) 18606e2df058SPeter Zijlstra { 18616e2df058SPeter Zijlstra return rq->rt.rt_queued > 0; 18626e2df058SPeter Zijlstra } 18636e2df058SPeter Zijlstra 18646e2df058SPeter Zijlstra static inline bool sched_fair_runnable(struct rq *rq) 18656e2df058SPeter Zijlstra { 18666e2df058SPeter Zijlstra return rq->cfs.nr_running > 0; 18676e2df058SPeter Zijlstra } 1868391e43daSPeter Zijlstra 18695d7d6056SPeter Zijlstra extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf); 187098c2f700SPeter Zijlstra extern struct task_struct *pick_next_task_idle(struct rq *rq); 18715d7d6056SPeter Zijlstra 1872391e43daSPeter Zijlstra #ifdef CONFIG_SMP 1873391e43daSPeter Zijlstra 187463b2ca30SNicolas Pitre extern void update_group_capacity(struct sched_domain *sd, int cpu); 1875b719203bSLi Zefan 18767caff66fSDaniel Lezcano extern void trigger_load_balance(struct rq *rq); 1877391e43daSPeter Zijlstra 1878c5b28038SPeter Zijlstra extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask); 1879c5b28038SPeter Zijlstra 1880391e43daSPeter Zijlstra #endif 1881391e43daSPeter Zijlstra 1882442bf3aaSDaniel Lezcano #ifdef CONFIG_CPU_IDLE 1883442bf3aaSDaniel Lezcano static inline void idle_set_state(struct rq *rq, 1884442bf3aaSDaniel Lezcano struct cpuidle_state *idle_state) 1885442bf3aaSDaniel Lezcano { 1886442bf3aaSDaniel Lezcano rq->idle_state = idle_state; 1887442bf3aaSDaniel Lezcano } 1888442bf3aaSDaniel Lezcano 1889442bf3aaSDaniel Lezcano static inline struct cpuidle_state *idle_get_state(struct rq *rq) 1890442bf3aaSDaniel Lezcano { 18919148a3a1SPeter Zijlstra SCHED_WARN_ON(!rcu_read_lock_held()); 189297fb7a0aSIngo Molnar 1893442bf3aaSDaniel Lezcano return rq->idle_state; 1894442bf3aaSDaniel Lezcano } 1895442bf3aaSDaniel Lezcano #else 1896442bf3aaSDaniel Lezcano static inline void idle_set_state(struct rq *rq, 1897442bf3aaSDaniel Lezcano struct cpuidle_state *idle_state) 1898442bf3aaSDaniel Lezcano { 1899442bf3aaSDaniel Lezcano } 1900442bf3aaSDaniel Lezcano 
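/*
 * (Added note, not in the upstream file: with CONFIG_CPU_IDLE disabled
 * these accessors compile down to an empty stub and a NULL return, so
 * callers can use them unconditionally, without #ifdef guards.)
 */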
1901442bf3aaSDaniel Lezcano static inline struct cpuidle_state *idle_get_state(struct rq *rq) 1902442bf3aaSDaniel Lezcano { 1903442bf3aaSDaniel Lezcano return NULL; 1904442bf3aaSDaniel Lezcano } 1905442bf3aaSDaniel Lezcano #endif 1906442bf3aaSDaniel Lezcano 19078663effbSSteven Rostedt (VMware) extern void schedule_idle(void); 19088663effbSSteven Rostedt (VMware) 1909391e43daSPeter Zijlstra extern void sysrq_sched_debug_show(void); 1910391e43daSPeter Zijlstra extern void sched_init_granularity(void); 1911391e43daSPeter Zijlstra extern void update_max_interval(void); 19121baca4ceSJuri Lelli 19131baca4ceSJuri Lelli extern void init_sched_dl_class(void); 1914391e43daSPeter Zijlstra extern void init_sched_rt_class(void); 1915391e43daSPeter Zijlstra extern void init_sched_fair_class(void); 1916391e43daSPeter Zijlstra 19179059393eSVincent Guittot extern void reweight_task(struct task_struct *p, int prio); 19189059393eSVincent Guittot 19198875125eSKirill Tkhai extern void resched_curr(struct rq *rq); 1920391e43daSPeter Zijlstra extern void resched_cpu(int cpu); 1921391e43daSPeter Zijlstra 1922391e43daSPeter Zijlstra extern struct rt_bandwidth def_rt_bandwidth; 1923391e43daSPeter Zijlstra extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); 1924391e43daSPeter Zijlstra 1925332ac17eSDario Faggioli extern struct dl_bandwidth def_dl_bandwidth; 1926332ac17eSDario Faggioli extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime); 1927aab03e05SDario Faggioli extern void init_dl_task_timer(struct sched_dl_entity *dl_se); 1928209a0cbdSLuca Abeni extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se); 1929aab03e05SDario Faggioli 1930c52f14d3SLuca Abeni #define BW_SHIFT 20 1931c52f14d3SLuca Abeni #define BW_UNIT (1 << BW_SHIFT) 19324da3abceSLuca Abeni #define RATIO_SHIFT 8 1933d505b8afSHuaixin Chang #define MAX_BW_BITS (64 - BW_SHIFT) 1934d505b8afSHuaixin Chang #define MAX_BW ((1ULL << MAX_BW_BITS) - 1) 1935332ac17eSDario Faggioli unsigned long to_ratio(u64 period, u64 runtime); 1936332ac17eSDario Faggioli 1937540247fbSYuyang Du extern void init_entity_runnable_average(struct sched_entity *se); 1938d0fe0b9cSDietmar Eggemann extern void post_init_entity_util_avg(struct task_struct *p); 1939a75cdaa9SAlex Shi 194076d92ac3SFrederic Weisbecker #ifdef CONFIG_NO_HZ_FULL 194176d92ac3SFrederic Weisbecker extern bool sched_can_stop_tick(struct rq *rq); 1942d84b3131SFrederic Weisbecker extern int __init sched_tick_offload_init(void); 194376d92ac3SFrederic Weisbecker 194476d92ac3SFrederic Weisbecker /* 194576d92ac3SFrederic Weisbecker * Tick may be needed by tasks in the runqueue depending on their policy and 194676d92ac3SFrederic Weisbecker * requirements. If tick is needed, let's send the target an IPI to kick it out of 194776d92ac3SFrederic Weisbecker * nohz mode if necessary.
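 * (Added example: a nohz_full CPU running one task can let the tick stop;
 * the moment a second task is enqueued, sched_can_stop_tick() returns
 * false and the TICK_DEP_BIT_SCHED dependency set below brings the tick
 * back, so preemption between the two tasks keeps working.)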
194876d92ac3SFrederic Weisbecker */ 194976d92ac3SFrederic Weisbecker static inline void sched_update_tick_dependency(struct rq *rq) 195076d92ac3SFrederic Weisbecker { 195176d92ac3SFrederic Weisbecker int cpu; 195276d92ac3SFrederic Weisbecker 195376d92ac3SFrederic Weisbecker if (!tick_nohz_full_enabled()) 195476d92ac3SFrederic Weisbecker return; 195576d92ac3SFrederic Weisbecker 195676d92ac3SFrederic Weisbecker cpu = cpu_of(rq); 195776d92ac3SFrederic Weisbecker 195876d92ac3SFrederic Weisbecker if (!tick_nohz_full_cpu(cpu)) 195976d92ac3SFrederic Weisbecker return; 196076d92ac3SFrederic Weisbecker 196176d92ac3SFrederic Weisbecker if (sched_can_stop_tick(rq)) 196276d92ac3SFrederic Weisbecker tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED); 196376d92ac3SFrederic Weisbecker else 196476d92ac3SFrederic Weisbecker tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED); 196576d92ac3SFrederic Weisbecker } 196676d92ac3SFrederic Weisbecker #else 1967d84b3131SFrederic Weisbecker static inline int sched_tick_offload_init(void) { return 0; } 196876d92ac3SFrederic Weisbecker static inline void sched_update_tick_dependency(struct rq *rq) { } 196976d92ac3SFrederic Weisbecker #endif 197076d92ac3SFrederic Weisbecker 197172465447SKirill Tkhai static inline void add_nr_running(struct rq *rq, unsigned count) 1972391e43daSPeter Zijlstra { 197372465447SKirill Tkhai unsigned prev_nr = rq->nr_running; 197472465447SKirill Tkhai 197572465447SKirill Tkhai rq->nr_running = prev_nr + count; 19769f3660c2SFrederic Weisbecker 19774486edd1STim Chen #ifdef CONFIG_SMP 19783e184501SViresh Kumar if (prev_nr < 2 && rq->nr_running >= 2) { 1979e90c8fe1SValentin Schneider if (!READ_ONCE(rq->rd->overload)) 1980e90c8fe1SValentin Schneider WRITE_ONCE(rq->rd->overload, 1); 198176d92ac3SFrederic Weisbecker } 19823e184501SViresh Kumar #endif 19834486edd1STim Chen 198476d92ac3SFrederic Weisbecker sched_update_tick_dependency(rq); 19854486edd1STim Chen } 1986391e43daSPeter Zijlstra 198772465447SKirill Tkhai static inline void sub_nr_running(struct rq *rq, unsigned count) 1988391e43daSPeter Zijlstra { 198972465447SKirill Tkhai rq->nr_running -= count; 199076d92ac3SFrederic Weisbecker /* Check if we still need preemption */ 199176d92ac3SFrederic Weisbecker sched_update_tick_dependency(rq); 1992391e43daSPeter Zijlstra } 1993391e43daSPeter Zijlstra 1994391e43daSPeter Zijlstra extern void activate_task(struct rq *rq, struct task_struct *p, int flags); 1995391e43daSPeter Zijlstra extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags); 1996391e43daSPeter Zijlstra 1997391e43daSPeter Zijlstra extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags); 1998391e43daSPeter Zijlstra 1999391e43daSPeter Zijlstra extern const_debug unsigned int sysctl_sched_nr_migrate; 2000391e43daSPeter Zijlstra extern const_debug unsigned int sysctl_sched_migration_cost; 2001391e43daSPeter Zijlstra 2002391e43daSPeter Zijlstra #ifdef CONFIG_SCHED_HRTICK 2003391e43daSPeter Zijlstra 2004391e43daSPeter Zijlstra /* 2005391e43daSPeter Zijlstra * Use hrtick when: 2006391e43daSPeter Zijlstra * - enabled by features 2007391e43daSPeter Zijlstra * - hrtimer is actually high res 2008391e43daSPeter Zijlstra */ 2009391e43daSPeter Zijlstra static inline int hrtick_enabled(struct rq *rq) 2010391e43daSPeter Zijlstra { 2011391e43daSPeter Zijlstra if (!sched_feat(HRTICK)) 2012391e43daSPeter Zijlstra return 0; 2013391e43daSPeter Zijlstra if (!cpu_active(cpu_of(rq))) 2014391e43daSPeter Zijlstra return 0; 2015391e43daSPeter Zijlstra return 
hrtimer_is_hres_active(&rq->hrtick_timer); 2016391e43daSPeter Zijlstra } 2017391e43daSPeter Zijlstra 2018391e43daSPeter Zijlstra void hrtick_start(struct rq *rq, u64 delay); 2019391e43daSPeter Zijlstra 2020b39e66eaSMike Galbraith #else 2021b39e66eaSMike Galbraith 2022b39e66eaSMike Galbraith static inline int hrtick_enabled(struct rq *rq) 2023b39e66eaSMike Galbraith { 2024b39e66eaSMike Galbraith return 0; 2025b39e66eaSMike Galbraith } 2026b39e66eaSMike Galbraith 2027391e43daSPeter Zijlstra #endif /* CONFIG_SCHED_HRTICK */ 2028391e43daSPeter Zijlstra 20291567c3e3SGiovanni Gherdovich #ifndef arch_scale_freq_tick 20301567c3e3SGiovanni Gherdovich static __always_inline 20311567c3e3SGiovanni Gherdovich void arch_scale_freq_tick(void) 20321567c3e3SGiovanni Gherdovich { 20331567c3e3SGiovanni Gherdovich } 20341567c3e3SGiovanni Gherdovich #endif 20351567c3e3SGiovanni Gherdovich 2036dfbca41fSPeter Zijlstra #ifndef arch_scale_freq_capacity 2037dfbca41fSPeter Zijlstra static __always_inline 20387673c8a4SJuri Lelli unsigned long arch_scale_freq_capacity(int cpu) 2039dfbca41fSPeter Zijlstra { 2040dfbca41fSPeter Zijlstra return SCHED_CAPACITY_SCALE; 2041dfbca41fSPeter Zijlstra } 2042dfbca41fSPeter Zijlstra #endif 2043b5b4860dSVincent Guittot 20447e1a9208SJuri Lelli #ifdef CONFIG_SMP 2045c1a280b6SThomas Gleixner #ifdef CONFIG_PREEMPTION 2046391e43daSPeter Zijlstra 2047391e43daSPeter Zijlstra static inline void double_rq_lock(struct rq *rq1, struct rq *rq2); 2048391e43daSPeter Zijlstra 2049391e43daSPeter Zijlstra /* 2050391e43daSPeter Zijlstra * fair double_lock_balance: Safely acquires both rq->locks in a fair 2051391e43daSPeter Zijlstra * way at the expense of forcing extra atomic operations in all 2052391e43daSPeter Zijlstra * invocations. This assures that the double_lock is acquired using the 2053391e43daSPeter Zijlstra * same underlying policy as the spinlock_t on this architecture, which 2054391e43daSPeter Zijlstra * reduces latency compared to the unfair variant below. However, it 2055391e43daSPeter Zijlstra * also adds more overhead and therefore may reduce throughput. 2056391e43daSPeter Zijlstra */ 2057391e43daSPeter Zijlstra static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) 2058391e43daSPeter Zijlstra __releases(this_rq->lock) 2059391e43daSPeter Zijlstra __acquires(busiest->lock) 2060391e43daSPeter Zijlstra __acquires(this_rq->lock) 2061391e43daSPeter Zijlstra { 2062391e43daSPeter Zijlstra raw_spin_unlock(&this_rq->lock); 2063391e43daSPeter Zijlstra double_rq_lock(this_rq, busiest); 2064391e43daSPeter Zijlstra 2065391e43daSPeter Zijlstra return 1; 2066391e43daSPeter Zijlstra } 2067391e43daSPeter Zijlstra 2068391e43daSPeter Zijlstra #else 2069391e43daSPeter Zijlstra /* 2070391e43daSPeter Zijlstra * Unfair double_lock_balance: Optimizes throughput at the expense of 2071391e43daSPeter Zijlstra * latency by eliminating extra atomic operations when the locks are 207297fb7a0aSIngo Molnar * already in proper order on entry. This favors lower CPU-ids and will 207397fb7a0aSIngo Molnar * grant the double lock to lower CPUs over higher ids under contention, 2074391e43daSPeter Zijlstra * regardless of entry order into the function. 
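 * (Added worked example: suppose this CPU holds rq A and wants rq B. If
 * the trylock on B fails and B < A, we drop A and take the pair in B, A
 * order; if B > A, B is acquired nested while A stays held. Both lockers
 * of any pair therefore converge on a lower-address-first order, which
 * is what rules out an ABBA deadlock.)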
2075391e43daSPeter Zijlstra */ 2076391e43daSPeter Zijlstra static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) 2077391e43daSPeter Zijlstra __releases(this_rq->lock) 2078391e43daSPeter Zijlstra __acquires(busiest->lock) 2079391e43daSPeter Zijlstra __acquires(this_rq->lock) 2080391e43daSPeter Zijlstra { 2081391e43daSPeter Zijlstra int ret = 0; 2082391e43daSPeter Zijlstra 2083391e43daSPeter Zijlstra if (unlikely(!raw_spin_trylock(&busiest->lock))) { 2084391e43daSPeter Zijlstra if (busiest < this_rq) { 2085391e43daSPeter Zijlstra raw_spin_unlock(&this_rq->lock); 2086391e43daSPeter Zijlstra raw_spin_lock(&busiest->lock); 2087391e43daSPeter Zijlstra raw_spin_lock_nested(&this_rq->lock, 2088391e43daSPeter Zijlstra SINGLE_DEPTH_NESTING); 2089391e43daSPeter Zijlstra ret = 1; 2090391e43daSPeter Zijlstra } else 2091391e43daSPeter Zijlstra raw_spin_lock_nested(&busiest->lock, 2092391e43daSPeter Zijlstra SINGLE_DEPTH_NESTING); 2093391e43daSPeter Zijlstra } 2094391e43daSPeter Zijlstra return ret; 2095391e43daSPeter Zijlstra } 2096391e43daSPeter Zijlstra 2097c1a280b6SThomas Gleixner #endif /* CONFIG_PREEMPTION */ 2098391e43daSPeter Zijlstra 2099391e43daSPeter Zijlstra /* 2100391e43daSPeter Zijlstra * double_lock_balance - lock the busiest runqueue, this_rq is locked already. 2101391e43daSPeter Zijlstra */ 2102391e43daSPeter Zijlstra static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest) 2103391e43daSPeter Zijlstra { 2104391e43daSPeter Zijlstra if (unlikely(!irqs_disabled())) { 210597fb7a0aSIngo Molnar /* printk() doesn't work well under rq->lock */ 2106391e43daSPeter Zijlstra raw_spin_unlock(&this_rq->lock); 2107391e43daSPeter Zijlstra BUG_ON(1); 2108391e43daSPeter Zijlstra } 2109391e43daSPeter Zijlstra 2110391e43daSPeter Zijlstra return _double_lock_balance(this_rq, busiest); 2111391e43daSPeter Zijlstra } 2112391e43daSPeter Zijlstra 2113391e43daSPeter Zijlstra static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) 2114391e43daSPeter Zijlstra __releases(busiest->lock) 2115391e43daSPeter Zijlstra { 2116391e43daSPeter Zijlstra raw_spin_unlock(&busiest->lock); 2117391e43daSPeter Zijlstra lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); 2118391e43daSPeter Zijlstra } 2119391e43daSPeter Zijlstra 212074602315SPeter Zijlstra static inline void double_lock(spinlock_t *l1, spinlock_t *l2) 212174602315SPeter Zijlstra { 212274602315SPeter Zijlstra if (l1 > l2) 212374602315SPeter Zijlstra swap(l1, l2); 212474602315SPeter Zijlstra 212574602315SPeter Zijlstra spin_lock(l1); 212674602315SPeter Zijlstra spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 212774602315SPeter Zijlstra } 212874602315SPeter Zijlstra 212960e69eedSMike Galbraith static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2) 213060e69eedSMike Galbraith { 213160e69eedSMike Galbraith if (l1 > l2) 213260e69eedSMike Galbraith swap(l1, l2); 213360e69eedSMike Galbraith 213460e69eedSMike Galbraith spin_lock_irq(l1); 213560e69eedSMike Galbraith spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 213660e69eedSMike Galbraith } 213760e69eedSMike Galbraith 213874602315SPeter Zijlstra static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2) 213974602315SPeter Zijlstra { 214074602315SPeter Zijlstra if (l1 > l2) 214174602315SPeter Zijlstra swap(l1, l2); 214274602315SPeter Zijlstra 214374602315SPeter Zijlstra raw_spin_lock(l1); 214474602315SPeter Zijlstra raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING); 214574602315SPeter Zijlstra } 214674602315SPeter Zijlstra 
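/*
 * Illustrative sketch (an addition, not part of the upstream file): all
 * of the double-lock helpers above follow one deadlock-avoidance rule,
 * namely to take the lower-addressed lock first, so any two CPUs locking
 * the same pair agree on the order. A hypothetical caller looks like:
 */
static inline void sketch_double_raw_lock_usage(raw_spinlock_t *a, raw_spinlock_t *b)
{
	double_raw_lock(a, b);		/* internally swaps so the lower address is locked first */
	/* ... critical section protected by both locks ... */
	raw_spin_unlock(b);		/* unlock order is irrelevant */
	raw_spin_unlock(a);
}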
2147391e43daSPeter Zijlstra /* 2148391e43daSPeter Zijlstra * double_rq_lock - safely lock two runqueues 2149391e43daSPeter Zijlstra * 2150391e43daSPeter Zijlstra * Note this does not disable interrupts like task_rq_lock, 2151391e43daSPeter Zijlstra * you need to do so manually before calling. 2152391e43daSPeter Zijlstra */ 2153391e43daSPeter Zijlstra static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) 2154391e43daSPeter Zijlstra __acquires(rq1->lock) 2155391e43daSPeter Zijlstra __acquires(rq2->lock) 2156391e43daSPeter Zijlstra { 2157391e43daSPeter Zijlstra BUG_ON(!irqs_disabled()); 2158391e43daSPeter Zijlstra if (rq1 == rq2) { 2159391e43daSPeter Zijlstra raw_spin_lock(&rq1->lock); 2160391e43daSPeter Zijlstra __acquire(rq2->lock); /* Fake it out ;) */ 2161391e43daSPeter Zijlstra } else { 2162391e43daSPeter Zijlstra if (rq1 < rq2) { 2163391e43daSPeter Zijlstra raw_spin_lock(&rq1->lock); 2164391e43daSPeter Zijlstra raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); 2165391e43daSPeter Zijlstra } else { 2166391e43daSPeter Zijlstra raw_spin_lock(&rq2->lock); 2167391e43daSPeter Zijlstra raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); 2168391e43daSPeter Zijlstra } 2169391e43daSPeter Zijlstra } 2170391e43daSPeter Zijlstra } 2171391e43daSPeter Zijlstra 2172391e43daSPeter Zijlstra /* 2173391e43daSPeter Zijlstra * double_rq_unlock - safely unlock two runqueues 2174391e43daSPeter Zijlstra * 2175391e43daSPeter Zijlstra * Note this does not restore interrupts like task_rq_unlock, 2176391e43daSPeter Zijlstra * you need to do so manually after calling. 2177391e43daSPeter Zijlstra */ 2178391e43daSPeter Zijlstra static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) 2179391e43daSPeter Zijlstra __releases(rq1->lock) 2180391e43daSPeter Zijlstra __releases(rq2->lock) 2181391e43daSPeter Zijlstra { 2182391e43daSPeter Zijlstra raw_spin_unlock(&rq1->lock); 2183391e43daSPeter Zijlstra if (rq1 != rq2) 2184391e43daSPeter Zijlstra raw_spin_unlock(&rq2->lock); 2185391e43daSPeter Zijlstra else 2186391e43daSPeter Zijlstra __release(rq2->lock); 2187391e43daSPeter Zijlstra } 2188391e43daSPeter Zijlstra 2189f2cb1360SIngo Molnar extern void set_rq_online (struct rq *rq); 2190f2cb1360SIngo Molnar extern void set_rq_offline(struct rq *rq); 2191f2cb1360SIngo Molnar extern bool sched_smp_initialized; 2192f2cb1360SIngo Molnar 2193391e43daSPeter Zijlstra #else /* CONFIG_SMP */ 2194391e43daSPeter Zijlstra 2195391e43daSPeter Zijlstra /* 2196391e43daSPeter Zijlstra * double_rq_lock - safely lock two runqueues 2197391e43daSPeter Zijlstra * 2198391e43daSPeter Zijlstra * Note this does not disable interrupts like task_rq_lock, 2199391e43daSPeter Zijlstra * you need to do so manually before calling. 
2200391e43daSPeter Zijlstra */ 2201391e43daSPeter Zijlstra static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) 2202391e43daSPeter Zijlstra __acquires(rq1->lock) 2203391e43daSPeter Zijlstra __acquires(rq2->lock) 2204391e43daSPeter Zijlstra { 2205391e43daSPeter Zijlstra BUG_ON(!irqs_disabled()); 2206391e43daSPeter Zijlstra BUG_ON(rq1 != rq2); 2207391e43daSPeter Zijlstra raw_spin_lock(&rq1->lock); 2208391e43daSPeter Zijlstra __acquire(rq2->lock); /* Fake it out ;) */ 2209391e43daSPeter Zijlstra } 2210391e43daSPeter Zijlstra 2211391e43daSPeter Zijlstra /* 2212391e43daSPeter Zijlstra * double_rq_unlock - safely unlock two runqueues 2213391e43daSPeter Zijlstra * 2214391e43daSPeter Zijlstra * Note this does not restore interrupts like task_rq_unlock, 2215391e43daSPeter Zijlstra * you need to do so manually after calling. 2216391e43daSPeter Zijlstra */ 2217391e43daSPeter Zijlstra static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) 2218391e43daSPeter Zijlstra __releases(rq1->lock) 2219391e43daSPeter Zijlstra __releases(rq2->lock) 2220391e43daSPeter Zijlstra { 2221391e43daSPeter Zijlstra BUG_ON(rq1 != rq2); 2222391e43daSPeter Zijlstra raw_spin_unlock(&rq1->lock); 2223391e43daSPeter Zijlstra __release(rq2->lock); 2224391e43daSPeter Zijlstra } 2225391e43daSPeter Zijlstra 2226391e43daSPeter Zijlstra #endif 2227391e43daSPeter Zijlstra 2228391e43daSPeter Zijlstra extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq); 2229391e43daSPeter Zijlstra extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq); 22306b55c965SSrikar Dronamraju 22316b55c965SSrikar Dronamraju #ifdef CONFIG_SCHED_DEBUG 22329469eb01SPeter Zijlstra extern bool sched_debug_enabled; 22339469eb01SPeter Zijlstra 2234391e43daSPeter Zijlstra extern void print_cfs_stats(struct seq_file *m, int cpu); 2235391e43daSPeter Zijlstra extern void print_rt_stats(struct seq_file *m, int cpu); 2236acb32132SWanpeng Li extern void print_dl_stats(struct seq_file *m, int cpu); 2237f6a34630SMathieu Malaterre extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq); 2238f6a34630SMathieu Malaterre extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq); 2239f6a34630SMathieu Malaterre extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq); 2240397f2378SSrikar Dronamraju #ifdef CONFIG_NUMA_BALANCING 2241397f2378SSrikar Dronamraju extern void 2242397f2378SSrikar Dronamraju show_numa_stats(struct task_struct *p, struct seq_file *m); 2243397f2378SSrikar Dronamraju extern void 2244397f2378SSrikar Dronamraju print_numa_stats(struct seq_file *m, int node, unsigned long tsf, 2245397f2378SSrikar Dronamraju unsigned long tpf, unsigned long gsf, unsigned long gpf); 2246397f2378SSrikar Dronamraju #endif /* CONFIG_NUMA_BALANCING */ 2247397f2378SSrikar Dronamraju #endif /* CONFIG_SCHED_DEBUG */ 2248391e43daSPeter Zijlstra 2249391e43daSPeter Zijlstra extern void init_cfs_rq(struct cfs_rq *cfs_rq); 225007c54f7aSAbel Vesa extern void init_rt_rq(struct rt_rq *rt_rq); 225107c54f7aSAbel Vesa extern void init_dl_rq(struct dl_rq *dl_rq); 2252391e43daSPeter Zijlstra 22531ee14e6cSBen Segall extern void cfs_bandwidth_usage_inc(void); 22541ee14e6cSBen Segall extern void cfs_bandwidth_usage_dec(void); 22551c792db7SSuresh Siddha 22563451d024SFrederic Weisbecker #ifdef CONFIG_NO_HZ_COMMON 225700357f5eSPeter Zijlstra #define NOHZ_BALANCE_KICK_BIT 0 225800357f5eSPeter Zijlstra #define NOHZ_STATS_KICK_BIT 1 2259a22e47a4SPeter Zijlstra 2260a22e47a4SPeter Zijlstra #define 
NOHZ_BALANCE_KICK BIT(NOHZ_BALANCE_KICK_BIT) 2261b7031a02SPeter Zijlstra #define NOHZ_STATS_KICK BIT(NOHZ_STATS_KICK_BIT) 2262b7031a02SPeter Zijlstra 2263b7031a02SPeter Zijlstra #define NOHZ_KICK_MASK (NOHZ_BALANCE_KICK | NOHZ_STATS_KICK) 22641c792db7SSuresh Siddha 22651c792db7SSuresh Siddha #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags) 226620a5c8ccSThomas Gleixner 226700357f5eSPeter Zijlstra extern void nohz_balance_exit_idle(struct rq *rq); 226820a5c8ccSThomas Gleixner #else 226900357f5eSPeter Zijlstra static inline void nohz_balance_exit_idle(struct rq *rq) { } 22701c792db7SSuresh Siddha #endif 227173fbec60SFrederic Weisbecker 2272daec5798SLuca Abeni 2273daec5798SLuca Abeni #ifdef CONFIG_SMP 2274daec5798SLuca Abeni static inline 2275daec5798SLuca Abeni void __dl_update(struct dl_bw *dl_b, s64 bw) 2276daec5798SLuca Abeni { 2277daec5798SLuca Abeni struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw); 2278daec5798SLuca Abeni int i; 2279daec5798SLuca Abeni 2280daec5798SLuca Abeni RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(), 2281daec5798SLuca Abeni "sched RCU must be held"); 2282daec5798SLuca Abeni for_each_cpu_and(i, rd->span, cpu_active_mask) { 2283daec5798SLuca Abeni struct rq *rq = cpu_rq(i); 2284daec5798SLuca Abeni 2285daec5798SLuca Abeni rq->dl.extra_bw += bw; 2286daec5798SLuca Abeni } 2287daec5798SLuca Abeni } 2288daec5798SLuca Abeni #else 2289daec5798SLuca Abeni static inline 2290daec5798SLuca Abeni void __dl_update(struct dl_bw *dl_b, s64 bw) 2291daec5798SLuca Abeni { 2292daec5798SLuca Abeni struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw); 2293daec5798SLuca Abeni 2294daec5798SLuca Abeni dl->extra_bw += bw; 2295daec5798SLuca Abeni } 2296daec5798SLuca Abeni #endif 2297daec5798SLuca Abeni 2298daec5798SLuca Abeni 229973fbec60SFrederic Weisbecker #ifdef CONFIG_IRQ_TIME_ACCOUNTING 230019d23dbfSFrederic Weisbecker struct irqtime { 230125e2d8c1SFrederic Weisbecker u64 total; 2302a499a5a1SFrederic Weisbecker u64 tick_delta; 230319d23dbfSFrederic Weisbecker u64 irq_start_time; 230419d23dbfSFrederic Weisbecker struct u64_stats_sync sync; 230519d23dbfSFrederic Weisbecker }; 230673fbec60SFrederic Weisbecker 230719d23dbfSFrederic Weisbecker DECLARE_PER_CPU(struct irqtime, cpu_irqtime); 230873fbec60SFrederic Weisbecker 230925e2d8c1SFrederic Weisbecker /* 231025e2d8c1SFrederic Weisbecker * Returns the irqtime minus the softirq time computed by ksoftirqd. 231125e2d8c1SFrederic Weisbecker * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime 231225e2d8c1SFrederic Weisbecker * subtracted and would never move forward. 231325e2d8c1SFrederic Weisbecker */ 231473fbec60SFrederic Weisbecker static inline u64 irq_time_read(int cpu) 231573fbec60SFrederic Weisbecker { 231619d23dbfSFrederic Weisbecker struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu); 231719d23dbfSFrederic Weisbecker unsigned int seq; 231819d23dbfSFrederic Weisbecker u64 total; 231973fbec60SFrederic Weisbecker 232073fbec60SFrederic Weisbecker do { 232119d23dbfSFrederic Weisbecker seq = __u64_stats_fetch_begin(&irqtime->sync); 232225e2d8c1SFrederic Weisbecker total = irqtime->total; 232319d23dbfSFrederic Weisbecker } while (__u64_stats_fetch_retry(&irqtime->sync, seq)); 232473fbec60SFrederic Weisbecker 232519d23dbfSFrederic Weisbecker return total; 232673fbec60SFrederic Weisbecker } 232773fbec60SFrederic Weisbecker #endif /* CONFIG_IRQ_TIME_ACCOUNTING */ 2328adaf9fcdSRafael J. Wysocki
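#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * A minimal sketch of the writer side that pairs with the retry loop in
 * irq_time_read() above, modeled on the accounting code in
 * kernel/sched/cputime.c. The seqcount in ->sync is what lets readers on
 * 32-bit machines retry instead of observing a torn 64-bit update.
 * example_irqtime_add() is a made-up name for illustration.
 */
static inline void example_irqtime_add(u64 delta)
{
	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);

	u64_stats_update_begin(&irqtime->sync);
	irqtime->total += delta;
	irqtime->tick_delta += delta;
	u64_stats_update_end(&irqtime->sync);
}
#endif
2329adaf9fcdSRafael J.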
Wysocki #ifdef CONFIG_CPU_FREQ 2330b10abd0aSJoel Fernandes (Google) DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data); 2331adaf9fcdSRafael J. Wysocki 2332adaf9fcdSRafael J. Wysocki /** 2333adaf9fcdSRafael J. Wysocki * cpufreq_update_util - Take a note about CPU utilization changes. 233412bde33dSRafael J. Wysocki * @rq: Runqueue to carry out the update for. 233558919e83SRafael J. Wysocki * @flags: Update reason flags. 2336adaf9fcdSRafael J. Wysocki * 233758919e83SRafael J. Wysocki * This function is called by the scheduler on the CPU whose utilization is 233858919e83SRafael J. Wysocki * being updated. 2339adaf9fcdSRafael J. Wysocki * 2340adaf9fcdSRafael J. Wysocki * It can only be called from RCU-sched read-side critical sections. 2341adaf9fcdSRafael J. Wysocki * 2342adaf9fcdSRafael J. Wysocki * The way cpufreq is currently arranged requires it to evaluate the CPU 2343adaf9fcdSRafael J. Wysocki * performance state (frequency/voltage) on a regular basis to prevent it from 2344adaf9fcdSRafael J. Wysocki * being stuck in a completely inadequate performance level for too long. 2345e0367b12SJuri Lelli * That is not guaranteed to happen if the updates are only triggered from CFS 2346e0367b12SJuri Lelli * and DL, though, because they may not be coming in if only RT tasks are 2347e0367b12SJuri Lelli * active all the time (or there are RT tasks only). 2348adaf9fcdSRafael J. Wysocki * 2349e0367b12SJuri Lelli * As a workaround for that issue, this function is called periodically by the 2350e0367b12SJuri Lelli * RT sched class to trigger extra cpufreq updates to prevent it from stalling, 2351adaf9fcdSRafael J. Wysocki * but that really is a band-aid. Going forward it should be replaced with 2352e0367b12SJuri Lelli * solutions targeted more specifically at RT tasks. 2353adaf9fcdSRafael J. Wysocki */ 235412bde33dSRafael J. Wysocki static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) 2355adaf9fcdSRafael J. Wysocki { 235658919e83SRafael J. Wysocki struct update_util_data *data; 235758919e83SRafael J. Wysocki 2358674e7541SViresh Kumar data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data, 2359674e7541SViresh Kumar cpu_of(rq))); 236058919e83SRafael J. Wysocki if (data) 236112bde33dSRafael J. Wysocki data->func(data, rq_clock(rq), flags); 236212bde33dSRafael J. Wysocki } 2363adaf9fcdSRafael J. Wysocki #else 236412bde33dSRafael J. Wysocki static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {} 2365adaf9fcdSRafael J. Wysocki #endif /* CONFIG_CPU_FREQ */ 2366be53f58fSLinus Torvalds 2367982d9cdcSPatrick Bellasi #ifdef CONFIG_UCLAMP_TASK 2368686516b5SValentin Schneider unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id); 23699d20ad7dSPatrick Bellasi 2370*46609ce2SQais Yousef /** 2371*46609ce2SQais Yousef * uclamp_rq_util_with - clamp @util with @rq and @p effective uclamp values. 2372*46609ce2SQais Yousef * @rq: The rq to clamp against. Must not be NULL. 2373*46609ce2SQais Yousef * @util: The util value to clamp. 2374*46609ce2SQais Yousef * @p: The task to clamp against. Can be NULL if you want to clamp 2375*46609ce2SQais Yousef * against @rq only. 2376*46609ce2SQais Yousef * 2377*46609ce2SQais Yousef * Clamps the passed @util to the max(@rq, @p) effective uclamp values. 
2378*46609ce2SQais Yousef * 2379*46609ce2SQais Yousef * If the sched_uclamp_used static key is disabled, just return the util 2380*46609ce2SQais Yousef * without any clamping, since uclamp aggregation at the rq level in the fast 2381*46609ce2SQais Yousef * path is disabled, rendering this operation a NOP. 2382*46609ce2SQais Yousef * 2383*46609ce2SQais Yousef * Use uclamp_eff_value() if you don't care about uclamp values at the rq 2384*46609ce2SQais Yousef * level. It will return the correct effective uclamp value of the task even 2385*46609ce2SQais Yousef * if the static key is disabled. 2386*46609ce2SQais Yousef */ 23879d20ad7dSPatrick Bellasi static __always_inline 2388d2b58a28SValentin Schneider unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, 23899d20ad7dSPatrick Bellasi struct task_struct *p) 2390982d9cdcSPatrick Bellasi { 2391*46609ce2SQais Yousef unsigned long min_util; 2392*46609ce2SQais Yousef unsigned long max_util; 2393*46609ce2SQais Yousef 2394*46609ce2SQais Yousef if (!static_branch_likely(&sched_uclamp_used)) 2395*46609ce2SQais Yousef return util; 2396*46609ce2SQais Yousef 2397*46609ce2SQais Yousef min_util = READ_ONCE(rq->uclamp[UCLAMP_MIN].value); 2398*46609ce2SQais Yousef max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value); 2399982d9cdcSPatrick Bellasi 24009d20ad7dSPatrick Bellasi if (p) { 24019d20ad7dSPatrick Bellasi min_util = max(min_util, uclamp_eff_value(p, UCLAMP_MIN)); 24029d20ad7dSPatrick Bellasi max_util = max(max_util, uclamp_eff_value(p, UCLAMP_MAX)); 24039d20ad7dSPatrick Bellasi } 24049d20ad7dSPatrick Bellasi 2405982d9cdcSPatrick Bellasi /* 2406982d9cdcSPatrick Bellasi * Since CPU's {min,max}_util clamps are MAX aggregated considering 2407982d9cdcSPatrick Bellasi * RUNNABLE tasks with _different_ clamps, we can end up with an 2408982d9cdcSPatrick Bellasi * inversion. Fix it now when the clamps are applied. 2409982d9cdcSPatrick Bellasi */ 2410982d9cdcSPatrick Bellasi if (unlikely(min_util >= max_util)) 2411982d9cdcSPatrick Bellasi return min_util; 2412982d9cdcSPatrick Bellasi 2413982d9cdcSPatrick Bellasi return clamp(util, min_util, max_util); 2414982d9cdcSPatrick Bellasi } 2415*46609ce2SQais Yousef 2416*46609ce2SQais Yousef /* 2417*46609ce2SQais Yousef * When uclamp is compiled in, the aggregation at rq level is 'turned off' 2418*46609ce2SQais Yousef * by default in the fast path and only gets turned on once userspace performs 2419*46609ce2SQais Yousef * an operation that requires it. 2420*46609ce2SQais Yousef * 2421*46609ce2SQais Yousef * Returns true if userspace has opted in to use uclamp, in which case rq-level 2422*46609ce2SQais Yousef * aggregation is active. 2423*46609ce2SQais Yousef */ 2424*46609ce2SQais Yousef static inline bool uclamp_is_used(void) 2425*46609ce2SQais Yousef { 2426*46609ce2SQais Yousef return static_branch_likely(&sched_uclamp_used); 2427*46609ce2SQais Yousef } 2428982d9cdcSPatrick Bellasi #else /* CONFIG_UCLAMP_TASK */ 2429d2b58a28SValentin Schneider static inline 2430d2b58a28SValentin Schneider unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, 24319d20ad7dSPatrick Bellasi struct task_struct *p) 24329d20ad7dSPatrick Bellasi { 24339d20ad7dSPatrick Bellasi return util; 24349d20ad7dSPatrick Bellasi } 2435*46609ce2SQais Yousef 2436*46609ce2SQais Yousef static inline bool uclamp_is_used(void) 2437*46609ce2SQais Yousef { 2438*46609ce2SQais Yousef return false; 2439*46609ce2SQais Yousef } 2440982d9cdcSPatrick Bellasi #endif /* CONFIG_UCLAMP_TASK */ 2441982d9cdcSPatrick Bellasi
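/*
 * A minimal usage sketch (made-up function name): clamping a utilization
 * value for frequency selection. Passing @p clamps @util against both the
 * rq-wide aggregate and the task's effective clamps; passing NULL clamps
 * against the rq alone. uclamp_rq_util_with() already checks the static
 * key internally; the explicit test below only demonstrates
 * uclamp_is_used().
 */
static inline unsigned long example_freq_util(struct rq *rq, unsigned long util,
					      struct task_struct *p)
{
	if (!uclamp_is_used())
		return util;	/* rq-level aggregation is off: nothing to do */

	return uclamp_rq_util_with(rq, util, p);
}
24429bdcb44eSRafael J.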
Wysocki #ifdef arch_scale_freq_capacity 24439bdcb44eSRafael J. Wysocki # ifndef arch_scale_freq_invariant 244497fb7a0aSIngo Molnar # define arch_scale_freq_invariant() true 24459bdcb44eSRafael J. Wysocki # endif 244697fb7a0aSIngo Molnar #else 244797fb7a0aSIngo Molnar # define arch_scale_freq_invariant() false 24489bdcb44eSRafael J. Wysocki #endif 2449d4edd662SJuri Lelli 245010a35e68SVincent Guittot #ifdef CONFIG_SMP 245110a35e68SVincent Guittot static inline unsigned long capacity_orig_of(int cpu) 245210a35e68SVincent Guittot { 245310a35e68SVincent Guittot return cpu_rq(cpu)->cpu_capacity_orig; 245410a35e68SVincent Guittot } 245510a35e68SVincent Guittot #endif 245610a35e68SVincent Guittot 2457938e5e4bSQuentin Perret /** 2458938e5e4bSQuentin Perret * enum schedutil_type - CPU utilization type 2459938e5e4bSQuentin Perret * @FREQUENCY_UTIL: Utilization used to select frequency 2460938e5e4bSQuentin Perret * @ENERGY_UTIL: Utilization used during energy calculation 2461938e5e4bSQuentin Perret * 2462938e5e4bSQuentin Perret * The utilization signals of all scheduling classes (CFS/RT/DL) and IRQ time 2463938e5e4bSQuentin Perret * need to be aggregated differently depending on the usage made of them. This 2464938e5e4bSQuentin Perret * enum is used within schedutil_cpu_util() to differentiate the types of 2465938e5e4bSQuentin Perret * utilization expected by the callers, and adjust the aggregation accordingly. 2466938e5e4bSQuentin Perret */ 2467938e5e4bSQuentin Perret enum schedutil_type { 2468938e5e4bSQuentin Perret FREQUENCY_UTIL, 2469938e5e4bSQuentin Perret ENERGY_UTIL, 2470938e5e4bSQuentin Perret }; 2471938e5e4bSQuentin Perret 2472af24bde8SPatrick Bellasi #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL 2473938e5e4bSQuentin Perret 2474af24bde8SPatrick Bellasi unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs, 2475af24bde8SPatrick Bellasi unsigned long max, enum schedutil_type type, 2476af24bde8SPatrick Bellasi struct task_struct *p); 2477938e5e4bSQuentin Perret 24788cc90515SVincent Guittot static inline unsigned long cpu_bw_dl(struct rq *rq) 2479d4edd662SJuri Lelli { 2480d4edd662SJuri Lelli return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT; 2481d4edd662SJuri Lelli } 2482d4edd662SJuri Lelli 24838cc90515SVincent Guittot static inline unsigned long cpu_util_dl(struct rq *rq) 24848cc90515SVincent Guittot { 24858cc90515SVincent Guittot return READ_ONCE(rq->avg_dl.util_avg); 24868cc90515SVincent Guittot } 24878cc90515SVincent Guittot 2488d4edd662SJuri Lelli static inline unsigned long cpu_util_cfs(struct rq *rq) 2489d4edd662SJuri Lelli { 2490a07630b8SPatrick Bellasi unsigned long util = READ_ONCE(rq->cfs.avg.util_avg); 2491a07630b8SPatrick Bellasi 2492a07630b8SPatrick Bellasi if (sched_feat(UTIL_EST)) { 2493a07630b8SPatrick Bellasi util = max_t(unsigned long, util, 2494a07630b8SPatrick Bellasi READ_ONCE(rq->cfs.avg.util_est.enqueued)); 2495a07630b8SPatrick Bellasi } 2496a07630b8SPatrick Bellasi 2497a07630b8SPatrick Bellasi return util; 2498d4edd662SJuri Lelli } 2499371bf427SVincent Guittot 2500371bf427SVincent Guittot static inline unsigned long cpu_util_rt(struct rq *rq) 2501371bf427SVincent Guittot { 2502dfa444dcSVincent Guittot return READ_ONCE(rq->avg_rt.util_avg); 2503371bf427SVincent Guittot } 2504938e5e4bSQuentin Perret #else /* CONFIG_CPU_FREQ_GOV_SCHEDUTIL */ 2505af24bde8SPatrick Bellasi static inline unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs, 2506af24bde8SPatrick Bellasi unsigned long max, enum schedutil_type type, 2507af24bde8SPatrick Bellasi struct
task_struct *p) 2508938e5e4bSQuentin Perret { 2509af24bde8SPatrick Bellasi return 0; 2510938e5e4bSQuentin Perret } 2511af24bde8SPatrick Bellasi #endif /* CONFIG_CPU_FREQ_GOV_SCHEDUTIL */ 25129033ea11SVincent Guittot 251311d4afd4SVincent Guittot #ifdef CONFIG_HAVE_SCHED_AVG_IRQ 25149033ea11SVincent Guittot static inline unsigned long cpu_util_irq(struct rq *rq) 25159033ea11SVincent Guittot { 25169033ea11SVincent Guittot return rq->avg_irq.util_avg; 25179033ea11SVincent Guittot } 25182e62c474SVincent Guittot 25192e62c474SVincent Guittot static inline 25202e62c474SVincent Guittot unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max) 25212e62c474SVincent Guittot { 25222e62c474SVincent Guittot util *= (max - irq); 25232e62c474SVincent Guittot util /= max; 25242e62c474SVincent Guittot 25252e62c474SVincent Guittot return util; 25262e62c474SVincent Guittot 25272e62c474SVincent Guittot } 25289033ea11SVincent Guittot #else 25299033ea11SVincent Guittot static inline unsigned long cpu_util_irq(struct rq *rq) 25309033ea11SVincent Guittot { 25319033ea11SVincent Guittot return 0; 25329033ea11SVincent Guittot } 25339033ea11SVincent Guittot 25342e62c474SVincent Guittot static inline 25352e62c474SVincent Guittot unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max) 25362e62c474SVincent Guittot { 25372e62c474SVincent Guittot return util; 25382e62c474SVincent Guittot } 2539794a56ebSJuri Lelli #endif 25406aa140faSQuentin Perret 2541531b5c9fSQuentin Perret #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) 25421f74de87SQuentin Perret 2543f8a696f2SPeter Zijlstra #define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus))) 2544f8a696f2SPeter Zijlstra 2545f8a696f2SPeter Zijlstra DECLARE_STATIC_KEY_FALSE(sched_energy_present); 2546f8a696f2SPeter Zijlstra 2547f8a696f2SPeter Zijlstra static inline bool sched_energy_enabled(void) 2548f8a696f2SPeter Zijlstra { 2549f8a696f2SPeter Zijlstra return static_branch_unlikely(&sched_energy_present); 2550f8a696f2SPeter Zijlstra } 2551f8a696f2SPeter Zijlstra 2552f8a696f2SPeter Zijlstra #else /* ! (CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */ 2553f8a696f2SPeter Zijlstra 2554f8a696f2SPeter Zijlstra #define perf_domain_span(pd) NULL 2555f8a696f2SPeter Zijlstra static inline bool sched_energy_enabled(void) { return false; } 2556f8a696f2SPeter Zijlstra 2557f8a696f2SPeter Zijlstra #endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */ 2558227a4aadSMathieu Desnoyers 2559227a4aadSMathieu Desnoyers #ifdef CONFIG_MEMBARRIER 2560227a4aadSMathieu Desnoyers /* 2561227a4aadSMathieu Desnoyers * The scheduler provides memory barriers required by membarrier between: 2562227a4aadSMathieu Desnoyers * - prior user-space memory accesses and store to rq->membarrier_state, 2563227a4aadSMathieu Desnoyers * - store to rq->membarrier_state and following user-space memory accesses. 2564227a4aadSMathieu Desnoyers * In the same way it provides those guarantees around store to rq->curr. 
2565227a4aadSMathieu Desnoyers */ 2566227a4aadSMathieu Desnoyers static inline void membarrier_switch_mm(struct rq *rq, 2567227a4aadSMathieu Desnoyers struct mm_struct *prev_mm, 2568227a4aadSMathieu Desnoyers struct mm_struct *next_mm) 2569227a4aadSMathieu Desnoyers { 2570227a4aadSMathieu Desnoyers int membarrier_state; 2571227a4aadSMathieu Desnoyers 2572227a4aadSMathieu Desnoyers if (prev_mm == next_mm) 2573227a4aadSMathieu Desnoyers return; 2574227a4aadSMathieu Desnoyers 2575227a4aadSMathieu Desnoyers membarrier_state = atomic_read(&next_mm->membarrier_state); 2576227a4aadSMathieu Desnoyers if (READ_ONCE(rq->membarrier_state) == membarrier_state) 2577227a4aadSMathieu Desnoyers return; 2578227a4aadSMathieu Desnoyers 2579227a4aadSMathieu Desnoyers WRITE_ONCE(rq->membarrier_state, membarrier_state); 2580227a4aadSMathieu Desnoyers } 2581227a4aadSMathieu Desnoyers #else 2582227a4aadSMathieu Desnoyers static inline void membarrier_switch_mm(struct rq *rq, 2583227a4aadSMathieu Desnoyers struct mm_struct *prev_mm, 2584227a4aadSMathieu Desnoyers struct mm_struct *next_mm) 2585227a4aadSMathieu Desnoyers { 2586227a4aadSMathieu Desnoyers } 2587227a4aadSMathieu Desnoyers #endif 258852262ee5SMel Gorman 258952262ee5SMel Gorman #ifdef CONFIG_SMP 259052262ee5SMel Gorman static inline bool is_per_cpu_kthread(struct task_struct *p) 259152262ee5SMel Gorman { 259252262ee5SMel Gorman if (!(p->flags & PF_KTHREAD)) 259352262ee5SMel Gorman return false; 259452262ee5SMel Gorman 259552262ee5SMel Gorman if (p->nr_cpus_allowed != 1) 259652262ee5SMel Gorman return false; 259752262ee5SMel Gorman 259852262ee5SMel Gorman return true; 259952262ee5SMel Gorman } 260052262ee5SMel Gorman #endif 2601b3212fe2SThomas Gleixner 2602b3212fe2SThomas Gleixner void swake_up_all_locked(struct swait_queue_head *q); 2603b3212fe2SThomas Gleixner void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); 2604
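#ifdef CONFIG_SMP
/*
 * A minimal sketch (made-up helper, not kernel API): how a migration path
 * can use is_per_cpu_kthread() above to filter out kthreads that are
 * pinned to a single CPU and therefore must never be moved. Real callers
 * check more than this, e.g. whether the destination CPU is active.
 */
static inline bool example_can_migrate(struct task_struct *p, int dest_cpu)
{
	/* PF_KTHREAD with a single allowed CPU: leave it where it is. */
	if (is_per_cpu_kthread(p))
		return false;

	return cpumask_test_cpu(dest_cpu, p->cpus_ptr);
}
#endif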