xref: /linux/kernel/sched/sched.h (revision c65eacbe290b8141554c71b2c94489e73ade8c8d)
1391e43daSPeter Zijlstra 
2391e43daSPeter Zijlstra #include <linux/sched.h>
3cf4aebc2SClark Williams #include <linux/sched/sysctl.h>
48bd75c77SClark Williams #include <linux/sched/rt.h>
5aab03e05SDario Faggioli #include <linux/sched/deadline.h>
63866e845SSteven Rostedt (Red Hat) #include <linux/binfmts.h>
7391e43daSPeter Zijlstra #include <linux/mutex.h>
8391e43daSPeter Zijlstra #include <linux/spinlock.h>
9391e43daSPeter Zijlstra #include <linux/stop_machine.h>
10b6366f04SSteven Rostedt #include <linux/irq_work.h>
119f3660c2SFrederic Weisbecker #include <linux/tick.h>
12f809ca9aSMel Gorman #include <linux/slab.h>
13391e43daSPeter Zijlstra 
14391e43daSPeter Zijlstra #include "cpupri.h"
156bfd6d72SJuri Lelli #include "cpudeadline.h"
1660fed789SLi Zefan #include "cpuacct.h"
17391e43daSPeter Zijlstra 
1845ceebf7SPaul Gortmaker struct rq;
19442bf3aaSDaniel Lezcano struct cpuidle_state;
2045ceebf7SPaul Gortmaker 
21da0c1e65SKirill Tkhai /* task_struct::on_rq states: */
22da0c1e65SKirill Tkhai #define TASK_ON_RQ_QUEUED	1
23cca26e80SKirill Tkhai #define TASK_ON_RQ_MIGRATING	2
24da0c1e65SKirill Tkhai 
25391e43daSPeter Zijlstra extern __read_mostly int scheduler_running;
26391e43daSPeter Zijlstra 
2745ceebf7SPaul Gortmaker extern unsigned long calc_load_update;
2845ceebf7SPaul Gortmaker extern atomic_long_t calc_load_tasks;
2945ceebf7SPaul Gortmaker 
303289bdb4SPeter Zijlstra extern void calc_global_load_tick(struct rq *this_rq);
31d60585c5SThomas Gleixner extern long calc_load_fold_active(struct rq *this_rq, long adjust);
323289bdb4SPeter Zijlstra 
333289bdb4SPeter Zijlstra #ifdef CONFIG_SMP
34cee1afceSFrederic Weisbecker extern void cpu_load_update_active(struct rq *this_rq);
353289bdb4SPeter Zijlstra #else
36cee1afceSFrederic Weisbecker static inline void cpu_load_update_active(struct rq *this_rq) { }
373289bdb4SPeter Zijlstra #endif
3845ceebf7SPaul Gortmaker 
39391e43daSPeter Zijlstra /*
40391e43daSPeter Zijlstra  * Helpers for converting nanosecond timing to jiffy resolution
41391e43daSPeter Zijlstra  */
42391e43daSPeter Zijlstra #define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
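/*
 * Illustrative example (not part of the original file): with HZ == 1000,
 * NSEC_PER_SEC / HZ == 1000000, so NS_TO_JIFFIES(5000000) == 5, i.e.
 * 5ms worth of nanoseconds maps to 5 jiffies.
 */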
43391e43daSPeter Zijlstra 
44cc1f4b1fSLi Zefan /*
45cc1f4b1fSLi Zefan  * Increase resolution of nice-level calculations for 64-bit architectures.
46cc1f4b1fSLi Zefan  * The extra resolution improves shares distribution and load balancing of
47cc1f4b1fSLi Zefan  * low-weight task groups (e.g. nice +19 on an autogroup), deeper taskgroup
48cc1f4b1fSLi Zefan  * hierarchies, especially on larger systems. This is not a user-visible change
49cc1f4b1fSLi Zefan  * and does not change the user-interface for setting shares/weights.
50cc1f4b1fSLi Zefan  *
51cc1f4b1fSLi Zefan  * We increase resolution only if we have enough bits to allow this increased
522159197dSPeter Zijlstra  * resolution (i.e. 64bit). The costs for increasing resolution on 32bit
532159197dSPeter Zijlstra  * are pretty high and the returns do not justify the increased costs.
542159197dSPeter Zijlstra  *
552159197dSPeter Zijlstra  * Really only required when CONFIG_FAIR_GROUP_SCHED is also set, but to
562159197dSPeter Zijlstra  * increase coverage and consistency always enable it on 64bit platforms.
57cc1f4b1fSLi Zefan  */
582159197dSPeter Zijlstra #ifdef CONFIG_64BIT
59172895e6SYuyang Du # define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
606ecdd749SYuyang Du # define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
616ecdd749SYuyang Du # define scale_load_down(w)	((w) >> SCHED_FIXEDPOINT_SHIFT)
62cc1f4b1fSLi Zefan #else
63172895e6SYuyang Du # define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
64cc1f4b1fSLi Zefan # define scale_load(w)		(w)
65cc1f4b1fSLi Zefan # define scale_load_down(w)	(w)
66cc1f4b1fSLi Zefan #endif
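/*
 * Worked example (illustrative; assumes SCHED_FIXEDPOINT_SHIFT == 10,
 * its usual value): on 64-bit, scale_load(1024) == 1024 << 10 == 1048576
 * and scale_load_down() recovers 1024; on 32-bit both macros are no-ops
 * and load values stay at the user-visible weight resolution.
 */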
67cc1f4b1fSLi Zefan 
686ecdd749SYuyang Du /*
69172895e6SYuyang Du  * Task weight (visible to users) and its load (invisible to users) have
70172895e6SYuyang Du  * independent resolution, but they should be well calibrated. We use
71172895e6SYuyang Du  * scale_load(w) and scale_load_down(w) to convert between them. The
72172895e6SYuyang Du  * following must be true:
73172895e6SYuyang Du  *
74172895e6SYuyang Du  *  scale_load(sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]) == NICE_0_LOAD
75172895e6SYuyang Du  *
766ecdd749SYuyang Du  */
77172895e6SYuyang Du #define NICE_0_LOAD		(1L << NICE_0_LOAD_SHIFT)
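/*
 * Illustrative consequence of the identity above (again assuming
 * SCHED_FIXEDPOINT_SHIFT == 10): the nice-0 entry of
 * sched_prio_to_weight[] is 1024, so NICE_0_LOAD is 1024 << 10 == 1 << 20
 * on 64-bit and plain 1024 on 32-bit.
 */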
78391e43daSPeter Zijlstra 
79391e43daSPeter Zijlstra /*
80332ac17eSDario Faggioli  * Single value that decides SCHED_DEADLINE internal math precision.
81332ac17eSDario Faggioli  * 10 -> just above 1us
82332ac17eSDario Faggioli  * 9  -> just above 0.5us
83332ac17eSDario Faggioli  */
84332ac17eSDario Faggioli #define DL_SCALE (10)
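/*
 * Example (illustrative): DL_SCALE == 10 means a granularity of
 * 1 << 10 == 1024ns, just above 1us; DL_SCALE == 9 would give 512ns,
 * just above 0.5us, matching the comment above.
 */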
85332ac17eSDario Faggioli 
86332ac17eSDario Faggioli /*
87391e43daSPeter Zijlstra  * These are the 'tuning knobs' of the scheduler:
88391e43daSPeter Zijlstra  */
89391e43daSPeter Zijlstra 
90391e43daSPeter Zijlstra /*
91391e43daSPeter Zijlstra  * Single value that denotes runtime == period, i.e. unlimited time.
92391e43daSPeter Zijlstra  */
93391e43daSPeter Zijlstra #define RUNTIME_INF	((u64)~0ULL)
94391e43daSPeter Zijlstra 
9520f9cd2aSHenrik Austad static inline int idle_policy(int policy)
9620f9cd2aSHenrik Austad {
9720f9cd2aSHenrik Austad 	return policy == SCHED_IDLE;
9820f9cd2aSHenrik Austad }
99d50dde5aSDario Faggioli static inline int fair_policy(int policy)
100d50dde5aSDario Faggioli {
101d50dde5aSDario Faggioli 	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
102d50dde5aSDario Faggioli }
103d50dde5aSDario Faggioli 
104391e43daSPeter Zijlstra static inline int rt_policy(int policy)
105391e43daSPeter Zijlstra {
106d50dde5aSDario Faggioli 	return policy == SCHED_FIFO || policy == SCHED_RR;
107391e43daSPeter Zijlstra }
108391e43daSPeter Zijlstra 
109aab03e05SDario Faggioli static inline int dl_policy(int policy)
110aab03e05SDario Faggioli {
111aab03e05SDario Faggioli 	return policy == SCHED_DEADLINE;
112aab03e05SDario Faggioli }
11320f9cd2aSHenrik Austad static inline bool valid_policy(int policy)
11420f9cd2aSHenrik Austad {
11520f9cd2aSHenrik Austad 	return idle_policy(policy) || fair_policy(policy) ||
11620f9cd2aSHenrik Austad 		rt_policy(policy) || dl_policy(policy);
11720f9cd2aSHenrik Austad }
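/*
 * Example (illustrative): valid_policy(SCHED_DEADLINE) is true via
 * dl_policy(), while an out-of-range value such as 42 fails all four
 * predicates and would be rejected.
 */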
118aab03e05SDario Faggioli 
119391e43daSPeter Zijlstra static inline int task_has_rt_policy(struct task_struct *p)
120391e43daSPeter Zijlstra {
121391e43daSPeter Zijlstra 	return rt_policy(p->policy);
122391e43daSPeter Zijlstra }
123391e43daSPeter Zijlstra 
124aab03e05SDario Faggioli static inline int task_has_dl_policy(struct task_struct *p)
125aab03e05SDario Faggioli {
126aab03e05SDario Faggioli 	return dl_policy(p->policy);
127aab03e05SDario Faggioli }
128aab03e05SDario Faggioli 
1292d3d891dSDario Faggioli /*
1302d3d891dSDario Faggioli  * Tells if entity @a should preempt entity @b.
1312d3d891dSDario Faggioli  */
132332ac17eSDario Faggioli static inline bool
133332ac17eSDario Faggioli dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
1342d3d891dSDario Faggioli {
1352d3d891dSDario Faggioli 	return dl_time_before(a->deadline, b->deadline);
1362d3d891dSDario Faggioli }
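/*
 * Example (illustrative): this is plain EDF. With a->deadline == 100000
 * and b->deadline == 200000 (absolute ns), dl_entity_preempt(a, b) is
 * true; dl_time_before() compares signed 64-bit differences so clock
 * wraparound is handled correctly.
 */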
1372d3d891dSDario Faggioli 
138391e43daSPeter Zijlstra /*
139391e43daSPeter Zijlstra  * This is the priority-queue data structure of the RT scheduling class:
140391e43daSPeter Zijlstra  */
141391e43daSPeter Zijlstra struct rt_prio_array {
142391e43daSPeter Zijlstra 	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
143391e43daSPeter Zijlstra 	struct list_head queue[MAX_RT_PRIO];
144391e43daSPeter Zijlstra };
145391e43daSPeter Zijlstra 
146391e43daSPeter Zijlstra struct rt_bandwidth {
147391e43daSPeter Zijlstra 	/* nests inside the rq lock: */
148391e43daSPeter Zijlstra 	raw_spinlock_t		rt_runtime_lock;
149391e43daSPeter Zijlstra 	ktime_t			rt_period;
150391e43daSPeter Zijlstra 	u64			rt_runtime;
151391e43daSPeter Zijlstra 	struct hrtimer		rt_period_timer;
1524cfafd30SPeter Zijlstra 	unsigned int		rt_period_active;
153391e43daSPeter Zijlstra };
154a5e7be3bSJuri Lelli 
155a5e7be3bSJuri Lelli void __dl_clear_params(struct task_struct *p);
156a5e7be3bSJuri Lelli 
157332ac17eSDario Faggioli /*
158332ac17eSDario Faggioli  * To keep the bandwidth of -deadline tasks and groups under control
159332ac17eSDario Faggioli  * we need some place where:
160332ac17eSDario Faggioli  *  - store the maximum -deadline bandwidth of the system (the group);
161332ac17eSDario Faggioli  *  - cache the fraction of that bandwidth that is currently allocated.
162332ac17eSDario Faggioli  *
163332ac17eSDario Faggioli  * This is all done in the data structure below. It is similar to the
164332ac17eSDario Faggioli  * one used for RT-throttling (rt_bandwidth), with the main difference
165332ac17eSDario Faggioli  * that, since here we are only interested in admission control, we
166332ac17eSDario Faggioli  * do not decrease any runtime while the group "executes", nor do we
167332ac17eSDario Faggioli  * need a timer to replenish it.
168332ac17eSDario Faggioli  *
169332ac17eSDario Faggioli  * With respect to SMP, the bandwidth is given on a per-CPU basis,
170332ac17eSDario Faggioli  * meaning that:
171332ac17eSDario Faggioli  *  - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
172332ac17eSDario Faggioli  *  - dl_total_bw array contains, in the i-th element, the currently
173332ac17eSDario Faggioli  *    allocated bandwidth on the i-th CPU.
174332ac17eSDario Faggioli  * Moreover, groups consume bandwidth on each CPU, while tasks only
175332ac17eSDario Faggioli  * consume bandwidth on the CPU they're running on.
176332ac17eSDario Faggioli  * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
177332ac17eSDario Faggioli  * that will be shown the next time the proc or cgroup controls are
178332ac17eSDario Faggioli  * read. It can in turn be changed by writing to its own control.
180332ac17eSDario Faggioli  */
181332ac17eSDario Faggioli struct dl_bandwidth {
182332ac17eSDario Faggioli 	raw_spinlock_t dl_runtime_lock;
183332ac17eSDario Faggioli 	u64 dl_runtime;
184332ac17eSDario Faggioli 	u64 dl_period;
185332ac17eSDario Faggioli };
186332ac17eSDario Faggioli 
187332ac17eSDario Faggioli static inline int dl_bandwidth_enabled(void)
188332ac17eSDario Faggioli {
1891724813dSPeter Zijlstra 	return sysctl_sched_rt_runtime >= 0;
190332ac17eSDario Faggioli }
191332ac17eSDario Faggioli 
192332ac17eSDario Faggioli extern struct dl_bw *dl_bw_of(int i);
193332ac17eSDario Faggioli 
194332ac17eSDario Faggioli struct dl_bw {
195332ac17eSDario Faggioli 	raw_spinlock_t lock;
196332ac17eSDario Faggioli 	u64 bw, total_bw;
197332ac17eSDario Faggioli };
198332ac17eSDario Faggioli 
1997f51412aSJuri Lelli static inline
2007f51412aSJuri Lelli void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw)
2017f51412aSJuri Lelli {
2027f51412aSJuri Lelli 	dl_b->total_bw -= tsk_bw;
2037f51412aSJuri Lelli }
2047f51412aSJuri Lelli 
2057f51412aSJuri Lelli static inline
2067f51412aSJuri Lelli void __dl_add(struct dl_bw *dl_b, u64 tsk_bw)
2077f51412aSJuri Lelli {
2087f51412aSJuri Lelli 	dl_b->total_bw += tsk_bw;
2097f51412aSJuri Lelli }
2107f51412aSJuri Lelli 
2117f51412aSJuri Lelli static inline
2127f51412aSJuri Lelli bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
2137f51412aSJuri Lelli {
2147f51412aSJuri Lelli 	return dl_b->bw != -1 &&
2157f51412aSJuri Lelli 	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
2167f51412aSJuri Lelli }
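/*
 * Worked example (illustrative numbers): with 2 CPUs and a per-CPU cap
 * of dl_b->bw == 900, total capacity is 1800. If total_bw == 1700 and a
 * task asks to go from old_bw == 100 to new_bw == 300, the test computes
 * 1700 - 100 + 300 == 1900 > 1800, so __dl_overflow() returns true and
 * admission must be denied. dl_b->bw == -1 disables the check entirely.
 */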
2177f51412aSJuri Lelli 
218391e43daSPeter Zijlstra extern struct mutex sched_domains_mutex;
219391e43daSPeter Zijlstra 
220391e43daSPeter Zijlstra #ifdef CONFIG_CGROUP_SCHED
221391e43daSPeter Zijlstra 
222391e43daSPeter Zijlstra #include <linux/cgroup.h>
223391e43daSPeter Zijlstra 
224391e43daSPeter Zijlstra struct cfs_rq;
225391e43daSPeter Zijlstra struct rt_rq;
226391e43daSPeter Zijlstra 
22735cf4e50SMike Galbraith extern struct list_head task_groups;
228391e43daSPeter Zijlstra 
229391e43daSPeter Zijlstra struct cfs_bandwidth {
230391e43daSPeter Zijlstra #ifdef CONFIG_CFS_BANDWIDTH
231391e43daSPeter Zijlstra 	raw_spinlock_t lock;
232391e43daSPeter Zijlstra 	ktime_t period;
233391e43daSPeter Zijlstra 	u64 quota, runtime;
2349c58c79aSZhihui Zhang 	s64 hierarchical_quota;
235391e43daSPeter Zijlstra 	u64 runtime_expires;
236391e43daSPeter Zijlstra 
2374cfafd30SPeter Zijlstra 	int idle, period_active;
238391e43daSPeter Zijlstra 	struct hrtimer period_timer, slack_timer;
239391e43daSPeter Zijlstra 	struct list_head throttled_cfs_rq;
240391e43daSPeter Zijlstra 
241391e43daSPeter Zijlstra 	/* statistics */
242391e43daSPeter Zijlstra 	int nr_periods, nr_throttled;
243391e43daSPeter Zijlstra 	u64 throttled_time;
244391e43daSPeter Zijlstra #endif
245391e43daSPeter Zijlstra };
246391e43daSPeter Zijlstra 
247391e43daSPeter Zijlstra /* task group related information */
248391e43daSPeter Zijlstra struct task_group {
249391e43daSPeter Zijlstra 	struct cgroup_subsys_state css;
250391e43daSPeter Zijlstra 
251391e43daSPeter Zijlstra #ifdef CONFIG_FAIR_GROUP_SCHED
252391e43daSPeter Zijlstra 	/* schedulable entities of this group on each cpu */
253391e43daSPeter Zijlstra 	struct sched_entity **se;
254391e43daSPeter Zijlstra 	/* runqueue "owned" by this group on each cpu */
255391e43daSPeter Zijlstra 	struct cfs_rq **cfs_rq;
256391e43daSPeter Zijlstra 	unsigned long shares;
257391e43daSPeter Zijlstra 
258fa6bddebSAlex Shi #ifdef	CONFIG_SMP
259b0367629SWaiman Long 	/*
260b0367629SWaiman Long 	 * load_avg can be heavily contended at clock tick time, so put
261b0367629SWaiman Long 	 * it in its own cacheline separated from the fields above which
262b0367629SWaiman Long 	 * will also be accessed at each tick.
263b0367629SWaiman Long 	 */
264b0367629SWaiman Long 	atomic_long_t load_avg ____cacheline_aligned;
265391e43daSPeter Zijlstra #endif
266fa6bddebSAlex Shi #endif
267391e43daSPeter Zijlstra 
268391e43daSPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED
269391e43daSPeter Zijlstra 	struct sched_rt_entity **rt_se;
270391e43daSPeter Zijlstra 	struct rt_rq **rt_rq;
271391e43daSPeter Zijlstra 
272391e43daSPeter Zijlstra 	struct rt_bandwidth rt_bandwidth;
273391e43daSPeter Zijlstra #endif
274391e43daSPeter Zijlstra 
275391e43daSPeter Zijlstra 	struct rcu_head rcu;
276391e43daSPeter Zijlstra 	struct list_head list;
277391e43daSPeter Zijlstra 
278391e43daSPeter Zijlstra 	struct task_group *parent;
279391e43daSPeter Zijlstra 	struct list_head siblings;
280391e43daSPeter Zijlstra 	struct list_head children;
281391e43daSPeter Zijlstra 
282391e43daSPeter Zijlstra #ifdef CONFIG_SCHED_AUTOGROUP
283391e43daSPeter Zijlstra 	struct autogroup *autogroup;
284391e43daSPeter Zijlstra #endif
285391e43daSPeter Zijlstra 
286391e43daSPeter Zijlstra 	struct cfs_bandwidth cfs_bandwidth;
287391e43daSPeter Zijlstra };
288391e43daSPeter Zijlstra 
289391e43daSPeter Zijlstra #ifdef CONFIG_FAIR_GROUP_SCHED
290391e43daSPeter Zijlstra #define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD
291391e43daSPeter Zijlstra 
292391e43daSPeter Zijlstra /*
293391e43daSPeter Zijlstra  * A weight of 0 or 1 can cause arithmetic problems.
294391e43daSPeter Zijlstra  * The weight of a cfs_rq is the sum of the weights of the entities
295391e43daSPeter Zijlstra  * queued on it, so the weight of an entity should not be too large,
296391e43daSPeter Zijlstra  * and neither should the shares value of a task group.
297391e43daSPeter Zijlstra  * (The default weight is 1024 - so there's no practical
298391e43daSPeter Zijlstra  *  limitation from this.)
299391e43daSPeter Zijlstra  */
300391e43daSPeter Zijlstra #define MIN_SHARES	(1UL <<  1)
301391e43daSPeter Zijlstra #define MAX_SHARES	(1UL << 18)
302391e43daSPeter Zijlstra #endif
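/*
 * Sketch (illustrative, not the authoritative implementation): setters
 * such as sched_group_set_shares() are expected to clamp user-supplied
 * values into this range, e.g.
 *
 *	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
 *
 * so writing 0 to a group's cpu.shares ends up as MIN_SHARES (2).
 */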
303391e43daSPeter Zijlstra 
304391e43daSPeter Zijlstra typedef int (*tg_visitor)(struct task_group *, void *);
305391e43daSPeter Zijlstra 
306391e43daSPeter Zijlstra extern int walk_tg_tree_from(struct task_group *from,
307391e43daSPeter Zijlstra 			     tg_visitor down, tg_visitor up, void *data);
308391e43daSPeter Zijlstra 
309391e43daSPeter Zijlstra /*
310391e43daSPeter Zijlstra  * Iterate the full tree, calling @down when first entering a node and @up when
311391e43daSPeter Zijlstra  * leaving it for the final time.
312391e43daSPeter Zijlstra  *
313391e43daSPeter Zijlstra  * Caller must hold rcu_lock or sufficient equivalent.
314391e43daSPeter Zijlstra  */
315391e43daSPeter Zijlstra static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
316391e43daSPeter Zijlstra {
317391e43daSPeter Zijlstra 	return walk_tg_tree_from(&root_task_group, down, up, data);
318391e43daSPeter Zijlstra }
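/*
 * Usage sketch (illustrative): a visitor returning nonzero aborts the
 * walk, and tg_nop() can stand in for an uninteresting direction:
 *
 *	rcu_read_lock();
 *	ret = walk_tg_tree(my_down_visitor, tg_nop, &my_data);
 *	rcu_read_unlock();
 *
 * where my_down_visitor and my_data are hypothetical caller-supplied
 * names.
 */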
319391e43daSPeter Zijlstra 
320391e43daSPeter Zijlstra extern int tg_nop(struct task_group *tg, void *data);
321391e43daSPeter Zijlstra 
322391e43daSPeter Zijlstra extern void free_fair_sched_group(struct task_group *tg);
323391e43daSPeter Zijlstra extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
3248663e24dSPeter Zijlstra extern void online_fair_sched_group(struct task_group *tg);
3256fe1f348SPeter Zijlstra extern void unregister_fair_sched_group(struct task_group *tg);
326391e43daSPeter Zijlstra extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
327391e43daSPeter Zijlstra 			struct sched_entity *se, int cpu,
328391e43daSPeter Zijlstra 			struct sched_entity *parent);
329391e43daSPeter Zijlstra extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
330391e43daSPeter Zijlstra 
331391e43daSPeter Zijlstra extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
33277a4d1a1SPeter Zijlstra extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
333391e43daSPeter Zijlstra extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
334391e43daSPeter Zijlstra 
335391e43daSPeter Zijlstra extern void free_rt_sched_group(struct task_group *tg);
336391e43daSPeter Zijlstra extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
337391e43daSPeter Zijlstra extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
338391e43daSPeter Zijlstra 		struct sched_rt_entity *rt_se, int cpu,
339391e43daSPeter Zijlstra 		struct sched_rt_entity *parent);
340391e43daSPeter Zijlstra 
34125cc7da7SLi Zefan extern struct task_group *sched_create_group(struct task_group *parent);
34225cc7da7SLi Zefan extern void sched_online_group(struct task_group *tg,
34325cc7da7SLi Zefan 			       struct task_group *parent);
34425cc7da7SLi Zefan extern void sched_destroy_group(struct task_group *tg);
34525cc7da7SLi Zefan extern void sched_offline_group(struct task_group *tg);
34625cc7da7SLi Zefan 
34725cc7da7SLi Zefan extern void sched_move_task(struct task_struct *tsk);
34825cc7da7SLi Zefan 
34925cc7da7SLi Zefan #ifdef CONFIG_FAIR_GROUP_SCHED
35025cc7da7SLi Zefan extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
351ad936d86SByungchul Park 
352ad936d86SByungchul Park #ifdef CONFIG_SMP
353ad936d86SByungchul Park extern void set_task_rq_fair(struct sched_entity *se,
354ad936d86SByungchul Park 			     struct cfs_rq *prev, struct cfs_rq *next);
355ad936d86SByungchul Park #else /* !CONFIG_SMP */
356ad936d86SByungchul Park static inline void set_task_rq_fair(struct sched_entity *se,
357ad936d86SByungchul Park 			     struct cfs_rq *prev, struct cfs_rq *next) { }
358ad936d86SByungchul Park #endif /* CONFIG_SMP */
359ad936d86SByungchul Park #endif /* CONFIG_FAIR_GROUP_SCHED */
36025cc7da7SLi Zefan 
361391e43daSPeter Zijlstra #else /* CONFIG_CGROUP_SCHED */
362391e43daSPeter Zijlstra 
363391e43daSPeter Zijlstra struct cfs_bandwidth { };
364391e43daSPeter Zijlstra 
365391e43daSPeter Zijlstra #endif	/* CONFIG_CGROUP_SCHED */
366391e43daSPeter Zijlstra 
367391e43daSPeter Zijlstra /* CFS-related fields in a runqueue */
368391e43daSPeter Zijlstra struct cfs_rq {
369391e43daSPeter Zijlstra 	struct load_weight load;
370c82513e5SPeter Zijlstra 	unsigned int nr_running, h_nr_running;
371391e43daSPeter Zijlstra 
372391e43daSPeter Zijlstra 	u64 exec_clock;
373391e43daSPeter Zijlstra 	u64 min_vruntime;
374391e43daSPeter Zijlstra #ifndef CONFIG_64BIT
375391e43daSPeter Zijlstra 	u64 min_vruntime_copy;
376391e43daSPeter Zijlstra #endif
377391e43daSPeter Zijlstra 
378391e43daSPeter Zijlstra 	struct rb_root tasks_timeline;
379391e43daSPeter Zijlstra 	struct rb_node *rb_leftmost;
380391e43daSPeter Zijlstra 
381391e43daSPeter Zijlstra 	/*
382391e43daSPeter Zijlstra 	 * 'curr' points to currently running entity on this cfs_rq.
383391e43daSPeter Zijlstra 	 * It is set to NULL otherwise (i.e when none are currently running).
384391e43daSPeter Zijlstra 	 */
385391e43daSPeter Zijlstra 	struct sched_entity *curr, *next, *last, *skip;
386391e43daSPeter Zijlstra 
387391e43daSPeter Zijlstra #ifdef	CONFIG_SCHED_DEBUG
388391e43daSPeter Zijlstra 	unsigned int nr_spread_over;
389391e43daSPeter Zijlstra #endif
390391e43daSPeter Zijlstra 
3912dac754eSPaul Turner #ifdef CONFIG_SMP
3922dac754eSPaul Turner 	/*
3939d89c257SYuyang Du 	 * CFS load tracking
3942dac754eSPaul Turner 	 */
3959d89c257SYuyang Du 	struct sched_avg avg;
39613962234SYuyang Du 	u64 runnable_load_sum;
39713962234SYuyang Du 	unsigned long runnable_load_avg;
3989d89c257SYuyang Du #ifdef CONFIG_FAIR_GROUP_SCHED
3999d89c257SYuyang Du 	unsigned long tg_load_avg_contrib;
4009d89c257SYuyang Du #endif
4019d89c257SYuyang Du 	atomic_long_t removed_load_avg, removed_util_avg;
4029d89c257SYuyang Du #ifndef CONFIG_64BIT
4039d89c257SYuyang Du 	u64 load_last_update_time_copy;
4049d89c257SYuyang Du #endif
405141965c7SAlex Shi 
406c566e8e9SPaul Turner #ifdef CONFIG_FAIR_GROUP_SCHED
40782958366SPaul Turner 	/*
40882958366SPaul Turner 	 *   h_load = weight * f(tg)
40982958366SPaul Turner 	 *
41082958366SPaul Turner 	 * Where f(tg) is the recursive weight fraction assigned to
41182958366SPaul Turner 	 * this group.
41282958366SPaul Turner 	 */
41382958366SPaul Turner 	unsigned long h_load;
41468520796SVladimir Davydov 	u64 last_h_load_update;
41568520796SVladimir Davydov 	struct sched_entity *h_load_next;
41668520796SVladimir Davydov #endif /* CONFIG_FAIR_GROUP_SCHED */
41782958366SPaul Turner #endif /* CONFIG_SMP */
41882958366SPaul Turner 
419391e43daSPeter Zijlstra #ifdef CONFIG_FAIR_GROUP_SCHED
420391e43daSPeter Zijlstra 	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */
421391e43daSPeter Zijlstra 
422391e43daSPeter Zijlstra 	/*
423391e43daSPeter Zijlstra 	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
424391e43daSPeter Zijlstra 	 * a hierarchy). Non-leaf cfs_rqs hold other, higher-level schedulable
425391e43daSPeter Zijlstra 	 * entities (like users, containers etc.)
426391e43daSPeter Zijlstra 	 *
427391e43daSPeter Zijlstra 	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
428391e43daSPeter Zijlstra 	 * list is used during load balance.
429391e43daSPeter Zijlstra 	 */
430391e43daSPeter Zijlstra 	int on_list;
431391e43daSPeter Zijlstra 	struct list_head leaf_cfs_rq_list;
432391e43daSPeter Zijlstra 	struct task_group *tg;	/* group that "owns" this runqueue */
433391e43daSPeter Zijlstra 
434391e43daSPeter Zijlstra #ifdef CONFIG_CFS_BANDWIDTH
435391e43daSPeter Zijlstra 	int runtime_enabled;
436391e43daSPeter Zijlstra 	u64 runtime_expires;
437391e43daSPeter Zijlstra 	s64 runtime_remaining;
438391e43daSPeter Zijlstra 
439f1b17280SPaul Turner 	u64 throttled_clock, throttled_clock_task;
440f1b17280SPaul Turner 	u64 throttled_clock_task_time;
44155e16d30SPeter Zijlstra 	int throttled, throttle_count;
442391e43daSPeter Zijlstra 	struct list_head throttled_list;
443391e43daSPeter Zijlstra #endif /* CONFIG_CFS_BANDWIDTH */
444391e43daSPeter Zijlstra #endif /* CONFIG_FAIR_GROUP_SCHED */
445391e43daSPeter Zijlstra };
446391e43daSPeter Zijlstra 
447391e43daSPeter Zijlstra static inline int rt_bandwidth_enabled(void)
448391e43daSPeter Zijlstra {
449391e43daSPeter Zijlstra 	return sysctl_sched_rt_runtime >= 0;
450391e43daSPeter Zijlstra }
451391e43daSPeter Zijlstra 
452b6366f04SSteven Rostedt /* RT IPI pull logic requires IRQ_WORK */
453b6366f04SSteven Rostedt #ifdef CONFIG_IRQ_WORK
454b6366f04SSteven Rostedt # define HAVE_RT_PUSH_IPI
455b6366f04SSteven Rostedt #endif
456b6366f04SSteven Rostedt 
457391e43daSPeter Zijlstra /* Real-Time classes' related field in a runqueue: */
458391e43daSPeter Zijlstra struct rt_rq {
459391e43daSPeter Zijlstra 	struct rt_prio_array active;
460c82513e5SPeter Zijlstra 	unsigned int rt_nr_running;
46101d36d0aSFrederic Weisbecker 	unsigned int rr_nr_running;
462391e43daSPeter Zijlstra #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
463391e43daSPeter Zijlstra 	struct {
464391e43daSPeter Zijlstra 		int curr; /* highest queued rt task prio */
465391e43daSPeter Zijlstra #ifdef CONFIG_SMP
466391e43daSPeter Zijlstra 		int next; /* next highest */
467391e43daSPeter Zijlstra #endif
468391e43daSPeter Zijlstra 	} highest_prio;
469391e43daSPeter Zijlstra #endif
470391e43daSPeter Zijlstra #ifdef CONFIG_SMP
471391e43daSPeter Zijlstra 	unsigned long rt_nr_migratory;
472391e43daSPeter Zijlstra 	unsigned long rt_nr_total;
473391e43daSPeter Zijlstra 	int overloaded;
474391e43daSPeter Zijlstra 	struct plist_head pushable_tasks;
475b6366f04SSteven Rostedt #ifdef HAVE_RT_PUSH_IPI
476b6366f04SSteven Rostedt 	int push_flags;
477b6366f04SSteven Rostedt 	int push_cpu;
478b6366f04SSteven Rostedt 	struct irq_work push_work;
479b6366f04SSteven Rostedt 	raw_spinlock_t push_lock;
480391e43daSPeter Zijlstra #endif
481b6366f04SSteven Rostedt #endif /* CONFIG_SMP */
482f4ebcbc0SKirill Tkhai 	int rt_queued;
483f4ebcbc0SKirill Tkhai 
484391e43daSPeter Zijlstra 	int rt_throttled;
485391e43daSPeter Zijlstra 	u64 rt_time;
486391e43daSPeter Zijlstra 	u64 rt_runtime;
487391e43daSPeter Zijlstra 	/* Nests inside the rq lock: */
488391e43daSPeter Zijlstra 	raw_spinlock_t rt_runtime_lock;
489391e43daSPeter Zijlstra 
490391e43daSPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED
491391e43daSPeter Zijlstra 	unsigned long rt_nr_boosted;
492391e43daSPeter Zijlstra 
493391e43daSPeter Zijlstra 	struct rq *rq;
494391e43daSPeter Zijlstra 	struct task_group *tg;
495391e43daSPeter Zijlstra #endif
496391e43daSPeter Zijlstra };
497391e43daSPeter Zijlstra 
498aab03e05SDario Faggioli /* Deadline class' related fields in a runqueue */
499aab03e05SDario Faggioli struct dl_rq {
500aab03e05SDario Faggioli 	/* runqueue is an rbtree, ordered by deadline */
501aab03e05SDario Faggioli 	struct rb_root rb_root;
502aab03e05SDario Faggioli 	struct rb_node *rb_leftmost;
503aab03e05SDario Faggioli 
504aab03e05SDario Faggioli 	unsigned long dl_nr_running;
5051baca4ceSJuri Lelli 
5061baca4ceSJuri Lelli #ifdef CONFIG_SMP
5071baca4ceSJuri Lelli 	/*
5081baca4ceSJuri Lelli 	 * Deadline values of the currently executing and the
5091baca4ceSJuri Lelli 	 * earliest ready task on this rq. Caching these facilitates
5101baca4ceSJuri Lelli 	 * the decision whether or not a ready but not running task
5111baca4ceSJuri Lelli 	 * should migrate somewhere else.
5121baca4ceSJuri Lelli 	 */
5131baca4ceSJuri Lelli 	struct {
5141baca4ceSJuri Lelli 		u64 curr;
5151baca4ceSJuri Lelli 		u64 next;
5161baca4ceSJuri Lelli 	} earliest_dl;
5171baca4ceSJuri Lelli 
5181baca4ceSJuri Lelli 	unsigned long dl_nr_migratory;
5191baca4ceSJuri Lelli 	int overloaded;
5201baca4ceSJuri Lelli 
5211baca4ceSJuri Lelli 	/*
5221baca4ceSJuri Lelli 	 * Tasks on this rq that can be pushed away. They are kept in
5231baca4ceSJuri Lelli 	 * an rb-tree, ordered by tasks' deadlines, with caching
5241baca4ceSJuri Lelli 	 * of the leftmost (earliest deadline) element.
5251baca4ceSJuri Lelli 	 */
5261baca4ceSJuri Lelli 	struct rb_root pushable_dl_tasks_root;
5271baca4ceSJuri Lelli 	struct rb_node *pushable_dl_tasks_leftmost;
528332ac17eSDario Faggioli #else
529332ac17eSDario Faggioli 	struct dl_bw dl_bw;
5301baca4ceSJuri Lelli #endif
531aab03e05SDario Faggioli };
532aab03e05SDario Faggioli 
533391e43daSPeter Zijlstra #ifdef CONFIG_SMP
534391e43daSPeter Zijlstra 
535391e43daSPeter Zijlstra /*
536391e43daSPeter Zijlstra  * We add the notion of a root-domain which will be used to define per-domain
537391e43daSPeter Zijlstra  * variables. Each exclusive cpuset essentially defines an island domain by
538391e43daSPeter Zijlstra  * fully partitioning the member cpus from any other cpuset. Whenever a new
539391e43daSPeter Zijlstra  * exclusive cpuset is created, we also create and attach a new root-domain
540391e43daSPeter Zijlstra  * object.
541391e43daSPeter Zijlstra  *
542391e43daSPeter Zijlstra  */
543391e43daSPeter Zijlstra struct root_domain {
544391e43daSPeter Zijlstra 	atomic_t refcount;
545391e43daSPeter Zijlstra 	atomic_t rto_count;
546391e43daSPeter Zijlstra 	struct rcu_head rcu;
547391e43daSPeter Zijlstra 	cpumask_var_t span;
548391e43daSPeter Zijlstra 	cpumask_var_t online;
549391e43daSPeter Zijlstra 
5504486edd1STim Chen 	/* Indicate more than one runnable task for any CPU */
5514486edd1STim Chen 	bool overload;
5524486edd1STim Chen 
553391e43daSPeter Zijlstra 	/*
5541baca4ceSJuri Lelli 	 * The bit corresponding to a CPU gets set here if such CPU has more
5551baca4ceSJuri Lelli 	 * than one runnable -deadline task (as it is below for RT tasks).
5561baca4ceSJuri Lelli 	 */
5571baca4ceSJuri Lelli 	cpumask_var_t dlo_mask;
5581baca4ceSJuri Lelli 	atomic_t dlo_count;
559332ac17eSDario Faggioli 	struct dl_bw dl_bw;
5606bfd6d72SJuri Lelli 	struct cpudl cpudl;
5611baca4ceSJuri Lelli 
5621baca4ceSJuri Lelli 	/*
563391e43daSPeter Zijlstra 	 * The "RT overload" flag: it gets set if a CPU has more than
564391e43daSPeter Zijlstra 	 * one runnable RT task.
565391e43daSPeter Zijlstra 	 */
566391e43daSPeter Zijlstra 	cpumask_var_t rto_mask;
567391e43daSPeter Zijlstra 	struct cpupri cpupri;
568391e43daSPeter Zijlstra };
569391e43daSPeter Zijlstra 
570391e43daSPeter Zijlstra extern struct root_domain def_root_domain;
571391e43daSPeter Zijlstra 
572391e43daSPeter Zijlstra #endif /* CONFIG_SMP */
573391e43daSPeter Zijlstra 
574391e43daSPeter Zijlstra /*
575391e43daSPeter Zijlstra  * This is the main, per-CPU runqueue data structure.
576391e43daSPeter Zijlstra  *
577391e43daSPeter Zijlstra  * Locking rule: in those places that want to lock multiple runqueues
578391e43daSPeter Zijlstra  * (such as the load balancing or the thread migration code), lock
579391e43daSPeter Zijlstra  * acquisitions must be ordered by ascending runqueue address.
580391e43daSPeter Zijlstra  */
581391e43daSPeter Zijlstra struct rq {
582391e43daSPeter Zijlstra 	/* runqueue lock: */
583391e43daSPeter Zijlstra 	raw_spinlock_t lock;
584391e43daSPeter Zijlstra 
585391e43daSPeter Zijlstra 	/*
586391e43daSPeter Zijlstra 	 * nr_running and cpu_load should be in the same cacheline because
587391e43daSPeter Zijlstra 	 * remote CPUs use both these fields when doing load calculation.
588391e43daSPeter Zijlstra 	 */
589c82513e5SPeter Zijlstra 	unsigned int nr_running;
5900ec8aa00SPeter Zijlstra #ifdef CONFIG_NUMA_BALANCING
5910ec8aa00SPeter Zijlstra 	unsigned int nr_numa_running;
5920ec8aa00SPeter Zijlstra 	unsigned int nr_preferred_running;
5930ec8aa00SPeter Zijlstra #endif
594391e43daSPeter Zijlstra 	#define CPU_LOAD_IDX_MAX 5
595391e43daSPeter Zijlstra 	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
5963451d024SFrederic Weisbecker #ifdef CONFIG_NO_HZ_COMMON
5979fd81dd5SFrederic Weisbecker #ifdef CONFIG_SMP
5989fd81dd5SFrederic Weisbecker 	unsigned long last_load_update_tick;
5999fd81dd5SFrederic Weisbecker #endif /* CONFIG_SMP */
600391e43daSPeter Zijlstra 	u64 nohz_stamp;
6011c792db7SSuresh Siddha 	unsigned long nohz_flags;
6029fd81dd5SFrederic Weisbecker #endif /* CONFIG_NO_HZ_COMMON */
603265f22a9SFrederic Weisbecker #ifdef CONFIG_NO_HZ_FULL
604265f22a9SFrederic Weisbecker 	unsigned long last_sched_tick;
605265f22a9SFrederic Weisbecker #endif
606391e43daSPeter Zijlstra 	/* capture load from *all* tasks on this cpu: */
607391e43daSPeter Zijlstra 	struct load_weight load;
608391e43daSPeter Zijlstra 	unsigned long nr_load_updates;
609391e43daSPeter Zijlstra 	u64 nr_switches;
610391e43daSPeter Zijlstra 
611391e43daSPeter Zijlstra 	struct cfs_rq cfs;
612391e43daSPeter Zijlstra 	struct rt_rq rt;
613aab03e05SDario Faggioli 	struct dl_rq dl;
614391e43daSPeter Zijlstra 
615391e43daSPeter Zijlstra #ifdef CONFIG_FAIR_GROUP_SCHED
616391e43daSPeter Zijlstra 	/* list of leaf cfs_rq on this cpu: */
617391e43daSPeter Zijlstra 	struct list_head leaf_cfs_rq_list;
618a35b6466SPeter Zijlstra #endif /* CONFIG_FAIR_GROUP_SCHED */
619a35b6466SPeter Zijlstra 
620391e43daSPeter Zijlstra 	/*
621391e43daSPeter Zijlstra 	 * This is part of a global counter where only the total sum
622391e43daSPeter Zijlstra 	 * over all CPUs matters. A task can increase this counter on
623391e43daSPeter Zijlstra 	 * one CPU and if it got migrated afterwards it may decrease
624391e43daSPeter Zijlstra 	 * it on another CPU. Always updated under the runqueue lock:
625391e43daSPeter Zijlstra 	 */
626391e43daSPeter Zijlstra 	unsigned long nr_uninterruptible;
627391e43daSPeter Zijlstra 
628391e43daSPeter Zijlstra 	struct task_struct *curr, *idle, *stop;
629391e43daSPeter Zijlstra 	unsigned long next_balance;
630391e43daSPeter Zijlstra 	struct mm_struct *prev_mm;
631391e43daSPeter Zijlstra 
6329edfbfedSPeter Zijlstra 	unsigned int clock_skip_update;
633391e43daSPeter Zijlstra 	u64 clock;
634391e43daSPeter Zijlstra 	u64 clock_task;
635391e43daSPeter Zijlstra 
636391e43daSPeter Zijlstra 	atomic_t nr_iowait;
637391e43daSPeter Zijlstra 
638391e43daSPeter Zijlstra #ifdef CONFIG_SMP
639391e43daSPeter Zijlstra 	struct root_domain *rd;
640391e43daSPeter Zijlstra 	struct sched_domain *sd;
641391e43daSPeter Zijlstra 
642ced549faSNicolas Pitre 	unsigned long cpu_capacity;
643ca6d75e6SVincent Guittot 	unsigned long cpu_capacity_orig;
644391e43daSPeter Zijlstra 
645e3fca9e7SPeter Zijlstra 	struct callback_head *balance_callback;
646e3fca9e7SPeter Zijlstra 
647391e43daSPeter Zijlstra 	unsigned char idle_balance;
648391e43daSPeter Zijlstra 	/* For active balancing */
649391e43daSPeter Zijlstra 	int active_balance;
650391e43daSPeter Zijlstra 	int push_cpu;
651391e43daSPeter Zijlstra 	struct cpu_stop_work active_balance_work;
652391e43daSPeter Zijlstra 	/* cpu of this runqueue: */
653391e43daSPeter Zijlstra 	int cpu;
654391e43daSPeter Zijlstra 	int online;
655391e43daSPeter Zijlstra 
656367456c7SPeter Zijlstra 	struct list_head cfs_tasks;
657367456c7SPeter Zijlstra 
658391e43daSPeter Zijlstra 	u64 rt_avg;
659391e43daSPeter Zijlstra 	u64 age_stamp;
660391e43daSPeter Zijlstra 	u64 idle_stamp;
661391e43daSPeter Zijlstra 	u64 avg_idle;
6629bd721c5SJason Low 
6639bd721c5SJason Low 	/* This is used to determine avg_idle's max value */
6649bd721c5SJason Low 	u64 max_idle_balance_cost;
665391e43daSPeter Zijlstra #endif
666391e43daSPeter Zijlstra 
667391e43daSPeter Zijlstra #ifdef CONFIG_IRQ_TIME_ACCOUNTING
668391e43daSPeter Zijlstra 	u64 prev_irq_time;
669391e43daSPeter Zijlstra #endif
670391e43daSPeter Zijlstra #ifdef CONFIG_PARAVIRT
671391e43daSPeter Zijlstra 	u64 prev_steal_time;
672391e43daSPeter Zijlstra #endif
673391e43daSPeter Zijlstra #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
674391e43daSPeter Zijlstra 	u64 prev_steal_time_rq;
675391e43daSPeter Zijlstra #endif
676391e43daSPeter Zijlstra 
677391e43daSPeter Zijlstra 	/* calc_load related fields */
678391e43daSPeter Zijlstra 	unsigned long calc_load_update;
679391e43daSPeter Zijlstra 	long calc_load_active;
680391e43daSPeter Zijlstra 
681391e43daSPeter Zijlstra #ifdef CONFIG_SCHED_HRTICK
682391e43daSPeter Zijlstra #ifdef CONFIG_SMP
683391e43daSPeter Zijlstra 	int hrtick_csd_pending;
684391e43daSPeter Zijlstra 	struct call_single_data hrtick_csd;
685391e43daSPeter Zijlstra #endif
686391e43daSPeter Zijlstra 	struct hrtimer hrtick_timer;
687391e43daSPeter Zijlstra #endif
688391e43daSPeter Zijlstra 
689391e43daSPeter Zijlstra #ifdef CONFIG_SCHEDSTATS
690391e43daSPeter Zijlstra 	/* latency stats */
691391e43daSPeter Zijlstra 	struct sched_info rq_sched_info;
692391e43daSPeter Zijlstra 	unsigned long long rq_cpu_time;
693391e43daSPeter Zijlstra 	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
694391e43daSPeter Zijlstra 
695391e43daSPeter Zijlstra 	/* sys_sched_yield() stats */
696391e43daSPeter Zijlstra 	unsigned int yld_count;
697391e43daSPeter Zijlstra 
698391e43daSPeter Zijlstra 	/* schedule() stats */
699391e43daSPeter Zijlstra 	unsigned int sched_count;
700391e43daSPeter Zijlstra 	unsigned int sched_goidle;
701391e43daSPeter Zijlstra 
702391e43daSPeter Zijlstra 	/* try_to_wake_up() stats */
703391e43daSPeter Zijlstra 	unsigned int ttwu_count;
704391e43daSPeter Zijlstra 	unsigned int ttwu_local;
705391e43daSPeter Zijlstra #endif
706391e43daSPeter Zijlstra 
707391e43daSPeter Zijlstra #ifdef CONFIG_SMP
708391e43daSPeter Zijlstra 	struct llist_head wake_list;
709391e43daSPeter Zijlstra #endif
710442bf3aaSDaniel Lezcano 
711442bf3aaSDaniel Lezcano #ifdef CONFIG_CPU_IDLE
712442bf3aaSDaniel Lezcano 	/* Must be inspected within a rcu lock section */
713442bf3aaSDaniel Lezcano 	struct cpuidle_state *idle_state;
714442bf3aaSDaniel Lezcano #endif
715391e43daSPeter Zijlstra };
716391e43daSPeter Zijlstra 
717391e43daSPeter Zijlstra static inline int cpu_of(struct rq *rq)
718391e43daSPeter Zijlstra {
719391e43daSPeter Zijlstra #ifdef CONFIG_SMP
720391e43daSPeter Zijlstra 	return rq->cpu;
721391e43daSPeter Zijlstra #else
722391e43daSPeter Zijlstra 	return 0;
723391e43daSPeter Zijlstra #endif
724391e43daSPeter Zijlstra }
725391e43daSPeter Zijlstra 
7268b06c55bSPranith Kumar DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
727391e43daSPeter Zijlstra 
728518cd623SPeter Zijlstra #define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
7294a32fea9SChristoph Lameter #define this_rq()		this_cpu_ptr(&runqueues)
730518cd623SPeter Zijlstra #define task_rq(p)		cpu_rq(task_cpu(p))
731518cd623SPeter Zijlstra #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
7324a32fea9SChristoph Lameter #define raw_rq()		raw_cpu_ptr(&runqueues)
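/*
 * Usage sketch (illustrative): cpu_rq(3) yields CPU 3's runqueue, while
 * this_rq() is only meaningful with preemption disabled, e.g.
 *
 *	preempt_disable();
 *	struct rq *rq = this_rq();
 *	...
 *	preempt_enable();
 *
 * since the task could otherwise migrate between evaluation and use.
 */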
733518cd623SPeter Zijlstra 
734cebde6d6SPeter Zijlstra static inline u64 __rq_clock_broken(struct rq *rq)
735cebde6d6SPeter Zijlstra {
736316c1608SJason Low 	return READ_ONCE(rq->clock);
737cebde6d6SPeter Zijlstra }
738cebde6d6SPeter Zijlstra 
73978becc27SFrederic Weisbecker static inline u64 rq_clock(struct rq *rq)
74078becc27SFrederic Weisbecker {
741cebde6d6SPeter Zijlstra 	lockdep_assert_held(&rq->lock);
74278becc27SFrederic Weisbecker 	return rq->clock;
74378becc27SFrederic Weisbecker }
74478becc27SFrederic Weisbecker 
74578becc27SFrederic Weisbecker static inline u64 rq_clock_task(struct rq *rq)
74678becc27SFrederic Weisbecker {
747cebde6d6SPeter Zijlstra 	lockdep_assert_held(&rq->lock);
74878becc27SFrederic Weisbecker 	return rq->clock_task;
74978becc27SFrederic Weisbecker }
75078becc27SFrederic Weisbecker 
7519edfbfedSPeter Zijlstra #define RQCF_REQ_SKIP	0x01
7529edfbfedSPeter Zijlstra #define RQCF_ACT_SKIP	0x02
7539edfbfedSPeter Zijlstra 
7549edfbfedSPeter Zijlstra static inline void rq_clock_skip_update(struct rq *rq, bool skip)
7559edfbfedSPeter Zijlstra {
7569edfbfedSPeter Zijlstra 	lockdep_assert_held(&rq->lock);
7579edfbfedSPeter Zijlstra 	if (skip)
7589edfbfedSPeter Zijlstra 		rq->clock_skip_update |= RQCF_REQ_SKIP;
7599edfbfedSPeter Zijlstra 	else
7609edfbfedSPeter Zijlstra 		rq->clock_skip_update &= ~RQCF_REQ_SKIP;
7619edfbfedSPeter Zijlstra }
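/*
 * Illustrative note: rq_clock_skip_update(rq, true) only *requests* a
 * skip (RQCF_REQ_SKIP); the scheduler core is what promotes the request
 * to RQCF_ACT_SKIP around __schedule(), so a clock update known to be
 * redundant is elided for a single pass.
 */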
7629edfbfedSPeter Zijlstra 
7639942f79bSRik van Riel #ifdef CONFIG_NUMA
764e3fe70b1SRik van Riel enum numa_topology_type {
765e3fe70b1SRik van Riel 	NUMA_DIRECT,
766e3fe70b1SRik van Riel 	NUMA_GLUELESS_MESH,
767e3fe70b1SRik van Riel 	NUMA_BACKPLANE,
768e3fe70b1SRik van Riel };
769e3fe70b1SRik van Riel extern enum numa_topology_type sched_numa_topology_type;
7709942f79bSRik van Riel extern int sched_max_numa_distance;
7719942f79bSRik van Riel extern bool find_numa_distance(int distance);
7729942f79bSRik van Riel #endif
7739942f79bSRik van Riel 
774f809ca9aSMel Gorman #ifdef CONFIG_NUMA_BALANCING
77544dba3d5SIulia Manda /* The regions in numa_faults array from task_struct */
77644dba3d5SIulia Manda enum numa_faults_stats {
77744dba3d5SIulia Manda 	NUMA_MEM = 0,
77844dba3d5SIulia Manda 	NUMA_CPU,
77944dba3d5SIulia Manda 	NUMA_MEMBUF,
78044dba3d5SIulia Manda 	NUMA_CPUBUF
78144dba3d5SIulia Manda };
7820ec8aa00SPeter Zijlstra extern void sched_setnuma(struct task_struct *p, int node);
783e6628d5bSMel Gorman extern int migrate_task_to(struct task_struct *p, int cpu);
784ac66f547SPeter Zijlstra extern int migrate_swap(struct task_struct *, struct task_struct *);
785f809ca9aSMel Gorman #endif /* CONFIG_NUMA_BALANCING */
786f809ca9aSMel Gorman 
787518cd623SPeter Zijlstra #ifdef CONFIG_SMP
788518cd623SPeter Zijlstra 
789e3fca9e7SPeter Zijlstra static inline void
790e3fca9e7SPeter Zijlstra queue_balance_callback(struct rq *rq,
791e3fca9e7SPeter Zijlstra 		       struct callback_head *head,
792e3fca9e7SPeter Zijlstra 		       void (*func)(struct rq *rq))
793e3fca9e7SPeter Zijlstra {
794e3fca9e7SPeter Zijlstra 	lockdep_assert_held(&rq->lock);
795e3fca9e7SPeter Zijlstra 
796e3fca9e7SPeter Zijlstra 	if (unlikely(head->next))
797e3fca9e7SPeter Zijlstra 		return;
798e3fca9e7SPeter Zijlstra 
799e3fca9e7SPeter Zijlstra 	head->func = (void (*)(struct callback_head *))func;
800e3fca9e7SPeter Zijlstra 	head->next = rq->balance_callback;
801e3fca9e7SPeter Zijlstra 	rq->balance_callback = head;
802e3fca9e7SPeter Zijlstra }
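/*
 * Usage sketch (illustrative): the RT class queues its push work from
 * under the rq lock with something like
 *
 *	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu),
 *			       push_rt_tasks);
 *
 * The queued callbacks then run when the rq lock is about to be
 * released, where it is safe to take other rq locks again.
 */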
803e3fca9e7SPeter Zijlstra 
804e3baac47SPeter Zijlstra extern void sched_ttwu_pending(void);
805e3baac47SPeter Zijlstra 
806391e43daSPeter Zijlstra #define rcu_dereference_check_sched_domain(p) \
807391e43daSPeter Zijlstra 	rcu_dereference_check((p), \
808391e43daSPeter Zijlstra 			      lockdep_is_held(&sched_domains_mutex))
809391e43daSPeter Zijlstra 
810391e43daSPeter Zijlstra /*
811391e43daSPeter Zijlstra  * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
812391e43daSPeter Zijlstra  * See detach_destroy_domains: synchronize_sched for details.
813391e43daSPeter Zijlstra  *
814391e43daSPeter Zijlstra  * The domain tree of any CPU may only be accessed from within
815391e43daSPeter Zijlstra  * preempt-disabled sections.
816391e43daSPeter Zijlstra  */
817391e43daSPeter Zijlstra #define for_each_domain(cpu, __sd) \
818518cd623SPeter Zijlstra 	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
819518cd623SPeter Zijlstra 			__sd; __sd = __sd->parent)
820391e43daSPeter Zijlstra 
82177e81365SSuresh Siddha #define for_each_lower_domain(sd) for (; sd; sd = sd->child)
82277e81365SSuresh Siddha 
823518cd623SPeter Zijlstra /**
824518cd623SPeter Zijlstra  * highest_flag_domain - Return highest sched_domain containing flag.
825518cd623SPeter Zijlstra  * @cpu:	The cpu whose highest level of sched domain is to
826518cd623SPeter Zijlstra  *		be returned.
827518cd623SPeter Zijlstra  * @flag:	The flag to check for the highest sched_domain
828518cd623SPeter Zijlstra  *		for the given cpu.
829518cd623SPeter Zijlstra  *
830518cd623SPeter Zijlstra  * Returns the highest sched_domain of a cpu which contains the given flag.
831518cd623SPeter Zijlstra  */
832518cd623SPeter Zijlstra static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
833518cd623SPeter Zijlstra {
834518cd623SPeter Zijlstra 	struct sched_domain *sd, *hsd = NULL;
835518cd623SPeter Zijlstra 
836518cd623SPeter Zijlstra 	for_each_domain(cpu, sd) {
837518cd623SPeter Zijlstra 		if (!(sd->flags & flag))
838518cd623SPeter Zijlstra 			break;
839518cd623SPeter Zijlstra 		hsd = sd;
840518cd623SPeter Zijlstra 	}
841518cd623SPeter Zijlstra 
842518cd623SPeter Zijlstra 	return hsd;
843518cd623SPeter Zijlstra }
844518cd623SPeter Zijlstra 
845fb13c7eeSMel Gorman static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
846fb13c7eeSMel Gorman {
847fb13c7eeSMel Gorman 	struct sched_domain *sd;
848fb13c7eeSMel Gorman 
849fb13c7eeSMel Gorman 	for_each_domain(cpu, sd) {
850fb13c7eeSMel Gorman 		if (sd->flags & flag)
851fb13c7eeSMel Gorman 			break;
852fb13c7eeSMel Gorman 	}
853fb13c7eeSMel Gorman 
854fb13c7eeSMel Gorman 	return sd;
855fb13c7eeSMel Gorman }
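/*
 * Example (illustrative): the per-CPU shortcut pointers declared below
 * are derived from these helpers when the domain tree is rebuilt, e.g.
 * sd_llc from highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES) and
 * sd_numa from lowest_flag_domain(cpu, SD_NUMA).
 */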
856fb13c7eeSMel Gorman 
857518cd623SPeter Zijlstra DECLARE_PER_CPU(struct sched_domain *, sd_llc);
8587d9ffa89SPeter Zijlstra DECLARE_PER_CPU(int, sd_llc_size);
859518cd623SPeter Zijlstra DECLARE_PER_CPU(int, sd_llc_id);
860fb13c7eeSMel Gorman DECLARE_PER_CPU(struct sched_domain *, sd_numa);
86137dc6b50SPreeti U Murthy DECLARE_PER_CPU(struct sched_domain *, sd_busy);
86237dc6b50SPreeti U Murthy DECLARE_PER_CPU(struct sched_domain *, sd_asym);
863518cd623SPeter Zijlstra 
86463b2ca30SNicolas Pitre struct sched_group_capacity {
8655e6521eaSLi Zefan 	atomic_t ref;
8665e6521eaSLi Zefan 	/*
867172895e6SYuyang Du 	 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
86863b2ca30SNicolas Pitre 	 * for a single CPU.
8695e6521eaSLi Zefan 	 */
870dc7ff76eSVincent Guittot 	unsigned int capacity;
8715e6521eaSLi Zefan 	unsigned long next_update;
87263b2ca30SNicolas Pitre 	int imbalance; /* XXX unrelated to capacity but shared group state */
8735e6521eaSLi Zefan 	/*
8745e6521eaSLi Zefan 	 * Number of busy cpus in this group.
8755e6521eaSLi Zefan 	 */
8765e6521eaSLi Zefan 	atomic_t nr_busy_cpus;
8775e6521eaSLi Zefan 
8785e6521eaSLi Zefan 	unsigned long cpumask[0]; /* iteration mask */
8795e6521eaSLi Zefan };
8805e6521eaSLi Zefan 
8815e6521eaSLi Zefan struct sched_group {
8825e6521eaSLi Zefan 	struct sched_group *next;	/* Must be a circular list */
8835e6521eaSLi Zefan 	atomic_t ref;
8845e6521eaSLi Zefan 
8855e6521eaSLi Zefan 	unsigned int group_weight;
88663b2ca30SNicolas Pitre 	struct sched_group_capacity *sgc;
8875e6521eaSLi Zefan 
8885e6521eaSLi Zefan 	/*
8895e6521eaSLi Zefan 	 * The CPUs this group covers.
8905e6521eaSLi Zefan 	 *
8915e6521eaSLi Zefan 	 * NOTE: this field is variable length. (Allocated dynamically
8925e6521eaSLi Zefan 	 * by attaching extra space to the end of the structure,
8935e6521eaSLi Zefan 	 * depending on how many CPUs the kernel has booted up with)
8945e6521eaSLi Zefan 	 */
8955e6521eaSLi Zefan 	unsigned long cpumask[0];
8965e6521eaSLi Zefan };
8975e6521eaSLi Zefan 
8985e6521eaSLi Zefan static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
8995e6521eaSLi Zefan {
9005e6521eaSLi Zefan 	return to_cpumask(sg->cpumask);
9015e6521eaSLi Zefan }
9025e6521eaSLi Zefan 
9035e6521eaSLi Zefan /*
9045e6521eaSLi Zefan  * Mask of the CPUs in the group that are allowed to iterate up the
9055e6521eaSLi Zefan  * domain tree.
9065e6521eaSLi Zefan  */
9075e6521eaSLi Zefan static inline struct cpumask *sched_group_mask(struct sched_group *sg)
9085e6521eaSLi Zefan {
90963b2ca30SNicolas Pitre 	return to_cpumask(sg->sgc->cpumask);
9105e6521eaSLi Zefan }
9115e6521eaSLi Zefan 
9125e6521eaSLi Zefan /**
9135e6521eaSLi Zefan  * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
9145e6521eaSLi Zefan  * @group: The group whose first cpu is to be returned.
9155e6521eaSLi Zefan  */
9165e6521eaSLi Zefan static inline unsigned int group_first_cpu(struct sched_group *group)
9175e6521eaSLi Zefan {
9185e6521eaSLi Zefan 	return cpumask_first(sched_group_cpus(group));
9195e6521eaSLi Zefan }
9205e6521eaSLi Zefan 
921c1174876SPeter Zijlstra extern int group_balance_cpu(struct sched_group *sg);
922c1174876SPeter Zijlstra 
9233866e845SSteven Rostedt (Red Hat) #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
9243866e845SSteven Rostedt (Red Hat) void register_sched_domain_sysctl(void);
9253866e845SSteven Rostedt (Red Hat) void unregister_sched_domain_sysctl(void);
9263866e845SSteven Rostedt (Red Hat) #else
9273866e845SSteven Rostedt (Red Hat) static inline void register_sched_domain_sysctl(void)
9283866e845SSteven Rostedt (Red Hat) {
9293866e845SSteven Rostedt (Red Hat) }
9303866e845SSteven Rostedt (Red Hat) static inline void unregister_sched_domain_sysctl(void)
9313866e845SSteven Rostedt (Red Hat) {
9323866e845SSteven Rostedt (Red Hat) }
9333866e845SSteven Rostedt (Red Hat) #endif
9343866e845SSteven Rostedt (Red Hat) 
935e3baac47SPeter Zijlstra #else
936e3baac47SPeter Zijlstra 
937e3baac47SPeter Zijlstra static inline void sched_ttwu_pending(void) { }
938e3baac47SPeter Zijlstra 
939518cd623SPeter Zijlstra #endif /* CONFIG_SMP */
940391e43daSPeter Zijlstra 
941391e43daSPeter Zijlstra #include "stats.h"
942391e43daSPeter Zijlstra #include "auto_group.h"
943391e43daSPeter Zijlstra 
944391e43daSPeter Zijlstra #ifdef CONFIG_CGROUP_SCHED
945391e43daSPeter Zijlstra 
946391e43daSPeter Zijlstra /*
947391e43daSPeter Zijlstra  * Return the group to which this task belongs.
948391e43daSPeter Zijlstra  *
9498af01f56STejun Heo  * We cannot use task_css() and friends because the cgroup subsystem
9508af01f56STejun Heo  * changes that value before the cgroup_subsys::attach() method is called,
9518af01f56STejun Heo  * therefore we cannot pin it and might observe the wrong value.
9528323f26cSPeter Zijlstra  *
9538323f26cSPeter Zijlstra  * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
9548323f26cSPeter Zijlstra  * core changes this before calling sched_move_task().
9558323f26cSPeter Zijlstra  *
9568323f26cSPeter Zijlstra  * Instead we use a 'copy' which is updated from sched_move_task() while
9578323f26cSPeter Zijlstra  * holding both task_struct::pi_lock and rq::lock.
958391e43daSPeter Zijlstra  */
959391e43daSPeter Zijlstra static inline struct task_group *task_group(struct task_struct *p)
960391e43daSPeter Zijlstra {
9618323f26cSPeter Zijlstra 	return p->sched_task_group;
962391e43daSPeter Zijlstra }
963391e43daSPeter Zijlstra 
964391e43daSPeter Zijlstra /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
965391e43daSPeter Zijlstra static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
966391e43daSPeter Zijlstra {
967391e43daSPeter Zijlstra #if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
968391e43daSPeter Zijlstra 	struct task_group *tg = task_group(p);
969391e43daSPeter Zijlstra #endif
970391e43daSPeter Zijlstra 
971391e43daSPeter Zijlstra #ifdef CONFIG_FAIR_GROUP_SCHED
972ad936d86SByungchul Park 	set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
973391e43daSPeter Zijlstra 	p->se.cfs_rq = tg->cfs_rq[cpu];
974391e43daSPeter Zijlstra 	p->se.parent = tg->se[cpu];
975391e43daSPeter Zijlstra #endif
976391e43daSPeter Zijlstra 
977391e43daSPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED
978391e43daSPeter Zijlstra 	p->rt.rt_rq  = tg->rt_rq[cpu];
979391e43daSPeter Zijlstra 	p->rt.parent = tg->rt_se[cpu];
980391e43daSPeter Zijlstra #endif
981391e43daSPeter Zijlstra }
982391e43daSPeter Zijlstra 
983391e43daSPeter Zijlstra #else /* CONFIG_CGROUP_SCHED */
984391e43daSPeter Zijlstra 
985391e43daSPeter Zijlstra static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
986391e43daSPeter Zijlstra static inline struct task_group *task_group(struct task_struct *p)
987391e43daSPeter Zijlstra {
988391e43daSPeter Zijlstra 	return NULL;
989391e43daSPeter Zijlstra }
990391e43daSPeter Zijlstra 
991391e43daSPeter Zijlstra #endif /* CONFIG_CGROUP_SCHED */
992391e43daSPeter Zijlstra 
993391e43daSPeter Zijlstra static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
994391e43daSPeter Zijlstra {
995391e43daSPeter Zijlstra 	set_task_rq(p, cpu);
996391e43daSPeter Zijlstra #ifdef CONFIG_SMP
997391e43daSPeter Zijlstra 	/*
998391e43daSPeter Zijlstra 	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
999391e43daSPeter Zijlstra 	 * successfully executed on another CPU. We must ensure that updates of
1000391e43daSPeter Zijlstra 	 * per-task data have been completed by this moment.
1001391e43daSPeter Zijlstra 	 */
1002391e43daSPeter Zijlstra 	smp_wmb();
1003*c65eacbeSAndy Lutomirski #ifdef CONFIG_THREAD_INFO_IN_TASK
1004*c65eacbeSAndy Lutomirski 	p->cpu = cpu;
1005*c65eacbeSAndy Lutomirski #else
1006391e43daSPeter Zijlstra 	task_thread_info(p)->cpu = cpu;
1007*c65eacbeSAndy Lutomirski #endif
1008ac66f547SPeter Zijlstra 	p->wake_cpu = cpu;
1009391e43daSPeter Zijlstra #endif
1010391e43daSPeter Zijlstra }
1011391e43daSPeter Zijlstra 
1012391e43daSPeter Zijlstra /*
1013391e43daSPeter Zijlstra  * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
1014391e43daSPeter Zijlstra  */
1015391e43daSPeter Zijlstra #ifdef CONFIG_SCHED_DEBUG
1016c5905afbSIngo Molnar # include <linux/static_key.h>
1017391e43daSPeter Zijlstra # define const_debug __read_mostly
1018391e43daSPeter Zijlstra #else
1019391e43daSPeter Zijlstra # define const_debug const
1020391e43daSPeter Zijlstra #endif
1021391e43daSPeter Zijlstra 
1022391e43daSPeter Zijlstra extern const_debug unsigned int sysctl_sched_features;
1023391e43daSPeter Zijlstra 
1024391e43daSPeter Zijlstra #define SCHED_FEAT(name, enabled)	\
1025391e43daSPeter Zijlstra 	__SCHED_FEAT_##name ,
1026391e43daSPeter Zijlstra 
1027391e43daSPeter Zijlstra enum {
1028391e43daSPeter Zijlstra #include "features.h"
1029f8b6d1ccSPeter Zijlstra 	__SCHED_FEAT_NR,
1030391e43daSPeter Zijlstra };
1031391e43daSPeter Zijlstra 
1032391e43daSPeter Zijlstra #undef SCHED_FEAT
1033391e43daSPeter Zijlstra 
1034f8b6d1ccSPeter Zijlstra #if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
1035f8b6d1ccSPeter Zijlstra #define SCHED_FEAT(name, enabled)					\
1036c5905afbSIngo Molnar static __always_inline bool static_branch_##name(struct static_key *key) \
1037f8b6d1ccSPeter Zijlstra {									\
10386e76ea8aSJason Baron 	return static_key_##enabled(key);				\
1039f8b6d1ccSPeter Zijlstra }
1040f8b6d1ccSPeter Zijlstra 
1041f8b6d1ccSPeter Zijlstra #include "features.h"
1042f8b6d1ccSPeter Zijlstra 
1043f8b6d1ccSPeter Zijlstra #undef SCHED_FEAT
1044f8b6d1ccSPeter Zijlstra 
1045c5905afbSIngo Molnar extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
1046f8b6d1ccSPeter Zijlstra #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
1047f8b6d1ccSPeter Zijlstra #else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
1048391e43daSPeter Zijlstra #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
1049f8b6d1ccSPeter Zijlstra #endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
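/*
 * Usage sketch (illustrative): whichever definition is in effect,
 * callers simply write
 *
 *	if (sched_feat(HRTICK))
 *		...;
 *
 * and get either a static-branch test (SCHED_DEBUG + jump labels) or a
 * constant-folded bitmask test against sysctl_sched_features.
 */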
1050391e43daSPeter Zijlstra 
10512a595721SSrikar Dronamraju extern struct static_key_false sched_numa_balancing;
1052cb251765SMel Gorman extern struct static_key_false sched_schedstats;
1053cbee9f88SPeter Zijlstra 
1054391e43daSPeter Zijlstra static inline u64 global_rt_period(void)
1055391e43daSPeter Zijlstra {
1056391e43daSPeter Zijlstra 	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
1057391e43daSPeter Zijlstra }
1058391e43daSPeter Zijlstra 
1059391e43daSPeter Zijlstra static inline u64 global_rt_runtime(void)
1060391e43daSPeter Zijlstra {
1061391e43daSPeter Zijlstra 	if (sysctl_sched_rt_runtime < 0)
1062391e43daSPeter Zijlstra 		return RUNTIME_INF;
1063391e43daSPeter Zijlstra 
1064391e43daSPeter Zijlstra 	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
1065391e43daSPeter Zijlstra }
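/*
 * Worked example (illustrative, using the usual defaults): with
 * sysctl_sched_rt_period == 1000000us and sysctl_sched_rt_runtime ==
 * 950000us, global_rt_period() is 1e9 ns and global_rt_runtime() is
 * 9.5e8 ns, i.e. RT tasks may consume at most 95% of each second;
 * a runtime sysctl of -1 maps to RUNTIME_INF.
 */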
1066391e43daSPeter Zijlstra 
1067391e43daSPeter Zijlstra static inline int task_current(struct rq *rq, struct task_struct *p)
1068391e43daSPeter Zijlstra {
1069391e43daSPeter Zijlstra 	return rq->curr == p;
1070391e43daSPeter Zijlstra }
1071391e43daSPeter Zijlstra 
1072391e43daSPeter Zijlstra static inline int task_running(struct rq *rq, struct task_struct *p)
1073391e43daSPeter Zijlstra {
1074391e43daSPeter Zijlstra #ifdef CONFIG_SMP
1075391e43daSPeter Zijlstra 	return p->on_cpu;
1076391e43daSPeter Zijlstra #else
1077391e43daSPeter Zijlstra 	return task_current(rq, p);
1078391e43daSPeter Zijlstra #endif
1079391e43daSPeter Zijlstra }
1080391e43daSPeter Zijlstra 
1081da0c1e65SKirill Tkhai static inline int task_on_rq_queued(struct task_struct *p)
1082da0c1e65SKirill Tkhai {
1083da0c1e65SKirill Tkhai 	return p->on_rq == TASK_ON_RQ_QUEUED;
1084da0c1e65SKirill Tkhai }
1085391e43daSPeter Zijlstra 
1086cca26e80SKirill Tkhai static inline int task_on_rq_migrating(struct task_struct *p)
1087cca26e80SKirill Tkhai {
1088cca26e80SKirill Tkhai 	return p->on_rq == TASK_ON_RQ_MIGRATING;
1089cca26e80SKirill Tkhai }
1090cca26e80SKirill Tkhai 
1091391e43daSPeter Zijlstra #ifndef prepare_arch_switch
1092391e43daSPeter Zijlstra # define prepare_arch_switch(next)	do { } while (0)
1093391e43daSPeter Zijlstra #endif
109401f23e16SCatalin Marinas #ifndef finish_arch_post_lock_switch
109501f23e16SCatalin Marinas # define finish_arch_post_lock_switch()	do { } while (0)
109601f23e16SCatalin Marinas #endif
1097391e43daSPeter Zijlstra 
1098391e43daSPeter Zijlstra static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
1099391e43daSPeter Zijlstra {
1100391e43daSPeter Zijlstra #ifdef CONFIG_SMP
1101391e43daSPeter Zijlstra 	/*
1102391e43daSPeter Zijlstra 	 * We can optimise this out completely for !SMP, because the
1103391e43daSPeter Zijlstra 	 * SMP rebalancing from interrupt is the only thing that cares
1104391e43daSPeter Zijlstra 	 * here.
1105391e43daSPeter Zijlstra 	 */
1106391e43daSPeter Zijlstra 	next->on_cpu = 1;
1107391e43daSPeter Zijlstra #endif
1108391e43daSPeter Zijlstra }
1109391e43daSPeter Zijlstra 
1110391e43daSPeter Zijlstra static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
1111391e43daSPeter Zijlstra {
1112391e43daSPeter Zijlstra #ifdef CONFIG_SMP
1113391e43daSPeter Zijlstra 	/*
1114391e43daSPeter Zijlstra 	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
1115391e43daSPeter Zijlstra 	 * We must ensure this doesn't happen until the switch is completely
1116391e43daSPeter Zijlstra 	 * finished.
111795913d97SPeter Zijlstra 	 *
1118b75a2253SPeter Zijlstra 	 * In particular, the load of prev->state in finish_task_switch() must
1119b75a2253SPeter Zijlstra 	 * happen before this.
1120b75a2253SPeter Zijlstra 	 *
11211f03e8d2SPeter Zijlstra 	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
1122391e43daSPeter Zijlstra 	 */
112395913d97SPeter Zijlstra 	smp_store_release(&prev->on_cpu, 0);
1124391e43daSPeter Zijlstra #endif
1125391e43daSPeter Zijlstra #ifdef CONFIG_DEBUG_SPINLOCK
1126391e43daSPeter Zijlstra 	/* this is a valid case when another task releases the spinlock */
1127391e43daSPeter Zijlstra 	rq->lock.owner = current;
1128391e43daSPeter Zijlstra #endif
1129391e43daSPeter Zijlstra 	/*
1130391e43daSPeter Zijlstra 	 * If we are tracking spinlock dependencies then we have to
1131391e43daSPeter Zijlstra 	 * fix up the runqueue lock - which gets 'carried over' from
1132391e43daSPeter Zijlstra 	 * prev into current:
1133391e43daSPeter Zijlstra 	 */
1134391e43daSPeter Zijlstra 	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
1135391e43daSPeter Zijlstra 
1136391e43daSPeter Zijlstra 	raw_spin_unlock_irq(&rq->lock);
1137391e43daSPeter Zijlstra }
1138391e43daSPeter Zijlstra 
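/*
 * Illustrative restatement of the ordering above: the store-release on
 * ->on_cpu pairs with the acquire load in try_to_wake_up(), roughly:
 *
 *	finish_lock_switch(prev)		try_to_wake_up(prev)
 *	  LOAD prev->state			  smp_cond_load_acquire(&prev->on_cpu, !VAL)
 *	  smp_store_release(&prev->on_cpu, 0)	  ...only now may prev be queued elsewhere
 */
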
1139b13095f0SLi Zefan /*
1140b13095f0SLi Zefan  * wake flags
1141b13095f0SLi Zefan  */
1142b13095f0SLi Zefan #define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
1143b13095f0SLi Zefan #define WF_FORK		0x02		/* child wakeup after fork */
1144b13095f0SLi Zefan #define WF_MIGRATED	0x04		/* internal use, task got migrated */
1145b13095f0SLi Zefan 
1146391e43daSPeter Zijlstra /*
1147391e43daSPeter Zijlstra  * To aid in avoiding the subversion of "niceness" due to uneven distribution
1148391e43daSPeter Zijlstra  * of tasks with abnormal "nice" values across CPUs, the contribution that
1149391e43daSPeter Zijlstra  * each task makes to its run queue's load is weighted according to its
1150391e43daSPeter Zijlstra  * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
1151391e43daSPeter Zijlstra  * scaled version of the new time slice allocation that they receive on time
1152391e43daSPeter Zijlstra  * slice expiry etc.
1153391e43daSPeter Zijlstra  */
1154391e43daSPeter Zijlstra 
1155391e43daSPeter Zijlstra #define WEIGHT_IDLEPRIO                3
1156391e43daSPeter Zijlstra #define WMULT_IDLEPRIO         1431655765
1157391e43daSPeter Zijlstra 
1158ed82b8a1SAndi Kleen extern const int sched_prio_to_weight[40];
1159ed82b8a1SAndi Kleen extern const u32 sched_prio_to_wmult[40];
1160391e43daSPeter Zijlstra 
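/*
 * How the two tables relate (illustrative note): each nice step scales
 * the weight by ~1.25x, and sched_prio_to_wmult[i] caches 2^32 divided by
 * sched_prio_to_weight[i] so the hot path can turn the division
 *
 *	delta_exec * weight / lw->weight
 *
 * into a multiply and shift:
 *
 *	(delta_exec * weight * lw->inv_weight) >> 32
 */
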
1161ff77e468SPeter Zijlstra /*
1162ff77e468SPeter Zijlstra  * {de,en}queue flags:
1163ff77e468SPeter Zijlstra  *
1164ff77e468SPeter Zijlstra  * DEQUEUE_SLEEP  - task is no longer runnable
1165ff77e468SPeter Zijlstra  * ENQUEUE_WAKEUP - task just became runnable
1166ff77e468SPeter Zijlstra  *
1167ff77e468SPeter Zijlstra  * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks
1168ff77e468SPeter Zijlstra  *                are in a known state which allows modification. Such pairs
1169ff77e468SPeter Zijlstra  *                should preserve as much state as possible.
1170ff77e468SPeter Zijlstra  *
1171ff77e468SPeter Zijlstra  * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
1172ff77e468SPeter Zijlstra  *        in the runqueue.
1173ff77e468SPeter Zijlstra  *
1174ff77e468SPeter Zijlstra  * ENQUEUE_HEAD      - place at front of runqueue (tail if not specified)
1175ff77e468SPeter Zijlstra  * ENQUEUE_REPLENISH - CBS (Constant Bandwidth Server: replenish runtime and postpone deadline)
117659efa0baSPeter Zijlstra  * ENQUEUE_MIGRATED  - the task was migrated during wakeup
1177ff77e468SPeter Zijlstra  *
1178ff77e468SPeter Zijlstra  */
1179ff77e468SPeter Zijlstra 
1180ff77e468SPeter Zijlstra #define DEQUEUE_SLEEP		0x01
1181ff77e468SPeter Zijlstra #define DEQUEUE_SAVE		0x02 /* matches ENQUEUE_RESTORE */
1182ff77e468SPeter Zijlstra #define DEQUEUE_MOVE		0x04 /* matches ENQUEUE_MOVE */
1183ff77e468SPeter Zijlstra 
11841de64443SPeter Zijlstra #define ENQUEUE_WAKEUP		0x01
1185ff77e468SPeter Zijlstra #define ENQUEUE_RESTORE		0x02
1186ff77e468SPeter Zijlstra #define ENQUEUE_MOVE		0x04
1187ff77e468SPeter Zijlstra 
1188ff77e468SPeter Zijlstra #define ENQUEUE_HEAD		0x08
1189ff77e468SPeter Zijlstra #define ENQUEUE_REPLENISH	0x10
1190c82ba9faSLi Zefan #ifdef CONFIG_SMP
119159efa0baSPeter Zijlstra #define ENQUEUE_MIGRATED	0x20
1192c82ba9faSLi Zefan #else
119359efa0baSPeter Zijlstra #define ENQUEUE_MIGRATED	0x00
1194c82ba9faSLi Zefan #endif
1195c82ba9faSLi Zefan 
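/*
 * Illustrative pairing (simplified sketch of how core.c changes task
 * attributes): SAVE/RESTORE and MOVE use equal values on the dequeue and
 * enqueue sides precisely so one flags word can drive both calls:
 *
 *	int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE;
 *
 *	dequeue_task(rq, p, queue_flags);
 *	...change p's scheduling attributes...
 *	enqueue_task(rq, p, queue_flags);	// reads as ENQUEUE_RESTORE|MOVE
 */
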
119637e117c0SPeter Zijlstra #define RETRY_TASK		((void *)-1UL)
119737e117c0SPeter Zijlstra 
1198c82ba9faSLi Zefan struct sched_class {
1199c82ba9faSLi Zefan 	const struct sched_class *next;
1200c82ba9faSLi Zefan 
1201c82ba9faSLi Zefan 	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
1202c82ba9faSLi Zefan 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
1203c82ba9faSLi Zefan 	void (*yield_task) (struct rq *rq);
1204c82ba9faSLi Zefan 	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);
1205c82ba9faSLi Zefan 
1206c82ba9faSLi Zefan 	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
1207c82ba9faSLi Zefan 
1208606dba2eSPeter Zijlstra 	/*
1209606dba2eSPeter Zijlstra 	 * It is the responsibility of the pick_next_task() method that will
1210606dba2eSPeter Zijlstra 	 * return the next task to call put_prev_task() on the @prev task or
1211606dba2eSPeter Zijlstra 	 * something equivalent.
121237e117c0SPeter Zijlstra 	 *
121337e117c0SPeter Zijlstra 	 * May return RETRY_TASK when it finds a higher prio class has runnable
121437e117c0SPeter Zijlstra 	 * tasks.
1215606dba2eSPeter Zijlstra 	 */
1216606dba2eSPeter Zijlstra 	struct task_struct * (*pick_next_task) (struct rq *rq,
1217e7904a28SPeter Zijlstra 						struct task_struct *prev,
1218e7904a28SPeter Zijlstra 						struct pin_cookie cookie);
1219c82ba9faSLi Zefan 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
1220c82ba9faSLi Zefan 
1221c82ba9faSLi Zefan #ifdef CONFIG_SMP
1222ac66f547SPeter Zijlstra 	int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
12235a4fd036Sxiaofeng.yan 	void (*migrate_task_rq)(struct task_struct *p);
1224c82ba9faSLi Zefan 
1225c82ba9faSLi Zefan 	void (*task_woken) (struct rq *this_rq, struct task_struct *task);
1226c82ba9faSLi Zefan 
1227c82ba9faSLi Zefan 	void (*set_cpus_allowed)(struct task_struct *p,
1228c82ba9faSLi Zefan 				 const struct cpumask *newmask);
1229c82ba9faSLi Zefan 
1230c82ba9faSLi Zefan 	void (*rq_online)(struct rq *rq);
1231c82ba9faSLi Zefan 	void (*rq_offline)(struct rq *rq);
1232c82ba9faSLi Zefan #endif
1233c82ba9faSLi Zefan 
1234c82ba9faSLi Zefan 	void (*set_curr_task) (struct rq *rq);
1235c82ba9faSLi Zefan 	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
1236c82ba9faSLi Zefan 	void (*task_fork) (struct task_struct *p);
1237e6c390f2SDario Faggioli 	void (*task_dead) (struct task_struct *p);
1238c82ba9faSLi Zefan 
123967dfa1b7SKirill Tkhai 	/*
124067dfa1b7SKirill Tkhai 	 * The switched_from() call is allowed to drop rq->lock, therefore we
124167dfa1b7SKirill Tkhai 	 * cannot assume the switched_from/switched_to pair is serialized by
124267dfa1b7SKirill Tkhai 	 * rq->lock. They are however serialized by p->pi_lock.
124367dfa1b7SKirill Tkhai 	 */
1244c82ba9faSLi Zefan 	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
1245c82ba9faSLi Zefan 	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
1246c82ba9faSLi Zefan 	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
1247c82ba9faSLi Zefan 			     int oldprio);
1248c82ba9faSLi Zefan 
1249c82ba9faSLi Zefan 	unsigned int (*get_rr_interval) (struct rq *rq,
1250c82ba9faSLi Zefan 					 struct task_struct *task);
1251c82ba9faSLi Zefan 
12526e998916SStanislaw Gruszka 	void (*update_curr) (struct rq *rq);
12536e998916SStanislaw Gruszka 
1254ea86cb4bSVincent Guittot #define TASK_SET_GROUP  0
1255ea86cb4bSVincent Guittot #define TASK_MOVE_GROUP	1
1256ea86cb4bSVincent Guittot 
1257c82ba9faSLi Zefan #ifdef CONFIG_FAIR_GROUP_SCHED
1258ea86cb4bSVincent Guittot 	void (*task_change_group) (struct task_struct *p, int type);
1259c82ba9faSLi Zefan #endif
1260c82ba9faSLi Zefan };
1261391e43daSPeter Zijlstra 
12623f1d2a31SPeter Zijlstra static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
12633f1d2a31SPeter Zijlstra {
12643f1d2a31SPeter Zijlstra 	prev->sched_class->put_prev_task(rq, prev);
12653f1d2a31SPeter Zijlstra }
12663f1d2a31SPeter Zijlstra 
1267391e43daSPeter Zijlstra #define sched_class_highest (&stop_sched_class)
1268391e43daSPeter Zijlstra #define for_each_class(class) \
1269391e43daSPeter Zijlstra    for (class = sched_class_highest; class; class = class->next)
1270391e43daSPeter Zijlstra 
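/*
 * Sketch of how the core scheduler consumes the class list (illustrative,
 * simplified from the slow path of pick_next_task() in core.c):
 *
 *	again:
 *		for_each_class(class) {
 *			p = class->pick_next_task(rq, prev, cookie);
 *			if (unlikely(p == RETRY_TASK))
 *				goto again;
 *			if (p)
 *				return p;
 *		}
 *		BUG();	// the idle class always has a task to run
 */
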
1271391e43daSPeter Zijlstra extern const struct sched_class stop_sched_class;
1272aab03e05SDario Faggioli extern const struct sched_class dl_sched_class;
1273391e43daSPeter Zijlstra extern const struct sched_class rt_sched_class;
1274391e43daSPeter Zijlstra extern const struct sched_class fair_sched_class;
1275391e43daSPeter Zijlstra extern const struct sched_class idle_sched_class;
1276391e43daSPeter Zijlstra 
1277391e43daSPeter Zijlstra 
1278391e43daSPeter Zijlstra #ifdef CONFIG_SMP
1279391e43daSPeter Zijlstra 
128063b2ca30SNicolas Pitre extern void update_group_capacity(struct sched_domain *sd, int cpu);
1281b719203bSLi Zefan 
12827caff66fSDaniel Lezcano extern void trigger_load_balance(struct rq *rq);
1283391e43daSPeter Zijlstra 
1284c5b28038SPeter Zijlstra extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
1285c5b28038SPeter Zijlstra 
1286391e43daSPeter Zijlstra #endif
1287391e43daSPeter Zijlstra 
1288442bf3aaSDaniel Lezcano #ifdef CONFIG_CPU_IDLE
1289442bf3aaSDaniel Lezcano static inline void idle_set_state(struct rq *rq,
1290442bf3aaSDaniel Lezcano 				  struct cpuidle_state *idle_state)
1291442bf3aaSDaniel Lezcano {
1292442bf3aaSDaniel Lezcano 	rq->idle_state = idle_state;
1293442bf3aaSDaniel Lezcano }
1294442bf3aaSDaniel Lezcano 
1295442bf3aaSDaniel Lezcano static inline struct cpuidle_state *idle_get_state(struct rq *rq)
1296442bf3aaSDaniel Lezcano {
1297442bf3aaSDaniel Lezcano 	WARN_ON(!rcu_read_lock_held());
1298442bf3aaSDaniel Lezcano 	return rq->idle_state;
1299442bf3aaSDaniel Lezcano }
1300442bf3aaSDaniel Lezcano #else
1301442bf3aaSDaniel Lezcano static inline void idle_set_state(struct rq *rq,
1302442bf3aaSDaniel Lezcano 				  struct cpuidle_state *idle_state)
1303442bf3aaSDaniel Lezcano {
1304442bf3aaSDaniel Lezcano }
1305442bf3aaSDaniel Lezcano 
1306442bf3aaSDaniel Lezcano static inline struct cpuidle_state *idle_get_state(struct rq *rq)
1307442bf3aaSDaniel Lezcano {
1308442bf3aaSDaniel Lezcano 	return NULL;
1309442bf3aaSDaniel Lezcano }
1310442bf3aaSDaniel Lezcano #endif
1311442bf3aaSDaniel Lezcano 
1312391e43daSPeter Zijlstra extern void sysrq_sched_debug_show(void);
1313391e43daSPeter Zijlstra extern void sched_init_granularity(void);
1314391e43daSPeter Zijlstra extern void update_max_interval(void);
13151baca4ceSJuri Lelli 
13161baca4ceSJuri Lelli extern void init_sched_dl_class(void);
1317391e43daSPeter Zijlstra extern void init_sched_rt_class(void);
1318391e43daSPeter Zijlstra extern void init_sched_fair_class(void);
1319391e43daSPeter Zijlstra 
13208875125eSKirill Tkhai extern void resched_curr(struct rq *rq);
1321391e43daSPeter Zijlstra extern void resched_cpu(int cpu);
1322391e43daSPeter Zijlstra 
1323391e43daSPeter Zijlstra extern struct rt_bandwidth def_rt_bandwidth;
1324391e43daSPeter Zijlstra extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
1325391e43daSPeter Zijlstra 
1326332ac17eSDario Faggioli extern struct dl_bandwidth def_dl_bandwidth;
1327332ac17eSDario Faggioli extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
1328aab03e05SDario Faggioli extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
1329aab03e05SDario Faggioli 
1330332ac17eSDario Faggioli unsigned long to_ratio(u64 period, u64 runtime);
1331332ac17eSDario Faggioli 
1332540247fbSYuyang Du extern void init_entity_runnable_average(struct sched_entity *se);
13332b8c41daSYuyang Du extern void post_init_entity_util_avg(struct sched_entity *se);
1334a75cdaa9SAlex Shi 
133576d92ac3SFrederic Weisbecker #ifdef CONFIG_NO_HZ_FULL
133676d92ac3SFrederic Weisbecker extern bool sched_can_stop_tick(struct rq *rq);
133776d92ac3SFrederic Weisbecker 
133876d92ac3SFrederic Weisbecker /*
133976d92ac3SFrederic Weisbecker  * Tick may be needed by tasks in the runqueue depending on their policy and
134076d92ac3SFrederic Weisbecker  * requirements. If the tick is needed, let's send the target CPU an IPI to
134176d92ac3SFrederic Weisbecker  * kick it out of nohz mode if necessary.
134276d92ac3SFrederic Weisbecker  */
134376d92ac3SFrederic Weisbecker static inline void sched_update_tick_dependency(struct rq *rq)
134476d92ac3SFrederic Weisbecker {
134576d92ac3SFrederic Weisbecker 	int cpu;
134676d92ac3SFrederic Weisbecker 
134776d92ac3SFrederic Weisbecker 	if (!tick_nohz_full_enabled())
134876d92ac3SFrederic Weisbecker 		return;
134976d92ac3SFrederic Weisbecker 
135076d92ac3SFrederic Weisbecker 	cpu = cpu_of(rq);
135176d92ac3SFrederic Weisbecker 
135276d92ac3SFrederic Weisbecker 	if (!tick_nohz_full_cpu(cpu))
135376d92ac3SFrederic Weisbecker 		return;
135476d92ac3SFrederic Weisbecker 
135576d92ac3SFrederic Weisbecker 	if (sched_can_stop_tick(rq))
135676d92ac3SFrederic Weisbecker 		tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
135776d92ac3SFrederic Weisbecker 	else
135876d92ac3SFrederic Weisbecker 		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
135976d92ac3SFrederic Weisbecker }
136076d92ac3SFrederic Weisbecker #else
136176d92ac3SFrederic Weisbecker static inline void sched_update_tick_dependency(struct rq *rq) { }
136276d92ac3SFrederic Weisbecker #endif
136376d92ac3SFrederic Weisbecker 
136472465447SKirill Tkhai static inline void add_nr_running(struct rq *rq, unsigned count)
1365391e43daSPeter Zijlstra {
136672465447SKirill Tkhai 	unsigned prev_nr = rq->nr_running;
136772465447SKirill Tkhai 
136872465447SKirill Tkhai 	rq->nr_running = prev_nr + count;
13699f3660c2SFrederic Weisbecker 
137072465447SKirill Tkhai 	if (prev_nr < 2 && rq->nr_running >= 2) {
13714486edd1STim Chen #ifdef CONFIG_SMP
13724486edd1STim Chen 		if (!rq->rd->overload)
13734486edd1STim Chen 			rq->rd->overload = true;
13744486edd1STim Chen #endif
137576d92ac3SFrederic Weisbecker 	}
13764486edd1STim Chen 
137776d92ac3SFrederic Weisbecker 	sched_update_tick_dependency(rq);
13784486edd1STim Chen }
1379391e43daSPeter Zijlstra 
138072465447SKirill Tkhai static inline void sub_nr_running(struct rq *rq, unsigned count)
1381391e43daSPeter Zijlstra {
138272465447SKirill Tkhai 	rq->nr_running -= count;
138376d92ac3SFrederic Weisbecker 	/* Check if we still need preemption */
138476d92ac3SFrederic Weisbecker 	sched_update_tick_dependency(rq);
1385391e43daSPeter Zijlstra }
1386391e43daSPeter Zijlstra 
1387265f22a9SFrederic Weisbecker static inline void rq_last_tick_reset(struct rq *rq)
1388265f22a9SFrederic Weisbecker {
1389265f22a9SFrederic Weisbecker #ifdef CONFIG_NO_HZ_FULL
1390265f22a9SFrederic Weisbecker 	rq->last_sched_tick = jiffies;
1391265f22a9SFrederic Weisbecker #endif
1392265f22a9SFrederic Weisbecker }
1393265f22a9SFrederic Weisbecker 
1394391e43daSPeter Zijlstra extern void update_rq_clock(struct rq *rq);
1395391e43daSPeter Zijlstra 
1396391e43daSPeter Zijlstra extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
1397391e43daSPeter Zijlstra extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
1398391e43daSPeter Zijlstra 
1399391e43daSPeter Zijlstra extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
1400391e43daSPeter Zijlstra 
1401391e43daSPeter Zijlstra extern const_debug unsigned int sysctl_sched_time_avg;
1402391e43daSPeter Zijlstra extern const_debug unsigned int sysctl_sched_nr_migrate;
1403391e43daSPeter Zijlstra extern const_debug unsigned int sysctl_sched_migration_cost;
1404391e43daSPeter Zijlstra 
1405391e43daSPeter Zijlstra static inline u64 sched_avg_period(void)
1406391e43daSPeter Zijlstra {
1407391e43daSPeter Zijlstra 	return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
1408391e43daSPeter Zijlstra }
1409391e43daSPeter Zijlstra 
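/*
 * Example (illustrative): with the default sysctl_sched_time_avg of
 * 1000 (ms), sched_avg_period() is 500ms in nanoseconds;
 * sched_avg_update() halves rq->rt_avg once per such period, yielding a
 * decaying average of time spent in RT tasks.
 */
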
1410391e43daSPeter Zijlstra #ifdef CONFIG_SCHED_HRTICK
1411391e43daSPeter Zijlstra 
1412391e43daSPeter Zijlstra /*
1413391e43daSPeter Zijlstra  * Use hrtick when:
1414391e43daSPeter Zijlstra  *  - enabled by features
1415391e43daSPeter Zijlstra  *  - hrtimer is actually high res
1416391e43daSPeter Zijlstra  */
1417391e43daSPeter Zijlstra static inline int hrtick_enabled(struct rq *rq)
1418391e43daSPeter Zijlstra {
1419391e43daSPeter Zijlstra 	if (!sched_feat(HRTICK))
1420391e43daSPeter Zijlstra 		return 0;
1421391e43daSPeter Zijlstra 	if (!cpu_active(cpu_of(rq)))
1422391e43daSPeter Zijlstra 		return 0;
1423391e43daSPeter Zijlstra 	return hrtimer_is_hres_active(&rq->hrtick_timer);
1424391e43daSPeter Zijlstra }
1425391e43daSPeter Zijlstra 
1426391e43daSPeter Zijlstra void hrtick_start(struct rq *rq, u64 delay);
1427391e43daSPeter Zijlstra 
1428b39e66eaSMike Galbraith #else
1429b39e66eaSMike Galbraith 
1430b39e66eaSMike Galbraith static inline int hrtick_enabled(struct rq *rq)
1431b39e66eaSMike Galbraith {
1432b39e66eaSMike Galbraith 	return 0;
1433b39e66eaSMike Galbraith }
1434b39e66eaSMike Galbraith 
1435391e43daSPeter Zijlstra #endif /* CONFIG_SCHED_HRTICK */
1436391e43daSPeter Zijlstra 
1437391e43daSPeter Zijlstra #ifdef CONFIG_SMP
1438391e43daSPeter Zijlstra extern void sched_avg_update(struct rq *rq);
1439dfbca41fSPeter Zijlstra 
1440dfbca41fSPeter Zijlstra #ifndef arch_scale_freq_capacity
1441dfbca41fSPeter Zijlstra static __always_inline
1442dfbca41fSPeter Zijlstra unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
1443dfbca41fSPeter Zijlstra {
1444dfbca41fSPeter Zijlstra 	return SCHED_CAPACITY_SCALE;
1445dfbca41fSPeter Zijlstra }
1446dfbca41fSPeter Zijlstra #endif
1447b5b4860dSVincent Guittot 
14488cd5601cSMorten Rasmussen #ifndef arch_scale_cpu_capacity
14498cd5601cSMorten Rasmussen static __always_inline
14508cd5601cSMorten Rasmussen unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
14518cd5601cSMorten Rasmussen {
1452e3279a2eSDietmar Eggemann 	if (sd && (sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
14538cd5601cSMorten Rasmussen 		return sd->smt_gain / sd->span_weight;
14548cd5601cSMorten Rasmussen 
14558cd5601cSMorten Rasmussen 	return SCHED_CAPACITY_SCALE;
14568cd5601cSMorten Rasmussen }
14578cd5601cSMorten Rasmussen #endif
14588cd5601cSMorten Rasmussen 
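/*
 * Worked example (illustrative, assuming the default smt_gain of 1178):
 * in a 2-way SMT sibling domain each hardware thread reports a capacity
 * of 1178 / 2 = 589, slightly more than half of SCHED_CAPACITY_SCALE
 * (1024), reflecting that two busy siblings together outperform a single
 * running thread on the core.
 */
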
1459391e43daSPeter Zijlstra static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1460391e43daSPeter Zijlstra {
1461b5b4860dSVincent Guittot 	rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq));
1462391e43daSPeter Zijlstra 	sched_avg_update(rq);
1463391e43daSPeter Zijlstra }
1464391e43daSPeter Zijlstra #else
1465391e43daSPeter Zijlstra static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
1466391e43daSPeter Zijlstra static inline void sched_avg_update(struct rq *rq) { }
1467391e43daSPeter Zijlstra #endif
1468391e43daSPeter Zijlstra 
1469eb580751SPeter Zijlstra struct rq_flags {
1470eb580751SPeter Zijlstra 	unsigned long flags;
1471e7904a28SPeter Zijlstra 	struct pin_cookie cookie;
1472eb580751SPeter Zijlstra };
1473eb580751SPeter Zijlstra 
1474eb580751SPeter Zijlstra struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
14753e71a462SPeter Zijlstra 	__acquires(rq->lock);
1476eb580751SPeter Zijlstra struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
14773960c8c0SPeter Zijlstra 	__acquires(p->pi_lock)
14783e71a462SPeter Zijlstra 	__acquires(rq->lock);
14793960c8c0SPeter Zijlstra 
1480eb580751SPeter Zijlstra static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
14813960c8c0SPeter Zijlstra 	__releases(rq->lock)
14823960c8c0SPeter Zijlstra {
1483e7904a28SPeter Zijlstra 	lockdep_unpin_lock(&rq->lock, rf->cookie);
14843960c8c0SPeter Zijlstra 	raw_spin_unlock(&rq->lock);
14853960c8c0SPeter Zijlstra }
14863960c8c0SPeter Zijlstra 
14873960c8c0SPeter Zijlstra static inline void
1488eb580751SPeter Zijlstra task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
14893960c8c0SPeter Zijlstra 	__releases(rq->lock)
14903960c8c0SPeter Zijlstra 	__releases(p->pi_lock)
14913960c8c0SPeter Zijlstra {
1492e7904a28SPeter Zijlstra 	lockdep_unpin_lock(&rq->lock, rf->cookie);
14933960c8c0SPeter Zijlstra 	raw_spin_unlock(&rq->lock);
1494eb580751SPeter Zijlstra 	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
14953960c8c0SPeter Zijlstra }
14963960c8c0SPeter Zijlstra 
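/*
 * Typical usage of the lock helpers above (illustrative sketch):
 *
 *	struct rq_flags rf;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &rf);
 *	...p->pi_lock and rq->lock are held: p cannot migrate...
 *	task_rq_unlock(rq, p, &rf);
 */
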
1497391e43daSPeter Zijlstra #ifdef CONFIG_SMP
1498391e43daSPeter Zijlstra #ifdef CONFIG_PREEMPT
1499391e43daSPeter Zijlstra 
1500391e43daSPeter Zijlstra static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
1501391e43daSPeter Zijlstra 
1502391e43daSPeter Zijlstra /*
1503391e43daSPeter Zijlstra  * fair double_lock_balance: Safely acquires both rq->locks in a fair
1504391e43daSPeter Zijlstra  * way at the expense of forcing extra atomic operations in all
1505391e43daSPeter Zijlstra  * invocations.  This assures that the double_lock is acquired using the
1506391e43daSPeter Zijlstra  * same underlying policy as the spinlock_t on this architecture, which
1507391e43daSPeter Zijlstra  * reduces latency compared to the unfair variant below.  However, it
1508391e43daSPeter Zijlstra  * also adds more overhead and therefore may reduce throughput.
1509391e43daSPeter Zijlstra  */
1510391e43daSPeter Zijlstra static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1511391e43daSPeter Zijlstra 	__releases(this_rq->lock)
1512391e43daSPeter Zijlstra 	__acquires(busiest->lock)
1513391e43daSPeter Zijlstra 	__acquires(this_rq->lock)
1514391e43daSPeter Zijlstra {
1515391e43daSPeter Zijlstra 	raw_spin_unlock(&this_rq->lock);
1516391e43daSPeter Zijlstra 	double_rq_lock(this_rq, busiest);
1517391e43daSPeter Zijlstra 
1518391e43daSPeter Zijlstra 	return 1;
1519391e43daSPeter Zijlstra }
1520391e43daSPeter Zijlstra 
1521391e43daSPeter Zijlstra #else
1522391e43daSPeter Zijlstra /*
1523391e43daSPeter Zijlstra  * Unfair double_lock_balance: Optimizes throughput at the expense of
1524391e43daSPeter Zijlstra  * latency by eliminating extra atomic operations when the locks are
1525391e43daSPeter Zijlstra  * already in proper order on entry.  This favors lower cpu-ids and will
1526391e43daSPeter Zijlstra  * grant the double lock to lower cpus over higher ids under contention,
1527391e43daSPeter Zijlstra  * regardless of entry order into the function.
1528391e43daSPeter Zijlstra  */
1529391e43daSPeter Zijlstra static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1530391e43daSPeter Zijlstra 	__releases(this_rq->lock)
1531391e43daSPeter Zijlstra 	__acquires(busiest->lock)
1532391e43daSPeter Zijlstra 	__acquires(this_rq->lock)
1533391e43daSPeter Zijlstra {
1534391e43daSPeter Zijlstra 	int ret = 0;
1535391e43daSPeter Zijlstra 
1536391e43daSPeter Zijlstra 	if (unlikely(!raw_spin_trylock(&busiest->lock))) {
1537391e43daSPeter Zijlstra 		if (busiest < this_rq) {
1538391e43daSPeter Zijlstra 			raw_spin_unlock(&this_rq->lock);
1539391e43daSPeter Zijlstra 			raw_spin_lock(&busiest->lock);
1540391e43daSPeter Zijlstra 			raw_spin_lock_nested(&this_rq->lock,
1541391e43daSPeter Zijlstra 					      SINGLE_DEPTH_NESTING);
1542391e43daSPeter Zijlstra 			ret = 1;
1543391e43daSPeter Zijlstra 		} else
1544391e43daSPeter Zijlstra 			raw_spin_lock_nested(&busiest->lock,
1545391e43daSPeter Zijlstra 					      SINGLE_DEPTH_NESTING);
1546391e43daSPeter Zijlstra 	}
1547391e43daSPeter Zijlstra 	return ret;
1548391e43daSPeter Zijlstra }
1549391e43daSPeter Zijlstra 
1550391e43daSPeter Zijlstra #endif /* CONFIG_PREEMPT */
1551391e43daSPeter Zijlstra 
1552391e43daSPeter Zijlstra /*
1553391e43daSPeter Zijlstra  * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
1554391e43daSPeter Zijlstra  */
1555391e43daSPeter Zijlstra static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1556391e43daSPeter Zijlstra {
1557391e43daSPeter Zijlstra 	if (unlikely(!irqs_disabled())) {
1558391e43daSPeter Zijlstra 		/* printk() doesn't work well under rq->lock */
1559391e43daSPeter Zijlstra 		raw_spin_unlock(&this_rq->lock);
1560391e43daSPeter Zijlstra 		BUG_ON(1);
1561391e43daSPeter Zijlstra 	}
1562391e43daSPeter Zijlstra 
1563391e43daSPeter Zijlstra 	return _double_lock_balance(this_rq, busiest);
1564391e43daSPeter Zijlstra }
1565391e43daSPeter Zijlstra 
1566391e43daSPeter Zijlstra static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
1567391e43daSPeter Zijlstra 	__releases(busiest->lock)
1568391e43daSPeter Zijlstra {
1569391e43daSPeter Zijlstra 	raw_spin_unlock(&busiest->lock);
1570391e43daSPeter Zijlstra 	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
1571391e43daSPeter Zijlstra }
1572391e43daSPeter Zijlstra 
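/*
 * Canonical pattern (illustrative): a balance pass pulls tasks with
 *
 *	double_lock_balance(this_rq, busiest);
 *	...this_rq->lock may have been dropped and re-acquired above, so
 *	   any state read beforehand must be revalidated...
 *	double_unlock_balance(this_rq, busiest);
 */
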
157374602315SPeter Zijlstra static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
157474602315SPeter Zijlstra {
157574602315SPeter Zijlstra 	if (l1 > l2)
157674602315SPeter Zijlstra 		swap(l1, l2);
157774602315SPeter Zijlstra 
157874602315SPeter Zijlstra 	spin_lock(l1);
157974602315SPeter Zijlstra 	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
158074602315SPeter Zijlstra }
158174602315SPeter Zijlstra 
158260e69eedSMike Galbraith static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
158360e69eedSMike Galbraith {
158460e69eedSMike Galbraith 	if (l1 > l2)
158560e69eedSMike Galbraith 		swap(l1, l2);
158660e69eedSMike Galbraith 
158760e69eedSMike Galbraith 	spin_lock_irq(l1);
158860e69eedSMike Galbraith 	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
158960e69eedSMike Galbraith }
159060e69eedSMike Galbraith 
159174602315SPeter Zijlstra static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
159274602315SPeter Zijlstra {
159374602315SPeter Zijlstra 	if (l1 > l2)
159474602315SPeter Zijlstra 		swap(l1, l2);
159574602315SPeter Zijlstra 
159674602315SPeter Zijlstra 	raw_spin_lock(l1);
159774602315SPeter Zijlstra 	raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
159874602315SPeter Zijlstra }
159974602315SPeter Zijlstra 
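/*
 * All three helpers above order the pair by ascending lock address, so
 * concurrent callers passing the same locks in opposite argument order
 * cannot deadlock (illustrative):
 *
 *	CPU0: double_lock(&a, &b);
 *	CPU1: double_lock(&b, &a);	// both take min(&a, &b) first
 */
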
1600391e43daSPeter Zijlstra /*
1601391e43daSPeter Zijlstra  * double_rq_lock - safely lock two runqueues
1602391e43daSPeter Zijlstra  *
1603391e43daSPeter Zijlstra  * Note this does not disable interrupts like task_rq_lock;
1604391e43daSPeter Zijlstra  * you need to do so manually before calling.
1605391e43daSPeter Zijlstra  */
1606391e43daSPeter Zijlstra static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
1607391e43daSPeter Zijlstra 	__acquires(rq1->lock)
1608391e43daSPeter Zijlstra 	__acquires(rq2->lock)
1609391e43daSPeter Zijlstra {
1610391e43daSPeter Zijlstra 	BUG_ON(!irqs_disabled());
1611391e43daSPeter Zijlstra 	if (rq1 == rq2) {
1612391e43daSPeter Zijlstra 		raw_spin_lock(&rq1->lock);
1613391e43daSPeter Zijlstra 		__acquire(rq2->lock);	/* Fake it out ;) */
1614391e43daSPeter Zijlstra 	} else {
1615391e43daSPeter Zijlstra 		if (rq1 < rq2) {
1616391e43daSPeter Zijlstra 			raw_spin_lock(&rq1->lock);
1617391e43daSPeter Zijlstra 			raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
1618391e43daSPeter Zijlstra 		} else {
1619391e43daSPeter Zijlstra 			raw_spin_lock(&rq2->lock);
1620391e43daSPeter Zijlstra 			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
1621391e43daSPeter Zijlstra 		}
1622391e43daSPeter Zijlstra 	}
1623391e43daSPeter Zijlstra }
1624391e43daSPeter Zijlstra 
1625391e43daSPeter Zijlstra /*
1626391e43daSPeter Zijlstra  * double_rq_unlock - safely unlock two runqueues
1627391e43daSPeter Zijlstra  *
1628391e43daSPeter Zijlstra  * Note this does not restore interrupts like task_rq_unlock;
1629391e43daSPeter Zijlstra  * you need to do so manually after calling.
1630391e43daSPeter Zijlstra  */
1631391e43daSPeter Zijlstra static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1632391e43daSPeter Zijlstra 	__releases(rq1->lock)
1633391e43daSPeter Zijlstra 	__releases(rq2->lock)
1634391e43daSPeter Zijlstra {
1635391e43daSPeter Zijlstra 	raw_spin_unlock(&rq1->lock);
1636391e43daSPeter Zijlstra 	if (rq1 != rq2)
1637391e43daSPeter Zijlstra 		raw_spin_unlock(&rq2->lock);
1638391e43daSPeter Zijlstra 	else
1639391e43daSPeter Zijlstra 		__release(rq2->lock);
1640391e43daSPeter Zijlstra }
1641391e43daSPeter Zijlstra 
1642391e43daSPeter Zijlstra #else /* CONFIG_SMP */
1643391e43daSPeter Zijlstra 
1644391e43daSPeter Zijlstra /*
1645391e43daSPeter Zijlstra  * double_rq_lock - safely lock two runqueues
1646391e43daSPeter Zijlstra  *
1647391e43daSPeter Zijlstra  * Note this does not disable interrupts like task_rq_lock;
1648391e43daSPeter Zijlstra  * you need to do so manually before calling.
1649391e43daSPeter Zijlstra  */
1650391e43daSPeter Zijlstra static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
1651391e43daSPeter Zijlstra 	__acquires(rq1->lock)
1652391e43daSPeter Zijlstra 	__acquires(rq2->lock)
1653391e43daSPeter Zijlstra {
1654391e43daSPeter Zijlstra 	BUG_ON(!irqs_disabled());
1655391e43daSPeter Zijlstra 	BUG_ON(rq1 != rq2);
1656391e43daSPeter Zijlstra 	raw_spin_lock(&rq1->lock);
1657391e43daSPeter Zijlstra 	__acquire(rq2->lock);	/* Fake it out ;) */
1658391e43daSPeter Zijlstra }
1659391e43daSPeter Zijlstra 
1660391e43daSPeter Zijlstra /*
1661391e43daSPeter Zijlstra  * double_rq_unlock - safely unlock two runqueues
1662391e43daSPeter Zijlstra  *
1663391e43daSPeter Zijlstra  * Note this does not restore interrupts like task_rq_unlock;
1664391e43daSPeter Zijlstra  * you need to do so manually after calling.
1665391e43daSPeter Zijlstra  */
1666391e43daSPeter Zijlstra static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1667391e43daSPeter Zijlstra 	__releases(rq1->lock)
1668391e43daSPeter Zijlstra 	__releases(rq2->lock)
1669391e43daSPeter Zijlstra {
1670391e43daSPeter Zijlstra 	BUG_ON(rq1 != rq2);
1671391e43daSPeter Zijlstra 	raw_spin_unlock(&rq1->lock);
1672391e43daSPeter Zijlstra 	__release(rq2->lock);
1673391e43daSPeter Zijlstra }
1674391e43daSPeter Zijlstra 
1675391e43daSPeter Zijlstra #endif
1676391e43daSPeter Zijlstra 
1677391e43daSPeter Zijlstra extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
1678391e43daSPeter Zijlstra extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
16796b55c965SSrikar Dronamraju 
16806b55c965SSrikar Dronamraju #ifdef	CONFIG_SCHED_DEBUG
1681391e43daSPeter Zijlstra extern void print_cfs_stats(struct seq_file *m, int cpu);
1682391e43daSPeter Zijlstra extern void print_rt_stats(struct seq_file *m, int cpu);
1683acb32132SWanpeng Li extern void print_dl_stats(struct seq_file *m, int cpu);
16846b55c965SSrikar Dronamraju extern void
16856b55c965SSrikar Dronamraju print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
1686397f2378SSrikar Dronamraju 
1687397f2378SSrikar Dronamraju #ifdef CONFIG_NUMA_BALANCING
1688397f2378SSrikar Dronamraju extern void
1689397f2378SSrikar Dronamraju show_numa_stats(struct task_struct *p, struct seq_file *m);
1690397f2378SSrikar Dronamraju extern void
1691397f2378SSrikar Dronamraju print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
1692397f2378SSrikar Dronamraju 	unsigned long tpf, unsigned long gsf, unsigned long gpf);
1693397f2378SSrikar Dronamraju #endif /* CONFIG_NUMA_BALANCING */
1694397f2378SSrikar Dronamraju #endif /* CONFIG_SCHED_DEBUG */
1695391e43daSPeter Zijlstra 
1696391e43daSPeter Zijlstra extern void init_cfs_rq(struct cfs_rq *cfs_rq);
169707c54f7aSAbel Vesa extern void init_rt_rq(struct rt_rq *rt_rq);
169807c54f7aSAbel Vesa extern void init_dl_rq(struct dl_rq *dl_rq);
1699391e43daSPeter Zijlstra 
17001ee14e6cSBen Segall extern void cfs_bandwidth_usage_inc(void);
17011ee14e6cSBen Segall extern void cfs_bandwidth_usage_dec(void);
17021c792db7SSuresh Siddha 
17033451d024SFrederic Weisbecker #ifdef CONFIG_NO_HZ_COMMON
17041c792db7SSuresh Siddha enum rq_nohz_flag_bits {
17051c792db7SSuresh Siddha 	NOHZ_TICK_STOPPED,
17061c792db7SSuresh Siddha 	NOHZ_BALANCE_KICK,
17071c792db7SSuresh Siddha };
17081c792db7SSuresh Siddha 
17091c792db7SSuresh Siddha #define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
171020a5c8ccSThomas Gleixner 
171120a5c8ccSThomas Gleixner extern void nohz_balance_exit_idle(unsigned int cpu);
171220a5c8ccSThomas Gleixner #else
171320a5c8ccSThomas Gleixner static inline void nohz_balance_exit_idle(unsigned int cpu) { }
17141c792db7SSuresh Siddha #endif
171573fbec60SFrederic Weisbecker 
171673fbec60SFrederic Weisbecker #ifdef CONFIG_IRQ_TIME_ACCOUNTING
171773fbec60SFrederic Weisbecker 
171873fbec60SFrederic Weisbecker DECLARE_PER_CPU(u64, cpu_hardirq_time);
171973fbec60SFrederic Weisbecker DECLARE_PER_CPU(u64, cpu_softirq_time);
172073fbec60SFrederic Weisbecker 
172173fbec60SFrederic Weisbecker #ifndef CONFIG_64BIT
172273fbec60SFrederic Weisbecker DECLARE_PER_CPU(seqcount_t, irq_time_seq);
172373fbec60SFrederic Weisbecker 
172473fbec60SFrederic Weisbecker static inline void irq_time_write_begin(void)
172573fbec60SFrederic Weisbecker {
172673fbec60SFrederic Weisbecker 	__this_cpu_inc(irq_time_seq.sequence);
172773fbec60SFrederic Weisbecker 	smp_wmb();
172873fbec60SFrederic Weisbecker }
172973fbec60SFrederic Weisbecker 
173073fbec60SFrederic Weisbecker static inline void irq_time_write_end(void)
173173fbec60SFrederic Weisbecker {
173273fbec60SFrederic Weisbecker 	smp_wmb();
173373fbec60SFrederic Weisbecker 	__this_cpu_inc(irq_time_seq.sequence);
173473fbec60SFrederic Weisbecker }
173573fbec60SFrederic Weisbecker 
173673fbec60SFrederic Weisbecker static inline u64 irq_time_read(int cpu)
173773fbec60SFrederic Weisbecker {
173873fbec60SFrederic Weisbecker 	u64 irq_time;
173973fbec60SFrederic Weisbecker 	unsigned seq;
174073fbec60SFrederic Weisbecker 
174173fbec60SFrederic Weisbecker 	do {
174273fbec60SFrederic Weisbecker 		seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
174373fbec60SFrederic Weisbecker 		irq_time = per_cpu(cpu_softirq_time, cpu) +
174473fbec60SFrederic Weisbecker 			   per_cpu(cpu_hardirq_time, cpu);
174573fbec60SFrederic Weisbecker 	} while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
174673fbec60SFrederic Weisbecker 
174773fbec60SFrederic Weisbecker 	return irq_time;
174873fbec60SFrederic Weisbecker }
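
/*
 * Illustrative note: the open-coded counter above follows the usual
 * seqlock protocol; irq_time_write_begin()/irq_time_write_end() make
 * ->sequence odd for the duration of an update, and the reader loops
 * whenever read_seqcount_retry() observes an odd or changed count, so the
 * two u64 loads are returned as one consistent snapshot on 32-bit.
 */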
174973fbec60SFrederic Weisbecker #else /* CONFIG_64BIT */
175073fbec60SFrederic Weisbecker static inline void irq_time_write_begin(void)
175173fbec60SFrederic Weisbecker {
175273fbec60SFrederic Weisbecker }
175373fbec60SFrederic Weisbecker 
175473fbec60SFrederic Weisbecker static inline void irq_time_write_end(void)
175573fbec60SFrederic Weisbecker {
175673fbec60SFrederic Weisbecker }
175773fbec60SFrederic Weisbecker 
175873fbec60SFrederic Weisbecker static inline u64 irq_time_read(int cpu)
175973fbec60SFrederic Weisbecker {
176073fbec60SFrederic Weisbecker 	return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
176173fbec60SFrederic Weisbecker }
176273fbec60SFrederic Weisbecker #endif /* CONFIG_64BIT */
176373fbec60SFrederic Weisbecker #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
1764adaf9fcdSRafael J. Wysocki 
1765adaf9fcdSRafael J. Wysocki #ifdef CONFIG_CPU_FREQ
1766adaf9fcdSRafael J. Wysocki DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
1767adaf9fcdSRafael J. Wysocki 
1768adaf9fcdSRafael J. Wysocki /**
1769adaf9fcdSRafael J. Wysocki  * cpufreq_update_util - Take a note about CPU utilization changes.
1770adaf9fcdSRafael J. Wysocki  * @time: Current time.
1771adaf9fcdSRafael J. Wysocki  * @util: Current utilization.
1772adaf9fcdSRafael J. Wysocki  * @max: Utilization ceiling.
1773adaf9fcdSRafael J. Wysocki  *
1774adaf9fcdSRafael J. Wysocki  * This function is called by the scheduler on every invocation of
1775adaf9fcdSRafael J. Wysocki  * update_load_avg() on the CPU whose utilization is being updated.
1776adaf9fcdSRafael J. Wysocki  *
1777adaf9fcdSRafael J. Wysocki  * It can only be called from RCU-sched read-side critical sections.
1778adaf9fcdSRafael J. Wysocki  */
1779adaf9fcdSRafael J. Wysocki static inline void cpufreq_update_util(u64 time, unsigned long util, unsigned long max)
1780adaf9fcdSRafael J. Wysocki {
1781adaf9fcdSRafael J. Wysocki 	struct update_util_data *data;
1782adaf9fcdSRafael J. Wysocki 
1783adaf9fcdSRafael J. Wysocki 	data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data));
1784adaf9fcdSRafael J. Wysocki 	if (data)
1785adaf9fcdSRafael J. Wysocki 		data->func(data, time, util, max);
1786adaf9fcdSRafael J. Wysocki }
1787adaf9fcdSRafael J. Wysocki 
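/*
 * Illustrative consumer side (an assumption, simplified; the actual
 * registration helper lives in kernel/sched/cpufreq.c): a cpufreq
 * governor installs a per-CPU callback which the hook above then invokes
 * on every utilization update:
 *
 *	static void my_governor_update(struct update_util_data *data,
 *				       u64 time, unsigned long util,
 *				       unsigned long max)
 *	{
 *		...map util/max to a frequency request...
 *	}
 */
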
1788adaf9fcdSRafael J. Wysocki /**
1789adaf9fcdSRafael J. Wysocki  * cpufreq_trigger_update - Trigger CPU performance state evaluation if needed.
1790adaf9fcdSRafael J. Wysocki  * @time: Current time.
1791adaf9fcdSRafael J. Wysocki  *
1792adaf9fcdSRafael J. Wysocki  * The way cpufreq is currently arranged requires it to evaluate the CPU
1793adaf9fcdSRafael J. Wysocki  * performance state (frequency/voltage) on a regular basis to prevent it from
1794adaf9fcdSRafael J. Wysocki  * being stuck in a completely inadequate performance level for too long.
1795adaf9fcdSRafael J. Wysocki  * That is not guaranteed to happen if the updates are only triggered from CFS,
1796adaf9fcdSRafael J. Wysocki  * though, because they may not be coming in if RT or deadline tasks are active
1797adaf9fcdSRafael J. Wysocki  * all the time (or there are RT and DL tasks only).
1798adaf9fcdSRafael J. Wysocki  *
1799adaf9fcdSRafael J. Wysocki  * As a workaround for that issue, this function is called by the RT and DL
1800adaf9fcdSRafael J. Wysocki  * sched classes to trigger extra cpufreq updates to prevent it from stalling,
1801adaf9fcdSRafael J. Wysocki  * but that really is a band-aid.  Going forward it should be replaced with
1802adaf9fcdSRafael J. Wysocki  * solutions targeted more specifically at RT and DL tasks.
1803adaf9fcdSRafael J. Wysocki  */
1804adaf9fcdSRafael J. Wysocki static inline void cpufreq_trigger_update(u64 time)
1805adaf9fcdSRafael J. Wysocki {
1806adaf9fcdSRafael J. Wysocki 	cpufreq_update_util(time, ULONG_MAX, 0);
1807adaf9fcdSRafael J. Wysocki }
1808adaf9fcdSRafael J. Wysocki #else
1809adaf9fcdSRafael J. Wysocki static inline void cpufreq_update_util(u64 time, unsigned long util, unsigned long max) {}
1810adaf9fcdSRafael J. Wysocki static inline void cpufreq_trigger_update(u64 time) {}
1811adaf9fcdSRafael J. Wysocki #endif /* CONFIG_CPU_FREQ */
1812be53f58fSLinus Torvalds 
18139bdcb44eSRafael J. Wysocki #ifdef arch_scale_freq_capacity
18149bdcb44eSRafael J. Wysocki #ifndef arch_scale_freq_invariant
18159bdcb44eSRafael J. Wysocki #define arch_scale_freq_invariant()	(true)
18169bdcb44eSRafael J. Wysocki #endif
18179bdcb44eSRafael J. Wysocki #else /* arch_scale_freq_capacity */
18189bdcb44eSRafael J. Wysocki #define arch_scale_freq_invariant()	(false)
18199bdcb44eSRafael J. Wysocki #endif
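
/*
 * Reading of the block above (illustrative): arch_scale_freq_invariant()
 * is true only when the architecture overrides arch_scale_freq_capacity(),
 * i.e. when utilization signals are already scaled by the current
 * frequency and cpufreq may treat util/max as frequency-invariant.
 */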
1820