xref: /linux/kernel/sched/sched.h (revision c4c11dd160a8cc98f402c4e12f94b1572e822ffd)
1 
2 #include <linux/sched.h>
3 #include <linux/sched/sysctl.h>
4 #include <linux/sched/rt.h>
5 #include <linux/mutex.h>
6 #include <linux/spinlock.h>
7 #include <linux/stop_machine.h>
8 #include <linux/tick.h>
9 
10 #include "cpupri.h"
11 #include "cpuacct.h"
12 
13 struct rq;
14 
15 extern __read_mostly int scheduler_running;
16 
17 extern unsigned long calc_load_update;
18 extern atomic_long_t calc_load_tasks;
19 
20 extern long calc_load_fold_active(struct rq *this_rq);
21 extern void update_cpu_load_active(struct rq *this_rq);
22 
23 /*
24  * Convert user-nice values [ -20 ... 0 ... 19 ]
25  * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
26  * and back.
27  */
28 #define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
29 #define PRIO_TO_NICE(prio)	((prio) - MAX_RT_PRIO - 20)
30 #define TASK_NICE(p)		PRIO_TO_NICE((p)->static_prio)
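/*
 * Worked example (illustrative only, not used by the code): with the
 * usual MAX_RT_PRIO of 100, nice 0 maps to static priority 120 and the
 * full nice range [-20, 19] maps to [100, 139]:
 *
 *	NICE_TO_PRIO(0)   == 100 + 0 + 20   == 120
 *	NICE_TO_PRIO(-20) == 100 - 20 + 20  == 100
 *	PRIO_TO_NICE(139) == 139 - 100 - 20 == 19
 */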
31 
32 /*
33  * 'User priority' is the nice value converted to something we
34  * can work with better when scaling various scheduler parameters,
35  * it's a [ 0 ... 39 ] range.
36  */
37 #define USER_PRIO(p)		((p)-MAX_RT_PRIO)
38 #define TASK_USER_PRIO(p)	USER_PRIO((p)->static_prio)
39 #define MAX_USER_PRIO		(USER_PRIO(MAX_PRIO))
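/*
 * Illustration (assuming MAX_RT_PRIO == 100 and MAX_PRIO == 140): a
 * nice-0 task has static_prio 120, so TASK_USER_PRIO() yields 20, and
 * MAX_USER_PRIO evaluates to 140 - 100 == 40, i.e. one slot per nice
 * level.
 */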
40 
41 /*
42  * Helpers for converting nanosecond timing to jiffy resolution
43  */
44 #define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
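/*
 * E.g. with HZ == 1000 a jiffy is 1,000,000 ns, so
 * NS_TO_JIFFIES(5000000) == 5; with HZ == 250 the same 5 ms rounds
 * down to a single jiffy. (Illustrative values only.)
 */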
45 
46 /*
47  * Increase resolution of nice-level calculations for 64-bit architectures.
48  * The extra resolution improves shares distribution and load balancing of
49  * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
50  * hierarchies, especially on larger systems. This is not a user-visible change
51  * and does not change the user-interface for setting shares/weights.
52  *
53  * We increase resolution only if we have enough bits to allow this increased
54  * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution
55  * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
56  * increased costs.
57  */
58 #if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load  */
59 # define SCHED_LOAD_RESOLUTION	10
60 # define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
61 # define scale_load_down(w)	((w) >> SCHED_LOAD_RESOLUTION)
62 #else
63 # define SCHED_LOAD_RESOLUTION	0
64 # define scale_load(w)		(w)
65 # define scale_load_down(w)	(w)
66 #endif
67 
68 #define SCHED_LOAD_SHIFT	(10 + SCHED_LOAD_RESOLUTION)
69 #define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)
70 
71 #define NICE_0_LOAD		SCHED_LOAD_SCALE
72 #define NICE_0_SHIFT		SCHED_LOAD_SHIFT
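/*
 * Sketch of the scaling, assuming the definitions above: with
 * SCHED_LOAD_RESOLUTION == 0, NICE_0_LOAD is 1 << 10 == 1024 and
 * scale_load()/scale_load_down() are no-ops.  If the extra resolution
 * were enabled (SCHED_LOAD_RESOLUTION == 10), NICE_0_LOAD would become
 * 1 << 20 and scale_load(1024) == 1024 << 10 == NICE_0_LOAD, so the
 * externally visible weights would stay the same.
 */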
73 
74 /*
75  * These are the 'tuning knobs' of the scheduler:
76  */
77 
78 /*
79  * single value that denotes runtime == period, ie unlimited time.
80  */
81 #define RUNTIME_INF	((u64)~0ULL)
82 
83 static inline int rt_policy(int policy)
84 {
85 	if (policy == SCHED_FIFO || policy == SCHED_RR)
86 		return 1;
87 	return 0;
88 }
89 
90 static inline int task_has_rt_policy(struct task_struct *p)
91 {
92 	return rt_policy(p->policy);
93 }
94 
95 /*
96  * This is the priority-queue data structure of the RT scheduling class:
97  */
98 struct rt_prio_array {
99 	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
100 	struct list_head queue[MAX_RT_PRIO];
101 };
102 
103 struct rt_bandwidth {
104 	/* nests inside the rq lock: */
105 	raw_spinlock_t		rt_runtime_lock;
106 	ktime_t			rt_period;
107 	u64			rt_runtime;
108 	struct hrtimer		rt_period_timer;
109 };
110 
111 extern struct mutex sched_domains_mutex;
112 
113 #ifdef CONFIG_CGROUP_SCHED
114 
115 #include <linux/cgroup.h>
116 
117 struct cfs_rq;
118 struct rt_rq;
119 
120 extern struct list_head task_groups;
121 
122 struct cfs_bandwidth {
123 #ifdef CONFIG_CFS_BANDWIDTH
124 	raw_spinlock_t lock;
125 	ktime_t period;
126 	u64 quota, runtime;
127 	s64 hierarchal_quota;
128 	u64 runtime_expires;
129 
130 	int idle, timer_active;
131 	struct hrtimer period_timer, slack_timer;
132 	struct list_head throttled_cfs_rq;
133 
134 	/* statistics */
135 	int nr_periods, nr_throttled;
136 	u64 throttled_time;
137 #endif
138 };
139 
140 /* task group related information */
141 struct task_group {
142 	struct cgroup_subsys_state css;
143 
144 #ifdef CONFIG_FAIR_GROUP_SCHED
145 	/* schedulable entities of this group on each cpu */
146 	struct sched_entity **se;
147 	/* runqueue "owned" by this group on each cpu */
148 	struct cfs_rq **cfs_rq;
149 	unsigned long shares;
150 
151 #ifdef	CONFIG_SMP
152 	atomic_long_t load_avg;
153 	atomic_t runnable_avg;
154 #endif
155 #endif
156 
157 #ifdef CONFIG_RT_GROUP_SCHED
158 	struct sched_rt_entity **rt_se;
159 	struct rt_rq **rt_rq;
160 
161 	struct rt_bandwidth rt_bandwidth;
162 #endif
163 
164 	struct rcu_head rcu;
165 	struct list_head list;
166 
167 	struct task_group *parent;
168 	struct list_head siblings;
169 	struct list_head children;
170 
171 #ifdef CONFIG_SCHED_AUTOGROUP
172 	struct autogroup *autogroup;
173 #endif
174 
175 	struct cfs_bandwidth cfs_bandwidth;
176 };
177 
178 #ifdef CONFIG_FAIR_GROUP_SCHED
179 #define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD
180 
181 /*
182  * A weight of 0 or 1 can cause arithmetic problems.
183  * The weight of a cfs_rq is the sum of the weights of the entities
184  * queued on it, so the weight of an entity should not be too large,
185  * and neither should the shares value of a task group.
186  * (The default weight is 1024 - so there's no practical
187  *  limitation from this.)
188  */
189 #define MIN_SHARES	(1UL <<  1)
190 #define MAX_SHARES	(1UL << 18)
191 #endif
192 
193 typedef int (*tg_visitor)(struct task_group *, void *);
194 
195 extern int walk_tg_tree_from(struct task_group *from,
196 			     tg_visitor down, tg_visitor up, void *data);
197 
198 /*
199  * Iterate the full tree, calling @down when first entering a node and @up when
200  * leaving it for the final time.
201  *
202  * Caller must hold rcu_lock or sufficient equivalent.
203  */
204 static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
205 {
206 	return walk_tg_tree_from(&root_task_group, down, up, data);
207 }
208 
209 extern int tg_nop(struct task_group *tg, void *data);
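#if 0
/*
 * Illustrative sketch only, not part of this header: count every
 * task_group under the root with walk_tg_tree().  The visitor and the
 * caller below are hypothetical names made up for the example; a
 * non-zero return from either visitor aborts the walk.
 */
static int tg_count_visitor(struct task_group *tg, void *data)
{
	(*(int *)data)++;
	return 0;
}

static void count_task_groups_example(void)
{
	int nr = 0;

	rcu_read_lock();
	walk_tg_tree(tg_count_visitor, tg_nop, &nr);
	rcu_read_unlock();
}
#endif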
210 
211 extern void free_fair_sched_group(struct task_group *tg);
212 extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
213 extern void unregister_fair_sched_group(struct task_group *tg, int cpu);
214 extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
215 			struct sched_entity *se, int cpu,
216 			struct sched_entity *parent);
217 extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
218 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
219 
220 extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
221 extern void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
222 extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
223 
224 extern void free_rt_sched_group(struct task_group *tg);
225 extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
226 extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
227 		struct sched_rt_entity *rt_se, int cpu,
228 		struct sched_rt_entity *parent);
229 
230 extern struct task_group *sched_create_group(struct task_group *parent);
231 extern void sched_online_group(struct task_group *tg,
232 			       struct task_group *parent);
233 extern void sched_destroy_group(struct task_group *tg);
234 extern void sched_offline_group(struct task_group *tg);
235 
236 extern void sched_move_task(struct task_struct *tsk);
237 
238 #ifdef CONFIG_FAIR_GROUP_SCHED
239 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
240 #endif
241 
242 #else /* CONFIG_CGROUP_SCHED */
243 
244 struct cfs_bandwidth { };
245 
246 #endif	/* CONFIG_CGROUP_SCHED */
247 
248 /* CFS-related fields in a runqueue */
249 struct cfs_rq {
250 	struct load_weight load;
251 	unsigned int nr_running, h_nr_running;
252 
253 	u64 exec_clock;
254 	u64 min_vruntime;
255 #ifndef CONFIG_64BIT
256 	u64 min_vruntime_copy;
257 #endif
258 
259 	struct rb_root tasks_timeline;
260 	struct rb_node *rb_leftmost;
261 
262 	/*
263 	 * 'curr' points to the currently running entity on this cfs_rq.
264 	 * It is set to NULL otherwise (i.e. when no entity is currently running).
265 	 */
266 	struct sched_entity *curr, *next, *last, *skip;
267 
268 #ifdef	CONFIG_SCHED_DEBUG
269 	unsigned int nr_spread_over;
270 #endif
271 
272 #ifdef CONFIG_SMP
273 	/*
274 	 * CFS Load tracking
275 	 * Under CFS, load is tracked on a per-entity basis and aggregated up.
276 	 * This allows for the description of both thread and group usage (in
277 	 * the FAIR_GROUP_SCHED case).
278 	 */
279 	unsigned long runnable_load_avg, blocked_load_avg;
280 	atomic64_t decay_counter;
281 	u64 last_decay;
282 	atomic_long_t removed_load;
283 
284 #ifdef CONFIG_FAIR_GROUP_SCHED
285 	/* Required to track per-cpu representation of a task_group */
286 	u32 tg_runnable_contrib;
287 	unsigned long tg_load_contrib;
288 #endif /* CONFIG_FAIR_GROUP_SCHED */
289 
290 	/*
291 	 *   h_load = weight * f(tg)
292 	 *
293 	 * Where f(tg) is the recursive weight fraction assigned to
294 	 * this group.
295 	 */
296 	unsigned long h_load;
297 #endif /* CONFIG_SMP */
298 
299 #ifdef CONFIG_FAIR_GROUP_SCHED
300 	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */
301 
302 	/*
303 	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
304 	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
305 	 * (like users, containers etc.)
306 	 *
307 	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
308 	 * list is used during load balance.
309 	 */
310 	int on_list;
311 	struct list_head leaf_cfs_rq_list;
312 	struct task_group *tg;	/* group that "owns" this runqueue */
313 
314 #ifdef CONFIG_CFS_BANDWIDTH
315 	int runtime_enabled;
316 	u64 runtime_expires;
317 	s64 runtime_remaining;
318 
319 	u64 throttled_clock, throttled_clock_task;
320 	u64 throttled_clock_task_time;
321 	int throttled, throttle_count;
322 	struct list_head throttled_list;
323 #endif /* CONFIG_CFS_BANDWIDTH */
324 #endif /* CONFIG_FAIR_GROUP_SCHED */
325 };
326 
327 static inline int rt_bandwidth_enabled(void)
328 {
329 	return sysctl_sched_rt_runtime >= 0;
330 }
331 
332 /* Real-Time classes' related field in a runqueue: */
333 struct rt_rq {
334 	struct rt_prio_array active;
335 	unsigned int rt_nr_running;
336 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
337 	struct {
338 		int curr; /* highest queued rt task prio */
339 #ifdef CONFIG_SMP
340 		int next; /* next highest */
341 #endif
342 	} highest_prio;
343 #endif
344 #ifdef CONFIG_SMP
345 	unsigned long rt_nr_migratory;
346 	unsigned long rt_nr_total;
347 	int overloaded;
348 	struct plist_head pushable_tasks;
349 #endif
350 	int rt_throttled;
351 	u64 rt_time;
352 	u64 rt_runtime;
353 	/* Nests inside the rq lock: */
354 	raw_spinlock_t rt_runtime_lock;
355 
356 #ifdef CONFIG_RT_GROUP_SCHED
357 	unsigned long rt_nr_boosted;
358 
359 	struct rq *rq;
360 	struct task_group *tg;
361 #endif
362 };
363 
364 #ifdef CONFIG_SMP
365 
366 /*
367  * We add the notion of a root-domain which will be used to define per-domain
368  * variables. Each exclusive cpuset essentially defines an island domain by
369  * fully partitioning the member cpus from any other cpuset. Whenever a new
370  * exclusive cpuset is created, we also create and attach a new root-domain
371  * object.
372  *
373  */
374 struct root_domain {
375 	atomic_t refcount;
376 	atomic_t rto_count;
377 	struct rcu_head rcu;
378 	cpumask_var_t span;
379 	cpumask_var_t online;
380 
381 	/*
382 	 * The "RT overload" flag: it gets set if a CPU has more than
383 	 * one runnable RT task.
384 	 */
385 	cpumask_var_t rto_mask;
386 	struct cpupri cpupri;
387 };
388 
389 extern struct root_domain def_root_domain;
390 
391 #endif /* CONFIG_SMP */
392 
393 /*
394  * This is the main, per-CPU runqueue data structure.
395  *
396  * Locking rule: code that needs to lock multiple runqueues (such as
397  * the load balancing or the thread migration code) must acquire the
398  * locks in ascending &runqueue order.
399  */
400 struct rq {
401 	/* runqueue lock: */
402 	raw_spinlock_t lock;
403 
404 	/*
405 	 * nr_running and cpu_load should be in the same cacheline because
406 	 * remote CPUs use both these fields when doing load calculation.
407 	 */
408 	unsigned int nr_running;
409 	#define CPU_LOAD_IDX_MAX 5
410 	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
411 	unsigned long last_load_update_tick;
412 #ifdef CONFIG_NO_HZ_COMMON
413 	u64 nohz_stamp;
414 	unsigned long nohz_flags;
415 #endif
416 #ifdef CONFIG_NO_HZ_FULL
417 	unsigned long last_sched_tick;
418 #endif
419 	int skip_clock_update;
420 
421 	/* capture load from *all* tasks on this cpu: */
422 	struct load_weight load;
423 	unsigned long nr_load_updates;
424 	u64 nr_switches;
425 
426 	struct cfs_rq cfs;
427 	struct rt_rq rt;
428 
429 #ifdef CONFIG_FAIR_GROUP_SCHED
430 	/* list of leaf cfs_rq on this cpu: */
431 	struct list_head leaf_cfs_rq_list;
432 #ifdef CONFIG_SMP
433 	unsigned long h_load_throttle;
434 #endif /* CONFIG_SMP */
435 #endif /* CONFIG_FAIR_GROUP_SCHED */
436 
437 #ifdef CONFIG_RT_GROUP_SCHED
438 	struct list_head leaf_rt_rq_list;
439 #endif
440 
441 	/*
442 	 * This is part of a global counter where only the total sum
443 	 * over all CPUs matters. A task can increase this counter on
444 	 * one CPU and if it got migrated afterwards it may decrease
445 	 * it on another CPU. Always updated under the runqueue lock:
446 	 */
447 	unsigned long nr_uninterruptible;
448 
449 	struct task_struct *curr, *idle, *stop;
450 	unsigned long next_balance;
451 	struct mm_struct *prev_mm;
452 
453 	u64 clock;
454 	u64 clock_task;
455 
456 	atomic_t nr_iowait;
457 
458 #ifdef CONFIG_SMP
459 	struct root_domain *rd;
460 	struct sched_domain *sd;
461 
462 	unsigned long cpu_power;
463 
464 	unsigned char idle_balance;
465 	/* For active balancing */
466 	int post_schedule;
467 	int active_balance;
468 	int push_cpu;
469 	struct cpu_stop_work active_balance_work;
470 	/* cpu of this runqueue: */
471 	int cpu;
472 	int online;
473 
474 	struct list_head cfs_tasks;
475 
476 	u64 rt_avg;
477 	u64 age_stamp;
478 	u64 idle_stamp;
479 	u64 avg_idle;
480 #endif
481 
482 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
483 	u64 prev_irq_time;
484 #endif
485 #ifdef CONFIG_PARAVIRT
486 	u64 prev_steal_time;
487 #endif
488 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
489 	u64 prev_steal_time_rq;
490 #endif
491 
492 	/* calc_load related fields */
493 	unsigned long calc_load_update;
494 	long calc_load_active;
495 
496 #ifdef CONFIG_SCHED_HRTICK
497 #ifdef CONFIG_SMP
498 	int hrtick_csd_pending;
499 	struct call_single_data hrtick_csd;
500 #endif
501 	struct hrtimer hrtick_timer;
502 #endif
503 
504 #ifdef CONFIG_SCHEDSTATS
505 	/* latency stats */
506 	struct sched_info rq_sched_info;
507 	unsigned long long rq_cpu_time;
508 	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
509 
510 	/* sys_sched_yield() stats */
511 	unsigned int yld_count;
512 
513 	/* schedule() stats */
514 	unsigned int sched_count;
515 	unsigned int sched_goidle;
516 
517 	/* try_to_wake_up() stats */
518 	unsigned int ttwu_count;
519 	unsigned int ttwu_local;
520 #endif
521 
522 #ifdef CONFIG_SMP
523 	struct llist_head wake_list;
524 #endif
525 
526 	struct sched_avg avg;
527 };
528 
529 static inline int cpu_of(struct rq *rq)
530 {
531 #ifdef CONFIG_SMP
532 	return rq->cpu;
533 #else
534 	return 0;
535 #endif
536 }
537 
538 DECLARE_PER_CPU(struct rq, runqueues);
539 
540 #define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
541 #define this_rq()		(&__get_cpu_var(runqueues))
542 #define task_rq(p)		cpu_rq(task_cpu(p))
543 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
544 #define raw_rq()		(&__raw_get_cpu_var(runqueues))
545 
546 static inline u64 rq_clock(struct rq *rq)
547 {
548 	return rq->clock;
549 }
550 
551 static inline u64 rq_clock_task(struct rq *rq)
552 {
553 	return rq->clock_task;
554 }
555 
556 #ifdef CONFIG_SMP
557 
558 #define rcu_dereference_check_sched_domain(p) \
559 	rcu_dereference_check((p), \
560 			      lockdep_is_held(&sched_domains_mutex))
561 
562 /*
563  * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
564  * See detach_destroy_domains: synchronize_sched for details.
565  *
566  * The domain tree of any CPU may only be accessed from within
567  * preempt-disabled sections.
568  */
569 #define for_each_domain(cpu, __sd) \
570 	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
571 			__sd; __sd = __sd->parent)
572 
573 #define for_each_lower_domain(sd) for (; sd; sd = sd->child)
574 
575 /**
576  * highest_flag_domain - Return highest sched_domain containing flag.
577  * @cpu:	The cpu whose highest level of sched domain is to
578  *		be returned.
579  * @flag:	The flag to check for the highest sched_domain
580  *		for the given cpu.
581  *
582  * Returns the highest sched_domain of a cpu which contains the given flag.
583  */
584 static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
585 {
586 	struct sched_domain *sd, *hsd = NULL;
587 
588 	for_each_domain(cpu, sd) {
589 		if (!(sd->flags & flag))
590 			break;
591 		hsd = sd;
592 	}
593 
594 	return hsd;
595 }
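/*
 * Example use (informational): the sd_llc/sd_llc_id per-cpu variables
 * below are derived from this helper, roughly as
 *	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
 * i.e. the widest domain that still shares a last-level cache.
 */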
596 
597 DECLARE_PER_CPU(struct sched_domain *, sd_llc);
598 DECLARE_PER_CPU(int, sd_llc_id);
599 
600 struct sched_group_power {
601 	atomic_t ref;
602 	/*
603 	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
604 	 * single CPU.
605 	 */
606 	unsigned int power, power_orig;
607 	unsigned long next_update;
608 	/*
609 	 * Number of busy cpus in this group.
610 	 */
611 	atomic_t nr_busy_cpus;
612 
613 	unsigned long cpumask[0]; /* iteration mask */
614 };
615 
616 struct sched_group {
617 	struct sched_group *next;	/* Must be a circular list */
618 	atomic_t ref;
619 
620 	unsigned int group_weight;
621 	struct sched_group_power *sgp;
622 
623 	/*
624 	 * The CPUs this group covers.
625 	 *
626 	 * NOTE: this field is variable length. (Allocated dynamically
627 	 * by attaching extra space to the end of the structure,
628 	 * depending on how many CPUs the kernel has booted up with)
629 	 */
630 	unsigned long cpumask[0];
631 };
632 
633 static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
634 {
635 	return to_cpumask(sg->cpumask);
636 }
637 
638 /*
639  * cpumask masking which cpus in the group are allowed to iterate up the domain
640  * tree.
641  */
642 static inline struct cpumask *sched_group_mask(struct sched_group *sg)
643 {
644 	return to_cpumask(sg->sgp->cpumask);
645 }
646 
647 /**
648  * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
649  * @group: The group whose first cpu is to be returned.
650  */
651 static inline unsigned int group_first_cpu(struct sched_group *group)
652 {
653 	return cpumask_first(sched_group_cpus(group));
654 }
655 
656 extern int group_balance_cpu(struct sched_group *sg);
657 
658 #endif /* CONFIG_SMP */
659 
660 #include "stats.h"
661 #include "auto_group.h"
662 
663 #ifdef CONFIG_CGROUP_SCHED
664 
665 /*
666  * Return the group to which this task belongs.
667  *
668  * We cannot use task_subsys_state() and friends because the cgroup
669  * subsystem changes that value before the cgroup_subsys::attach() method
670  * is called; therefore we cannot pin it and might observe the wrong value.
671  *
672  * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
673  * core changes this before calling sched_move_task().
674  *
675  * Instead we use a 'copy' which is updated from sched_move_task() while
676  * holding both task_struct::pi_lock and rq::lock.
677  */
678 static inline struct task_group *task_group(struct task_struct *p)
679 {
680 	return p->sched_task_group;
681 }
682 
683 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
684 static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
685 {
686 #if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
687 	struct task_group *tg = task_group(p);
688 #endif
689 
690 #ifdef CONFIG_FAIR_GROUP_SCHED
691 	p->se.cfs_rq = tg->cfs_rq[cpu];
692 	p->se.parent = tg->se[cpu];
693 #endif
694 
695 #ifdef CONFIG_RT_GROUP_SCHED
696 	p->rt.rt_rq  = tg->rt_rq[cpu];
697 	p->rt.parent = tg->rt_se[cpu];
698 #endif
699 }
700 
701 #else /* CONFIG_CGROUP_SCHED */
702 
703 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
704 static inline struct task_group *task_group(struct task_struct *p)
705 {
706 	return NULL;
707 }
708 
709 #endif /* CONFIG_CGROUP_SCHED */
710 
711 static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
712 {
713 	set_task_rq(p, cpu);
714 #ifdef CONFIG_SMP
715 	/*
716 	 * After ->cpu is set to a new value, task_rq_lock(p, ...) can be
717 	 * successfully executed on another CPU. We must ensure that updates of
718 	 * per-task data have been completed by this moment.
719 	 */
720 	smp_wmb();
721 	task_thread_info(p)->cpu = cpu;
722 #endif
723 }
724 
725 /*
726  * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
727  */
728 #ifdef CONFIG_SCHED_DEBUG
729 # include <linux/static_key.h>
730 # define const_debug __read_mostly
731 #else
732 # define const_debug const
733 #endif
734 
735 extern const_debug unsigned int sysctl_sched_features;
736 
737 #define SCHED_FEAT(name, enabled)	\
738 	__SCHED_FEAT_##name ,
739 
740 enum {
741 #include "features.h"
742 	__SCHED_FEAT_NR,
743 };
744 
745 #undef SCHED_FEAT
746 
747 #if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
748 static __always_inline bool static_branch__true(struct static_key *key)
749 {
750 	return static_key_true(key); /* Not out of line branch. */
751 }
752 
753 static __always_inline bool static_branch__false(struct static_key *key)
754 {
755 	return static_key_false(key); /* Out of line branch. */
756 }
757 
758 #define SCHED_FEAT(name, enabled)					\
759 static __always_inline bool static_branch_##name(struct static_key *key) \
760 {									\
761 	return static_branch__##enabled(key);				\
762 }
763 
764 #include "features.h"
765 
766 #undef SCHED_FEAT
767 
768 extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
769 #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
770 #else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
771 #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
772 #endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
773 
774 #ifdef CONFIG_NUMA_BALANCING
775 #define sched_feat_numa(x) sched_feat(x)
776 #ifdef CONFIG_SCHED_DEBUG
777 #define numabalancing_enabled sched_feat_numa(NUMA)
778 #else
779 extern bool numabalancing_enabled;
780 #endif /* CONFIG_SCHED_DEBUG */
781 #else
782 #define sched_feat_numa(x) (0)
783 #define numabalancing_enabled (0)
784 #endif /* CONFIG_NUMA_BALANCING */
785 
786 static inline u64 global_rt_period(void)
787 {
788 	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
789 }
790 
791 static inline u64 global_rt_runtime(void)
792 {
793 	if (sysctl_sched_rt_runtime < 0)
794 		return RUNTIME_INF;
795 
796 	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
797 }
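/*
 * With the default sysctls (sched_rt_period_us == 1000000,
 * sched_rt_runtime_us == 950000) these evaluate to a 1s period and a
 * 0.95s runtime, i.e. RT tasks may consume at most 95% of each period.
 * (Defaults quoted for illustration only.)
 */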
798 
799 
800 
801 static inline int task_current(struct rq *rq, struct task_struct *p)
802 {
803 	return rq->curr == p;
804 }
805 
806 static inline int task_running(struct rq *rq, struct task_struct *p)
807 {
808 #ifdef CONFIG_SMP
809 	return p->on_cpu;
810 #else
811 	return task_current(rq, p);
812 #endif
813 }
814 
815 
816 #ifndef prepare_arch_switch
817 # define prepare_arch_switch(next)	do { } while (0)
818 #endif
819 #ifndef finish_arch_switch
820 # define finish_arch_switch(prev)	do { } while (0)
821 #endif
822 #ifndef finish_arch_post_lock_switch
823 # define finish_arch_post_lock_switch()	do { } while (0)
824 #endif
825 
826 #ifndef __ARCH_WANT_UNLOCKED_CTXSW
827 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
828 {
829 #ifdef CONFIG_SMP
830 	/*
831 	 * We can optimise this out completely for !SMP, because the
832 	 * SMP rebalancing from interrupt is the only thing that cares
833 	 * here.
834 	 */
835 	next->on_cpu = 1;
836 #endif
837 }
838 
839 static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
840 {
841 #ifdef CONFIG_SMP
842 	/*
843 	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
844 	 * We must ensure this doesn't happen until the switch is completely
845 	 * finished.
846 	 */
847 	smp_wmb();
848 	prev->on_cpu = 0;
849 #endif
850 #ifdef CONFIG_DEBUG_SPINLOCK
851 	/* this is a valid case when another task releases the spinlock */
852 	rq->lock.owner = current;
853 #endif
854 	/*
855 	 * If we are tracking spinlock dependencies then we have to
856 	 * fix up the runqueue lock - which gets 'carried over' from
857 	 * prev into current:
858 	 */
859 	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
860 
861 	raw_spin_unlock_irq(&rq->lock);
862 }
863 
864 #else /* __ARCH_WANT_UNLOCKED_CTXSW */
865 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
866 {
867 #ifdef CONFIG_SMP
868 	/*
869 	 * We can optimise this out completely for !SMP, because the
870 	 * SMP rebalancing from interrupt is the only thing that cares
871 	 * here.
872 	 */
873 	next->on_cpu = 1;
874 #endif
875 	raw_spin_unlock(&rq->lock);
876 }
877 
878 static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
879 {
880 #ifdef CONFIG_SMP
881 	/*
882 	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
883 	 * We must ensure this doesn't happen until the switch is completely
884 	 * finished.
885 	 */
886 	smp_wmb();
887 	prev->on_cpu = 0;
888 #endif
889 	local_irq_enable();
890 }
891 #endif /* __ARCH_WANT_UNLOCKED_CTXSW */
892 
893 /*
894  * wake flags
895  */
896 #define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
897 #define WF_FORK		0x02		/* child wakeup after fork */
898 #define WF_MIGRATED	0x04		/* internal use, task got migrated */
899 
900 /*
901  * To aid in avoiding the subversion of "niceness" due to uneven distribution
902  * of tasks with abnormal "nice" values across CPUs the contribution that
903  * each task makes to its run queue's load is weighted according to its
904  * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
905  * scaled version of the new time slice allocation that they receive on time
906  * slice expiry etc.
907  */
908 
909 #define WEIGHT_IDLEPRIO                3
910 #define WMULT_IDLEPRIO         1431655765
911 
912 /*
913  * Nice levels are multiplicative, with a gentle 10% change for every
914  * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
915  * nice 1, it will get ~10% less CPU time than another CPU-bound task
916  * that remained on nice 0.
917  *
918  * The "10% effect" is relative and cumulative: from _any_ nice level,
919  * if you go up 1 level, it's -10% CPU usage; if you go down 1 level
920  * it's +10% CPU usage. (To achieve that we use a multiplier of 1.25:
921  * if a task goes up by ~10% and another task goes down by ~10%, then
922  * the relative distance between them is ~25%.)
923  */
924 static const int prio_to_weight[40] = {
925  /* -20 */     88761,     71755,     56483,     46273,     36291,
926  /* -15 */     29154,     23254,     18705,     14949,     11916,
927  /* -10 */      9548,      7620,      6100,      4904,      3906,
928  /*  -5 */      3121,      2501,      1991,      1586,      1277,
929  /*   0 */      1024,       820,       655,       526,       423,
930  /*   5 */       335,       272,       215,       172,       137,
931  /*  10 */       110,        87,        70,        56,        45,
932  /*  15 */        36,        29,        23,        18,        15,
933 };
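/*
 * Worked example (illustrative): two CPU-bound tasks, one at nice 0
 * (weight 1024) and one at nice 1 (weight 820), share a CPU in
 * proportion to their weights:
 *
 *	nice 0: 1024 / (1024 + 820) ~= 55.5%
 *	nice 1:  820 / (1024 + 820) ~= 44.5%
 *
 * which is the ~10% step described above.
 */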
934 
935 /*
936  * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
937  *
938  * In cases where the weight does not change often, we can use the
939  * precalculated inverse to speed up arithmetics by turning divisions
940  * into multiplications:
941  */
942 static const u32 prio_to_wmult[40] = {
943  /* -20 */     48388,     59856,     76040,     92818,    118348,
944  /* -15 */    147320,    184698,    229616,    287308,    360437,
945  /* -10 */    449829,    563644,    704093,    875809,   1099582,
946  /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
947  /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
948  /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
949  /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
950  /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
951 };
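/*
 * E.g. for nice 0: prio_to_wmult[20] == 4194304 == 2^32 / 1024, so a
 * division by the nice-0 weight can be computed as
 *	(delta * 4194304) >> 32  ==  delta >> 10
 * (illustrative arithmetic only).
 */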
952 
953 #define ENQUEUE_WAKEUP		1
954 #define ENQUEUE_HEAD		2
955 #ifdef CONFIG_SMP
956 #define ENQUEUE_WAKING		4	/* sched_class::task_waking was called */
957 #else
958 #define ENQUEUE_WAKING		0
959 #endif
960 
961 #define DEQUEUE_SLEEP		1
962 
963 struct sched_class {
964 	const struct sched_class *next;
965 
966 	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
967 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
968 	void (*yield_task) (struct rq *rq);
969 	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);
970 
971 	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
972 
973 	struct task_struct * (*pick_next_task) (struct rq *rq);
974 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
975 
976 #ifdef CONFIG_SMP
977 	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
978 	void (*migrate_task_rq)(struct task_struct *p, int next_cpu);
979 
980 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
981 	void (*post_schedule) (struct rq *this_rq);
982 	void (*task_waking) (struct task_struct *task);
983 	void (*task_woken) (struct rq *this_rq, struct task_struct *task);
984 
985 	void (*set_cpus_allowed)(struct task_struct *p,
986 				 const struct cpumask *newmask);
987 
988 	void (*rq_online)(struct rq *rq);
989 	void (*rq_offline)(struct rq *rq);
990 #endif
991 
992 	void (*set_curr_task) (struct rq *rq);
993 	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
994 	void (*task_fork) (struct task_struct *p);
995 
996 	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
997 	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
998 	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
999 			     int oldprio);
1000 
1001 	unsigned int (*get_rr_interval) (struct rq *rq,
1002 					 struct task_struct *task);
1003 
1004 #ifdef CONFIG_FAIR_GROUP_SCHED
1005 	void (*task_move_group) (struct task_struct *p, int on_rq);
1006 #endif
1007 };
1008 
1009 #define sched_class_highest (&stop_sched_class)
1010 #define for_each_class(class) \
1011    for (class = sched_class_highest; class; class = class->next)
1012 
1013 extern const struct sched_class stop_sched_class;
1014 extern const struct sched_class rt_sched_class;
1015 extern const struct sched_class fair_sched_class;
1016 extern const struct sched_class idle_sched_class;
1017 
1018 
1019 #ifdef CONFIG_SMP
1020 
1021 extern void update_group_power(struct sched_domain *sd, int cpu);
1022 
1023 extern void trigger_load_balance(struct rq *rq, int cpu);
1024 extern void idle_balance(int this_cpu, struct rq *this_rq);
1025 
1026 extern void idle_enter_fair(struct rq *this_rq);
1027 extern void idle_exit_fair(struct rq *this_rq);
1028 
1029 #else	/* CONFIG_SMP */
1030 
1031 static inline void idle_balance(int cpu, struct rq *rq)
1032 {
1033 }
1034 
1035 #endif
1036 
1037 extern void sysrq_sched_debug_show(void);
1038 extern void sched_init_granularity(void);
1039 extern void update_max_interval(void);
1040 extern void init_sched_rt_class(void);
1041 extern void init_sched_fair_class(void);
1042 
1043 extern void resched_task(struct task_struct *p);
1044 extern void resched_cpu(int cpu);
1045 
1046 extern struct rt_bandwidth def_rt_bandwidth;
1047 extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
1048 
1049 extern void update_idle_cpu_load(struct rq *this_rq);
1050 
1051 extern void init_task_runnable_average(struct task_struct *p);
1052 
1053 #ifdef CONFIG_PARAVIRT
1054 static inline u64 steal_ticks(u64 steal)
1055 {
1056 	if (unlikely(steal > NSEC_PER_SEC))
1057 		return div_u64(steal, TICK_NSEC);
1058 
1059 	return __iter_div_u64_rem(steal, TICK_NSEC, &steal);
1060 }
1061 #endif
1062 
1063 static inline void inc_nr_running(struct rq *rq)
1064 {
1065 	rq->nr_running++;
1066 
1067 #ifdef CONFIG_NO_HZ_FULL
1068 	if (rq->nr_running == 2) {
1069 		if (tick_nohz_full_cpu(rq->cpu)) {
1070 			/* Order rq->nr_running write against the IPI */
1071 			smp_wmb();
1072 			smp_send_reschedule(rq->cpu);
1073 		}
1074 	}
1075 #endif
1076 }
1077 
1078 static inline void dec_nr_running(struct rq *rq)
1079 {
1080 	rq->nr_running--;
1081 }
1082 
1083 static inline void rq_last_tick_reset(struct rq *rq)
1084 {
1085 #ifdef CONFIG_NO_HZ_FULL
1086 	rq->last_sched_tick = jiffies;
1087 #endif
1088 }
1089 
1090 extern void update_rq_clock(struct rq *rq);
1091 
1092 extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
1093 extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
1094 
1095 extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
1096 
1097 extern const_debug unsigned int sysctl_sched_time_avg;
1098 extern const_debug unsigned int sysctl_sched_nr_migrate;
1099 extern const_debug unsigned int sysctl_sched_migration_cost;
1100 
1101 static inline u64 sched_avg_period(void)
1102 {
1103 	return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
1104 }
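/*
 * With the default sysctl_sched_time_avg of 1000 (ms) this is a 0.5s
 * half-period for the rt_avg decay in sched_avg_update() below.
 * (Default quoted for illustration only.)
 */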
1105 
1106 #ifdef CONFIG_SCHED_HRTICK
1107 
1108 /*
1109  * Use hrtick when:
1110  *  - enabled by features
1111  *  - hrtimer is actually high res
1112  */
1113 static inline int hrtick_enabled(struct rq *rq)
1114 {
1115 	if (!sched_feat(HRTICK))
1116 		return 0;
1117 	if (!cpu_active(cpu_of(rq)))
1118 		return 0;
1119 	return hrtimer_is_hres_active(&rq->hrtick_timer);
1120 }
1121 
1122 void hrtick_start(struct rq *rq, u64 delay);
1123 
1124 #else
1125 
1126 static inline int hrtick_enabled(struct rq *rq)
1127 {
1128 	return 0;
1129 }
1130 
1131 #endif /* CONFIG_SCHED_HRTICK */
1132 
1133 #ifdef CONFIG_SMP
1134 extern void sched_avg_update(struct rq *rq);
1135 static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1136 {
1137 	rq->rt_avg += rt_delta;
1138 	sched_avg_update(rq);
1139 }
1140 #else
1141 static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
1142 static inline void sched_avg_update(struct rq *rq) { }
1143 #endif
1144 
1145 extern void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period);
1146 
1147 #ifdef CONFIG_SMP
1148 #ifdef CONFIG_PREEMPT
1149 
1150 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
1151 
1152 /*
1153  * fair double_lock_balance: Safely acquires both rq->locks in a fair
1154  * way at the expense of forcing extra atomic operations in all
1155  * invocations.  This assures that the double_lock is acquired using the
1156  * same underlying policy as the spinlock_t on this architecture, which
1157  * reduces latency compared to the unfair variant below.  However, it
1158  * also adds more overhead and therefore may reduce throughput.
1159  */
1160 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1161 	__releases(this_rq->lock)
1162 	__acquires(busiest->lock)
1163 	__acquires(this_rq->lock)
1164 {
1165 	raw_spin_unlock(&this_rq->lock);
1166 	double_rq_lock(this_rq, busiest);
1167 
1168 	return 1;
1169 }
1170 
1171 #else
1172 /*
1173  * Unfair double_lock_balance: Optimizes throughput at the expense of
1174  * latency by eliminating extra atomic operations when the locks are
1175  * already in proper order on entry.  This favors lower cpu-ids and will
1176  * grant the double lock to lower cpus over higher ids under contention,
1177  * regardless of entry order into the function.
1178  */
1179 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1180 	__releases(this_rq->lock)
1181 	__acquires(busiest->lock)
1182 	__acquires(this_rq->lock)
1183 {
1184 	int ret = 0;
1185 
1186 	if (unlikely(!raw_spin_trylock(&busiest->lock))) {
1187 		if (busiest < this_rq) {
1188 			raw_spin_unlock(&this_rq->lock);
1189 			raw_spin_lock(&busiest->lock);
1190 			raw_spin_lock_nested(&this_rq->lock,
1191 					      SINGLE_DEPTH_NESTING);
1192 			ret = 1;
1193 		} else
1194 			raw_spin_lock_nested(&busiest->lock,
1195 					      SINGLE_DEPTH_NESTING);
1196 	}
1197 	return ret;
1198 }
1199 
1200 #endif /* CONFIG_PREEMPT */
1201 
1202 /*
1203  * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
1204  */
1205 static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1206 {
1207 	if (unlikely(!irqs_disabled())) {
1208 		/* printk() doesn't work well under rq->lock */
1209 		raw_spin_unlock(&this_rq->lock);
1210 		BUG_ON(1);
1211 	}
1212 
1213 	return _double_lock_balance(this_rq, busiest);
1214 }
1215 
1216 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
1217 	__releases(busiest->lock)
1218 {
1219 	raw_spin_unlock(&busiest->lock);
1220 	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
1221 }
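#if 0
/*
 * Usage sketch (hypothetical caller, for illustration only): the
 * caller already holds this_rq->lock with interrupts disabled.  If
 * double_lock_balance() returns 1 it had to drop this_rq->lock to
 * avoid deadlock, so any state read under that lock must be
 * revalidated before use.
 */
static void pull_one_task_example(struct rq *this_rq, struct rq *busiest)
{
	if (double_lock_balance(this_rq, busiest)) {
		/* this_rq->lock was released and retaken: recheck state */
	}

	/* ... move a task from busiest to this_rq ... */

	double_unlock_balance(this_rq, busiest);
}
#endif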
1222 
1223 /*
1224  * double_rq_lock - safely lock two runqueues
1225  *
1226  * Note this does not disable interrupts like task_rq_lock,
1227  * you need to do so manually before calling.
1228  */
1229 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
1230 	__acquires(rq1->lock)
1231 	__acquires(rq2->lock)
1232 {
1233 	BUG_ON(!irqs_disabled());
1234 	if (rq1 == rq2) {
1235 		raw_spin_lock(&rq1->lock);
1236 		__acquire(rq2->lock);	/* Fake it out ;) */
1237 	} else {
1238 		if (rq1 < rq2) {
1239 			raw_spin_lock(&rq1->lock);
1240 			raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
1241 		} else {
1242 			raw_spin_lock(&rq2->lock);
1243 			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
1244 		}
1245 	}
1246 }
1247 
1248 /*
1249  * double_rq_unlock - safely unlock two runqueues
1250  *
1251  * Note this does not restore interrupts like task_rq_unlock,
1252  * you need to do so manually after calling.
1253  */
1254 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1255 	__releases(rq1->lock)
1256 	__releases(rq2->lock)
1257 {
1258 	raw_spin_unlock(&rq1->lock);
1259 	if (rq1 != rq2)
1260 		raw_spin_unlock(&rq2->lock);
1261 	else
1262 		__release(rq2->lock);
1263 }
1264 
1265 #else /* CONFIG_SMP */
1266 
1267 /*
1268  * double_rq_lock - safely lock two runqueues
1269  *
1270  * Note this does not disable interrupts like task_rq_lock,
1271  * you need to do so manually before calling.
1272  */
1273 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
1274 	__acquires(rq1->lock)
1275 	__acquires(rq2->lock)
1276 {
1277 	BUG_ON(!irqs_disabled());
1278 	BUG_ON(rq1 != rq2);
1279 	raw_spin_lock(&rq1->lock);
1280 	__acquire(rq2->lock);	/* Fake it out ;) */
1281 }
1282 
1283 /*
1284  * double_rq_unlock - safely unlock two runqueues
1285  *
1286  * Note this does not restore interrupts like task_rq_unlock,
1287  * you need to do so manually after calling.
1288  */
1289 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1290 	__releases(rq1->lock)
1291 	__releases(rq2->lock)
1292 {
1293 	BUG_ON(rq1 != rq2);
1294 	raw_spin_unlock(&rq1->lock);
1295 	__release(rq2->lock);
1296 }
1297 
1298 #endif
1299 
1300 extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
1301 extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
1302 extern void print_cfs_stats(struct seq_file *m, int cpu);
1303 extern void print_rt_stats(struct seq_file *m, int cpu);
1304 
1305 extern void init_cfs_rq(struct cfs_rq *cfs_rq);
1306 extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);
1307 
1308 extern void account_cfs_bandwidth_used(int enabled, int was_enabled);
1309 
1310 #ifdef CONFIG_NO_HZ_COMMON
1311 enum rq_nohz_flag_bits {
1312 	NOHZ_TICK_STOPPED,
1313 	NOHZ_BALANCE_KICK,
1314 };
1315 
1316 #define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
1317 #endif
1318 
1319 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
1320 
1321 DECLARE_PER_CPU(u64, cpu_hardirq_time);
1322 DECLARE_PER_CPU(u64, cpu_softirq_time);
1323 
1324 #ifndef CONFIG_64BIT
1325 DECLARE_PER_CPU(seqcount_t, irq_time_seq);
1326 
1327 static inline void irq_time_write_begin(void)
1328 {
1329 	__this_cpu_inc(irq_time_seq.sequence);
1330 	smp_wmb();
1331 }
1332 
1333 static inline void irq_time_write_end(void)
1334 {
1335 	smp_wmb();
1336 	__this_cpu_inc(irq_time_seq.sequence);
1337 }
1338 
1339 static inline u64 irq_time_read(int cpu)
1340 {
1341 	u64 irq_time;
1342 	unsigned seq;
1343 
1344 	do {
1345 		seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
1346 		irq_time = per_cpu(cpu_softirq_time, cpu) +
1347 			   per_cpu(cpu_hardirq_time, cpu);
1348 	} while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
1349 
1350 	return irq_time;
1351 }
1352 #else /* CONFIG_64BIT */
1353 static inline void irq_time_write_begin(void)
1354 {
1355 }
1356 
1357 static inline void irq_time_write_end(void)
1358 {
1359 }
1360 
1361 static inline u64 irq_time_read(int cpu)
1362 {
1363 	return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
1364 }
1365 #endif /* CONFIG_64BIT */
1366 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
1367