xref: /linux/kernel/sched/fair.c (revision 25aee3debe0464f6c680173041fa3de30ec9ff54)
1 /*
2  * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
3  *
4  *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
5  *
6  *  Interactivity improvements by Mike Galbraith
7  *  (C) 2007 Mike Galbraith <efault@gmx.de>
8  *
9  *  Various enhancements by Dmitry Adamushko.
10  *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
11  *
12  *  Group scheduling enhancements by Srivatsa Vaddagiri
13  *  Copyright IBM Corporation, 2007
14  *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
15  *
16  *  Scaled math optimizations by Thomas Gleixner
17  *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
18  *
19  *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
20  *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
21  */
22 
23 #include <linux/latencytop.h>
24 #include <linux/sched.h>
25 #include <linux/cpumask.h>
26 #include <linux/slab.h>
27 #include <linux/profile.h>
28 #include <linux/interrupt.h>
29 
30 #include <trace/events/sched.h>
31 
32 #include "sched.h"
33 
34 /*
35  * Targeted preemption latency for CPU-bound tasks:
36  * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
37  *
38  * NOTE: this latency value is not the same as the concept of
39  * 'timeslice length' - timeslices in CFS are of variable length
40  * and have no persistent notion like in traditional, time-slice
41  * based scheduling concepts.
42  *
43  * (to see the precise effective timeslice length of your workload,
44  *  run vmstat and monitor the context-switches (cs) field)
45  */
46 unsigned int sysctl_sched_latency = 6000000ULL;
47 unsigned int normalized_sysctl_sched_latency = 6000000ULL;
48 
49 /*
50  * The initial- and re-scaling of tunables is configurable
51  * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
52  *
53  * Options are:
54  * SCHED_TUNABLESCALING_NONE - unscaled, always *1
55  * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
56  * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
57  */
58 enum sched_tunable_scaling sysctl_sched_tunable_scaling
59 	= SCHED_TUNABLESCALING_LOG;
60 
61 /*
62  * Minimal preemption granularity for CPU-bound tasks:
63  * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
64  */
65 unsigned int sysctl_sched_min_granularity = 750000ULL;
66 unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
67 
68 /*
69  * sched_nr_latency is kept at sysctl_sched_latency / sysctl_sched_min_granularity
70  */
71 static unsigned int sched_nr_latency = 8;
72 
73 /*
74  * After fork, child runs first. If set to 0 (default) then
75  * parent will (try to) run first.
76  */
77 unsigned int sysctl_sched_child_runs_first __read_mostly;
78 
79 /*
80  * SCHED_OTHER wake-up granularity.
81  * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
82  *
83  * This option delays the preemption effects of decoupled workloads
84  * and reduces their over-scheduling. Synchronous workloads will still
85  * have immediate wakeup/sleep latencies.
86  */
87 unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
88 unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
89 
90 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
91 
92 /*
93  * The exponential sliding window over which load is averaged for shares
94  * distribution.
95  * (default: 10msec)
96  */
97 unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
98 
99 #ifdef CONFIG_CFS_BANDWIDTH
100 /*
101  * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
102  * each time a cfs_rq requests quota.
103  *
104  * Note: if the slice exceeds the runtime remaining (either because it has
105  * been consumed or because the quota was set smaller than the slice),
106  * we only issue the remaining available time.
107  *
108  * default: 5 msec, units: microseconds
109  */
110 unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
111 #endif
112 
113 /*
114  * Increase the granularity value when there are more CPUs,
115  * because with more CPUs the 'effective latency' as visible
116  * to users decreases. But the relationship is not linear,
117  * so pick a second-best guess by going with the log2 of the
118  * number of CPUs.
119  *
120  * This idea comes from the SD scheduler of Con Kolivas:
121  */
122 static int get_update_sysctl_factor(void)
123 {
124 	unsigned int cpus = min_t(int, num_online_cpus(), 8);
125 	unsigned int factor;
126 
127 	switch (sysctl_sched_tunable_scaling) {
128 	case SCHED_TUNABLESCALING_NONE:
129 		factor = 1;
130 		break;
131 	case SCHED_TUNABLESCALING_LINEAR:
132 		factor = cpus;
133 		break;
134 	case SCHED_TUNABLESCALING_LOG:
135 	default:
136 		factor = 1 + ilog2(cpus);
137 		break;
138 	}
139 
140 	return factor;
141 }
142 
143 static void update_sysctl(void)
144 {
145 	unsigned int factor = get_update_sysctl_factor();
146 
147 #define SET_SYSCTL(name) \
148 	(sysctl_##name = (factor) * normalized_sysctl_##name)
149 	SET_SYSCTL(sched_min_granularity);
150 	SET_SYSCTL(sched_latency);
151 	SET_SYSCTL(sched_wakeup_granularity);
152 #undef SET_SYSCTL
153 }
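
/*
 * Editor's note -- illustrative example only, not part of the kernel
 * source. With the default SCHED_TUNABLESCALING_LOG policy on a box
 * with 4 online CPUs, get_update_sysctl_factor() returns
 * 1 + ilog2(4) = 3, so update_sysctl() produces (assuming the default
 * normalized values above):
 *
 *   sysctl_sched_latency            = 3 * 6ms    = 18ms
 *   sysctl_sched_min_granularity    = 3 * 0.75ms = 2.25ms
 *   sysctl_sched_wakeup_granularity = 3 * 1ms    = 3ms
 *
 * The CPU count is clamped to 8, so under LOG scaling the factor
 * never exceeds 4.
 */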
154 
155 void sched_init_granularity(void)
156 {
157 	update_sysctl();
158 }
159 
160 #if BITS_PER_LONG == 32
161 # define WMULT_CONST	(~0UL)
162 #else
163 # define WMULT_CONST	(1UL << 32)
164 #endif
165 
166 #define WMULT_SHIFT	32
167 
168 /*
169  * Shift right and round:
170  */
171 #define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
172 
173 /*
174  * delta *= weight / lw
175  */
176 static unsigned long
177 calc_delta_mine(unsigned long delta_exec, unsigned long weight,
178 		struct load_weight *lw)
179 {
180 	u64 tmp;
181 
182 	/*
183 	 * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
184 	 * entities since MIN_SHARES = 2. Treat weight as 1 if less than
185 	 * 2^SCHED_LOAD_RESOLUTION.
186 	 */
187 	if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
188 		tmp = (u64)delta_exec * scale_load_down(weight);
189 	else
190 		tmp = (u64)delta_exec;
191 
192 	if (!lw->inv_weight) {
193 		unsigned long w = scale_load_down(lw->weight);
194 
195 		if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
196 			lw->inv_weight = 1;
197 		else if (unlikely(!w))
198 			lw->inv_weight = WMULT_CONST;
199 		else
200 			lw->inv_weight = WMULT_CONST / w;
201 	}
202 
203 	/*
204 	 * Check whether we'd overflow the 64-bit multiplication:
205 	 */
206 	if (unlikely(tmp > WMULT_CONST))
207 		tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
208 			WMULT_SHIFT/2);
209 	else
210 		tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
211 
212 	return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
213 }
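
/*
 * Editor's note -- worked example (illustrative only, assuming
 * SCHED_LOAD_RESOLUTION is 0 so scale_load_down() is a no-op).
 * For a nice-0 entity (weight NICE_0_LOAD = 1024) against a
 * load_weight lw whose weight is 3072:
 *
 *   lw->inv_weight = 2^32 / 3072
 *   delta          = SRR(delta_exec * 1024 * inv_weight, 32)
 *                 ~= delta_exec * 1024 / 3072 = delta_exec / 3
 *
 * i.e. the entity is credited with one third of the wall-clock delta,
 * with the division replaced by a multiply and a shift.
 */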
214 
215 
216 const struct sched_class fair_sched_class;
217 
218 /**************************************************************
219  * CFS operations on generic schedulable entities:
220  */
221 
222 #ifdef CONFIG_FAIR_GROUP_SCHED
223 
224 /* cpu runqueue to which this cfs_rq is attached */
225 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
226 {
227 	return cfs_rq->rq;
228 }
229 
230 /* An entity is a task if it doesn't "own" a runqueue */
231 #define entity_is_task(se)	(!se->my_q)
232 
233 static inline struct task_struct *task_of(struct sched_entity *se)
234 {
235 #ifdef CONFIG_SCHED_DEBUG
236 	WARN_ON_ONCE(!entity_is_task(se));
237 #endif
238 	return container_of(se, struct task_struct, se);
239 }
240 
241 /* Walk up scheduling entities hierarchy */
242 #define for_each_sched_entity(se) \
243 		for (; se; se = se->parent)
244 
245 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
246 {
247 	return p->se.cfs_rq;
248 }
249 
250 /* runqueue on which this entity is (to be) queued */
251 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
252 {
253 	return se->cfs_rq;
254 }
255 
256 /* runqueue "owned" by this group */
257 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
258 {
259 	return grp->my_q;
260 }
261 
262 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
263 {
264 	if (!cfs_rq->on_list) {
265 		/*
266 		 * Ensure we either appear before our parent (if already
267 		 * enqueued) or force our parent to appear after us when it is
268 		 * enqueued.  The fact that we always enqueue bottom-up
269 		 * reduces this to two cases.
270 		 */
271 		if (cfs_rq->tg->parent &&
272 		    cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
273 			list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
274 				&rq_of(cfs_rq)->leaf_cfs_rq_list);
275 		} else {
276 			list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
277 				&rq_of(cfs_rq)->leaf_cfs_rq_list);
278 		}
279 
280 		cfs_rq->on_list = 1;
281 	}
282 }
283 
284 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
285 {
286 	if (cfs_rq->on_list) {
287 		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
288 		cfs_rq->on_list = 0;
289 	}
290 }
291 
292 /* Iterate through all leaf cfs_rq's on a runqueue */
293 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
294 	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
295 
296 /* Do the two (enqueued) entities belong to the same group? */
297 static inline int
298 is_same_group(struct sched_entity *se, struct sched_entity *pse)
299 {
300 	if (se->cfs_rq == pse->cfs_rq)
301 		return 1;
302 
303 	return 0;
304 }
305 
306 static inline struct sched_entity *parent_entity(struct sched_entity *se)
307 {
308 	return se->parent;
309 }
310 
311 /* return depth at which a sched entity is present in the hierarchy */
312 static inline int depth_se(struct sched_entity *se)
313 {
314 	int depth = 0;
315 
316 	for_each_sched_entity(se)
317 		depth++;
318 
319 	return depth;
320 }
321 
322 static void
323 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
324 {
325 	int se_depth, pse_depth;
326 
327 	/*
328 	 * A preemption test can be made between sibling entities that are in the
329 	 * same cfs_rq, i.e. that have a common parent. Walk up the hierarchy of
330 	 * both tasks until we find ancestors that are siblings under a common
331 	 * parent.
332 	 */
333 
334 	/* First walk up until both entities are at the same depth */
335 	se_depth = depth_se(*se);
336 	pse_depth = depth_se(*pse);
337 
338 	while (se_depth > pse_depth) {
339 		se_depth--;
340 		*se = parent_entity(*se);
341 	}
342 
343 	while (pse_depth > se_depth) {
344 		pse_depth--;
345 		*pse = parent_entity(*pse);
346 	}
347 
348 	while (!is_same_group(*se, *pse)) {
349 		*se = parent_entity(*se);
350 		*pse = parent_entity(*pse);
351 	}
352 }
353 
354 #else	/* !CONFIG_FAIR_GROUP_SCHED */
355 
356 static inline struct task_struct *task_of(struct sched_entity *se)
357 {
358 	return container_of(se, struct task_struct, se);
359 }
360 
361 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
362 {
363 	return container_of(cfs_rq, struct rq, cfs);
364 }
365 
366 #define entity_is_task(se)	1
367 
368 #define for_each_sched_entity(se) \
369 		for (; se; se = NULL)
370 
371 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
372 {
373 	return &task_rq(p)->cfs;
374 }
375 
376 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
377 {
378 	struct task_struct *p = task_of(se);
379 	struct rq *rq = task_rq(p);
380 
381 	return &rq->cfs;
382 }
383 
384 /* runqueue "owned" by this group */
385 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
386 {
387 	return NULL;
388 }
389 
390 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
391 {
392 }
393 
394 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
395 {
396 }
397 
398 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
399 		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
400 
401 static inline int
402 is_same_group(struct sched_entity *se, struct sched_entity *pse)
403 {
404 	return 1;
405 }
406 
407 static inline struct sched_entity *parent_entity(struct sched_entity *se)
408 {
409 	return NULL;
410 }
411 
412 static inline void
413 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
414 {
415 }
416 
417 #endif	/* CONFIG_FAIR_GROUP_SCHED */
418 
419 static __always_inline
420 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec);
421 
422 /**************************************************************
423  * Scheduling class tree data structure manipulation methods:
424  */
425 
426 static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
427 {
428 	s64 delta = (s64)(vruntime - min_vruntime);
429 	if (delta > 0)
430 		min_vruntime = vruntime;
431 
432 	return min_vruntime;
433 }
434 
435 static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
436 {
437 	s64 delta = (s64)(vruntime - min_vruntime);
438 	if (delta < 0)
439 		min_vruntime = vruntime;
440 
441 	return min_vruntime;
442 }
443 
444 static inline int entity_before(struct sched_entity *a,
445 				struct sched_entity *b)
446 {
447 	return (s64)(a->vruntime - b->vruntime) < 0;
448 }
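
/*
 * Editor's note -- why the (s64) cast (illustrative only): vruntime
 * values are u64 and may wrap. Comparing them via a signed difference
 * stays correct as long as the two values are within 2^63 ns of each
 * other, e.g.
 *
 *   a->vruntime = ULLONG_MAX - 10, b->vruntime = 5
 *   (s64)(a->vruntime - b->vruntime) = -16  =>  a is "before" b
 *
 * whereas a plain unsigned '<' comparison would get this wrong once
 * one of the values has wrapped.
 */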
449 
450 static void update_min_vruntime(struct cfs_rq *cfs_rq)
451 {
452 	u64 vruntime = cfs_rq->min_vruntime;
453 
454 	if (cfs_rq->curr)
455 		vruntime = cfs_rq->curr->vruntime;
456 
457 	if (cfs_rq->rb_leftmost) {
458 		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
459 						   struct sched_entity,
460 						   run_node);
461 
462 		if (!cfs_rq->curr)
463 			vruntime = se->vruntime;
464 		else
465 			vruntime = min_vruntime(vruntime, se->vruntime);
466 	}
467 
468 	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
469 #ifndef CONFIG_64BIT
470 	smp_wmb();
471 	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
472 #endif
473 }
474 
475 /*
476  * Enqueue an entity into the rb-tree:
477  */
478 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
479 {
480 	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
481 	struct rb_node *parent = NULL;
482 	struct sched_entity *entry;
483 	int leftmost = 1;
484 
485 	/*
486 	 * Find the right place in the rbtree:
487 	 */
488 	while (*link) {
489 		parent = *link;
490 		entry = rb_entry(parent, struct sched_entity, run_node);
491 		/*
492 		 * We don't care about collisions. Nodes with
493 		 * the same key stay together.
494 		 */
495 		if (entity_before(se, entry)) {
496 			link = &parent->rb_left;
497 		} else {
498 			link = &parent->rb_right;
499 			leftmost = 0;
500 		}
501 	}
502 
503 	/*
504 	 * Maintain a cache of leftmost tree entries (it is frequently
505 	 * used):
506 	 */
507 	if (leftmost)
508 		cfs_rq->rb_leftmost = &se->run_node;
509 
510 	rb_link_node(&se->run_node, parent, link);
511 	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
512 }
513 
514 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
515 {
516 	if (cfs_rq->rb_leftmost == &se->run_node) {
517 		struct rb_node *next_node;
518 
519 		next_node = rb_next(&se->run_node);
520 		cfs_rq->rb_leftmost = next_node;
521 	}
522 
523 	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
524 }
525 
526 struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
527 {
528 	struct rb_node *left = cfs_rq->rb_leftmost;
529 
530 	if (!left)
531 		return NULL;
532 
533 	return rb_entry(left, struct sched_entity, run_node);
534 }
535 
536 static struct sched_entity *__pick_next_entity(struct sched_entity *se)
537 {
538 	struct rb_node *next = rb_next(&se->run_node);
539 
540 	if (!next)
541 		return NULL;
542 
543 	return rb_entry(next, struct sched_entity, run_node);
544 }
545 
546 #ifdef CONFIG_SCHED_DEBUG
547 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
548 {
549 	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
550 
551 	if (!last)
552 		return NULL;
553 
554 	return rb_entry(last, struct sched_entity, run_node);
555 }
556 
557 /**************************************************************
558  * Scheduling class statistics methods:
559  */
560 
561 int sched_proc_update_handler(struct ctl_table *table, int write,
562 		void __user *buffer, size_t *lenp,
563 		loff_t *ppos)
564 {
565 	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
566 	int factor = get_update_sysctl_factor();
567 
568 	if (ret || !write)
569 		return ret;
570 
571 	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
572 					sysctl_sched_min_granularity);
573 
574 #define WRT_SYSCTL(name) \
575 	(normalized_sysctl_##name = sysctl_##name / (factor))
576 	WRT_SYSCTL(sched_min_granularity);
577 	WRT_SYSCTL(sched_latency);
578 	WRT_SYSCTL(sched_wakeup_granularity);
579 #undef WRT_SYSCTL
580 
581 	return 0;
582 }
583 #endif
584 
585 /*
586  * delta /= w
587  */
588 static inline unsigned long
589 calc_delta_fair(unsigned long delta, struct sched_entity *se)
590 {
591 	if (unlikely(se->load.weight != NICE_0_LOAD))
592 		delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);
593 
594 	return delta;
595 }
596 
597 /*
598  * The idea is to set a period in which each task runs once.
599  *
600  * When there are too many tasks (more than sched_nr_latency) we have to stretch
601  * this period because otherwise the slices get too small.
602  *
603  * p = (nr <= nl) ? l : l*nr/nl
604  */
605 static u64 __sched_period(unsigned long nr_running)
606 {
607 	u64 period = sysctl_sched_latency;
608 	unsigned long nr_latency = sched_nr_latency;
609 
610 	if (unlikely(nr_running > nr_latency)) {
611 		period = sysctl_sched_min_granularity;
612 		period *= nr_running;
613 	}
614 
615 	return period;
616 }
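
/*
 * Editor's note -- example with the unscaled defaults (illustrative
 * only): latency = 6ms, min_granularity = 0.75ms, sched_nr_latency = 8:
 *
 *   nr_running = 5   ->  period = 6ms (each equal-weight task ~1.2ms)
 *   nr_running = 16  ->  period = 16 * 0.75ms = 12ms
 *
 * so each runnable task still receives at least min_granularity of
 * wall-time per period.
 */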
617 
618 /*
619  * We calculate the wall-time slice from the period by taking a part
620  * proportional to the weight.
621  *
622  * s = p*P[w/rw]
623  */
624 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
625 {
626 	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
627 
628 	for_each_sched_entity(se) {
629 		struct load_weight *load;
630 		struct load_weight lw;
631 
632 		cfs_rq = cfs_rq_of(se);
633 		load = &cfs_rq->load;
634 
635 		if (unlikely(!se->on_rq)) {
636 			lw = cfs_rq->load;
637 
638 			update_load_add(&lw, se->load.weight);
639 			load = &lw;
640 		}
641 		slice = calc_delta_mine(slice, se->load.weight, load);
642 	}
643 	return slice;
644 }
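
/*
 * Editor's note -- illustrative only. With two runnable entities on a
 * cfs_rq, one at nice-0 weight (1024) and one at weight 2048, and a
 * 6ms period, sched_slice() hands out wall-time in proportion to
 * weight:
 *
 *   slice(nice-0) = 6ms * 1024 / 3072 = 2ms
 *   slice(heavy)  = 6ms * 2048 / 3072 = 4ms
 *
 * With group scheduling the loop repeats this proportion at every
 * level of the entity hierarchy.
 */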
645 
646 /*
647  * We calculate the vruntime slice of a to-be-inserted task
648  *
649  * vs = s/w
650  */
651 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
652 {
653 	return calc_delta_fair(sched_slice(cfs_rq, se), se);
654 }
655 
656 static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update);
657 static void update_cfs_shares(struct cfs_rq *cfs_rq);
658 
659 /*
660  * Update the current task's runtime statistics. Skip current tasks that
661  * are not in our scheduling class.
662  */
663 static inline void
664 __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
665 	      unsigned long delta_exec)
666 {
667 	unsigned long delta_exec_weighted;
668 
669 	schedstat_set(curr->statistics.exec_max,
670 		      max((u64)delta_exec, curr->statistics.exec_max));
671 
672 	curr->sum_exec_runtime += delta_exec;
673 	schedstat_add(cfs_rq, exec_clock, delta_exec);
674 	delta_exec_weighted = calc_delta_fair(delta_exec, curr);
675 
676 	curr->vruntime += delta_exec_weighted;
677 	update_min_vruntime(cfs_rq);
678 
679 #if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
680 	cfs_rq->load_unacc_exec_time += delta_exec;
681 #endif
682 }
683 
684 static void update_curr(struct cfs_rq *cfs_rq)
685 {
686 	struct sched_entity *curr = cfs_rq->curr;
687 	u64 now = rq_of(cfs_rq)->clock_task;
688 	unsigned long delta_exec;
689 
690 	if (unlikely(!curr))
691 		return;
692 
693 	/*
694 	 * Get the amount of time the current task was running
695 	 * since the last time we changed load (this cannot
696 	 * overflow on 32 bits):
697 	 */
698 	delta_exec = (unsigned long)(now - curr->exec_start);
699 	if (!delta_exec)
700 		return;
701 
702 	__update_curr(cfs_rq, curr, delta_exec);
703 	curr->exec_start = now;
704 
705 	if (entity_is_task(curr)) {
706 		struct task_struct *curtask = task_of(curr);
707 
708 		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
709 		cpuacct_charge(curtask, delta_exec);
710 		account_group_exec_runtime(curtask, delta_exec);
711 	}
712 
713 	account_cfs_rq_runtime(cfs_rq, delta_exec);
714 }
715 
716 static inline void
717 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
718 {
719 	schedstat_set(se->statistics.wait_start, rq_of(cfs_rq)->clock);
720 }
721 
722 /*
723  * Task is being enqueued - update stats:
724  */
725 static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
726 {
727 	/*
728 	 * Are we enqueueing a waiting task? (for current tasks
729 	 * a dequeue/enqueue event is a NOP)
730 	 */
731 	if (se != cfs_rq->curr)
732 		update_stats_wait_start(cfs_rq, se);
733 }
734 
735 static void
736 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
737 {
738 	schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
739 			rq_of(cfs_rq)->clock - se->statistics.wait_start));
740 	schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
741 	schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
742 			rq_of(cfs_rq)->clock - se->statistics.wait_start);
743 #ifdef CONFIG_SCHEDSTATS
744 	if (entity_is_task(se)) {
745 		trace_sched_stat_wait(task_of(se),
746 			rq_of(cfs_rq)->clock - se->statistics.wait_start);
747 	}
748 #endif
749 	schedstat_set(se->statistics.wait_start, 0);
750 }
751 
752 static inline void
753 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
754 {
755 	/*
756 	 * Mark the end of the wait period if dequeueing a
757 	 * waiting task:
758 	 */
759 	if (se != cfs_rq->curr)
760 		update_stats_wait_end(cfs_rq, se);
761 }
762 
763 /*
764  * We are picking a new current task - update its stats:
765  */
766 static inline void
767 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
768 {
769 	/*
770 	 * We are starting a new run period:
771 	 */
772 	se->exec_start = rq_of(cfs_rq)->clock_task;
773 }
774 
775 /**************************************************
776  * Scheduling class queueing methods:
777  */
778 
779 static void
780 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
781 {
782 	update_load_add(&cfs_rq->load, se->load.weight);
783 	if (!parent_entity(se))
784 		update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
785 #ifdef CONFIG_SMP
786 	if (entity_is_task(se))
787 		list_add(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
788 #endif
789 	cfs_rq->nr_running++;
790 }
791 
792 static void
793 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
794 {
795 	update_load_sub(&cfs_rq->load, se->load.weight);
796 	if (!parent_entity(se))
797 		update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
798 	if (entity_is_task(se))
799 		list_del_init(&se->group_node);
800 	cfs_rq->nr_running--;
801 }
802 
803 #ifdef CONFIG_FAIR_GROUP_SCHED
804 /* we need this in update_cfs_load and load-balance functions below */
805 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
806 # ifdef CONFIG_SMP
807 static void update_cfs_rq_load_contribution(struct cfs_rq *cfs_rq,
808 					    int global_update)
809 {
810 	struct task_group *tg = cfs_rq->tg;
811 	long load_avg;
812 
813 	load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
814 	load_avg -= cfs_rq->load_contribution;
815 
816 	if (global_update || abs(load_avg) > cfs_rq->load_contribution / 8) {
817 		atomic_add(load_avg, &tg->load_weight);
818 		cfs_rq->load_contribution += load_avg;
819 	}
820 }
821 
822 static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
823 {
824 	u64 period = sysctl_sched_shares_window;
825 	u64 now, delta;
826 	unsigned long load = cfs_rq->load.weight;
827 
828 	if (cfs_rq->tg == &root_task_group || throttled_hierarchy(cfs_rq))
829 		return;
830 
831 	now = rq_of(cfs_rq)->clock_task;
832 	delta = now - cfs_rq->load_stamp;
833 
834 	/* truncate load history at 4 idle periods */
835 	if (cfs_rq->load_stamp > cfs_rq->load_last &&
836 	    now - cfs_rq->load_last > 4 * period) {
837 		cfs_rq->load_period = 0;
838 		cfs_rq->load_avg = 0;
839 		delta = period - 1;
840 	}
841 
842 	cfs_rq->load_stamp = now;
843 	cfs_rq->load_unacc_exec_time = 0;
844 	cfs_rq->load_period += delta;
845 	if (load) {
846 		cfs_rq->load_last = now;
847 		cfs_rq->load_avg += delta * load;
848 	}
849 
850 	/* consider updating load contribution on each fold or truncate */
851 	if (global_update || cfs_rq->load_period > period
852 	    || !cfs_rq->load_period)
853 		update_cfs_rq_load_contribution(cfs_rq, global_update);
854 
855 	while (cfs_rq->load_period > period) {
856 		/*
857 		 * Inline assembly required to prevent the compiler
858 		 * optimising this loop into a divmod call.
859 		 * See __iter_div_u64_rem() for another example of this.
860 		 */
861 		asm("" : "+rm" (cfs_rq->load_period));
862 		cfs_rq->load_period /= 2;
863 		cfs_rq->load_avg /= 2;
864 	}
865 
866 	if (!cfs_rq->curr && !cfs_rq->nr_running && !cfs_rq->load_avg)
867 		list_del_leaf_cfs_rq(cfs_rq);
868 }
869 
870 static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
871 {
872 	long tg_weight;
873 
874 	/*
875 	 * Use this CPU's actual weight instead of the last load_contribution
876 	 * to gain a more accurate current total weight. See
877 	 * update_cfs_rq_load_contribution().
878 	 */
879 	tg_weight = atomic_read(&tg->load_weight);
880 	tg_weight -= cfs_rq->load_contribution;
881 	tg_weight += cfs_rq->load.weight;
882 
883 	return tg_weight;
884 }
885 
886 static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
887 {
888 	long tg_weight, load, shares;
889 
890 	tg_weight = calc_tg_weight(tg, cfs_rq);
891 	load = cfs_rq->load.weight;
892 
893 	shares = (tg->shares * load);
894 	if (tg_weight)
895 		shares /= tg_weight;
896 
897 	if (shares < MIN_SHARES)
898 		shares = MIN_SHARES;
899 	if (shares > tg->shares)
900 		shares = tg->shares;
901 
902 	return shares;
903 }
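
/*
 * Editor's note -- worked example (illustrative only). Suppose a task
 * group has tg->shares = 1024 and a total weight (tg_weight) of 4096
 * spread over all CPUs, of which this cfs_rq contributes a load of
 * 1024:
 *
 *   shares = 1024 * 1024 / 4096 = 256
 *
 * i.e. this CPU's group entity is weighted by the fraction of the
 * group's load that runs here, clamped to [MIN_SHARES, tg->shares].
 */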
904 
905 static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
906 {
907 	if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
908 		update_cfs_load(cfs_rq, 0);
909 		update_cfs_shares(cfs_rq);
910 	}
911 }
912 # else /* CONFIG_SMP */
913 static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
914 {
915 }
916 
917 static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
918 {
919 	return tg->shares;
920 }
921 
922 static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
923 {
924 }
925 # endif /* CONFIG_SMP */
926 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
927 			    unsigned long weight)
928 {
929 	if (se->on_rq) {
930 		/* commit outstanding execution time */
931 		if (cfs_rq->curr == se)
932 			update_curr(cfs_rq);
933 		account_entity_dequeue(cfs_rq, se);
934 	}
935 
936 	update_load_set(&se->load, weight);
937 
938 	if (se->on_rq)
939 		account_entity_enqueue(cfs_rq, se);
940 }
941 
942 static void update_cfs_shares(struct cfs_rq *cfs_rq)
943 {
944 	struct task_group *tg;
945 	struct sched_entity *se;
946 	long shares;
947 
948 	tg = cfs_rq->tg;
949 	se = tg->se[cpu_of(rq_of(cfs_rq))];
950 	if (!se || throttled_hierarchy(cfs_rq))
951 		return;
952 #ifndef CONFIG_SMP
953 	if (likely(se->load.weight == tg->shares))
954 		return;
955 #endif
956 	shares = calc_cfs_shares(cfs_rq, tg);
957 
958 	reweight_entity(cfs_rq_of(se), se, shares);
959 }
960 #else /* CONFIG_FAIR_GROUP_SCHED */
961 static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
962 {
963 }
964 
965 static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
966 {
967 }
968 
969 static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
970 {
971 }
972 #endif /* CONFIG_FAIR_GROUP_SCHED */
973 
974 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
975 {
976 #ifdef CONFIG_SCHEDSTATS
977 	struct task_struct *tsk = NULL;
978 
979 	if (entity_is_task(se))
980 		tsk = task_of(se);
981 
982 	if (se->statistics.sleep_start) {
983 		u64 delta = rq_of(cfs_rq)->clock - se->statistics.sleep_start;
984 
985 		if ((s64)delta < 0)
986 			delta = 0;
987 
988 		if (unlikely(delta > se->statistics.sleep_max))
989 			se->statistics.sleep_max = delta;
990 
991 		se->statistics.sleep_start = 0;
992 		se->statistics.sum_sleep_runtime += delta;
993 
994 		if (tsk) {
995 			account_scheduler_latency(tsk, delta >> 10, 1);
996 			trace_sched_stat_sleep(tsk, delta);
997 		}
998 	}
999 	if (se->statistics.block_start) {
1000 		u64 delta = rq_of(cfs_rq)->clock - se->statistics.block_start;
1001 
1002 		if ((s64)delta < 0)
1003 			delta = 0;
1004 
1005 		if (unlikely(delta > se->statistics.block_max))
1006 			se->statistics.block_max = delta;
1007 
1008 		se->statistics.block_start = 0;
1009 		se->statistics.sum_sleep_runtime += delta;
1010 
1011 		if (tsk) {
1012 			if (tsk->in_iowait) {
1013 				se->statistics.iowait_sum += delta;
1014 				se->statistics.iowait_count++;
1015 				trace_sched_stat_iowait(tsk, delta);
1016 			}
1017 
1018 			trace_sched_stat_blocked(tsk, delta);
1019 
1020 			/*
1021 			 * Blocking time is in units of nanosecs, so shift by
1022 			 * 20 to get a milliseconds-range estimation of the
1023 			 * amount of time that the task spent sleeping:
1024 			 */
1025 			if (unlikely(prof_on == SLEEP_PROFILING)) {
1026 				profile_hits(SLEEP_PROFILING,
1027 						(void *)get_wchan(tsk),
1028 						delta >> 20);
1029 			}
1030 			account_scheduler_latency(tsk, delta >> 10, 0);
1031 		}
1032 	}
1033 #endif
1034 }
1035 
1036 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
1037 {
1038 #ifdef CONFIG_SCHED_DEBUG
1039 	s64 d = se->vruntime - cfs_rq->min_vruntime;
1040 
1041 	if (d < 0)
1042 		d = -d;
1043 
1044 	if (d > 3*sysctl_sched_latency)
1045 		schedstat_inc(cfs_rq, nr_spread_over);
1046 #endif
1047 }
1048 
1049 static void
1050 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
1051 {
1052 	u64 vruntime = cfs_rq->min_vruntime;
1053 
1054 	/*
1055 	 * The 'current' period is already promised to the current tasks;
1056 	 * however, the extra weight of the new task will slow them down a
1057 	 * little. Place the new task so that it fits in the slot that
1058 	 * stays open at the end.
1059 	 */
1060 	if (initial && sched_feat(START_DEBIT))
1061 		vruntime += sched_vslice(cfs_rq, se);
1062 
1063 	/* sleeps up to a single latency don't count. */
1064 	if (!initial) {
1065 		unsigned long thresh = sysctl_sched_latency;
1066 
1067 		/*
1068 		 * Halve their sleep time's effect, to allow
1069 		 * for a gentler effect of sleepers:
1070 		 */
1071 		if (sched_feat(GENTLE_FAIR_SLEEPERS))
1072 			thresh >>= 1;
1073 
1074 		vruntime -= thresh;
1075 	}
1076 
1077 	/* ensure we never gain time by being placed backwards. */
1078 	vruntime = max_vruntime(se->vruntime, vruntime);
1079 
1080 	se->vruntime = vruntime;
1081 }
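
/*
 * Editor's note -- illustrative numbers only, assuming a 6ms
 * sysctl_sched_latency. A newly forked task with START_DEBIT is
 * placed one vslice *after* min_vruntime, so it cannot immediately
 * preempt the whole runqueue. A task waking from sleep (with
 * GENTLE_FAIR_SLEEPERS) is placed at most 3ms (latency/2) *before*
 * min_vruntime, and the final max_vruntime() ensures it is never
 * placed earlier than the vruntime it already had.
 */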
1082 
1083 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
1084 
1085 static void
1086 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1087 {
1088 	/*
1089 	 * Update the normalized vruntime before updating min_vruntime
1090 	 * through calling update_curr().
1091 	 */
1092 	if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
1093 		se->vruntime += cfs_rq->min_vruntime;
1094 
1095 	/*
1096 	 * Update run-time statistics of the 'current'.
1097 	 */
1098 	update_curr(cfs_rq);
1099 	update_cfs_load(cfs_rq, 0);
1100 	account_entity_enqueue(cfs_rq, se);
1101 	update_cfs_shares(cfs_rq);
1102 
1103 	if (flags & ENQUEUE_WAKEUP) {
1104 		place_entity(cfs_rq, se, 0);
1105 		enqueue_sleeper(cfs_rq, se);
1106 	}
1107 
1108 	update_stats_enqueue(cfs_rq, se);
1109 	check_spread(cfs_rq, se);
1110 	if (se != cfs_rq->curr)
1111 		__enqueue_entity(cfs_rq, se);
1112 	se->on_rq = 1;
1113 
1114 	if (cfs_rq->nr_running == 1) {
1115 		list_add_leaf_cfs_rq(cfs_rq);
1116 		check_enqueue_throttle(cfs_rq);
1117 	}
1118 }
1119 
1120 static void __clear_buddies_last(struct sched_entity *se)
1121 {
1122 	for_each_sched_entity(se) {
1123 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
1124 		if (cfs_rq->last == se)
1125 			cfs_rq->last = NULL;
1126 		else
1127 			break;
1128 	}
1129 }
1130 
1131 static void __clear_buddies_next(struct sched_entity *se)
1132 {
1133 	for_each_sched_entity(se) {
1134 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
1135 		if (cfs_rq->next == se)
1136 			cfs_rq->next = NULL;
1137 		else
1138 			break;
1139 	}
1140 }
1141 
1142 static void __clear_buddies_skip(struct sched_entity *se)
1143 {
1144 	for_each_sched_entity(se) {
1145 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
1146 		if (cfs_rq->skip == se)
1147 			cfs_rq->skip = NULL;
1148 		else
1149 			break;
1150 	}
1151 }
1152 
1153 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
1154 {
1155 	if (cfs_rq->last == se)
1156 		__clear_buddies_last(se);
1157 
1158 	if (cfs_rq->next == se)
1159 		__clear_buddies_next(se);
1160 
1161 	if (cfs_rq->skip == se)
1162 		__clear_buddies_skip(se);
1163 }
1164 
1165 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
1166 
1167 static void
1168 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1169 {
1170 	/*
1171 	 * Update run-time statistics of the 'current'.
1172 	 */
1173 	update_curr(cfs_rq);
1174 
1175 	update_stats_dequeue(cfs_rq, se);
1176 	if (flags & DEQUEUE_SLEEP) {
1177 #ifdef CONFIG_SCHEDSTATS
1178 		if (entity_is_task(se)) {
1179 			struct task_struct *tsk = task_of(se);
1180 
1181 			if (tsk->state & TASK_INTERRUPTIBLE)
1182 				se->statistics.sleep_start = rq_of(cfs_rq)->clock;
1183 			if (tsk->state & TASK_UNINTERRUPTIBLE)
1184 				se->statistics.block_start = rq_of(cfs_rq)->clock;
1185 		}
1186 #endif
1187 	}
1188 
1189 	clear_buddies(cfs_rq, se);
1190 
1191 	if (se != cfs_rq->curr)
1192 		__dequeue_entity(cfs_rq, se);
1193 	se->on_rq = 0;
1194 	update_cfs_load(cfs_rq, 0);
1195 	account_entity_dequeue(cfs_rq, se);
1196 
1197 	/*
1198 	 * Normalize the entity after updating the min_vruntime because the
1199 	 * update can refer to the ->curr item and we need to reflect this
1200 	 * movement in our normalized position.
1201 	 */
1202 	if (!(flags & DEQUEUE_SLEEP))
1203 		se->vruntime -= cfs_rq->min_vruntime;
1204 
1205 	/* return excess runtime on last dequeue */
1206 	return_cfs_rq_runtime(cfs_rq);
1207 
1208 	update_min_vruntime(cfs_rq);
1209 	update_cfs_shares(cfs_rq);
1210 }
1211 
1212 /*
1213  * Preempt the current task with a newly woken task if needed:
1214  */
1215 static void
1216 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
1217 {
1218 	unsigned long ideal_runtime, delta_exec;
1219 	struct sched_entity *se;
1220 	s64 delta;
1221 
1222 	ideal_runtime = sched_slice(cfs_rq, curr);
1223 	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
1224 	if (delta_exec > ideal_runtime) {
1225 		resched_task(rq_of(cfs_rq)->curr);
1226 		/*
1227 		 * The current task ran long enough, ensure it doesn't get
1228 		 * re-elected due to buddy favours.
1229 		 */
1230 		clear_buddies(cfs_rq, curr);
1231 		return;
1232 	}
1233 
1234 	/*
1235 	 * Ensure that a task that missed wakeup preemption by a
1236 	 * narrow margin doesn't have to wait for a full slice.
1237 	 * This also mitigates buddy induced latencies under load.
1238 	 */
1239 	if (delta_exec < sysctl_sched_min_granularity)
1240 		return;
1241 
1242 	se = __pick_first_entity(cfs_rq);
1243 	delta = curr->vruntime - se->vruntime;
1244 
1245 	if (delta < 0)
1246 		return;
1247 
1248 	if (delta > ideal_runtime)
1249 		resched_task(rq_of(cfs_rq)->curr);
1250 }
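
/*
 * Editor's note -- example (illustrative only). With an ideal_runtime
 * of 4ms: the current task is rescheduled once it has run 4ms past
 * prev_sum_exec_runtime; before that, it is also rescheduled if it has
 * already run at least min_granularity *and* its vruntime is more than
 * 4ms ahead of the leftmost entity's, so a task that narrowly missed
 * wakeup preemption does not have to wait for the full slice.
 */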
1251 
1252 static void
1253 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
1254 {
1255 	/* 'current' is not kept within the tree. */
1256 	if (se->on_rq) {
1257 		/*
1258 		 * Any task has to be enqueued before it gets to execute on
1259 		 * a CPU. So account for the time it spent waiting on the
1260 		 * runqueue.
1261 		 */
1262 		update_stats_wait_end(cfs_rq, se);
1263 		__dequeue_entity(cfs_rq, se);
1264 	}
1265 
1266 	update_stats_curr_start(cfs_rq, se);
1267 	cfs_rq->curr = se;
1268 #ifdef CONFIG_SCHEDSTATS
1269 	/*
1270 	 * Track our maximum slice length, if the CPU's load is at
1271 	 * least twice that of our own weight (i.e. don't track it
1272 	 * when there are only lesser-weight tasks around):
1273 	 */
1274 	if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
1275 		se->statistics.slice_max = max(se->statistics.slice_max,
1276 			se->sum_exec_runtime - se->prev_sum_exec_runtime);
1277 	}
1278 #endif
1279 	se->prev_sum_exec_runtime = se->sum_exec_runtime;
1280 }
1281 
1282 static int
1283 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
1284 
1285 /*
1286  * Pick the next process, keeping these things in mind, in this order:
1287  * 1) keep things fair between processes/task groups
1288  * 2) pick the "next" process, since someone really wants that to run
1289  * 3) pick the "last" process, for cache locality
1290  * 4) do not run the "skip" process, if something else is available
1291  */
1292 static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
1293 {
1294 	struct sched_entity *se = __pick_first_entity(cfs_rq);
1295 	struct sched_entity *left = se;
1296 
1297 	/*
1298 	 * Avoid running the skip buddy, if running something else can
1299 	 * be done without getting too unfair.
1300 	 */
1301 	if (cfs_rq->skip == se) {
1302 		struct sched_entity *second = __pick_next_entity(se);
1303 		if (second && wakeup_preempt_entity(second, left) < 1)
1304 			se = second;
1305 	}
1306 
1307 	/*
1308 	 * Prefer last buddy, try to return the CPU to a preempted task.
1309 	 */
1310 	if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
1311 		se = cfs_rq->last;
1312 
1313 	/*
1314 	 * Someone really wants this to run. If it's not unfair, run it.
1315 	 */
1316 	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
1317 		se = cfs_rq->next;
1318 
1319 	clear_buddies(cfs_rq, se);
1320 
1321 	return se;
1322 }
1323 
1324 static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
1325 
1326 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
1327 {
1328 	/*
1329 	 * If still on the runqueue then deactivate_task()
1330 	 * was not called and update_curr() has to be done:
1331 	 */
1332 	if (prev->on_rq)
1333 		update_curr(cfs_rq);
1334 
1335 	/* throttle cfs_rqs exceeding runtime */
1336 	check_cfs_rq_runtime(cfs_rq);
1337 
1338 	check_spread(cfs_rq, prev);
1339 	if (prev->on_rq) {
1340 		update_stats_wait_start(cfs_rq, prev);
1341 		/* Put 'current' back into the tree. */
1342 		__enqueue_entity(cfs_rq, prev);
1343 	}
1344 	cfs_rq->curr = NULL;
1345 }
1346 
1347 static void
1348 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
1349 {
1350 	/*
1351 	 * Update run-time statistics of the 'current'.
1352 	 */
1353 	update_curr(cfs_rq);
1354 
1355 	/*
1356 	 * Update share accounting for long-running entities.
1357 	 */
1358 	update_entity_shares_tick(cfs_rq);
1359 
1360 #ifdef CONFIG_SCHED_HRTICK
1361 	/*
1362 	 * queued ticks are scheduled to match the slice, so don't bother
1363 	 * validating it and just reschedule.
1364 	 */
1365 	if (queued) {
1366 		resched_task(rq_of(cfs_rq)->curr);
1367 		return;
1368 	}
1369 	/*
1370 	 * don't let the period tick interfere with the hrtick preemption
1371 	 */
1372 	if (!sched_feat(DOUBLE_TICK) &&
1373 			hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
1374 		return;
1375 #endif
1376 
1377 	if (cfs_rq->nr_running > 1)
1378 		check_preempt_tick(cfs_rq, curr);
1379 }
1380 
1381 
1382 /**************************************************
1383  * CFS bandwidth control machinery
1384  */
1385 
1386 #ifdef CONFIG_CFS_BANDWIDTH
1387 
1388 #ifdef HAVE_JUMP_LABEL
1389 static struct static_key __cfs_bandwidth_used;
1390 
1391 static inline bool cfs_bandwidth_used(void)
1392 {
1393 	return static_key_false(&__cfs_bandwidth_used);
1394 }
1395 
1396 void account_cfs_bandwidth_used(int enabled, int was_enabled)
1397 {
1398 	/* only need to count groups transitioning between enabled/!enabled */
1399 	if (enabled && !was_enabled)
1400 		static_key_slow_inc(&__cfs_bandwidth_used);
1401 	else if (!enabled && was_enabled)
1402 		static_key_slow_dec(&__cfs_bandwidth_used);
1403 }
1404 #else /* HAVE_JUMP_LABEL */
1405 static bool cfs_bandwidth_used(void)
1406 {
1407 	return true;
1408 }
1409 
1410 void account_cfs_bandwidth_used(int enabled, int was_enabled) {}
1411 #endif /* HAVE_JUMP_LABEL */
1412 
1413 /*
1414  * default period for cfs group bandwidth.
1415  * default: 0.1s, units: nanoseconds
1416  */
1417 static inline u64 default_cfs_period(void)
1418 {
1419 	return 100000000ULL;
1420 }
1421 
1422 static inline u64 sched_cfs_bandwidth_slice(void)
1423 {
1424 	return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
1425 }
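
/*
 * Editor's note -- example with the defaults (illustrative only):
 * period = 100ms (default_cfs_period()) and a quota of, say, 50ms
 * configured by the administrator (the default quota is RUNTIME_INF,
 * i.e. unlimited). Each cfs_rq that runs pulls runtime from the global
 * pool in 5ms slices (sysctl_sched_cfs_bandwidth_slice); once the 50ms
 * pool is exhausted, further requests fail and the hierarchy is
 * throttled until the period timer refills the quota.
 */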
1426 
1427 /*
1428  * Replenish runtime according to assigned quota and update expiration time.
1429  * We use sched_clock_cpu directly instead of rq->clock to avoid adding
1430  * additional synchronization around rq->lock.
1431  *
1432  * requires cfs_b->lock
1433  */
1434 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
1435 {
1436 	u64 now;
1437 
1438 	if (cfs_b->quota == RUNTIME_INF)
1439 		return;
1440 
1441 	now = sched_clock_cpu(smp_processor_id());
1442 	cfs_b->runtime = cfs_b->quota;
1443 	cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
1444 }
1445 
1446 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
1447 {
1448 	return &tg->cfs_bandwidth;
1449 }
1450 
1451 /* returns 0 on failure to allocate runtime */
1452 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
1453 {
1454 	struct task_group *tg = cfs_rq->tg;
1455 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
1456 	u64 amount = 0, min_amount, expires;
1457 
1458 	/* note: this is a positive sum as runtime_remaining <= 0 */
1459 	min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
1460 
1461 	raw_spin_lock(&cfs_b->lock);
1462 	if (cfs_b->quota == RUNTIME_INF)
1463 		amount = min_amount;
1464 	else {
1465 		/*
1466 		 * If the bandwidth pool has become inactive, then at least one
1467 		 * period must have elapsed since the last consumption.
1468 		 * Refresh the global state and ensure the bandwidth timer becomes
1469 		 * active.
1470 		 */
1471 		if (!cfs_b->timer_active) {
1472 			__refill_cfs_bandwidth_runtime(cfs_b);
1473 			__start_cfs_bandwidth(cfs_b);
1474 		}
1475 
1476 		if (cfs_b->runtime > 0) {
1477 			amount = min(cfs_b->runtime, min_amount);
1478 			cfs_b->runtime -= amount;
1479 			cfs_b->idle = 0;
1480 		}
1481 	}
1482 	expires = cfs_b->runtime_expires;
1483 	raw_spin_unlock(&cfs_b->lock);
1484 
1485 	cfs_rq->runtime_remaining += amount;
1486 	/*
1487 	 * we may have advanced our local expiration to account for allowed
1488 	 * spread between our sched_clock and the one on which runtime was
1489 	 * issued.
1490 	 */
1491 	if ((s64)(expires - cfs_rq->runtime_expires) > 0)
1492 		cfs_rq->runtime_expires = expires;
1493 
1494 	return cfs_rq->runtime_remaining > 0;
1495 }
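
/*
 * Editor's note -- worked example (illustrative only). If the slice is
 * 5ms and the cfs_rq has already overrun by 2ms (runtime_remaining ==
 * -2ms), min_amount = 5ms - (-2ms) = 7ms: enough to pay off the debt
 * and still leave a full slice. If the global pool only has 3ms left,
 * amount = 3ms and runtime_remaining becomes 1ms, so the cfs_rq may
 * keep running for now.
 */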
1496 
1497 /*
1498  * Note: This depends on the synchronization provided by sched_clock and the
1499  * fact that rq->clock snapshots this value.
1500  */
1501 static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
1502 {
1503 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
1504 	struct rq *rq = rq_of(cfs_rq);
1505 
1506 	/* if the deadline is ahead of our clock, nothing to do */
1507 	if (likely((s64)(rq->clock - cfs_rq->runtime_expires) < 0))
1508 		return;
1509 
1510 	if (cfs_rq->runtime_remaining < 0)
1511 		return;
1512 
1513 	/*
1514 	 * If the local deadline has passed we have to consider the
1515 	 * possibility that our sched_clock is 'fast' and the global deadline
1516 	 * has not truly expired.
1517 	 *
1518 	 * Fortunately we can determine whether this is the case by checking
1519 	 * whether the global deadline has advanced.
1520 	 */
1521 
1522 	if ((s64)(cfs_rq->runtime_expires - cfs_b->runtime_expires) >= 0) {
1523 		/* extend local deadline, drift is bounded above by 2 ticks */
1524 		cfs_rq->runtime_expires += TICK_NSEC;
1525 	} else {
1526 		/* global deadline is ahead, expiration has passed */
1527 		cfs_rq->runtime_remaining = 0;
1528 	}
1529 }
1530 
1531 static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
1532 				     unsigned long delta_exec)
1533 {
1534 	/* dock delta_exec before expiring quota (as it could span periods) */
1535 	cfs_rq->runtime_remaining -= delta_exec;
1536 	expire_cfs_rq_runtime(cfs_rq);
1537 
1538 	if (likely(cfs_rq->runtime_remaining > 0))
1539 		return;
1540 
1541 	/*
1542 	 * if we're unable to extend our runtime we resched so that the active
1543 	 * hierarchy can be throttled
1544 	 */
1545 	if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
1546 		resched_task(rq_of(cfs_rq)->curr);
1547 }
1548 
1549 static __always_inline
1550 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec)
1551 {
1552 	if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
1553 		return;
1554 
1555 	__account_cfs_rq_runtime(cfs_rq, delta_exec);
1556 }
1557 
1558 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
1559 {
1560 	return cfs_bandwidth_used() && cfs_rq->throttled;
1561 }
1562 
1563 /* check whether cfs_rq, or any parent, is throttled */
1564 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
1565 {
1566 	return cfs_bandwidth_used() && cfs_rq->throttle_count;
1567 }
1568 
1569 /*
1570  * Ensure that neither of the group entities corresponding to src_cpu or
1571  * dest_cpu is a member of a throttled hierarchy when performing group
1572  * load-balance operations.
1573  */
1574 static inline int throttled_lb_pair(struct task_group *tg,
1575 				    int src_cpu, int dest_cpu)
1576 {
1577 	struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
1578 
1579 	src_cfs_rq = tg->cfs_rq[src_cpu];
1580 	dest_cfs_rq = tg->cfs_rq[dest_cpu];
1581 
1582 	return throttled_hierarchy(src_cfs_rq) ||
1583 	       throttled_hierarchy(dest_cfs_rq);
1584 }
1585 
1586 /* updated child weight may affect parent so we have to do this bottom up */
1587 static int tg_unthrottle_up(struct task_group *tg, void *data)
1588 {
1589 	struct rq *rq = data;
1590 	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
1591 
1592 	cfs_rq->throttle_count--;
1593 #ifdef CONFIG_SMP
1594 	if (!cfs_rq->throttle_count) {
1595 		u64 delta = rq->clock_task - cfs_rq->load_stamp;
1596 
1597 		/* leaving throttled state, advance shares averaging windows */
1598 		cfs_rq->load_stamp += delta;
1599 		cfs_rq->load_last += delta;
1600 
1601 		/* update entity weight now that we are on_rq again */
1602 		update_cfs_shares(cfs_rq);
1603 	}
1604 #endif
1605 
1606 	return 0;
1607 }
1608 
1609 static int tg_throttle_down(struct task_group *tg, void *data)
1610 {
1611 	struct rq *rq = data;
1612 	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
1613 
1614 	/* group is entering throttled state, record last load */
1615 	if (!cfs_rq->throttle_count)
1616 		update_cfs_load(cfs_rq, 0);
1617 	cfs_rq->throttle_count++;
1618 
1619 	return 0;
1620 }
1621 
1622 static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
1623 {
1624 	struct rq *rq = rq_of(cfs_rq);
1625 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
1626 	struct sched_entity *se;
1627 	long task_delta, dequeue = 1;
1628 
1629 	se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
1630 
1631 	/* account load preceding throttle */
1632 	rcu_read_lock();
1633 	walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
1634 	rcu_read_unlock();
1635 
1636 	task_delta = cfs_rq->h_nr_running;
1637 	for_each_sched_entity(se) {
1638 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
1639 		/* throttled entity or throttle-on-deactivate */
1640 		if (!se->on_rq)
1641 			break;
1642 
1643 		if (dequeue)
1644 			dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
1645 		qcfs_rq->h_nr_running -= task_delta;
1646 
1647 		if (qcfs_rq->load.weight)
1648 			dequeue = 0;
1649 	}
1650 
1651 	if (!se)
1652 		rq->nr_running -= task_delta;
1653 
1654 	cfs_rq->throttled = 1;
1655 	cfs_rq->throttled_timestamp = rq->clock;
1656 	raw_spin_lock(&cfs_b->lock);
1657 	list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
1658 	raw_spin_unlock(&cfs_b->lock);
1659 }
1660 
1661 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
1662 {
1663 	struct rq *rq = rq_of(cfs_rq);
1664 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
1665 	struct sched_entity *se;
1666 	int enqueue = 1;
1667 	long task_delta;
1668 
1669 	se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
1670 
1671 	cfs_rq->throttled = 0;
1672 	raw_spin_lock(&cfs_b->lock);
1673 	cfs_b->throttled_time += rq->clock - cfs_rq->throttled_timestamp;
1674 	list_del_rcu(&cfs_rq->throttled_list);
1675 	raw_spin_unlock(&cfs_b->lock);
1676 	cfs_rq->throttled_timestamp = 0;
1677 
1678 	update_rq_clock(rq);
1679 	/* update hierarchical throttle state */
1680 	walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
1681 
1682 	if (!cfs_rq->load.weight)
1683 		return;
1684 
1685 	task_delta = cfs_rq->h_nr_running;
1686 	for_each_sched_entity(se) {
1687 		if (se->on_rq)
1688 			enqueue = 0;
1689 
1690 		cfs_rq = cfs_rq_of(se);
1691 		if (enqueue)
1692 			enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
1693 		cfs_rq->h_nr_running += task_delta;
1694 
1695 		if (cfs_rq_throttled(cfs_rq))
1696 			break;
1697 	}
1698 
1699 	if (!se)
1700 		rq->nr_running += task_delta;
1701 
1702 	/* determine whether we need to wake up potentially idle cpu */
1703 	if (rq->curr == rq->idle && rq->cfs.nr_running)
1704 		resched_task(rq->curr);
1705 }
1706 
1707 static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
1708 		u64 remaining, u64 expires)
1709 {
1710 	struct cfs_rq *cfs_rq;
1711 	u64 runtime = remaining;
1712 
1713 	rcu_read_lock();
1714 	list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
1715 				throttled_list) {
1716 		struct rq *rq = rq_of(cfs_rq);
1717 
1718 		raw_spin_lock(&rq->lock);
1719 		if (!cfs_rq_throttled(cfs_rq))
1720 			goto next;
1721 
1722 		runtime = -cfs_rq->runtime_remaining + 1;
1723 		if (runtime > remaining)
1724 			runtime = remaining;
1725 		remaining -= runtime;
1726 
1727 		cfs_rq->runtime_remaining += runtime;
1728 		cfs_rq->runtime_expires = expires;
1729 
1730 		/* we check whether we're throttled above */
1731 		if (cfs_rq->runtime_remaining > 0)
1732 			unthrottle_cfs_rq(cfs_rq);
1733 
1734 next:
1735 		raw_spin_unlock(&rq->lock);
1736 
1737 		if (!remaining)
1738 			break;
1739 	}
1740 	rcu_read_unlock();
1741 
1742 	return remaining;
1743 }
1744 
1745 /*
1746  * Responsible for refilling a task_group's bandwidth and unthrottling its
1747  * cfs_rqs as appropriate. If there has been no activity within the last
1748  * period the timer is deactivated until scheduling resumes; cfs_b->idle is
1749  * used to track this state.
1750  */
1751 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
1752 {
1753 	u64 runtime, runtime_expires;
1754 	int idle = 1, throttled;
1755 
1756 	raw_spin_lock(&cfs_b->lock);
1757 	/* no need to continue the timer with no bandwidth constraint */
1758 	if (cfs_b->quota == RUNTIME_INF)
1759 		goto out_unlock;
1760 
1761 	throttled = !list_empty(&cfs_b->throttled_cfs_rq);
1762 	/* idle depends on !throttled (for the case of a large deficit) */
1763 	idle = cfs_b->idle && !throttled;
1764 	cfs_b->nr_periods += overrun;
1765 
1766 	/* if we're going inactive then everything else can be deferred */
1767 	if (idle)
1768 		goto out_unlock;
1769 
1770 	__refill_cfs_bandwidth_runtime(cfs_b);
1771 
1772 	if (!throttled) {
1773 		/* mark as potentially idle for the upcoming period */
1774 		cfs_b->idle = 1;
1775 		goto out_unlock;
1776 	}
1777 
1778 	/* account preceding periods in which throttling occurred */
1779 	cfs_b->nr_throttled += overrun;
1780 
1781 	/*
1782 	 * There are throttled entities so we must first use the new bandwidth
1783 	 * to unthrottle them before making it generally available.  This
1784 	 * ensures that all existing debts will be paid before a new cfs_rq is
1785 	 * allowed to run.
1786 	 */
1787 	runtime = cfs_b->runtime;
1788 	runtime_expires = cfs_b->runtime_expires;
1789 	cfs_b->runtime = 0;
1790 
1791 	/*
1792 	 * This check is repeated as we are holding onto the new bandwidth
1793 	 * while we unthrottle.  This can potentially race with an unthrottled
1794 	 * group trying to acquire new bandwidth from the global pool.
1795 	 */
1796 	while (throttled && runtime > 0) {
1797 		raw_spin_unlock(&cfs_b->lock);
1798 		/* we can't nest cfs_b->lock while distributing bandwidth */
1799 		runtime = distribute_cfs_runtime(cfs_b, runtime,
1800 						 runtime_expires);
1801 		raw_spin_lock(&cfs_b->lock);
1802 
1803 		throttled = !list_empty(&cfs_b->throttled_cfs_rq);
1804 	}
1805 
1806 	/* return (any) remaining runtime */
1807 	cfs_b->runtime = runtime;
1808 	/*
1809 	 * While we are ensured activity in the period following an
1810 	 * unthrottle, this also covers the case in which the new bandwidth is
1811 	 * insufficient to cover the existing bandwidth deficit.  (Forcing the
1812 	 * timer to remain active while there are any throttled entities.)
1813 	 */
1814 	cfs_b->idle = 0;
1815 out_unlock:
1816 	if (idle)
1817 		cfs_b->timer_active = 0;
1818 	raw_spin_unlock(&cfs_b->lock);
1819 
1820 	return idle;
1821 }
1822 
1823 /* a cfs_rq won't donate quota below this amount */
1824 static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
1825 /* minimum remaining period time to redistribute slack quota */
1826 static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
1827 /* how long we wait to gather additional slack before distributing */
1828 static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
1829 
1830 /* are we near the end of the current quota period? */
1831 static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
1832 {
1833 	struct hrtimer *refresh_timer = &cfs_b->period_timer;
1834 	u64 remaining;
1835 
1836 	/* if the callback is running, a quota refresh is already occurring */
1837 	if (hrtimer_callback_running(refresh_timer))
1838 		return 1;
1839 
1840 	/* is a quota refresh about to occur? */
1841 	remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
1842 	if (remaining < min_expire)
1843 		return 1;
1844 
1845 	return 0;
1846 }
1847 
1848 static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
1849 {
1850 	u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
1851 
1852 	/* if there's a quota refresh soon don't bother with slack */
1853 	if (runtime_refresh_within(cfs_b, min_left))
1854 		return;
1855 
1856 	start_bandwidth_timer(&cfs_b->slack_timer,
1857 				ns_to_ktime(cfs_bandwidth_slack_period));
1858 }
1859 
1860 /* we know any runtime found here is valid as update_curr() precedes return */
1861 static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
1862 {
1863 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
1864 	s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
1865 
1866 	if (slack_runtime <= 0)
1867 		return;
1868 
1869 	raw_spin_lock(&cfs_b->lock);
1870 	if (cfs_b->quota != RUNTIME_INF &&
1871 	    cfs_rq->runtime_expires == cfs_b->runtime_expires) {
1872 		cfs_b->runtime += slack_runtime;
1873 
1874 		/* we are under rq->lock, defer unthrottling using a timer */
1875 		if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
1876 		    !list_empty(&cfs_b->throttled_cfs_rq))
1877 			start_cfs_slack_bandwidth(cfs_b);
1878 	}
1879 	raw_spin_unlock(&cfs_b->lock);
1880 
1881 	/* even if it's not valid for return we don't want to try again */
1882 	cfs_rq->runtime_remaining -= slack_runtime;
1883 }
1884 
1885 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
1886 {
1887 	if (!cfs_bandwidth_used())
1888 		return;
1889 
1890 	if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
1891 		return;
1892 
1893 	__return_cfs_rq_runtime(cfs_rq);
1894 }
1895 
1896 /*
1897  * This is done with a timer (instead of inline with bandwidth return) since
1898  * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
1899  */
1900 static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
1901 {
1902 	u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
1903 	u64 expires;
1904 
1905 	/* confirm we're still not at a refresh boundary */
1906 	if (runtime_refresh_within(cfs_b, min_bandwidth_expiration))
1907 		return;
1908 
1909 	raw_spin_lock(&cfs_b->lock);
1910 	if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) {
1911 		runtime = cfs_b->runtime;
1912 		cfs_b->runtime = 0;
1913 	}
1914 	expires = cfs_b->runtime_expires;
1915 	raw_spin_unlock(&cfs_b->lock);
1916 
1917 	if (!runtime)
1918 		return;
1919 
1920 	runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
1921 
1922 	raw_spin_lock(&cfs_b->lock);
1923 	if (expires == cfs_b->runtime_expires)
1924 		cfs_b->runtime = runtime;
1925 	raw_spin_unlock(&cfs_b->lock);
1926 }
1927 
1928 /*
1929  * When a group wakes up we want to make sure that its quota is not already
1930  * expired/exceeded, otherwise it may be allowed to steal additional ticks of
1931  * runtime as update_curr() throttling cannot trigger until it's on-rq.
1932  */
1933 static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
1934 {
1935 	if (!cfs_bandwidth_used())
1936 		return;
1937 
1938 	/* an active group must be handled by the update_curr()->put() path */
1939 	if (!cfs_rq->runtime_enabled || cfs_rq->curr)
1940 		return;
1941 
1942 	/* ensure the group is not already throttled */
1943 	if (cfs_rq_throttled(cfs_rq))
1944 		return;
1945 
1946 	/* update runtime allocation */
1947 	account_cfs_rq_runtime(cfs_rq, 0);
1948 	if (cfs_rq->runtime_remaining <= 0)
1949 		throttle_cfs_rq(cfs_rq);
1950 }
1951 
1952 /* conditionally throttle active cfs_rq's from put_prev_entity() */
1953 static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
1954 {
1955 	if (!cfs_bandwidth_used())
1956 		return;
1957 
1958 	if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
1959 		return;
1960 
1961 	/*
1962 	 * it's possible for a throttled entity to be forced into a running
1963 	 * state (e.g. set_curr_task), in this case we're finished.
1964 	 */
1965 	if (cfs_rq_throttled(cfs_rq))
1966 		return;
1967 
1968 	throttle_cfs_rq(cfs_rq);
1969 }
1970 
1971 static inline u64 default_cfs_period(void);
1972 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun);
1973 static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b);
1974 
1975 static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
1976 {
1977 	struct cfs_bandwidth *cfs_b =
1978 		container_of(timer, struct cfs_bandwidth, slack_timer);
1979 	do_sched_cfs_slack_timer(cfs_b);
1980 
1981 	return HRTIMER_NORESTART;
1982 }
1983 
1984 static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
1985 {
1986 	struct cfs_bandwidth *cfs_b =
1987 		container_of(timer, struct cfs_bandwidth, period_timer);
1988 	ktime_t now;
1989 	int overrun;
1990 	int idle = 0;
1991 
1992 	for (;;) {
1993 		now = hrtimer_cb_get_time(timer);
1994 		overrun = hrtimer_forward(timer, now, cfs_b->period);
1995 
1996 		if (!overrun)
1997 			break;
1998 
1999 		idle = do_sched_cfs_period_timer(cfs_b, overrun);
2000 	}
2001 
2002 	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
2003 }
2004 
2005 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2006 {
2007 	raw_spin_lock_init(&cfs_b->lock);
2008 	cfs_b->runtime = 0;
2009 	cfs_b->quota = RUNTIME_INF;
2010 	cfs_b->period = ns_to_ktime(default_cfs_period());
2011 
2012 	INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
2013 	hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2014 	cfs_b->period_timer.function = sched_cfs_period_timer;
2015 	hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2016 	cfs_b->slack_timer.function = sched_cfs_slack_timer;
2017 }
2018 
2019 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2020 {
2021 	cfs_rq->runtime_enabled = 0;
2022 	INIT_LIST_HEAD(&cfs_rq->throttled_list);
2023 }
2024 
2025 /* requires cfs_b->lock, may release to reprogram timer */
2026 void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2027 {
2028 	/*
2029 	 * The timer may be active because we're trying to set a new bandwidth
2030 	 * period or because we're racing with the tear-down path
2031 	 * (timer_active==0 becomes visible before the hrtimer call-back
2032 	 * terminates).  In either case we ensure that it's re-programmed
2033 	 * terminates).  In either case we ensure that it's re-programmed.
2034 	while (unlikely(hrtimer_active(&cfs_b->period_timer))) {
2035 		raw_spin_unlock(&cfs_b->lock);
2036 		/* ensure cfs_b->lock is available while we wait */
2037 		hrtimer_cancel(&cfs_b->period_timer);
2038 
2039 		raw_spin_lock(&cfs_b->lock);
2040 		/* if someone else restarted the timer then we're done */
2041 		if (cfs_b->timer_active)
2042 			return;
2043 	}
2044 
2045 	cfs_b->timer_active = 1;
2046 	start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
2047 }
2048 
2049 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2050 {
2051 	hrtimer_cancel(&cfs_b->period_timer);
2052 	hrtimer_cancel(&cfs_b->slack_timer);
2053 }
2054 
2055 void unthrottle_offline_cfs_rqs(struct rq *rq)
2056 {
2057 	struct cfs_rq *cfs_rq;
2058 
2059 	for_each_leaf_cfs_rq(rq, cfs_rq) {
2060 		struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2061 
2062 		if (!cfs_rq->runtime_enabled)
2063 			continue;
2064 
2065 		/*
2066 		 * clock_task is not advancing so we just need to make sure
2067 		 * there's some valid quota amount
2068 		 */
2069 		cfs_rq->runtime_remaining = cfs_b->quota;
2070 		if (cfs_rq_throttled(cfs_rq))
2071 			unthrottle_cfs_rq(cfs_rq);
2072 	}
2073 }
2074 
2075 #else /* CONFIG_CFS_BANDWIDTH */
2076 static __always_inline
2077 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec) {}
2078 static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
2079 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
2080 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
2081 
2082 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
2083 {
2084 	return 0;
2085 }
2086 
2087 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
2088 {
2089 	return 0;
2090 }
2091 
2092 static inline int throttled_lb_pair(struct task_group *tg,
2093 				    int src_cpu, int dest_cpu)
2094 {
2095 	return 0;
2096 }
2097 
2098 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
2099 
2100 #ifdef CONFIG_FAIR_GROUP_SCHED
2101 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
2102 #endif
2103 
2104 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
2105 {
2106 	return NULL;
2107 }
2108 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
2109 void unthrottle_offline_cfs_rqs(struct rq *rq) {}
2110 
2111 #endif /* CONFIG_CFS_BANDWIDTH */
2112 
2113 /**************************************************
2114  * CFS operations on tasks:
2115  */
2116 
2117 #ifdef CONFIG_SCHED_HRTICK
2118 static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
2119 {
2120 	struct sched_entity *se = &p->se;
2121 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
2122 
2123 	WARN_ON(task_rq(p) != rq);
2124 
2125 	if (cfs_rq->nr_running > 1) {
2126 		u64 slice = sched_slice(cfs_rq, se);
2127 		u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
2128 		s64 delta = slice - ran;
2129 
2130 		if (delta < 0) {
2131 			if (rq->curr == p)
2132 				resched_task(p);
2133 			return;
2134 		}
2135 
2136 		/*
2137 		 * Don't schedule slices shorter than 10000ns, that just
2138 		 * doesn't make sense. Rely on vruntime for fairness.
2139 		 */
2140 		if (rq->curr != p)
2141 			delta = max_t(s64, 10000LL, delta);
2142 
2143 		hrtick_start(rq, delta);
2144 	}
2145 }
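
/*
 * Example (illustrative): with more than one task queued, a computed
 * sched_slice() of 6ms and 2ms already consumed since the last switch,
 * delta is 4ms and the hrtick fires 4ms out; if the slice is already
 * overrun (delta < 0) the current task is simply rescheduled instead.
 */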
2146 
2147 /*
2148  * called from enqueue/dequeue and updates the hrtick when the
2149  * current task is from our class and nr_running is low enough
2150  * to matter.
2151  */
2152 static void hrtick_update(struct rq *rq)
2153 {
2154 	struct task_struct *curr = rq->curr;
2155 
2156 	if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
2157 		return;
2158 
2159 	if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
2160 		hrtick_start_fair(rq, curr);
2161 }
2162 #else /* !CONFIG_SCHED_HRTICK */
2163 static inline void
2164 hrtick_start_fair(struct rq *rq, struct task_struct *p)
2165 {
2166 }
2167 
2168 static inline void hrtick_update(struct rq *rq)
2169 {
2170 }
2171 #endif
2172 
2173 /*
2174  * The enqueue_task method is called before nr_running is
2175  * increased. Here we update the fair scheduling stats and
2176  * then put the task into the rbtree:
2177  */
2178 static void
2179 enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
2180 {
2181 	struct cfs_rq *cfs_rq;
2182 	struct sched_entity *se = &p->se;
2183 
2184 	for_each_sched_entity(se) {
2185 		if (se->on_rq)
2186 			break;
2187 		cfs_rq = cfs_rq_of(se);
2188 		enqueue_entity(cfs_rq, se, flags);
2189 
2190 		/*
2191 		 * end evaluation on encountering a throttled cfs_rq
2192 		 *
2193 		 * note: in the case of encountering a throttled cfs_rq we will
2194 		 * post the final h_nr_running increment below.
2195 		 */
2196 		if (cfs_rq_throttled(cfs_rq))
2197 			break;
2198 		cfs_rq->h_nr_running++;
2199 
2200 		flags = ENQUEUE_WAKEUP;
2201 	}
2202 
2203 	for_each_sched_entity(se) {
2204 		cfs_rq = cfs_rq_of(se);
2205 		cfs_rq->h_nr_running++;
2206 
2207 		if (cfs_rq_throttled(cfs_rq))
2208 			break;
2209 
2210 		update_cfs_load(cfs_rq, 0);
2211 		update_cfs_shares(cfs_rq);
2212 	}
2213 
2214 	if (!se)
2215 		inc_nr_running(rq);
2216 	hrtick_update(rq);
2217 }
2218 
2219 static void set_next_buddy(struct sched_entity *se);
2220 
2221 /*
2222  * The dequeue_task method is called before nr_running is
2223  * decreased. We remove the task from the rbtree and
2224  * update the fair scheduling stats:
2225  */
2226 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
2227 {
2228 	struct cfs_rq *cfs_rq;
2229 	struct sched_entity *se = &p->se;
2230 	int task_sleep = flags & DEQUEUE_SLEEP;
2231 
2232 	for_each_sched_entity(se) {
2233 		cfs_rq = cfs_rq_of(se);
2234 		dequeue_entity(cfs_rq, se, flags);
2235 
2236 		/*
2237 		 * end evaluation on encountering a throttled cfs_rq
2238 		 *
2239 		 * note: in the case of encountering a throttled cfs_rq we will
2240 		 * post the final h_nr_running decrement below.
2241 		 */
2242 		if (cfs_rq_throttled(cfs_rq))
2243 			break;
2244 		cfs_rq->h_nr_running--;
2245 
2246 		/* Don't dequeue parent if it has other entities besides us */
2247 		if (cfs_rq->load.weight) {
2248 			/*
2249 			 * Bias pick_next to pick a task from this cfs_rq, as
2250 			 * p is sleeping when it is within its sched_slice.
2251 			 */
2252 			if (task_sleep && parent_entity(se))
2253 				set_next_buddy(parent_entity(se));
2254 
2255 			/* avoid re-evaluating load for this entity */
2256 			se = parent_entity(se);
2257 			break;
2258 		}
2259 		flags |= DEQUEUE_SLEEP;
2260 	}
2261 
2262 	for_each_sched_entity(se) {
2263 		cfs_rq = cfs_rq_of(se);
2264 		cfs_rq->h_nr_running--;
2265 
2266 		if (cfs_rq_throttled(cfs_rq))
2267 			break;
2268 
2269 		update_cfs_load(cfs_rq, 0);
2270 		update_cfs_shares(cfs_rq);
2271 	}
2272 
2273 	if (!se)
2274 		dec_nr_running(rq);
2275 	hrtick_update(rq);
2276 }
2277 
2278 #ifdef CONFIG_SMP
2279 /* Used instead of source_load when we know the type == 0 */
2280 static unsigned long weighted_cpuload(const int cpu)
2281 {
2282 	return cpu_rq(cpu)->load.weight;
2283 }
2284 
2285 /*
2286  * Return a low guess at the load of a migration-source cpu weighted
2287  * according to the scheduling class and "nice" value.
2288  *
2289  * We want to under-estimate the load of migration sources, to
2290  * balance conservatively.
2291  */
2292 static unsigned long source_load(int cpu, int type)
2293 {
2294 	struct rq *rq = cpu_rq(cpu);
2295 	unsigned long total = weighted_cpuload(cpu);
2296 
2297 	if (type == 0 || !sched_feat(LB_BIAS))
2298 		return total;
2299 
2300 	return min(rq->cpu_load[type-1], total);
2301 }
2302 
2303 /*
2304  * Return a high guess at the load of a migration-target cpu weighted
2305  * according to the scheduling class and "nice" value.
2306  */
2307 static unsigned long target_load(int cpu, int type)
2308 {
2309 	struct rq *rq = cpu_rq(cpu);
2310 	unsigned long total = weighted_cpuload(cpu);
2311 
2312 	if (type == 0 || !sched_feat(LB_BIAS))
2313 		return total;
2314 
2315 	return max(rq->cpu_load[type-1], total);
2316 }
2317 
2318 static unsigned long power_of(int cpu)
2319 {
2320 	return cpu_rq(cpu)->cpu_power;
2321 }
2322 
2323 static unsigned long cpu_avg_load_per_task(int cpu)
2324 {
2325 	struct rq *rq = cpu_rq(cpu);
2326 	unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
2327 
2328 	if (nr_running)
2329 		return rq->load.weight / nr_running;
2330 
2331 	return 0;
2332 }
2333 
2334 
2335 static void task_waking_fair(struct task_struct *p)
2336 {
2337 	struct sched_entity *se = &p->se;
2338 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
2339 	u64 min_vruntime;
2340 
2341 #ifndef CONFIG_64BIT
2342 	u64 min_vruntime_copy;
2343 
2344 	do {
2345 		min_vruntime_copy = cfs_rq->min_vruntime_copy;
2346 		smp_rmb();
2347 		min_vruntime = cfs_rq->min_vruntime;
2348 	} while (min_vruntime != min_vruntime_copy);
2349 #else
2350 	min_vruntime = cfs_rq->min_vruntime;
2351 #endif
2352 
2353 	se->vruntime -= min_vruntime;
2354 }
2355 
2356 #ifdef CONFIG_FAIR_GROUP_SCHED
2357 /*
2358  * effective_load() calculates the load change as seen from the root_task_group
2359  *
2360  * Adding load to a group doesn't make a group heavier, but can cause movement
2361  * of group shares between cpus. Assuming the shares were perfectly aligned one
2362  * can calculate the shift in shares.
2363  *
2364  * Calculate the effective load difference if @wl is added (subtracted) to @tg
2365  * on this @cpu and results in a total addition (subtraction) of @wg to the
2366  * total group weight.
2367  *
2368  * Given a runqueue weight distribution (rw_i) we can compute a shares
2369  * distribution (s_i) using:
2370  *
2371  *   s_i = rw_i / \Sum rw_j						(1)
2372  *
2373  * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
2374  * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
2375  * shares distribution (s_i):
2376  *
2377  *   rw_i = {   2,   4,   1,   0 }
2378  *   s_i  = { 2/7, 4/7, 1/7,   0 }
2379  *
2380  * As per wake_affine() we're interested in the load of two CPUs (the CPU the
2381  * task used to run on and the CPU the waker is running on), so we need to
2382  * compute the effect of waking a task on either CPU and, in case of a sync
2383  * wakeup, compute the effect of the current task going to sleep.
2384  *
2385  * So for a change of @wl to the local @cpu with an overall group weight change
2386  * of @wg we can compute the new shares distribution (s'_i) using:
2387  *
2388  *   s'_i = (rw_i + @wl) / (@wg + \Sum rw_j)				(2)
2389  *
2390  * Suppose we're interested in CPUs 0 and 1, and want to compute the load
2391  * differences in waking a task to CPU 0. The additional task changes the
2392  * weight and shares distributions like:
2393  *
2394  *   rw'_i = {   3,   4,   1,   0 }
2395  *   s'_i  = { 3/8, 4/8, 1/8,   0 }
2396  *
2397  * We can then compute the difference in effective weight by using:
2398  *
2399  *   dw_i = S * (s'_i - s_i)						(3)
2400  *
2401  * Where 'S' is the group weight as seen by its parent.
2402  *
2403  * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
2404  * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
2405  * 4/7) times the weight of the group.
2406  */
2407 static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
2408 {
2409 	struct sched_entity *se = tg->se[cpu];
2410 
2411 	if (!tg->parent)	/* the trivial, non-cgroup case */
2412 		return wl;
2413 
2414 	for_each_sched_entity(se) {
2415 		long w, W;
2416 
2417 		tg = se->my_q->tg;
2418 
2419 		/*
2420 		 * W = @wg + \Sum rw_j
2421 		 */
2422 		W = wg + calc_tg_weight(tg, se->my_q);
2423 
2424 		/*
2425 		 * w = rw_i + @wl
2426 		 */
2427 		w = se->my_q->load.weight + wl;
2428 
2429 		/*
2430 		 * wl = S * s'_i; see (2)
2431 		 */
2432 		if (W > 0 && w < W)
2433 			wl = (w * tg->shares) / W;
2434 		else
2435 			wl = tg->shares;
2436 
2437 		/*
2438 		 * Per the above, wl is the new se->load.weight value; since
2439 		 * those are clipped to [MIN_SHARES, ...) do so now. See
2440 		 * calc_cfs_shares().
2441 		 */
2442 		if (wl < MIN_SHARES)
2443 			wl = MIN_SHARES;
2444 
2445 		/*
2446 		 * wl = dw_i = S * (s'_i - s_i); see (3)
2447 		 */
2448 		wl -= se->load.weight;
2449 
2450 		/*
2451 		 * Recursively apply this logic to all parent groups to compute
2452 		 * the final effective load change on the root group. Since
2453 		 * only the @tg group gets extra weight, all parent groups can
2454 		 * only redistribute existing shares. @wl is the shift in shares
2455 		 * resulting from this level per the above.
2456 		 */
2457 		wg = 0;
2458 	}
2459 
2460 	return wl;
2461 }
2462 #else
2463 
2464 static inline unsigned long effective_load(struct task_group *tg, int cpu,
2465 		unsigned long wl, unsigned long wg)
2466 {
2467 	return wl;
2468 }
2469 
2470 #endif
2471 
2472 static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
2473 {
2474 	s64 this_load, load;
2475 	int idx, this_cpu, prev_cpu;
2476 	unsigned long tl_per_task;
2477 	struct task_group *tg;
2478 	unsigned long weight;
2479 	int balanced;
2480 
2481 	idx	  = sd->wake_idx;
2482 	this_cpu  = smp_processor_id();
2483 	prev_cpu  = task_cpu(p);
2484 	load	  = source_load(prev_cpu, idx);
2485 	this_load = target_load(this_cpu, idx);
2486 
2487 	/*
2488 	 * If sync wakeup then subtract the (maximum possible)
2489 	 * effect of the currently running task from the load
2490 	 * of the current CPU:
2491 	 */
2492 	if (sync) {
2493 		tg = task_group(current);
2494 		weight = current->se.load.weight;
2495 
2496 		this_load += effective_load(tg, this_cpu, -weight, -weight);
2497 		load += effective_load(tg, prev_cpu, 0, -weight);
2498 	}
2499 
2500 	tg = task_group(p);
2501 	weight = p->se.load.weight;
2502 
2503 	/*
2504 	 * In low-load situations, where prev_cpu is idle and this_cpu is idle
2505 	 * due to the sync cause above having dropped this_load to 0, we'll
2506 	 * always have an imbalance, but there's really nothing you can do
2507 	 * about that, so that's good too.
2508 	 *
2509 	 * Otherwise check if the two cpus are near enough in load to allow this
2510 	 * task to be woken on this_cpu.
2511 	 */
2512 	if (this_load > 0) {
2513 		s64 this_eff_load, prev_eff_load;
2514 
2515 		this_eff_load = 100;
2516 		this_eff_load *= power_of(prev_cpu);
2517 		this_eff_load *= this_load +
2518 			effective_load(tg, this_cpu, weight, weight);
2519 
2520 		prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
2521 		prev_eff_load *= power_of(this_cpu);
2522 		prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
2523 
2524 		balanced = this_eff_load <= prev_eff_load;
2525 	} else
2526 		balanced = true;
2527 
2528 	/*
2529 	 * If the currently running task will sleep within
2530 	 * a reasonable amount of time then attract this newly
2531 	 * woken task:
2532 	 */
2533 	if (sync && balanced)
2534 		return 1;
2535 
2536 	schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
2537 	tl_per_task = cpu_avg_load_per_task(this_cpu);
2538 
2539 	if (balanced ||
2540 	    (this_load <= load &&
2541 	     this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
2542 		/*
2543 		 * This domain has SD_WAKE_AFFINE and
2544 		 * p is cache cold in this domain, and
2545 		 * there is no bad imbalance.
2546 		 */
2547 		schedstat_inc(sd, ttwu_move_affine);
2548 		schedstat_inc(p, se.statistics.nr_wakeups_affine);
2549 
2550 		return 1;
2551 	}
2552 	return 0;
2553 }
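
/*
 * Illustrative numbers: with sd->imbalance_pct = 125 (a common default)
 * the previous CPU's side is weighted by 100 + (125 - 100) / 2 = 112
 * against 100 for the waking CPU, so at equal cpu_power the affine wakeup
 * counts as "balanced" whenever this_cpu's post-wakeup effective load is
 * at most ~12% above prev_cpu's.
 */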
2554 
2555 /*
2556  * find_idlest_group finds and returns the least busy CPU group within the
2557  * domain.
2558  */
2559 static struct sched_group *
2560 find_idlest_group(struct sched_domain *sd, struct task_struct *p,
2561 		  int this_cpu, int load_idx)
2562 {
2563 	struct sched_group *idlest = NULL, *group = sd->groups;
2564 	unsigned long min_load = ULONG_MAX, this_load = 0;
2565 	int imbalance = 100 + (sd->imbalance_pct-100)/2;
2566 
2567 	do {
2568 		unsigned long load, avg_load;
2569 		int local_group;
2570 		int i;
2571 
2572 		/* Skip over this group if it has no CPUs allowed */
2573 		if (!cpumask_intersects(sched_group_cpus(group),
2574 					tsk_cpus_allowed(p)))
2575 			continue;
2576 
2577 		local_group = cpumask_test_cpu(this_cpu,
2578 					       sched_group_cpus(group));
2579 
2580 		/* Tally up the load of all CPUs in the group */
2581 		avg_load = 0;
2582 
2583 		for_each_cpu(i, sched_group_cpus(group)) {
2584 			/* Bias balancing toward cpus of our domain */
2585 			if (local_group)
2586 				load = source_load(i, load_idx);
2587 			else
2588 				load = target_load(i, load_idx);
2589 
2590 			avg_load += load;
2591 		}
2592 
2593 		/* Adjust by relative CPU power of the group */
2594 		avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
2595 
2596 		if (local_group) {
2597 			this_load = avg_load;
2598 		} else if (avg_load < min_load) {
2599 			min_load = avg_load;
2600 			idlest = group;
2601 		}
2602 	} while (group = group->next, group != sd->groups);
2603 
2604 	if (!idlest || 100*this_load < imbalance*min_load)
2605 		return NULL;
2606 	return idlest;
2607 }
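
/*
 * Example (illustrative): with imbalance_pct = 125 the threshold above is
 * imbalance = 112, so a remote group is only returned when
 * 100 * this_load >= 112 * min_load, i.e. the local group is at least
 * ~12% more loaded than the idlest candidate.
 */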
2608 
2609 /*
2610  * find_idlest_cpu - find the idlest cpu among the cpus in group.
2611  */
2612 static int
2613 find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
2614 {
2615 	unsigned long load, min_load = ULONG_MAX;
2616 	int idlest = -1;
2617 	int i;
2618 
2619 	/* Traverse only the allowed CPUs */
2620 	for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
2621 		load = weighted_cpuload(i);
2622 
2623 		if (load < min_load || (load == min_load && i == this_cpu)) {
2624 			min_load = load;
2625 			idlest = i;
2626 		}
2627 	}
2628 
2629 	return idlest;
2630 }
2631 
2632 /*
2633  * Try and locate an idle CPU in the sched_domain.
2634  */
2635 static int select_idle_sibling(struct task_struct *p, int target)
2636 {
2637 	int cpu = smp_processor_id();
2638 	int prev_cpu = task_cpu(p);
2639 	struct sched_domain *sd;
2640 
2641 	/*
2642 	 * If the task is going to be woken-up on this cpu and if it is
2643 	 * already idle, then it is the right target.
2644 	 */
2645 	if (target == cpu && idle_cpu(cpu))
2646 		return cpu;
2647 
2648 	/*
2649 	 * If the task is going to be woken-up on the cpu where it previously
2650 	 * ran and if it is currently idle, then it is the right target.
2651 	 */
2652 	if (target == prev_cpu && idle_cpu(prev_cpu))
2653 		return prev_cpu;
2654 
2655 	/*
2656 	 * Otherwise, check assigned siblings to find an eligible idle cpu.
2657 	 */
2658 	sd = rcu_dereference(per_cpu(sd_llc, target));
2659 
2660 	for_each_lower_domain(sd) {
2661 		if (!cpumask_test_cpu(sd->idle_buddy, tsk_cpus_allowed(p)))
2662 			continue;
2663 		if (idle_cpu(sd->idle_buddy))
2664 			return sd->idle_buddy;
2665 	}
2666 
2667 	return target;
2668 }
2669 
2670 /*
2671  * select_task_rq_fair: balance the current task (running on cpu) in domains
2672  * that have the relevant sd_flag set. In practice, this is SD_BALANCE_FORK and
2673  * SD_BALANCE_EXEC.
2674  *
2675  * Balance, ie. select the least loaded group.
2676  * Balance, i.e. select the least loaded group.
2677  * Returns the target CPU number, or the same CPU if no balancing is needed.
2678  *
2679  * preempt must be disabled.
2680  */
2681 static int
2682 select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
2683 {
2684 	struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
2685 	int cpu = smp_processor_id();
2686 	int prev_cpu = task_cpu(p);
2687 	int new_cpu = cpu;
2688 	int want_affine = 0;
2689 	int want_sd = 1;
2690 	int sync = wake_flags & WF_SYNC;
2691 
2692 	if (p->nr_cpus_allowed == 1)
2693 		return prev_cpu;
2694 
2695 	if (sd_flag & SD_BALANCE_WAKE) {
2696 		if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
2697 			want_affine = 1;
2698 		new_cpu = prev_cpu;
2699 	}
2700 
2701 	rcu_read_lock();
2702 	for_each_domain(cpu, tmp) {
2703 		if (!(tmp->flags & SD_LOAD_BALANCE))
2704 			continue;
2705 
2706 		/*
2707 		 * If power savings logic is enabled for a domain, see if we
2708 		 * are not overloaded, if so, don't balance wider.
2709 		 */
2710 		if (tmp->flags & (SD_PREFER_LOCAL)) {
2711 			unsigned long power = 0;
2712 			unsigned long nr_running = 0;
2713 			unsigned long capacity;
2714 			int i;
2715 
2716 			for_each_cpu(i, sched_domain_span(tmp)) {
2717 				power += power_of(i);
2718 				nr_running += cpu_rq(i)->cfs.nr_running;
2719 			}
2720 
2721 			capacity = DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE);
2722 
2723 			if (nr_running < capacity)
2724 				want_sd = 0;
2725 		}
2726 
2727 		/*
2728 		 * If both cpu and prev_cpu are part of this domain,
2729 		 * cpu is a valid SD_WAKE_AFFINE target.
2730 		 */
2731 		if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
2732 		    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
2733 			affine_sd = tmp;
2734 			want_affine = 0;
2735 		}
2736 
2737 		if (!want_sd && !want_affine)
2738 			break;
2739 
2740 		if (!(tmp->flags & sd_flag))
2741 			continue;
2742 
2743 		if (want_sd)
2744 			sd = tmp;
2745 	}
2746 
2747 	if (affine_sd) {
2748 		if (cpu == prev_cpu || wake_affine(affine_sd, p, sync))
2749 			prev_cpu = cpu;
2750 
2751 		new_cpu = select_idle_sibling(p, prev_cpu);
2752 		goto unlock;
2753 	}
2754 
2755 	while (sd) {
2756 		int load_idx = sd->forkexec_idx;
2757 		struct sched_group *group;
2758 		int weight;
2759 
2760 		if (!(sd->flags & sd_flag)) {
2761 			sd = sd->child;
2762 			continue;
2763 		}
2764 
2765 		if (sd_flag & SD_BALANCE_WAKE)
2766 			load_idx = sd->wake_idx;
2767 
2768 		group = find_idlest_group(sd, p, cpu, load_idx);
2769 		if (!group) {
2770 			sd = sd->child;
2771 			continue;
2772 		}
2773 
2774 		new_cpu = find_idlest_cpu(group, p, cpu);
2775 		if (new_cpu == -1 || new_cpu == cpu) {
2776 			/* Now try balancing at a lower domain level of cpu */
2777 			sd = sd->child;
2778 			continue;
2779 		}
2780 
2781 		/* Now try balancing at a lower domain level of new_cpu */
2782 		cpu = new_cpu;
2783 		weight = sd->span_weight;
2784 		sd = NULL;
2785 		for_each_domain(cpu, tmp) {
2786 			if (weight <= tmp->span_weight)
2787 				break;
2788 			if (tmp->flags & sd_flag)
2789 				sd = tmp;
2790 		}
2791 		/* while loop will break here if sd == NULL */
2792 	}
2793 unlock:
2794 	rcu_read_unlock();
2795 
2796 	return new_cpu;
2797 }
2798 #endif /* CONFIG_SMP */
2799 
2800 static unsigned long
2801 wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
2802 {
2803 	unsigned long gran = sysctl_sched_wakeup_granularity;
2804 
2805 	/*
2806 	 * Since it's curr that is running now, convert the gran from real-time
2807 	 * to virtual-time in its units.
2808 	 *
2809 	 * By using 'se' instead of 'curr' we penalize light tasks, so
2810 	 * they get preempted easier. That is, if 'se' < 'curr' then
2811 	 * the resulting gran will be larger, therefore penalizing the
2812 	 * lighter, if otoh 'se' > 'curr' then the resulting gran will
2813 	 * be smaller, again penalizing the lighter task.
2814 	 *
2815 	 * This is especially important for buddies when the leftmost
2816 	 * task is higher priority than the buddy.
2817 	 */
2818 	return calc_delta_fair(gran, se);
2819 }
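
/*
 * Worked example (illustrative, using the usual nice-level weights): with
 * gran = 1ms, a nice-0 'se' (weight 1024) keeps a virtual granularity of
 * ~1ms, while a nice +5 'se' (weight 335) sees ~3ms - the lighter waking
 * task must lag further behind in vruntime before it is allowed to preempt.
 */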
2820 
2821 /*
2822  * Should 'se' preempt 'curr'.
2823  *
2824  *             |s1
2825  *        |s2
2826  *   |s3
2827  *         g
2828  *      |<--->|c
2829  *
2830  *  w(c, s1) = -1
2831  *  w(c, s2) =  0
2832  *  w(c, s3) =  1
2833  *
2834  */
2835 static int
2836 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
2837 {
2838 	s64 gran, vdiff = curr->vruntime - se->vruntime;
2839 
2840 	if (vdiff <= 0)
2841 		return -1;
2842 
2843 	gran = wakeup_gran(curr, se);
2844 	if (vdiff > gran)
2845 		return 1;
2846 
2847 	return 0;
2848 }
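
/*
 * E.g. (illustrative): if the waking entity's vruntime is 2ms of virtual
 * time behind curr's and wakeup_gran() evaluates to 1ms, vdiff exceeds the
 * granularity and we return 1 (preempt); a 0.5ms lag returns 0, and a
 * waking entity already ahead of (or level with) curr returns -1.
 */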
2849 
2850 static void set_last_buddy(struct sched_entity *se)
2851 {
2852 	if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
2853 		return;
2854 
2855 	for_each_sched_entity(se)
2856 		cfs_rq_of(se)->last = se;
2857 }
2858 
2859 static void set_next_buddy(struct sched_entity *se)
2860 {
2861 	if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
2862 		return;
2863 
2864 	for_each_sched_entity(se)
2865 		cfs_rq_of(se)->next = se;
2866 }
2867 
2868 static void set_skip_buddy(struct sched_entity *se)
2869 {
2870 	for_each_sched_entity(se)
2871 		cfs_rq_of(se)->skip = se;
2872 }
2873 
2874 /*
2875  * Preempt the current task with a newly woken task if needed:
2876  */
2877 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
2878 {
2879 	struct task_struct *curr = rq->curr;
2880 	struct sched_entity *se = &curr->se, *pse = &p->se;
2881 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
2882 	int scale = cfs_rq->nr_running >= sched_nr_latency;
2883 	int next_buddy_marked = 0;
2884 
2885 	if (unlikely(se == pse))
2886 		return;
2887 
2888 	/*
2889 	 * This is possible from callers such as move_task(), in which we
2890 	 * unconditionally check_preempt_curr() after an enqueue (which may have
2891 	 * led to a throttle).  This both saves work and prevents false
2892 	 * next-buddy nomination below.
2893 	 */
2894 	if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
2895 		return;
2896 
2897 	if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
2898 		set_next_buddy(pse);
2899 		next_buddy_marked = 1;
2900 	}
2901 
2902 	/*
2903 	 * We can come here with TIF_NEED_RESCHED already set from new task
2904 	 * We can come here with TIF_NEED_RESCHED already set from the new task
2905 	 *
2906 	 * Note: this also catches the edge-case of curr being in a throttled
2907 	 * group (e.g. via set_curr_task), since update_curr() (in the
2908 	 * enqueue of curr) will have resulted in resched being set.  This
2909 	 * prevents us from potentially nominating it as a false LAST_BUDDY
2910 	 * below.
2911 	 */
2912 	if (test_tsk_need_resched(curr))
2913 		return;
2914 
2915 	/* Idle tasks are by definition preempted by non-idle tasks. */
2916 	if (unlikely(curr->policy == SCHED_IDLE) &&
2917 	    likely(p->policy != SCHED_IDLE))
2918 		goto preempt;
2919 
2920 	/*
2921 	 * Batch and idle tasks do not preempt non-idle tasks (their preemption
2922 	 * is driven by the tick):
2923 	 */
2924 	if (unlikely(p->policy != SCHED_NORMAL))
2925 		return;
2926 
2927 	find_matching_se(&se, &pse);
2928 	update_curr(cfs_rq_of(se));
2929 	BUG_ON(!pse);
2930 	if (wakeup_preempt_entity(se, pse) == 1) {
2931 		/*
2932 		 * Bias pick_next to pick the sched entity that is
2933 		 * triggering this preemption.
2934 		 */
2935 		if (!next_buddy_marked)
2936 			set_next_buddy(pse);
2937 		goto preempt;
2938 	}
2939 
2940 	return;
2941 
2942 preempt:
2943 	resched_task(curr);
2944 	/*
2945 	 * Only set the backward buddy when the current task is still
2946 	 * on the rq. This can happen when a wakeup gets interleaved
2947 	 * with schedule on the ->pre_schedule() or idle_balance()
2948 	 * point, either of which can drop the rq lock.
2949 	 *
2950 	 * Also, during early boot the idle thread is in the fair class,
2951 	 * for obvious reasons it's a bad idea to schedule back to it.
2952 	 */
2953 	if (unlikely(!se->on_rq || curr == rq->idle))
2954 		return;
2955 
2956 	if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
2957 		set_last_buddy(se);
2958 }
2959 
2960 static struct task_struct *pick_next_task_fair(struct rq *rq)
2961 {
2962 	struct task_struct *p;
2963 	struct cfs_rq *cfs_rq = &rq->cfs;
2964 	struct sched_entity *se;
2965 
2966 	if (!cfs_rq->nr_running)
2967 		return NULL;
2968 
2969 	do {
2970 		se = pick_next_entity(cfs_rq);
2971 		set_next_entity(cfs_rq, se);
2972 		cfs_rq = group_cfs_rq(se);
2973 	} while (cfs_rq);
2974 
2975 	p = task_of(se);
2976 	if (hrtick_enabled(rq))
2977 		hrtick_start_fair(rq, p);
2978 
2979 	return p;
2980 }
2981 
2982 /*
2983  * Account for a descheduled task:
2984  */
2985 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
2986 {
2987 	struct sched_entity *se = &prev->se;
2988 	struct cfs_rq *cfs_rq;
2989 
2990 	for_each_sched_entity(se) {
2991 		cfs_rq = cfs_rq_of(se);
2992 		put_prev_entity(cfs_rq, se);
2993 	}
2994 }
2995 
2996 /*
2997  * sched_yield() is very simple
2998  *
2999  * The magic of dealing with the ->skip buddy is in pick_next_entity.
3000  */
3001 static void yield_task_fair(struct rq *rq)
3002 {
3003 	struct task_struct *curr = rq->curr;
3004 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
3005 	struct sched_entity *se = &curr->se;
3006 
3007 	/*
3008 	 * Are we the only task in the tree?
3009 	 */
3010 	if (unlikely(rq->nr_running == 1))
3011 		return;
3012 
3013 	clear_buddies(cfs_rq, se);
3014 
3015 	if (curr->policy != SCHED_BATCH) {
3016 		update_rq_clock(rq);
3017 		/*
3018 		 * Update run-time statistics of the 'current'.
3019 		 */
3020 		update_curr(cfs_rq);
3021 		/*
3022 		 * Tell update_rq_clock() that we've just updated,
3023 		 * so we don't do microscopic update in schedule()
3024 		 * and double the fastpath cost.
3025 		 */
3026 		rq->skip_clock_update = 1;
3027 	}
3028 
3029 	set_skip_buddy(se);
3030 }
3031 
3032 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
3033 {
3034 	struct sched_entity *se = &p->se;
3035 
3036 	/* throttled hierarchies are not runnable */
3037 	if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
3038 		return false;
3039 
3040 	/* Tell the scheduler that we'd really like pse to run next. */
3041 	set_next_buddy(se);
3042 
3043 	yield_task_fair(rq);
3044 
3045 	return true;
3046 }
3047 
3048 #ifdef CONFIG_SMP
3049 /**************************************************
3050  * Fair scheduling class load-balancing methods:
3051  */
3052 
3053 static unsigned long __read_mostly max_load_balance_interval = HZ/10;
3054 
3055 #define LBF_ALL_PINNED	0x01
3056 #define LBF_NEED_BREAK	0x02
3057 #define LBF_SOME_PINNED 0x04
3058 
3059 struct lb_env {
3060 	struct sched_domain	*sd;
3061 
3062 	struct rq		*src_rq;
3063 	int			src_cpu;
3064 
3065 	int			dst_cpu;
3066 	struct rq		*dst_rq;
3067 
3068 	struct cpumask		*dst_grpmask;
3069 	int			new_dst_cpu;
3070 	enum cpu_idle_type	idle;
3071 	long			imbalance;
3072 	unsigned int		flags;
3073 
3074 	unsigned int		loop;
3075 	unsigned int		loop_break;
3076 	unsigned int		loop_max;
3077 };
3078 
3079 /*
3080  * move_task - move a task from one runqueue to another runqueue.
3081  * Both runqueues must be locked.
3082  */
3083 static void move_task(struct task_struct *p, struct lb_env *env)
3084 {
3085 	deactivate_task(env->src_rq, p, 0);
3086 	set_task_cpu(p, env->dst_cpu);
3087 	activate_task(env->dst_rq, p, 0);
3088 	check_preempt_curr(env->dst_rq, p, 0);
3089 }
3090 
3091 /*
3092  * Is this task likely cache-hot:
3093  */
3094 static int
3095 task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
3096 {
3097 	s64 delta;
3098 
3099 	if (p->sched_class != &fair_sched_class)
3100 		return 0;
3101 
3102 	if (unlikely(p->policy == SCHED_IDLE))
3103 		return 0;
3104 
3105 	/*
3106 	 * Buddy candidates are cache hot:
3107 	 */
3108 	if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
3109 			(&p->se == cfs_rq_of(&p->se)->next ||
3110 			 &p->se == cfs_rq_of(&p->se)->last))
3111 		return 1;
3112 
3113 	if (sysctl_sched_migration_cost == -1)
3114 		return 1;
3115 	if (sysctl_sched_migration_cost == 0)
3116 		return 0;
3117 
3118 	delta = now - p->se.exec_start;
3119 
3120 	return delta < (s64)sysctl_sched_migration_cost;
3121 }
3122 
3123 /*
3124  * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
3125  */
3126 static
3127 int can_migrate_task(struct task_struct *p, struct lb_env *env)
3128 {
3129 	int tsk_cache_hot = 0;
3130 	/*
3131 	 * We do not migrate tasks that:
3132 	 * 1) are running (obviously), or
3133 	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
3134 	 * 3) are cache-hot on their current CPU.
3135 	 */
3136 	if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
3137 		int new_dst_cpu;
3138 
3139 		schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
3140 
3141 		/*
3142 		 * Remember if this task can be migrated to any other cpu in
3143 		 * our sched_group. We may want to revisit it if we couldn't
3144 		 * meet load balance goals by pulling other tasks on src_cpu.
3145 		 *
3146 		 * Also avoid computing new_dst_cpu if we have already computed
3147 		 * one in current iteration.
3148 		 */
3149 		if (!env->dst_grpmask || (env->flags & LBF_SOME_PINNED))
3150 			return 0;
3151 
3152 		new_dst_cpu = cpumask_first_and(env->dst_grpmask,
3153 						tsk_cpus_allowed(p));
3154 		if (new_dst_cpu < nr_cpu_ids) {
3155 			env->flags |= LBF_SOME_PINNED;
3156 			env->new_dst_cpu = new_dst_cpu;
3157 		}
3158 		return 0;
3159 	}
3160 
3161 	/* Record that we found at least one task that could run on dst_cpu */
3162 	env->flags &= ~LBF_ALL_PINNED;
3163 
3164 	if (task_running(env->src_rq, p)) {
3165 		schedstat_inc(p, se.statistics.nr_failed_migrations_running);
3166 		return 0;
3167 	}
3168 
3169 	/*
3170 	 * Aggressive migration if:
3171 	 * 1) task is cache cold, or
3172 	 * 2) too many balance attempts have failed.
3173 	 */
3174 
3175 	tsk_cache_hot = task_hot(p, env->src_rq->clock_task, env->sd);
3176 	if (!tsk_cache_hot ||
3177 		env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
3178 #ifdef CONFIG_SCHEDSTATS
3179 		if (tsk_cache_hot) {
3180 			schedstat_inc(env->sd, lb_hot_gained[env->idle]);
3181 			schedstat_inc(p, se.statistics.nr_forced_migrations);
3182 		}
3183 #endif
3184 		return 1;
3185 	}
3186 
3187 	if (tsk_cache_hot) {
3188 		schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
3189 		return 0;
3190 	}
3191 	return 1;
3192 }
3193 
3194 /*
3195  * move_one_task tries to move exactly one task from busiest to this_rq, as
3196  * part of active balancing operations within "domain".
3197  * Returns 1 if successful and 0 otherwise.
3198  *
3199  * Called with both runqueues locked.
3200  */
3201 static int move_one_task(struct lb_env *env)
3202 {
3203 	struct task_struct *p, *n;
3204 
3205 	list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
3206 		if (throttled_lb_pair(task_group(p), env->src_rq->cpu, env->dst_cpu))
3207 			continue;
3208 
3209 		if (!can_migrate_task(p, env))
3210 			continue;
3211 
3212 		move_task(p, env);
3213 		/*
3214 		 * Right now, this is only the second place move_task()
3215 		 * is called, so we can safely collect move_task()
3216 		 * stats here rather than inside move_task().
3217 		 */
3218 		schedstat_inc(env->sd, lb_gained[env->idle]);
3219 		return 1;
3220 	}
3221 	return 0;
3222 }
3223 
3224 static unsigned long task_h_load(struct task_struct *p);
3225 
3226 static const unsigned int sched_nr_migrate_break = 32;
3227 
3228 /*
3229  * move_tasks tries to move up to imbalance weighted load from busiest to
3230  * this_rq, as part of a balancing operation within domain "sd".
3231  * Returns 1 if successful and 0 otherwise.
3232  *
3233  * Called with both runqueues locked.
3234  */
3235 static int move_tasks(struct lb_env *env)
3236 {
3237 	struct list_head *tasks = &env->src_rq->cfs_tasks;
3238 	struct task_struct *p;
3239 	unsigned long load;
3240 	int pulled = 0;
3241 
3242 	if (env->imbalance <= 0)
3243 		return 0;
3244 
3245 	while (!list_empty(tasks)) {
3246 		p = list_first_entry(tasks, struct task_struct, se.group_node);
3247 
3248 		env->loop++;
3249 		/* We've more or less seen every task there is, call it quits */
3250 		if (env->loop > env->loop_max)
3251 			break;
3252 
3253 		/* take a breather every nr_migrate tasks */
3254 		if (env->loop > env->loop_break) {
3255 			env->loop_break += sched_nr_migrate_break;
3256 			env->flags |= LBF_NEED_BREAK;
3257 			break;
3258 		}
3259 
3260 		if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
3261 			goto next;
3262 
3263 		load = task_h_load(p);
3264 
3265 		if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
3266 			goto next;
3267 
3268 		if ((load / 2) > env->imbalance)
3269 			goto next;
3270 
3271 		if (!can_migrate_task(p, env))
3272 			goto next;
3273 
3274 		move_task(p, env);
3275 		pulled++;
3276 		env->imbalance -= load;
3277 
3278 #ifdef CONFIG_PREEMPT
3279 		/*
3280 		 * NEWIDLE balancing is a source of latency, so preemptible
3281 		 * kernels will stop after the first task is pulled to minimize
3282 		 * the critical section.
3283 		 */
3284 		if (env->idle == CPU_NEWLY_IDLE)
3285 			break;
3286 #endif
3287 
3288 		/*
3289 		 * We only want to steal up to the prescribed amount of
3290 		 * weighted load.
3291 		 */
3292 		if (env->imbalance <= 0)
3293 			break;
3294 
3295 		continue;
3296 next:
3297 		list_move_tail(&p->se.group_node, tasks);
3298 	}
3299 
3300 	/*
3301 	 * Right now, this is one of only two places move_task() is called,
3302 	 * so we can safely collect move_task() stats here rather than
3303 	 * inside move_task().
3304 	 */
3305 	schedstat_add(env->sd, lb_gained[env->idle], pulled);
3306 
3307 	return pulled;
3308 }
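
/*
 * Example (illustrative): with env->imbalance = 200 a task whose
 * task_h_load() is 500 is skipped, since half its load alone exceeds the
 * remaining imbalance; a 300-weight task is pulled instead, the imbalance
 * drops to -100 and the loop terminates.
 */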
3309 
3310 #ifdef CONFIG_FAIR_GROUP_SCHED
3311 /*
3312  * update tg->load_weight by folding this cpu's load_avg
3313  */
3314 static int update_shares_cpu(struct task_group *tg, int cpu)
3315 {
3316 	struct cfs_rq *cfs_rq;
3317 	unsigned long flags;
3318 	struct rq *rq;
3319 
3320 	if (!tg->se[cpu])
3321 		return 0;
3322 
3323 	rq = cpu_rq(cpu);
3324 	cfs_rq = tg->cfs_rq[cpu];
3325 
3326 	raw_spin_lock_irqsave(&rq->lock, flags);
3327 
3328 	update_rq_clock(rq);
3329 	update_cfs_load(cfs_rq, 1);
3330 
3331 	/*
3332 	 * We need to update shares after updating tg->load_weight in
3333 	 * order to adjust the weight of groups with long running tasks.
3334 	 */
3335 	update_cfs_shares(cfs_rq);
3336 
3337 	raw_spin_unlock_irqrestore(&rq->lock, flags);
3338 
3339 	return 0;
3340 }
3341 
3342 static void update_shares(int cpu)
3343 {
3344 	struct cfs_rq *cfs_rq;
3345 	struct rq *rq = cpu_rq(cpu);
3346 
3347 	rcu_read_lock();
3348 	/*
3349 	 * Iterates the task_group tree in a bottom up fashion, see
3350 	 * list_add_leaf_cfs_rq() for details.
3351 	 */
3352 	for_each_leaf_cfs_rq(rq, cfs_rq) {
3353 		/* throttled entities do not contribute to load */
3354 		if (throttled_hierarchy(cfs_rq))
3355 			continue;
3356 
3357 		update_shares_cpu(cfs_rq->tg, cpu);
3358 	}
3359 	rcu_read_unlock();
3360 }
3361 
3362 /*
3363  * Compute the cpu's hierarchical load factor for each task group.
3364  * This needs to be done in a top-down fashion because the load of a child
3365  * group is a fraction of its parents load.
3366  */
3367 static int tg_load_down(struct task_group *tg, void *data)
3368 {
3369 	unsigned long load;
3370 	long cpu = (long)data;
3371 
3372 	if (!tg->parent) {
3373 		load = cpu_rq(cpu)->load.weight;
3374 	} else {
3375 		load = tg->parent->cfs_rq[cpu]->h_load;
3376 		load *= tg->se[cpu]->load.weight;
3377 		load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
3378 	}
3379 
3380 	tg->cfs_rq[cpu]->h_load = load;
3381 
3382 	return 0;
3383 }
3384 
3385 static void update_h_load(long cpu)
3386 {
3387 	rcu_read_lock();
3388 	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
3389 	rcu_read_unlock();
3390 }
3391 
3392 static unsigned long task_h_load(struct task_struct *p)
3393 {
3394 	struct cfs_rq *cfs_rq = task_cfs_rq(p);
3395 	unsigned long load;
3396 
3397 	load = p->se.load.weight;
3398 	load = div_u64(load * cfs_rq->h_load, cfs_rq->load.weight + 1);
3399 
3400 	return load;
3401 }
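
/*
 * Worked example (illustrative): if the root cfs_rq has load.weight 2048
 * and a group's se contributes 512 of that, tg_load_down() gives the
 * group's cfs_rq an h_load of ~512.  A task of weight 1024 on that
 * group's cfs_rq of total weight 2048 then reports a task_h_load() of
 * ~256 - half of the group's share of the root - which is the quantity
 * move_tasks() balances against env->imbalance.
 */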
3402 #else
3403 static inline void update_shares(int cpu)
3404 {
3405 }
3406 
3407 static inline void update_h_load(long cpu)
3408 {
3409 }
3410 
3411 static unsigned long task_h_load(struct task_struct *p)
3412 {
3413 	return p->se.load.weight;
3414 }
3415 #endif
3416 
3417 /********** Helpers for find_busiest_group ************************/
3418 /*
3419  * sd_lb_stats - Structure to store the statistics of a sched_domain
3420  * 		during load balancing.
3421  */
3422 struct sd_lb_stats {
3423 	struct sched_group *busiest; /* Busiest group in this sd */
3424 	struct sched_group *this;  /* Local group in this sd */
3425 	unsigned long total_load;  /* Total load of all groups in sd */
3426 	unsigned long total_pwr;   /*	Total power of all groups in sd */
3427 	unsigned long avg_load;	   /* Average load across all groups in sd */
3428 
3429 	/** Statistics of this group */
3430 	unsigned long this_load;
3431 	unsigned long this_load_per_task;
3432 	unsigned long this_nr_running;
3433 	unsigned long this_has_capacity;
3434 	unsigned int  this_idle_cpus;
3435 
3436 	/* Statistics of the busiest group */
3437 	unsigned int  busiest_idle_cpus;
3438 	unsigned long max_load;
3439 	unsigned long busiest_load_per_task;
3440 	unsigned long busiest_nr_running;
3441 	unsigned long busiest_group_capacity;
3442 	unsigned long busiest_has_capacity;
3443 	unsigned int  busiest_group_weight;
3444 
3445 	int group_imb; /* Is there imbalance in this sd */
3446 };
3447 
3448 /*
3449  * sg_lb_stats - stats of a sched_group required for load_balancing
3450  */
3451 struct sg_lb_stats {
3452 	unsigned long avg_load; /* Avg load across the CPUs of the group */
3453 	unsigned long group_load; /* Total load over the CPUs of the group */
3454 	unsigned long sum_nr_running; /* Nr tasks running in the group */
3455 	unsigned long sum_weighted_load; /* Weighted load of group's tasks */
3456 	unsigned long group_capacity;
3457 	unsigned long idle_cpus;
3458 	unsigned long group_weight;
3459 	int group_imb; /* Is there an imbalance in the group ? */
3460 	int group_has_capacity; /* Is there extra capacity in the group? */
3461 };
3462 
3463 /**
3464  * get_sd_load_idx - Obtain the load index for a given sched domain.
3465  * @sd: The sched_domain whose load_idx is to be obtained.
3466  * @idle: The idle status of the CPU whose sd load_idx is obtained.
3467  */
3468 static inline int get_sd_load_idx(struct sched_domain *sd,
3469 					enum cpu_idle_type idle)
3470 {
3471 	int load_idx;
3472 
3473 	switch (idle) {
3474 	case CPU_NOT_IDLE:
3475 		load_idx = sd->busy_idx;
3476 		break;
3477 
3478 	case CPU_NEWLY_IDLE:
3479 		load_idx = sd->newidle_idx;
3480 		break;
3481 	default:
3482 		load_idx = sd->idle_idx;
3483 		break;
3484 	}
3485 
3486 	return load_idx;
3487 }
3488 
3489 unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
3490 {
3491 	return SCHED_POWER_SCALE;
3492 }
3493 
3494 unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
3495 {
3496 	return default_scale_freq_power(sd, cpu);
3497 }
3498 
3499 unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
3500 {
3501 	unsigned long weight = sd->span_weight;
3502 	unsigned long smt_gain = sd->smt_gain;
3503 
3504 	smt_gain /= weight;
3505 
3506 	return smt_gain;
3507 }
3508 
3509 unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
3510 {
3511 	return default_scale_smt_power(sd, cpu);
3512 }
3513 
3514 unsigned long scale_rt_power(int cpu)
3515 {
3516 	struct rq *rq = cpu_rq(cpu);
3517 	u64 total, available, age_stamp, avg;
3518 
3519 	/*
3520 	 * Since we're reading these variables without serialization make sure
3521 	 * we read them once before doing sanity checks on them.
3522 	 */
3523 	age_stamp = ACCESS_ONCE(rq->age_stamp);
3524 	avg = ACCESS_ONCE(rq->rt_avg);
3525 
3526 	total = sched_avg_period() + (rq->clock - age_stamp);
3527 
3528 	if (unlikely(total < avg)) {
3529 		/* Ensures that power won't end up being negative */
3530 		available = 0;
3531 	} else {
3532 		available = total - avg;
3533 	}
3534 
3535 	if (unlikely((s64)total < SCHED_POWER_SCALE))
3536 		total = SCHED_POWER_SCALE;
3537 
3538 	total >>= SCHED_POWER_SHIFT;
3539 
3540 	return div_u64(available, total);
3541 }
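
/*
 * E.g. (illustrative): if roughly 10% of the averaging window went to the
 * time tracked in rq->rt_avg, available/total is ~0.9 and this returns
 * about 921 of SCHED_POWER_SCALE (1024); update_cpu_power() below folds
 * that in so CFS sees a proportionally weaker CPU.
 */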
3542 
3543 static void update_cpu_power(struct sched_domain *sd, int cpu)
3544 {
3545 	unsigned long weight = sd->span_weight;
3546 	unsigned long power = SCHED_POWER_SCALE;
3547 	struct sched_group *sdg = sd->groups;
3548 
3549 	if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
3550 		if (sched_feat(ARCH_POWER))
3551 			power *= arch_scale_smt_power(sd, cpu);
3552 		else
3553 			power *= default_scale_smt_power(sd, cpu);
3554 
3555 		power >>= SCHED_POWER_SHIFT;
3556 	}
3557 
3558 	sdg->sgp->power_orig = power;
3559 
3560 	if (sched_feat(ARCH_POWER))
3561 		power *= arch_scale_freq_power(sd, cpu);
3562 	else
3563 		power *= default_scale_freq_power(sd, cpu);
3564 
3565 	power >>= SCHED_POWER_SHIFT;
3566 
3567 	power *= scale_rt_power(cpu);
3568 	power >>= SCHED_POWER_SHIFT;
3569 
3570 	if (!power)
3571 		power = 1;
3572 
3573 	cpu_rq(cpu)->cpu_power = power;
3574 	sdg->sgp->power = power;
3575 }
3576 
3577 void update_group_power(struct sched_domain *sd, int cpu)
3578 {
3579 	struct sched_domain *child = sd->child;
3580 	struct sched_group *group, *sdg = sd->groups;
3581 	unsigned long power;
3582 	unsigned long interval;
3583 
3584 	interval = msecs_to_jiffies(sd->balance_interval);
3585 	interval = clamp(interval, 1UL, max_load_balance_interval);
3586 	sdg->sgp->next_update = jiffies + interval;
3587 
3588 	if (!child) {
3589 		update_cpu_power(sd, cpu);
3590 		return;
3591 	}
3592 
3593 	power = 0;
3594 
3595 	if (child->flags & SD_OVERLAP) {
3596 		/*
3597 		 * SD_OVERLAP domains cannot assume that child groups
3598 		 * span the current group.
3599 		 */
3600 
3601 		for_each_cpu(cpu, sched_group_cpus(sdg))
3602 			power += power_of(cpu);
3603 	} else  {
3604 		/*
3605 		 * !SD_OVERLAP domains can assume that child groups
3606 		 * span the current group.
3607 		 */
3608 
3609 		group = child->groups;
3610 		do {
3611 			power += group->sgp->power;
3612 			group = group->next;
3613 		} while (group != child->groups);
3614 	}
3615 
3616 	sdg->sgp->power_orig = sdg->sgp->power = power;
3617 }
3618 
3619 /*
3620  * Try and fix up capacity for tiny siblings, this is needed when
3621  * things like SD_ASYM_PACKING need f_b_g to select another sibling
3622  * which on its own isn't powerful enough.
3623  *
3624  * See update_sd_pick_busiest() and check_asym_packing().
3625  */
3626 static inline int
3627 fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
3628 {
3629 	/*
3630 	 * Only siblings can have significantly less than SCHED_POWER_SCALE
3631 	 */
3632 	if (!(sd->flags & SD_SHARE_CPUPOWER))
3633 		return 0;
3634 
3635 	/*
3636 	 * If ~90% of the cpu_power is still there, we're good.
3637 	 */
3638 	if (group->sgp->power * 32 > group->sgp->power_orig * 29)
3639 		return 1;
3640 
3641 	return 0;
3642 }
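
/*
 * The 32/29 test above is a cheap integer form of "at least ~90.6% of the
 * original cpu_power remains": e.g. a sibling scaled down to 940 of an
 * original 1024 still counts as one unit of capacity here, while one down
 * at 900 does not.
 */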
3643 
3644 /**
3645  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
3646  * @env: The load balancing environment.
3647  * @group: sched_group whose statistics are to be updated.
3648  * @load_idx: Load index of sched_domain of this_cpu for load calc.
3649  * @local_group: Does group contain this_cpu.
3650  * @cpus: Set of cpus considered for load balancing.
3651  * @balance: Should we balance.
3652  * @sgs: variable to hold the statistics for this group.
3653  */
3654 static inline void update_sg_lb_stats(struct lb_env *env,
3655 			struct sched_group *group, int load_idx,
3656 			int local_group, const struct cpumask *cpus,
3657 			int *balance, struct sg_lb_stats *sgs)
3658 {
3659 	unsigned long nr_running, max_nr_running, min_nr_running;
3660 	unsigned long load, max_cpu_load, min_cpu_load;
3661 	unsigned int balance_cpu = -1, first_idle_cpu = 0;
3662 	unsigned long avg_load_per_task = 0;
3663 	int i;
3664 
3665 	if (local_group)
3666 		balance_cpu = group_balance_cpu(group);
3667 
3668 	/* Tally up the load of all CPUs in the group */
3669 	max_cpu_load = 0;
3670 	min_cpu_load = ~0UL;
3671 	max_nr_running = 0;
3672 	min_nr_running = ~0UL;
3673 
3674 	for_each_cpu_and(i, sched_group_cpus(group), cpus) {
3675 		struct rq *rq = cpu_rq(i);
3676 
3677 		nr_running = rq->nr_running;
3678 
3679 		/* Bias balancing toward cpus of our domain */
3680 		if (local_group) {
3681 			if (idle_cpu(i) && !first_idle_cpu &&
3682 					cpumask_test_cpu(i, sched_group_mask(group))) {
3683 				first_idle_cpu = 1;
3684 				balance_cpu = i;
3685 			}
3686 
3687 			load = target_load(i, load_idx);
3688 		} else {
3689 			load = source_load(i, load_idx);
3690 			if (load > max_cpu_load)
3691 				max_cpu_load = load;
3692 			if (min_cpu_load > load)
3693 				min_cpu_load = load;
3694 
3695 			if (nr_running > max_nr_running)
3696 				max_nr_running = nr_running;
3697 			if (min_nr_running > nr_running)
3698 				min_nr_running = nr_running;
3699 		}
3700 
3701 		sgs->group_load += load;
3702 		sgs->sum_nr_running += nr_running;
3703 		sgs->sum_weighted_load += weighted_cpuload(i);
3704 		if (idle_cpu(i))
3705 			sgs->idle_cpus++;
3706 	}
3707 
3708 	/*
3709 	 * First idle cpu or the first cpu (busiest) in this sched group
3710 	 * is eligible for doing load balancing at this and above
3711 	 * domains. In the newly idle case, we will allow all the cpus
3712 	 * to do the newly idle load balance.
3713 	 */
3714 	if (local_group) {
3715 		if (env->idle != CPU_NEWLY_IDLE) {
3716 			if (balance_cpu != env->dst_cpu) {
3717 				*balance = 0;
3718 				return;
3719 			}
3720 			update_group_power(env->sd, env->dst_cpu);
3721 		} else if (time_after_eq(jiffies, group->sgp->next_update))
3722 			update_group_power(env->sd, env->dst_cpu);
3723 	}
3724 
3725 	/* Adjust by relative CPU power of the group */
3726 	sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power;
3727 
3728 	/*
3729 	 * Consider the group unbalanced when the imbalance is larger
3730 	 * than the average weight of a task.
3731 	 *
3732 	 * APZ: with cgroup the avg task weight can vary wildly and
3733 	 *      might not be a suitable number - should we keep a
3734 	 *      normalized nr_running number somewhere that negates
3735 	 *      the hierarchy?
3736 	 */
3737 	if (sgs->sum_nr_running)
3738 		avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
3739 
3740 	if ((max_cpu_load - min_cpu_load) >= avg_load_per_task &&
3741 	    (max_nr_running - min_nr_running) > 1)
3742 		sgs->group_imb = 1;
3743 
3744 	sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power,
3745 						SCHED_POWER_SCALE);
3746 	if (!sgs->group_capacity)
3747 		sgs->group_capacity = fix_small_capacity(env->sd, group);
3748 	sgs->group_weight = group->group_weight;
3749 
3750 	if (sgs->group_capacity > sgs->sum_nr_running)
3751 		sgs->group_has_capacity = 1;
3752 }
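
/*
 * Example (illustrative): a remote two-cpu group with per-cpu loads
 * {2048, 0} and task counts {2, 0} has a cpu-load spread (2048) at least
 * as large as its average task weight (2048 / 2 = 1024) and a nr_running
 * spread greater than 1, so it is marked group_imb even though its
 * avg_load may look moderate.
 */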
3753 
3754 /**
3755  * update_sd_pick_busiest - return 1 on busiest group
3756  * @env: The load balancing environment.
3757  * @sds: sched_domain statistics
3758  * @sg: sched_group candidate to be checked for being the busiest
3759  * @sgs: sched_group statistics
3760  *
3761  * Determine if @sg is a busier group than the previously selected
3762  * busiest group.
3763  */
3764 static bool update_sd_pick_busiest(struct lb_env *env,
3765 				   struct sd_lb_stats *sds,
3766 				   struct sched_group *sg,
3767 				   struct sg_lb_stats *sgs)
3768 {
3769 	if (sgs->avg_load <= sds->max_load)
3770 		return false;
3771 
3772 	if (sgs->sum_nr_running > sgs->group_capacity)
3773 		return true;
3774 
3775 	if (sgs->group_imb)
3776 		return true;
3777 
3778 	/*
3779 	 * ASYM_PACKING needs to move all the work to the lowest
3780 	 * numbered CPUs in the group, therefore mark all groups
3781 	 * higher than ourself as busy.
3782 	 */
3783 	if ((env->sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
3784 	    env->dst_cpu < group_first_cpu(sg)) {
3785 		if (!sds->busiest)
3786 			return true;
3787 
3788 		if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
3789 			return true;
3790 	}
3791 
3792 	return false;
3793 }
3794 
3795 /**
3796  * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
3797  * @env: The load balancing environment.
3798  * @cpus: Set of cpus considered for load balancing.
3799  * @balance: Should we balance.
3800  * @sds: variable to hold the statistics for this sched_domain.
3801  */
3802 static inline void update_sd_lb_stats(struct lb_env *env,
3803 				      const struct cpumask *cpus,
3804 				      int *balance, struct sd_lb_stats *sds)
3805 {
3806 	struct sched_domain *child = env->sd->child;
3807 	struct sched_group *sg = env->sd->groups;
3808 	struct sg_lb_stats sgs;
3809 	int load_idx, prefer_sibling = 0;
3810 
3811 	if (child && child->flags & SD_PREFER_SIBLING)
3812 		prefer_sibling = 1;
3813 
3814 	load_idx = get_sd_load_idx(env->sd, env->idle);
3815 
3816 	do {
3817 		int local_group;
3818 
3819 		local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
3820 		memset(&sgs, 0, sizeof(sgs));
3821 		update_sg_lb_stats(env, sg, load_idx, local_group,
3822 				   cpus, balance, &sgs);
3823 
3824 		if (local_group && !(*balance))
3825 			return;
3826 
3827 		sds->total_load += sgs.group_load;
3828 		sds->total_pwr += sg->sgp->power;
3829 
3830 		/*
3831 		 * In case the child domain prefers tasks go to siblings
3832 		 * first, lower the sg capacity to one so that we'll try
3833 		 * and move all the excess tasks away. We lower the capacity
3834 		 * of a group only if the local group has the capacity to fit
3835 		 * these excess tasks, i.e. nr_running < group_capacity. The
3836 		 * extra check prevents the case where you always pull from the
3837 		 * heaviest group when it is already under-utilized (possible
3838 		 * when a task with a large weight outweighs the tasks on the system).
3839 		 */
3840 		if (prefer_sibling && !local_group && sds->this_has_capacity)
3841 			sgs.group_capacity = min(sgs.group_capacity, 1UL);
3842 
3843 		if (local_group) {
3844 			sds->this_load = sgs.avg_load;
3845 			sds->this = sg;
3846 			sds->this_nr_running = sgs.sum_nr_running;
3847 			sds->this_load_per_task = sgs.sum_weighted_load;
3848 			sds->this_has_capacity = sgs.group_has_capacity;
3849 			sds->this_idle_cpus = sgs.idle_cpus;
3850 		} else if (update_sd_pick_busiest(env, sds, sg, &sgs)) {
3851 			sds->max_load = sgs.avg_load;
3852 			sds->busiest = sg;
3853 			sds->busiest_nr_running = sgs.sum_nr_running;
3854 			sds->busiest_idle_cpus = sgs.idle_cpus;
3855 			sds->busiest_group_capacity = sgs.group_capacity;
3856 			sds->busiest_load_per_task = sgs.sum_weighted_load;
3857 			sds->busiest_has_capacity = sgs.group_has_capacity;
3858 			sds->busiest_group_weight = sgs.group_weight;
3859 			sds->group_imb = sgs.group_imb;
3860 		}
3861 
3862 		sg = sg->next;
3863 	} while (sg != env->sd->groups);
3864 }
3865 
3866 /**
3867  * check_asym_packing - Check to see if the group is packed into the
3868  *			sched doman.
3869  *			sched domain.
3870  * This is primarily intended to used at the sibling level.  Some
3871  * cores like POWER7 prefer to use lower numbered SMT threads.  In the
3872  * case of POWER7, it can move to lower SMT modes only when higher
3873  * threads are idle.  When in lower SMT modes, the threads will
3874  * perform better since they share less core resources.  Hence when we
3875  * have idle threads, we want them to be the higher ones.
3876  *
3877  * This packing function is run on idle threads.  It checks to see if
3878  * the busiest CPU in this domain (core in the P7 case) has a higher
3879  * CPU number than the packing function is being run on.  Here we are
3880  * assuming lower CPU number will be equivalent to lower a SMT thread
3881  * number.
3882  *
3883  * Returns 1 when packing is required and a task should be moved to
3884  * this CPU.  The amount of the imbalance is returned in *imbalance.
3885  *
3886  * @env: The load balancing environment.
3887  * @sds: Statistics of the sched_domain which is to be packed
3888  */
3889 static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
3890 {
3891 	int busiest_cpu;
3892 
3893 	if (!(env->sd->flags & SD_ASYM_PACKING))
3894 		return 0;
3895 
3896 	if (!sds->busiest)
3897 		return 0;
3898 
3899 	busiest_cpu = group_first_cpu(sds->busiest);
3900 	if (env->dst_cpu > busiest_cpu)
3901 		return 0;
3902 
3903 	env->imbalance = DIV_ROUND_CLOSEST(
3904 		sds->max_load * sds->busiest->sgp->power, SCHED_POWER_SCALE);
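	/*
	 * Illustrative numbers (not from the source): with max_load == 2048
	 * and a busiest-group power of 1024 (== SCHED_POWER_SCALE), the
	 * imbalance works out to 2048, i.e. the busiest group's full load
	 * scaled back to task-load units.
	 */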
3905 
3906 	return 1;
3907 }
3908 
3909 /**
3910  * fix_small_imbalance - Calculate the minor imbalance that exists
3911  *			amongst the groups of a sched_domain, during
3912  *			load balancing.
3913  * @env: The load balancing environment.
3914  * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
3915  */
3916 static inline
3917 void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
3918 {
3919 	unsigned long tmp, pwr_now = 0, pwr_move = 0;
3920 	unsigned int imbn = 2;
3921 	unsigned long scaled_busy_load_per_task;
3922 
3923 	if (sds->this_nr_running) {
3924 		sds->this_load_per_task /= sds->this_nr_running;
3925 		if (sds->busiest_load_per_task >
3926 				sds->this_load_per_task)
3927 			imbn = 1;
3928 	} else {
3929 		sds->this_load_per_task =
3930 			cpu_avg_load_per_task(env->dst_cpu);
3931 	}
3932 
3933 	scaled_busy_load_per_task = sds->busiest_load_per_task
3934 					 * SCHED_POWER_SCALE;
3935 	scaled_busy_load_per_task /= sds->busiest->sgp->power;
3936 
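	/*
	 * If the gap between the busiest and the local group is at least
	 * (imbn - 1) busiest-group tasks' worth of load, moving a single task
	 * is enough: report the busiest group's average per-task load as the
	 * imbalance.
	 */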
3937 	if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
3938 			(scaled_busy_load_per_task * imbn)) {
3939 		env->imbalance = sds->busiest_load_per_task;
3940 		return;
3941 	}
3942 
3943 	/*
3944 	 * OK, we don't have enough imbalance to justify moving tasks,
3945 	 * however we may be able to increase total CPU power used by
3946 	 * moving them.
3947 	 */
3948 
3949 	pwr_now += sds->busiest->sgp->power *
3950 			min(sds->busiest_load_per_task, sds->max_load);
3951 	pwr_now += sds->this->sgp->power *
3952 			min(sds->this_load_per_task, sds->this_load);
3953 	pwr_now /= SCHED_POWER_SCALE;
3954 
3955 	/* Amount of load we'd subtract */
3956 	tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
3957 		sds->busiest->sgp->power;
3958 	if (sds->max_load > tmp)
3959 		pwr_move += sds->busiest->sgp->power *
3960 			min(sds->busiest_load_per_task, sds->max_load - tmp);
3961 
3962 	/* Amount of load we'd add */
3963 	if (sds->max_load * sds->busiest->sgp->power <
3964 		sds->busiest_load_per_task * SCHED_POWER_SCALE)
3965 		tmp = (sds->max_load * sds->busiest->sgp->power) /
3966 			sds->this->sgp->power;
3967 	else
3968 		tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
3969 			sds->this->sgp->power;
3970 	pwr_move += sds->this->sgp->power *
3971 			min(sds->this_load_per_task, sds->this_load + tmp);
3972 	pwr_move /= SCHED_POWER_SCALE;
3973 
3974 	/* Move if we gain throughput */
3975 	if (pwr_move > pwr_now)
3976 		env->imbalance = sds->busiest_load_per_task;
3977 }
3978 
3979 /**
3980  * calculate_imbalance - Calculate the amount of imbalance present within the
3981  *			 groups of a given sched_domain during load balance.
3982  * @env: load balance environment
3983  * @sds: statistics of the sched_domain whose imbalance is to be calculated.
3984  */
3985 static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
3986 {
3987 	unsigned long max_pull, load_above_capacity = ~0UL;
3988 
3989 	sds->busiest_load_per_task /= sds->busiest_nr_running;
3990 	if (sds->group_imb) {
3991 		sds->busiest_load_per_task =
3992 			min(sds->busiest_load_per_task, sds->avg_load);
3993 	}
3994 
3995 	/*
3996 	 * In the presence of smp nice balancing, certain scenarios can have
3997 	 * max load less than avg load (as we skip the groups at or below
3998 	 * their cpu_power while calculating max_load).
3999 	 */
4000 	if (sds->max_load < sds->avg_load) {
4001 		env->imbalance = 0;
4002 		return fix_small_imbalance(env, sds);
4003 	}
4004 
4005 	if (!sds->group_imb) {
4006 		/*
4007 		 * Don't want to pull so many tasks that a group would go idle.
4008 		 */
4009 		load_above_capacity = (sds->busiest_nr_running -
4010 						sds->busiest_group_capacity);
4011 
4012 		load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
4013 
4014 		load_above_capacity /= sds->busiest->sgp->power;
4015 	}
4016 
4017 	/*
4018 	 * We're trying to get all the cpus to the average_load, so we don't
4019 	 * want to push ourselves above the average load, nor do we wish to
4020 	 * reduce the max loaded cpu below the average load. At the same time,
4021 	 * we also don't want to reduce the group load below the group capacity
4022 	 * (so that we can implement power-savings policies etc). Thus we look
4023 	 * for the minimum possible imbalance.
4024 	 * Be careful of negative numbers as they'll appear as very large values
4025 	 * with unsigned longs.
4026 	 */
4027 	max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
4028 
4029 	/* How much load to actually move to equalise the imbalance */
4030 	env->imbalance = min(max_pull * sds->busiest->sgp->power,
4031 		(sds->avg_load - sds->this_load) * sds->this->sgp->power)
4032 			/ SCHED_POWER_SCALE;
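	/*
	 * Illustrative numbers (not from the source): with max_load == 3072,
	 * avg_load == 2048, this_load == 1024, both group powers equal to
	 * SCHED_POWER_SCALE and load_above_capacity not limiting max_pull,
	 * both terms of the min() evaluate to 1024, so env->imbalance
	 * becomes 1024.
	 */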
4033 
4034 	/*
4035 	 * if env->imbalance is less than the average load per runnable task
4036 	 * there is no guarantee that any tasks will be moved so we'll have
4037 	 * a think about bumping its value to force at least one task to be
4038 	 * moved
4039 	 */
4040 	if (env->imbalance < sds->busiest_load_per_task)
4041 		return fix_small_imbalance(env, sds);
4042 
4043 }
4044 
4045 /******* find_busiest_group() helpers end here *********************/
4046 
4047 /**
4048  * find_busiest_group - Returns the busiest group within the sched_domain
4049  * if there is an imbalance. If there isn't an imbalance, and
4050  * the user has opted for power-savings, it returns a group whose
4051  * CPUs can be put to idle by rebalancing those tasks elsewhere, if
4052  * such a group exists.
4053  *
4054  * Also calculates the amount of weighted load which should be moved
4055  * to restore balance.
4056  *
4057  * @env: The load balancing environment.
4058  * @cpus: The set of CPUs under consideration for load-balancing.
4059  * @balance: Pointer to a variable indicating if this_cpu
4060  *	is the appropriate cpu to perform load balancing at this level.
4061  *
4062  * Returns:	- the busiest group if imbalance exists.
4063  *		- If no imbalance and user has opted for power-savings balance,
4064  *		   return the least loaded group whose CPUs can be
4065  *		   put to idle by rebalancing its tasks onto our group.
4066  */
4067 static struct sched_group *
4068 find_busiest_group(struct lb_env *env, const struct cpumask *cpus, int *balance)
4069 {
4070 	struct sd_lb_stats sds;
4071 
4072 	memset(&sds, 0, sizeof(sds));
4073 
4074 	/*
4075 	 * Compute the various statistics relevant for load balancing at
4076 	 * this level.
4077 	 */
4078 	update_sd_lb_stats(env, cpus, balance, &sds);
4079 
4080 	/*
4081 	 * this_cpu is not the appropriate cpu to perform load balancing at
4082 	 * this level.
4083 	 */
4084 	if (!(*balance))
4085 		goto ret;
4086 
4087 	if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) &&
4088 	    check_asym_packing(env, &sds))
4089 		return sds.busiest;
4090 
4091 	/* There is no busy sibling group to pull tasks from */
4092 	if (!sds.busiest || sds.busiest_nr_running == 0)
4093 		goto out_balanced;
4094 
4095 	sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;
4096 
4097 	/*
4098 	 * If the busiest group is imbalanced the below checks don't
4099 	 * work because they assume all things are equal, which typically
4100 	 * isn't true due to cpus_allowed constraints and the like.
4101 	 */
4102 	if (sds.group_imb)
4103 		goto force_balance;
4104 
4105 	/* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
4106 	if (env->idle == CPU_NEWLY_IDLE && sds.this_has_capacity &&
4107 			!sds.busiest_has_capacity)
4108 		goto force_balance;
4109 
4110 	/*
4111 	 * If the local group is more busy than the selected busiest group
4112 	 * don't try and pull any tasks.
4113 	 */
4114 	if (sds.this_load >= sds.max_load)
4115 		goto out_balanced;
4116 
4117 	/*
4118 	 * Don't pull any tasks if this group is already above the domain
4119 	 * average load.
4120 	 */
4121 	if (sds.this_load >= sds.avg_load)
4122 		goto out_balanced;
4123 
4124 	if (env->idle == CPU_IDLE) {
4125 		/*
4126 		 * This cpu is idle. If the busiest group doesn't have
4127 		 * more tasks than the number of available cpus and there
4128 		 * is no imbalance between this and the busiest group
4129 		 * wrt idle cpus, it is balanced.
4130 		 */
4131 		if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) &&
4132 		    sds.busiest_nr_running <= sds.busiest_group_weight)
4133 			goto out_balanced;
4134 	} else {
4135 		/*
4136 		 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
4137 		 * imbalance_pct to be conservative.
4138 		 */
4139 		if (100 * sds.max_load <= env->sd->imbalance_pct * sds.this_load)
4140 			goto out_balanced;
4141 	}
4142 
4143 force_balance:
4144 	/* Looks like there is an imbalance. Compute it */
4145 	calculate_imbalance(env, &sds);
4146 	return sds.busiest;
4147 
4148 out_balanced:
4149 ret:
4150 	env->imbalance = 0;
4151 	return NULL;
4152 }
4153 
4154 /*
4155  * find_busiest_queue - find the busiest runqueue among the cpus in group.
4156  */
4157 static struct rq *find_busiest_queue(struct lb_env *env,
4158 				     struct sched_group *group,
4159 				     const struct cpumask *cpus)
4160 {
4161 	struct rq *busiest = NULL, *rq;
4162 	unsigned long max_load = 0;
4163 	int i;
4164 
4165 	for_each_cpu(i, sched_group_cpus(group)) {
4166 		unsigned long power = power_of(i);
4167 		unsigned long capacity = DIV_ROUND_CLOSEST(power,
4168 							   SCHED_POWER_SCALE);
4169 		unsigned long wl;
4170 
4171 		if (!capacity)
4172 			capacity = fix_small_capacity(env->sd, group);
4173 
4174 		if (!cpumask_test_cpu(i, cpus))
4175 			continue;
4176 
4177 		rq = cpu_rq(i);
4178 		wl = weighted_cpuload(i);
4179 
4180 		/*
4181 		 * When comparing with imbalance, use weighted_cpuload()
4182 		 * which is not scaled with the cpu power.
4183 		 */
4184 		if (capacity && rq->nr_running == 1 && wl > env->imbalance)
4185 			continue;
4186 
4187 		/*
4188 		 * For the load comparisons with the other cpu's, consider
4189 		 * the weighted_cpuload() scaled with the cpu power, so that
4190 		 * the load can be moved away from the cpu that is potentially
4191 		 * running at a lower capacity.
4192 		 */
4193 		wl = (wl * SCHED_POWER_SCALE) / power;
4194 
4195 		if (wl > max_load) {
4196 			max_load = wl;
4197 			busiest = rq;
4198 		}
4199 	}
4200 
4201 	return busiest;
4202 }
4203 
4204 /*
4205  * Max backoff if we encounter pinned tasks. The value is fairly arbitrary;
4206  * it just needs to be large enough.
4207  */
4208 #define MAX_PINNED_INTERVAL	512
4209 
4210 /* Working cpumask for load_balance and load_balance_newidle. */
4211 DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
4212 
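/*
 * Decide whether to kick the stopper-based active balance after regular
 * migration moved nothing: either this is a newly idle, lower numbered cpu
 * that SD_ASYM_PACKING wants the work pushed towards, or the domain has
 * already failed to balance more than cache_nice_tries + 2 times.
 */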
4213 static int need_active_balance(struct lb_env *env)
4214 {
4215 	struct sched_domain *sd = env->sd;
4216 
4217 	if (env->idle == CPU_NEWLY_IDLE) {
4218 
4219 		/*
4220 		 * ASYM_PACKING needs to force migrate tasks from busy but
4221 		 * higher numbered CPUs in order to pack all tasks in the
4222 		 * lowest numbered CPUs.
4223 		 */
4224 		if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
4225 			return 1;
4226 	}
4227 
4228 	return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
4229 }
4230 
4231 static int active_load_balance_cpu_stop(void *data);
4232 
4233 /*
4234  * Check this_cpu to ensure it is balanced within domain. Attempt to move
4235  * tasks if there is an imbalance.
4236  */
4237 static int load_balance(int this_cpu, struct rq *this_rq,
4238 			struct sched_domain *sd, enum cpu_idle_type idle,
4239 			int *balance)
4240 {
4241 	int ld_moved, cur_ld_moved, active_balance = 0;
4242 	int lb_iterations, max_lb_iterations;
4243 	struct sched_group *group;
4244 	struct rq *busiest;
4245 	unsigned long flags;
4246 	struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
4247 
4248 	struct lb_env env = {
4249 		.sd		= sd,
4250 		.dst_cpu	= this_cpu,
4251 		.dst_rq		= this_rq,
4252 		.dst_grpmask    = sched_group_cpus(sd->groups),
4253 		.idle		= idle,
4254 		.loop_break	= sched_nr_migrate_break,
4255 	};
4256 
4257 	cpumask_copy(cpus, cpu_active_mask);
4258 	max_lb_iterations = cpumask_weight(env.dst_grpmask);
4259 
4260 	schedstat_inc(sd, lb_count[idle]);
4261 
4262 redo:
4263 	group = find_busiest_group(&env, cpus, balance);
4264 
4265 	if (*balance == 0)
4266 		goto out_balanced;
4267 
4268 	if (!group) {
4269 		schedstat_inc(sd, lb_nobusyg[idle]);
4270 		goto out_balanced;
4271 	}
4272 
4273 	busiest = find_busiest_queue(&env, group, cpus);
4274 	if (!busiest) {
4275 		schedstat_inc(sd, lb_nobusyq[idle]);
4276 		goto out_balanced;
4277 	}
4278 
4279 	BUG_ON(busiest == this_rq);
4280 
4281 	schedstat_add(sd, lb_imbalance[idle], env.imbalance);
4282 
4283 	ld_moved = 0;
4284 	lb_iterations = 1;
4285 	if (busiest->nr_running > 1) {
4286 		/*
4287 		 * Attempt to move tasks. If find_busiest_group has found
4288 		 * an imbalance but busiest->nr_running <= 1, the group is
4289 		 * still unbalanced. ld_moved simply stays zero, so it is
4290 		 * correctly treated as an imbalance.
4291 		 */
4292 		env.flags |= LBF_ALL_PINNED;
4293 		env.src_cpu   = busiest->cpu;
4294 		env.src_rq    = busiest;
4295 		env.loop_max  = min(sysctl_sched_nr_migrate, busiest->nr_running);
4296 
4297 more_balance:
4298 		local_irq_save(flags);
4299 		double_rq_lock(this_rq, busiest);
4300 		if (!env.loop)
4301 			update_h_load(env.src_cpu);
4302 
4303 		/*
4304 		 * cur_ld_moved - load moved in current iteration
4305 		 * ld_moved     - cumulative load moved across iterations
4306 		 */
4307 		cur_ld_moved = move_tasks(&env);
4308 		ld_moved += cur_ld_moved;
4309 		double_rq_unlock(this_rq, busiest);
4310 		local_irq_restore(flags);
4311 
4312 		if (env.flags & LBF_NEED_BREAK) {
4313 			env.flags &= ~LBF_NEED_BREAK;
4314 			goto more_balance;
4315 		}
4316 
4317 		/*
4318 		 * some other cpu did the load balance for us.
4319 		 */
4320 		if (cur_ld_moved && env.dst_cpu != smp_processor_id())
4321 			resched_cpu(env.dst_cpu);
4322 
4323 		/*
4324 		 * Revisit (affine) tasks on src_cpu that couldn't be moved to
4325 		 * us and move them to an alternate dst_cpu in our sched_group
4326 		 * where they can run. The upper limit on how many times we
4327 		 * iterate on same src_cpu is dependent on number of cpus in our
4328 		 * sched_group.
4329 		 *
4330 		 * This changes load balance semantics a bit on who can move
4331 		 * load to a given_cpu. In addition to the given_cpu itself
4332 		 * (or an ilb_cpu acting on its behalf where given_cpu is
4333 		 * nohz-idle), we now have balance_cpu in a position to move
4334 		 * load to given_cpu. In rare situations, this may cause
4335 		 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
4336 		 * _independently_ and at _same_ time to move some load to
4337 		 * given_cpu) causing excess load to be moved to given_cpu.
4338 		 * This however should not happen often in practice and
4339 		 * moreover subsequent load balance cycles should correct the
4340 		 * excess load moved.
4341 		 */
4342 		if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0 &&
4343 				lb_iterations++ < max_lb_iterations) {
4344 
4345 			this_rq		 = cpu_rq(env.new_dst_cpu);
4346 			env.dst_rq	 = this_rq;
4347 			env.dst_cpu	 = env.new_dst_cpu;
4348 			env.flags	&= ~LBF_SOME_PINNED;
4349 			env.loop	 = 0;
4350 			env.loop_break	 = sched_nr_migrate_break;
4351 			/*
4352 			 * Go back to "more_balance" rather than "redo" since we
4353 			 * need to continue with same src_cpu.
4354 			 */
4355 			goto more_balance;
4356 		}
4357 
4358 		/* All tasks on this runqueue were pinned by CPU affinity */
4359 		if (unlikely(env.flags & LBF_ALL_PINNED)) {
4360 			cpumask_clear_cpu(cpu_of(busiest), cpus);
4361 			if (!cpumask_empty(cpus)) {
4362 				env.loop = 0;
4363 				env.loop_break = sched_nr_migrate_break;
4364 				goto redo;
4365 			}
4366 			goto out_balanced;
4367 		}
4368 	}
4369 
4370 	if (!ld_moved) {
4371 		schedstat_inc(sd, lb_failed[idle]);
4372 		/*
4373 		 * Increment the failure counter only on periodic balance.
4374 		 * We do not want newidle balance, which can be very
4375 		 * frequent, pollute the failure counter causing
4376 		 * excessive cache_hot migrations and active balances.
4377 		 */
4378 		if (idle != CPU_NEWLY_IDLE)
4379 			sd->nr_balance_failed++;
4380 
4381 		if (need_active_balance(&env)) {
4382 			raw_spin_lock_irqsave(&busiest->lock, flags);
4383 
4384 			/* don't kick the active_load_balance_cpu_stop,
4385 			 * if the curr task on busiest cpu can't be
4386 			 * moved to this_cpu
4387 			 */
4388 			if (!cpumask_test_cpu(this_cpu,
4389 					tsk_cpus_allowed(busiest->curr))) {
4390 				raw_spin_unlock_irqrestore(&busiest->lock,
4391 							    flags);
4392 				env.flags |= LBF_ALL_PINNED;
4393 				goto out_one_pinned;
4394 			}
4395 
4396 			/*
4397 			 * ->active_balance synchronizes accesses to
4398 			 * ->active_balance_work.  Once set, it's cleared
4399 			 * only after active load balance is finished.
4400 			 */
4401 			if (!busiest->active_balance) {
4402 				busiest->active_balance = 1;
4403 				busiest->push_cpu = this_cpu;
4404 				active_balance = 1;
4405 			}
4406 			raw_spin_unlock_irqrestore(&busiest->lock, flags);
4407 
4408 			if (active_balance) {
4409 				stop_one_cpu_nowait(cpu_of(busiest),
4410 					active_load_balance_cpu_stop, busiest,
4411 					&busiest->active_balance_work);
4412 			}
4413 
4414 			/*
4415 			 * We've kicked active balancing, reset the failure
4416 			 * counter.
4417 			 */
4418 			sd->nr_balance_failed = sd->cache_nice_tries+1;
4419 		}
4420 	} else
4421 		sd->nr_balance_failed = 0;
4422 
4423 	if (likely(!active_balance)) {
4424 		/* We were unbalanced, so reset the balancing interval */
4425 		sd->balance_interval = sd->min_interval;
4426 	} else {
4427 		/*
4428 		 * If we've begun active balancing, start to back off. This
4429 		 * case may not be covered by the all_pinned logic if there
4430 		 * is only 1 task on the busy runqueue (because we don't call
4431 		 * move_tasks).
4432 		 */
4433 		if (sd->balance_interval < sd->max_interval)
4434 			sd->balance_interval *= 2;
4435 	}
4436 
4437 	goto out;
4438 
4439 out_balanced:
4440 	schedstat_inc(sd, lb_balanced[idle]);
4441 
4442 	sd->nr_balance_failed = 0;
4443 
4444 out_one_pinned:
4445 	/* tune up the balancing interval */
4446 	if (((env.flags & LBF_ALL_PINNED) &&
4447 			sd->balance_interval < MAX_PINNED_INTERVAL) ||
4448 			(sd->balance_interval < sd->max_interval))
4449 		sd->balance_interval *= 2;
4450 
4451 	ld_moved = 0;
4452 out:
4453 	return ld_moved;
4454 }
4455 
4456 /*
4457  * idle_balance is called by schedule() if this_cpu is about to become
4458  * idle. Attempts to pull tasks from other CPUs.
4459  */
4460 void idle_balance(int this_cpu, struct rq *this_rq)
4461 {
4462 	struct sched_domain *sd;
4463 	int pulled_task = 0;
4464 	unsigned long next_balance = jiffies + HZ;
4465 
4466 	this_rq->idle_stamp = this_rq->clock;
4467 
4468 	if (this_rq->avg_idle < sysctl_sched_migration_cost)
4469 		return;
4470 
4471 	/*
4472 	 * Drop the rq->lock, but keep IRQ/preempt disabled.
4473 	 */
4474 	raw_spin_unlock(&this_rq->lock);
4475 
4476 	update_shares(this_cpu);
4477 	rcu_read_lock();
4478 	for_each_domain(this_cpu, sd) {
4479 		unsigned long interval;
4480 		int balance = 1;
4481 
4482 		if (!(sd->flags & SD_LOAD_BALANCE))
4483 			continue;
4484 
4485 		if (sd->flags & SD_BALANCE_NEWIDLE) {
4486 			/* If we've pulled tasks over stop searching: */
4487 			pulled_task = load_balance(this_cpu, this_rq,
4488 						   sd, CPU_NEWLY_IDLE, &balance);
4489 		}
4490 
4491 		interval = msecs_to_jiffies(sd->balance_interval);
4492 		if (time_after(next_balance, sd->last_balance + interval))
4493 			next_balance = sd->last_balance + interval;
4494 		if (pulled_task) {
4495 			this_rq->idle_stamp = 0;
4496 			break;
4497 		}
4498 	}
4499 	rcu_read_unlock();
4500 
4501 	raw_spin_lock(&this_rq->lock);
4502 
4503 	if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
4504 		/*
4505 		 * We are going idle. next_balance may be set based on
4506 		 * a busy processor. So reset next_balance.
4507 		 */
4508 		this_rq->next_balance = next_balance;
4509 	}
4510 }
4511 
4512 /*
4513  * active_load_balance_cpu_stop is run by cpu stopper. It pushes
4514  * running tasks off the busiest CPU onto idle CPUs. It requires at
4515  * least 1 task to be running on each physical CPU where possible, and
4516  * avoids physical / logical imbalances.
4517  */
4518 static int active_load_balance_cpu_stop(void *data)
4519 {
4520 	struct rq *busiest_rq = data;
4521 	int busiest_cpu = cpu_of(busiest_rq);
4522 	int target_cpu = busiest_rq->push_cpu;
4523 	struct rq *target_rq = cpu_rq(target_cpu);
4524 	struct sched_domain *sd;
4525 
4526 	raw_spin_lock_irq(&busiest_rq->lock);
4527 
4528 	/* make sure the requested cpu hasn't gone down in the meantime */
4529 	if (unlikely(busiest_cpu != smp_processor_id() ||
4530 		     !busiest_rq->active_balance))
4531 		goto out_unlock;
4532 
4533 	/* Is there any task to move? */
4534 	if (busiest_rq->nr_running <= 1)
4535 		goto out_unlock;
4536 
4537 	/*
4538 	 * This condition is "impossible", if it occurs
4539 	 * we need to fix it. Originally reported by
4540 	 * Bjorn Helgaas on a 128-cpu setup.
4541 	 */
4542 	BUG_ON(busiest_rq == target_rq);
4543 
4544 	/* move a task from busiest_rq to target_rq */
4545 	double_lock_balance(busiest_rq, target_rq);
4546 
4547 	/* Search for an sd spanning us and the target CPU. */
4548 	rcu_read_lock();
4549 	for_each_domain(target_cpu, sd) {
4550 		if ((sd->flags & SD_LOAD_BALANCE) &&
4551 		    cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
4552 				break;
4553 	}
4554 
4555 	if (likely(sd)) {
4556 		struct lb_env env = {
4557 			.sd		= sd,
4558 			.dst_cpu	= target_cpu,
4559 			.dst_rq		= target_rq,
4560 			.src_cpu	= busiest_rq->cpu,
4561 			.src_rq		= busiest_rq,
4562 			.idle		= CPU_IDLE,
4563 		};
4564 
4565 		schedstat_inc(sd, alb_count);
4566 
4567 		if (move_one_task(&env))
4568 			schedstat_inc(sd, alb_pushed);
4569 		else
4570 			schedstat_inc(sd, alb_failed);
4571 	}
4572 	rcu_read_unlock();
4573 	double_unlock_balance(busiest_rq, target_rq);
4574 out_unlock:
4575 	busiest_rq->active_balance = 0;
4576 	raw_spin_unlock_irq(&busiest_rq->lock);
4577 	return 0;
4578 }
4579 
4580 #ifdef CONFIG_NO_HZ
4581 /*
4582  * idle load balancing details
4583  * - When one of the busy CPUs notices that there may be an idle rebalancing
4584  *   needed, they will kick the idle load balancer, which then does idle
4585  *   load balancing for all the idle CPUs.
4586  */
4587 static struct {
4588 	cpumask_var_t idle_cpus_mask;
4589 	atomic_t nr_cpus;
4590 	unsigned long next_balance;     /* in jiffy units */
4591 } nohz ____cacheline_aligned;
4592 
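/*
 * Pick the cpu that will run the nohz idle balance: the first cpu in
 * nohz.idle_cpus_mask that is still idle, or nr_cpu_ids when there is no
 * suitable candidate. (@call_cpu is currently unused.)
 */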
4593 static inline int find_new_ilb(int call_cpu)
4594 {
4595 	int ilb = cpumask_first(nohz.idle_cpus_mask);
4596 
4597 	if (ilb < nr_cpu_ids && idle_cpu(ilb))
4598 		return ilb;
4599 
4600 	return nr_cpu_ids;
4601 }
4602 
4603 /*
4604  * Kick a CPU to do the nohz balancing, if it is time for it. We pick
4605  * the first idle CPU in nohz.idle_cpus_mask (if there is one) to act
4606  * as the idle load balancer.
4607  */
4608 static void nohz_balancer_kick(int cpu)
4609 {
4610 	int ilb_cpu;
4611 
4612 	nohz.next_balance++;
4613 
4614 	ilb_cpu = find_new_ilb(cpu);
4615 
4616 	if (ilb_cpu >= nr_cpu_ids)
4617 		return;
4618 
4619 	if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
4620 		return;
4621 	/*
4622 	 * Use smp_send_reschedule() instead of resched_cpu().
4623 	 * This way we generate a sched IPI on the target cpu which
4624 	 * is idle. And the softirq performing nohz idle load balance
4625 	 * will be run before returning from the IPI.
4626 	 */
4627 	smp_send_reschedule(ilb_cpu);
4628 	return;
4629 }
4630 
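/*
 * Undo the nohz bookkeeping for @cpu: once its tick runs again (or the cpu
 * goes offline), drop it from nohz.idle_cpus_mask and decrement the count
 * of tick-stopped cpus.
 */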
4631 static inline void clear_nohz_tick_stopped(int cpu)
4632 {
4633 	if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
4634 		cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
4635 		atomic_dec(&nohz.nr_cpus);
4636 		clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
4637 	}
4638 }
4639 
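/*
 * Transition this cpu from NOHZ_IDLE to busy: bump nr_busy_cpus in the
 * sched_group_power at every level of its domain hierarchy so that
 * nohz_kick_needed() can see how many siblings are busy.
 */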
4640 static inline void set_cpu_sd_state_busy(void)
4641 {
4642 	struct sched_domain *sd;
4643 	int cpu = smp_processor_id();
4644 
4645 	if (!test_bit(NOHZ_IDLE, nohz_flags(cpu)))
4646 		return;
4647 	clear_bit(NOHZ_IDLE, nohz_flags(cpu));
4648 
4649 	rcu_read_lock();
4650 	for_each_domain(cpu, sd)
4651 		atomic_inc(&sd->groups->sgp->nr_busy_cpus);
4652 	rcu_read_unlock();
4653 }
4654 
4655 void set_cpu_sd_state_idle(void)
4656 {
4657 	struct sched_domain *sd;
4658 	int cpu = smp_processor_id();
4659 
4660 	if (test_bit(NOHZ_IDLE, nohz_flags(cpu)))
4661 		return;
4662 	set_bit(NOHZ_IDLE, nohz_flags(cpu));
4663 
4664 	rcu_read_lock();
4665 	for_each_domain(cpu, sd)
4666 		atomic_dec(&sd->groups->sgp->nr_busy_cpus);
4667 	rcu_read_unlock();
4668 }
4669 
4670 /*
4671  * This routine will record that this cpu is going idle with tick stopped.
4672  * This info will be used in performing idle load balancing in the future.
4673  */
4674 void select_nohz_load_balancer(int stop_tick)
4675 {
4676 	int cpu = smp_processor_id();
4677 
4678 	/*
4679 	 * If this cpu is going down, then nothing needs to be done.
4680 	 */
4681 	if (!cpu_active(cpu))
4682 		return;
4683 
4684 	if (stop_tick) {
4685 		if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
4686 			return;
4687 
4688 		cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
4689 		atomic_inc(&nohz.nr_cpus);
4690 		set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
4691 	}
4692 	return;
4693 }
4694 
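/*
 * Hotplug notifier: a dying cpu must stop being accounted as a nohz idle
 * balancer candidate.
 */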
4695 static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb,
4696 					unsigned long action, void *hcpu)
4697 {
4698 	switch (action & ~CPU_TASKS_FROZEN) {
4699 	case CPU_DYING:
4700 		clear_nohz_tick_stopped(smp_processor_id());
4701 		return NOTIFY_OK;
4702 	default:
4703 		return NOTIFY_DONE;
4704 	}
4705 }
4706 #endif
4707 
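/* Serializes load balancing of SD_SERIALIZE domains across cpus. */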
4708 static DEFINE_SPINLOCK(balancing);
4709 
4710 /*
4711  * Scale the max load_balance interval with the number of CPUs in the system.
4712  * This trades load-balance latency on larger machines for less cross talk.
4713  */
4714 void update_max_interval(void)
4715 {
4716 	max_load_balance_interval = HZ*num_online_cpus()/10;
4717 }
4718 
4719 /*
4720  * It checks each scheduling domain to see if it is due to be balanced,
4721  * and initiates a balancing operation if so.
4722  *
4723  * Balancing parameters are set up in arch_init_sched_domains.
4724  */
4725 static void rebalance_domains(int cpu, enum cpu_idle_type idle)
4726 {
4727 	int balance = 1;
4728 	struct rq *rq = cpu_rq(cpu);
4729 	unsigned long interval;
4730 	struct sched_domain *sd;
4731 	/* Earliest time when we have to do rebalance again */
4732 	unsigned long next_balance = jiffies + 60*HZ;
4733 	int update_next_balance = 0;
4734 	int need_serialize;
4735 
4736 	update_shares(cpu);
4737 
4738 	rcu_read_lock();
4739 	for_each_domain(cpu, sd) {
4740 		if (!(sd->flags & SD_LOAD_BALANCE))
4741 			continue;
4742 
4743 		interval = sd->balance_interval;
4744 		if (idle != CPU_IDLE)
4745 			interval *= sd->busy_factor;
4746 
4747 		/* scale ms to jiffies */
4748 		interval = msecs_to_jiffies(interval);
4749 		interval = clamp(interval, 1UL, max_load_balance_interval);
4750 
4751 		need_serialize = sd->flags & SD_SERIALIZE;
4752 
4753 		if (need_serialize) {
4754 			if (!spin_trylock(&balancing))
4755 				goto out;
4756 		}
4757 
4758 		if (time_after_eq(jiffies, sd->last_balance + interval)) {
4759 			if (load_balance(cpu, rq, sd, idle, &balance)) {
4760 				/*
4761 				 * We've pulled tasks over so either we're no
4762 				 * longer idle.
4763 				 */
4764 				idle = CPU_NOT_IDLE;
4765 			}
4766 			sd->last_balance = jiffies;
4767 		}
4768 		if (need_serialize)
4769 			spin_unlock(&balancing);
4770 out:
4771 		if (time_after(next_balance, sd->last_balance + interval)) {
4772 			next_balance = sd->last_balance + interval;
4773 			update_next_balance = 1;
4774 		}
4775 
4776 		/*
4777 		 * Stop the load balance at this level. There is another
4778 		 * CPU in our sched group which is doing load balancing more
4779 		 * actively.
4780 		 */
4781 		if (!balance)
4782 			break;
4783 	}
4784 	rcu_read_unlock();
4785 
4786 	/*
4787 	 * next_balance will be updated only when there is a need.
4788 	 * When the cpu is attached to null domain for ex, it will not be
4789 	 * updated.
4790 	 */
4791 	if (likely(update_next_balance))
4792 		rq->next_balance = next_balance;
4793 }
4794 
4795 #ifdef CONFIG_NO_HZ
4796 /*
4797  * In CONFIG_NO_HZ case, the idle balance kickee will do the
4798  * rebalancing for all the cpus for whom scheduler ticks are stopped.
4799  */
4800 static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
4801 {
4802 	struct rq *this_rq = cpu_rq(this_cpu);
4803 	struct rq *rq;
4804 	int balance_cpu;
4805 
4806 	if (idle != CPU_IDLE ||
4807 	    !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
4808 		goto end;
4809 
4810 	for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
4811 		if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
4812 			continue;
4813 
4814 		/*
4815 		 * If this cpu gets work to do, stop the load balancing
4816 		 * work being done for other cpus. The next load
4817 		 * balancing owner will pick it up.
4818 		 */
4819 		if (need_resched())
4820 			break;
4821 
4822 		rq = cpu_rq(balance_cpu);
4823 		raw_spin_lock_irq(&rq->lock);
4824 		update_rq_clock(rq);
4825 		update_idle_cpu_load(rq);
4826 		raw_spin_unlock_irq(&rq->lock);
4827 
4828 		rebalance_domains(balance_cpu, CPU_IDLE);
4829 
4830 		if (time_after(this_rq->next_balance, rq->next_balance))
4831 			this_rq->next_balance = rq->next_balance;
4832 	}
4833 	nohz.next_balance = this_rq->next_balance;
4834 end:
4835 	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
4836 }
4837 
4838 /*
4839  * Current heuristic for kicking the idle load balancer in the presence
4840  * of an idle cpu in the system:
4841  *   - This rq has more than one task.
4842  *   - At any scheduler domain level, this cpu's scheduler group has multiple
4843  *     busy cpus exceeding the group's power.
4844  *   - For SD_ASYM_PACKING, if the lower numbered cpus in the scheduler
4845  *     domain span are idle.
4846  */
4847 static inline int nohz_kick_needed(struct rq *rq, int cpu)
4848 {
4849 	unsigned long now = jiffies;
4850 	struct sched_domain *sd;
4851 
4852 	if (unlikely(idle_cpu(cpu)))
4853 		return 0;
4854 
4855 	/*
4856 	 * We may have been in ticked or tickless idle mode recently. At the
4857 	 * first busy tick after returning from idle, we will update the busy stats.
4858 	 */
4859 	set_cpu_sd_state_busy();
4860 	clear_nohz_tick_stopped(cpu);
4861 
4862 	/*
4863 	 * None are in tickless mode and hence no need for NOHZ idle load
4864 	 * balancing.
4865 	 */
4866 	if (likely(!atomic_read(&nohz.nr_cpus)))
4867 		return 0;
4868 
4869 	if (time_before(now, nohz.next_balance))
4870 		return 0;
4871 
4872 	if (rq->nr_running >= 2)
4873 		goto need_kick;
4874 
4875 	rcu_read_lock();
4876 	for_each_domain(cpu, sd) {
4877 		struct sched_group *sg = sd->groups;
4878 		struct sched_group_power *sgp = sg->sgp;
4879 		int nr_busy = atomic_read(&sgp->nr_busy_cpus);
4880 
4881 		if (sd->flags & SD_SHARE_PKG_RESOURCES && nr_busy > 1)
4882 			goto need_kick_unlock;
4883 
4884 		if (sd->flags & SD_ASYM_PACKING && nr_busy != sg->group_weight
4885 		    && (cpumask_first_and(nohz.idle_cpus_mask,
4886 					  sched_domain_span(sd)) < cpu))
4887 			goto need_kick_unlock;
4888 
4889 		if (!(sd->flags & (SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING)))
4890 			break;
4891 	}
4892 	rcu_read_unlock();
4893 	return 0;
4894 
4895 need_kick_unlock:
4896 	rcu_read_unlock();
4897 need_kick:
4898 	return 1;
4899 }
4900 #else
4901 static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
4902 #endif
4903 
4904 /*
4905  * run_rebalance_domains is triggered when needed from the scheduler tick.
4906  * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
4907  */
4908 static void run_rebalance_domains(struct softirq_action *h)
4909 {
4910 	int this_cpu = smp_processor_id();
4911 	struct rq *this_rq = cpu_rq(this_cpu);
4912 	enum cpu_idle_type idle = this_rq->idle_balance ?
4913 						CPU_IDLE : CPU_NOT_IDLE;
4914 
4915 	rebalance_domains(this_cpu, idle);
4916 
4917 	/*
4918 	 * If this cpu has a pending nohz_balance_kick, then do the
4919 	 * balancing on behalf of the other idle cpus whose ticks are
4920 	 * stopped.
4921 	 */
4922 	nohz_idle_balance(this_cpu, idle);
4923 }
4924 
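/* True if @cpu currently has no sched_domain attached (e.g. an isolated cpu). */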
4925 static inline int on_null_domain(int cpu)
4926 {
4927 	return !rcu_dereference_sched(cpu_rq(cpu)->sd);
4928 }
4929 
4930 /*
4931  * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
4932  */
4933 void trigger_load_balance(struct rq *rq, int cpu)
4934 {
4935 	/* Don't need to rebalance while attached to NULL domain */
4936 	if (time_after_eq(jiffies, rq->next_balance) &&
4937 	    likely(!on_null_domain(cpu)))
4938 		raise_softirq(SCHED_SOFTIRQ);
4939 #ifdef CONFIG_NO_HZ
4940 	if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
4941 		nohz_balancer_kick(cpu);
4942 #endif
4943 }
4944 
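/*
 * rq_online_fair()/rq_offline_fair(): the set of online cpus changed, so
 * re-derive the tunables that are scaled by the number of online cpus.
 */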
4945 static void rq_online_fair(struct rq *rq)
4946 {
4947 	update_sysctl();
4948 }
4949 
4950 static void rq_offline_fair(struct rq *rq)
4951 {
4952 	update_sysctl();
4953 }
4954 
4955 #endif /* CONFIG_SMP */
4956 
4957 /*
4958  * scheduler tick hitting a task of our scheduling class:
4959  */
4960 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
4961 {
4962 	struct cfs_rq *cfs_rq;
4963 	struct sched_entity *se = &curr->se;
4964 
4965 	for_each_sched_entity(se) {
4966 		cfs_rq = cfs_rq_of(se);
4967 		entity_tick(cfs_rq, se, queued);
4968 	}
4969 }
4970 
4971 /*
4972  * called on fork with the child task as argument from the parent's context
4973  *  - child not yet on the tasklist
4974  *  - preemption disabled
4975  */
4976 static void task_fork_fair(struct task_struct *p)
4977 {
4978 	struct cfs_rq *cfs_rq;
4979 	struct sched_entity *se = &p->se, *curr;
4980 	int this_cpu = smp_processor_id();
4981 	struct rq *rq = this_rq();
4982 	unsigned long flags;
4983 
4984 	raw_spin_lock_irqsave(&rq->lock, flags);
4985 
4986 	update_rq_clock(rq);
4987 
4988 	cfs_rq = task_cfs_rq(current);
4989 	curr = cfs_rq->curr;
4990 
4991 	if (unlikely(task_cpu(p) != this_cpu)) {
4992 		rcu_read_lock();
4993 		__set_task_cpu(p, this_cpu);
4994 		rcu_read_unlock();
4995 	}
4996 
4997 	update_curr(cfs_rq);
4998 
4999 	if (curr)
5000 		se->vruntime = curr->vruntime;
5001 	place_entity(cfs_rq, se, 1);
5002 
5003 	if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
5004 		/*
5005 		 * Upon rescheduling, sched_class::put_prev_task() will place
5006 		 * 'current' within the tree based on its new key value.
5007 		 */
5008 		swap(curr->vruntime, se->vruntime);
5009 		resched_task(rq->curr);
5010 	}
5011 
5012 	se->vruntime -= cfs_rq->min_vruntime;
5013 
5014 	raw_spin_unlock_irqrestore(&rq->lock, flags);
5015 }
5016 
5017 /*
5018  * Priority of the task has changed. Check to see if we preempt
5019  * the current task.
5020  */
5021 static void
5022 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
5023 {
5024 	if (!p->se.on_rq)
5025 		return;
5026 
5027 	/*
5028 	 * Reschedule if we are currently running on this runqueue and
5029 	 * our priority decreased, or if we are not currently running on
5030 	 * this runqueue and our priority is higher than the current's
5031 	 */
5032 	if (rq->curr == p) {
5033 		if (p->prio > oldprio)
5034 			resched_task(rq->curr);
5035 	} else
5036 		check_preempt_curr(rq, p, 0);
5037 }
5038 
5039 static void switched_from_fair(struct rq *rq, struct task_struct *p)
5040 {
5041 	struct sched_entity *se = &p->se;
5042 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
5043 
5044 	/*
5045 	 * Ensure the task's vruntime is normalized, so that when its
5046 	 * switched back to the fair class the enqueue_entity(.flags=0) will
5047 	 * do the right thing.
5048 	 *
5049 	 * If it was on_rq, then the dequeue_entity(.flags=0) will already
5050 	 * have normalized the vruntime, if it was !on_rq, then only when
5051 	 * the task is sleeping will it still have non-normalized vruntime.
5052 	 */
5053 	if (!se->on_rq && p->state != TASK_RUNNING) {
5054 		/*
5055 		 * Fix up our vruntime so that the current sleep doesn't
5056 		 * cause 'unlimited' sleep bonus.
5057 		 */
5058 		place_entity(cfs_rq, se, 0);
5059 		se->vruntime -= cfs_rq->min_vruntime;
5060 	}
5061 }
5062 
5063 /*
5064  * We switched to the sched_fair class.
5065  */
5066 static void switched_to_fair(struct rq *rq, struct task_struct *p)
5067 {
5068 	if (!p->se.on_rq)
5069 		return;
5070 
5071 	/*
5072 	 * We were most likely switched from sched_rt, so
5073 	 * kick off the schedule if running, otherwise just see
5074 	 * if we can still preempt the current task.
5075 	 */
5076 	if (rq->curr == p)
5077 		resched_task(rq->curr);
5078 	else
5079 		check_preempt_curr(rq, p, 0);
5080 }
5081 
5082 /* Account for a task changing its policy or group.
5083  *
5084  * This routine is mostly called to set cfs_rq->curr field when a task
5085  * migrates between groups/classes.
5086  */
5087 static void set_curr_task_fair(struct rq *rq)
5088 {
5089 	struct sched_entity *se = &rq->curr->se;
5090 
5091 	for_each_sched_entity(se) {
5092 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
5093 
5094 		set_next_entity(cfs_rq, se);
5095 		/* ensure bandwidth has been allocated on our new cfs_rq */
5096 		account_cfs_rq_runtime(cfs_rq, 0);
5097 	}
5098 }
5099 
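/*
 * Set up an empty cfs_rq: an empty rbtree and a min_vruntime primed just
 * below the u64 wrap point so that vruntime wraparound handling gets
 * exercised soon after boot.
 */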
5100 void init_cfs_rq(struct cfs_rq *cfs_rq)
5101 {
5102 	cfs_rq->tasks_timeline = RB_ROOT;
5103 	cfs_rq->min_vruntime = (u64)(-(1LL << 20));
5104 #ifndef CONFIG_64BIT
5105 	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
5106 #endif
5107 }
5108 
5109 #ifdef CONFIG_FAIR_GROUP_SCHED
5110 static void task_move_group_fair(struct task_struct *p, int on_rq)
5111 {
5112 	/*
5113 	 * If the task was not on the rq at the time of this cgroup movement
5114 	 * it must have been asleep, sleeping tasks keep their ->vruntime
5115 	 * absolute on their old rq until wakeup (needed for the fair sleeper
5116 	 * bonus in place_entity()).
5117 	 *
5118 	 * If it was on the rq, we've just 'preempted' it, which does convert
5119 	 * ->vruntime to a relative base.
5120 	 *
5121 	 * Make sure both cases convert their relative position when migrating
5122 	 * to another cgroup's rq. This does somewhat interfere with the
5123 	 * fair sleeper stuff for the first placement, but who cares.
5124 	 */
5125 	/*
5126 	 * When !on_rq, vruntime of the task has usually NOT been normalized.
5127 	 * But there are some cases where it has already been normalized:
5128 	 *
5129 	 * - Moving a forked child which is waiting for being woken up by
5130 	 *   wake_up_new_task().
5131 	 * - Moving a task which has been woken up by try_to_wake_up() and
5132 	 *   waiting for actually being woken up by sched_ttwu_pending().
5133 	 *
5134 	 * To prevent boost or penalty in the new cfs_rq caused by delta
5135 	 * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
5136 	 */
5137 	if (!on_rq && (!p->se.sum_exec_runtime || p->state == TASK_WAKING))
5138 		on_rq = 1;
5139 
5140 	if (!on_rq)
5141 		p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
5142 	set_task_rq(p, task_cpu(p));
5143 	if (!on_rq)
5144 		p->se.vruntime += cfs_rq_of(&p->se)->min_vruntime;
5145 }
5146 
5147 void free_fair_sched_group(struct task_group *tg)
5148 {
5149 	int i;
5150 
5151 	destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
5152 
5153 	for_each_possible_cpu(i) {
5154 		if (tg->cfs_rq)
5155 			kfree(tg->cfs_rq[i]);
5156 		if (tg->se)
5157 			kfree(tg->se[i]);
5158 	}
5159 
5160 	kfree(tg->cfs_rq);
5161 	kfree(tg->se);
5162 }
5163 
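/*
 * Allocate the per-cpu cfs_rq and group sched_entity arrays for a new task
 * group and hook each entry under @parent; returns 1 on success, 0 if any
 * allocation fails.
 */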
5164 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
5165 {
5166 	struct cfs_rq *cfs_rq;
5167 	struct sched_entity *se;
5168 	int i;
5169 
5170 	tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
5171 	if (!tg->cfs_rq)
5172 		goto err;
5173 	tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
5174 	if (!tg->se)
5175 		goto err;
5176 
5177 	tg->shares = NICE_0_LOAD;
5178 
5179 	init_cfs_bandwidth(tg_cfs_bandwidth(tg));
5180 
5181 	for_each_possible_cpu(i) {
5182 		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
5183 				      GFP_KERNEL, cpu_to_node(i));
5184 		if (!cfs_rq)
5185 			goto err;
5186 
5187 		se = kzalloc_node(sizeof(struct sched_entity),
5188 				  GFP_KERNEL, cpu_to_node(i));
5189 		if (!se)
5190 			goto err_free_rq;
5191 
5192 		init_cfs_rq(cfs_rq);
5193 		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
5194 	}
5195 
5196 	return 1;
5197 
5198 err_free_rq:
5199 	kfree(cfs_rq);
5200 err:
5201 	return 0;
5202 }
5203 
5204 void unregister_fair_sched_group(struct task_group *tg, int cpu)
5205 {
5206 	struct rq *rq = cpu_rq(cpu);
5207 	unsigned long flags;
5208 
5209 	/*
5210 	 * Only empty task groups can be destroyed; so we can speculatively
5211 	 * check on_list without danger of it being re-added.
5212 	 */
5213 	if (!tg->cfs_rq[cpu]->on_list)
5214 		return;
5215 
5216 	raw_spin_lock_irqsave(&rq->lock, flags);
5217 	list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
5218 	raw_spin_unlock_irqrestore(&rq->lock, flags);
5219 }
5220 
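/*
 * Wire one cpu's cfs_rq and group entity into @tg: point the cfs_rq at its
 * rq and task group, and chain the entity to @parent's queue (or to the
 * root cfs_rq when there is no parent).
 */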
5221 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
5222 			struct sched_entity *se, int cpu,
5223 			struct sched_entity *parent)
5224 {
5225 	struct rq *rq = cpu_rq(cpu);
5226 
5227 	cfs_rq->tg = tg;
5228 	cfs_rq->rq = rq;
5229 #ifdef CONFIG_SMP
5230 	/* allow initial update_cfs_load() to truncate */
5231 	cfs_rq->load_stamp = 1;
5232 #endif
5233 	init_cfs_rq_runtime(cfs_rq);
5234 
5235 	tg->cfs_rq[cpu] = cfs_rq;
5236 	tg->se[cpu] = se;
5237 
5238 	/* se could be NULL for root_task_group */
5239 	if (!se)
5240 		return;
5241 
5242 	if (!parent)
5243 		se->cfs_rq = &rq->cfs;
5244 	else
5245 		se->cfs_rq = parent->my_q;
5246 
5247 	se->my_q = cfs_rq;
5248 	update_load_set(&se->load, 0);
5249 	se->parent = parent;
5250 }
5251 
5252 static DEFINE_MUTEX(shares_mutex);
5253 
5254 int sched_group_set_shares(struct task_group *tg, unsigned long shares)
5255 {
5256 	int i;
5257 	unsigned long flags;
5258 
5259 	/*
5260 	 * We can't change the weight of the root cgroup.
5261 	 */
5262 	if (!tg->se[0])
5263 		return -EINVAL;
5264 
5265 	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
5266 
5267 	mutex_lock(&shares_mutex);
5268 	if (tg->shares == shares)
5269 		goto done;
5270 
5271 	tg->shares = shares;
5272 	for_each_possible_cpu(i) {
5273 		struct rq *rq = cpu_rq(i);
5274 		struct sched_entity *se;
5275 
5276 		se = tg->se[i];
5277 		/* Propagate contribution to hierarchy */
5278 		raw_spin_lock_irqsave(&rq->lock, flags);
5279 		for_each_sched_entity(se)
5280 			update_cfs_shares(group_cfs_rq(se));
5281 		raw_spin_unlock_irqrestore(&rq->lock, flags);
5282 	}
5283 
5284 done:
5285 	mutex_unlock(&shares_mutex);
5286 	return 0;
5287 }
5288 #else /* CONFIG_FAIR_GROUP_SCHED */
5289 
5290 void free_fair_sched_group(struct task_group *tg) { }
5291 
5292 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
5293 {
5294 	return 1;
5295 }
5296 
5297 void unregister_fair_sched_group(struct task_group *tg, int cpu) { }
5298 
5299 #endif /* CONFIG_FAIR_GROUP_SCHED */
5300 
5301 
5302 static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
5303 {
5304 	struct sched_entity *se = &task->se;
5305 	unsigned int rr_interval = 0;
5306 
5307 	/*
5308 	 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
5309 	 * idle runqueue:
5310 	 */
5311 	if (rq->cfs.load.weight)
5312 		rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
5313 
5314 	return rr_interval;
5315 }
5316 
5317 /*
5318  * All the scheduling class methods:
5319  */
5320 const struct sched_class fair_sched_class = {
5321 	.next			= &idle_sched_class,
5322 	.enqueue_task		= enqueue_task_fair,
5323 	.dequeue_task		= dequeue_task_fair,
5324 	.yield_task		= yield_task_fair,
5325 	.yield_to_task		= yield_to_task_fair,
5326 
5327 	.check_preempt_curr	= check_preempt_wakeup,
5328 
5329 	.pick_next_task		= pick_next_task_fair,
5330 	.put_prev_task		= put_prev_task_fair,
5331 
5332 #ifdef CONFIG_SMP
5333 	.select_task_rq		= select_task_rq_fair,
5334 
5335 	.rq_online		= rq_online_fair,
5336 	.rq_offline		= rq_offline_fair,
5337 
5338 	.task_waking		= task_waking_fair,
5339 #endif
5340 
5341 	.set_curr_task          = set_curr_task_fair,
5342 	.task_tick		= task_tick_fair,
5343 	.task_fork		= task_fork_fair,
5344 
5345 	.prio_changed		= prio_changed_fair,
5346 	.switched_from		= switched_from_fair,
5347 	.switched_to		= switched_to_fair,
5348 
5349 	.get_rr_interval	= get_rr_interval_fair,
5350 
5351 #ifdef CONFIG_FAIR_GROUP_SCHED
5352 	.task_move_group	= task_move_group_fair,
5353 #endif
5354 };
5355 
5356 #ifdef CONFIG_SCHED_DEBUG
5357 void print_cfs_stats(struct seq_file *m, int cpu)
5358 {
5359 	struct cfs_rq *cfs_rq;
5360 
5361 	rcu_read_lock();
5362 	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
5363 		print_cfs_rq(m, cpu, cfs_rq);
5364 	rcu_read_unlock();
5365 }
5366 #endif
5367 
5368 __init void init_sched_fair_class(void)
5369 {
5370 #ifdef CONFIG_SMP
5371 	open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
5372 
5373 #ifdef CONFIG_NO_HZ
5374 	nohz.next_balance = jiffies;
5375 	zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
5376 	cpu_notifier(sched_ilb_notifier, 0);
5377 #endif
5378 #endif /* SMP */
5379 
5380 }
5381