xref: /linux/kernel/sched/core.c (revision 5148fa52a12fa1b97c730b2fe321f2aad7ea041c)
1 /*
2  *  kernel/sched/core.c
3  *
4  *  Kernel scheduler and related syscalls
5  *
6  *  Copyright (C) 1991-2002  Linus Torvalds
7  *
8  *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
9  *		make semaphores SMP safe
10  *  1998-11-19	Implemented schedule_timeout() and related stuff
11  *		by Andrea Arcangeli
12  *  2002-01-04	New ultra-scalable O(1) scheduler by Ingo Molnar:
13  *		hybrid priority-list and round-robin design with
14  *		an array-switch method of distributing timeslices
15  *		and per-CPU runqueues.  Cleanups and useful suggestions
16  *		by Davide Libenzi, preemptible kernel bits by Robert Love.
17  *  2003-09-03	Interactivity tuning by Con Kolivas.
18  *  2004-04-02	Scheduler domains code by Nick Piggin
19  *  2007-04-15  Work begun on replacing all interactivity tuning with a
20  *              fair scheduling design by Con Kolivas.
21  *  2007-05-05  Load balancing (smp-nice) and other improvements
22  *              by Peter Williams
23  *  2007-05-06  Interactivity improvements to CFS by Mike Galbraith
24  *  2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
25  *  2007-11-29  RT balancing improvements by Steven Rostedt, Gregory Haskins,
26  *              Thomas Gleixner, Mike Kravetz
27  */
28 
29 #include <linux/mm.h>
30 #include <linux/module.h>
31 #include <linux/nmi.h>
32 #include <linux/init.h>
33 #include <linux/uaccess.h>
34 #include <linux/highmem.h>
35 #include <asm/mmu_context.h>
36 #include <linux/interrupt.h>
37 #include <linux/capability.h>
38 #include <linux/completion.h>
39 #include <linux/kernel_stat.h>
40 #include <linux/debug_locks.h>
41 #include <linux/perf_event.h>
42 #include <linux/security.h>
43 #include <linux/notifier.h>
44 #include <linux/profile.h>
45 #include <linux/freezer.h>
46 #include <linux/vmalloc.h>
47 #include <linux/blkdev.h>
48 #include <linux/delay.h>
49 #include <linux/pid_namespace.h>
50 #include <linux/smp.h>
51 #include <linux/threads.h>
52 #include <linux/timer.h>
53 #include <linux/rcupdate.h>
54 #include <linux/cpu.h>
55 #include <linux/cpuset.h>
56 #include <linux/percpu.h>
57 #include <linux/proc_fs.h>
58 #include <linux/seq_file.h>
59 #include <linux/sysctl.h>
60 #include <linux/syscalls.h>
61 #include <linux/times.h>
62 #include <linux/tsacct_kern.h>
63 #include <linux/kprobes.h>
64 #include <linux/delayacct.h>
65 #include <linux/unistd.h>
66 #include <linux/pagemap.h>
67 #include <linux/hrtimer.h>
68 #include <linux/tick.h>
69 #include <linux/debugfs.h>
70 #include <linux/ctype.h>
71 #include <linux/ftrace.h>
72 #include <linux/slab.h>
73 #include <linux/init_task.h>
74 #include <linux/binfmts.h>
75 
76 #include <asm/switch_to.h>
77 #include <asm/tlb.h>
78 #include <asm/irq_regs.h>
79 #include <asm/mutex.h>
80 #ifdef CONFIG_PARAVIRT
81 #include <asm/paravirt.h>
82 #endif
83 
84 #include "sched.h"
85 #include "../workqueue_sched.h"
86 #include "../smpboot.h"
87 
88 #define CREATE_TRACE_POINTS
89 #include <trace/events/sched.h>
90 
91 void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
92 {
93 	unsigned long delta;
94 	ktime_t soft, hard, now;
95 
96 	for (;;) {
97 		if (hrtimer_active(period_timer))
98 			break;
99 
100 		now = hrtimer_cb_get_time(period_timer);
101 		hrtimer_forward(period_timer, now, period);
102 
103 		soft = hrtimer_get_softexpires(period_timer);
104 		hard = hrtimer_get_expires(period_timer);
105 		delta = ktime_to_ns(ktime_sub(hard, soft));
106 		__hrtimer_start_range_ns(period_timer, soft, delta,
107 					 HRTIMER_MODE_ABS_PINNED, 0);
108 	}
109 }
110 
111 DEFINE_MUTEX(sched_domains_mutex);
112 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
113 
114 static void update_rq_clock_task(struct rq *rq, s64 delta);
115 
116 void update_rq_clock(struct rq *rq)
117 {
118 	s64 delta;
119 
120 	if (rq->skip_clock_update > 0)
121 		return;
122 
123 	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
124 	rq->clock += delta;
125 	update_rq_clock_task(rq, delta);
126 }
127 
128 /*
129  * Debugging: various feature bits
130  */
131 
132 #define SCHED_FEAT(name, enabled)	\
133 	(1UL << __SCHED_FEAT_##name) * enabled |
134 
135 const_debug unsigned int sysctl_sched_features =
136 #include "features.h"
137 	0;
138 
139 #undef SCHED_FEAT
140 
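/*
 * For illustration only (FOO and BAR are hypothetical feature names): a
 * features.h containing
 *
 *	SCHED_FEAT(FOO, true)
 *	SCHED_FEAT(BAR, false)
 *
 * would make the #include above expand the initializer to
 *
 *	const_debug unsigned int sysctl_sched_features =
 *		(1UL << __SCHED_FEAT_FOO) * true |
 *		(1UL << __SCHED_FEAT_BAR) * false |
 *		0;
 *
 * i.e. a bitmask with the FOO bit set and the BAR bit clear.
 */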
141 #ifdef CONFIG_SCHED_DEBUG
142 #define SCHED_FEAT(name, enabled)	\
143 	#name ,
144 
145 static __read_mostly char *sched_feat_names[] = {
146 #include "features.h"
147 	NULL
148 };
149 
150 #undef SCHED_FEAT
151 
152 static int sched_feat_show(struct seq_file *m, void *v)
153 {
154 	int i;
155 
156 	for (i = 0; i < __SCHED_FEAT_NR; i++) {
157 		if (!(sysctl_sched_features & (1UL << i)))
158 			seq_puts(m, "NO_");
159 		seq_printf(m, "%s ", sched_feat_names[i]);
160 	}
161 	seq_puts(m, "\n");
162 
163 	return 0;
164 }
165 
166 #ifdef HAVE_JUMP_LABEL
167 
168 #define jump_label_key__true  STATIC_KEY_INIT_TRUE
169 #define jump_label_key__false STATIC_KEY_INIT_FALSE
170 
171 #define SCHED_FEAT(name, enabled)	\
172 	jump_label_key__##enabled ,
173 
174 struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
175 #include "features.h"
176 };
177 
178 #undef SCHED_FEAT
179 
180 static void sched_feat_disable(int i)
181 {
182 	if (static_key_enabled(&sched_feat_keys[i]))
183 		static_key_slow_dec(&sched_feat_keys[i]);
184 }
185 
186 static void sched_feat_enable(int i)
187 {
188 	if (!static_key_enabled(&sched_feat_keys[i]))
189 		static_key_slow_inc(&sched_feat_keys[i]);
190 }
191 #else
192 static void sched_feat_disable(int i) { };
193 static void sched_feat_enable(int i) { };
194 #endif /* HAVE_JUMP_LABEL */
195 
196 static ssize_t
197 sched_feat_write(struct file *filp, const char __user *ubuf,
198 		size_t cnt, loff_t *ppos)
199 {
200 	char buf[64];
201 	char *cmp;
202 	int neg = 0;
203 	int i;
204 
205 	if (cnt > 63)
206 		cnt = 63;
207 
208 	if (copy_from_user(&buf, ubuf, cnt))
209 		return -EFAULT;
210 
211 	buf[cnt] = 0;
212 	cmp = strstrip(buf);
213 
214 	if (strncmp(cmp, "NO_", 3) == 0) {
215 		neg = 1;
216 		cmp += 3;
217 	}
218 
219 	for (i = 0; i < __SCHED_FEAT_NR; i++) {
220 		if (strcmp(cmp, sched_feat_names[i]) == 0) {
221 			if (neg) {
222 				sysctl_sched_features &= ~(1UL << i);
223 				sched_feat_disable(i);
224 			} else {
225 				sysctl_sched_features |= (1UL << i);
226 				sched_feat_enable(i);
227 			}
228 			break;
229 		}
230 	}
231 
232 	if (i == __SCHED_FEAT_NR)
233 		return -EINVAL;
234 
235 	*ppos += cnt;
236 
237 	return cnt;
238 }
239 
240 static int sched_feat_open(struct inode *inode, struct file *filp)
241 {
242 	return single_open(filp, sched_feat_show, NULL);
243 }
244 
245 static const struct file_operations sched_feat_fops = {
246 	.open		= sched_feat_open,
247 	.write		= sched_feat_write,
248 	.read		= seq_read,
249 	.llseek		= seq_lseek,
250 	.release	= single_release,
251 };
252 
253 static __init int sched_init_debug(void)
254 {
255 	debugfs_create_file("sched_features", 0644, NULL, NULL,
256 			&sched_feat_fops);
257 
258 	return 0;
259 }
260 late_initcall(sched_init_debug);
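/*
 * The file created above appears under the debugfs mount point, conventionally
 * /sys/kernel/debug. An illustrative session (the feature names shown depend on
 * the current features.h), relying on the NO_ prefix handled by
 * sched_feat_write():
 *
 *	# cat /sys/kernel/debug/sched_features
 *	GENTLE_FAIR_SLEEPERS START_DEBIT ... NO_HRTICK ...
 *	# echo HRTICK > /sys/kernel/debug/sched_features
 *	# echo NO_HRTICK > /sys/kernel/debug/sched_features
 */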
261 #endif /* CONFIG_SCHED_DEBUG */
262 
263 /*
264  * Number of tasks to iterate in a single balance run.
265  * Limited because this is done with IRQs disabled.
266  */
267 const_debug unsigned int sysctl_sched_nr_migrate = 32;
268 
269 /*
270  * period over which we average the RT time consumption, measured
271  * in ms.
272  *
273  * default: 1s
274  */
275 const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;
276 
277 /*
278  * period over which we measure -rt task cpu usage in us.
279  * default: 1s
280  */
281 unsigned int sysctl_sched_rt_period = 1000000;
282 
283 __read_mostly int scheduler_running;
284 
285 /*
286  * part of the period that we allow rt tasks to run in us.
287  * default: 0.95s
288  */
289 int sysctl_sched_rt_runtime = 950000;
290 
291 
292 
293 /*
294  * __task_rq_lock - lock the rq @p resides on.
295  */
296 static inline struct rq *__task_rq_lock(struct task_struct *p)
297 	__acquires(rq->lock)
298 {
299 	struct rq *rq;
300 
301 	lockdep_assert_held(&p->pi_lock);
302 
303 	for (;;) {
304 		rq = task_rq(p);
305 		raw_spin_lock(&rq->lock);
306 		if (likely(rq == task_rq(p)))
307 			return rq;
308 		raw_spin_unlock(&rq->lock);
309 	}
310 }
311 
312 /*
313  * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
314  */
315 static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
316 	__acquires(p->pi_lock)
317 	__acquires(rq->lock)
318 {
319 	struct rq *rq;
320 
321 	for (;;) {
322 		raw_spin_lock_irqsave(&p->pi_lock, *flags);
323 		rq = task_rq(p);
324 		raw_spin_lock(&rq->lock);
325 		if (likely(rq == task_rq(p)))
326 			return rq;
327 		raw_spin_unlock(&rq->lock);
328 		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
329 	}
330 }
331 
332 static void __task_rq_unlock(struct rq *rq)
333 	__releases(rq->lock)
334 {
335 	raw_spin_unlock(&rq->lock);
336 }
337 
338 static inline void
339 task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
340 	__releases(rq->lock)
341 	__releases(p->pi_lock)
342 {
343 	raw_spin_unlock(&rq->lock);
344 	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
345 }
346 
347 /*
348  * this_rq_lock - lock this runqueue and disable interrupts.
349  */
350 static struct rq *this_rq_lock(void)
351 	__acquires(rq->lock)
352 {
353 	struct rq *rq;
354 
355 	local_irq_disable();
356 	rq = this_rq();
357 	raw_spin_lock(&rq->lock);
358 
359 	return rq;
360 }
361 
362 #ifdef CONFIG_SCHED_HRTICK
363 /*
364  * Use HR-timers to deliver accurate preemption points.
365  *
366  * It's all a bit involved since we cannot program an hrtimer while holding the
367  * rq->lock. So what we do is store a state in rq->hrtick_* and ask for a
368  * reschedule event.
369  *
370  * When we get rescheduled we reprogram the hrtick_timer outside of the
371  * rq->lock.
372  */
373 
374 static void hrtick_clear(struct rq *rq)
375 {
376 	if (hrtimer_active(&rq->hrtick_timer))
377 		hrtimer_cancel(&rq->hrtick_timer);
378 }
379 
380 /*
381  * High-resolution timer tick.
382  * Runs from hardirq context with interrupts disabled.
383  */
384 static enum hrtimer_restart hrtick(struct hrtimer *timer)
385 {
386 	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
387 
388 	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
389 
390 	raw_spin_lock(&rq->lock);
391 	update_rq_clock(rq);
392 	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
393 	raw_spin_unlock(&rq->lock);
394 
395 	return HRTIMER_NORESTART;
396 }
397 
398 #ifdef CONFIG_SMP
399 /*
400  * called from hardirq (IPI) context
401  */
402 static void __hrtick_start(void *arg)
403 {
404 	struct rq *rq = arg;
405 
406 	raw_spin_lock(&rq->lock);
407 	hrtimer_restart(&rq->hrtick_timer);
408 	rq->hrtick_csd_pending = 0;
409 	raw_spin_unlock(&rq->lock);
410 }
411 
412 /*
413  * Called to set the hrtick timer state.
414  *
415  * called with rq->lock held and irqs disabled
416  */
417 void hrtick_start(struct rq *rq, u64 delay)
418 {
419 	struct hrtimer *timer = &rq->hrtick_timer;
420 	ktime_t time = ktime_add_ns(timer->base->get_time(), delay);
421 
422 	hrtimer_set_expires(timer, time);
423 
424 	if (rq == this_rq()) {
425 		hrtimer_restart(timer);
426 	} else if (!rq->hrtick_csd_pending) {
427 		__smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
428 		rq->hrtick_csd_pending = 1;
429 	}
430 }
431 
432 static int
433 hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
434 {
435 	int cpu = (int)(long)hcpu;
436 
437 	switch (action) {
438 	case CPU_UP_CANCELED:
439 	case CPU_UP_CANCELED_FROZEN:
440 	case CPU_DOWN_PREPARE:
441 	case CPU_DOWN_PREPARE_FROZEN:
442 	case CPU_DEAD:
443 	case CPU_DEAD_FROZEN:
444 		hrtick_clear(cpu_rq(cpu));
445 		return NOTIFY_OK;
446 	}
447 
448 	return NOTIFY_DONE;
449 }
450 
451 static __init void init_hrtick(void)
452 {
453 	hotcpu_notifier(hotplug_hrtick, 0);
454 }
455 #else
456 /*
457  * Called to set the hrtick timer state.
458  *
459  * called with rq->lock held and irqs disabled
460  */
461 void hrtick_start(struct rq *rq, u64 delay)
462 {
463 	__hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
464 			HRTIMER_MODE_REL_PINNED, 0);
465 }
466 
467 static inline void init_hrtick(void)
468 {
469 }
470 #endif /* CONFIG_SMP */
471 
472 static void init_rq_hrtick(struct rq *rq)
473 {
474 #ifdef CONFIG_SMP
475 	rq->hrtick_csd_pending = 0;
476 
477 	rq->hrtick_csd.flags = 0;
478 	rq->hrtick_csd.func = __hrtick_start;
479 	rq->hrtick_csd.info = rq;
480 #endif
481 
482 	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
483 	rq->hrtick_timer.function = hrtick;
484 }
485 #else	/* CONFIG_SCHED_HRTICK */
486 static inline void hrtick_clear(struct rq *rq)
487 {
488 }
489 
490 static inline void init_rq_hrtick(struct rq *rq)
491 {
492 }
493 
494 static inline void init_hrtick(void)
495 {
496 }
497 #endif	/* CONFIG_SCHED_HRTICK */
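/*
 * A sketch of typical use (the value is illustrative): a scheduling class arms
 * the tick from a path that already holds rq->lock with irqs disabled, asking
 * for a precise preemption point instead of waiting for the next jiffy tick.
 * hrtick() above then runs ->task_tick() with queued == 1 when the timer fires.
 *
 *	u64 slice_ns = 2 * NSEC_PER_MSEC;
 *
 *	hrtick_start(rq, slice_ns);
 */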
498 
499 /*
500  * resched_task - mark a task 'to be rescheduled now'.
501  *
502  * On UP this means the setting of the need_resched flag, on SMP it
503  * might also involve a cross-CPU call to trigger the scheduler on
504  * the target CPU.
505  */
506 #ifdef CONFIG_SMP
507 
508 #ifndef tsk_is_polling
509 #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
510 #endif
511 
512 void resched_task(struct task_struct *p)
513 {
514 	int cpu;
515 
516 	assert_raw_spin_locked(&task_rq(p)->lock);
517 
518 	if (test_tsk_need_resched(p))
519 		return;
520 
521 	set_tsk_need_resched(p);
522 
523 	cpu = task_cpu(p);
524 	if (cpu == smp_processor_id())
525 		return;
526 
527 	/* NEED_RESCHED must be visible before we test polling */
528 	smp_mb();
529 	if (!tsk_is_polling(p))
530 		smp_send_reschedule(cpu);
531 }
532 
533 void resched_cpu(int cpu)
534 {
535 	struct rq *rq = cpu_rq(cpu);
536 	unsigned long flags;
537 
538 	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
539 		return;
540 	resched_task(cpu_curr(cpu));
541 	raw_spin_unlock_irqrestore(&rq->lock, flags);
542 }
543 
544 #ifdef CONFIG_NO_HZ
545 /*
546  * In the semi idle case, use the nearest busy cpu for migrating timers
547  * from an idle cpu.  This is good for power-savings.
548  *
549  * We don't do a similar optimization for a completely idle system, as
550  * selecting an idle cpu will add more delays to the timers than intended
551  * (as that cpu's timer base may not be up to date wrt jiffies etc).
552  */
553 int get_nohz_timer_target(void)
554 {
555 	int cpu = smp_processor_id();
556 	int i;
557 	struct sched_domain *sd;
558 
559 	rcu_read_lock();
560 	for_each_domain(cpu, sd) {
561 		for_each_cpu(i, sched_domain_span(sd)) {
562 			if (!idle_cpu(i)) {
563 				cpu = i;
564 				goto unlock;
565 			}
566 		}
567 	}
568 unlock:
569 	rcu_read_unlock();
570 	return cpu;
571 }
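/*
 * Illustrative use by timer code, where my_timer is a hypothetical
 * struct timer_list: queue an unpinned timer on the nearest busy CPU rather
 * than on this, possibly idle, one.
 *
 *	int cpu = get_nohz_timer_target();
 *
 *	add_timer_on(&my_timer, cpu);
 */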
572 /*
573  * When add_timer_on() enqueues a timer into the timer wheel of an
574  * idle CPU then this timer might expire before the next timer event
575  * which is scheduled to wake up that CPU. In case of a completely
576  * idle system the next event might even be infinite time into the
577  * future. wake_up_idle_cpu() ensures that the CPU is woken up and
578  * leaves the inner idle loop so the newly added timer is taken into
579  * account when the CPU goes back to idle and evaluates the timer
580  * wheel for the next timer event.
581  */
582 void wake_up_idle_cpu(int cpu)
583 {
584 	struct rq *rq = cpu_rq(cpu);
585 
586 	if (cpu == smp_processor_id())
587 		return;
588 
589 	/*
590 	 * This is safe, as this function is called with the timer
591 	 * wheel base lock of (cpu) held. When the CPU is on the way
592 	 * to idle and has not yet set rq->curr to idle then it will
593 	 * be serialized on the timer wheel base lock and take the new
594 	 * timer into account automatically.
595 	 */
596 	if (rq->curr != rq->idle)
597 		return;
598 
599 	/*
600 	 * We can set TIF_RESCHED on the idle task of the other CPU
601 	 * lockless. The worst case is that the other CPU runs the
602 	 * idle task through an additional NOOP schedule()
603 	 */
604 	set_tsk_need_resched(rq->idle);
605 
606 	/* NEED_RESCHED must be visible before we test polling */
607 	smp_mb();
608 	if (!tsk_is_polling(rq->idle))
609 		smp_send_reschedule(cpu);
610 }
611 
612 static inline bool got_nohz_idle_kick(void)
613 {
614 	int cpu = smp_processor_id();
615 	return idle_cpu(cpu) && test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
616 }
617 
618 #else /* CONFIG_NO_HZ */
619 
620 static inline bool got_nohz_idle_kick(void)
621 {
622 	return false;
623 }
624 
625 #endif /* CONFIG_NO_HZ */
626 
627 void sched_avg_update(struct rq *rq)
628 {
629 	s64 period = sched_avg_period();
630 
631 	while ((s64)(rq->clock - rq->age_stamp) > period) {
632 		/*
633 		 * Inline assembly required to prevent the compiler
634 		 * optimising this loop into a divmod call.
635 		 * See __iter_div_u64_rem() for another example of this.
636 		 */
637 		asm("" : "+rm" (rq->age_stamp));
638 		rq->age_stamp += period;
639 		rq->rt_avg /= 2;
640 	}
641 }
642 
643 #else /* !CONFIG_SMP */
644 void resched_task(struct task_struct *p)
645 {
646 	assert_raw_spin_locked(&task_rq(p)->lock);
647 	set_tsk_need_resched(p);
648 }
649 #endif /* CONFIG_SMP */
650 
651 #if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
652 			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
653 /*
654  * Iterate the task_group tree rooted at *from, calling @down when first entering a
655  * node and @up when leaving it for the final time.
656  *
657  * Caller must hold rcu_read_lock() or a sufficient equivalent.
658  */
659 int walk_tg_tree_from(struct task_group *from,
660 			     tg_visitor down, tg_visitor up, void *data)
661 {
662 	struct task_group *parent, *child;
663 	int ret;
664 
665 	parent = from;
666 
667 down:
668 	ret = (*down)(parent, data);
669 	if (ret)
670 		goto out;
671 	list_for_each_entry_rcu(child, &parent->children, siblings) {
672 		parent = child;
673 		goto down;
674 
675 up:
676 		continue;
677 	}
678 	ret = (*up)(parent, data);
679 	if (ret || parent == from)
680 		goto out;
681 
682 	child = parent;
683 	parent = parent->parent;
684 	if (parent)
685 		goto up;
686 out:
687 	return ret;
688 }
689 
690 int tg_nop(struct task_group *tg, void *data)
691 {
692 	return 0;
693 }
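/*
 * A sketch of a caller, where my_down is a hypothetical tg_visitor that does
 * some per-group work and returns 0 to keep walking (a non-zero return from
 * either visitor aborts the walk and is propagated back). tg_nop() above is
 * the stock "do nothing on the way up" visitor:
 *
 *	rcu_read_lock();
 *	ret = walk_tg_tree_from(&root_task_group, my_down, tg_nop, NULL);
 *	rcu_read_unlock();
 */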
694 #endif
695 
696 static void set_load_weight(struct task_struct *p)
697 {
698 	int prio = p->static_prio - MAX_RT_PRIO;
699 	struct load_weight *load = &p->se.load;
700 
701 	/*
702 	 * SCHED_IDLE tasks get minimal weight:
703 	 */
704 	if (p->policy == SCHED_IDLE) {
705 		load->weight = scale_load(WEIGHT_IDLEPRIO);
706 		load->inv_weight = WMULT_IDLEPRIO;
707 		return;
708 	}
709 
710 	load->weight = scale_load(prio_to_weight[prio]);
711 	load->inv_weight = prio_to_wmult[prio];
712 }
713 
714 static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
715 {
716 	update_rq_clock(rq);
717 	sched_info_queued(p);
718 	p->sched_class->enqueue_task(rq, p, flags);
719 }
720 
721 static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
722 {
723 	update_rq_clock(rq);
724 	sched_info_dequeued(p);
725 	p->sched_class->dequeue_task(rq, p, flags);
726 }
727 
728 void activate_task(struct rq *rq, struct task_struct *p, int flags)
729 {
730 	if (task_contributes_to_load(p))
731 		rq->nr_uninterruptible--;
732 
733 	enqueue_task(rq, p, flags);
734 }
735 
736 void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
737 {
738 	if (task_contributes_to_load(p))
739 		rq->nr_uninterruptible++;
740 
741 	dequeue_task(rq, p, flags);
742 }
743 
744 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
745 
746 /*
747  * There are no locks covering percpu hardirq/softirq time.
748  * They are only modified in account_system_vtime, on corresponding CPU
749  * with interrupts disabled. So, writes are safe.
750  * They are read and saved off onto struct rq in update_rq_clock().
751  * This may result in another CPU reading this CPU's irq time, which can
752  * race with irq/account_system_vtime on this CPU. We would either get the old
753  * or the new value, with the side effect of accounting a slice of irq time to
754  * the wrong task when an irq is in progress while we read rq->clock. That is a
755  * worthwhile compromise in place of having locks on each irq in account_system_time.
756  */
757 static DEFINE_PER_CPU(u64, cpu_hardirq_time);
758 static DEFINE_PER_CPU(u64, cpu_softirq_time);
759 
760 static DEFINE_PER_CPU(u64, irq_start_time);
761 static int sched_clock_irqtime;
762 
763 void enable_sched_clock_irqtime(void)
764 {
765 	sched_clock_irqtime = 1;
766 }
767 
768 void disable_sched_clock_irqtime(void)
769 {
770 	sched_clock_irqtime = 0;
771 }
772 
773 #ifndef CONFIG_64BIT
774 static DEFINE_PER_CPU(seqcount_t, irq_time_seq);
775 
776 static inline void irq_time_write_begin(void)
777 {
778 	__this_cpu_inc(irq_time_seq.sequence);
779 	smp_wmb();
780 }
781 
782 static inline void irq_time_write_end(void)
783 {
784 	smp_wmb();
785 	__this_cpu_inc(irq_time_seq.sequence);
786 }
787 
788 static inline u64 irq_time_read(int cpu)
789 {
790 	u64 irq_time;
791 	unsigned seq;
792 
793 	do {
794 		seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
795 		irq_time = per_cpu(cpu_softirq_time, cpu) +
796 			   per_cpu(cpu_hardirq_time, cpu);
797 	} while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
798 
799 	return irq_time;
800 }
801 #else /* CONFIG_64BIT */
802 static inline void irq_time_write_begin(void)
803 {
804 }
805 
806 static inline void irq_time_write_end(void)
807 {
808 }
809 
810 static inline u64 irq_time_read(int cpu)
811 {
812 	return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
813 }
814 #endif /* CONFIG_64BIT */
815 
816 /*
817  * Called before incrementing preempt_count on {soft,}irq_enter
818  * and before decrementing preempt_count on {soft,}irq_exit.
819  */
820 void account_system_vtime(struct task_struct *curr)
821 {
822 	unsigned long flags;
823 	s64 delta;
824 	int cpu;
825 
826 	if (!sched_clock_irqtime)
827 		return;
828 
829 	local_irq_save(flags);
830 
831 	cpu = smp_processor_id();
832 	delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
833 	__this_cpu_add(irq_start_time, delta);
834 
835 	irq_time_write_begin();
836 	/*
837 	 * We do not account for softirq time from ksoftirqd here.
838 	 * We want to continue accounting softirq time to the ksoftirqd thread
839 	 * in that case, so as not to confuse the scheduler with a special task
840 	 * that does not consume any time, but still wants to run.
841 	 */
842 	if (hardirq_count())
843 		__this_cpu_add(cpu_hardirq_time, delta);
844 	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
845 		__this_cpu_add(cpu_softirq_time, delta);
846 
847 	irq_time_write_end();
848 	local_irq_restore(flags);
849 }
850 EXPORT_SYMBOL_GPL(account_system_vtime);
851 
852 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
853 
854 #ifdef CONFIG_PARAVIRT
855 static inline u64 steal_ticks(u64 steal)
856 {
857 	if (unlikely(steal > NSEC_PER_SEC))
858 		return div_u64(steal, TICK_NSEC);
859 
860 	return __iter_div_u64_rem(steal, TICK_NSEC, &steal);
861 }
862 #endif
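/*
 * Worked example, assuming HZ=1000 so that TICK_NSEC is 1,000,000 ns:
 * steal_ticks(2,500,000) returns 2 via __iter_div_u64_rem(), while a large
 * value such as 3 * NSEC_PER_SEC exceeds NSEC_PER_SEC and takes the div_u64()
 * path, returning 3000 ticks.
 */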
863 
864 static void update_rq_clock_task(struct rq *rq, s64 delta)
865 {
866 /*
867  * In theory, the compiler should just see 0 here, and optimize out the call
868  * to sched_rt_avg_update. But I don't trust it...
869  */
870 #if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
871 	s64 steal = 0, irq_delta = 0;
872 #endif
873 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
874 	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
875 
876 	/*
877 	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
878 	 * this case when a previous update_rq_clock() happened inside a
879 	 * {soft,}irq region.
880 	 *
881 	 * When this happens, we stop ->clock_task and only update the
882 	 * prev_irq_time stamp to account for the part that fit, so that a next
883 	 * update will consume the rest. This ensures ->clock_task is
884 	 * monotonic.
885 	 *
886 	 * It does however cause some slight misattribution of {soft,}irq
887 	 * time, a more accurate solution would be to update the irq_time using
888 	 * the current rq->clock timestamp, except that would require using
889 	 * atomic ops.
890 	 */
891 	if (irq_delta > delta)
892 		irq_delta = delta;
893 
894 	rq->prev_irq_time += irq_delta;
895 	delta -= irq_delta;
896 #endif
897 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
898 	if (static_key_false((&paravirt_steal_rq_enabled))) {
899 		u64 st;
900 
901 		steal = paravirt_steal_clock(cpu_of(rq));
902 		steal -= rq->prev_steal_time_rq;
903 
904 		if (unlikely(steal > delta))
905 			steal = delta;
906 
907 		st = steal_ticks(steal);
908 		steal = st * TICK_NSEC;
909 
910 		rq->prev_steal_time_rq += steal;
911 
912 		delta -= steal;
913 	}
914 #endif
915 
916 	rq->clock_task += delta;
917 
918 #if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
919 	if ((irq_delta + steal) && sched_feat(NONTASK_POWER))
920 		sched_rt_avg_update(rq, irq_delta + steal);
921 #endif
922 }
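/*
 * A worked example of the bookkeeping above, with illustrative numbers and
 * only CONFIG_IRQ_TIME_ACCOUNTING in play: if the raw clock advanced by
 * delta = 1,000,000 ns, of which irq_delta = 300,000 ns was spent in
 * hard/soft irq context, then rq->prev_irq_time grows by 300,000 ns,
 * rq->clock_task advances by only 700,000 ns, and those 300,000 ns are fed to
 * sched_rt_avg_update() as non-task time when the NONTASK_POWER feature is
 * set. The clamp of irq_delta to delta is what keeps clock_task monotonic.
 */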
923 
924 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
925 static int irqtime_account_hi_update(void)
926 {
927 	u64 *cpustat = kcpustat_this_cpu->cpustat;
928 	unsigned long flags;
929 	u64 latest_ns;
930 	int ret = 0;
931 
932 	local_irq_save(flags);
933 	latest_ns = this_cpu_read(cpu_hardirq_time);
934 	if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_IRQ])
935 		ret = 1;
936 	local_irq_restore(flags);
937 	return ret;
938 }
939 
940 static int irqtime_account_si_update(void)
941 {
942 	u64 *cpustat = kcpustat_this_cpu->cpustat;
943 	unsigned long flags;
944 	u64 latest_ns;
945 	int ret = 0;
946 
947 	local_irq_save(flags);
948 	latest_ns = this_cpu_read(cpu_softirq_time);
949 	if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_SOFTIRQ])
950 		ret = 1;
951 	local_irq_restore(flags);
952 	return ret;
953 }
954 
955 #else /* CONFIG_IRQ_TIME_ACCOUNTING */
956 
957 #define sched_clock_irqtime	(0)
958 
959 #endif
960 
961 void sched_set_stop_task(int cpu, struct task_struct *stop)
962 {
963 	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
964 	struct task_struct *old_stop = cpu_rq(cpu)->stop;
965 
966 	if (stop) {
967 		/*
968 		 * Make it appear like a SCHED_FIFO task, it's something
969 		 * userspace knows about and won't get confused about.
970 		 *
971 		 * Also, it will make PI more or less work without too
972 		 * much confusion -- but then, stop work should not
973 		 * rely on PI working anyway.
974 		 */
975 		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
976 
977 		stop->sched_class = &stop_sched_class;
978 	}
979 
980 	cpu_rq(cpu)->stop = stop;
981 
982 	if (old_stop) {
983 		/*
984 		 * Reset it back to a normal scheduling class so that
985 		 * it can die in pieces.
986 		 * it can die in peace.
987 		old_stop->sched_class = &rt_sched_class;
988 	}
989 }
990 
991 /*
992  * __normal_prio - return the priority that is based on the static prio
993  */
994 static inline int __normal_prio(struct task_struct *p)
995 {
996 	return p->static_prio;
997 }
998 
999 /*
1000  * Calculate the expected normal priority: i.e. priority
1001  * without taking RT-inheritance into account. Might be
1002  * boosted by interactivity modifiers. Changes upon fork,
1003  * setprio syscalls, and whenever the interactivity
1004  * estimator recalculates.
1005  */
1006 static inline int normal_prio(struct task_struct *p)
1007 {
1008 	int prio;
1009 
1010 	if (task_has_rt_policy(p))
1011 		prio = MAX_RT_PRIO-1 - p->rt_priority;
1012 	else
1013 		prio = __normal_prio(p);
1014 	return prio;
1015 }
1016 
1017 /*
1018  * Calculate the current priority, i.e. the priority
1019  * taken into account by the scheduler. This value might
1020  * be boosted by RT tasks, or might be boosted by
1021  * interactivity modifiers. Will be RT if the task got
1022  * RT-boosted. If not then it returns p->normal_prio.
1023  */
1024 static int effective_prio(struct task_struct *p)
1025 {
1026 	p->normal_prio = normal_prio(p);
1027 	/*
1028 	 * If we are RT tasks or we were boosted to RT priority,
1029 	 * keep the priority unchanged. Otherwise, update priority
1030 	 * to the normal priority:
1031 	 */
1032 	if (!rt_prio(p->prio))
1033 		return p->normal_prio;
1034 	return p->prio;
1035 }
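/*
 * Worked examples of the mapping, assuming the usual MAX_RT_PRIO == 100 and
 * NICE_TO_PRIO(0) == 120 (kernel-internal prio values, lower means more
 * important):
 *
 *  - a SCHED_FIFO task with rt_priority == 10 gets
 *    normal_prio == MAX_RT_PRIO - 1 - 10 == 89;
 *  - a SCHED_NORMAL task at nice 0 has static_prio == 120, so
 *    normal_prio == 120;
 *  - if that nice-0 task is PI-boosted by an rt_priority-10 waiter, its prio
 *    becomes 89 and effective_prio() leaves the boosted value in place until
 *    the boost is dropped.
 */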
1036 
1037 /**
1038  * task_curr - is this task currently executing on a CPU?
1039  * @p: the task in question.
1040  */
1041 inline int task_curr(const struct task_struct *p)
1042 {
1043 	return cpu_curr(task_cpu(p)) == p;
1044 }
1045 
1046 static inline void check_class_changed(struct rq *rq, struct task_struct *p,
1047 				       const struct sched_class *prev_class,
1048 				       int oldprio)
1049 {
1050 	if (prev_class != p->sched_class) {
1051 		if (prev_class->switched_from)
1052 			prev_class->switched_from(rq, p);
1053 		p->sched_class->switched_to(rq, p);
1054 	} else if (oldprio != p->prio)
1055 		p->sched_class->prio_changed(rq, p, oldprio);
1056 }
1057 
1058 void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
1059 {
1060 	const struct sched_class *class;
1061 
1062 	if (p->sched_class == rq->curr->sched_class) {
1063 		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
1064 	} else {
1065 		for_each_class(class) {
1066 			if (class == rq->curr->sched_class)
1067 				break;
1068 			if (class == p->sched_class) {
1069 				resched_task(rq->curr);
1070 				break;
1071 			}
1072 		}
1073 	}
1074 
1075 	/*
1076 	 * A queue event has occurred, and we're going to schedule.  In
1077 	 * this case, we can save a useless back to back clock update.
1078 	 */
1079 	if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
1080 		rq->skip_clock_update = 1;
1081 }
1082 
1083 #ifdef CONFIG_SMP
1084 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
1085 {
1086 #ifdef CONFIG_SCHED_DEBUG
1087 	/*
1088 	 * We should never call set_task_cpu() on a blocked task;
1089 	 * ttwu() will sort out the placement.
1090 	 */
1091 	WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
1092 			!(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
1093 
1094 #ifdef CONFIG_LOCKDEP
1095 	/*
1096 	 * The caller should hold either p->pi_lock or rq->lock when changing
1097 	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
1098 	 *
1099 	 * sched_move_task() holds both and thus holding either pins the cgroup,
1100 	 * see set_task_rq().
1101 	 *
1102 	 * Furthermore, all task_rq users should acquire both locks, see
1103 	 * task_rq_lock().
1104 	 */
1105 	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
1106 				      lockdep_is_held(&task_rq(p)->lock)));
1107 #endif
1108 #endif
1109 
1110 	trace_sched_migrate_task(p, new_cpu);
1111 
1112 	if (task_cpu(p) != new_cpu) {
1113 		p->se.nr_migrations++;
1114 		perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
1115 	}
1116 
1117 	__set_task_cpu(p, new_cpu);
1118 }
1119 
1120 struct migration_arg {
1121 	struct task_struct *task;
1122 	int dest_cpu;
1123 };
1124 
1125 static int migration_cpu_stop(void *data);
1126 
1127 /*
1128  * wait_task_inactive - wait for a thread to unschedule.
1129  *
1130  * If @match_state is nonzero, it's the @p->state value just checked and
1131  * not expected to change.  If it changes, i.e. @p might have woken up,
1132  * then return zero.  When we succeed in waiting for @p to be off its CPU,
1133  * we return a positive number (its total switch count).  If a second call
1134  * a short while later returns the same number, the caller can be sure that
1135  * @p has remained unscheduled the whole time.
1136  *
1137  * The caller must ensure that the task *will* unschedule sometime soon,
1138  * else this function might spin for a *long* time. This function can't
1139  * be called with interrupts off, or it may introduce deadlock with
1140  * smp_call_function() if an IPI is sent by the same process we are
1141  * waiting to become inactive.
1142  */
1143 unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1144 {
1145 	unsigned long flags;
1146 	int running, on_rq;
1147 	unsigned long ncsw;
1148 	struct rq *rq;
1149 
1150 	for (;;) {
1151 		/*
1152 		 * We do the initial early heuristics without holding
1153 		 * any task-queue locks at all. We'll only try to get
1154 		 * the runqueue lock when things look like they will
1155 		 * work out!
1156 		 */
1157 		rq = task_rq(p);
1158 
1159 		/*
1160 		 * If the task is actively running on another CPU
1161 		 * still, just relax and busy-wait without holding
1162 		 * any locks.
1163 		 *
1164 		 * NOTE! Since we don't hold any locks, it's not
1165 		 * even certain that "rq" stays the right runqueue!
1166 		 * But we don't care, since "task_running()" will
1167 		 * return false if the runqueue has changed and p
1168 		 * is actually now running somewhere else!
1169 		 */
1170 		while (task_running(rq, p)) {
1171 			if (match_state && unlikely(p->state != match_state))
1172 				return 0;
1173 			cpu_relax();
1174 		}
1175 
1176 		/*
1177 		 * Ok, time to look more closely! We need the rq
1178 		 * lock now, to be *sure*. If we're wrong, we'll
1179 		 * just go back and repeat.
1180 		 */
1181 		rq = task_rq_lock(p, &flags);
1182 		trace_sched_wait_task(p);
1183 		running = task_running(rq, p);
1184 		on_rq = p->on_rq;
1185 		ncsw = 0;
1186 		if (!match_state || p->state == match_state)
1187 			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
1188 		task_rq_unlock(rq, p, &flags);
1189 
1190 		/*
1191 		 * If it changed from the expected state, bail out now.
1192 		 */
1193 		if (unlikely(!ncsw))
1194 			break;
1195 
1196 		/*
1197 		 * Was it really running after all now that we
1198 		 * checked with the proper locks actually held?
1199 		 *
1200 		 * Oops. Go back and try again..
1201 		 */
1202 		if (unlikely(running)) {
1203 			cpu_relax();
1204 			continue;
1205 		}
1206 
1207 		/*
1208 		 * It's not enough that it's not actively running,
1209 		 * it must be off the runqueue _entirely_, and not
1210 		 * preempted!
1211 		 *
1212 		 * So if it was still runnable (but just not actively
1213 		 * running right now), it's preempted, and we should
1214 		 * yield - it could be a while.
1215 		 */
1216 		if (unlikely(on_rq)) {
1217 			ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
1218 
1219 			set_current_state(TASK_UNINTERRUPTIBLE);
1220 			schedule_hrtimeout(&to, HRTIMER_MODE_REL);
1221 			continue;
1222 		}
1223 
1224 		/*
1225 		 * Ahh, all good. It wasn't running, and it wasn't
1226 		 * runnable, which means that it will never become
1227 		 * running in the future either. We're all done!
1228 		 */
1229 		break;
1230 	}
1231 
1232 	return ncsw;
1233 }
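/*
 * A sketch of the double-call pattern described above (TASK_TRACED is just an
 * illustrative match_state): remember the switch count, do the work that must
 * not race with @p running, then verify @p never ran in between.
 *
 *	unsigned long ncsw;
 *
 *	ncsw = wait_task_inactive(p, TASK_TRACED);
 *	if (!ncsw)
 *		return -ESRCH;
 *
 *	... operate on the stopped task ...
 *
 *	if (wait_task_inactive(p, TASK_TRACED) != ncsw)
 *		return -ESRCH;
 */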
1234 
1235 /***
1236  * kick_process - kick a running thread to enter/exit the kernel
1237  * @p: the to-be-kicked thread
1238  *
1239  * Cause a process which is running on another CPU to enter
1240  * kernel-mode, without any delay. (to get signals handled.)
1241  *
1242  * NOTE: this function doesn't have to take the runqueue lock,
1243  * because all it wants to ensure is that the remote task enters
1244  * the kernel. If the IPI races and the task has been migrated
1245  * to another CPU then no harm is done and the purpose has been
1246  * achieved as well.
1247  */
1248 void kick_process(struct task_struct *p)
1249 {
1250 	int cpu;
1251 
1252 	preempt_disable();
1253 	cpu = task_cpu(p);
1254 	if ((cpu != smp_processor_id()) && task_curr(p))
1255 		smp_send_reschedule(cpu);
1256 	preempt_enable();
1257 }
1258 EXPORT_SYMBOL_GPL(kick_process);
1259 #endif /* CONFIG_SMP */
1260 
1261 #ifdef CONFIG_SMP
1262 /*
1263  * ->cpus_allowed is protected by both rq->lock and p->pi_lock
1264  */
1265 static int select_fallback_rq(int cpu, struct task_struct *p)
1266 {
1267 	const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
1268 	enum { cpuset, possible, fail } state = cpuset;
1269 	int dest_cpu;
1270 
1271 	/* Look for allowed, online CPU in same node. */
1272 	for_each_cpu(dest_cpu, nodemask) {
1273 		if (!cpu_online(dest_cpu))
1274 			continue;
1275 		if (!cpu_active(dest_cpu))
1276 			continue;
1277 		if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
1278 			return dest_cpu;
1279 	}
1280 
1281 	for (;;) {
1282 		/* Any allowed, online CPU? */
1283 		for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
1284 			if (!cpu_online(dest_cpu))
1285 				continue;
1286 			if (!cpu_active(dest_cpu))
1287 				continue;
1288 			goto out;
1289 		}
1290 
1291 		switch (state) {
1292 		case cpuset:
1293 			/* No more Mr. Nice Guy. */
1294 			cpuset_cpus_allowed_fallback(p);
1295 			state = possible;
1296 			break;
1297 
1298 		case possible:
1299 			do_set_cpus_allowed(p, cpu_possible_mask);
1300 			state = fail;
1301 			break;
1302 
1303 		case fail:
1304 			BUG();
1305 			break;
1306 		}
1307 	}
1308 
1309 out:
1310 	if (state != cpuset) {
1311 		/*
1312 		 * Don't tell them about moving exiting tasks or
1313 		 * kernel threads (both mm NULL), since they never
1314 		 * leave kernel.
1315 		 */
1316 		if (p->mm && printk_ratelimit()) {
1317 			printk_sched("process %d (%s) no longer affine to cpu%d\n",
1318 					task_pid_nr(p), p->comm, cpu);
1319 		}
1320 	}
1321 
1322 	return dest_cpu;
1323 }
1324 
1325 /*
1326  * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
1327  */
1328 static inline
1329 int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
1330 {
1331 	int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
1332 
1333 	/*
1334 	 * In order not to call set_task_cpu() on a blocking task we need
1335 	 * to rely on ttwu() to place the task on a valid ->cpus_allowed
1336 	 * cpu.
1337 	 *
1338 	 * Since this is common to all placement strategies, this lives here.
1339 	 *
1340 	 * [ this allows ->select_task() to simply return task_cpu(p) and
1341 	 *   not worry about this generic constraint ]
1342 	 */
1343 	if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
1344 		     !cpu_online(cpu)))
1345 		cpu = select_fallback_rq(task_cpu(p), p);
1346 
1347 	return cpu;
1348 }
1349 
1350 static void update_avg(u64 *avg, u64 sample)
1351 {
1352 	s64 diff = sample - *avg;
1353 	*avg += diff >> 3;
1354 }
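/*
 * update_avg() keeps a rough exponentially-weighted average with weight 1/8:
 * the stored value moves an eighth of the way towards each new sample. For
 * example, with *avg == 800 and sample == 1600, diff == 800, diff >> 3 == 100
 * and *avg becomes 900; repeated samples of 1600 converge geometrically
 * towards 1600. It is used below to track rq->avg_idle.
 */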
1355 #endif
1356 
1357 static void
1358 ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
1359 {
1360 #ifdef CONFIG_SCHEDSTATS
1361 	struct rq *rq = this_rq();
1362 
1363 #ifdef CONFIG_SMP
1364 	int this_cpu = smp_processor_id();
1365 
1366 	if (cpu == this_cpu) {
1367 		schedstat_inc(rq, ttwu_local);
1368 		schedstat_inc(p, se.statistics.nr_wakeups_local);
1369 	} else {
1370 		struct sched_domain *sd;
1371 
1372 		schedstat_inc(p, se.statistics.nr_wakeups_remote);
1373 		rcu_read_lock();
1374 		for_each_domain(this_cpu, sd) {
1375 			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
1376 				schedstat_inc(sd, ttwu_wake_remote);
1377 				break;
1378 			}
1379 		}
1380 		rcu_read_unlock();
1381 	}
1382 
1383 	if (wake_flags & WF_MIGRATED)
1384 		schedstat_inc(p, se.statistics.nr_wakeups_migrate);
1385 
1386 #endif /* CONFIG_SMP */
1387 
1388 	schedstat_inc(rq, ttwu_count);
1389 	schedstat_inc(p, se.statistics.nr_wakeups);
1390 
1391 	if (wake_flags & WF_SYNC)
1392 		schedstat_inc(p, se.statistics.nr_wakeups_sync);
1393 
1394 #endif /* CONFIG_SCHEDSTATS */
1395 }
1396 
1397 static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
1398 {
1399 	activate_task(rq, p, en_flags);
1400 	p->on_rq = 1;
1401 
1402 	/* if a worker is waking up, notify workqueue */
1403 	if (p->flags & PF_WQ_WORKER)
1404 		wq_worker_waking_up(p, cpu_of(rq));
1405 }
1406 
1407 /*
1408  * Mark the task runnable and perform wakeup-preemption.
1409  */
1410 static void
1411 ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
1412 {
1413 	trace_sched_wakeup(p, true);
1414 	check_preempt_curr(rq, p, wake_flags);
1415 
1416 	p->state = TASK_RUNNING;
1417 #ifdef CONFIG_SMP
1418 	if (p->sched_class->task_woken)
1419 		p->sched_class->task_woken(rq, p);
1420 
1421 	if (rq->idle_stamp) {
1422 		u64 delta = rq->clock - rq->idle_stamp;
1423 		u64 max = 2*sysctl_sched_migration_cost;
1424 
1425 		if (delta > max)
1426 			rq->avg_idle = max;
1427 		else
1428 			update_avg(&rq->avg_idle, delta);
1429 		rq->idle_stamp = 0;
1430 	}
1431 #endif
1432 }
1433 
1434 static void
1435 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
1436 {
1437 #ifdef CONFIG_SMP
1438 	if (p->sched_contributes_to_load)
1439 		rq->nr_uninterruptible--;
1440 #endif
1441 
1442 	ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
1443 	ttwu_do_wakeup(rq, p, wake_flags);
1444 }
1445 
1446 /*
1447  * Called in case the task @p isn't fully descheduled from its runqueue,
1448  * in this case we must do a remote wakeup. It's a 'light' wakeup though,
1449  * since all we need to do is flip p->state to TASK_RUNNING, as
1450  * the task is still ->on_rq.
1451  */
1452 static int ttwu_remote(struct task_struct *p, int wake_flags)
1453 {
1454 	struct rq *rq;
1455 	int ret = 0;
1456 
1457 	rq = __task_rq_lock(p);
1458 	if (p->on_rq) {
1459 		ttwu_do_wakeup(rq, p, wake_flags);
1460 		ret = 1;
1461 	}
1462 	__task_rq_unlock(rq);
1463 
1464 	return ret;
1465 }
1466 
1467 #ifdef CONFIG_SMP
1468 static void sched_ttwu_pending(void)
1469 {
1470 	struct rq *rq = this_rq();
1471 	struct llist_node *llist = llist_del_all(&rq->wake_list);
1472 	struct task_struct *p;
1473 
1474 	raw_spin_lock(&rq->lock);
1475 
1476 	while (llist) {
1477 		p = llist_entry(llist, struct task_struct, wake_entry);
1478 		llist = llist_next(llist);
1479 		ttwu_do_activate(rq, p, 0);
1480 	}
1481 
1482 	raw_spin_unlock(&rq->lock);
1483 }
1484 
1485 void scheduler_ipi(void)
1486 {
1487 	if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
1488 		return;
1489 
1490 	/*
1491 	 * Not all reschedule IPI handlers call irq_enter/irq_exit, since
1492 	 * traditionally all their work was done from the interrupt return
1493 	 * path. Now that we actually do some work, we need to make sure
1494 	 * we do call them.
1495 	 *
1496 	 * Some archs already do call them, luckily irq_enter/exit nest
1497 	 * properly.
1498 	 *
1499 	 * Arguably we should visit all archs and update all handlers,
1500 	 * however a fair share of IPIs are still resched only so this would
1501 	 * somewhat pessimize the simple resched case.
1502 	 */
1503 	irq_enter();
1504 	sched_ttwu_pending();
1505 
1506 	/*
1507 	 * Check if someone kicked us for doing the nohz idle load balance.
1508 	 */
1509 	if (unlikely(got_nohz_idle_kick() && !need_resched())) {
1510 		this_rq()->idle_balance = 1;
1511 		raise_softirq_irqoff(SCHED_SOFTIRQ);
1512 	}
1513 	irq_exit();
1514 }
1515 
1516 static void ttwu_queue_remote(struct task_struct *p, int cpu)
1517 {
1518 	if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list))
1519 		smp_send_reschedule(cpu);
1520 }
1521 
1522 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
1523 static int ttwu_activate_remote(struct task_struct *p, int wake_flags)
1524 {
1525 	struct rq *rq;
1526 	int ret = 0;
1527 
1528 	rq = __task_rq_lock(p);
1529 	if (p->on_cpu) {
1530 		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
1531 		ttwu_do_wakeup(rq, p, wake_flags);
1532 		ret = 1;
1533 	}
1534 	__task_rq_unlock(rq);
1535 
1536 	return ret;
1537 
1538 }
1539 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
1540 
1541 bool cpus_share_cache(int this_cpu, int that_cpu)
1542 {
1543 	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
1544 }
1545 #endif /* CONFIG_SMP */
1546 
1547 static void ttwu_queue(struct task_struct *p, int cpu)
1548 {
1549 	struct rq *rq = cpu_rq(cpu);
1550 
1551 #if defined(CONFIG_SMP)
1552 	if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
1553 		sched_clock_cpu(cpu); /* sync clocks x-cpu */
1554 		ttwu_queue_remote(p, cpu);
1555 		return;
1556 	}
1557 #endif
1558 
1559 	raw_spin_lock(&rq->lock);
1560 	ttwu_do_activate(rq, p, 0);
1561 	raw_spin_unlock(&rq->lock);
1562 }
1563 
1564 /**
1565  * try_to_wake_up - wake up a thread
1566  * @p: the thread to be awakened
1567  * @state: the mask of task states that can be woken
1568  * @wake_flags: wake modifier flags (WF_*)
1569  *
1570  * Put it on the run-queue if it's not already there. The "current"
1571  * thread is always on the run-queue (except when the actual
1572  * re-schedule is in progress), and as such you're allowed to do
1573  * the simpler "current->state = TASK_RUNNING" to mark yourself
1574  * runnable without the overhead of this.
1575  *
1576  * Returns %true if @p was woken up, %false if it was already running
1577  * or @state didn't match @p's state.
1578  */
1579 static int
1580 try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
1581 {
1582 	unsigned long flags;
1583 	int cpu, success = 0;
1584 
1585 	smp_wmb();
1586 	raw_spin_lock_irqsave(&p->pi_lock, flags);
1587 	if (!(p->state & state))
1588 		goto out;
1589 
1590 	success = 1; /* we're going to change ->state */
1591 	cpu = task_cpu(p);
1592 
1593 	if (p->on_rq && ttwu_remote(p, wake_flags))
1594 		goto stat;
1595 
1596 #ifdef CONFIG_SMP
1597 	/*
1598 	 * If the owning (remote) cpu is still in the middle of schedule() with
1599 	 * this task as prev, wait until it's done referencing the task.
1600 	 */
1601 	while (p->on_cpu) {
1602 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
1603 		/*
1604 		 * In case the architecture enables interrupts in
1605 		 * context_switch(), we cannot busy wait, since that
1606 		 * would lead to deadlocks when an interrupt hits and
1607 		 * tries to wake up @prev. So bail and do a complete
1608 		 * remote wakeup.
1609 		 */
1610 		if (ttwu_activate_remote(p, wake_flags))
1611 			goto stat;
1612 #else
1613 		cpu_relax();
1614 #endif
1615 	}
1616 	/*
1617 	 * Pairs with the smp_wmb() in finish_lock_switch().
1618 	 */
1619 	smp_rmb();
1620 
1621 	p->sched_contributes_to_load = !!task_contributes_to_load(p);
1622 	p->state = TASK_WAKING;
1623 
1624 	if (p->sched_class->task_waking)
1625 		p->sched_class->task_waking(p);
1626 
1627 	cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
1628 	if (task_cpu(p) != cpu) {
1629 		wake_flags |= WF_MIGRATED;
1630 		set_task_cpu(p, cpu);
1631 	}
1632 #endif /* CONFIG_SMP */
1633 
1634 	ttwu_queue(p, cpu);
1635 stat:
1636 	ttwu_stat(p, cpu, wake_flags);
1637 out:
1638 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1639 
1640 	return success;
1641 }
1642 
1643 /**
1644  * try_to_wake_up_local - try to wake up a local task with rq lock held
1645  * @p: the thread to be awakened
1646  *
1647  * Put @p on the run-queue if it's not already there. The caller must
1648  * ensure that this_rq() is locked, @p is bound to this_rq() and not
1649  * the current task.
1650  */
1651 static void try_to_wake_up_local(struct task_struct *p)
1652 {
1653 	struct rq *rq = task_rq(p);
1654 
1655 	BUG_ON(rq != this_rq());
1656 	BUG_ON(p == current);
1657 	lockdep_assert_held(&rq->lock);
1658 
1659 	if (!raw_spin_trylock(&p->pi_lock)) {
1660 		raw_spin_unlock(&rq->lock);
1661 		raw_spin_lock(&p->pi_lock);
1662 		raw_spin_lock(&rq->lock);
1663 	}
1664 
1665 	if (!(p->state & TASK_NORMAL))
1666 		goto out;
1667 
1668 	if (!p->on_rq)
1669 		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
1670 
1671 	ttwu_do_wakeup(rq, p, 0);
1672 	ttwu_stat(p, smp_processor_id(), 0);
1673 out:
1674 	raw_spin_unlock(&p->pi_lock);
1675 }
1676 
1677 /**
1678  * wake_up_process - Wake up a specific process
1679  * @p: The process to be woken up.
1680  *
1681  * Attempt to wake up the nominated process and move it to the set of runnable
1682  * processes.  Returns 1 if the process was woken up, 0 if it was already
1683  * running.
1684  *
1685  * It may be assumed that this function implies a write memory barrier before
1686  * changing the task state if and only if any tasks are woken up.
1687  */
1688 int wake_up_process(struct task_struct *p)
1689 {
1690 	return try_to_wake_up(p, TASK_ALL, 0);
1691 }
1692 EXPORT_SYMBOL(wake_up_process);
1693 
1694 int wake_up_state(struct task_struct *p, unsigned int state)
1695 {
1696 	return try_to_wake_up(p, state, 0);
1697 }
1698 
1699 /*
1700  * Perform scheduler related setup for a newly forked process p.
1701  * p is forked by current.
1702  *
1703  * __sched_fork() is basic setup used by init_idle() too:
1704  */
1705 static void __sched_fork(struct task_struct *p)
1706 {
1707 	p->on_rq			= 0;
1708 
1709 	p->se.on_rq			= 0;
1710 	p->se.exec_start		= 0;
1711 	p->se.sum_exec_runtime		= 0;
1712 	p->se.prev_sum_exec_runtime	= 0;
1713 	p->se.nr_migrations		= 0;
1714 	p->se.vruntime			= 0;
1715 	INIT_LIST_HEAD(&p->se.group_node);
1716 
1717 #ifdef CONFIG_SCHEDSTATS
1718 	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
1719 #endif
1720 
1721 	INIT_LIST_HEAD(&p->rt.run_list);
1722 
1723 #ifdef CONFIG_PREEMPT_NOTIFIERS
1724 	INIT_HLIST_HEAD(&p->preempt_notifiers);
1725 #endif
1726 }
1727 
1728 /*
1729  * fork()/clone()-time setup:
1730  */
1731 void sched_fork(struct task_struct *p)
1732 {
1733 	unsigned long flags;
1734 	int cpu = get_cpu();
1735 
1736 	__sched_fork(p);
1737 	/*
1738 	 * We mark the process as running here. This guarantees that
1739 	 * nobody will actually run it, and a signal or other external
1740 	 * event cannot wake it up and insert it on the runqueue either.
1741 	 */
1742 	p->state = TASK_RUNNING;
1743 
1744 	/*
1745 	 * Make sure we do not leak PI boosting priority to the child.
1746 	 */
1747 	p->prio = current->normal_prio;
1748 
1749 	/*
1750 	 * Revert to default priority/policy on fork if requested.
1751 	 */
1752 	if (unlikely(p->sched_reset_on_fork)) {
1753 		if (task_has_rt_policy(p)) {
1754 			p->policy = SCHED_NORMAL;
1755 			p->static_prio = NICE_TO_PRIO(0);
1756 			p->rt_priority = 0;
1757 		} else if (PRIO_TO_NICE(p->static_prio) < 0)
1758 			p->static_prio = NICE_TO_PRIO(0);
1759 
1760 		p->prio = p->normal_prio = __normal_prio(p);
1761 		set_load_weight(p);
1762 
1763 		/*
1764 		 * We don't need the reset flag anymore after the fork. It has
1765 		 * fulfilled its duty:
1766 		 */
1767 		p->sched_reset_on_fork = 0;
1768 	}
1769 
1770 	if (!rt_prio(p->prio))
1771 		p->sched_class = &fair_sched_class;
1772 
1773 	if (p->sched_class->task_fork)
1774 		p->sched_class->task_fork(p);
1775 
1776 	/*
1777 	 * The child is not yet in the pid-hash so no cgroup attach races,
1778 	 * and the cgroup is pinned to this child because cgroup_fork()
1779 	 * is run before sched_fork().
1780 	 *
1781 	 * Silence PROVE_RCU.
1782 	 */
1783 	raw_spin_lock_irqsave(&p->pi_lock, flags);
1784 	set_task_cpu(p, cpu);
1785 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1786 
1787 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
1788 	if (likely(sched_info_on()))
1789 		memset(&p->sched_info, 0, sizeof(p->sched_info));
1790 #endif
1791 #if defined(CONFIG_SMP)
1792 	p->on_cpu = 0;
1793 #endif
1794 #ifdef CONFIG_PREEMPT_COUNT
1795 	/* Want to start with kernel preemption disabled. */
1796 	task_thread_info(p)->preempt_count = 1;
1797 #endif
1798 #ifdef CONFIG_SMP
1799 	plist_node_init(&p->pushable_tasks, MAX_PRIO);
1800 #endif
1801 
1802 	put_cpu();
1803 }
1804 
1805 /*
1806  * wake_up_new_task - wake up a newly created task for the first time.
1807  *
1808  * This function will do some initial scheduler statistics housekeeping
1809  * that must be done for every newly created context, then puts the task
1810  * on the runqueue and wakes it.
1811  */
1812 void wake_up_new_task(struct task_struct *p)
1813 {
1814 	unsigned long flags;
1815 	struct rq *rq;
1816 
1817 	raw_spin_lock_irqsave(&p->pi_lock, flags);
1818 #ifdef CONFIG_SMP
1819 	/*
1820 	 * Fork balancing, do it here and not earlier because:
1821 	 *  - cpus_allowed can change in the fork path
1822 	 *  - any previously selected cpu might disappear through hotplug
1823 	 */
1824 	set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0));
1825 #endif
1826 
1827 	rq = __task_rq_lock(p);
1828 	activate_task(rq, p, 0);
1829 	p->on_rq = 1;
1830 	trace_sched_wakeup_new(p, true);
1831 	check_preempt_curr(rq, p, WF_FORK);
1832 #ifdef CONFIG_SMP
1833 	if (p->sched_class->task_woken)
1834 		p->sched_class->task_woken(rq, p);
1835 #endif
1836 	task_rq_unlock(rq, p, &flags);
1837 }
1838 
1839 #ifdef CONFIG_PREEMPT_NOTIFIERS
1840 
1841 /**
1842  * preempt_notifier_register - tell me when current is being preempted & rescheduled
1843  * @notifier: notifier struct to register
1844  */
1845 void preempt_notifier_register(struct preempt_notifier *notifier)
1846 {
1847 	hlist_add_head(&notifier->link, &current->preempt_notifiers);
1848 }
1849 EXPORT_SYMBOL_GPL(preempt_notifier_register);
1850 
1851 /**
1852  * preempt_notifier_unregister - no longer interested in preemption notifications
1853  * @notifier: notifier struct to unregister
1854  *
1855  * This is safe to call from within a preemption notifier.
1856  */
1857 void preempt_notifier_unregister(struct preempt_notifier *notifier)
1858 {
1859 	hlist_del(&notifier->link);
1860 }
1861 EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
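/*
 * A sketch of a user of this API (the names are illustrative; a hypervisor
 * module tracking when its vcpu thread is scheduled in and out would look much
 * like this). The callbacks are invoked via fire_sched_{in,out}_preempt_notifiers()
 * below whenever the registering task is switched in or out:
 *
 *	static void my_sched_in(struct preempt_notifier *pn, int cpu)
 *	{
 *	}
 *
 *	static void my_sched_out(struct preempt_notifier *pn,
 *				 struct task_struct *next)
 *	{
 *	}
 *
 *	static struct preempt_ops my_ops = {
 *		.sched_in	= my_sched_in,
 *		.sched_out	= my_sched_out,
 *	};
 *
 *	struct preempt_notifier pn;
 *
 *	preempt_notifier_init(&pn, &my_ops);
 *	preempt_notifier_register(&pn);
 *	...
 *	preempt_notifier_unregister(&pn);
 */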
1862 
1863 static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
1864 {
1865 	struct preempt_notifier *notifier;
1866 	struct hlist_node *node;
1867 
1868 	hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
1869 		notifier->ops->sched_in(notifier, raw_smp_processor_id());
1870 }
1871 
1872 static void
1873 fire_sched_out_preempt_notifiers(struct task_struct *curr,
1874 				 struct task_struct *next)
1875 {
1876 	struct preempt_notifier *notifier;
1877 	struct hlist_node *node;
1878 
1879 	hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
1880 		notifier->ops->sched_out(notifier, next);
1881 }
1882 
1883 #else /* !CONFIG_PREEMPT_NOTIFIERS */
1884 
1885 static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
1886 {
1887 }
1888 
1889 static void
1890 fire_sched_out_preempt_notifiers(struct task_struct *curr,
1891 				 struct task_struct *next)
1892 {
1893 }
1894 
1895 #endif /* CONFIG_PREEMPT_NOTIFIERS */
1896 
1897 /**
1898  * prepare_task_switch - prepare to switch tasks
1899  * @rq: the runqueue preparing to switch
1900  * @prev: the current task that is being switched out
1901  * @next: the task we are going to switch to.
1902  *
1903  * This is called with the rq lock held and interrupts off. It must
1904  * be paired with a subsequent finish_task_switch after the context
1905  * switch.
1906  *
1907  * prepare_task_switch sets up locking and calls architecture specific
1908  * hooks.
1909  */
1910 static inline void
1911 prepare_task_switch(struct rq *rq, struct task_struct *prev,
1912 		    struct task_struct *next)
1913 {
1914 	sched_info_switch(prev, next);
1915 	perf_event_task_sched(prev, next);
1916 	fire_sched_out_preempt_notifiers(prev, next);
1917 	prepare_lock_switch(rq, next);
1918 	prepare_arch_switch(next);
1919 	trace_sched_switch(prev, next);
1920 }
1921 
1922 /**
1923  * finish_task_switch - clean up after a task-switch
1924  * @rq: runqueue associated with task-switch
1925  * @prev: the thread we just switched away from.
1926  *
1927  * finish_task_switch must be called after the context switch, paired
1928  * with a prepare_task_switch call before the context switch.
1929  * finish_task_switch will reconcile locking set up by prepare_task_switch,
1930  * and do any other architecture-specific cleanup actions.
1931  *
1932  * Note that we may have delayed dropping an mm in context_switch(). If
1933  * so, we finish that here outside of the runqueue lock. (Doing it
1934  * with the lock held can cause deadlocks; see schedule() for
1935  * details.)
1936  */
1937 static void finish_task_switch(struct rq *rq, struct task_struct *prev)
1938 	__releases(rq->lock)
1939 {
1940 	struct mm_struct *mm = rq->prev_mm;
1941 	long prev_state;
1942 
1943 	rq->prev_mm = NULL;
1944 
1945 	/*
1946 	 * A task struct has one reference for the use as "current".
1947 	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
1948 	 * schedule one last time. The schedule call will never return, and
1949 	 * the scheduled task must drop that reference.
1950 	 * The test for TASK_DEAD must occur while the runqueue locks are
1951 	 * still held, otherwise prev could be scheduled on another cpu, die
1952 	 * there before we look at prev->state, and then the reference would
1953 	 * be dropped twice.
1954 	 *		Manfred Spraul <manfred@colorfullife.com>
1955 	 */
1956 	prev_state = prev->state;
1957 	finish_arch_switch(prev);
1958 	finish_lock_switch(rq, prev);
1959 	finish_arch_post_lock_switch();
1960 
1961 	fire_sched_in_preempt_notifiers(current);
1962 	if (mm)
1963 		mmdrop(mm);
1964 	if (unlikely(prev_state == TASK_DEAD)) {
1965 		/*
1966 		 * Remove function-return probe instances associated with this
1967 		 * task and put them back on the free list.
1968 		 */
1969 		kprobe_flush_task(prev);
1970 		put_task_struct(prev);
1971 	}
1972 }
1973 
1974 #ifdef CONFIG_SMP
1975 
1976 /* assumes rq->lock is held */
1977 static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
1978 {
1979 	if (prev->sched_class->pre_schedule)
1980 		prev->sched_class->pre_schedule(rq, prev);
1981 }
1982 
1983 /* rq->lock is NOT held, but preemption is disabled */
1984 static inline void post_schedule(struct rq *rq)
1985 {
1986 	if (rq->post_schedule) {
1987 		unsigned long flags;
1988 
1989 		raw_spin_lock_irqsave(&rq->lock, flags);
1990 		if (rq->curr->sched_class->post_schedule)
1991 			rq->curr->sched_class->post_schedule(rq);
1992 		raw_spin_unlock_irqrestore(&rq->lock, flags);
1993 
1994 		rq->post_schedule = 0;
1995 	}
1996 }
1997 
1998 #else
1999 
2000 static inline void pre_schedule(struct rq *rq, struct task_struct *p)
2001 {
2002 }
2003 
2004 static inline void post_schedule(struct rq *rq)
2005 {
2006 }
2007 
2008 #endif
2009 
2010 /**
2011  * schedule_tail - first thing a freshly forked thread must call.
2012  * @prev: the thread we just switched away from.
2013  */
2014 asmlinkage void schedule_tail(struct task_struct *prev)
2015 	__releases(rq->lock)
2016 {
2017 	struct rq *rq = this_rq();
2018 
2019 	finish_task_switch(rq, prev);
2020 
2021 	/*
2022 	 * FIXME: do we need to worry about rq being invalidated by the
2023 	 * task_switch?
2024 	 */
2025 	post_schedule(rq);
2026 
2027 #ifdef __ARCH_WANT_UNLOCKED_CTXSW
2028 	/* In this case, finish_task_switch does not reenable preemption */
2029 	preempt_enable();
2030 #endif
2031 	if (current->set_child_tid)
2032 		put_user(task_pid_vnr(current), current->set_child_tid);
2033 }
2034 
2035 /*
2036  * context_switch - switch to the new MM and the new
2037  * thread's register state.
2038  */
2039 static inline void
2040 context_switch(struct rq *rq, struct task_struct *prev,
2041 	       struct task_struct *next)
2042 {
2043 	struct mm_struct *mm, *oldmm;
2044 
2045 	prepare_task_switch(rq, prev, next);
2046 
2047 	mm = next->mm;
2048 	oldmm = prev->active_mm;
2049 	/*
2050 	 * For paravirt, this is coupled with an exit in switch_to to
2051 	 * combine the page table reload and the switch backend into
2052 	 * one hypercall.
2053 	 */
2054 	arch_start_context_switch(prev);
2055 
2056 	if (!mm) {
2057 		next->active_mm = oldmm;
2058 		atomic_inc(&oldmm->mm_count);
2059 		enter_lazy_tlb(oldmm, next);
2060 	} else
2061 		switch_mm(oldmm, mm, next);
2062 
2063 	if (!prev->mm) {
2064 		prev->active_mm = NULL;
2065 		rq->prev_mm = oldmm;
2066 	}
2067 	/*
2068 	 * The runqueue lock will be released by the next task (which is
2069 	 * an invalid locking op, but in the case of the scheduler it's
2070 	 * an obvious special-case), so we do an early lockdep release
2071 	 * here:
2072 	 */
2073 #ifndef __ARCH_WANT_UNLOCKED_CTXSW
2074 	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
2075 #endif
2076 
2077 	/* Here we just switch the register state and the stack. */
2078 	rcu_switch_from(prev);
2079 	switch_to(prev, next, prev);
2080 
2081 	barrier();
2082 	/*
2083 	 * this_rq must be evaluated again because prev may have moved
2084 	 * CPUs since it called schedule(), thus the 'rq' on its stack
2085 	 * frame will be invalid.
2086 	 */
2087 	finish_task_switch(this_rq(), prev);
2088 }
2089 
2090 /*
2091  * nr_running, nr_uninterruptible and nr_context_switches:
2092  *
2093  * externally visible scheduler statistics: current number of runnable
2094  * threads, current number of uninterruptible-sleeping threads, total
2095  * number of context switches performed since bootup.
2096  */
2097 unsigned long nr_running(void)
2098 {
2099 	unsigned long i, sum = 0;
2100 
2101 	for_each_online_cpu(i)
2102 		sum += cpu_rq(i)->nr_running;
2103 
2104 	return sum;
2105 }
2106 
2107 unsigned long nr_uninterruptible(void)
2108 {
2109 	unsigned long i, sum = 0;
2110 
2111 	for_each_possible_cpu(i)
2112 		sum += cpu_rq(i)->nr_uninterruptible;
2113 
2114 	/*
2115 	 * Since we read the counters locklessly, the sum might be slightly
2116 	 * inaccurate. Do not allow it to go below zero though:
2117 	 */
2118 	if (unlikely((long)sum < 0))
2119 		sum = 0;
2120 
2121 	return sum;
2122 }
2123 
2124 unsigned long long nr_context_switches(void)
2125 {
2126 	int i;
2127 	unsigned long long sum = 0;
2128 
2129 	for_each_possible_cpu(i)
2130 		sum += cpu_rq(i)->nr_switches;
2131 
2132 	return sum;
2133 }
2134 
2135 unsigned long nr_iowait(void)
2136 {
2137 	unsigned long i, sum = 0;
2138 
2139 	for_each_possible_cpu(i)
2140 		sum += atomic_read(&cpu_rq(i)->nr_iowait);
2141 
2142 	return sum;
2143 }
2144 
2145 unsigned long nr_iowait_cpu(int cpu)
2146 {
2147 	struct rq *this = cpu_rq(cpu);
2148 	return atomic_read(&this->nr_iowait);
2149 }
2150 
2151 unsigned long this_cpu_load(void)
2152 {
2153 	struct rq *this = this_rq();
2154 	return this->cpu_load[0];
2155 }
2156 
2157 
2158 /* Variables and functions for calc_load */
2159 static atomic_long_t calc_load_tasks;
2160 static unsigned long calc_load_update;
2161 unsigned long avenrun[3];
2162 EXPORT_SYMBOL(avenrun);
2163 
2164 static long calc_load_fold_active(struct rq *this_rq)
2165 {
2166 	long nr_active, delta = 0;
2167 
2168 	nr_active = this_rq->nr_running;
2169 	nr_active += (long) this_rq->nr_uninterruptible;
2170 
2171 	if (nr_active != this_rq->calc_load_active) {
2172 		delta = nr_active - this_rq->calc_load_active;
2173 		this_rq->calc_load_active = nr_active;
2174 	}
2175 
2176 	return delta;
2177 }
2178 
2179 static unsigned long
2180 calc_load(unsigned long load, unsigned long exp, unsigned long active)
2181 {
2182 	load *= exp;
2183 	load += active * (FIXED_1 - exp);
2184 	load += 1UL << (FSHIFT - 1);
2185 	return load >> FSHIFT;
2186 }
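
/*
 * Worked example (illustrative only; assumes the usual FSHIFT == 11,
 * FIXED_1 == 2048 and EXP_1 == 1884 from <linux/sched.h>):
 *
 * starting from an idle system (load == 0) with one task runnable over a
 * whole ~5s LOAD_FREQ period (active == 1 * FIXED_1 == 2048):
 *
 *	load = 0 * 1884 + 2048 * (2048 - 1884) + 1024 = 336896
 *	load >> 11 == 164
 *
 * i.e. avenrun[0] becomes 164/2048 ~= 0.08, which is what the 1-minute
 * field of /proc/loadavg would show after that first period.
 */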
2187 
2188 #ifdef CONFIG_NO_HZ
2189 /*
2190  * For NO_HZ we delay the active fold to the next LOAD_FREQ update.
2191  *
2192  * When making the ILB scale, we should try to pull this in as well.
2193  */
2194 static atomic_long_t calc_load_tasks_idle;
2195 
2196 void calc_load_account_idle(struct rq *this_rq)
2197 {
2198 	long delta;
2199 
2200 	delta = calc_load_fold_active(this_rq);
2201 	if (delta)
2202 		atomic_long_add(delta, &calc_load_tasks_idle);
2203 }
2204 
2205 static long calc_load_fold_idle(void)
2206 {
2207 	long delta = 0;
2208 
2209 	/*
2210 	 * It's got a race; we don't care...
2211 	 */
2212 	if (atomic_long_read(&calc_load_tasks_idle))
2213 		delta = atomic_long_xchg(&calc_load_tasks_idle, 0);
2214 
2215 	return delta;
2216 }
2217 
2218 /**
2219  * fixed_power_int - compute: x^n, in O(log n) time
2220  *
2221  * @x:         base of the power
2222  * @frac_bits: fractional bits of @x
2223  * @n:         power to raise @x to.
2224  *
2225  * By exploiting the relation between the definition of the natural power
2226  * function: x^n := x*x*...*x (x multiplied by itself for n times), and
2227  * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
2228  * (where: n_i \elem {0, 1}, the binary vector representing n),
2229  * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
2230  * of course trivially computable in O(log_2 n), the length of our binary
2231  * vector.
2232  */
2233 static unsigned long
2234 fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
2235 {
2236 	unsigned long result = 1UL << frac_bits;
2237 
2238 	if (n) for (;;) {
2239 		if (n & 1) {
2240 			result *= x;
2241 			result += 1UL << (frac_bits - 1);
2242 			result >>= frac_bits;
2243 		}
2244 		n >>= 1;
2245 		if (!n)
2246 			break;
2247 		x *= x;
2248 		x += 1UL << (frac_bits - 1);
2249 		x >>= frac_bits;
2250 	}
2251 
2252 	return result;
2253 }
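
/*
 * Illustrative example (not part of the scheduler proper), again assuming
 * FSHIFT == 11 and EXP_1 == 1884 (~0.92 in fixed-point):
 *
 * fixed_power_int(EXP_1, FSHIFT, 2) costs a single squaring plus one
 * multiply into the result:
 *
 *	x      = (1884 * 1884 + 1024) >> 11 == 1733
 *	result = (2048 * 1733 + 1024) >> 11 == 1733
 *
 * i.e. ~0.846 * FIXED_1, matching (1884/2048)^2 to within the rounding
 * error.  For larger n only the set bits of n cost a multiply.
 */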
2254 
2255 /*
2256  * a1 = a0 * e + a * (1 - e)
2257  *
2258  * a2 = a1 * e + a * (1 - e)
2259  *    = (a0 * e + a * (1 - e)) * e + a * (1 - e)
2260  *    = a0 * e^2 + a * (1 - e) * (1 + e)
2261  *
2262  * a3 = a2 * e + a * (1 - e)
2263  *    = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
2264  *    = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
2265  *
2266  *  ...
2267  *
2268  * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^(n-1)) [1]
2269  *    = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
2270  *    = a0 * e^n + a * (1 - e^n)
2271  *
2272  * [1] application of the geometric series:
2273  *
2274  *              n         1 - x^(n+1)
2275  *     S_n := \Sum x^i = -------------
2276  *             i=0          1 - x
2277  */
2278 static unsigned long
2279 calc_load_n(unsigned long load, unsigned long exp,
2280 	    unsigned long active, unsigned int n)
2281 {
2283 	return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
2284 }
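
/*
 * Equivalently (up to rounding): calc_load_n(load, EXP_1, active, 3)
 * yields the same value as applying calc_load() three times in a row with
 * the same 'active', but needs only one fixed_power_int() instead of
 * three full updates.
 */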
2285 
2286 /*
2287  * NO_HZ can leave us missing all per-cpu ticks calling
2288  * calc_load_account_active(), but since an idle CPU folds its delta into
2289  * calc_load_tasks_idle per calc_load_account_idle(), all we need to do is fold
2290  * in the pending idle delta if our idle period crossed a load cycle boundary.
2291  *
2292  * Once we've updated the global active value, we need to apply the exponential
2293  * weights adjusted to the number of cycles missed.
2294  */
2295 static void calc_global_nohz(void)
2296 {
2297 	long delta, active, n;
2298 
2299 	/*
2300 	 * If we crossed a calc_load_update boundary, make sure to fold
2301 	 * any pending idle changes, the respective CPUs might have
2302 	 * missed the tick driven calc_load_account_active() update
2303 	 * due to NO_HZ.
2304 	 */
2305 	delta = calc_load_fold_idle();
2306 	if (delta)
2307 		atomic_long_add(delta, &calc_load_tasks);
2308 
2309 	/*
2310 	 * It could be that the one fold was all it took; if so, we're done!
2311 	 */
2312 	if (time_before(jiffies, calc_load_update + 10))
2313 		return;
2314 
2315 	/*
2316 	 * Catch up: fold in however many periods we are still behind.
2317 	 */
2318 	delta = jiffies - calc_load_update - 10;
2319 	n = 1 + (delta / LOAD_FREQ);
2320 
2321 	active = atomic_long_read(&calc_load_tasks);
2322 	active = active > 0 ? active * FIXED_1 : 0;
2323 
2324 	avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
2325 	avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
2326 	avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
2327 
2328 	calc_load_update += n * LOAD_FREQ;
2329 }
2330 #else
2331 void calc_load_account_idle(struct rq *this_rq)
2332 {
2333 }
2334 
2335 static inline long calc_load_fold_idle(void)
2336 {
2337 	return 0;
2338 }
2339 
2340 static void calc_global_nohz(void)
2341 {
2342 }
2343 #endif
2344 
2345 /**
2346  * get_avenrun - get the load average array
2347  * @loads:	pointer to dest load array
2348  * @offset:	offset to add
2349  * @shift:	shift count to shift the result left
2350  *
2351  * These values are estimates at best, so no need for locking.
2352  */
2353 void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
2354 {
2355 	loads[0] = (avenrun[0] + offset) << shift;
2356 	loads[1] = (avenrun[1] + offset) << shift;
2357 	loads[2] = (avenrun[2] + offset) << shift;
2358 }
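
/*
 * Usage sketch (illustrative; the real consumer lives in fs/proc/loadavg.c
 * and uses the LOAD_INT()/LOAD_FRAC() helpers from <linux/sched.h>):
 *
 *	unsigned long avnrun[3];
 *
 *	get_avenrun(avnrun, FIXED_1/200, 0);
 *	pr_info("load: %lu.%02lu %lu.%02lu %lu.%02lu\n",
 *		LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]),
 *		LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]),
 *		LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]));
 *
 * The FIXED_1/200 offset rounds each fixed-point value to the nearest
 * 1/100th before it is split into integer and fractional parts.
 */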
2359 
2360 /*
2361  * calc_global_load - update the avenrun load estimates 10 ticks after the
2362  * CPUs have updated calc_load_tasks.
2363  */
2364 void calc_global_load(unsigned long ticks)
2365 {
2366 	long active;
2367 
2368 	if (time_before(jiffies, calc_load_update + 10))
2369 		return;
2370 
2371 	active = atomic_long_read(&calc_load_tasks);
2372 	active = active > 0 ? active * FIXED_1 : 0;
2373 
2374 	avenrun[0] = calc_load(avenrun[0], EXP_1, active);
2375 	avenrun[1] = calc_load(avenrun[1], EXP_5, active);
2376 	avenrun[2] = calc_load(avenrun[2], EXP_15, active);
2377 
2378 	calc_load_update += LOAD_FREQ;
2379 
2380 	/*
2381 	 * Account one period with whatever state we found before
2382 	 * folding in the nohz state and ageing the entire idle period.
2383 	 *
2384 	 * This avoids losing a sample when we go idle between
2385 	 * calc_load_account_active() (10 ticks ago) and now and thus
2386 	 * under-accounting.
2387 	 */
2388 	calc_global_nohz();
2389 }
2390 
2391 /*
2392  * Called from update_cpu_load() to periodically update this CPU's
2393  * active count.
2394  */
2395 static void calc_load_account_active(struct rq *this_rq)
2396 {
2397 	long delta;
2398 
2399 	if (time_before(jiffies, this_rq->calc_load_update))
2400 		return;
2401 
2402 	delta  = calc_load_fold_active(this_rq);
2403 	delta += calc_load_fold_idle();
2404 	if (delta)
2405 		atomic_long_add(delta, &calc_load_tasks);
2406 
2407 	this_rq->calc_load_update += LOAD_FREQ;
2408 }
2409 
2410 /*
2411  * The exact cpuload at various idx values, calculated at every tick would be
2412  * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
2413  *
2414  * If a cpu misses updates for n-1 ticks (as it was idle) and the update gets
2415  * called on the nth tick, when the cpu may be busy, then we have:
2416  * load = ((2^idx - 1) / 2^idx)^(n-1) * load
2417  * load = ((2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load
2418  *
2419  * decay_load_missed() below does efficient calculation of
2420  * load = ((2^idx - 1) / 2^idx)^(n-1) * load
2421  * avoiding the 0..n-1 loop that would do load = ((2^idx - 1) / 2^idx) * load
2422  *
2423  * The calculation is approximated on a 128 point scale.
2424  * degrade_zero_ticks is the number of ticks after which load at any
2425  * particular idx is approximated to be zero.
2426  * degrade_factor is a precomputed table, a row for each load idx.
2427  * Each column corresponds to degradation factor for a power of two ticks,
2428  * based on 128 point scale.
2429  * Example:
2430  * row 2, col 3 (=12) says that the degradation at load idx 2 after
2431  * 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8).
2432  *
2433  * With these power-of-2 load factors, we can degrade the load n times
2434  * by looking at 1 bits in n and doing as many mult/shift instead of
2435  * n mult/shifts needed by the exact degradation.
2436  */
2437 #define DEGRADE_SHIFT		7
2438 static const unsigned char
2439 		degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
2440 static const unsigned char
2441 		degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
2442 					{0, 0, 0, 0, 0, 0, 0, 0},
2443 					{64, 32, 8, 0, 0, 0, 0, 0},
2444 					{96, 72, 40, 12, 1, 0, 0},
2445 					{112, 98, 75, 43, 15, 1, 0},
2446 					{120, 112, 98, 76, 45, 16, 2} };
2447 
2448 /*
2449  * Update cpu_load for any missed ticks due to tickless idle. The missed
2450  * ticks all come from when the CPU was idle, so we just decay the old load
2451  * without adding any new load.
2452  */
2453 static unsigned long
2454 decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
2455 {
2456 	int j = 0;
2457 
2458 	if (!missed_updates)
2459 		return load;
2460 
2461 	if (missed_updates >= degrade_zero_ticks[idx])
2462 		return 0;
2463 
2464 	if (idx == 1)
2465 		return load >> missed_updates;
2466 
2467 	while (missed_updates) {
2468 		if (missed_updates % 2)
2469 			load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
2470 
2471 		missed_updates >>= 1;
2472 		j++;
2473 	}
2474 	return load;
2475 }
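
/*
 * Example (illustrative): decaying load == 128 at idx == 2 across
 * missed_updates == 5 (binary 101) takes two table lookups:
 *
 *	bit 0 (1 tick):  load = (128 * 96) >> 7 == 96
 *	bit 2 (4 ticks): load = ( 96 * 40) >> 7 == 30
 *
 * which is close to the exact 128 * (3/4)^5 ~= 30.4, at the cost of two
 * multiplies instead of five.
 */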
2476 
2477 /*
2478  * Update rq->cpu_load[] statistics. This function is usually called every
2479  * scheduler tick (TICK_NSEC). With tickless idle this will not be called
2480  * every tick. We fix it up based on jiffies.
2481  */
2482 static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
2483 			      unsigned long pending_updates)
2484 {
2485 	int i, scale;
2486 
2487 	this_rq->nr_load_updates++;
2488 
2489 	/* Update our load: */
2490 	this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
2491 	for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
2492 		unsigned long old_load, new_load;
2493 
2494 		/* scale is effectively 1 << i now, and >> i divides by scale */
2495 
2496 		old_load = this_rq->cpu_load[i];
2497 		old_load = decay_load_missed(old_load, pending_updates - 1, i);
2498 		new_load = this_load;
2499 		/*
2500 		 * Round up the averaging division if load is increasing. This
2501 		 * prevents us from getting stuck on 9 if the load is 10, for
2502 		 * example.
2503 		 */
2504 		if (new_load > old_load)
2505 			new_load += scale - 1;
2506 
2507 		this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
2508 	}
2509 
2510 	sched_avg_update(this_rq);
2511 }
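
/*
 * Rounding sketch for the "stuck on 9" case mentioned above (idx == 1,
 * scale == 2, old_load == 9, this_load == 10):
 *
 *	without the fixup:  (9 * 1 + 10) >> 1 ==  9	(stuck forever)
 *	with new_load += 1: (9 * 1 + 11) >> 1 == 10
 *
 * so a rising load converges on the instantaneous value instead of
 * flooring one below it.
 */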
2512 
2513 /*
2514  * Called from nohz_idle_balance() to update the load ratings before doing the
2515  * idle balance.
2516  */
2517 void update_idle_cpu_load(struct rq *this_rq)
2518 {
2519 	unsigned long curr_jiffies = jiffies;
2520 	unsigned long load = this_rq->load.weight;
2521 	unsigned long pending_updates;
2522 
2523 	/*
2524 	 * Bloody broken means of dealing with nohz, but better than nothing...
2525 	 * jiffies is updated by one cpu; another cpu can drift wrt the jiffy
2526 	 * update and see 0 difference one time and 2 the next, even though
2527 	 * we ticked at roughly the same rate.
2528 	 *
2529 	 * Hence we only use this from nohz_idle_balance() and skip this
2530 	 * nonsense when called from the scheduler_tick() since that's
2531 	 * guaranteed a stable rate.
2532 	 */
2533 	if (load || curr_jiffies == this_rq->last_load_update_tick)
2534 		return;
2535 
2536 	pending_updates = curr_jiffies - this_rq->last_load_update_tick;
2537 	this_rq->last_load_update_tick = curr_jiffies;
2538 
2539 	__update_cpu_load(this_rq, load, pending_updates);
2540 }
2541 
2542 /*
2543  * Called from scheduler_tick()
2544  */
2545 static void update_cpu_load_active(struct rq *this_rq)
2546 {
2547 	/*
2548 	 * See the mess in update_idle_cpu_load().
2549 	 */
2550 	this_rq->last_load_update_tick = jiffies;
2551 	__update_cpu_load(this_rq, this_rq->load.weight, 1);
2552 
2553 	calc_load_account_active(this_rq);
2554 }
2555 
2556 #ifdef CONFIG_SMP
2557 
2558 /*
2559  * sched_exec - execve() is a valuable balancing opportunity, because at
2560  * this point the task has the smallest effective memory and cache footprint.
2561  */
2562 void sched_exec(void)
2563 {
2564 	struct task_struct *p = current;
2565 	unsigned long flags;
2566 	int dest_cpu;
2567 
2568 	raw_spin_lock_irqsave(&p->pi_lock, flags);
2569 	dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
2570 	if (dest_cpu == smp_processor_id())
2571 		goto unlock;
2572 
2573 	if (likely(cpu_active(dest_cpu))) {
2574 		struct migration_arg arg = { p, dest_cpu };
2575 
2576 		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2577 		stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
2578 		return;
2579 	}
2580 unlock:
2581 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2582 }
2583 
2584 #endif
2585 
2586 DEFINE_PER_CPU(struct kernel_stat, kstat);
2587 DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
2588 
2589 EXPORT_PER_CPU_SYMBOL(kstat);
2590 EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
2591 
2592 /*
2593  * Return any ns on the sched_clock that have not yet been accounted in
2594  * @p in case that task is currently running.
2595  *
2596  * Called with task_rq_lock() held on @rq.
2597  */
2598 static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
2599 {
2600 	u64 ns = 0;
2601 
2602 	if (task_current(rq, p)) {
2603 		update_rq_clock(rq);
2604 		ns = rq->clock_task - p->se.exec_start;
2605 		if ((s64)ns < 0)
2606 			ns = 0;
2607 	}
2608 
2609 	return ns;
2610 }
2611 
2612 unsigned long long task_delta_exec(struct task_struct *p)
2613 {
2614 	unsigned long flags;
2615 	struct rq *rq;
2616 	u64 ns = 0;
2617 
2618 	rq = task_rq_lock(p, &flags);
2619 	ns = do_task_delta_exec(p, rq);
2620 	task_rq_unlock(rq, p, &flags);
2621 
2622 	return ns;
2623 }
2624 
2625 /*
2626  * Return accounted runtime for the task.
2627  * In case the task is currently running, return the runtime plus current's
2628  * pending runtime that has not been accounted for yet.
2629  */
2630 unsigned long long task_sched_runtime(struct task_struct *p)
2631 {
2632 	unsigned long flags;
2633 	struct rq *rq;
2634 	u64 ns = 0;
2635 
2636 	rq = task_rq_lock(p, &flags);
2637 	ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
2638 	task_rq_unlock(rq, p, &flags);
2639 
2640 	return ns;
2641 }
2642 
2643 #ifdef CONFIG_CGROUP_CPUACCT
2644 struct cgroup_subsys cpuacct_subsys;
2645 struct cpuacct root_cpuacct;
2646 #endif
2647 
2648 static inline void task_group_account_field(struct task_struct *p, int index,
2649 					    u64 tmp)
2650 {
2651 #ifdef CONFIG_CGROUP_CPUACCT
2652 	struct kernel_cpustat *kcpustat;
2653 	struct cpuacct *ca;
2654 #endif
2655 	/*
2656 	 * Since all updates are sure to touch the root cgroup, we
2657 	 * account to it first. If the root cgroup is the only cgroup,
2658 	 * then nothing else should be necessary.
2660 	 */
2661 	__get_cpu_var(kernel_cpustat).cpustat[index] += tmp;
2662 
2663 #ifdef CONFIG_CGROUP_CPUACCT
2664 	if (unlikely(!cpuacct_subsys.active))
2665 		return;
2666 
2667 	rcu_read_lock();
2668 	ca = task_ca(p);
2669 	while (ca && (ca != &root_cpuacct)) {
2670 		kcpustat = this_cpu_ptr(ca->cpustat);
2671 		kcpustat->cpustat[index] += tmp;
2672 		ca = parent_ca(ca);
2673 	}
2674 	rcu_read_unlock();
2675 #endif
2676 }
2677 
2678 
2679 /*
2680  * Account user cpu time to a process.
2681  * @p: the process that the cpu time gets accounted to
2682  * @cputime: the cpu time spent in user space since the last update
2683  * @cputime_scaled: cputime scaled by cpu frequency
2684  */
2685 void account_user_time(struct task_struct *p, cputime_t cputime,
2686 		       cputime_t cputime_scaled)
2687 {
2688 	int index;
2689 
2690 	/* Add user time to process. */
2691 	p->utime += cputime;
2692 	p->utimescaled += cputime_scaled;
2693 	account_group_user_time(p, cputime);
2694 
2695 	index = (TASK_NICE(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
2696 
2697 	/* Add user time to cpustat. */
2698 	task_group_account_field(p, index, (__force u64) cputime);
2699 
2700 	/* Account for user time used */
2701 	acct_update_integrals(p);
2702 }
2703 
2704 /*
2705  * Account guest cpu time to a process.
2706  * @p: the process that the cpu time gets accounted to
2707  * @cputime: the cpu time spent in virtual machine since the last update
2708  * @cputime_scaled: cputime scaled by cpu frequency
2709  */
2710 static void account_guest_time(struct task_struct *p, cputime_t cputime,
2711 			       cputime_t cputime_scaled)
2712 {
2713 	u64 *cpustat = kcpustat_this_cpu->cpustat;
2714 
2715 	/* Add guest time to process. */
2716 	p->utime += cputime;
2717 	p->utimescaled += cputime_scaled;
2718 	account_group_user_time(p, cputime);
2719 	p->gtime += cputime;
2720 
2721 	/* Add guest time to cpustat. */
2722 	if (TASK_NICE(p) > 0) {
2723 		cpustat[CPUTIME_NICE] += (__force u64) cputime;
2724 		cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime;
2725 	} else {
2726 		cpustat[CPUTIME_USER] += (__force u64) cputime;
2727 		cpustat[CPUTIME_GUEST] += (__force u64) cputime;
2728 	}
2729 }
2730 
2731 /*
2732  * Account system cpu time to a process and desired cpustat field
2733  * @p: the process that the cpu time gets accounted to
2734  * @cputime: the cpu time spent in kernel space since the last update
2735  * @cputime_scaled: cputime scaled by cpu frequency
2736  * @index: index of the cpustat field that has to be updated
2737  */
2738 static inline
2739 void __account_system_time(struct task_struct *p, cputime_t cputime,
2740 			cputime_t cputime_scaled, int index)
2741 {
2742 	/* Add system time to process. */
2743 	p->stime += cputime;
2744 	p->stimescaled += cputime_scaled;
2745 	account_group_system_time(p, cputime);
2746 
2747 	/* Add system time to cpustat. */
2748 	task_group_account_field(p, index, (__force u64) cputime);
2749 
2750 	/* Account for system time used */
2751 	acct_update_integrals(p);
2752 }
2753 
2754 /*
2755  * Account system cpu time to a process.
2756  * @p: the process that the cpu time gets accounted to
2757  * @hardirq_offset: the offset to subtract from hardirq_count()
2758  * @cputime: the cpu time spent in kernel space since the last update
2759  * @cputime_scaled: cputime scaled by cpu frequency
2760  */
2761 void account_system_time(struct task_struct *p, int hardirq_offset,
2762 			 cputime_t cputime, cputime_t cputime_scaled)
2763 {
2764 	int index;
2765 
2766 	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
2767 		account_guest_time(p, cputime, cputime_scaled);
2768 		return;
2769 	}
2770 
2771 	if (hardirq_count() - hardirq_offset)
2772 		index = CPUTIME_IRQ;
2773 	else if (in_serving_softirq())
2774 		index = CPUTIME_SOFTIRQ;
2775 	else
2776 		index = CPUTIME_SYSTEM;
2777 
2778 	__account_system_time(p, cputime, cputime_scaled, index);
2779 }
2780 
2781 /*
2782  * Account for involuntary wait time.
2783  * @cputime: the cpu time spent in involuntary wait
2784  */
2785 void account_steal_time(cputime_t cputime)
2786 {
2787 	u64 *cpustat = kcpustat_this_cpu->cpustat;
2788 
2789 	cpustat[CPUTIME_STEAL] += (__force u64) cputime;
2790 }
2791 
2792 /*
2793  * Account for idle time.
2794  * @cputime: the cpu time spent in idle wait
2795  */
2796 void account_idle_time(cputime_t cputime)
2797 {
2798 	u64 *cpustat = kcpustat_this_cpu->cpustat;
2799 	struct rq *rq = this_rq();
2800 
2801 	if (atomic_read(&rq->nr_iowait) > 0)
2802 		cpustat[CPUTIME_IOWAIT] += (__force u64) cputime;
2803 	else
2804 		cpustat[CPUTIME_IDLE] += (__force u64) cputime;
2805 }
2806 
2807 static __always_inline bool steal_account_process_tick(void)
2808 {
2809 #ifdef CONFIG_PARAVIRT
2810 	if (static_key_false(&paravirt_steal_enabled)) {
2811 		u64 steal, st = 0;
2812 
2813 		steal = paravirt_steal_clock(smp_processor_id());
2814 		steal -= this_rq()->prev_steal_time;
2815 
2816 		st = steal_ticks(steal);
2817 		this_rq()->prev_steal_time += st * TICK_NSEC;
2818 
2819 		account_steal_time(st);
2820 		return st;
2821 	}
2822 #endif
2823 	return false;
2824 }
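
/*
 * Note the whole-tick granularity above: prev_steal_time only advances by
 * st * TICK_NSEC, so e.g. 3.7 ticks worth of stolen time accounts 3 ticks
 * now and carries the remaining 0.7 tick into the next check.
 */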
2825 
2826 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
2827 
2828 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
2829 /*
2830  * Account a tick to a process and cpustat
2831  * @p: the process that the cpu time gets accounted to
2832  * @user_tick: is the tick from userspace
2833  * @rq: the pointer to rq
2834  *
2835  * Tick demultiplexing follows the order
2836  * - pending hardirq update
2837  * - pending softirq update
2838  * - user_time
2839  * - idle_time
2840  * - system time
2841  *   - check for guest_time
2842  *   - else account as system_time
2843  *
2844  * The check for hardirq is done for both system and user time, as there is
2845  * no timer going off while we are in a hardirq and hence we may never get an
2846  * opportunity to update it solely as system time.
2847  * p->stime and friends are only updated on system time and not on irq or
2848  * softirq time, as those no longer count in task exec_runtime.
2849  */
2850 static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
2851 						struct rq *rq)
2852 {
2853 	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
2854 	u64 *cpustat = kcpustat_this_cpu->cpustat;
2855 
2856 	if (steal_account_process_tick())
2857 		return;
2858 
2859 	if (irqtime_account_hi_update()) {
2860 		cpustat[CPUTIME_IRQ] += (__force u64) cputime_one_jiffy;
2861 	} else if (irqtime_account_si_update()) {
2862 		cpustat[CPUTIME_SOFTIRQ] += (__force u64) cputime_one_jiffy;
2863 	} else if (this_cpu_ksoftirqd() == p) {
2864 		/*
2865 		 * ksoftirqd time does not get accounted in cpu_softirq_time.
2866 		 * So, we have to handle it separately here.
2867 		 * Also, p->stime needs to be updated for ksoftirqd.
2868 		 */
2869 		__account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
2870 					CPUTIME_SOFTIRQ);
2871 	} else if (user_tick) {
2872 		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
2873 	} else if (p == rq->idle) {
2874 		account_idle_time(cputime_one_jiffy);
2875 	} else if (p->flags & PF_VCPU) { /* System time or guest time */
2876 		account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
2877 	} else {
2878 		__account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
2879 					CPUTIME_SYSTEM);
2880 	}
2881 }
2882 
2883 static void irqtime_account_idle_ticks(int ticks)
2884 {
2885 	int i;
2886 	struct rq *rq = this_rq();
2887 
2888 	for (i = 0; i < ticks; i++)
2889 		irqtime_account_process_tick(current, 0, rq);
2890 }
2891 #else /* CONFIG_IRQ_TIME_ACCOUNTING */
2892 static void irqtime_account_idle_ticks(int ticks) {}
2893 static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
2894 						struct rq *rq) {}
2895 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
2896 
2897 /*
2898  * Account a single tick of cpu time.
2899  * @p: the process that the cpu time gets accounted to
2900  * @user_tick: indicates if the tick is a user or a system tick
2901  */
2902 void account_process_tick(struct task_struct *p, int user_tick)
2903 {
2904 	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
2905 	struct rq *rq = this_rq();
2906 
2907 	if (sched_clock_irqtime) {
2908 		irqtime_account_process_tick(p, user_tick, rq);
2909 		return;
2910 	}
2911 
2912 	if (steal_account_process_tick())
2913 		return;
2914 
2915 	if (user_tick)
2916 		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
2917 	else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
2918 		account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
2919 				    one_jiffy_scaled);
2920 	else
2921 		account_idle_time(cputime_one_jiffy);
2922 }
2923 
2924 /*
2925  * Account multiple ticks of steal time.
2926  * @ticks: number of stolen ticks
2928  */
2929 void account_steal_ticks(unsigned long ticks)
2930 {
2931 	account_steal_time(jiffies_to_cputime(ticks));
2932 }
2933 
2934 /*
2935  * Account multiple ticks of idle time.
2936  * @ticks: number of ticks spent idle
2937  */
2938 void account_idle_ticks(unsigned long ticks)
2939 {
2941 	if (sched_clock_irqtime) {
2942 		irqtime_account_idle_ticks(ticks);
2943 		return;
2944 	}
2945 
2946 	account_idle_time(jiffies_to_cputime(ticks));
2947 }
2948 
2949 #endif
2950 
2951 /*
2952  * Use precise platform statistics if available:
2953  */
2954 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
2955 void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
2956 {
2957 	*ut = p->utime;
2958 	*st = p->stime;
2959 }
2960 
2961 void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
2962 {
2963 	struct task_cputime cputime;
2964 
2965 	thread_group_cputime(p, &cputime);
2966 
2967 	*ut = cputime.utime;
2968 	*st = cputime.stime;
2969 }
2970 #else
2971 
2972 #ifndef nsecs_to_cputime
2973 # define nsecs_to_cputime(__nsecs)	nsecs_to_jiffies(__nsecs)
2974 #endif
2975 
2976 void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
2977 {
2978 	cputime_t rtime, utime = p->utime, total = utime + p->stime;
2979 
2980 	/*
2981 	 * Use CFS's precise accounting:
2982 	 */
2983 	rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
2984 
2985 	if (total) {
2986 		u64 temp = (__force u64) rtime;
2987 
2988 		temp *= (__force u64) utime;
2989 		do_div(temp, (__force u32) total);
2990 		utime = (__force cputime_t) temp;
2991 	} else
2992 		utime = rtime;
2993 
2994 	/*
2995 	 * Compare with previous values, to keep monotonicity:
2996 	 */
2997 	p->prev_utime = max(p->prev_utime, utime);
2998 	p->prev_stime = max(p->prev_stime, rtime - p->prev_utime);
2999 
3000 	*ut = p->prev_utime;
3001 	*st = p->prev_stime;
3002 }
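
/*
 * Scaling sketch (illustrative): if the tick samples say utime == 3 and
 * stime == 1 (total == 4) but CFS measured rtime == 8 ticks of actual
 * runtime, the reported values become:
 *
 *	utime = 8 * 3 / 4 == 6
 *	stime = 8 - 6     == 2
 *
 * i.e. the precise runtime is split in the 3:1 ratio suggested by the
 * samples, and the prev_utime/prev_stime clamps keep both values
 * monotonic across calls.
 */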
3003 
3004 /*
3005  * Must be called with siglock held.
3006  */
3007 void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
3008 {
3009 	struct signal_struct *sig = p->signal;
3010 	struct task_cputime cputime;
3011 	cputime_t rtime, utime, total;
3012 
3013 	thread_group_cputime(p, &cputime);
3014 
3015 	total = cputime.utime + cputime.stime;
3016 	rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
3017 
3018 	if (total) {
3019 		u64 temp = (__force u64) rtime;
3020 
3021 		temp *= (__force u64) cputime.utime;
3022 		do_div(temp, (__force u32) total);
3023 		utime = (__force cputime_t) temp;
3024 	} else
3025 		utime = rtime;
3026 
3027 	sig->prev_utime = max(sig->prev_utime, utime);
3028 	sig->prev_stime = max(sig->prev_stime, rtime - sig->prev_utime);
3029 
3030 	*ut = sig->prev_utime;
3031 	*st = sig->prev_stime;
3032 }
3033 #endif
3034 
3035 /*
3036  * This function gets called by the timer code, with HZ frequency.
3037  * We call it with interrupts disabled.
3038  */
3039 void scheduler_tick(void)
3040 {
3041 	int cpu = smp_processor_id();
3042 	struct rq *rq = cpu_rq(cpu);
3043 	struct task_struct *curr = rq->curr;
3044 
3045 	sched_clock_tick();
3046 
3047 	raw_spin_lock(&rq->lock);
3048 	update_rq_clock(rq);
3049 	update_cpu_load_active(rq);
3050 	curr->sched_class->task_tick(rq, curr, 0);
3051 	raw_spin_unlock(&rq->lock);
3052 
3053 	perf_event_task_tick();
3054 
3055 #ifdef CONFIG_SMP
3056 	rq->idle_balance = idle_cpu(cpu);
3057 	trigger_load_balance(rq, cpu);
3058 #endif
3059 }
3060 
3061 notrace unsigned long get_parent_ip(unsigned long addr)
3062 {
3063 	if (in_lock_functions(addr)) {
3064 		addr = CALLER_ADDR2;
3065 		if (in_lock_functions(addr))
3066 			addr = CALLER_ADDR3;
3067 	}
3068 	return addr;
3069 }
3070 
3071 #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
3072 				defined(CONFIG_PREEMPT_TRACER))
3073 
3074 void __kprobes add_preempt_count(int val)
3075 {
3076 #ifdef CONFIG_DEBUG_PREEMPT
3077 	/*
3078 	 * Underflow?
3079 	 */
3080 	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
3081 		return;
3082 #endif
3083 	preempt_count() += val;
3084 #ifdef CONFIG_DEBUG_PREEMPT
3085 	/*
3086 	 * Spinlock count overflowing soon?
3087 	 */
3088 	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
3089 				PREEMPT_MASK - 10);
3090 #endif
3091 	if (preempt_count() == val)
3092 		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
3093 }
3094 EXPORT_SYMBOL(add_preempt_count);
3095 
3096 void __kprobes sub_preempt_count(int val)
3097 {
3098 #ifdef CONFIG_DEBUG_PREEMPT
3099 	/*
3100 	 * Underflow?
3101 	 */
3102 	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
3103 		return;
3104 	/*
3105 	 * Is the spinlock portion underflowing?
3106 	 */
3107 	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
3108 			!(preempt_count() & PREEMPT_MASK)))
3109 		return;
3110 #endif
3111 
3112 	if (preempt_count() == val)
3113 		trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
3114 	preempt_count() -= val;
3115 }
3116 EXPORT_SYMBOL(sub_preempt_count);
3117 
3118 #endif
3119 
3120 /*
3121  * Print scheduling while atomic bug:
3122  */
3123 static noinline void __schedule_bug(struct task_struct *prev)
3124 {
3125 	if (oops_in_progress)
3126 		return;
3127 
3128 	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
3129 		prev->comm, prev->pid, preempt_count());
3130 
3131 	debug_show_held_locks(prev);
3132 	print_modules();
3133 	if (irqs_disabled())
3134 		print_irqtrace_events(prev);
3135 	dump_stack();
3136 	add_taint(TAINT_WARN);
3137 }
3138 
3139 /*
3140  * Various schedule()-time debugging checks and statistics:
3141  */
3142 static inline void schedule_debug(struct task_struct *prev)
3143 {
3144 	/*
3145 	 * Test if we are atomic. Since do_exit() needs to call into
3146 	 * schedule() atomically, we ignore that path for now.
3147 	 * Otherwise, whine if we are scheduling when we should not be.
3148 	 */
3149 	if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
3150 		__schedule_bug(prev);
3151 	rcu_sleep_check();
3152 
3153 	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
3154 
3155 	schedstat_inc(this_rq(), sched_count);
3156 }
3157 
3158 static void put_prev_task(struct rq *rq, struct task_struct *prev)
3159 {
3160 	if (prev->on_rq || rq->skip_clock_update < 0)
3161 		update_rq_clock(rq);
3162 	prev->sched_class->put_prev_task(rq, prev);
3163 }
3164 
3165 /*
3166  * Pick up the highest-prio task:
3167  */
3168 static inline struct task_struct *
3169 pick_next_task(struct rq *rq)
3170 {
3171 	const struct sched_class *class;
3172 	struct task_struct *p;
3173 
3174 	/*
3175 	 * Optimization: we know that if all tasks are in
3176 	 * the fair class we can call that function directly:
3177 	 */
3178 	if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
3179 		p = fair_sched_class.pick_next_task(rq);
3180 		if (likely(p))
3181 			return p;
3182 	}
3183 
3184 	for_each_class(class) {
3185 		p = class->pick_next_task(rq);
3186 		if (p)
3187 			return p;
3188 	}
3189 
3190 	BUG(); /* the idle class will always have a runnable task */
3191 }
3192 
3193 /*
3194  * __schedule() is the main scheduler function.
3195  */
3196 static void __sched __schedule(void)
3197 {
3198 	struct task_struct *prev, *next;
3199 	unsigned long *switch_count;
3200 	struct rq *rq;
3201 	int cpu;
3202 
3203 need_resched:
3204 	preempt_disable();
3205 	cpu = smp_processor_id();
3206 	rq = cpu_rq(cpu);
3207 	rcu_note_context_switch(cpu);
3208 	prev = rq->curr;
3209 
3210 	schedule_debug(prev);
3211 
3212 	if (sched_feat(HRTICK))
3213 		hrtick_clear(rq);
3214 
3215 	raw_spin_lock_irq(&rq->lock);
3216 
3217 	switch_count = &prev->nivcsw;
3218 	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
3219 		if (unlikely(signal_pending_state(prev->state, prev))) {
3220 			prev->state = TASK_RUNNING;
3221 		} else {
3222 			deactivate_task(rq, prev, DEQUEUE_SLEEP);
3223 			prev->on_rq = 0;
3224 
3225 			/*
3226 			 * If a worker went to sleep, notify and ask workqueue
3227 			 * whether it wants to wake up a task to maintain
3228 			 * concurrency.
3229 			 */
3230 			if (prev->flags & PF_WQ_WORKER) {
3231 				struct task_struct *to_wakeup;
3232 
3233 				to_wakeup = wq_worker_sleeping(prev, cpu);
3234 				if (to_wakeup)
3235 					try_to_wake_up_local(to_wakeup);
3236 			}
3237 		}
3238 		switch_count = &prev->nvcsw;
3239 	}
3240 
3241 	pre_schedule(rq, prev);
3242 
3243 	if (unlikely(!rq->nr_running))
3244 		idle_balance(cpu, rq);
3245 
3246 	put_prev_task(rq, prev);
3247 	next = pick_next_task(rq);
3248 	clear_tsk_need_resched(prev);
3249 	rq->skip_clock_update = 0;
3250 
3251 	if (likely(prev != next)) {
3252 		rq->nr_switches++;
3253 		rq->curr = next;
3254 		++*switch_count;
3255 
3256 		context_switch(rq, prev, next); /* unlocks the rq */
3257 		/*
3258 		 * The context switch has flipped the stack from under us
3259 		 * and restored the local variables which were saved when
3260 		 * this task called schedule() in the past. prev == current
3261 		 * is still correct, but it can be moved to another cpu/rq.
3262 		 */
3263 		cpu = smp_processor_id();
3264 		rq = cpu_rq(cpu);
3265 	} else
3266 		raw_spin_unlock_irq(&rq->lock);
3267 
3268 	post_schedule(rq);
3269 
3270 	sched_preempt_enable_no_resched();
3271 	if (need_resched())
3272 		goto need_resched;
3273 }
3274 
3275 static inline void sched_submit_work(struct task_struct *tsk)
3276 {
3277 	if (!tsk->state || tsk_is_pi_blocked(tsk))
3278 		return;
3279 	/*
3280 	 * If we are going to sleep and we have plugged IO queued,
3281 	 * make sure to submit it to avoid deadlocks.
3282 	 */
3283 	if (blk_needs_flush_plug(tsk))
3284 		blk_schedule_flush_plug(tsk);
3285 }
3286 
3287 asmlinkage void __sched schedule(void)
3288 {
3289 	struct task_struct *tsk = current;
3290 
3291 	sched_submit_work(tsk);
3292 	__schedule();
3293 }
3294 EXPORT_SYMBOL(schedule);
3295 
3296 /**
3297  * schedule_preempt_disabled - called with preemption disabled
3298  *
3299  * Returns with preemption disabled. Note: preempt_count must be 1
3300  */
3301 void __sched schedule_preempt_disabled(void)
3302 {
3303 	sched_preempt_enable_no_resched();
3304 	schedule();
3305 	preempt_disable();
3306 }
3307 
3308 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
3309 
3310 static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
3311 {
3312 	if (lock->owner != owner)
3313 		return false;
3314 
3315 	/*
3316 	 * Ensure we emit the owner->on_cpu dereference _after_ checking that
3317 	 * lock->owner still matches owner. If that check fails, owner might
3318 	 * point to free()d memory; if it still matches, the rcu_read_lock()
3319 	 * ensures the memory stays valid.
3320 	 */
3321 	barrier();
3322 
3323 	return owner->on_cpu;
3324 }
3325 
3326 /*
3327  * Look out! "owner" is an entirely speculative pointer
3328  * access and not reliable.
3329  */
3330 int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
3331 {
3332 	if (!sched_feat(OWNER_SPIN))
3333 		return 0;
3334 
3335 	rcu_read_lock();
3336 	while (owner_running(lock, owner)) {
3337 		if (need_resched())
3338 			break;
3339 
3340 		arch_mutex_cpu_relax();
3341 	}
3342 	rcu_read_unlock();
3343 
3344 	/*
3345 	 * We break out of the loop above on need_resched() and when the
3346 	 * owner changes, which is a sign of heavy contention. Return
3347 	 * success only when lock->owner is NULL.
3348 	 */
3349 	return lock->owner == NULL;
3350 }
3351 #endif
3352 
3353 #ifdef CONFIG_PREEMPT
3354 /*
3355  * This is the entry point to schedule() from in-kernel preemption
3356  * off of preempt_enable. Kernel preemption off of return from
3357  * interrupt is handled by preempt_schedule_irq() below.
3358  */
3359 asmlinkage void __sched notrace preempt_schedule(void)
3360 {
3361 	struct thread_info *ti = current_thread_info();
3362 
3363 	/*
3364 	 * If there is a non-zero preempt_count or interrupts are disabled,
3365 	 * we do not want to preempt the current task. Just return.
3366 	 */
3367 	if (likely(ti->preempt_count || irqs_disabled()))
3368 		return;
3369 
3370 	do {
3371 		add_preempt_count_notrace(PREEMPT_ACTIVE);
3372 		__schedule();
3373 		sub_preempt_count_notrace(PREEMPT_ACTIVE);
3374 
3375 		/*
3376 		 * Check again in case we missed a preemption opportunity
3377 		 * between schedule and now.
3378 		 */
3379 		barrier();
3380 	} while (need_resched());
3381 }
3382 EXPORT_SYMBOL(preempt_schedule);
3383 
3384 /*
3385  * This is the entry point to schedule() from kernel preemption
3386  * off of irq context.
3387  * Note that this is called and returns with irqs disabled. This
3388  * protects us against recursive calls from irq context.
3389  */
3390 asmlinkage void __sched preempt_schedule_irq(void)
3391 {
3392 	struct thread_info *ti = current_thread_info();
3393 
3394 	/* Catch callers which need to be fixed */
3395 	BUG_ON(ti->preempt_count || !irqs_disabled());
3396 
3397 	do {
3398 		add_preempt_count(PREEMPT_ACTIVE);
3399 		local_irq_enable();
3400 		__schedule();
3401 		local_irq_disable();
3402 		sub_preempt_count(PREEMPT_ACTIVE);
3403 
3404 		/*
3405 		 * Check again in case we missed a preemption opportunity
3406 		 * between schedule and now.
3407 		 */
3408 		barrier();
3409 	} while (need_resched());
3410 }
3411 
3412 #endif /* CONFIG_PREEMPT */
3413 
3414 int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
3415 			  void *key)
3416 {
3417 	return try_to_wake_up(curr->private, mode, wake_flags);
3418 }
3419 EXPORT_SYMBOL(default_wake_function);
3420 
3421 /*
3422  * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
3423  * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
3424  * number) then we wake all the non-exclusive tasks and one exclusive task.
3425  *
3426  * There are circumstances in which we can try to wake a task which has already
3427  * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
3428  * zero in this (rare) case, and we handle it by continuing to scan the queue.
3429  */
3430 static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
3431 			int nr_exclusive, int wake_flags, void *key)
3432 {
3433 	wait_queue_t *curr, *next;
3434 
3435 	list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
3436 		unsigned flags = curr->flags;
3437 
3438 		if (curr->func(curr, mode, wake_flags, key) &&
3439 				(flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
3440 			break;
3441 	}
3442 }
3443 
3444 /**
3445  * __wake_up - wake up threads blocked on a waitqueue.
3446  * @q: the waitqueue
3447  * @mode: which threads
3448  * @nr_exclusive: how many wake-one or wake-many threads to wake up
3449  * @key: is directly passed to the wakeup function
3450  *
3451  * It may be assumed that this function implies a write memory barrier before
3452  * changing the task state if and only if any tasks are woken up.
3453  */
3454 void __wake_up(wait_queue_head_t *q, unsigned int mode,
3455 			int nr_exclusive, void *key)
3456 {
3457 	unsigned long flags;
3458 
3459 	spin_lock_irqsave(&q->lock, flags);
3460 	__wake_up_common(q, mode, nr_exclusive, 0, key);
3461 	spin_unlock_irqrestore(&q->lock, flags);
3462 }
3463 EXPORT_SYMBOL(__wake_up);
3464 
3465 /*
3466  * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
3467  */
3468 void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr)
3469 {
3470 	__wake_up_common(q, mode, nr, 0, NULL);
3471 }
3472 EXPORT_SYMBOL_GPL(__wake_up_locked);
3473 
3474 void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
3475 {
3476 	__wake_up_common(q, mode, 1, 0, key);
3477 }
3478 EXPORT_SYMBOL_GPL(__wake_up_locked_key);
3479 
3480 /**
3481  * __wake_up_sync_key - wake up threads blocked on a waitqueue.
3482  * @q: the waitqueue
3483  * @mode: which threads
3484  * @nr_exclusive: how many wake-one or wake-many threads to wake up
3485  * @key: opaque value to be passed to wakeup targets
3486  *
3487  * The sync wakeup differs in that the waker knows that it will schedule
3488  * away soon, so while the target thread will be woken up, it will not
3489  * be migrated to another CPU - ie. the two threads are 'synchronized'
3490  * with each other. This can prevent needless bouncing between CPUs.
3491  *
3492  * On UP it can prevent extra preemption.
3493  *
3494  * It may be assumed that this function implies a write memory barrier before
3495  * changing the task state if and only if any tasks are woken up.
3496  */
3497 void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
3498 			int nr_exclusive, void *key)
3499 {
3500 	unsigned long flags;
3501 	int wake_flags = WF_SYNC;
3502 
3503 	if (unlikely(!q))
3504 		return;
3505 
3506 	if (unlikely(!nr_exclusive))
3507 		wake_flags = 0;
3508 
3509 	spin_lock_irqsave(&q->lock, flags);
3510 	__wake_up_common(q, mode, nr_exclusive, wake_flags, key);
3511 	spin_unlock_irqrestore(&q->lock, flags);
3512 }
3513 EXPORT_SYMBOL_GPL(__wake_up_sync_key);
3514 
3515 /*
3516  * __wake_up_sync - see __wake_up_sync_key()
3517  */
3518 void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
3519 {
3520 	__wake_up_sync_key(q, mode, nr_exclusive, NULL);
3521 }
3522 EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */
3523 
3524 /**
3525  * complete: - signals a single thread waiting on this completion
3526  * @x:  holds the state of this particular completion
3527  *
3528  * This will wake up a single thread waiting on this completion. Threads will be
3529  * awakened in the same order in which they were queued.
3530  *
3531  * See also complete_all(), wait_for_completion() and related routines.
3532  *
3533  * It may be assumed that this function implies a write memory barrier before
3534  * changing the task state if and only if any tasks are woken up.
3535  */
3536 void complete(struct completion *x)
3537 {
3538 	unsigned long flags;
3539 
3540 	spin_lock_irqsave(&x->wait.lock, flags);
3541 	x->done++;
3542 	__wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
3543 	spin_unlock_irqrestore(&x->wait.lock, flags);
3544 }
3545 EXPORT_SYMBOL(complete);
3546 
3547 /**
3548  * complete_all: - signals all threads waiting on this completion
3549  * @x:  holds the state of this particular completion
3550  *
3551  * This will wake up all threads waiting on this particular completion event.
3552  *
3553  * It may be assumed that this function implies a write memory barrier before
3554  * changing the task state if and only if any tasks are woken up.
3555  */
3556 void complete_all(struct completion *x)
3557 {
3558 	unsigned long flags;
3559 
3560 	spin_lock_irqsave(&x->wait.lock, flags);
3561 	x->done += UINT_MAX/2;
3562 	__wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
3563 	spin_unlock_irqrestore(&x->wait.lock, flags);
3564 }
3565 EXPORT_SYMBOL(complete_all);
3566 
3567 static inline long __sched
3568 do_wait_for_common(struct completion *x, long timeout, int state)
3569 {
3570 	if (!x->done) {
3571 		DECLARE_WAITQUEUE(wait, current);
3572 
3573 		__add_wait_queue_tail_exclusive(&x->wait, &wait);
3574 		do {
3575 			if (signal_pending_state(state, current)) {
3576 				timeout = -ERESTARTSYS;
3577 				break;
3578 			}
3579 			__set_current_state(state);
3580 			spin_unlock_irq(&x->wait.lock);
3581 			timeout = schedule_timeout(timeout);
3582 			spin_lock_irq(&x->wait.lock);
3583 		} while (!x->done && timeout);
3584 		__remove_wait_queue(&x->wait, &wait);
3585 		if (!x->done)
3586 			return timeout;
3587 	}
3588 	x->done--;
3589 	return timeout ?: 1;
3590 }
3591 
3592 static long __sched
3593 wait_for_common(struct completion *x, long timeout, int state)
3594 {
3595 	might_sleep();
3596 
3597 	spin_lock_irq(&x->wait.lock);
3598 	timeout = do_wait_for_common(x, timeout, state);
3599 	spin_unlock_irq(&x->wait.lock);
3600 	return timeout;
3601 }
3602 
3603 /**
3604  * wait_for_completion: - waits for completion of a task
3605  * @x:  holds the state of this particular completion
3606  *
3607  * This waits to be signaled for completion of a specific task. It is NOT
3608  * interruptible and there is no timeout.
3609  *
3610  * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
3611  * and interrupt capability. Also see complete().
3612  */
3613 void __sched wait_for_completion(struct completion *x)
3614 {
3615 	wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
3616 }
3617 EXPORT_SYMBOL(wait_for_completion);
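
/*
 * Usage sketch (illustrative only, not tied to any particular caller;
 * start_some_async_work() is a hypothetical helper):
 *
 *	DECLARE_COMPLETION_ONSTACK(done);
 *
 *	start_some_async_work(&done);
 *	wait_for_completion(&done);
 *
 * The waiter sleeps in TASK_UNINTERRUPTIBLE until the signalling side,
 * often an interrupt handler or another thread, calls complete(&done).
 */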
3618 
3619 /**
3620  * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
3621  * @x:  holds the state of this particular completion
3622  * @timeout:  timeout value in jiffies
3623  *
3624  * This waits for either a completion of a specific task to be signaled or for a
3625  * specified timeout to expire. The timeout is in jiffies. It is not
3626  * interruptible.
3627  *
3628  * The return value is 0 if timed out, and positive (at least 1, or number of
3629  * jiffies left till timeout) if completed.
3630  */
3631 unsigned long __sched
3632 wait_for_completion_timeout(struct completion *x, unsigned long timeout)
3633 {
3634 	return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
3635 }
3636 EXPORT_SYMBOL(wait_for_completion_timeout);
3637 
3638 /**
3639  * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
3640  * @x:  holds the state of this particular completion
3641  *
3642  * This waits for completion of a specific task to be signaled. It is
3643  * interruptible.
3644  *
3645  * The return value is -ERESTARTSYS if interrupted, 0 if completed.
3646  */
3647 int __sched wait_for_completion_interruptible(struct completion *x)
3648 {
3649 	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
3650 	if (t == -ERESTARTSYS)
3651 		return t;
3652 	return 0;
3653 }
3654 EXPORT_SYMBOL(wait_for_completion_interruptible);
3655 
3656 /**
3657  * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
3658  * @x:  holds the state of this particular completion
3659  * @timeout:  timeout value in jiffies
3660  *
3661  * This waits for either a completion of a specific task to be signaled or for a
3662  * specified timeout to expire. It is interruptible. The timeout is in jiffies.
3663  *
3664  * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
3665  * positive (at least 1, or number of jiffies left till timeout) if completed.
3666  */
3667 long __sched
3668 wait_for_completion_interruptible_timeout(struct completion *x,
3669 					  unsigned long timeout)
3670 {
3671 	return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
3672 }
3673 EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
3674 
3675 /**
3676  * wait_for_completion_killable: - waits for completion of a task (killable)
3677  * @x:  holds the state of this particular completion
3678  *
3679  * This waits to be signaled for completion of a specific task. It can be
3680  * interrupted by a kill signal.
3681  *
3682  * The return value is -ERESTARTSYS if interrupted, 0 if completed.
3683  */
3684 int __sched wait_for_completion_killable(struct completion *x)
3685 {
3686 	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
3687 	if (t == -ERESTARTSYS)
3688 		return t;
3689 	return 0;
3690 }
3691 EXPORT_SYMBOL(wait_for_completion_killable);
3692 
3693 /**
3694  * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
3695  * @x:  holds the state of this particular completion
3696  * @timeout:  timeout value in jiffies
3697  *
3698  * This waits for either a completion of a specific task to be
3699  * signaled or for a specified timeout to expire. It can be
3700  * interrupted by a kill signal. The timeout is in jiffies.
3701  *
3702  * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
3703  * positive (at least 1, or number of jiffies left till timeout) if completed.
3704  */
3705 long __sched
3706 wait_for_completion_killable_timeout(struct completion *x,
3707 				     unsigned long timeout)
3708 {
3709 	return wait_for_common(x, timeout, TASK_KILLABLE);
3710 }
3711 EXPORT_SYMBOL(wait_for_completion_killable_timeout);
3712 
3713 /**
3714  *	try_wait_for_completion - try to decrement a completion without blocking
3715  *	@x:	completion structure
3716  *
3717  *	Returns: 0 if a decrement cannot be done without blocking
3718  *		 1 if a decrement succeeded.
3719  *
3720  *	If a completion is being used as a counting completion,
3721  *	attempt to decrement the counter without blocking. This
3722  *	enables us to avoid waiting if the resource the completion
3723  *	is protecting is not available.
3724  */
3725 bool try_wait_for_completion(struct completion *x)
3726 {
3727 	unsigned long flags;
3728 	int ret = 1;
3729 
3730 	spin_lock_irqsave(&x->wait.lock, flags);
3731 	if (!x->done)
3732 		ret = 0;
3733 	else
3734 		x->done--;
3735 	spin_unlock_irqrestore(&x->wait.lock, flags);
3736 	return ret;
3737 }
3738 EXPORT_SYMBOL(try_wait_for_completion);
3739 
3740 /**
3741  *	completion_done - Test to see if a completion has any waiters
3742  *	@x:	completion structure
3743  *
3744  *	Returns: 0 if there are waiters (wait_for_completion() in progress)
3745  *		 1 if there are no waiters.
3746  *
3747  */
3748 bool completion_done(struct completion *x)
3749 {
3750 	unsigned long flags;
3751 	int ret = 1;
3752 
3753 	spin_lock_irqsave(&x->wait.lock, flags);
3754 	if (!x->done)
3755 		ret = 0;
3756 	spin_unlock_irqrestore(&x->wait.lock, flags);
3757 	return ret;
3758 }
3759 EXPORT_SYMBOL(completion_done);
3760 
3761 static long __sched
3762 sleep_on_common(wait_queue_head_t *q, int state, long timeout)
3763 {
3764 	unsigned long flags;
3765 	wait_queue_t wait;
3766 
3767 	init_waitqueue_entry(&wait, current);
3768 
3769 	__set_current_state(state);
3770 
3771 	spin_lock_irqsave(&q->lock, flags);
3772 	__add_wait_queue(q, &wait);
3773 	spin_unlock(&q->lock);
3774 	timeout = schedule_timeout(timeout);
3775 	spin_lock_irq(&q->lock);
3776 	__remove_wait_queue(q, &wait);
3777 	spin_unlock_irqrestore(&q->lock, flags);
3778 
3779 	return timeout;
3780 }
3781 
3782 void __sched interruptible_sleep_on(wait_queue_head_t *q)
3783 {
3784 	sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
3785 }
3786 EXPORT_SYMBOL(interruptible_sleep_on);
3787 
3788 long __sched
3789 interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
3790 {
3791 	return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
3792 }
3793 EXPORT_SYMBOL(interruptible_sleep_on_timeout);
3794 
3795 void __sched sleep_on(wait_queue_head_t *q)
3796 {
3797 	sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
3798 }
3799 EXPORT_SYMBOL(sleep_on);
3800 
3801 long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
3802 {
3803 	return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
3804 }
3805 EXPORT_SYMBOL(sleep_on_timeout);
3806 
3807 #ifdef CONFIG_RT_MUTEXES
3808 
3809 /*
3810  * rt_mutex_setprio - set the current priority of a task
3811  * @p: task
3812  * @prio: prio value (kernel-internal form)
3813  *
3814  * This function changes the 'effective' priority of a task. It does
3815  * not touch ->normal_prio like __setscheduler().
3816  *
3817  * Used by the rt_mutex code to implement priority inheritance logic.
3818  */
3819 void rt_mutex_setprio(struct task_struct *p, int prio)
3820 {
3821 	int oldprio, on_rq, running;
3822 	struct rq *rq;
3823 	const struct sched_class *prev_class;
3824 
3825 	BUG_ON(prio < 0 || prio > MAX_PRIO);
3826 
3827 	rq = __task_rq_lock(p);
3828 
3829 	/*
3830 	 * Idle task boosting is a no-no in general. There is one
3831 	 * exception, when PREEMPT_RT and NOHZ are active:
3832 	 *
3833 	 * The idle task calls get_next_timer_interrupt() and holds
3834 	 * the timer wheel base->lock on the CPU and another CPU wants
3835 	 * to access the timer (probably to cancel it). We can safely
3836 	 * ignore the boosting request, as the idle CPU runs this code
3837 	 * with interrupts disabled and will complete the lock
3838 	 * protected section without being interrupted. So there is no
3839 	 * real need to boost.
3840 	 */
3841 	if (unlikely(p == rq->idle)) {
3842 		WARN_ON(p != rq->curr);
3843 		WARN_ON(p->pi_blocked_on);
3844 		goto out_unlock;
3845 	}
3846 
3847 	trace_sched_pi_setprio(p, prio);
3848 	oldprio = p->prio;
3849 	prev_class = p->sched_class;
3850 	on_rq = p->on_rq;
3851 	running = task_current(rq, p);
3852 	if (on_rq)
3853 		dequeue_task(rq, p, 0);
3854 	if (running)
3855 		p->sched_class->put_prev_task(rq, p);
3856 
3857 	if (rt_prio(prio))
3858 		p->sched_class = &rt_sched_class;
3859 	else
3860 		p->sched_class = &fair_sched_class;
3861 
3862 	p->prio = prio;
3863 
3864 	if (running)
3865 		p->sched_class->set_curr_task(rq);
3866 	if (on_rq)
3867 		enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
3868 
3869 	check_class_changed(rq, p, prev_class, oldprio);
3870 out_unlock:
3871 	__task_rq_unlock(rq);
3872 }
3873 #endif
3874 void set_user_nice(struct task_struct *p, long nice)
3875 {
3876 	int old_prio, delta, on_rq;
3877 	unsigned long flags;
3878 	struct rq *rq;
3879 
3880 	if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
3881 		return;
3882 	/*
3883 	 * We have to be careful, if called from sys_setpriority(),
3884 	 * the task might be in the middle of scheduling on another CPU.
3885 	 */
3886 	rq = task_rq_lock(p, &flags);
3887 	/*
3888 	 * The RT priorities are set via sched_setscheduler(), but we still
3889 	 * allow the 'normal' nice value to be set - but as expected
3890 	 * it won't have any effect on scheduling until the task is
3891 	 * SCHED_FIFO/SCHED_RR:
3892 	 */
3893 	if (task_has_rt_policy(p)) {
3894 		p->static_prio = NICE_TO_PRIO(nice);
3895 		goto out_unlock;
3896 	}
3897 	on_rq = p->on_rq;
3898 	if (on_rq)
3899 		dequeue_task(rq, p, 0);
3900 
3901 	p->static_prio = NICE_TO_PRIO(nice);
3902 	set_load_weight(p);
3903 	old_prio = p->prio;
3904 	p->prio = effective_prio(p);
3905 	delta = p->prio - old_prio;
3906 
3907 	if (on_rq) {
3908 		enqueue_task(rq, p, 0);
3909 		/*
3910 		 * If the task increased its priority or is running and
3911 		 * lowered its priority, then reschedule its CPU:
3912 		 */
3913 		if (delta < 0 || (delta > 0 && task_running(rq, p)))
3914 			resched_task(rq->curr);
3915 	}
3916 out_unlock:
3917 	task_rq_unlock(rq, p, &flags);
3918 }
3919 EXPORT_SYMBOL(set_user_nice);
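
/*
 * Illustrative sketch (hypothetical caller): a background kernel thread can
 * lower its own priority before a long batch of work, for example:
 *
 *	set_user_nice(current, 10);
 *
 * which sets static_prio to NICE_TO_PRIO(10) == 130 and therefore a smaller
 * load weight than the default nice 0.
 */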
3920 
3921 /*
3922  * can_nice - check if a task can reduce its nice value
3923  * @p: task
3924  * @nice: nice value
3925  */
3926 int can_nice(const struct task_struct *p, const int nice)
3927 {
3928 	/* convert nice value [19,-20] to rlimit style value [1,40] */
3929 	int nice_rlim = 20 - nice;
3930 
3931 	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
3932 		capable(CAP_SYS_NICE));
3933 }
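
/*
 * Worked example of the mapping above (illustrative): nice 19 maps to rlimit
 * value 1, nice 0 to 20 and nice -20 to 40.  So a task with RLIMIT_NICE = 30
 * may request nice values down to -10 (20 - 30), while anything lower is
 * only allowed with CAP_SYS_NICE.
 */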
3934 
3935 #ifdef __ARCH_WANT_SYS_NICE
3936 
3937 /*
3938  * sys_nice - change the priority of the current process.
3939  * @increment: priority increment
3940  *
3941  * sys_setpriority is a more generic, but much slower function that
3942  * does similar things.
3943  */
3944 SYSCALL_DEFINE1(nice, int, increment)
3945 {
3946 	long nice, retval;
3947 
3948 	/*
3949 	 * Setpriority might change our priority at the same moment.
3950 	 * We don't have to worry. Conceptually one call occurs first
3951 	 * and we have a single winner.
3952 	 */
3953 	if (increment < -40)
3954 		increment = -40;
3955 	if (increment > 40)
3956 		increment = 40;
3957 
3958 	nice = TASK_NICE(current) + increment;
3959 	if (nice < -20)
3960 		nice = -20;
3961 	if (nice > 19)
3962 		nice = 19;
3963 
3964 	if (increment < 0 && !can_nice(current, nice))
3965 		return -EPERM;
3966 
3967 	retval = security_task_setnice(current, nice);
3968 	if (retval)
3969 		return retval;
3970 
3971 	set_user_nice(current, nice);
3972 	return 0;
3973 }
3974 
3975 #endif
3976 
3977 /**
3978  * task_prio - return the priority value of a given task.
3979  * @p: the task in question.
3980  *
3981  * This is the priority value as seen by users in /proc.
3982  * RT tasks are offset by -100 and so report values -100..-1.
3983  * Normal tasks map their nice value -20..19 to values 0..39.
3984  */
3985 int task_prio(const struct task_struct *p)
3986 {
3987 	return p->prio - MAX_RT_PRIO;
3988 }
3989 
3990 /**
3991  * task_nice - return the nice value of a given task.
3992  * @p: the task in question.
3993  */
3994 int task_nice(const struct task_struct *p)
3995 {
3996 	return TASK_NICE(p);
3997 }
3998 EXPORT_SYMBOL(task_nice);
3999 
4000 /**
4001  * idle_cpu - is a given cpu idle currently?
4002  * @cpu: the processor in question.
4003  */
4004 int idle_cpu(int cpu)
4005 {
4006 	struct rq *rq = cpu_rq(cpu);
4007 
4008 	if (rq->curr != rq->idle)
4009 		return 0;
4010 
4011 	if (rq->nr_running)
4012 		return 0;
4013 
4014 #ifdef CONFIG_SMP
4015 	if (!llist_empty(&rq->wake_list))
4016 		return 0;
4017 #endif
4018 
4019 	return 1;
4020 }
4021 
4022 /**
4023  * idle_task - return the idle task for a given cpu.
4024  * @cpu: the processor in question.
4025  */
4026 struct task_struct *idle_task(int cpu)
4027 {
4028 	return cpu_rq(cpu)->idle;
4029 }
4030 
4031 /**
4032  * find_process_by_pid - find a process with a matching PID value.
4033  * @pid: the pid in question.
4034  */
4035 static struct task_struct *find_process_by_pid(pid_t pid)
4036 {
4037 	return pid ? find_task_by_vpid(pid) : current;
4038 }
4039 
4040 /* Actually do priority change: must hold rq lock. */
4041 static void
4042 __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
4043 {
4044 	p->policy = policy;
4045 	p->rt_priority = prio;
4046 	p->normal_prio = normal_prio(p);
4047 	/* we are holding p->pi_lock already */
4048 	p->prio = rt_mutex_getprio(p);
4049 	if (rt_prio(p->prio))
4050 		p->sched_class = &rt_sched_class;
4051 	else
4052 		p->sched_class = &fair_sched_class;
4053 	set_load_weight(p);
4054 }
4055 
4056 /*
4057  * check whether the target process has a UID that matches the current one's
4058  */
4059 static bool check_same_owner(struct task_struct *p)
4060 {
4061 	const struct cred *cred = current_cred(), *pcred;
4062 	bool match;
4063 
4064 	rcu_read_lock();
4065 	pcred = __task_cred(p);
4066 	if (cred->user->user_ns == pcred->user->user_ns)
4067 		match = (cred->euid == pcred->euid ||
4068 			 cred->euid == pcred->uid);
4069 	else
4070 		match = false;
4071 	rcu_read_unlock();
4072 	return match;
4073 }
4074 
4075 static int __sched_setscheduler(struct task_struct *p, int policy,
4076 				const struct sched_param *param, bool user)
4077 {
4078 	int retval, oldprio, oldpolicy = -1, on_rq, running;
4079 	unsigned long flags;
4080 	const struct sched_class *prev_class;
4081 	struct rq *rq;
4082 	int reset_on_fork;
4083 
4084 	/* may grab non-irq protected spin_locks */
4085 	BUG_ON(in_interrupt());
4086 recheck:
4087 	/* double check policy once rq lock held */
4088 	if (policy < 0) {
4089 		reset_on_fork = p->sched_reset_on_fork;
4090 		policy = oldpolicy = p->policy;
4091 	} else {
4092 		reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
4093 		policy &= ~SCHED_RESET_ON_FORK;
4094 
4095 		if (policy != SCHED_FIFO && policy != SCHED_RR &&
4096 				policy != SCHED_NORMAL && policy != SCHED_BATCH &&
4097 				policy != SCHED_IDLE)
4098 			return -EINVAL;
4099 	}
4100 
4101 	/*
4102 	 * Valid priorities for SCHED_FIFO and SCHED_RR are
4103 	 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
4104 	 * SCHED_BATCH and SCHED_IDLE is 0.
4105 	 */
4106 	if (param->sched_priority < 0 ||
4107 	    (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
4108 	    (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
4109 		return -EINVAL;
4110 	if (rt_policy(policy) != (param->sched_priority != 0))
4111 		return -EINVAL;
4112 
4113 	/*
4114 	 * Allow unprivileged RT tasks to decrease priority:
4115 	 */
4116 	if (user && !capable(CAP_SYS_NICE)) {
4117 		if (rt_policy(policy)) {
4118 			unsigned long rlim_rtprio =
4119 					task_rlimit(p, RLIMIT_RTPRIO);
4120 
4121 			/* can't set/change the rt policy */
4122 			if (policy != p->policy && !rlim_rtprio)
4123 				return -EPERM;
4124 
4125 			/* can't increase priority */
4126 			if (param->sched_priority > p->rt_priority &&
4127 			    param->sched_priority > rlim_rtprio)
4128 				return -EPERM;
4129 		}
4130 
4131 		/*
4132 		 * Treat SCHED_IDLE as nice 20. Only allow a switch to
4133 		 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
4134 		 */
4135 		if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) {
4136 			if (!can_nice(p, TASK_NICE(p)))
4137 				return -EPERM;
4138 		}
4139 
4140 		/* can't change other user's priorities */
4141 		if (!check_same_owner(p))
4142 			return -EPERM;
4143 
4144 		/* Normal users shall not reset the sched_reset_on_fork flag */
4145 		if (p->sched_reset_on_fork && !reset_on_fork)
4146 			return -EPERM;
4147 	}
4148 
4149 	if (user) {
4150 		retval = security_task_setscheduler(p);
4151 		if (retval)
4152 			return retval;
4153 	}
4154 
4155 	/*
4156 	 * make sure no PI-waiters arrive (or leave) while we are
4157 	 * changing the priority of the task:
4158 	 *
4159 	 * To be able to change p->policy safely, the appropriate
4160 	 * runqueue lock must be held.
4161 	 */
4162 	rq = task_rq_lock(p, &flags);
4163 
4164 	/*
4165 	 * Changing the policy of the stop threads is a very bad idea
4166 	 */
4167 	if (p == rq->stop) {
4168 		task_rq_unlock(rq, p, &flags);
4169 		return -EINVAL;
4170 	}
4171 
4172 	/*
4173 	 * If not changing anything there's no need to proceed further:
4174 	 */
4175 	if (unlikely(policy == p->policy && (!rt_policy(policy) ||
4176 			param->sched_priority == p->rt_priority))) {
4177 
4178 		__task_rq_unlock(rq);
4179 		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4180 		return 0;
4181 	}
4182 
4183 #ifdef CONFIG_RT_GROUP_SCHED
4184 	if (user) {
4185 		/*
4186 		 * Do not allow realtime tasks into groups that have no runtime
4187 		 * assigned.
4188 		 */
4189 		if (rt_bandwidth_enabled() && rt_policy(policy) &&
4190 				task_group(p)->rt_bandwidth.rt_runtime == 0 &&
4191 				!task_group_is_autogroup(task_group(p))) {
4192 			task_rq_unlock(rq, p, &flags);
4193 			return -EPERM;
4194 		}
4195 	}
4196 #endif
4197 
4198 	/* recheck policy now with rq lock held */
4199 	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
4200 		policy = oldpolicy = -1;
4201 		task_rq_unlock(rq, p, &flags);
4202 		goto recheck;
4203 	}
4204 	on_rq = p->on_rq;
4205 	running = task_current(rq, p);
4206 	if (on_rq)
4207 		dequeue_task(rq, p, 0);
4208 	if (running)
4209 		p->sched_class->put_prev_task(rq, p);
4210 
4211 	p->sched_reset_on_fork = reset_on_fork;
4212 
4213 	oldprio = p->prio;
4214 	prev_class = p->sched_class;
4215 	__setscheduler(rq, p, policy, param->sched_priority);
4216 
4217 	if (running)
4218 		p->sched_class->set_curr_task(rq);
4219 	if (on_rq)
4220 		enqueue_task(rq, p, 0);
4221 
4222 	check_class_changed(rq, p, prev_class, oldprio);
4223 	task_rq_unlock(rq, p, &flags);
4224 
4225 	rt_mutex_adjust_pi(p);
4226 
4227 	return 0;
4228 }
4229 
4230 /**
4231  * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
4232  * @p: the task in question.
4233  * @policy: new policy.
4234  * @param: structure containing the new RT priority.
4235  *
4236  * NOTE that the task may already be dead.
4237  */
4238 int sched_setscheduler(struct task_struct *p, int policy,
4239 		       const struct sched_param *param)
4240 {
4241 	return __sched_setscheduler(p, policy, param, true);
4242 }
4243 EXPORT_SYMBOL_GPL(sched_setscheduler);
4244 
4245 /**
4246  * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
4247  * @p: the task in question.
4248  * @policy: new policy.
4249  * @param: structure containing the new RT priority.
4250  *
4251  * Just like sched_setscheduler, only don't bother checking if the
4252  * current context has permission.  For example, this is needed in
4253  * stop_machine(): we create temporary high priority worker threads,
4254  * but our caller might not have that capability.
4255  */
4256 int sched_setscheduler_nocheck(struct task_struct *p, int policy,
4257 			       const struct sched_param *param)
4258 {
4259 	return __sched_setscheduler(p, policy, param, false);
4260 }
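
/*
 * Illustrative sketch (hypothetical caller): promoting a kernel thread to a
 * real-time policy from code that may lack CAP_SYS_NICE.  'my_kthread' is a
 * stand-in for a task_struct pointer owned by the caller:
 *
 *	struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO / 2 };
 *
 *	sched_setscheduler_nocheck(my_kthread, SCHED_FIFO, &param);
 */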
4261 
4262 static int
4263 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
4264 {
4265 	struct sched_param lparam;
4266 	struct task_struct *p;
4267 	int retval;
4268 
4269 	if (!param || pid < 0)
4270 		return -EINVAL;
4271 	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
4272 		return -EFAULT;
4273 
4274 	rcu_read_lock();
4275 	retval = -ESRCH;
4276 	p = find_process_by_pid(pid);
4277 	if (p != NULL)
4278 		retval = sched_setscheduler(p, policy, &lparam);
4279 	rcu_read_unlock();
4280 
4281 	return retval;
4282 }
4283 
4284 /**
4285  * sys_sched_setscheduler - set/change the scheduler policy and RT priority
4286  * @pid: the pid in question.
4287  * @policy: new policy.
4288  * @param: structure containing the new RT priority.
4289  */
4290 SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
4291 		struct sched_param __user *, param)
4292 {
4293 	/* negative values for policy are not valid */
4294 	if (policy < 0)
4295 		return -EINVAL;
4296 
4297 	return do_sched_setscheduler(pid, policy, param);
4298 }
4299 
4300 /**
4301  * sys_sched_setparam - set/change the RT priority of a thread
4302  * @pid: the pid in question.
4303  * @param: structure containing the new RT priority.
4304  */
4305 SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
4306 {
4307 	return do_sched_setscheduler(pid, -1, param);
4308 }
4309 
4310 /**
4311  * sys_sched_getscheduler - get the policy (scheduling class) of a thread
4312  * @pid: the pid in question.
4313  */
4314 SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
4315 {
4316 	struct task_struct *p;
4317 	int retval;
4318 
4319 	if (pid < 0)
4320 		return -EINVAL;
4321 
4322 	retval = -ESRCH;
4323 	rcu_read_lock();
4324 	p = find_process_by_pid(pid);
4325 	if (p) {
4326 		retval = security_task_getscheduler(p);
4327 		if (!retval)
4328 			retval = p->policy
4329 				| (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
4330 	}
4331 	rcu_read_unlock();
4332 	return retval;
4333 }
4334 
4335 /**
4336  * sys_sched_getparam - get the RT priority of a thread
4337  * @pid: the pid in question.
4338  * @param: structure containing the RT priority.
4339  */
4340 SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
4341 {
4342 	struct sched_param lp;
4343 	struct task_struct *p;
4344 	int retval;
4345 
4346 	if (!param || pid < 0)
4347 		return -EINVAL;
4348 
4349 	rcu_read_lock();
4350 	p = find_process_by_pid(pid);
4351 	retval = -ESRCH;
4352 	if (!p)
4353 		goto out_unlock;
4354 
4355 	retval = security_task_getscheduler(p);
4356 	if (retval)
4357 		goto out_unlock;
4358 
4359 	lp.sched_priority = p->rt_priority;
4360 	rcu_read_unlock();
4361 
4362 	/*
4363 	 * This one might sleep, we cannot do it with a spinlock held ...
4364 	 */
4365 	retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
4366 
4367 	return retval;
4368 
4369 out_unlock:
4370 	rcu_read_unlock();
4371 	return retval;
4372 }
4373 
4374 long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
4375 {
4376 	cpumask_var_t cpus_allowed, new_mask;
4377 	struct task_struct *p;
4378 	int retval;
4379 
4380 	get_online_cpus();
4381 	rcu_read_lock();
4382 
4383 	p = find_process_by_pid(pid);
4384 	if (!p) {
4385 		rcu_read_unlock();
4386 		put_online_cpus();
4387 		return -ESRCH;
4388 	}
4389 
4390 	/* Prevent p going away */
4391 	get_task_struct(p);
4392 	rcu_read_unlock();
4393 
4394 	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
4395 		retval = -ENOMEM;
4396 		goto out_put_task;
4397 	}
4398 	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
4399 		retval = -ENOMEM;
4400 		goto out_free_cpus_allowed;
4401 	}
4402 	retval = -EPERM;
4403 	if (!check_same_owner(p) && !ns_capable(task_user_ns(p), CAP_SYS_NICE))
4404 		goto out_unlock;
4405 
4406 	retval = security_task_setscheduler(p);
4407 	if (retval)
4408 		goto out_unlock;
4409 
4410 	cpuset_cpus_allowed(p, cpus_allowed);
4411 	cpumask_and(new_mask, in_mask, cpus_allowed);
4412 again:
4413 	retval = set_cpus_allowed_ptr(p, new_mask);
4414 
4415 	if (!retval) {
4416 		cpuset_cpus_allowed(p, cpus_allowed);
4417 		if (!cpumask_subset(new_mask, cpus_allowed)) {
4418 			/*
4419 			 * We must have raced with a concurrent cpuset
4420 			 * update. Just reset the cpus_allowed to the
4421 			 * cpuset's cpus_allowed
4422 			 */
4423 			cpumask_copy(new_mask, cpus_allowed);
4424 			goto again;
4425 		}
4426 	}
4427 out_unlock:
4428 	free_cpumask_var(new_mask);
4429 out_free_cpus_allowed:
4430 	free_cpumask_var(cpus_allowed);
4431 out_put_task:
4432 	put_task_struct(p);
4433 	put_online_cpus();
4434 	return retval;
4435 }
4436 
4437 static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
4438 			     struct cpumask *new_mask)
4439 {
4440 	if (len < cpumask_size())
4441 		cpumask_clear(new_mask);
4442 	else if (len > cpumask_size())
4443 		len = cpumask_size();
4444 
4445 	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
4446 }
4447 
4448 /**
4449  * sys_sched_setaffinity - set the cpu affinity of a process
4450  * @pid: pid of the process
4451  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4452  * @user_mask_ptr: user-space pointer to the new cpu mask
4453  */
4454 SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
4455 		unsigned long __user *, user_mask_ptr)
4456 {
4457 	cpumask_var_t new_mask;
4458 	int retval;
4459 
4460 	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
4461 		return -ENOMEM;
4462 
4463 	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
4464 	if (retval == 0)
4465 		retval = sched_setaffinity(pid, new_mask);
4466 	free_cpumask_var(new_mask);
4467 	return retval;
4468 }
4469 
4470 long sched_getaffinity(pid_t pid, struct cpumask *mask)
4471 {
4472 	struct task_struct *p;
4473 	unsigned long flags;
4474 	int retval;
4475 
4476 	get_online_cpus();
4477 	rcu_read_lock();
4478 
4479 	retval = -ESRCH;
4480 	p = find_process_by_pid(pid);
4481 	if (!p)
4482 		goto out_unlock;
4483 
4484 	retval = security_task_getscheduler(p);
4485 	if (retval)
4486 		goto out_unlock;
4487 
4488 	raw_spin_lock_irqsave(&p->pi_lock, flags);
4489 	cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
4490 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4491 
4492 out_unlock:
4493 	rcu_read_unlock();
4494 	put_online_cpus();
4495 
4496 	return retval;
4497 }
4498 
4499 /**
4500  * sys_sched_getaffinity - get the cpu affinity of a process
4501  * @pid: pid of the process
4502  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4503  * @user_mask_ptr: user-space pointer to hold the current cpu mask
4504  */
4505 SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
4506 		unsigned long __user *, user_mask_ptr)
4507 {
4508 	int ret;
4509 	cpumask_var_t mask;
4510 
4511 	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
4512 		return -EINVAL;
4513 	if (len & (sizeof(unsigned long)-1))
4514 		return -EINVAL;
4515 
4516 	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
4517 		return -ENOMEM;
4518 
4519 	ret = sched_getaffinity(pid, mask);
4520 	if (ret == 0) {
4521 		size_t retlen = min_t(size_t, len, cpumask_size());
4522 
4523 		if (copy_to_user(user_mask_ptr, mask, retlen))
4524 			ret = -EFAULT;
4525 		else
4526 			ret = retlen;
4527 	}
4528 	free_cpumask_var(mask);
4529 
4530 	return ret;
4531 }
4532 
4533 /**
4534  * sys_sched_yield - yield the current processor to other threads.
4535  *
4536  * This function yields the current CPU to other tasks. If there are no
4537  * other runnable threads on this CPU, the caller simply keeps running.
4538  */
4539 SYSCALL_DEFINE0(sched_yield)
4540 {
4541 	struct rq *rq = this_rq_lock();
4542 
4543 	schedstat_inc(rq, yld_count);
4544 	current->sched_class->yield_task(rq);
4545 
4546 	/*
4547 	 * Since we are going to call schedule() anyway, there's
4548 	 * no need to preempt or enable interrupts:
4549 	 */
4550 	__release(rq->lock);
4551 	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
4552 	do_raw_spin_unlock(&rq->lock);
4553 	sched_preempt_enable_no_resched();
4554 
4555 	schedule();
4556 
4557 	return 0;
4558 }
4559 
4560 static inline int should_resched(void)
4561 {
4562 	return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
4563 }
4564 
4565 static void __cond_resched(void)
4566 {
4567 	add_preempt_count(PREEMPT_ACTIVE);
4568 	__schedule();
4569 	sub_preempt_count(PREEMPT_ACTIVE);
4570 }
4571 
4572 int __sched _cond_resched(void)
4573 {
4574 	if (should_resched()) {
4575 		__cond_resched();
4576 		return 1;
4577 	}
4578 	return 0;
4579 }
4580 EXPORT_SYMBOL(_cond_resched);
4581 
4582 /*
4583  * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
4584  * call schedule, and on return reacquire the lock.
4585  *
4586  * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
4587  * operations here to prevent schedule() from being called twice (once via
4588  * spin_unlock(), once by hand).
4589  */
4590 int __cond_resched_lock(spinlock_t *lock)
4591 {
4592 	int resched = should_resched();
4593 	int ret = 0;
4594 
4595 	lockdep_assert_held(lock);
4596 
4597 	if (spin_needbreak(lock) || resched) {
4598 		spin_unlock(lock);
4599 		if (resched)
4600 			__cond_resched();
4601 		else
4602 			cpu_relax();
4603 		ret = 1;
4604 		spin_lock(lock);
4605 	}
4606 	return ret;
4607 }
4608 EXPORT_SYMBOL(__cond_resched_lock);
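
/*
 * Illustrative sketch (hypothetical caller): draining a long list under a
 * spinlock while staying preemption-friendly.  cond_resched_lock() is the
 * usual wrapper around __cond_resched_lock(); 'my_lock', 'my_list' and
 * process_one() are made-up names:
 *
 *	spin_lock(&my_lock);
 *	while (!list_empty(&my_list)) {
 *		process_one(&my_list);
 *		cond_resched_lock(&my_lock);
 *	}
 *	spin_unlock(&my_lock);
 */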
4609 
4610 int __sched __cond_resched_softirq(void)
4611 {
4612 	BUG_ON(!in_softirq());
4613 
4614 	if (should_resched()) {
4615 		local_bh_enable();
4616 		__cond_resched();
4617 		local_bh_disable();
4618 		return 1;
4619 	}
4620 	return 0;
4621 }
4622 EXPORT_SYMBOL(__cond_resched_softirq);
4623 
4624 /**
4625  * yield - yield the current processor to other threads.
4626  *
4627  * Do not ever use this function, there's a 99% chance you're doing it wrong.
4628  *
4629  * The scheduler is at all times free to pick the calling task as the most
4630  * eligible task to run, if removing the yield() call from your code breaks
4631  * it, it's already broken.
4632  *
4633  * Typical broken usage is:
4634  *
4635  * while (!event)
4636  * 	yield();
4637  *
4638  * where one assumes that yield() will let 'the other' process run that will
4639  * make event true. If the current task is a SCHED_FIFO task that will never
4640  * happen. Never use yield() as a progress guarantee!!
4641  *
4642  * If you want to use yield() to wait for something, use wait_event().
4643  * If you want to use yield() to be 'nice' for others, use cond_resched().
4644  * If you still want to use yield(), do not!
4645  */
4646 void __sched yield(void)
4647 {
4648 	set_current_state(TASK_RUNNING);
4649 	sys_sched_yield();
4650 }
4651 EXPORT_SYMBOL(yield);
4652 
4653 /**
4654  * yield_to - yield the current processor to another thread in
4655  * your thread group, or accelerate that thread toward the
4656  * processor it's on.
4657  * @p: target task
4658  * @preempt: whether task preemption is allowed or not
4659  *
4660  * It's the caller's job to ensure that the target task struct
4661  * can't go away on us before we can do any checks.
4662  *
4663  * Returns true if we indeed boosted the target task.
4664  */
4665 bool __sched yield_to(struct task_struct *p, bool preempt)
4666 {
4667 	struct task_struct *curr = current;
4668 	struct rq *rq, *p_rq;
4669 	unsigned long flags;
4670 	bool yielded = false;
4671 
4672 	local_irq_save(flags);
4673 	rq = this_rq();
4674 
4675 again:
4676 	p_rq = task_rq(p);
4677 	double_rq_lock(rq, p_rq);
4678 	while (task_rq(p) != p_rq) {
4679 		double_rq_unlock(rq, p_rq);
4680 		goto again;
4681 	}
4682 
4683 	if (!curr->sched_class->yield_to_task)
4684 		goto out;
4685 
4686 	if (curr->sched_class != p->sched_class)
4687 		goto out;
4688 
4689 	if (task_running(p_rq, p) || p->state)
4690 		goto out;
4691 
4692 	yielded = curr->sched_class->yield_to_task(rq, p, preempt);
4693 	if (yielded) {
4694 		schedstat_inc(rq, yld_count);
4695 		/*
4696 		 * Make p's CPU reschedule; pick_next_entity takes care of
4697 		 * fairness.
4698 		 */
4699 		if (preempt && rq != p_rq)
4700 			resched_task(p_rq->curr);
4701 	} else {
4702 		/*
4703 		 * We might have set skip_clock_update in yield_task_fair(),
4704 		 * but we are not going to schedule(), so we don't want to
4705 		 * skip the next update.
4706 		 */
4707 		rq->skip_clock_update = 0;
4708 	}
4709 
4710 out:
4711 	double_rq_unlock(rq, p_rq);
4712 	local_irq_restore(flags);
4713 
4714 	if (yielded)
4715 		schedule();
4716 
4717 	return yielded;
4718 }
4719 EXPORT_SYMBOL_GPL(yield_to);
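
/*
 * Illustrative sketch (hypothetical caller): a hypervisor that detects a
 * spinning vcpu can donate the spinner's timeslice to the task believed to
 * hold the contended lock (roughly how KVM's directed yield uses this API).
 * 'holder' is a stand-in for a task_struct the caller has pinned, as the
 * kernel-doc above requires:
 *
 *	if (holder && holder != current)
 *		yield_to(holder, true);
 */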
4720 
4721 /*
4722  * This task is about to go to sleep on IO. Increment rq->nr_iowait so
4723  * that process accounting knows that this is a task in IO wait state.
4724  */
4725 void __sched io_schedule(void)
4726 {
4727 	struct rq *rq = raw_rq();
4728 
4729 	delayacct_blkio_start();
4730 	atomic_inc(&rq->nr_iowait);
4731 	blk_flush_plug(current);
4732 	current->in_iowait = 1;
4733 	schedule();
4734 	current->in_iowait = 0;
4735 	atomic_dec(&rq->nr_iowait);
4736 	delayacct_blkio_end();
4737 }
4738 EXPORT_SYMBOL(io_schedule);
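
/*
 * Illustrative sketch (hypothetical caller): sleeping until an I/O
 * completion path does wake_up(&done_wq) and sets 'done', while charging
 * the sleep to iowait.  'done_wq' and 'done' are made-up names:
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&done_wq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (done)
 *			break;
 *		io_schedule();
 *	}
 *	finish_wait(&done_wq, &wait);
 */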
4739 
4740 long __sched io_schedule_timeout(long timeout)
4741 {
4742 	struct rq *rq = raw_rq();
4743 	long ret;
4744 
4745 	delayacct_blkio_start();
4746 	atomic_inc(&rq->nr_iowait);
4747 	blk_flush_plug(current);
4748 	current->in_iowait = 1;
4749 	ret = schedule_timeout(timeout);
4750 	current->in_iowait = 0;
4751 	atomic_dec(&rq->nr_iowait);
4752 	delayacct_blkio_end();
4753 	return ret;
4754 }
4755 
4756 /**
4757  * sys_sched_get_priority_max - return maximum RT priority.
4758  * @policy: scheduling class.
4759  *
4760  * this syscall returns the maximum rt_priority that can be used
4761  * by a given scheduling class.
4762  */
4763 SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
4764 {
4765 	int ret = -EINVAL;
4766 
4767 	switch (policy) {
4768 	case SCHED_FIFO:
4769 	case SCHED_RR:
4770 		ret = MAX_USER_RT_PRIO-1;
4771 		break;
4772 	case SCHED_NORMAL:
4773 	case SCHED_BATCH:
4774 	case SCHED_IDLE:
4775 		ret = 0;
4776 		break;
4777 	}
4778 	return ret;
4779 }
4780 
4781 /**
4782  * sys_sched_get_priority_min - return minimum RT priority.
4783  * @policy: scheduling class.
4784  *
4785  * this syscall returns the minimum rt_priority that can be used
4786  * by a given scheduling class.
4787  */
4788 SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
4789 {
4790 	int ret = -EINVAL;
4791 
4792 	switch (policy) {
4793 	case SCHED_FIFO:
4794 	case SCHED_RR:
4795 		ret = 1;
4796 		break;
4797 	case SCHED_NORMAL:
4798 	case SCHED_BATCH:
4799 	case SCHED_IDLE:
4800 		ret = 0;
4801 	}
4802 	return ret;
4803 }
4804 
4805 /**
4806  * sys_sched_rr_get_interval - return the default timeslice of a process.
4807  * @pid: pid of the process.
4808  * @interval: userspace pointer to the timeslice value.
4809  *
4810  * this syscall writes the default timeslice value of a given process
4811  * into the user-space timespec buffer. A value of '0' means infinity.
4812  */
4813 SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
4814 		struct timespec __user *, interval)
4815 {
4816 	struct task_struct *p;
4817 	unsigned int time_slice;
4818 	unsigned long flags;
4819 	struct rq *rq;
4820 	int retval;
4821 	struct timespec t;
4822 
4823 	if (pid < 0)
4824 		return -EINVAL;
4825 
4826 	retval = -ESRCH;
4827 	rcu_read_lock();
4828 	p = find_process_by_pid(pid);
4829 	if (!p)
4830 		goto out_unlock;
4831 
4832 	retval = security_task_getscheduler(p);
4833 	if (retval)
4834 		goto out_unlock;
4835 
4836 	rq = task_rq_lock(p, &flags);
4837 	time_slice = p->sched_class->get_rr_interval(rq, p);
4838 	task_rq_unlock(rq, p, &flags);
4839 
4840 	rcu_read_unlock();
4841 	jiffies_to_timespec(time_slice, &t);
4842 	retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
4843 	return retval;
4844 
4845 out_unlock:
4846 	rcu_read_unlock();
4847 	return retval;
4848 }
4849 
4850 static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
4851 
4852 void sched_show_task(struct task_struct *p)
4853 {
4854 	unsigned long free = 0;
4855 	unsigned state;
4856 
4857 	state = p->state ? __ffs(p->state) + 1 : 0;
4858 	printk(KERN_INFO "%-15.15s %c", p->comm,
4859 		state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
4860 #if BITS_PER_LONG == 32
4861 	if (state == TASK_RUNNING)
4862 		printk(KERN_CONT " running  ");
4863 	else
4864 		printk(KERN_CONT " %08lx ", thread_saved_pc(p));
4865 #else
4866 	if (state == TASK_RUNNING)
4867 		printk(KERN_CONT "  running task    ");
4868 	else
4869 		printk(KERN_CONT " %016lx ", thread_saved_pc(p));
4870 #endif
4871 #ifdef CONFIG_DEBUG_STACK_USAGE
4872 	free = stack_not_used(p);
4873 #endif
4874 	printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
4875 		task_pid_nr(p), task_pid_nr(rcu_dereference(p->real_parent)),
4876 		(unsigned long)task_thread_info(p)->flags);
4877 
4878 	show_stack(p, NULL);
4879 }
4880 
4881 void show_state_filter(unsigned long state_filter)
4882 {
4883 	struct task_struct *g, *p;
4884 
4885 #if BITS_PER_LONG == 32
4886 	printk(KERN_INFO
4887 		"  task                PC stack   pid father\n");
4888 #else
4889 	printk(KERN_INFO
4890 		"  task                        PC stack   pid father\n");
4891 #endif
4892 	rcu_read_lock();
4893 	do_each_thread(g, p) {
4894 		/*
4895 		 * reset the NMI-timeout, listing all tasks on a slow
4896 		 * console might take a lot of time:
4897 		 */
4898 		touch_nmi_watchdog();
4899 		if (!state_filter || (p->state & state_filter))
4900 			sched_show_task(p);
4901 	} while_each_thread(g, p);
4902 
4903 	touch_all_softlockup_watchdogs();
4904 
4905 #ifdef CONFIG_SCHED_DEBUG
4906 	sysrq_sched_debug_show();
4907 #endif
4908 	rcu_read_unlock();
4909 	/*
4910 	 * Only show locks if all tasks are dumped:
4911 	 */
4912 	if (!state_filter)
4913 		debug_show_all_locks();
4914 }
4915 
4916 void __cpuinit init_idle_bootup_task(struct task_struct *idle)
4917 {
4918 	idle->sched_class = &idle_sched_class;
4919 }
4920 
4921 /**
4922  * init_idle - set up an idle thread for a given CPU
4923  * @idle: task in question
4924  * @cpu: cpu the idle task belongs to
4925  *
4926  * NOTE: this function does not set the idle thread's NEED_RESCHED
4927  * flag, to make booting more robust.
4928  */
4929 void __cpuinit init_idle(struct task_struct *idle, int cpu)
4930 {
4931 	struct rq *rq = cpu_rq(cpu);
4932 	unsigned long flags;
4933 
4934 	raw_spin_lock_irqsave(&rq->lock, flags);
4935 
4936 	__sched_fork(idle);
4937 	idle->state = TASK_RUNNING;
4938 	idle->se.exec_start = sched_clock();
4939 
4940 	do_set_cpus_allowed(idle, cpumask_of(cpu));
4941 	/*
4942 	 * We're having a chicken and egg problem, even though we are
4943 	 * holding rq->lock, the cpu isn't yet set to this cpu so the
4944 	 * lockdep check in task_group() will fail.
4945 	 *
4946 	 * Similar case to sched_fork(). Alternatively we could
4947 	 * use task_rq_lock() here and obtain the other rq->lock.
4948 	 *
4949 	 * Silence PROVE_RCU
4950 	 */
4951 	rcu_read_lock();
4952 	__set_task_cpu(idle, cpu);
4953 	rcu_read_unlock();
4954 
4955 	rq->curr = rq->idle = idle;
4956 #if defined(CONFIG_SMP)
4957 	idle->on_cpu = 1;
4958 #endif
4959 	raw_spin_unlock_irqrestore(&rq->lock, flags);
4960 
4961 	/* Set the preempt count _outside_ the spinlocks! */
4962 	task_thread_info(idle)->preempt_count = 0;
4963 
4964 	/*
4965 	 * The idle tasks have their own, simple scheduling class:
4966 	 */
4967 	idle->sched_class = &idle_sched_class;
4968 	ftrace_graph_init_idle_task(idle, cpu);
4969 #if defined(CONFIG_SMP)
4970 	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
4971 #endif
4972 }
4973 
4974 #ifdef CONFIG_SMP
4975 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
4976 {
4977 	if (p->sched_class && p->sched_class->set_cpus_allowed)
4978 		p->sched_class->set_cpus_allowed(p, new_mask);
4979 
4980 	cpumask_copy(&p->cpus_allowed, new_mask);
4981 	p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
4982 }
4983 
4984 /*
4985  * This is how migration works:
4986  *
4987  * 1) we invoke migration_cpu_stop() on the target CPU using
4988  *    stop_one_cpu().
4989  * 2) stopper starts to run (implicitly forcing the migrated thread
4990  *    off the CPU)
4991  * 3) it checks whether the migrated task is still in the wrong runqueue.
4992  * 4) if it's in the wrong runqueue then the migration thread removes
4993  *    it and puts it into the right queue.
4994  * 5) stopper completes and stop_one_cpu() returns and the migration
4995  *    is done.
4996  */
4997 
4998 /*
4999  * Change a given task's CPU affinity. Migrate the thread to a
5000  * proper CPU and schedule it away if the CPU it's executing on
5001  * is removed from the allowed bitmask.
5002  *
5003  * NOTE: the caller must have a valid reference to the task, the
5004  * task must not exit() & deallocate itself prematurely. The
5005  * call is not atomic; no spinlocks may be held.
5006  */
5007 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
5008 {
5009 	unsigned long flags;
5010 	struct rq *rq;
5011 	unsigned int dest_cpu;
5012 	int ret = 0;
5013 
5014 	rq = task_rq_lock(p, &flags);
5015 
5016 	if (cpumask_equal(&p->cpus_allowed, new_mask))
5017 		goto out;
5018 
5019 	if (!cpumask_intersects(new_mask, cpu_active_mask)) {
5020 		ret = -EINVAL;
5021 		goto out;
5022 	}
5023 
5024 	if (unlikely((p->flags & PF_THREAD_BOUND) && p != current)) {
5025 		ret = -EINVAL;
5026 		goto out;
5027 	}
5028 
5029 	do_set_cpus_allowed(p, new_mask);
5030 
5031 	/* Can the task run on the task's current CPU? If so, we're done */
5032 	if (cpumask_test_cpu(task_cpu(p), new_mask))
5033 		goto out;
5034 
5035 	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
5036 	if (p->on_rq) {
5037 		struct migration_arg arg = { p, dest_cpu };
5038 		/* Need help from migration thread: drop lock and wait. */
5039 		task_rq_unlock(rq, p, &flags);
5040 		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
5041 		tlb_migrate_finish(p->mm);
5042 		return 0;
5043 	}
5044 out:
5045 	task_rq_unlock(rq, p, &flags);
5046 
5047 	return ret;
5048 }
5049 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
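
/*
 * Illustrative sketch (hypothetical caller): pinning a helper kthread to a
 * single CPU and later letting it run anywhere again.  'my_task' is a
 * stand-in for a task_struct pointer the caller holds a reference on:
 *
 *	set_cpus_allowed_ptr(my_task, cpumask_of(3));
 *
 * and, once the pinned phase is over:
 *
 *	set_cpus_allowed_ptr(my_task, cpu_possible_mask);
 */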
5050 
5051 /*
5052  * Move (not current) task off this cpu, onto dest cpu. We're doing
5053  * this because either it can't run here any more (set_cpus_allowed()
5054  * away from this CPU, or CPU going down), or because we're
5055  * attempting to rebalance this task on exec (sched_exec).
5056  *
5057  * So we race with normal scheduler movements, but that's OK, as long
5058  * as the task is no longer on this CPU.
5059  *
5060  * Returns non-zero if task was successfully migrated.
5061  */
5062 static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
5063 {
5064 	struct rq *rq_dest, *rq_src;
5065 	int ret = 0;
5066 
5067 	if (unlikely(!cpu_active(dest_cpu)))
5068 		return ret;
5069 
5070 	rq_src = cpu_rq(src_cpu);
5071 	rq_dest = cpu_rq(dest_cpu);
5072 
5073 	raw_spin_lock(&p->pi_lock);
5074 	double_rq_lock(rq_src, rq_dest);
5075 	/* Already moved. */
5076 	if (task_cpu(p) != src_cpu)
5077 		goto done;
5078 	/* Affinity changed (again). */
5079 	if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
5080 		goto fail;
5081 
5082 	/*
5083 	 * If we're not on a rq, the next wake-up will ensure we're
5084 	 * placed properly.
5085 	 */
5086 	if (p->on_rq) {
5087 		dequeue_task(rq_src, p, 0);
5088 		set_task_cpu(p, dest_cpu);
5089 		enqueue_task(rq_dest, p, 0);
5090 		check_preempt_curr(rq_dest, p, 0);
5091 	}
5092 done:
5093 	ret = 1;
5094 fail:
5095 	double_rq_unlock(rq_src, rq_dest);
5096 	raw_spin_unlock(&p->pi_lock);
5097 	return ret;
5098 }
5099 
5100 /*
5101  * migration_cpu_stop - this will be executed by a highprio stopper thread
5102  * and performs thread migration by bumping thread off CPU then
5103  * 'pushing' onto another runqueue.
5104  */
5105 static int migration_cpu_stop(void *data)
5106 {
5107 	struct migration_arg *arg = data;
5108 
5109 	/*
5110 	 * The original target cpu might have gone down and we might
5111 	 * be on another cpu but it doesn't matter.
5112 	 */
5113 	local_irq_disable();
5114 	__migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
5115 	local_irq_enable();
5116 	return 0;
5117 }
5118 
5119 #ifdef CONFIG_HOTPLUG_CPU
5120 
5121 /*
5122  * Ensures that the idle task is using init_mm right before its cpu goes
5123  * offline.
5124  */
5125 void idle_task_exit(void)
5126 {
5127 	struct mm_struct *mm = current->active_mm;
5128 
5129 	BUG_ON(cpu_online(smp_processor_id()));
5130 
5131 	if (mm != &init_mm)
5132 		switch_mm(mm, &init_mm, current);
5133 	mmdrop(mm);
5134 }
5135 
5136 /*
5137  * While a dead CPU has no uninterruptible tasks queued at this point,
5138  * it might still have a nonzero ->nr_uninterruptible counter, because
5139  * for performance reasons the counter is not stricly tracking tasks to
5140  * for performance reasons the counter is not strictly tracking tasks to
5141  * to keep the global sum constant after CPU-down:
5142  */
5143 static void migrate_nr_uninterruptible(struct rq *rq_src)
5144 {
5145 	struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
5146 
5147 	rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
5148 	rq_src->nr_uninterruptible = 0;
5149 }
5150 
5151 /*
5152  * remove the tasks which were accounted by rq from calc_load_tasks.
5153  */
5154 static void calc_global_load_remove(struct rq *rq)
5155 {
5156 	atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
5157 	rq->calc_load_active = 0;
5158 }
5159 
5160 /*
5161  * Migrate all tasks from the rq; sleeping tasks will be migrated by
5162  * try_to_wake_up()->select_task_rq().
5163  *
5164  * Called with rq->lock held even though we're in stop_machine() and
5165  * there's no concurrency possible; we hold the required locks anyway
5166  * because of lock validation efforts.
5167  */
5168 static void migrate_tasks(unsigned int dead_cpu)
5169 {
5170 	struct rq *rq = cpu_rq(dead_cpu);
5171 	struct task_struct *next, *stop = rq->stop;
5172 	int dest_cpu;
5173 
5174 	/*
5175 	 * Fudge the rq selection such that the below task selection loop
5176 	 * doesn't get stuck on the currently eligible stop task.
5177 	 *
5178 	 * We're currently inside stop_machine() and the rq is either stuck
5179 	 * in the stop_machine_cpu_stop() loop, or we're executing this code;
5180 	 * either way we should never end up calling schedule() until we're
5181 	 * done here.
5182 	 */
5183 	rq->stop = NULL;
5184 
5185 	/* Ensure any throttled groups are reachable by pick_next_task */
5186 	unthrottle_offline_cfs_rqs(rq);
5187 
5188 	for ( ; ; ) {
5189 		/*
5190 		 * There's this thread running, bail when that's the only
5191 		 * remaining thread.
5192 		 */
5193 		if (rq->nr_running == 1)
5194 			break;
5195 
5196 		next = pick_next_task(rq);
5197 		BUG_ON(!next);
5198 		next->sched_class->put_prev_task(rq, next);
5199 
5200 		/* Find suitable destination for @next, with force if needed. */
5201 		dest_cpu = select_fallback_rq(dead_cpu, next);
5202 		raw_spin_unlock(&rq->lock);
5203 
5204 		__migrate_task(next, dead_cpu, dest_cpu);
5205 
5206 		raw_spin_lock(&rq->lock);
5207 	}
5208 
5209 	rq->stop = stop;
5210 }
5211 
5212 #endif /* CONFIG_HOTPLUG_CPU */
5213 
5214 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
5215 
5216 static struct ctl_table sd_ctl_dir[] = {
5217 	{
5218 		.procname	= "sched_domain",
5219 		.mode		= 0555,
5220 	},
5221 	{}
5222 };
5223 
5224 static struct ctl_table sd_ctl_root[] = {
5225 	{
5226 		.procname	= "kernel",
5227 		.mode		= 0555,
5228 		.child		= sd_ctl_dir,
5229 	},
5230 	{}
5231 };
5232 
5233 static struct ctl_table *sd_alloc_ctl_entry(int n)
5234 {
5235 	struct ctl_table *entry =
5236 		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
5237 
5238 	return entry;
5239 }
5240 
5241 static void sd_free_ctl_entry(struct ctl_table **tablep)
5242 {
5243 	struct ctl_table *entry;
5244 
5245 	/*
5246 	 * In the intermediate directories, both the child directory and
5247 	 * procname are dynamically allocated and could fail but the mode
5248 	 * will always be set. In the lowest directory the names are
5249 	 * static strings and all have proc handlers.
5250 	 */
5251 	for (entry = *tablep; entry->mode; entry++) {
5252 		if (entry->child)
5253 			sd_free_ctl_entry(&entry->child);
5254 		if (entry->proc_handler == NULL)
5255 			kfree(entry->procname);
5256 	}
5257 
5258 	kfree(*tablep);
5259 	*tablep = NULL;
5260 }
5261 
5262 static void
5263 set_table_entry(struct ctl_table *entry,
5264 		const char *procname, void *data, int maxlen,
5265 		umode_t mode, proc_handler *proc_handler)
5266 {
5267 	entry->procname = procname;
5268 	entry->data = data;
5269 	entry->maxlen = maxlen;
5270 	entry->mode = mode;
5271 	entry->proc_handler = proc_handler;
5272 }
5273 
5274 static struct ctl_table *
5275 sd_alloc_ctl_domain_table(struct sched_domain *sd)
5276 {
5277 	struct ctl_table *table = sd_alloc_ctl_entry(13);
5278 
5279 	if (table == NULL)
5280 		return NULL;
5281 
5282 	set_table_entry(&table[0], "min_interval", &sd->min_interval,
5283 		sizeof(long), 0644, proc_doulongvec_minmax);
5284 	set_table_entry(&table[1], "max_interval", &sd->max_interval,
5285 		sizeof(long), 0644, proc_doulongvec_minmax);
5286 	set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
5287 		sizeof(int), 0644, proc_dointvec_minmax);
5288 	set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
5289 		sizeof(int), 0644, proc_dointvec_minmax);
5290 	set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
5291 		sizeof(int), 0644, proc_dointvec_minmax);
5292 	set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
5293 		sizeof(int), 0644, proc_dointvec_minmax);
5294 	set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
5295 		sizeof(int), 0644, proc_dointvec_minmax);
5296 	set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
5297 		sizeof(int), 0644, proc_dointvec_minmax);
5298 	set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
5299 		sizeof(int), 0644, proc_dointvec_minmax);
5300 	set_table_entry(&table[9], "cache_nice_tries",
5301 		&sd->cache_nice_tries,
5302 		sizeof(int), 0644, proc_dointvec_minmax);
5303 	set_table_entry(&table[10], "flags", &sd->flags,
5304 		sizeof(int), 0644, proc_dointvec_minmax);
5305 	set_table_entry(&table[11], "name", sd->name,
5306 		CORENAME_MAX_SIZE, 0444, proc_dostring);
5307 	/* &table[12] is terminator */
5308 
5309 	return table;
5310 }
5311 
5312 static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
5313 {
5314 	struct ctl_table *entry, *table;
5315 	struct sched_domain *sd;
5316 	int domain_num = 0, i;
5317 	char buf[32];
5318 
5319 	for_each_domain(cpu, sd)
5320 		domain_num++;
5321 	entry = table = sd_alloc_ctl_entry(domain_num + 1);
5322 	if (table == NULL)
5323 		return NULL;
5324 
5325 	i = 0;
5326 	for_each_domain(cpu, sd) {
5327 		snprintf(buf, 32, "domain%d", i);
5328 		entry->procname = kstrdup(buf, GFP_KERNEL);
5329 		entry->mode = 0555;
5330 		entry->child = sd_alloc_ctl_domain_table(sd);
5331 		entry++;
5332 		i++;
5333 	}
5334 	return table;
5335 }
5336 
5337 static struct ctl_table_header *sd_sysctl_header;
5338 static void register_sched_domain_sysctl(void)
5339 {
5340 	int i, cpu_num = num_possible_cpus();
5341 	struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
5342 	char buf[32];
5343 
5344 	WARN_ON(sd_ctl_dir[0].child);
5345 	sd_ctl_dir[0].child = entry;
5346 
5347 	if (entry == NULL)
5348 		return;
5349 
5350 	for_each_possible_cpu(i) {
5351 		snprintf(buf, 32, "cpu%d", i);
5352 		entry->procname = kstrdup(buf, GFP_KERNEL);
5353 		entry->mode = 0555;
5354 		entry->child = sd_alloc_ctl_cpu_table(i);
5355 		entry++;
5356 	}
5357 
5358 	WARN_ON(sd_sysctl_header);
5359 	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
5360 }
5361 
5362 /* may be called multiple times per register */
5363 static void unregister_sched_domain_sysctl(void)
5364 {
5365 	if (sd_sysctl_header)
5366 		unregister_sysctl_table(sd_sysctl_header);
5367 	sd_sysctl_header = NULL;
5368 	if (sd_ctl_dir[0].child)
5369 		sd_free_ctl_entry(&sd_ctl_dir[0].child);
5370 }
5371 #else
5372 static void register_sched_domain_sysctl(void)
5373 {
5374 }
5375 static void unregister_sched_domain_sysctl(void)
5376 {
5377 }
5378 #endif
5379 
5380 static void set_rq_online(struct rq *rq)
5381 {
5382 	if (!rq->online) {
5383 		const struct sched_class *class;
5384 
5385 		cpumask_set_cpu(rq->cpu, rq->rd->online);
5386 		rq->online = 1;
5387 
5388 		for_each_class(class) {
5389 			if (class->rq_online)
5390 				class->rq_online(rq);
5391 		}
5392 	}
5393 }
5394 
5395 static void set_rq_offline(struct rq *rq)
5396 {
5397 	if (rq->online) {
5398 		const struct sched_class *class;
5399 
5400 		for_each_class(class) {
5401 			if (class->rq_offline)
5402 				class->rq_offline(rq);
5403 		}
5404 
5405 		cpumask_clear_cpu(rq->cpu, rq->rd->online);
5406 		rq->online = 0;
5407 	}
5408 }
5409 
5410 /*
5411  * migration_call - callback that gets triggered when a CPU is added.
5412  * Here we can start up the necessary migration thread for the new CPU.
5413  */
5414 static int __cpuinit
5415 migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
5416 {
5417 	int cpu = (long)hcpu;
5418 	unsigned long flags;
5419 	struct rq *rq = cpu_rq(cpu);
5420 
5421 	switch (action & ~CPU_TASKS_FROZEN) {
5422 
5423 	case CPU_UP_PREPARE:
5424 		rq->calc_load_update = calc_load_update;
5425 		break;
5426 
5427 	case CPU_ONLINE:
5428 		/* Update our root-domain */
5429 		raw_spin_lock_irqsave(&rq->lock, flags);
5430 		if (rq->rd) {
5431 			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
5432 
5433 			set_rq_online(rq);
5434 		}
5435 		raw_spin_unlock_irqrestore(&rq->lock, flags);
5436 		break;
5437 
5438 #ifdef CONFIG_HOTPLUG_CPU
5439 	case CPU_DYING:
5440 		sched_ttwu_pending();
5441 		/* Update our root-domain */
5442 		raw_spin_lock_irqsave(&rq->lock, flags);
5443 		if (rq->rd) {
5444 			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
5445 			set_rq_offline(rq);
5446 		}
5447 		migrate_tasks(cpu);
5448 		BUG_ON(rq->nr_running != 1); /* the migration thread */
5449 		raw_spin_unlock_irqrestore(&rq->lock, flags);
5450 
5451 		migrate_nr_uninterruptible(rq);
5452 		calc_global_load_remove(rq);
5453 		break;
5454 #endif
5455 	}
5456 
5457 	update_max_interval();
5458 
5459 	return NOTIFY_OK;
5460 }
5461 
5462 /*
5463  * Register at high priority so that task migration (migrate_all_tasks)
5464  * happens before everything else.  This has to be lower priority than
5465  * the notifier in the perf_event subsystem, though.
5466  */
5467 static struct notifier_block __cpuinitdata migration_notifier = {
5468 	.notifier_call = migration_call,
5469 	.priority = CPU_PRI_MIGRATION,
5470 };
5471 
5472 static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
5473 				      unsigned long action, void *hcpu)
5474 {
5475 	switch (action & ~CPU_TASKS_FROZEN) {
5476 	case CPU_STARTING:
5477 	case CPU_DOWN_FAILED:
5478 		set_cpu_active((long)hcpu, true);
5479 		return NOTIFY_OK;
5480 	default:
5481 		return NOTIFY_DONE;
5482 	}
5483 }
5484 
5485 static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
5486 					unsigned long action, void *hcpu)
5487 {
5488 	switch (action & ~CPU_TASKS_FROZEN) {
5489 	case CPU_DOWN_PREPARE:
5490 		set_cpu_active((long)hcpu, false);
5491 		return NOTIFY_OK;
5492 	default:
5493 		return NOTIFY_DONE;
5494 	}
5495 }
5496 
5497 static int __init migration_init(void)
5498 {
5499 	void *cpu = (void *)(long)smp_processor_id();
5500 	int err;
5501 
5502 	/* Initialize migration for the boot CPU */
5503 	err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
5504 	BUG_ON(err == NOTIFY_BAD);
5505 	migration_call(&migration_notifier, CPU_ONLINE, cpu);
5506 	register_cpu_notifier(&migration_notifier);
5507 
5508 	/* Register cpu active notifiers */
5509 	cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
5510 	cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
5511 
5512 	return 0;
5513 }
5514 early_initcall(migration_init);
5515 #endif
5516 
5517 #ifdef CONFIG_SMP
5518 
5519 static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
5520 
5521 #ifdef CONFIG_SCHED_DEBUG
5522 
5523 static __read_mostly int sched_domain_debug_enabled;
5524 
5525 static int __init sched_domain_debug_setup(char *str)
5526 {
5527 	sched_domain_debug_enabled = 1;
5528 
5529 	return 0;
5530 }
5531 early_param("sched_debug", sched_domain_debug_setup);
5532 
5533 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
5534 				  struct cpumask *groupmask)
5535 {
5536 	struct sched_group *group = sd->groups;
5537 	char str[256];
5538 
5539 	cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
5540 	cpumask_clear(groupmask);
5541 
5542 	printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
5543 
5544 	if (!(sd->flags & SD_LOAD_BALANCE)) {
5545 		printk("does not load-balance\n");
5546 		if (sd->parent)
5547 			printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
5548 					" has parent\n");
5549 		return -1;
5550 	}
5551 
5552 	printk(KERN_CONT "span %s level %s\n", str, sd->name);
5553 
5554 	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
5555 		printk(KERN_ERR "ERROR: domain->span does not contain "
5556 				"CPU%d\n", cpu);
5557 	}
5558 	if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
5559 		printk(KERN_ERR "ERROR: domain->groups does not contain"
5560 				" CPU%d\n", cpu);
5561 	}
5562 
5563 	printk(KERN_DEBUG "%*s groups:", level + 1, "");
5564 	do {
5565 		if (!group) {
5566 			printk("\n");
5567 			printk(KERN_ERR "ERROR: group is NULL\n");
5568 			break;
5569 		}
5570 
5571 		if (!group->sgp->power) {
5572 			printk(KERN_CONT "\n");
5573 			printk(KERN_ERR "ERROR: domain->cpu_power not "
5574 					"set\n");
5575 			break;
5576 		}
5577 
5578 		if (!cpumask_weight(sched_group_cpus(group))) {
5579 			printk(KERN_CONT "\n");
5580 			printk(KERN_ERR "ERROR: empty group\n");
5581 			break;
5582 		}
5583 
5584 		if (!(sd->flags & SD_OVERLAP) &&
5585 		    cpumask_intersects(groupmask, sched_group_cpus(group))) {
5586 			printk(KERN_CONT "\n");
5587 			printk(KERN_ERR "ERROR: repeated CPUs\n");
5588 			break;
5589 		}
5590 
5591 		cpumask_or(groupmask, groupmask, sched_group_cpus(group));
5592 
5593 		cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
5594 
5595 		printk(KERN_CONT " %s", str);
5596 		if (group->sgp->power != SCHED_POWER_SCALE) {
5597 			printk(KERN_CONT " (cpu_power = %d)",
5598 				group->sgp->power);
5599 		}
5600 
5601 		group = group->next;
5602 	} while (group != sd->groups);
5603 	printk(KERN_CONT "\n");
5604 
5605 	if (!cpumask_equal(sched_domain_span(sd), groupmask))
5606 		printk(KERN_ERR "ERROR: groups don't span domain->span\n");
5607 
5608 	if (sd->parent &&
5609 	    !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
5610 		printk(KERN_ERR "ERROR: parent span is not a superset "
5611 			"of domain->span\n");
5612 	return 0;
5613 }
5614 
5615 static void sched_domain_debug(struct sched_domain *sd, int cpu)
5616 {
5617 	int level = 0;
5618 
5619 	if (!sched_domain_debug_enabled)
5620 		return;
5621 
5622 	if (!sd) {
5623 		printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
5624 		return;
5625 	}
5626 
5627 	printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
5628 
5629 	for (;;) {
5630 		if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
5631 			break;
5632 		level++;
5633 		sd = sd->parent;
5634 		if (!sd)
5635 			break;
5636 	}
5637 }
5638 #else /* !CONFIG_SCHED_DEBUG */
5639 # define sched_domain_debug(sd, cpu) do { } while (0)
5640 #endif /* CONFIG_SCHED_DEBUG */
5641 
5642 static int sd_degenerate(struct sched_domain *sd)
5643 {
5644 	if (cpumask_weight(sched_domain_span(sd)) == 1)
5645 		return 1;
5646 
5647 	/* Following flags need at least 2 groups */
5648 	if (sd->flags & (SD_LOAD_BALANCE |
5649 			 SD_BALANCE_NEWIDLE |
5650 			 SD_BALANCE_FORK |
5651 			 SD_BALANCE_EXEC |
5652 			 SD_SHARE_CPUPOWER |
5653 			 SD_SHARE_PKG_RESOURCES)) {
5654 		if (sd->groups != sd->groups->next)
5655 			return 0;
5656 	}
5657 
5658 	/* Following flags don't use groups */
5659 	if (sd->flags & (SD_WAKE_AFFINE))
5660 		return 0;
5661 
5662 	return 1;
5663 }
5664 
5665 static int
5666 sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
5667 {
5668 	unsigned long cflags = sd->flags, pflags = parent->flags;
5669 
5670 	if (sd_degenerate(parent))
5671 		return 1;
5672 
5673 	if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
5674 		return 0;
5675 
5676 	/* Flags needing groups don't count if only 1 group in parent */
5677 	if (parent->groups == parent->groups->next) {
5678 		pflags &= ~(SD_LOAD_BALANCE |
5679 				SD_BALANCE_NEWIDLE |
5680 				SD_BALANCE_FORK |
5681 				SD_BALANCE_EXEC |
5682 				SD_SHARE_CPUPOWER |
5683 				SD_SHARE_PKG_RESOURCES);
5684 		if (nr_node_ids == 1)
5685 			pflags &= ~SD_SERIALIZE;
5686 	}
5687 	if (~cflags & pflags)
5688 		return 0;
5689 
5690 	return 1;
5691 }
5692 
5693 static void free_rootdomain(struct rcu_head *rcu)
5694 {
5695 	struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
5696 
5697 	cpupri_cleanup(&rd->cpupri);
5698 	free_cpumask_var(rd->rto_mask);
5699 	free_cpumask_var(rd->online);
5700 	free_cpumask_var(rd->span);
5701 	kfree(rd);
5702 }
5703 
5704 static void rq_attach_root(struct rq *rq, struct root_domain *rd)
5705 {
5706 	struct root_domain *old_rd = NULL;
5707 	unsigned long flags;
5708 
5709 	raw_spin_lock_irqsave(&rq->lock, flags);
5710 
5711 	if (rq->rd) {
5712 		old_rd = rq->rd;
5713 
5714 		if (cpumask_test_cpu(rq->cpu, old_rd->online))
5715 			set_rq_offline(rq);
5716 
5717 		cpumask_clear_cpu(rq->cpu, old_rd->span);
5718 
5719 		/*
5720 		 * If we don't want to free the old_rd yet then
5721 		 * set old_rd to NULL to skip the freeing later
5722 		 * in this function:
5723 		 */
5724 		if (!atomic_dec_and_test(&old_rd->refcount))
5725 			old_rd = NULL;
5726 	}
5727 
5728 	atomic_inc(&rd->refcount);
5729 	rq->rd = rd;
5730 
5731 	cpumask_set_cpu(rq->cpu, rd->span);
5732 	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
5733 		set_rq_online(rq);
5734 
5735 	raw_spin_unlock_irqrestore(&rq->lock, flags);
5736 
5737 	if (old_rd)
5738 		call_rcu_sched(&old_rd->rcu, free_rootdomain);
5739 }
5740 
5741 static int init_rootdomain(struct root_domain *rd)
5742 {
5743 	memset(rd, 0, sizeof(*rd));
5744 
5745 	if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
5746 		goto out;
5747 	if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
5748 		goto free_span;
5749 	if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
5750 		goto free_online;
5751 
5752 	if (cpupri_init(&rd->cpupri) != 0)
5753 		goto free_rto_mask;
5754 	return 0;
5755 
5756 free_rto_mask:
5757 	free_cpumask_var(rd->rto_mask);
5758 free_online:
5759 	free_cpumask_var(rd->online);
5760 free_span:
5761 	free_cpumask_var(rd->span);
5762 out:
5763 	return -ENOMEM;
5764 }
5765 
5766 /*
5767  * By default the system creates a single root-domain with all cpus as
5768  * members (mimicking the global state we have today).
5769  */
5770 struct root_domain def_root_domain;
5771 
5772 static void init_defrootdomain(void)
5773 {
5774 	init_rootdomain(&def_root_domain);
5775 
5776 	atomic_set(&def_root_domain.refcount, 1);
5777 }
5778 
5779 static struct root_domain *alloc_rootdomain(void)
5780 {
5781 	struct root_domain *rd;
5782 
5783 	rd = kmalloc(sizeof(*rd), GFP_KERNEL);
5784 	if (!rd)
5785 		return NULL;
5786 
5787 	if (init_rootdomain(rd) != 0) {
5788 		kfree(rd);
5789 		return NULL;
5790 	}
5791 
5792 	return rd;
5793 }
5794 
5795 static void free_sched_groups(struct sched_group *sg, int free_sgp)
5796 {
5797 	struct sched_group *tmp, *first;
5798 
5799 	if (!sg)
5800 		return;
5801 
5802 	first = sg;
5803 	do {
5804 		tmp = sg->next;
5805 
5806 		if (free_sgp && atomic_dec_and_test(&sg->sgp->ref))
5807 			kfree(sg->sgp);
5808 
5809 		kfree(sg);
5810 		sg = tmp;
5811 	} while (sg != first);
5812 }
5813 
5814 static void free_sched_domain(struct rcu_head *rcu)
5815 {
5816 	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
5817 
5818 	/*
5819 	 * If it's an overlapping domain it has private groups; iterate and
5820 	 * nuke them all.
5821 	 */
5822 	if (sd->flags & SD_OVERLAP) {
5823 		free_sched_groups(sd->groups, 1);
5824 	} else if (atomic_dec_and_test(&sd->groups->ref)) {
5825 		kfree(sd->groups->sgp);
5826 		kfree(sd->groups);
5827 	}
5828 	kfree(sd);
5829 }
5830 
5831 static void destroy_sched_domain(struct sched_domain *sd, int cpu)
5832 {
5833 	call_rcu(&sd->rcu, free_sched_domain);
5834 }
5835 
5836 static void destroy_sched_domains(struct sched_domain *sd, int cpu)
5837 {
5838 	for (; sd; sd = sd->parent)
5839 		destroy_sched_domain(sd, cpu);
5840 }
5841 
5842 /*
5843  * Keep a special pointer to the highest sched_domain that has
5844  * SD_SHARE_PKG_RESOURCES set (Last Level Cache Domain); this
5845  * allows us to avoid some pointer chasing in select_idle_sibling().
5846  *
5847  * Also keep a unique ID per domain (we use the first cpu number in
5848  * the cpumask of the domain); this allows us to quickly tell if
5849  * two cpus are in the same cache domain, see cpus_share_cache().
5850  */
5851 DEFINE_PER_CPU(struct sched_domain *, sd_llc);
5852 DEFINE_PER_CPU(int, sd_llc_id);
5853 
5854 static void update_top_cache_domain(int cpu)
5855 {
5856 	struct sched_domain *sd;
5857 	int id = cpu;
5858 
5859 	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
5860 	if (sd)
5861 		id = cpumask_first(sched_domain_span(sd));
5862 
5863 	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
5864 	per_cpu(sd_llc_id, cpu) = id;
5865 }
5866 
5867 /*
5868  * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
5869  * hold the hotplug lock.
5870  */
5871 static void
5872 cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
5873 {
5874 	struct rq *rq = cpu_rq(cpu);
5875 	struct sched_domain *tmp;
5876 
5877 	/* Remove the sched domains which do not contribute to scheduling. */
5878 	for (tmp = sd; tmp; ) {
5879 		struct sched_domain *parent = tmp->parent;
5880 		if (!parent)
5881 			break;
5882 
5883 		if (sd_parent_degenerate(tmp, parent)) {
5884 			tmp->parent = parent->parent;
5885 			if (parent->parent)
5886 				parent->parent->child = tmp;
5887 			destroy_sched_domain(parent, cpu);
5888 		} else
5889 			tmp = tmp->parent;
5890 	}
5891 
5892 	if (sd && sd_degenerate(sd)) {
5893 		tmp = sd;
5894 		sd = sd->parent;
5895 		destroy_sched_domain(tmp, cpu);
5896 		if (sd)
5897 			sd->child = NULL;
5898 	}
5899 
5900 	sched_domain_debug(sd, cpu);
5901 
5902 	rq_attach_root(rq, rd);
5903 	tmp = rq->sd;
5904 	rcu_assign_pointer(rq->sd, sd);
5905 	destroy_sched_domains(tmp, cpu);
5906 
5907 	update_top_cache_domain(cpu);
5908 }
5909 
5910 /* cpus with isolated domains */
5911 static cpumask_var_t cpu_isolated_map;
5912 
5913 /* Set up the mask of cpus configured for isolated domains */
5914 static int __init isolated_cpu_setup(char *str)
5915 {
5916 	alloc_bootmem_cpumask_var(&cpu_isolated_map);
5917 	cpulist_parse(str, cpu_isolated_map);
5918 	return 1;
5919 }
5920 
5921 __setup("isolcpus=", isolated_cpu_setup);
5922 
5923 static const struct cpumask *cpu_cpu_mask(int cpu)
5924 {
5925 	return cpumask_of_node(cpu_to_node(cpu));
5926 }
5927 
5928 struct sd_data {
5929 	struct sched_domain **__percpu sd;
5930 	struct sched_group **__percpu sg;
5931 	struct sched_group_power **__percpu sgp;
5932 };
5933 
5934 struct s_data {
5935 	struct sched_domain ** __percpu sd;
5936 	struct root_domain	*rd;
5937 };
5938 
5939 enum s_alloc {
5940 	sa_rootdomain,
5941 	sa_sd,
5942 	sa_sd_storage,
5943 	sa_none,
5944 };
5945 
5946 struct sched_domain_topology_level;
5947 
5948 typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
5949 typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
5950 
5951 #define SDTL_OVERLAP	0x01
5952 
5953 struct sched_domain_topology_level {
5954 	sched_domain_init_f init;
5955 	sched_domain_mask_f mask;
5956 	int		    flags;
5957 	int		    numa_level;
5958 	struct sd_data      data;
5959 };
5960 
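/*
 * Build the group list of an overlapping (SD_OVERLAP) domain: each group
 * spans the child domain of one of the member cpus, the groups form a
 * circular list, and the group containing @cpu is installed as sd->groups.
 */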
5961 static int
5962 build_overlap_sched_groups(struct sched_domain *sd, int cpu)
5963 {
5964 	struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
5965 	const struct cpumask *span = sched_domain_span(sd);
5966 	struct cpumask *covered = sched_domains_tmpmask;
5967 	struct sd_data *sdd = sd->private;
5968 	struct sched_domain *child;
5969 	int i;
5970 
5971 	cpumask_clear(covered);
5972 
5973 	for_each_cpu(i, span) {
5974 		struct cpumask *sg_span;
5975 
5976 		if (cpumask_test_cpu(i, covered))
5977 			continue;
5978 
5979 		sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
5980 				GFP_KERNEL, cpu_to_node(cpu));
5981 
5982 		if (!sg)
5983 			goto fail;
5984 
5985 		sg_span = sched_group_cpus(sg);
5986 
5987 		child = *per_cpu_ptr(sdd->sd, i);
5988 		if (child->child) {
5989 			child = child->child;
5990 			cpumask_copy(sg_span, sched_domain_span(child));
5991 		} else
5992 			cpumask_set_cpu(i, sg_span);
5993 
5994 		cpumask_or(covered, covered, sg_span);
5995 
5996 		sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span));
5997 		atomic_inc(&sg->sgp->ref);
5998 
5999 		if (cpumask_test_cpu(cpu, sg_span))
6000 			groups = sg;
6001 
6002 		if (!first)
6003 			first = sg;
6004 		if (last)
6005 			last->next = sg;
6006 		last = sg;
6007 		last->next = first;
6008 	}
6009 	sd->groups = groups;
6010 
6011 	return 0;
6012 
6013 fail:
6014 	free_sched_groups(first, 0);
6015 
6016 	return -ENOMEM;
6017 }
6018 
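/*
 * Return the first cpu of the group that @cpu belongs to in @sdd and, if
 * @sg is given, hand back that group with its sched_group_power attached.
 */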
6019 static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
6020 {
6021 	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
6022 	struct sched_domain *child = sd->child;
6023 
6024 	if (child)
6025 		cpu = cpumask_first(sched_domain_span(child));
6026 
6027 	if (sg) {
6028 		*sg = *per_cpu_ptr(sdd->sg, cpu);
6029 		(*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu);
6030 		atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */
6031 	}
6032 
6033 	return cpu;
6034 }
6035 
6036 /*
6037  * build_sched_groups will build a circular linked list of the groups
6038  * covered by the given span, and will set each group's ->cpumask correctly,
6039  * and ->cpu_power to 0.
6040  *
6041  * Assumes the sched_domain tree is fully constructed
6042  */
6043 static int
6044 build_sched_groups(struct sched_domain *sd, int cpu)
6045 {
6046 	struct sched_group *first = NULL, *last = NULL;
6047 	struct sd_data *sdd = sd->private;
6048 	const struct cpumask *span = sched_domain_span(sd);
6049 	struct cpumask *covered;
6050 	int i;
6051 
6052 	get_group(cpu, sdd, &sd->groups);
6053 	atomic_inc(&sd->groups->ref);
6054 
6055 	if (cpu != cpumask_first(sched_domain_span(sd)))
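	/* Only the first cpu of the domain span builds the full group list. */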
6056 		return 0;
6057 
6058 	lockdep_assert_held(&sched_domains_mutex);
6059 	covered = sched_domains_tmpmask;
6060 
6061 	cpumask_clear(covered);
6062 
6063 	for_each_cpu(i, span) {
6064 		struct sched_group *sg;
6065 		int group = get_group(i, sdd, &sg);
6066 		int j;
6067 
6068 		if (cpumask_test_cpu(i, covered))
6069 			continue;
6070 
6071 		cpumask_clear(sched_group_cpus(sg));
6072 		sg->sgp->power = 0;
6073 
6074 		for_each_cpu(j, span) {
6075 			if (get_group(j, sdd, NULL) != group)
6076 				continue;
6077 
6078 			cpumask_set_cpu(j, covered);
6079 			cpumask_set_cpu(j, sched_group_cpus(sg));
6080 		}
6081 
6082 		if (!first)
6083 			first = sg;
6084 		if (last)
6085 			last->next = sg;
6086 		last = sg;
6087 	}
6088 	last->next = first;
6089 
6090 	return 0;
6091 }
6092 
6093 /*
6094  * Initialize sched groups cpu_power.
6095  *
6096  * cpu_power indicates the capacity of a sched group, which is used while
6097  * distributing the load between different sched groups in a sched domain.
6098  * Typically cpu_power for all the groups in a sched domain will be the same
6099  * unless there are asymmetries in the topology. If there are asymmetries, the
6100  * group having more cpu_power will pick up more load compared to the group
6101  * having less cpu_power.
6102  */
6103 static void init_sched_groups_power(int cpu, struct sched_domain *sd)
6104 {
6105 	struct sched_group *sg = sd->groups;
6106 
6107 	WARN_ON(!sd || !sg);
6108 
6109 	do {
6110 		sg->group_weight = cpumask_weight(sched_group_cpus(sg));
6111 		sg = sg->next;
6112 	} while (sg != sd->groups);
6113 
6114 	if (cpu != group_first_cpu(sg))
6115 		return;
6116 
6117 	update_group_power(sd, cpu);
6118 	atomic_set(&sg->sgp->nr_busy_cpus, sg->group_weight);
6119 }
6120 
6121 int __weak arch_sd_sibling_asym_packing(void)
6122 {
6123 	return 0*SD_ASYM_PACKING;
6124 }
6125 
6126 /*
6127  * Initializers for sched domains
6128  * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
6129  */
6130 
6131 #ifdef CONFIG_SCHED_DEBUG
6132 # define SD_INIT_NAME(sd, type)		sd->name = #type
6133 #else
6134 # define SD_INIT_NAME(sd, type)		do { } while (0)
6135 #endif
6136 
6137 #define SD_INIT_FUNC(type)						\
6138 static noinline struct sched_domain *					\
6139 sd_init_##type(struct sched_domain_topology_level *tl, int cpu) 	\
6140 {									\
6141 	struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);	\
6142 	*sd = SD_##type##_INIT;						\
6143 	SD_INIT_NAME(sd, type);						\
6144 	sd->private = &tl->data;					\
6145 	return sd;							\
6146 }
6147 
6148 SD_INIT_FUNC(CPU)
6149 #ifdef CONFIG_SCHED_SMT
6150  SD_INIT_FUNC(SIBLING)
6151 #endif
6152 #ifdef CONFIG_SCHED_MC
6153  SD_INIT_FUNC(MC)
6154 #endif
6155 #ifdef CONFIG_SCHED_BOOK
6156  SD_INIT_FUNC(BOOK)
6157 #endif
6158 
6159 static int default_relax_domain_level = -1;
6160 int sched_domain_level_max;
6161 
6162 static int __init setup_relax_domain_level(char *str)
6163 {
6164 	unsigned long val;
6165 
6166 	val = simple_strtoul(str, NULL, 0);
6167 	if (val < sched_domain_level_max)
6168 		default_relax_domain_level = val;
6169 
6170 	return 1;
6171 }
6172 __setup("relax_domain_level=", setup_relax_domain_level);
6173 
6174 static void set_domain_attribute(struct sched_domain *sd,
6175 				 struct sched_domain_attr *attr)
6176 {
6177 	int request;
6178 
6179 	if (!attr || attr->relax_domain_level < 0) {
6180 		if (default_relax_domain_level < 0)
6181 			return;
6182 		else
6183 			request = default_relax_domain_level;
6184 	} else
6185 		request = attr->relax_domain_level;
6186 	if (request < sd->level) {
6187 		/* turn off idle balance on this domain */
6188 		sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
6189 	} else {
6190 		/* turn on idle balance on this domain */
6191 		sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
6192 	}
6193 }
6194 
6195 static void __sdt_free(const struct cpumask *cpu_map);
6196 static int __sdt_alloc(const struct cpumask *cpu_map);
6197 
6198 static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
6199 				 const struct cpumask *cpu_map)
6200 {
6201 	switch (what) {
6202 	case sa_rootdomain:
6203 		if (!atomic_read(&d->rd->refcount))
6204 			free_rootdomain(&d->rd->rcu); /* fall through */
6205 	case sa_sd:
6206 		free_percpu(d->sd); /* fall through */
6207 	case sa_sd_storage:
6208 		__sdt_free(cpu_map); /* fall through */
6209 	case sa_none:
6210 		break;
6211 	}
6212 }
6213 
6214 static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
6215 						   const struct cpumask *cpu_map)
6216 {
6217 	memset(d, 0, sizeof(*d));
6218 
6219 	if (__sdt_alloc(cpu_map))
6220 		return sa_sd_storage;
6221 	d->sd = alloc_percpu(struct sched_domain *);
6222 	if (!d->sd)
6223 		return sa_sd_storage;
6224 	d->rd = alloc_rootdomain();
6225 	if (!d->rd)
6226 		return sa_sd;
6227 	return sa_rootdomain;
6228 }
6229 
6230 /*
6231  * NULL the sd_data elements we've used to build the sched_domain and
6232  * sched_group structure so that the subsequent __free_domain_allocs()
6233  * will not free the data we're using.
6234  */
6235 static void claim_allocations(int cpu, struct sched_domain *sd)
6236 {
6237 	struct sd_data *sdd = sd->private;
6238 
6239 	WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
6240 	*per_cpu_ptr(sdd->sd, cpu) = NULL;
6241 
6242 	if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
6243 		*per_cpu_ptr(sdd->sg, cpu) = NULL;
6244 
6245 	if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref))
6246 		*per_cpu_ptr(sdd->sgp, cpu) = NULL;
6247 }
6248 
6249 #ifdef CONFIG_SCHED_SMT
6250 static const struct cpumask *cpu_smt_mask(int cpu)
6251 {
6252 	return topology_thread_cpumask(cpu);
6253 }
6254 #endif
6255 
6256 /*
6257  * Topology list, bottom-up.
6258  */
6259 static struct sched_domain_topology_level default_topology[] = {
6260 #ifdef CONFIG_SCHED_SMT
6261 	{ sd_init_SIBLING, cpu_smt_mask, },
6262 #endif
6263 #ifdef CONFIG_SCHED_MC
6264 	{ sd_init_MC, cpu_coregroup_mask, },
6265 #endif
6266 #ifdef CONFIG_SCHED_BOOK
6267 	{ sd_init_BOOK, cpu_book_mask, },
6268 #endif
6269 	{ sd_init_CPU, cpu_cpu_mask, },
6270 	{ NULL, },
6271 };
6272 
6273 static struct sched_domain_topology_level *sched_domain_topology = default_topology;
6274 
6275 #ifdef CONFIG_NUMA
6276 
6277 static int sched_domains_numa_levels;
6278 static int sched_domains_numa_scale;
6279 static int *sched_domains_numa_distance;
6280 static struct cpumask ***sched_domains_numa_masks;
6281 static int sched_domains_curr_level;
6282 
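/* Levels further away than REMOTE_DISTANCE get no fork/exec/wake-affine balancing. */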
6283 static inline int sd_local_flags(int level)
6284 {
6285 	if (sched_domains_numa_distance[level] > REMOTE_DISTANCE)
6286 		return 0;
6287 
6288 	return SD_BALANCE_EXEC | SD_BALANCE_FORK | SD_WAKE_AFFINE;
6289 }
6290 
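/* Set up the sched_domain for NUMA topology level @tl->numa_level on @cpu. */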
6291 static struct sched_domain *
6292 sd_numa_init(struct sched_domain_topology_level *tl, int cpu)
6293 {
6294 	struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);
6295 	int level = tl->numa_level;
6296 	int sd_weight = cpumask_weight(
6297 			sched_domains_numa_masks[level][cpu_to_node(cpu)]);
6298 
6299 	*sd = (struct sched_domain){
6300 		.min_interval		= sd_weight,
6301 		.max_interval		= 2*sd_weight,
6302 		.busy_factor		= 32,
6303 		.imbalance_pct		= 125,
6304 		.cache_nice_tries	= 2,
6305 		.busy_idx		= 3,
6306 		.idle_idx		= 2,
6307 		.newidle_idx		= 0,
6308 		.wake_idx		= 0,
6309 		.forkexec_idx		= 0,
6310 
6311 		.flags			= 1*SD_LOAD_BALANCE
6312 					| 1*SD_BALANCE_NEWIDLE
6313 					| 0*SD_BALANCE_EXEC
6314 					| 0*SD_BALANCE_FORK
6315 					| 0*SD_BALANCE_WAKE
6316 					| 0*SD_WAKE_AFFINE
6317 					| 0*SD_PREFER_LOCAL
6318 					| 0*SD_SHARE_CPUPOWER
6319 					| 0*SD_SHARE_PKG_RESOURCES
6320 					| 1*SD_SERIALIZE
6321 					| 0*SD_PREFER_SIBLING
6322 					| sd_local_flags(level)
6323 					,
6324 		.last_balance		= jiffies,
6325 		.balance_interval	= sd_weight,
6326 	};
6327 	SD_INIT_NAME(sd, NUMA);
6328 	sd->private = &tl->data;
6329 
6330 	/*
6331 	 * Ugly hack to pass state to sd_numa_mask()...
6332 	 */
6333 	sched_domains_curr_level = tl->numa_level;
6334 
6335 	return sd;
6336 }
6337 
6338 static const struct cpumask *sd_numa_mask(int cpu)
6339 {
6340 	return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
6341 }
6342 
6343 static void sched_init_numa(void)
6344 {
6345 	int next_distance, curr_distance = node_distance(0, 0);
6346 	struct sched_domain_topology_level *tl;
6347 	int level = 0;
6348 	int i, j, k;
6349 
6350 	sched_domains_numa_scale = curr_distance;
6351 	sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
6352 	if (!sched_domains_numa_distance)
6353 		return;
6354 
6355 	/*
6356 	 * O(nr_nodes^2) deduplicating selection sort -- in order to find the
6357 	 * unique distances in the node_distance() table.
6358 	 *
6359 	 * Assumes node_distance(0,j) includes all distances in
6360 	 * node_distance(i,j) in order to avoid cubic time.
6361 	 *
6362 	 * XXX: could be optimized to O(n log n) by using sort()
6363 	 */
6364 	next_distance = curr_distance;
6365 	for (i = 0; i < nr_node_ids; i++) {
6366 		for (j = 0; j < nr_node_ids; j++) {
6367 			int distance = node_distance(0, j);
6368 			if (distance > curr_distance &&
6369 					(distance < next_distance ||
6370 					 next_distance == curr_distance))
6371 				next_distance = distance;
6372 		}
6373 		if (next_distance != curr_distance) {
6374 			sched_domains_numa_distance[level++] = next_distance;
6375 			sched_domains_numa_levels = level;
6376 			curr_distance = next_distance;
6377 		} else break;
6378 	}
6379 	/*
6380 	 * 'level' contains the number of unique distances, excluding the
6381 	 * identity distance node_distance(i,i).
6382 	 *
6383 	 * The sched_domains_numa_distance[] array includes the actual distance
6384 	 * numbers.
6385 	 */
6386 
6387 	sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL);
6388 	if (!sched_domains_numa_masks)
6389 		return;
6390 
6391 	/*
6392 	 * Now for each level, construct a mask per node which contains all
6393 	 * cpus of nodes that are that many hops away from us.
6394 	 */
6395 	for (i = 0; i < level; i++) {
6396 		sched_domains_numa_masks[i] =
6397 			kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
6398 		if (!sched_domains_numa_masks[i])
6399 			return;
6400 
6401 		for (j = 0; j < nr_node_ids; j++) {
6402 			struct cpumask *mask = kzalloc_node(cpumask_size(), GFP_KERNEL, j);
6403 			if (!mask)
6404 				return;
6405 
6406 			sched_domains_numa_masks[i][j] = mask;
6407 
6408 			for (k = 0; k < nr_node_ids; k++) {
6409 				if (node_distance(j, k) > sched_domains_numa_distance[i])
6410 					continue;
6411 
6412 				cpumask_or(mask, mask, cpumask_of_node(k));
6413 			}
6414 		}
6415 	}
6416 
6417 	tl = kzalloc((ARRAY_SIZE(default_topology) + level) *
6418 			sizeof(struct sched_domain_topology_level), GFP_KERNEL);
6419 	if (!tl)
6420 		return;
6421 
6422 	/*
6423 	 * Copy the default topology bits..
6424 	 */
6425 	for (i = 0; default_topology[i].init; i++)
6426 		tl[i] = default_topology[i];
6427 
6428 	/*
6429 	 * .. and append 'j' levels of NUMA goodness.
6430 	 */
6431 	for (j = 0; j < level; i++, j++) {
6432 		tl[i] = (struct sched_domain_topology_level){
6433 			.init = sd_numa_init,
6434 			.mask = sd_numa_mask,
6435 			.flags = SDTL_OVERLAP,
6436 			.numa_level = j,
6437 		};
6438 	}
6439 
6440 	sched_domain_topology = tl;
6441 }
6442 #else
6443 static inline void sched_init_numa(void)
6444 {
6445 }
6446 #endif /* CONFIG_NUMA */
6447 
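/*
 * Allocate the per-cpu sched_domain, sched_group and sched_group_power
 * storage for every topology level covering @cpu_map.
 */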
6448 static int __sdt_alloc(const struct cpumask *cpu_map)
6449 {
6450 	struct sched_domain_topology_level *tl;
6451 	int j;
6452 
6453 	for (tl = sched_domain_topology; tl->init; tl++) {
6454 		struct sd_data *sdd = &tl->data;
6455 
6456 		sdd->sd = alloc_percpu(struct sched_domain *);
6457 		if (!sdd->sd)
6458 			return -ENOMEM;
6459 
6460 		sdd->sg = alloc_percpu(struct sched_group *);
6461 		if (!sdd->sg)
6462 			return -ENOMEM;
6463 
6464 		sdd->sgp = alloc_percpu(struct sched_group_power *);
6465 		if (!sdd->sgp)
6466 			return -ENOMEM;
6467 
6468 		for_each_cpu(j, cpu_map) {
6469 			struct sched_domain *sd;
6470 			struct sched_group *sg;
6471 			struct sched_group_power *sgp;
6472 
6473 			sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
6474 					GFP_KERNEL, cpu_to_node(j));
6475 			if (!sd)
6476 				return -ENOMEM;
6477 
6478 			*per_cpu_ptr(sdd->sd, j) = sd;
6479 
6480 			sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
6481 					GFP_KERNEL, cpu_to_node(j));
6482 			if (!sg)
6483 				return -ENOMEM;
6484 
6485 			sg->next = sg;
6486 
6487 			*per_cpu_ptr(sdd->sg, j) = sg;
6488 
6489 			sgp = kzalloc_node(sizeof(struct sched_group_power),
6490 					GFP_KERNEL, cpu_to_node(j));
6491 			if (!sgp)
6492 				return -ENOMEM;
6493 
6494 			*per_cpu_ptr(sdd->sgp, j) = sgp;
6495 		}
6496 	}
6497 
6498 	return 0;
6499 }
6500 
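/*
 * Free whatever __sdt_alloc() set up that claim_allocations() has not
 * handed over to the live sched_domain tree.
 */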
6501 static void __sdt_free(const struct cpumask *cpu_map)
6502 {
6503 	struct sched_domain_topology_level *tl;
6504 	int j;
6505 
6506 	for (tl = sched_domain_topology; tl->init; tl++) {
6507 		struct sd_data *sdd = &tl->data;
6508 
6509 		for_each_cpu(j, cpu_map) {
6510 			struct sched_domain *sd;
6511 
6512 			if (sdd->sd) {
6513 				sd = *per_cpu_ptr(sdd->sd, j);
6514 				if (sd && (sd->flags & SD_OVERLAP))
6515 					free_sched_groups(sd->groups, 0);
6516 				kfree(*per_cpu_ptr(sdd->sd, j));
6517 			}
6518 
6519 			if (sdd->sg)
6520 				kfree(*per_cpu_ptr(sdd->sg, j));
6521 			if (sdd->sgp)
6522 				kfree(*per_cpu_ptr(sdd->sgp, j));
6523 		}
6524 		free_percpu(sdd->sd);
6525 		sdd->sd = NULL;
6526 		free_percpu(sdd->sg);
6527 		sdd->sg = NULL;
6528 		free_percpu(sdd->sgp);
6529 		sdd->sgp = NULL;
6530 	}
6531 }
6532 
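/* Instantiate one topology level for @cpu and link it above @child. */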
6533 struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
6534 		struct s_data *d, const struct cpumask *cpu_map,
6535 		struct sched_domain_attr *attr, struct sched_domain *child,
6536 		int cpu)
6537 {
6538 	struct sched_domain *sd = tl->init(tl, cpu);
6539 	if (!sd)
6540 		return child;
6541 
6542 	set_domain_attribute(sd, attr);
6543 	cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
6544 	if (child) {
6545 		sd->level = child->level + 1;
6546 		sched_domain_level_max = max(sched_domain_level_max, sd->level);
6547 		child->parent = sd;
6548 	}
6549 	sd->child = child;
6550 
6551 	return sd;
6552 }
6553 
6554 /*
6555  * Build sched domains for a given set of cpus and attach the sched domains
6556  * to the individual cpus
6557  */
6558 static int build_sched_domains(const struct cpumask *cpu_map,
6559 			       struct sched_domain_attr *attr)
6560 {
6561 	enum s_alloc alloc_state = sa_none;
6562 	struct sched_domain *sd;
6563 	struct s_data d;
6564 	int i, ret = -ENOMEM;
6565 
6566 	alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
6567 	if (alloc_state != sa_rootdomain)
6568 		goto error;
6569 
6570 	/* Set up domains for cpus specified by the cpu_map. */
6571 	for_each_cpu(i, cpu_map) {
6572 		struct sched_domain_topology_level *tl;
6573 
6574 		sd = NULL;
6575 		for (tl = sched_domain_topology; tl->init; tl++) {
6576 			sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i);
6577 			if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
6578 				sd->flags |= SD_OVERLAP;
6579 			if (cpumask_equal(cpu_map, sched_domain_span(sd)))
6580 				break;
6581 		}
6582 
6583 		while (sd->child)
6584 			sd = sd->child;
6585 
6586 		*per_cpu_ptr(d.sd, i) = sd;
6587 	}
6588 
6589 	/* Build the groups for the domains */
6590 	for_each_cpu(i, cpu_map) {
6591 		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
6592 			sd->span_weight = cpumask_weight(sched_domain_span(sd));
6593 			if (sd->flags & SD_OVERLAP) {
6594 				if (build_overlap_sched_groups(sd, i))
6595 					goto error;
6596 			} else {
6597 				if (build_sched_groups(sd, i))
6598 					goto error;
6599 			}
6600 		}
6601 	}
6602 
6603 	/* Calculate CPU power for physical packages and nodes */
6604 	for (i = nr_cpumask_bits-1; i >= 0; i--) {
6605 		if (!cpumask_test_cpu(i, cpu_map))
6606 			continue;
6607 
6608 		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
6609 			claim_allocations(i, sd);
6610 			init_sched_groups_power(i, sd);
6611 		}
6612 	}
6613 
6614 	/* Attach the domains */
6615 	rcu_read_lock();
6616 	for_each_cpu(i, cpu_map) {
6617 		sd = *per_cpu_ptr(d.sd, i);
6618 		cpu_attach_domain(sd, d.rd, i);
6619 	}
6620 	rcu_read_unlock();
6621 
6622 	ret = 0;
6623 error:
6624 	__free_domain_allocs(&d, alloc_state, cpu_map);
6625 	return ret;
6626 }
6627 
6628 static cpumask_var_t *doms_cur;	/* current sched domains */
6629 static int ndoms_cur;		/* number of sched domains in 'doms_cur' */
6630 static struct sched_domain_attr *dattr_cur;
6631 				/* attributes of custom domains in 'doms_cur' */
6632 
6633 /*
6634  * Special case: If a kmalloc of a doms_cur partition (array of
6635  * cpumask) fails, then fallback to a single sched domain,
6636  * as determined by the single cpumask fallback_doms.
6637  */
6638 static cpumask_var_t fallback_doms;
6639 
6640 /*
6641  * arch_update_cpu_topology lets virtualized architectures update the
6642  * cpu core maps. It is supposed to return 1 if the topology changed
6643  * or 0 if it stayed the same.
6644  */
6645 int __attribute__((weak)) arch_update_cpu_topology(void)
6646 {
6647 	return 0;
6648 }
6649 
6650 cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
6651 {
6652 	int i;
6653 	cpumask_var_t *doms;
6654 
6655 	doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
6656 	if (!doms)
6657 		return NULL;
6658 	for (i = 0; i < ndoms; i++) {
6659 		if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
6660 			free_sched_domains(doms, i);
6661 			return NULL;
6662 		}
6663 	}
6664 	return doms;
6665 }
6666 
6667 void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
6668 {
6669 	unsigned int i;
6670 	for (i = 0; i < ndoms; i++)
6671 		free_cpumask_var(doms[i]);
6672 	kfree(doms);
6673 }
6674 
6675 /*
6676  * Set up scheduler domains and groups. Callers must hold the hotplug lock.
6677  * For now this just excludes isolated cpus, but could be used to
6678  * exclude other special cases in the future.
6679  */
6680 static int init_sched_domains(const struct cpumask *cpu_map)
6681 {
6682 	int err;
6683 
6684 	arch_update_cpu_topology();
6685 	ndoms_cur = 1;
6686 	doms_cur = alloc_sched_domains(ndoms_cur);
6687 	if (!doms_cur)
6688 		doms_cur = &fallback_doms;
6689 	cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
6690 	dattr_cur = NULL;
6691 	err = build_sched_domains(doms_cur[0], NULL);
6692 	register_sched_domain_sysctl();
6693 
6694 	return err;
6695 }
6696 
6697 /*
6698  * Detach sched domains from a group of cpus specified in cpu_map
6699  * These cpus will now be attached to the NULL domain
6700  */
6701 static void detach_destroy_domains(const struct cpumask *cpu_map)
6702 {
6703 	int i;
6704 
6705 	rcu_read_lock();
6706 	for_each_cpu(i, cpu_map)
6707 		cpu_attach_domain(NULL, &def_root_domain, i);
6708 	rcu_read_unlock();
6709 }
6710 
6711 /* handle null as "default" */
6712 static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
6713 			struct sched_domain_attr *new, int idx_new)
6714 {
6715 	struct sched_domain_attr tmp;
6716 
6717 	/* fast path */
6718 	if (!new && !cur)
6719 		return 1;
6720 
6721 	tmp = SD_ATTR_INIT;
6722 	return !memcmp(cur ? (cur + idx_cur) : &tmp,
6723 			new ? (new + idx_new) : &tmp,
6724 			sizeof(struct sched_domain_attr));
6725 }
6726 
6727 /*
6728  * Partition sched domains as specified by the 'ndoms_new'
6729  * cpumasks in the array doms_new[] of cpumasks. This compares
6730  * doms_new[] to the current sched domain partitioning, doms_cur[].
6731  * It destroys each deleted domain and builds each new domain.
6732  *
6733  * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
6734  * The masks don't intersect (don't overlap). We should set up one
6735  * sched domain for each mask. CPUs not in any of the cpumasks will
6736  * not be load balanced. If the same cpumask appears both in the
6737  * current 'doms_cur' domains and in the new 'doms_new', we can leave
6738  * it as it is.
6739  *
6740  * The passed in 'doms_new' should be allocated using
6741  * alloc_sched_domains.  This routine takes ownership of it and will
6742  * free_sched_domains it when done with it. If the caller failed the
6743  * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
6744  * and partition_sched_domains() will fall back to the single partition
6745  * 'fallback_doms'; this also forces the domains to be rebuilt.
6746  *
6747  * If doms_new == NULL it will be replaced with cpu_online_mask.
6748  * ndoms_new == 0 is a special case for destroying existing domains,
6749  * and it will not create the default domain.
6750  *
6751  * Call with hotplug lock held
6752  */
6753 void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
6754 			     struct sched_domain_attr *dattr_new)
6755 {
6756 	int i, j, n;
6757 	int new_topology;
6758 
6759 	mutex_lock(&sched_domains_mutex);
6760 
6761 	/* always unregister in case we don't destroy any domains */
6762 	unregister_sched_domain_sysctl();
6763 
6764 	/* Let architecture update cpu core mappings. */
6765 	new_topology = arch_update_cpu_topology();
6766 
6767 	n = doms_new ? ndoms_new : 0;
6768 
6769 	/* Destroy deleted domains */
6770 	for (i = 0; i < ndoms_cur; i++) {
6771 		for (j = 0; j < n && !new_topology; j++) {
6772 			if (cpumask_equal(doms_cur[i], doms_new[j])
6773 			    && dattrs_equal(dattr_cur, i, dattr_new, j))
6774 				goto match1;
6775 		}
6776 		/* no match - a current sched domain not in new doms_new[] */
6777 		detach_destroy_domains(doms_cur[i]);
6778 match1:
6779 		;
6780 	}
6781 
6782 	if (doms_new == NULL) {
6783 		ndoms_cur = 0;
6784 		doms_new = &fallback_doms;
6785 		cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
6786 		WARN_ON_ONCE(dattr_new);
6787 	}
6788 
6789 	/* Build new domains */
6790 	for (i = 0; i < ndoms_new; i++) {
6791 		for (j = 0; j < ndoms_cur && !new_topology; j++) {
6792 			if (cpumask_equal(doms_new[i], doms_cur[j])
6793 			    && dattrs_equal(dattr_new, i, dattr_cur, j))
6794 				goto match2;
6795 		}
6796 		/* no match - add a new doms_new */
6797 		build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
6798 match2:
6799 		;
6800 	}
6801 
6802 	/* Remember the new sched domains */
6803 	if (doms_cur != &fallback_doms)
6804 		free_sched_domains(doms_cur, ndoms_cur);
6805 	kfree(dattr_cur);	/* kfree(NULL) is safe */
6806 	doms_cur = doms_new;
6807 	dattr_cur = dattr_new;
6808 	ndoms_cur = ndoms_new;
6809 
6810 	register_sched_domain_sysctl();
6811 
6812 	mutex_unlock(&sched_domains_mutex);
6813 }
6814 
6815 /*
6816  * Update cpusets according to cpu_active mask.  If cpusets are
6817  * disabled, cpuset_update_active_cpus() becomes a simple wrapper
6818  * around partition_sched_domains().
6819  */
6820 static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
6821 			     void *hcpu)
6822 {
6823 	switch (action & ~CPU_TASKS_FROZEN) {
6824 	case CPU_ONLINE:
6825 	case CPU_DOWN_FAILED:
6826 		cpuset_update_active_cpus();
6827 		return NOTIFY_OK;
6828 	default:
6829 		return NOTIFY_DONE;
6830 	}
6831 }
6832 
6833 static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
6834 			       void *hcpu)
6835 {
6836 	switch (action & ~CPU_TASKS_FROZEN) {
6837 	case CPU_DOWN_PREPARE:
6838 		cpuset_update_active_cpus();
6839 		return NOTIFY_OK;
6840 	default:
6841 		return NOTIFY_DONE;
6842 	}
6843 }
6844 
6845 void __init sched_init_smp(void)
6846 {
6847 	cpumask_var_t non_isolated_cpus;
6848 
6849 	alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
6850 	alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
6851 
6852 	sched_init_numa();
6853 
6854 	get_online_cpus();
6855 	mutex_lock(&sched_domains_mutex);
6856 	init_sched_domains(cpu_active_mask);
6857 	cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
6858 	if (cpumask_empty(non_isolated_cpus))
6859 		cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
6860 	mutex_unlock(&sched_domains_mutex);
6861 	put_online_cpus();
6862 
6863 	hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
6864 	hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
6865 
6866 	/* RT runtime code needs to handle some hotplug events */
6867 	hotcpu_notifier(update_runtime, 0);
6868 
6869 	init_hrtick();
6870 
6871 	/* Move init over to a non-isolated CPU */
6872 	if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
6873 		BUG();
6874 	sched_init_granularity();
6875 	free_cpumask_var(non_isolated_cpus);
6876 
6877 	init_sched_rt_class();
6878 }
6879 #else
6880 void __init sched_init_smp(void)
6881 {
6882 	sched_init_granularity();
6883 }
6884 #endif /* CONFIG_SMP */
6885 
6886 const_debug unsigned int sysctl_timer_migration = 1;
6887 
6888 int in_sched_functions(unsigned long addr)
6889 {
6890 	return in_lock_functions(addr) ||
6891 		(addr >= (unsigned long)__sched_text_start
6892 		&& addr < (unsigned long)__sched_text_end);
6893 }
6894 
6895 #ifdef CONFIG_CGROUP_SCHED
6896 struct task_group root_task_group;
6897 #endif
6898 
6899 DECLARE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
6900 
6901 void __init sched_init(void)
6902 {
6903 	int i, j;
6904 	unsigned long alloc_size = 0, ptr;
6905 
6906 #ifdef CONFIG_FAIR_GROUP_SCHED
6907 	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
6908 #endif
6909 #ifdef CONFIG_RT_GROUP_SCHED
6910 	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
6911 #endif
6912 #ifdef CONFIG_CPUMASK_OFFSTACK
6913 	alloc_size += num_possible_cpus() * cpumask_size();
6914 #endif
6915 	if (alloc_size) {
6916 		ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
6917 
6918 #ifdef CONFIG_FAIR_GROUP_SCHED
6919 		root_task_group.se = (struct sched_entity **)ptr;
6920 		ptr += nr_cpu_ids * sizeof(void **);
6921 
6922 		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
6923 		ptr += nr_cpu_ids * sizeof(void **);
6924 
6925 #endif /* CONFIG_FAIR_GROUP_SCHED */
6926 #ifdef CONFIG_RT_GROUP_SCHED
6927 		root_task_group.rt_se = (struct sched_rt_entity **)ptr;
6928 		ptr += nr_cpu_ids * sizeof(void **);
6929 
6930 		root_task_group.rt_rq = (struct rt_rq **)ptr;
6931 		ptr += nr_cpu_ids * sizeof(void **);
6932 
6933 #endif /* CONFIG_RT_GROUP_SCHED */
6934 #ifdef CONFIG_CPUMASK_OFFSTACK
6935 		for_each_possible_cpu(i) {
6936 			per_cpu(load_balance_tmpmask, i) = (void *)ptr;
6937 			ptr += cpumask_size();
6938 		}
6939 #endif /* CONFIG_CPUMASK_OFFSTACK */
6940 	}
6941 
6942 #ifdef CONFIG_SMP
6943 	init_defrootdomain();
6944 #endif
6945 
6946 	init_rt_bandwidth(&def_rt_bandwidth,
6947 			global_rt_period(), global_rt_runtime());
6948 
6949 #ifdef CONFIG_RT_GROUP_SCHED
6950 	init_rt_bandwidth(&root_task_group.rt_bandwidth,
6951 			global_rt_period(), global_rt_runtime());
6952 #endif /* CONFIG_RT_GROUP_SCHED */
6953 
6954 #ifdef CONFIG_CGROUP_SCHED
6955 	list_add(&root_task_group.list, &task_groups);
6956 	INIT_LIST_HEAD(&root_task_group.children);
6957 	INIT_LIST_HEAD(&root_task_group.siblings);
6958 	autogroup_init(&init_task);
6959 
6960 #endif /* CONFIG_CGROUP_SCHED */
6961 
6962 #ifdef CONFIG_CGROUP_CPUACCT
6963 	root_cpuacct.cpustat = &kernel_cpustat;
6964 	root_cpuacct.cpuusage = alloc_percpu(u64);
6965 	/* Too early, not expected to fail */
6966 	BUG_ON(!root_cpuacct.cpuusage);
6967 #endif
6968 	for_each_possible_cpu(i) {
6969 		struct rq *rq;
6970 
6971 		rq = cpu_rq(i);
6972 		raw_spin_lock_init(&rq->lock);
6973 		rq->nr_running = 0;
6974 		rq->calc_load_active = 0;
6975 		rq->calc_load_update = jiffies + LOAD_FREQ;
6976 		init_cfs_rq(&rq->cfs);
6977 		init_rt_rq(&rq->rt, rq);
6978 #ifdef CONFIG_FAIR_GROUP_SCHED
6979 		root_task_group.shares = ROOT_TASK_GROUP_LOAD;
6980 		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
6981 		/*
6982 		 * How much cpu bandwidth does root_task_group get?
6983 		 *
6984 		 * In the case of task-groups formed through the cgroup filesystem, it
6985 		 * gets 100% of the cpu resources in the system. This overall
6986 		 * system cpu resource is divided among the tasks of
6987 		 * root_task_group and its child task-groups in a fair manner,
6988 		 * based on each entity's (task or task-group's) weight
6989 		 * (se->load.weight).
6990 		 *
6991 		 * In other words, if root_task_group has 10 tasks of weight
6992 		 * 1024 and two child groups A0 and A1 (of weight 1024 each),
6993 		 * then A0's share of the cpu resource is:
6994 		 *
6995 		 *	A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
6996 		 *
6997 		 * We achieve this by letting root_task_group's tasks sit
6998 		 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
6999 		 */
7000 		init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
7001 		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
7002 #endif /* CONFIG_FAIR_GROUP_SCHED */
7003 
7004 		rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
7005 #ifdef CONFIG_RT_GROUP_SCHED
7006 		INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
7007 		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
7008 #endif
7009 
7010 		for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
7011 			rq->cpu_load[j] = 0;
7012 
7013 		rq->last_load_update_tick = jiffies;
7014 
7015 #ifdef CONFIG_SMP
7016 		rq->sd = NULL;
7017 		rq->rd = NULL;
7018 		rq->cpu_power = SCHED_POWER_SCALE;
7019 		rq->post_schedule = 0;
7020 		rq->active_balance = 0;
7021 		rq->next_balance = jiffies;
7022 		rq->push_cpu = 0;
7023 		rq->cpu = i;
7024 		rq->online = 0;
7025 		rq->idle_stamp = 0;
7026 		rq->avg_idle = 2*sysctl_sched_migration_cost;
7027 
7028 		INIT_LIST_HEAD(&rq->cfs_tasks);
7029 
7030 		rq_attach_root(rq, &def_root_domain);
7031 #ifdef CONFIG_NO_HZ
7032 		rq->nohz_flags = 0;
7033 #endif
7034 #endif
7035 		init_rq_hrtick(rq);
7036 		atomic_set(&rq->nr_iowait, 0);
7037 	}
7038 
7039 	set_load_weight(&init_task);
7040 
7041 #ifdef CONFIG_PREEMPT_NOTIFIERS
7042 	INIT_HLIST_HEAD(&init_task.preempt_notifiers);
7043 #endif
7044 
7045 #ifdef CONFIG_RT_MUTEXES
7046 	plist_head_init(&init_task.pi_waiters);
7047 #endif
7048 
7049 	/*
7050 	 * The boot idle thread does lazy MMU switching as well:
7051 	 */
7052 	atomic_inc(&init_mm.mm_count);
7053 	enter_lazy_tlb(&init_mm, current);
7054 
7055 	/*
7056 	 * Make us the idle thread. Technically, schedule() should not be
7057 	 * called from this thread; however, somewhere below it might be,
7058 	 * but because we are the idle thread, we just pick up running again
7059 	 * when this runqueue becomes "idle".
7060 	 */
7061 	init_idle(current, smp_processor_id());
7062 
7063 	calc_load_update = jiffies + LOAD_FREQ;
7064 
7065 	/*
7066 	 * During early bootup we pretend to be a normal task:
7067 	 */
7068 	current->sched_class = &fair_sched_class;
7069 
7070 #ifdef CONFIG_SMP
7071 	zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
7072 	/* May be allocated at isolcpus cmdline parse time */
7073 	if (cpu_isolated_map == NULL)
7074 		zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
7075 	idle_thread_set_boot_cpu();
7076 #endif
7077 	init_sched_fair_class();
7078 
7079 	scheduler_running = 1;
7080 }
7081 
7082 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
7083 static inline int preempt_count_equals(int preempt_offset)
7084 {
7085 	int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
7086 
7087 	return (nested == preempt_offset);
7088 }
7089 
7090 void __might_sleep(const char *file, int line, int preempt_offset)
7091 {
7092 	static unsigned long prev_jiffy;	/* ratelimiting */
7093 
7094 	rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
7095 	if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
7096 	    system_state != SYSTEM_RUNNING || oops_in_progress)
7097 		return;
7098 	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
7099 		return;
7100 	prev_jiffy = jiffies;
7101 
7102 	printk(KERN_ERR
7103 		"BUG: sleeping function called from invalid context at %s:%d\n",
7104 			file, line);
7105 	printk(KERN_ERR
7106 		"in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
7107 			in_atomic(), irqs_disabled(),
7108 			current->pid, current->comm);
7109 
7110 	debug_show_held_locks(current);
7111 	if (irqs_disabled())
7112 		print_irqtrace_events(current);
7113 	dump_stack();
7114 }
7115 EXPORT_SYMBOL(__might_sleep);
7116 #endif
7117 
7118 #ifdef CONFIG_MAGIC_SYSRQ
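/* Reset @p to SCHED_NORMAL, requeueing it if it was runnable. */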
7119 static void normalize_task(struct rq *rq, struct task_struct *p)
7120 {
7121 	const struct sched_class *prev_class = p->sched_class;
7122 	int old_prio = p->prio;
7123 	int on_rq;
7124 
7125 	on_rq = p->on_rq;
7126 	if (on_rq)
7127 		dequeue_task(rq, p, 0);
7128 	__setscheduler(rq, p, SCHED_NORMAL, 0);
7129 	if (on_rq) {
7130 		enqueue_task(rq, p, 0);
7131 		resched_task(rq->curr);
7132 	}
7133 
7134 	check_class_changed(rq, p, prev_class, old_prio);
7135 }
7136 
7137 void normalize_rt_tasks(void)
7138 {
7139 	struct task_struct *g, *p;
7140 	unsigned long flags;
7141 	struct rq *rq;
7142 
7143 	read_lock_irqsave(&tasklist_lock, flags);
7144 	do_each_thread(g, p) {
7145 		/*
7146 		 * Only normalize user tasks:
7147 		 */
7148 		if (!p->mm)
7149 			continue;
7150 
7151 		p->se.exec_start		= 0;
7152 #ifdef CONFIG_SCHEDSTATS
7153 		p->se.statistics.wait_start	= 0;
7154 		p->se.statistics.sleep_start	= 0;
7155 		p->se.statistics.block_start	= 0;
7156 #endif
7157 
7158 		if (!rt_task(p)) {
7159 			/*
7160 			 * Renice negative nice level userspace
7161 			 * tasks back to 0:
7162 			 */
7163 			if (TASK_NICE(p) < 0 && p->mm)
7164 				set_user_nice(p, 0);
7165 			continue;
7166 		}
7167 
7168 		raw_spin_lock(&p->pi_lock);
7169 		rq = __task_rq_lock(p);
7170 
7171 		normalize_task(rq, p);
7172 
7173 		__task_rq_unlock(rq);
7174 		raw_spin_unlock(&p->pi_lock);
7175 	} while_each_thread(g, p);
7176 
7177 	read_unlock_irqrestore(&tasklist_lock, flags);
7178 }
7179 
7180 #endif /* CONFIG_MAGIC_SYSRQ */
7181 
7182 #if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
7183 /*
7184  * These functions are only useful for the IA64 MCA handling, or kdb.
7185  *
7186  * They can only be called when the whole system has been
7187  * stopped - every CPU needs to be quiescent, and no scheduling
7188  * activity can take place. Using them for anything else would
7189  * be a serious bug, and as a result, they aren't even visible
7190  * under any other configuration.
7191  */
7192 
7193 /**
7194  * curr_task - return the current task for a given cpu.
7195  * @cpu: the processor in question.
7196  *
7197  * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
7198  */
7199 struct task_struct *curr_task(int cpu)
7200 {
7201 	return cpu_curr(cpu);
7202 }
7203 
7204 #endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
7205 
7206 #ifdef CONFIG_IA64
7207 /**
7208  * set_curr_task - set the current task for a given cpu.
7209  * @cpu: the processor in question.
7210  * @p: the task pointer to set.
7211  *
7212  * Description: This function must only be used when non-maskable interrupts
7213  * are serviced on a separate stack. It allows the architecture to switch the
7214  * notion of the current task on a cpu in a non-blocking manner. This function
7215  * must be called with all CPUs synchronized and interrupts disabled; the
7216  * caller must save the original value of the current task (see
7217  * curr_task() above) and restore that value before re-enabling interrupts and
7218  * restarting the system.
7219  *
7220  * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
7221  */
7222 void set_curr_task(int cpu, struct task_struct *p)
7223 {
7224 	cpu_curr(cpu) = p;
7225 }
7226 
7227 #endif
7228 
7229 #ifdef CONFIG_CGROUP_SCHED
7230 /* task_group_lock serializes the addition/removal of task groups */
7231 static DEFINE_SPINLOCK(task_group_lock);
7232 
7233 static void free_sched_group(struct task_group *tg)
7234 {
7235 	free_fair_sched_group(tg);
7236 	free_rt_sched_group(tg);
7237 	autogroup_free(tg);
7238 	kfree(tg);
7239 }
7240 
7241 /* allocate runqueue etc for a new task group */
7242 struct task_group *sched_create_group(struct task_group *parent)
7243 {
7244 	struct task_group *tg;
7245 	unsigned long flags;
7246 
7247 	tg = kzalloc(sizeof(*tg), GFP_KERNEL);
7248 	if (!tg)
7249 		return ERR_PTR(-ENOMEM);
7250 
7251 	if (!alloc_fair_sched_group(tg, parent))
7252 		goto err;
7253 
7254 	if (!alloc_rt_sched_group(tg, parent))
7255 		goto err;
7256 
7257 	spin_lock_irqsave(&task_group_lock, flags);
7258 	list_add_rcu(&tg->list, &task_groups);
7259 
7260 	WARN_ON(!parent); /* root should already exist */
7261 
7262 	tg->parent = parent;
7263 	INIT_LIST_HEAD(&tg->children);
7264 	list_add_rcu(&tg->siblings, &parent->children);
7265 	spin_unlock_irqrestore(&task_group_lock, flags);
7266 
7267 	return tg;
7268 
7269 err:
7270 	free_sched_group(tg);
7271 	return ERR_PTR(-ENOMEM);
7272 }
7273 
7274 /* rcu callback to free various structures associated with a task group */
7275 static void free_sched_group_rcu(struct rcu_head *rhp)
7276 {
7277 	/* now it should be safe to free those cfs_rqs */
7278 	free_sched_group(container_of(rhp, struct task_group, rcu));
7279 }
7280 
7281 /* Destroy runqueue etc associated with a task group */
7282 void sched_destroy_group(struct task_group *tg)
7283 {
7284 	unsigned long flags;
7285 	int i;
7286 
7287 	/* end participation in shares distribution */
7288 	for_each_possible_cpu(i)
7289 		unregister_fair_sched_group(tg, i);
7290 
7291 	spin_lock_irqsave(&task_group_lock, flags);
7292 	list_del_rcu(&tg->list);
7293 	list_del_rcu(&tg->siblings);
7294 	spin_unlock_irqrestore(&task_group_lock, flags);
7295 
7296 	/* wait for possible concurrent references to cfs_rqs to complete */
7297 	call_rcu(&tg->rcu, free_sched_group_rcu);
7298 }
7299 
7300 /* Change a task's runqueue when it moves between groups.
7301  *	The caller of this function should have put the task in its new group
7302  *	by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
7303  *	reflect its new group.
7304  */
7305 void sched_move_task(struct task_struct *tsk)
7306 {
7307 	int on_rq, running;
7308 	unsigned long flags;
7309 	struct rq *rq;
7310 
7311 	rq = task_rq_lock(tsk, &flags);
7312 
7313 	running = task_current(rq, tsk);
7314 	on_rq = tsk->on_rq;
7315 
7316 	if (on_rq)
7317 		dequeue_task(rq, tsk, 0);
7318 	if (unlikely(running))
7319 		tsk->sched_class->put_prev_task(rq, tsk);
7320 
7321 #ifdef CONFIG_FAIR_GROUP_SCHED
7322 	if (tsk->sched_class->task_move_group)
7323 		tsk->sched_class->task_move_group(tsk, on_rq);
7324 	else
7325 #endif
7326 		set_task_rq(tsk, task_cpu(tsk));
7327 
7328 	if (unlikely(running))
7329 		tsk->sched_class->set_curr_task(rq);
7330 	if (on_rq)
7331 		enqueue_task(rq, tsk, 0);
7332 
7333 	task_rq_unlock(rq, tsk, &flags);
7334 }
7335 #endif /* CONFIG_CGROUP_SCHED */
7336 
7337 #if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_CFS_BANDWIDTH)
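/*
 * Convert a runtime/period pair into a 20-bit fixed-point utilization
 * ratio, e.g. runtime == period/2 yields 1 << 19, and RUNTIME_INF maps
 * to 1 << 20 (100%).
 */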
7338 static unsigned long to_ratio(u64 period, u64 runtime)
7339 {
7340 	if (runtime == RUNTIME_INF)
7341 		return 1ULL << 20;
7342 
7343 	return div64_u64(runtime << 20, period);
7344 }
7345 #endif
7346 
7347 #ifdef CONFIG_RT_GROUP_SCHED
7348 /*
7349  * Ensure that the real time constraints are schedulable.
7350  */
7351 static DEFINE_MUTEX(rt_constraints_mutex);
7352 
7353 /* Must be called with tasklist_lock held */
7354 static inline int tg_has_rt_tasks(struct task_group *tg)
7355 {
7356 	struct task_struct *g, *p;
7357 
7358 	do_each_thread(g, p) {
7359 		if (rt_task(p) && task_rq(p)->rt.tg == tg)
7360 			return 1;
7361 	} while_each_thread(g, p);
7362 
7363 	return 0;
7364 }
7365 
7366 struct rt_schedulable_data {
7367 	struct task_group *tg;
7368 	u64 rt_period;
7369 	u64 rt_runtime;
7370 };
7371 
7372 static int tg_rt_schedulable(struct task_group *tg, void *data)
7373 {
7374 	struct rt_schedulable_data *d = data;
7375 	struct task_group *child;
7376 	unsigned long total, sum = 0;
7377 	u64 period, runtime;
7378 
7379 	period = ktime_to_ns(tg->rt_bandwidth.rt_period);
7380 	runtime = tg->rt_bandwidth.rt_runtime;
7381 
7382 	if (tg == d->tg) {
7383 		period = d->rt_period;
7384 		runtime = d->rt_runtime;
7385 	}
7386 
7387 	/*
7388 	 * Cannot have more runtime than the period.
7389 	 */
7390 	if (runtime > period && runtime != RUNTIME_INF)
7391 		return -EINVAL;
7392 
7393 	/*
7394 	 * Ensure we don't starve existing RT tasks.
7395 	 */
7396 	if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
7397 		return -EBUSY;
7398 
7399 	total = to_ratio(period, runtime);
7400 
7401 	/*
7402 	 * Nobody can have more than the global setting allows.
7403 	 */
7404 	if (total > to_ratio(global_rt_period(), global_rt_runtime()))
7405 		return -EINVAL;
7406 
7407 	/*
7408 	 * The sum of our children's runtime should not exceed our own.
7409 	 */
7410 	list_for_each_entry_rcu(child, &tg->children, siblings) {
7411 		period = ktime_to_ns(child->rt_bandwidth.rt_period);
7412 		runtime = child->rt_bandwidth.rt_runtime;
7413 
7414 		if (child == d->tg) {
7415 			period = d->rt_period;
7416 			runtime = d->rt_runtime;
7417 		}
7418 
7419 		sum += to_ratio(period, runtime);
7420 	}
7421 
7422 	if (sum > total)
7423 		return -EINVAL;
7424 
7425 	return 0;
7426 }
7427 
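/*
 * Check whether the proposed (@period, @runtime) for @tg keeps the whole
 * task-group hierarchy schedulable.
 */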
7428 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
7429 {
7430 	int ret;
7431 
7432 	struct rt_schedulable_data data = {
7433 		.tg = tg,
7434 		.rt_period = period,
7435 		.rt_runtime = runtime,
7436 	};
7437 
7438 	rcu_read_lock();
7439 	ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
7440 	rcu_read_unlock();
7441 
7442 	return ret;
7443 }
7444 
7445 static int tg_set_rt_bandwidth(struct task_group *tg,
7446 		u64 rt_period, u64 rt_runtime)
7447 {
7448 	int i, err = 0;
7449 
7450 	mutex_lock(&rt_constraints_mutex);
7451 	read_lock(&tasklist_lock);
7452 	err = __rt_schedulable(tg, rt_period, rt_runtime);
7453 	if (err)
7454 		goto unlock;
7455 
7456 	raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
7457 	tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
7458 	tg->rt_bandwidth.rt_runtime = rt_runtime;
7459 
7460 	for_each_possible_cpu(i) {
7461 		struct rt_rq *rt_rq = tg->rt_rq[i];
7462 
7463 		raw_spin_lock(&rt_rq->rt_runtime_lock);
7464 		rt_rq->rt_runtime = rt_runtime;
7465 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
7466 	}
7467 	raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
7468 unlock:
7469 	read_unlock(&tasklist_lock);
7470 	mutex_unlock(&rt_constraints_mutex);
7471 
7472 	return err;
7473 }
7474 
7475 int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
7476 {
7477 	u64 rt_runtime, rt_period;
7478 
7479 	rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
7480 	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
7481 	if (rt_runtime_us < 0)
7482 		rt_runtime = RUNTIME_INF;
7483 
7484 	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
7485 }
7486 
7487 long sched_group_rt_runtime(struct task_group *tg)
7488 {
7489 	u64 rt_runtime_us;
7490 
7491 	if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
7492 		return -1;
7493 
7494 	rt_runtime_us = tg->rt_bandwidth.rt_runtime;
7495 	do_div(rt_runtime_us, NSEC_PER_USEC);
7496 	return rt_runtime_us;
7497 }
7498 
7499 int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
7500 {
7501 	u64 rt_runtime, rt_period;
7502 
7503 	rt_period = (u64)rt_period_us * NSEC_PER_USEC;
7504 	rt_runtime = tg->rt_bandwidth.rt_runtime;
7505 
7506 	if (rt_period == 0)
7507 		return -EINVAL;
7508 
7509 	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
7510 }
7511 
7512 long sched_group_rt_period(struct task_group *tg)
7513 {
7514 	u64 rt_period_us;
7515 
7516 	rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
7517 	do_div(rt_period_us, NSEC_PER_USEC);
7518 	return rt_period_us;
7519 }
7520 
7521 static int sched_rt_global_constraints(void)
7522 {
7523 	u64 runtime, period;
7524 	int ret = 0;
7525 
7526 	if (sysctl_sched_rt_period <= 0)
7527 		return -EINVAL;
7528 
7529 	runtime = global_rt_runtime();
7530 	period = global_rt_period();
7531 
7532 	/*
7533 	 * Sanity check on the sysctl variables.
7534 	 */
7535 	if (runtime > period && runtime != RUNTIME_INF)
7536 		return -EINVAL;
7537 
7538 	mutex_lock(&rt_constraints_mutex);
7539 	read_lock(&tasklist_lock);
7540 	ret = __rt_schedulable(NULL, 0, 0);
7541 	read_unlock(&tasklist_lock);
7542 	mutex_unlock(&rt_constraints_mutex);
7543 
7544 	return ret;
7545 }
7546 
7547 int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
7548 {
7549 	/* Don't accept realtime tasks when there is no way for them to run */
7550 	if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
7551 		return 0;
7552 
7553 	return 1;
7554 }
7555 
7556 #else /* !CONFIG_RT_GROUP_SCHED */
7557 static int sched_rt_global_constraints(void)
7558 {
7559 	unsigned long flags;
7560 	int i;
7561 
7562 	if (sysctl_sched_rt_period <= 0)
7563 		return -EINVAL;
7564 
7565 	/*
7566 	 * There are always some RT tasks in the root group
7567 	 * -- migration, kstopmachine etc.
7568 	 */
7569 	if (sysctl_sched_rt_runtime == 0)
7570 		return -EBUSY;
7571 
7572 	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
7573 	for_each_possible_cpu(i) {
7574 		struct rt_rq *rt_rq = &cpu_rq(i)->rt;
7575 
7576 		raw_spin_lock(&rt_rq->rt_runtime_lock);
7577 		rt_rq->rt_runtime = global_rt_runtime();
7578 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
7579 	}
7580 	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
7581 
7582 	return 0;
7583 }
7584 #endif /* CONFIG_RT_GROUP_SCHED */
7585 
7586 int sched_rt_handler(struct ctl_table *table, int write,
7587 		void __user *buffer, size_t *lenp,
7588 		loff_t *ppos)
7589 {
7590 	int ret;
7591 	int old_period, old_runtime;
7592 	static DEFINE_MUTEX(mutex);
7593 
7594 	mutex_lock(&mutex);
7595 	old_period = sysctl_sched_rt_period;
7596 	old_runtime = sysctl_sched_rt_runtime;
7597 
7598 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
7599 
7600 	if (!ret && write) {
7601 		ret = sched_rt_global_constraints();
7602 		if (ret) {
7603 			sysctl_sched_rt_period = old_period;
7604 			sysctl_sched_rt_runtime = old_runtime;
7605 		} else {
7606 			def_rt_bandwidth.rt_runtime = global_rt_runtime();
7607 			def_rt_bandwidth.rt_period =
7608 				ns_to_ktime(global_rt_period());
7609 		}
7610 	}
7611 	mutex_unlock(&mutex);
7612 
7613 	return ret;
7614 }
7615 
7616 #ifdef CONFIG_CGROUP_SCHED
7617 
7618 /* return corresponding task_group object of a cgroup */
7619 static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
7620 {
7621 	return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
7622 			    struct task_group, css);
7623 }
7624 
7625 static struct cgroup_subsys_state *cpu_cgroup_create(struct cgroup *cgrp)
7626 {
7627 	struct task_group *tg, *parent;
7628 
7629 	if (!cgrp->parent) {
7630 		/* This is early initialization for the top cgroup */
7631 		return &root_task_group.css;
7632 	}
7633 
7634 	parent = cgroup_tg(cgrp->parent);
7635 	tg = sched_create_group(parent);
7636 	if (IS_ERR(tg))
7637 		return ERR_PTR(-ENOMEM);
7638 
7639 	return &tg->css;
7640 }
7641 
7642 static void cpu_cgroup_destroy(struct cgroup *cgrp)
7643 {
7644 	struct task_group *tg = cgroup_tg(cgrp);
7645 
7646 	sched_destroy_group(tg);
7647 }
7648 
7649 static int cpu_cgroup_can_attach(struct cgroup *cgrp,
7650 				 struct cgroup_taskset *tset)
7651 {
7652 	struct task_struct *task;
7653 
7654 	cgroup_taskset_for_each(task, cgrp, tset) {
7655 #ifdef CONFIG_RT_GROUP_SCHED
7656 		if (!sched_rt_can_attach(cgroup_tg(cgrp), task))
7657 			return -EINVAL;
7658 #else
7659 		/* We don't support RT-tasks being in separate groups */
7660 		if (task->sched_class != &fair_sched_class)
7661 			return -EINVAL;
7662 #endif
7663 	}
7664 	return 0;
7665 }
7666 
7667 static void cpu_cgroup_attach(struct cgroup *cgrp,
7668 			      struct cgroup_taskset *tset)
7669 {
7670 	struct task_struct *task;
7671 
7672 	cgroup_taskset_for_each(task, cgrp, tset)
7673 		sched_move_task(task);
7674 }
7675 
7676 static void
7677 cpu_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
7678 		struct task_struct *task)
7679 {
7680 	/*
7681 	 * cgroup_exit() is called in the copy_process() failure path.
7682 	 * Ignore this case since the task hasn't run yet; this avoids
7683 	 * trying to poke a half-freed task state from generic code.
7684 	 */
7685 	if (!(task->flags & PF_EXITING))
7686 		return;
7687 
7688 	sched_move_task(task);
7689 }
7690 
7691 #ifdef CONFIG_FAIR_GROUP_SCHED
7692 static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
7693 				u64 shareval)
7694 {
7695 	return sched_group_set_shares(cgroup_tg(cgrp), scale_load(shareval));
7696 }
7697 
7698 static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
7699 {
7700 	struct task_group *tg = cgroup_tg(cgrp);
7701 
7702 	return (u64) scale_load_down(tg->shares);
7703 }
7704 
7705 #ifdef CONFIG_CFS_BANDWIDTH
7706 static DEFINE_MUTEX(cfs_constraints_mutex);
7707 
7708 const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
7709 const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
7710 
7711 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
7712 
7713 static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
7714 {
7715 	int i, ret = 0, runtime_enabled, runtime_was_enabled;
7716 	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
7717 
7718 	if (tg == &root_task_group)
7719 		return -EINVAL;
7720 
7721 	/*
7722 	 * Ensure we have at least some amount of bandwidth every period.  This is
7723 	 * to prevent reaching a state of large arrears when throttled via
7724 	 * entity_tick() resulting in prolonged exit starvation.
7725 	 */
7726 	if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
7727 		return -EINVAL;
7728 
7729 	/*
7730 	 * Likewise, bound things on the other side by preventing insane quota
7731 	 * periods.  This also allows us to normalize in computing quota
7732 	 * feasibility.
7733 	 */
7734 	if (period > max_cfs_quota_period)
7735 		return -EINVAL;
7736 
7737 	mutex_lock(&cfs_constraints_mutex);
7738 	ret = __cfs_schedulable(tg, period, quota);
7739 	if (ret)
7740 		goto out_unlock;
7741 
7742 	runtime_enabled = quota != RUNTIME_INF;
7743 	runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
7744 	account_cfs_bandwidth_used(runtime_enabled, runtime_was_enabled);
7745 	raw_spin_lock_irq(&cfs_b->lock);
7746 	cfs_b->period = ns_to_ktime(period);
7747 	cfs_b->quota = quota;
7748 
7749 	__refill_cfs_bandwidth_runtime(cfs_b);
7750 	/* restart the period timer (if active) to handle new period expiry */
7751 	if (runtime_enabled && cfs_b->timer_active) {
7752 		/* force a reprogram */
7753 		cfs_b->timer_active = 0;
7754 		__start_cfs_bandwidth(cfs_b);
7755 	}
7756 	raw_spin_unlock_irq(&cfs_b->lock);
7757 
7758 	for_each_possible_cpu(i) {
7759 		struct cfs_rq *cfs_rq = tg->cfs_rq[i];
7760 		struct rq *rq = cfs_rq->rq;
7761 
7762 		raw_spin_lock_irq(&rq->lock);
7763 		cfs_rq->runtime_enabled = runtime_enabled;
7764 		cfs_rq->runtime_remaining = 0;
7765 
7766 		if (cfs_rq->throttled)
7767 			unthrottle_cfs_rq(cfs_rq);
7768 		raw_spin_unlock_irq(&rq->lock);
7769 	}
7770 out_unlock:
7771 	mutex_unlock(&cfs_constraints_mutex);
7772 
7773 	return ret;
7774 }
7775 
7776 int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
7777 {
7778 	u64 quota, period;
7779 
7780 	period = ktime_to_ns(tg->cfs_bandwidth.period);
7781 	if (cfs_quota_us < 0)
7782 		quota = RUNTIME_INF;
7783 	else
7784 		quota = (u64)cfs_quota_us * NSEC_PER_USEC;
7785 
7786 	return tg_set_cfs_bandwidth(tg, period, quota);
7787 }
7788 
7789 long tg_get_cfs_quota(struct task_group *tg)
7790 {
7791 	u64 quota_us;
7792 
7793 	if (tg->cfs_bandwidth.quota == RUNTIME_INF)
7794 		return -1;
7795 
7796 	quota_us = tg->cfs_bandwidth.quota;
7797 	do_div(quota_us, NSEC_PER_USEC);
7798 
7799 	return quota_us;
7800 }
7801 
7802 int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
7803 {
7804 	u64 quota, period;
7805 
7806 	period = (u64)cfs_period_us * NSEC_PER_USEC;
7807 	quota = tg->cfs_bandwidth.quota;
7808 
7809 	return tg_set_cfs_bandwidth(tg, period, quota);
7810 }
7811 
7812 long tg_get_cfs_period(struct task_group *tg)
7813 {
7814 	u64 cfs_period_us;
7815 
7816 	cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
7817 	do_div(cfs_period_us, NSEC_PER_USEC);
7818 
7819 	return cfs_period_us;
7820 }
7821 
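/*
 * The helpers above back the cpu.cfs_quota_us and cpu.cfs_period_us control
 * files declared in cpu_files[] below.  An illustrative usage sketch from
 * userspace (assuming the cpu controller is mounted at /sys/fs/cgroup/cpu):
 *
 *   # allow "grp" 50ms of runtime every 100ms (i.e. half a CPU):
 *   echo 100000 > /sys/fs/cgroup/cpu/grp/cpu.cfs_period_us
 *   echo 50000  > /sys/fs/cgroup/cpu/grp/cpu.cfs_quota_us
 *
 *   # writing a negative quota removes the limit (RUNTIME_INF):
 *   echo -1 > /sys/fs/cgroup/cpu/grp/cpu.cfs_quota_us
 */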
7822 static s64 cpu_cfs_quota_read_s64(struct cgroup *cgrp, struct cftype *cft)
7823 {
7824 	return tg_get_cfs_quota(cgroup_tg(cgrp));
7825 }
7826 
7827 static int cpu_cfs_quota_write_s64(struct cgroup *cgrp, struct cftype *cftype,
7828 				s64 cfs_quota_us)
7829 {
7830 	return tg_set_cfs_quota(cgroup_tg(cgrp), cfs_quota_us);
7831 }
7832 
7833 static u64 cpu_cfs_period_read_u64(struct cgroup *cgrp, struct cftype *cft)
7834 {
7835 	return tg_get_cfs_period(cgroup_tg(cgrp));
7836 }
7837 
7838 static int cpu_cfs_period_write_u64(struct cgroup *cgrp, struct cftype *cftype,
7839 				u64 cfs_period_us)
7840 {
7841 	return tg_set_cfs_period(cgroup_tg(cgrp), cfs_period_us);
7842 }
7843 
7844 struct cfs_schedulable_data {
7845 	struct task_group *tg;
7846 	u64 period, quota;
7847 };
7848 
7849 /*
7850  * normalize group quota/period to be quota/max_period
7851  * note: units are usecs
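 * (this lets groups using different periods be compared on a common scale)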
7852  */
7853 static u64 normalize_cfs_quota(struct task_group *tg,
7854 			       struct cfs_schedulable_data *d)
7855 {
7856 	u64 quota, period;
7857 
7858 	if (tg == d->tg) {
7859 		period = d->period;
7860 		quota = d->quota;
7861 	} else {
7862 		period = tg_get_cfs_period(tg);
7863 		quota = tg_get_cfs_quota(tg);
7864 	}
7865 
7866 	/* note: these should typically be equivalent */
7867 	if (quota == RUNTIME_INF || quota == -1)
7868 		return RUNTIME_INF;
7869 
7870 	return to_ratio(period, quota);
7871 }
7872 
7873 static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
7874 {
7875 	struct cfs_schedulable_data *d = data;
7876 	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
7877 	s64 quota = 0, parent_quota = -1;
7878 
7879 	if (!tg->parent) {
7880 		quota = RUNTIME_INF;
7881 	} else {
7882 		struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
7883 
7884 		quota = normalize_cfs_quota(tg, d);
7885 		parent_quota = parent_b->hierarchal_quota;
7886 
7887 		/*
7888 		 * ensure max(child_quota) <= parent_quota; inherit the parent's
7889 		 * quota when no limit is set
7890 		 */
7891 		if (quota == RUNTIME_INF)
7892 			quota = parent_quota;
7893 		else if (parent_quota != RUNTIME_INF && quota > parent_quota)
7894 			return -EINVAL;
7895 	}
7896 	cfs_b->hierarchal_quota = quota;
7897 
7898 	return 0;
7899 }
7900 
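/*
 * Validate that (period, quota) keeps the hierarchy feasible: walk the
 * task_group tree top-down and check, at every level, that no child's
 * normalized quota exceeds its parent's.  Period and quota are converted to
 * usecs first so they use the same units as tg_get_cfs_period() and
 * tg_get_cfs_quota(), which normalize_cfs_quota() consults for every group
 * other than the one being changed.
 */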
7901 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
7902 {
7903 	int ret;
7904 	struct cfs_schedulable_data data = {
7905 		.tg = tg,
7906 		.period = period,
7907 		.quota = quota,
7908 	};
7909 
7910 	if (quota != RUNTIME_INF) {
7911 		do_div(data.period, NSEC_PER_USEC);
7912 		do_div(data.quota, NSEC_PER_USEC);
7913 	}
7914 
7915 	rcu_read_lock();
7916 	ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
7917 	rcu_read_unlock();
7918 
7919 	return ret;
7920 }
7921 
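/*
 * Bandwidth statistics exported through the cpu.stat control file:
 * nr_periods and nr_throttled are counts, throttled_time is in nanoseconds.
 * Example output (values are purely illustrative):
 *
 *   nr_periods 1042
 *   nr_throttled 13
 *   throttled_time 154000000
 */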
7922 static int cpu_stats_show(struct cgroup *cgrp, struct cftype *cft,
7923 		struct cgroup_map_cb *cb)
7924 {
7925 	struct task_group *tg = cgroup_tg(cgrp);
7926 	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
7927 
7928 	cb->fill(cb, "nr_periods", cfs_b->nr_periods);
7929 	cb->fill(cb, "nr_throttled", cfs_b->nr_throttled);
7930 	cb->fill(cb, "throttled_time", cfs_b->throttled_time);
7931 
7932 	return 0;
7933 }
7934 #endif /* CONFIG_CFS_BANDWIDTH */
7935 #endif /* CONFIG_FAIR_GROUP_SCHED */
7936 
7937 #ifdef CONFIG_RT_GROUP_SCHED
7938 static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
7939 				s64 val)
7940 {
7941 	return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
7942 }
7943 
7944 static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
7945 {
7946 	return sched_group_rt_runtime(cgroup_tg(cgrp));
7947 }
7948 
7949 static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
7950 		u64 rt_period_us)
7951 {
7952 	return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
7953 }
7954 
7955 static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
7956 {
7957 	return sched_group_rt_period(cgroup_tg(cgrp));
7958 }
7959 #endif /* CONFIG_RT_GROUP_SCHED */
7960 
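/*
 * Control files for the cpu controller.  cgroup prepends the subsystem name,
 * so userspace sees these as cpu.shares, cpu.cfs_quota_us, cpu.cfs_period_us,
 * cpu.stat, cpu.rt_runtime_us and cpu.rt_period_us, depending on which of
 * the config options above are enabled.
 */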
7961 static struct cftype cpu_files[] = {
7962 #ifdef CONFIG_FAIR_GROUP_SCHED
7963 	{
7964 		.name = "shares",
7965 		.read_u64 = cpu_shares_read_u64,
7966 		.write_u64 = cpu_shares_write_u64,
7967 	},
7968 #endif
7969 #ifdef CONFIG_CFS_BANDWIDTH
7970 	{
7971 		.name = "cfs_quota_us",
7972 		.read_s64 = cpu_cfs_quota_read_s64,
7973 		.write_s64 = cpu_cfs_quota_write_s64,
7974 	},
7975 	{
7976 		.name = "cfs_period_us",
7977 		.read_u64 = cpu_cfs_period_read_u64,
7978 		.write_u64 = cpu_cfs_period_write_u64,
7979 	},
7980 	{
7981 		.name = "stat",
7982 		.read_map = cpu_stats_show,
7983 	},
7984 #endif
7985 #ifdef CONFIG_RT_GROUP_SCHED
7986 	{
7987 		.name = "rt_runtime_us",
7988 		.read_s64 = cpu_rt_runtime_read,
7989 		.write_s64 = cpu_rt_runtime_write,
7990 	},
7991 	{
7992 		.name = "rt_period_us",
7993 		.read_u64 = cpu_rt_period_read_uint,
7994 		.write_u64 = cpu_rt_period_write_uint,
7995 	},
7996 #endif
7997 	{ }	/* terminate */
7998 };
7999 
8000 struct cgroup_subsys cpu_cgroup_subsys = {
8001 	.name		= "cpu",
8002 	.create		= cpu_cgroup_create,
8003 	.destroy	= cpu_cgroup_destroy,
8004 	.can_attach	= cpu_cgroup_can_attach,
8005 	.attach		= cpu_cgroup_attach,
8006 	.exit		= cpu_cgroup_exit,
8007 	.subsys_id	= cpu_cgroup_subsys_id,
8008 	.base_cftypes	= cpu_files,
8009 	.early_init	= 1,
8010 };
8011 
8012 #endif	/* CONFIG_CGROUP_SCHED */
8013 
8014 #ifdef CONFIG_CGROUP_CPUACCT
8015 
8016 /*
8017  * CPU accounting code for task groups.
8018  *
8019  * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
8020  * (balbir@in.ibm.com).
8021  */
8022 
8023 /* create a new cpu accounting group */
8024 static struct cgroup_subsys_state *cpuacct_create(struct cgroup *cgrp)
8025 {
8026 	struct cpuacct *ca;
8027 
8028 	if (!cgrp->parent)
8029 		return &root_cpuacct.css;
8030 
8031 	ca = kzalloc(sizeof(*ca), GFP_KERNEL);
8032 	if (!ca)
8033 		goto out;
8034 
8035 	ca->cpuusage = alloc_percpu(u64);
8036 	if (!ca->cpuusage)
8037 		goto out_free_ca;
8038 
8039 	ca->cpustat = alloc_percpu(struct kernel_cpustat);
8040 	if (!ca->cpustat)
8041 		goto out_free_cpuusage;
8042 
8043 	return &ca->css;
8044 
8045 out_free_cpuusage:
8046 	free_percpu(ca->cpuusage);
8047 out_free_ca:
8048 	kfree(ca);
8049 out:
8050 	return ERR_PTR(-ENOMEM);
8051 }
8052 
8053 /* destroy an existing cpu accounting group */
8054 static void cpuacct_destroy(struct cgroup *cgrp)
8055 {
8056 	struct cpuacct *ca = cgroup_ca(cgrp);
8057 
8058 	free_percpu(ca->cpustat);
8059 	free_percpu(ca->cpuusage);
8060 	kfree(ca);
8061 }
8062 
8063 static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
8064 {
8065 	u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
8066 	u64 data;
8067 
8068 #ifndef CONFIG_64BIT
8069 	/*
8070 	 * Take rq->lock to make 64-bit read safe on 32-bit platforms.
8071 	 */
8072 	raw_spin_lock_irq(&cpu_rq(cpu)->lock);
8073 	data = *cpuusage;
8074 	raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
8075 #else
8076 	data = *cpuusage;
8077 #endif
8078 
8079 	return data;
8080 }
8081 
8082 static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
8083 {
8084 	u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
8085 
8086 #ifndef CONFIG_64BIT
8087 	/*
8088 	 * Take rq->lock to make 64-bit write safe on 32-bit platforms.
8089 	 */
8090 	raw_spin_lock_irq(&cpu_rq(cpu)->lock);
8091 	*cpuusage = val;
8092 	raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
8093 #else
8094 	*cpuusage = val;
8095 #endif
8096 }
8097 
8098 /* return total cpu usage (in nanoseconds) of a group */
8099 static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
8100 {
8101 	struct cpuacct *ca = cgroup_ca(cgrp);
8102 	u64 totalcpuusage = 0;
8103 	int i;
8104 
8105 	for_each_present_cpu(i)
8106 		totalcpuusage += cpuacct_cpuusage_read(ca, i);
8107 
8108 	return totalcpuusage;
8109 }
8110 
8111 static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
8112 								u64 reset)
8113 {
8114 	struct cpuacct *ca = cgroup_ca(cgrp);
8115 	int err = 0;
8116 	int i;
8117 
8118 	if (reset) {
8119 		err = -EINVAL;
8120 		goto out;
8121 	}
8122 
8123 	for_each_present_cpu(i)
8124 		cpuacct_cpuusage_write(ca, i, 0);
8125 
8126 out:
8127 	return err;
8128 }
8129 
8130 static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
8131 				   struct seq_file *m)
8132 {
8133 	struct cpuacct *ca = cgroup_ca(cgroup);
8134 	u64 percpu;
8135 	int i;
8136 
8137 	for_each_present_cpu(i) {
8138 		percpu = cpuacct_cpuusage_read(ca, i);
8139 		seq_printf(m, "%llu ", (unsigned long long) percpu);
8140 	}
8141 	seq_printf(m, "\n");
8142 	return 0;
8143 }
8144 
8145 static const char *cpuacct_stat_desc[] = {
8146 	[CPUACCT_STAT_USER] = "user",
8147 	[CPUACCT_STAT_SYSTEM] = "system",
8148 };
8149 
8150 static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
8151 			      struct cgroup_map_cb *cb)
8152 {
8153 	struct cpuacct *ca = cgroup_ca(cgrp);
8154 	int cpu;
8155 	s64 val = 0;
8156 
8157 	for_each_online_cpu(cpu) {
8158 		struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu);
8159 		val += kcpustat->cpustat[CPUTIME_USER];
8160 		val += kcpustat->cpustat[CPUTIME_NICE];
8161 	}
8162 	val = cputime64_to_clock_t(val);
8163 	cb->fill(cb, cpuacct_stat_desc[CPUACCT_STAT_USER], val);
8164 
8165 	val = 0;
8166 	for_each_online_cpu(cpu) {
8167 		struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu);
8168 		val += kcpustat->cpustat[CPUTIME_SYSTEM];
8169 		val += kcpustat->cpustat[CPUTIME_IRQ];
8170 		val += kcpustat->cpustat[CPUTIME_SOFTIRQ];
8171 	}
8172 
8173 	val = cputime64_to_clock_t(val);
8174 	cb->fill(cb, cpuacct_stat_desc[CPUACCT_STAT_SYSTEM], val);
8175 
8176 	return 0;
8177 }
8178 
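/*
 * Control files for the cpuacct controller, seen by userspace as
 * cpuacct.usage (total consumed CPU time in ns; writing 0 resets it),
 * cpuacct.usage_percpu (one ns value per present CPU) and cpuacct.stat
 * (user/system time converted to clock ticks via cputime64_to_clock_t()).
 */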
8179 static struct cftype files[] = {
8180 	{
8181 		.name = "usage",
8182 		.read_u64 = cpuusage_read,
8183 		.write_u64 = cpuusage_write,
8184 	},
8185 	{
8186 		.name = "usage_percpu",
8187 		.read_seq_string = cpuacct_percpu_seq_read,
8188 	},
8189 	{
8190 		.name = "stat",
8191 		.read_map = cpuacct_stats_show,
8192 	},
8193 	{ }	/* terminate */
8194 };
8195 
8196 /*
8197  * charge this task's execution time to its accounting group.
8198  *
8199  * called with rq->lock held.
8200  */
8201 void cpuacct_charge(struct task_struct *tsk, u64 cputime)
8202 {
8203 	struct cpuacct *ca;
8204 	int cpu;
8205 
8206 	if (unlikely(!cpuacct_subsys.active))
8207 		return;
8208 
8209 	cpu = task_cpu(tsk);
8210 
8211 	rcu_read_lock();
8212 
8213 	ca = task_ca(tsk);
8214 
8215 	for (; ca; ca = parent_ca(ca)) {
8216 		u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
8217 		*cpuusage += cputime;
8218 	}
8219 
8220 	rcu_read_unlock();
8221 }
8222 
8223 struct cgroup_subsys cpuacct_subsys = {
8224 	.name = "cpuacct",
8225 	.create = cpuacct_create,
8226 	.destroy = cpuacct_destroy,
8227 	.subsys_id = cpuacct_subsys_id,
8228 	.base_cftypes = files,
8229 };
8230 #endif	/* CONFIG_CGROUP_CPUACCT */
8231