core.c: ec4fc801a02d96180c597238fe87141471b70971 ("-" lines) vs. core.c: 24a9c54182b3758801b8ca6c8c237cc2ff654732 ("+" lines)
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * kernel/sched/core.c
  *
  * Core kernel scheduler code and related syscalls
  *
  * Copyright (C) 1991-2002 Linus Torvalds
  */

--- 3794 unchanged lines hidden ---

 bool cpus_share_cache(int this_cpu, int that_cpu)
 {
 	if (this_cpu == that_cpu)
 		return true;

 	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
 }
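This helper is identical on both sides and is what the wakelist logic below keys off: two CPUs "share cache" when their per-CPU last-level-cache id matches, or when they are the same CPU. A small userspace model of that check; the llc_id table and the 8-CPU topology are made up for illustration, not taken from the tree.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for per_cpu(sd_llc_id, cpu): CPUs 0-3 on LLC 0, CPUs 4-7 on LLC 1. */
static const int llc_id[8] = { 0, 0, 0, 0, 1, 1, 1, 1 };

static bool cpus_share_cache_model(int this_cpu, int that_cpu)
{
	if (this_cpu == that_cpu)
		return true;

	return llc_id[this_cpu] == llc_id[that_cpu];
}

int main(void)
{
	printf("0 vs 3: %d\n", cpus_share_cache_model(0, 3)); /* 1: same LLC  */
	printf("0 vs 4: %d\n", cpus_share_cache_model(0, 4)); /* 0: cross LLC */
	return 0;
}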
-static inline bool ttwu_queue_cond(int cpu)
+static inline bool ttwu_queue_cond(int cpu, int wake_flags)
 {
 	/*
 	 * Do not complicate things with the async wake_list while the CPU is
 	 * in hotplug state.
 	 */
 	if (!cpu_active(cpu))
 		return false;

 	/*
 	 * If the CPU does not share cache, then queue the task on the
 	 * remote rqs wakelist to avoid accessing remote data.
 	 */
 	if (!cpus_share_cache(smp_processor_id(), cpu))
 		return true;

-	if (cpu == smp_processor_id())
-		return false;
-
 	/*
-	 * If the wakee cpu is idle, or the task is descheduling and the
-	 * only running task on the CPU, then use the wakelist to offload
-	 * the task activation to the idle (or soon-to-be-idle) CPU as
-	 * the current CPU is likely busy. nr_running is checked to
-	 * avoid unnecessary task stacking.
-	 *
-	 * Note that we can only get here with (wakee) p->on_rq=0,
-	 * p->on_cpu can be whatever, we've done the dequeue, so
-	 * the wakee has been accounted out of ->nr_running.
+	 * If the task is descheduling and the only running task on the
+	 * CPU then use the wakelist to offload the task activation to
+	 * the soon-to-be-idle CPU as the current CPU is likely busy.
+	 * nr_running is checked to avoid unnecessary task stacking.
 	 */
-	if (!cpu_rq(cpu)->nr_running)
+	if ((wake_flags & WF_ON_CPU) && cpu_rq(cpu)->nr_running <= 1)
 		return true;

 	return false;
 }

 static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
 {
-	if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(cpu)) {
+	if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(cpu, wake_flags)) {
+		if (WARN_ON_ONCE(cpu == smp_processor_id()))
+			return false;
+
 		sched_clock_cpu(cpu); /* Sync clocks across CPUs */
 		__ttwu_queue_wakelist(p, cpu, wake_flags);
 		return true;
 	}

 	return false;
 }

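For orientation: the "-" side drops the wake_flags argument and queues on the remote wakelist whenever the target CPU has nothing running (the self-CPU check moves into the predicate), while the "+" side requires the WF_ON_CPU hint and tolerates at most one still-descheduling task (nr_running <= 1). A standalone sketch of the two predicates follows; struct cpu_model, its fields and the WF_ON_CPU value are illustrative stand-ins, not kernel APIs.

#include <stdbool.h>
#include <stdio.h>

#define WF_ON_CPU 0x01          /* illustrative value only */

/* Stand-in state for one modelled target CPU. */
struct cpu_model {
	bool active;            /* cpu_active(cpu)                 */
	bool shares_cache;      /* cpus_share_cache(this_cpu, cpu) */
	bool is_self;           /* cpu == smp_processor_id()       */
	int  nr_running;        /* cpu_rq(cpu)->nr_running         */
};

/* Predicate as on the "-" side: no wake_flags, an idle target is enough. */
static bool queue_cond_no_flags(const struct cpu_model *c)
{
	if (!c->active)
		return false;
	if (!c->shares_cache)
		return true;
	if (c->is_self)
		return false;
	return c->nr_running == 0;
}

/* Predicate as on the "+" side: needs WF_ON_CPU, allows one leftover task. */
static bool queue_cond_wf_on_cpu(const struct cpu_model *c, int wake_flags)
{
	if (!c->active)
		return false;
	if (!c->shares_cache)
		return true;
	return (wake_flags & WF_ON_CPU) && c->nr_running <= 1;
}

int main(void)
{
	struct cpu_model idle_peer = { .active = true, .shares_cache = true };

	/* Idle CPU in the same LLC: one side queues unconditionally,
	 * the other only when the waker passes WF_ON_CPU. */
	printf("no-flags: %d, flagged(no hint): %d, flagged(WF_ON_CPU): %d\n",
	       queue_cond_no_flags(&idle_peer),
	       queue_cond_wf_on_cpu(&idle_peer, 0),
	       queue_cond_wf_on_cpu(&idle_peer, WF_ON_CPU));
	return 0;
}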

--- 305 unchanged lines hidden ---

 	 *   LOCK rq->lock
 	 *   smp_mb__after_spin_lock()		smp_cond_load_acquire(&p->on_cpu)
 	 *   STORE p->on_cpu = 1		LOAD p->cpu
 	 *
 	 * to ensure we observe the correct CPU on which the task is currently
 	 * scheduling.
 	 */
 	if (smp_load_acquire(&p->on_cpu) &&
-	    ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
+	    ttwu_queue_wakelist(p, task_cpu(p), wake_flags | WF_ON_CPU))
 		goto unlock;

 	/*
 	 * If the owning (remote) CPU is still in the middle of schedule() with
 	 * this task as prev, wait until it's done referencing the task.
 	 *
 	 * Pairs with the smp_store_release() in finish_task().
 	 *
--- 573 unchanged lines hidden ---

 static inline void prepare_task(struct task_struct *next)
 {
 #ifdef CONFIG_SMP
 	/*
 	 * Claim the task as running, we do this before switching to it
 	 * such that any running task will have this set.
 	 *
-	 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and
-	 * its ordering comment.
+	 * See the ttwu() WF_ON_CPU case and its ordering comment.
 	 */
 	WRITE_ONCE(next->on_cpu, 1);
 #endif
 }

 static inline void finish_task(struct task_struct *prev)
 {
 #ifdef CONFIG_SMP
--- 28 unchanged lines hidden (view full) ---

4799 head = next;
4800
4801 func(rq);
4802 }
4803}
4804
4805static void balance_push(struct rq *rq);
4806
4757 */
4758 WRITE_ONCE(next->on_cpu, 1);
4759#endif
4760}
4761
4762static inline void finish_task(struct task_struct *prev)
4763{
4764#ifdef CONFIG_SMP

--- 28 unchanged lines hidden (view full) ---

4793 head = next;
4794
4795 func(rq);
4796 }
4797}
4798
4799static void balance_push(struct rq *rq);
4800
+/*
+ * balance_push_callback is a right abuse of the callback interface and plays
+ * by significantly different rules.
+ *
+ * Where the normal balance_callback's purpose is to be ran in the same context
+ * that queued it (only later, when it's safe to drop rq->lock again),
+ * balance_push_callback is specifically targeted at __schedule().
+ *
+ * This abuse is tolerated because it places all the unlikely/odd cases behind
+ * a single test, namely: rq->balance_callback == NULL.
+ */
 struct callback_head balance_push_callback = {
 	.next = NULL,
 	.func = (void (*)(struct callback_head *))balance_push,
 };

-static inline struct callback_head *splice_balance_callbacks(struct rq *rq)
+static inline struct callback_head *
+__splice_balance_callbacks(struct rq *rq, bool split)
 {
 	struct callback_head *head = rq->balance_callback;

+	if (likely(!head))
+		return NULL;
+
 	lockdep_assert_rq_held(rq);
-	if (head)
+	/*
+	 * Must not take balance_push_callback off the list when
+	 * splice_balance_callbacks() and balance_callbacks() are not
+	 * in the same rq->lock section.
+	 *
+	 * In that case it would be possible for __schedule() to interleave
+	 * and observe the list empty.
+	 */
+	if (split && head == &balance_push_callback)
+		head = NULL;
+	else
 		rq->balance_callback = NULL;

 	return head;
 }

+static inline struct callback_head *splice_balance_callbacks(struct rq *rq)
+{
+	return __splice_balance_callbacks(rq, true);
+}
+
 static void __balance_callbacks(struct rq *rq)
 {
-	do_balance_callbacks(rq, splice_balance_callbacks(rq));
+	do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
 }

 static inline void balance_callbacks(struct rq *rq, struct callback_head *head)
 {
 	unsigned long flags;

 	if (unlikely(head)) {
 		raw_spin_rq_lock_irqsave(rq, flags);
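The split parameter exists so that the balance_push_callback sentinel is only taken off the list when the splice and the later callback run happen in the same rq->lock section; otherwise it is left in place for __schedule() to find, which is what the added comment describes. A compressed model of that decision follows, with struct rq reduced to the callback pointer; the *_model names are illustrative stand-ins.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct callback_head_model { struct callback_head_model *next; };

/* Sentinel standing in for balance_push_callback. */
static struct callback_head_model balance_push_cb_model;

struct rq_model { struct callback_head_model *balance_callback; };

/*
 * Model of __splice_balance_callbacks(): when 'split' is true the caller will
 * drop rq->lock before running the callbacks, so the balance_push sentinel
 * must stay queued rather than be spliced away.
 */
static struct callback_head_model *
splice_model(struct rq_model *rq, bool split)
{
	struct callback_head_model *head = rq->balance_callback;

	if (!head)
		return NULL;

	if (split && head == &balance_push_cb_model)
		return NULL;            /* leave it for __schedule() */

	rq->balance_callback = NULL;
	return head;
}

int main(void)
{
	struct rq_model rq = { .balance_callback = &balance_push_cb_model };

	printf("split splice took it: %s\n",
	       splice_model(&rq, true) ? "yes" : "no");
	printf("same-section splice took it: %s\n",
	       splice_model(&rq, false) ? "yes" : "no");
	return 0;
}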

--- 1696 unchanged lines hidden ---

 	 * TASK_RUNNING state.
 	 */
 	WARN_ON_ONCE(current->__state);
 	do {
 		__schedule(SM_NONE);
 	} while (need_resched());
 }

-#if defined(CONFIG_CONTEXT_TRACKING) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK)
+#if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK)
 asmlinkage __visible void __sched schedule_user(void)
 {
 	/*
 	 * If we come here after a random call to set_need_resched(),
 	 * or we have been woken up remotely but the IPI has not yet arrived,
 	 * we haven't yet exited the RCU idle mode. Do it here manually until
 	 * we find a better solution.
 	 *
--- 422 unchanged lines hidden ---

 	p->sched_class->prio_changed(rq, p, old_prio);

 out_unlock:
 	task_rq_unlock(rq, p, &rf);
 }
 EXPORT_SYMBOL(set_user_nice);

 /*
- * is_nice_reduction - check if nice value is an actual reduction
- *
- * Similar to can_nice() but does not perform a capability check.
- *
+ * can_nice - check if a task can reduce its nice value
  * @p: task
  * @nice: nice value
  */
-static bool is_nice_reduction(const struct task_struct *p, const int nice)
+int can_nice(const struct task_struct *p, const int nice)
 {
 	/* Convert nice value [19,-20] to rlimit style value [1,40]: */
 	int nice_rlim = nice_to_rlimit(nice);

-	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE));
+	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
+		capable(CAP_SYS_NICE));
 }

-/*
- * can_nice - check if a task can reduce its nice value
- * @p: task
- * @nice: nice value
- */
-int can_nice(const struct task_struct *p, const int nice)
-{
-	return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE);
-}
-
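Both sides lean on the same conversion, stated in the in-function comment: a nice value in [19, -20] maps onto an RLIMIT_NICE-style value in [1, 40], and the request passes if that value does not exceed the task's RLIMIT_NICE (or the caller has CAP_SYS_NICE). A tiny userspace check of the arithmetic, assuming the usual MAX_NICE = 19 definition; nice_to_rlimit_model() and nice_allowed() are stand-ins, not the kernel helpers.

#include <stdio.h>

#define MAX_NICE 19

/* Mirrors the documented mapping: nice 19 -> 1, nice -20 -> 40. */
static long nice_to_rlimit_model(long nice)
{
	return MAX_NICE - nice + 1;
}

/* can_nice()-style check against an RLIMIT_NICE value, CAP_SYS_NICE aside. */
static int nice_allowed(long nice, long rlimit_nice)
{
	return nice_to_rlimit_model(nice) <= rlimit_nice;
}

int main(void)
{
	printf("nice  19 -> %ld\n", nice_to_rlimit_model(19));   /* 1  */
	printf("nice -20 -> %ld\n", nice_to_rlimit_model(-20));  /* 40 */
	/* With RLIMIT_NICE = 20 the task may drop to nice 0, but not below. */
	printf("nice 0 allowed: %d, nice -1 allowed: %d\n",
	       nice_allowed(0, 20), nice_allowed(-1, 20));
	return 0;
}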
 #ifdef __ARCH_WANT_SYS_NICE

 /*
  * sys_nice - change the priority of the current process.
  * @increment: priority increment
  *
  * sys_setpriority is a more generic, but much slower function that
  * does similar things.

--- 110 unchanged lines hidden ---

7121 * in the irq utilization.
7122 *
7123 * The DL bandwidth number otoh is not a measured metric but a value computed
7124 * based on the task model parameters and gives the minimal utilization
7125 * required to meet deadlines.
7126 */
7127unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
7014#ifdef __ARCH_WANT_SYS_NICE
7015
7016/*
7017 * sys_nice - change the priority of the current process.
7018 * @increment: priority increment
7019 *
7020 * sys_setpriority is a more generic, but much slower function that
7021 * does similar things.

--- 110 unchanged lines hidden (view full) ---

7132 * which excludes things like IRQ and steal-time. These latter are then accrued
7133 * in the irq utilization.
7134 *
7135 * The DL bandwidth number otoh is not a measured metric but a value computed
7136 * based on the task model parameters and gives the minimal utilization
7137 * required to meet deadlines.
7138 */
7139unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
7128 enum cpu_util_type type,
7140 unsigned long max, enum cpu_util_type type,
7129 struct task_struct *p)
7130{
7141 struct task_struct *p)
7142{
7131 unsigned long dl_util, util, irq, max;
7143 unsigned long dl_util, util, irq;
7132 struct rq *rq = cpu_rq(cpu);
7133
7144 struct rq *rq = cpu_rq(cpu);
7145
7134 max = arch_scale_cpu_capacity(cpu);
7135
7136 if (!uclamp_is_used() &&
7137 type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) {
7138 return max;
7139 }
7140
7141 /*
7142 * Early check to see if IRQ/steal time saturates the CPU, can be
7143 * because of inaccuracies in how we track these -- see

--- 63 unchanged lines hidden (view full) ---

7207 * an interface. So, we only do the latter for now.
7208 */
7209 if (type == FREQUENCY_UTIL)
7210 util += cpu_bw_dl(rq);
7211
7212 return min(max, util);
7213}
7214
7146 if (!uclamp_is_used() &&
7147 type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) {
7148 return max;
7149 }
7150
7151 /*
7152 * Early check to see if IRQ/steal time saturates the CPU, can be
7153 * because of inaccuracies in how we track these -- see

--- 63 unchanged lines hidden (view full) ---

7217 * an interface. So, we only do the latter for now.
7218 */
7219 if (type == FREQUENCY_UTIL)
7220 util += cpu_bw_dl(rq);
7221
7222 return min(max, util);
7223}
7224
7215unsigned long sched_cpu_util(int cpu)
7225unsigned long sched_cpu_util(int cpu, unsigned long max)
7216{
7226{
7217 return effective_cpu_util(cpu, cpu_util_cfs(cpu), ENERGY_UTIL, NULL);
7227 return effective_cpu_util(cpu, cpu_util_cfs(cpu), max,
7228 ENERGY_UTIL, NULL);
7218}
7219#endif /* CONFIG_SMP */
7220
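The signature change is the visible part of moving the capacity lookup inside the helper: on one side callers such as sched_cpu_util() pass max explicitly, on the other the function derives it from arch_scale_cpu_capacity() itself. A sketch of the two calling conventions, reduced to the final clamp (the real helper also folds in RT, DL and IRQ terms); cap_of(), util_of() and the constants are made up for illustration.

#include <stdio.h>

/* Stand-ins for arch_scale_cpu_capacity() and a CPU's CFS utilization. */
static unsigned long cap_of(int cpu)  { (void)cpu; return 1024; }
static unsigned long util_of(int cpu) { return (unsigned long)(300 + 10 * cpu); }

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

/* Old-style convention: capacity is a parameter supplied by the caller. */
static unsigned long effective_util_with_max(unsigned long util, unsigned long max)
{
	return min_ul(max, util);
}

/* New-style convention: capacity is looked up internally. */
static unsigned long effective_util_internal(int cpu, unsigned long util)
{
	return min_ul(cap_of(cpu), util);
}

int main(void)
{
	int cpu = 2;

	/* Both conventions clamp to the same value; only the plumbing differs. */
	printf("caller-supplied max: %lu\n",
	       effective_util_with_max(util_of(cpu), cap_of(cpu)));
	printf("internally derived:  %lu\n",
	       effective_util_internal(cpu, util_of(cpu)));
	return 0;
}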
 /**
  * find_process_by_pid - find a process with a matching PID value.
  * @pid: the pid in question.
  *
  * The task of @pid, if found. %NULL otherwise.

--- 45 unchanged lines hidden ---

 	rcu_read_lock();
 	pcred = __task_cred(p);
 	match = (uid_eq(cred->euid, pcred->euid) ||
 		 uid_eq(cred->euid, pcred->uid));
 	rcu_read_unlock();
 	return match;
 }

-/*
- * Allow unprivileged RT tasks to decrease priority.
- * Only issue a capable test if needed and only once to avoid an audit
- * event on permitted non-privileged operations:
- */
-static int user_check_sched_setscheduler(struct task_struct *p,
-					 const struct sched_attr *attr,
-					 int policy, int reset_on_fork)
-{
-	if (fair_policy(policy)) {
-		if (attr->sched_nice < task_nice(p) &&
-		    !is_nice_reduction(p, attr->sched_nice))
-			goto req_priv;
-	}
-
-	if (rt_policy(policy)) {
-		unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
-
-		/* Can't set/change the rt policy: */
-		if (policy != p->policy && !rlim_rtprio)
-			goto req_priv;
-
-		/* Can't increase priority: */
-		if (attr->sched_priority > p->rt_priority &&
-		    attr->sched_priority > rlim_rtprio)
-			goto req_priv;
-	}
-
-	/*
-	 * Can't set/change SCHED_DEADLINE policy at all for now
-	 * (safest behavior); in the future we would like to allow
-	 * unprivileged DL tasks to increase their relative deadline
-	 * or reduce their runtime (both ways reducing utilization)
-	 */
-	if (dl_policy(policy))
-		goto req_priv;
-
-	/*
-	 * Treat SCHED_IDLE as nice 20. Only allow a switch to
-	 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
-	 */
-	if (task_has_idle_policy(p) && !idle_policy(policy)) {
-		if (!is_nice_reduction(p, task_nice(p)))
-			goto req_priv;
-	}
-
-	/* Can't change other user's priorities: */
-	if (!check_same_owner(p))
-		goto req_priv;
-
-	/* Normal users shall not reset the sched_reset_on_fork flag: */
-	if (p->sched_reset_on_fork && !reset_on_fork)
-		goto req_priv;
-
-	return 0;
-
-req_priv:
-	if (!capable(CAP_SYS_NICE))
-		return -EPERM;
-
-	return 0;
-}
-
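The helper funnels every "would need privilege" case to a single req_priv label so that capable(CAP_SYS_NICE) runs at most once, and only when an unprivileged request actually needs it; per the comment above, that avoids spurious audit events on permitted operations. The control-flow skeleton, reduced to two made-up checks; check_a_ok(), check_b_ok() and has_privilege are illustrative stand-ins.

#include <stdbool.h>
#include <stdio.h>

#define EPERM 1

static bool has_privilege;      /* stand-in for capable(CAP_SYS_NICE) */

static bool check_a_ok(int request) { return request % 2 == 0; }
static bool check_b_ok(int request) { return request < 100; }

/*
 * Same shape as user_check_sched_setscheduler(): each failing check jumps to
 * req_priv instead of returning -EPERM directly, so the (potentially audited)
 * privilege test is evaluated once at most.
 */
static int user_check_model(int request)
{
	if (!check_a_ok(request))
		goto req_priv;

	if (!check_b_ok(request))
		goto req_priv;

	return 0;

req_priv:
	if (!has_privilege)
		return -EPERM;

	return 0;
}

int main(void)
{
	has_privilege = false;
	printf("unprivileged, odd request: %d\n", user_check_model(3)); /* -EPERM */
	has_privilege = true;
	printf("privileged, odd request:   %d\n", user_check_model(3)); /* 0 */
	return 0;
}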
 static int __sched_setscheduler(struct task_struct *p,
 				const struct sched_attr *attr,
 				bool user, bool pi)
 {
 	int oldpolicy = -1, policy = attr->sched_policy;
 	int retval, oldprio, newprio, queued, running;
 	const struct sched_class *prev_class;
 	struct callback_head *head;

--- 25 unchanged lines hidden ---

 	 * SCHED_BATCH and SCHED_IDLE is 0.
 	 */
 	if (attr->sched_priority > MAX_RT_PRIO-1)
 		return -EINVAL;
 	if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
 	    (rt_policy(policy) != (attr->sched_priority != 0)))
 		return -EINVAL;

-	if (user) {
-		retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork);
-		if (retval)
-			return retval;
-
+	/*
+	 * Allow unprivileged RT tasks to decrease priority:
+	 */
+	if (user && !capable(CAP_SYS_NICE)) {
+		if (fair_policy(policy)) {
+			if (attr->sched_nice < task_nice(p) &&
+			    !can_nice(p, attr->sched_nice))
+				return -EPERM;
+		}
+
+		if (rt_policy(policy)) {
+			unsigned long rlim_rtprio =
+					task_rlimit(p, RLIMIT_RTPRIO);
+
+			/* Can't set/change the rt policy: */
+			if (policy != p->policy && !rlim_rtprio)
+				return -EPERM;
+
+			/* Can't increase priority: */
+			if (attr->sched_priority > p->rt_priority &&
+			    attr->sched_priority > rlim_rtprio)
+				return -EPERM;
+		}
+
+		/*
+		 * Can't set/change SCHED_DEADLINE policy at all for now
+		 * (safest behavior); in the future we would like to allow
+		 * unprivileged DL tasks to increase their relative deadline
+		 * or reduce their runtime (both ways reducing utilization)
+		 */
+		if (dl_policy(policy))
+			return -EPERM;
+
+		/*
+		 * Treat SCHED_IDLE as nice 20. Only allow a switch to
+		 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
+		 */
+		if (task_has_idle_policy(p) && !idle_policy(policy)) {
+			if (!can_nice(p, task_nice(p)))
+				return -EPERM;
+		}
+
+		/* Can't change other user's priorities: */
+		if (!check_same_owner(p))
+			return -EPERM;
+
+		/* Normal users shall not reset the sched_reset_on_fork flag: */
+		if (p->sched_reset_on_fork && !reset_on_fork)
+			return -EPERM;
+	}
+
+	if (user) {
 		if (attr->sched_flags & SCHED_FLAG_SUGOV)
 			return -EINVAL;

 		retval = security_task_setscheduler(p);
 		if (retval)
 			return retval;
 	}


--- 2135 unchanged lines hidden ---

 struct task_group root_task_group;
 LIST_HEAD(task_groups);

 /* Cacheline aligned slab cache for task_group */
 static struct kmem_cache *task_group_cache __read_mostly;
 #endif

 DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);
-DECLARE_PER_CPU(cpumask_var_t, select_rq_mask);
+DECLARE_PER_CPU(cpumask_var_t, select_idle_mask);

 void __init sched_init(void)
 {
 	unsigned long ptr = 0;
 	int i;

 	/* Make sure the linker didn't screw up */
 	BUG_ON(&idle_sched_class != &fair_sched_class + 1 ||

--- 32 unchanged lines hidden ---

 		ptr += nr_cpu_ids * sizeof(void **);

 #endif /* CONFIG_RT_GROUP_SCHED */
 	}
 #ifdef CONFIG_CPUMASK_OFFSTACK
 	for_each_possible_cpu(i) {
 		per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
 			cpumask_size(), GFP_KERNEL, cpu_to_node(i));
-		per_cpu(select_rq_mask, i) = (cpumask_var_t)kzalloc_node(
+		per_cpu(select_idle_mask, i) = (cpumask_var_t)kzalloc_node(
 			cpumask_size(), GFP_KERNEL, cpu_to_node(i));
 	}
 #endif /* CONFIG_CPUMASK_OFFSTACK */

 	init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime());

 #ifdef CONFIG_SMP
 	init_defrootdomain();

--- 1572 unchanged lines hidden ---