cpuset.c: diff between d477f8c202d1f0d4791ab1263ca7657bbe5cf79e (old) and 99c8b231ae6c6ca4ca2fd1c0b3701071f589661f (new)
/*
 * kernel/cpuset.c
 *
 * Processor and Memory placement constraints for sets of tasks.
 *
 * Copyright (C) 2003 BULL SA.
 * Copyright (C) 2004-2007 Silicon Graphics, Inc.
 * Copyright (C) 2006 Google, Inc

--- 715 unchanged lines hidden ---

 * This function builds a partial partition of the system's CPUs.
 * A 'partial partition' is a set of non-overlapping subsets whose
 * union is a subset of that set.
 * The output of this function needs to be passed to kernel/sched/core.c
 * partition_sched_domains() routine, which will rebuild the scheduler's
 * load balancing domains (sched domains) as specified by that partial
 * partition.
 *
- * See "What is sched_load_balance" in Documentation/cgroup-v1/cpusets.txt
+ * See "What is sched_load_balance" in Documentation/cgroup-v1/cpusets.rst
 * for a background explanation of this.
 *
 * Does not return errors, on the theory that the callers of this
 * routine would rather not worry about failures to rebuild sched
 * domains when operating in the severe memory shortage situations
 * that could cause allocation failures below.
 *
 * Must be called with cpuset_mutex held.
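
To make the interface above concrete, a 'partial partition' is an array of
non-overlapping cpumasks handed to partition_sched_domains(). The following
sketch is hypothetical and not part of cpuset.c: alloc_sched_domains() and
partition_sched_domains() are real scheduler interfaces, but the CPU numbers,
the function name example_rebuild(), and the calling context are invented
for illustration.

/*
 * Hypothetical sketch: build a partial partition of two disjoint CPU
 * subsets and hand it to the scheduler.  Real callers run under
 * cpuset_mutex and the hotplug lock, and partition_sched_domains()
 * takes ownership of the doms array.
 */
static void example_rebuild(void)
{
	cpumask_var_t *doms = alloc_sched_domains(2);

	if (!doms)
		return;	/* mirrors "does not return errors" above */

	cpumask_clear(doms[0]);
	cpumask_set_cpu(0, doms[0]);	/* subset A: CPUs 0 and 1 */
	cpumask_set_cpu(1, doms[0]);

	cpumask_clear(doms[1]);
	cpumask_set_cpu(2, doms[1]);	/* subset B: CPU 2, disjoint from A */

	/* NULL attributes: default sched_domain_attr for both domains */
	partition_sched_domains(2, doms, NULL);
}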

--- 2508 unchanged lines hidden ---

	spin_lock_irqsave(&callback_lock, flags);
	rcu_read_lock();
	guarantee_online_cpus(task_cs(tsk), pmask);
	rcu_read_unlock();
	spin_unlock_irqrestore(&callback_lock, flags);
}
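
The lines above are the tail of cpuset_cpus_allowed(), which fills *pmask
with the CPUs the task is allowed to run on, under callback_lock and RCU.
For context, a caller would typically clamp a requested affinity mask
against that set, as sched_setaffinity() in kernel/sched/core.c does. The
sketch below is hypothetical: the name example_clamp_affinity() and its
exact shape are invented, though the cpumask helpers are real.

/*
 * Hypothetical sketch (not kernel code): restrict a user-requested
 * affinity mask to the CPUs the task's cpuset allows.
 */
static int example_clamp_affinity(struct task_struct *p,
				  const struct cpumask *requested,
				  struct cpumask *new_mask)
{
	cpumask_var_t allowed;

	if (!alloc_cpumask_var(&allowed, GFP_KERNEL))
		return -ENOMEM;

	cpuset_cpus_allowed(p, allowed);	/* the function shown above */
	cpumask_and(new_mask, requested, allowed);

	free_cpumask_var(allowed);

	/* an empty intersection means the request cannot be honoured */
	return cpumask_empty(new_mask) ? -EINVAL : 0;
}
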
-/**
- * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
- * @tsk: pointer to task_struct with which the scheduler is struggling
- *
- * Description: In the case that the scheduler cannot find an allowed cpu in
- * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy
- * mode however, this value is the same as task_cs(tsk)->effective_cpus,
- * which will not contain a sane cpumask during cases such as cpu hotplugging.
- * This is the absolute last resort for the scheduler and it is only used if
- * _every_ other avenue has been traveled.
- **/
-
void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
{
	rcu_read_lock();
-	do_set_cpus_allowed(tsk, is_in_v2_mode() ?
-		task_cs(tsk)->cpus_allowed : cpu_possible_mask);
+	do_set_cpus_allowed(tsk, task_cs(tsk)->effective_cpus);
	rcu_read_unlock();

	/*
	 * We own tsk->cpus_allowed, nobody can change it under us.
	 *
	 * But we used cs && cs->cpus_allowed lockless and thus can
	 * race with cgroup_attach_task() or update_cpumask() and get
	 * the wrong tsk->cpus_allowed. However, both cases imply the

--- 311 unchanged lines hidden ---
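
The hunk above shows the newer snapshot dropping the kernel-doc comment and
the is_in_v2_mode() selection, restoring task_cs(tsk)->effective_cpus as the
fallback mask. For context, the caller of cpuset_cpus_allowed_fallback() is
the scheduler's select_fallback_rq() in kernel/sched/core.c; the sketch
below paraphrases its widening state machine and is simplified rather than
verbatim kernel code.

/*
 * Paraphrased sketch of select_fallback_rq()'s retry loop: try the
 * cpuset fallback first, then every possible CPU, then give up.
 */
static int fallback_sketch(struct task_struct *p)
{
	enum { use_cpuset, use_possible, fail } state = use_cpuset;
	int dest_cpu;

	for (;;) {
		/* any allowed, active CPU left? */
		for_each_cpu(dest_cpu, &p->cpus_allowed)
			if (cpu_active(dest_cpu))
				return dest_cpu;

		/* widen the allowed mask one step and retry */
		switch (state) {
		case use_cpuset:
			cpuset_cpus_allowed_fallback(p);
			state = use_possible;
			break;
		case use_possible:
			do_set_cpus_allowed(p, cpu_possible_mask);
			state = fail;
			break;
		case fail:
			BUG();	/* truly nowhere left to run */
		}
	}
}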