xref: /linux/kernel/cgroup/cpuset.c (revision ca174c705db52db3cc842e754fd25a5f50eb702d)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  kernel/cpuset.c
4  *
5  *  Processor and Memory placement constraints for sets of tasks.
6  *
7  *  Copyright (C) 2003 BULL SA.
8  *  Copyright (C) 2004-2007 Silicon Graphics, Inc.
9  *  Copyright (C) 2006 Google, Inc
10  *
11  *  Portions derived from Patrick Mochel's sysfs code.
12  *  sysfs is Copyright (c) 2001-3 Patrick Mochel
13  *
14  *  2003-10-10 Written by Simon Derr.
15  *  2003-10-22 Updates by Stephen Hemminger.
16  *  2004 May-July Rework by Paul Jackson.
17  *  2006 Rework by Paul Menage to use generic cgroups
18  *  2008 Rework of the scheduler domains and CPU hotplug handling
19  *       by Max Krasnyansky
20  */
21 #include "cpuset-internal.h"
22 
23 #include <linux/init.h>
24 #include <linux/interrupt.h>
25 #include <linux/kernel.h>
26 #include <linux/mempolicy.h>
27 #include <linux/mm.h>
28 #include <linux/memory.h>
29 #include <linux/rcupdate.h>
30 #include <linux/sched.h>
31 #include <linux/sched/deadline.h>
32 #include <linux/sched/mm.h>
33 #include <linux/sched/task.h>
34 #include <linux/security.h>
35 #include <linux/oom.h>
36 #include <linux/sched/isolation.h>
37 #include <linux/wait.h>
38 #include <linux/workqueue.h>
39 #include <linux/task_work.h>
40 
41 DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
42 DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
43 
44 /*
45  * There could be abnormal cpuset configurations for cpu or memory
46  * node binding; this key provides a quick, low-cost check for such
47  * a situation.
48  */
49 DEFINE_STATIC_KEY_FALSE(cpusets_insane_config_key);
50 
51 static const char * const perr_strings[] = {
52 	[PERR_INVCPUS]   = "Invalid cpu list in cpuset.cpus.exclusive",
53 	[PERR_INVPARENT] = "Parent is an invalid partition root",
54 	[PERR_NOTPART]   = "Parent is not a partition root",
55 	[PERR_NOTEXCL]   = "Cpu list in cpuset.cpus not exclusive",
56 	[PERR_NOCPUS]    = "Parent unable to distribute cpu downstream",
57 	[PERR_HOTPLUG]   = "No cpu available due to hotplug",
58 	[PERR_CPUSEMPTY] = "cpuset.cpus and cpuset.cpus.exclusive are empty",
59 	[PERR_HKEEPING]  = "partition config conflicts with housekeeping setup",
60 	[PERR_ACCESS]    = "Enable partition not permitted",
61 	[PERR_REMOTE]    = "Have remote partition underneath",
62 };
63 
64 /*
65  * CPUSET Locking Convention
66  * -------------------------
67  *
68  * Below are the four global/local locks guarding cpuset structures in lock
69  * acquisition order:
70  *  - cpuset_top_mutex
71  *  - cpu_hotplug_lock (cpus_read_lock/cpus_write_lock)
72  *  - cpuset_mutex
73  *  - callback_lock (raw spinlock)
74  *
75  * Because cpuset now indirectly flushes a number of different workqueues
76  * in housekeeping_update() to update the housekeeping cpumasks whenever
77  * the set of isolated CPUs changes, calling into housekeeping_update()
78  * while holding cpus_read_lock may lead to deadlock.
79  *
80  * The outermost cpuset_top_mutex is held except when calling into
81  * cpuset_handle_hotplug() from the CPU hotplug code, where cpus_write_lock
82  * and cpuset_mutex are held instead. The main purpose of this mutex is
83  * to prevent regular cpuset control file writes from interfering with
84  * the call to housekeeping_update(), though a CPU hotplug operation can
85  * still happen in parallel. This mutex also provides protection for some
86  * internal variables.
87  *
88  * A task must hold all of the remaining three locks to modify externally
89  * visible or used cpuset fields, though some of the internally used cpuset
90  * fields and internal variables can be modified without holding
91  * callback_lock. If only reliable read access to the externally used
92  * fields is needed, a task can hold either cpuset_mutex or callback_lock,
93  * both of which are exposed to other external subsystems.
94  *
95  * If a task holds cpu_hotplug_lock and cpuset_mutex, it blocks others,
96  * ensuring that it is the only task able to also acquire callback_lock
97  * and modify cpusets.  It can perform various checks on the cpuset
98  * structure first, knowing nothing will change. It can also allocate memory
99  * without holding callback_lock. While it is performing these checks, various
100  * callback routines can briefly acquire callback_lock to query cpusets.  Once
101  * it is ready to make the changes, it takes callback_lock, blocking everyone
102  * else.
103  *
104  * Calls to the kernel memory allocator cannot be made while holding
105  * callback_lock which is a spinlock, as the memory allocator may sleep or
106  * call back into cpuset code and acquire callback_lock.
107  *
108  * The task_struct fields mems_allowed and mempolicy may be changed by
109  * another task, so we use alloc_lock in the task_struct to protect
110  * them.
111  *
112  * The cpuset_common_seq_show() handlers only hold callback_lock across
113  * small pieces of code, such as when reading out possibly multi-word
114  * cpumasks and nodemasks.
115  */
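
/*
 * Illustrative sketch (not a helper defined in this file): a typical
 * control-file write path follows the locking order documented above:
 *
 *	cpuset_full_lock();		cpuset_top_mutex, cpus_read_lock, cpuset_mutex
 *	validate the change and allocate any memory needed
 *	spin_lock_irq(&callback_lock);
 *	update the externally visible cpumasks/nodemasks
 *	spin_unlock_irq(&callback_lock);
 *	cpuset_full_unlock();
 *
 * The callback_lock section is kept short so that readers such as
 * cpuset_common_seq_show() are only briefly blocked.
 */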
116 
117 static DEFINE_MUTEX(cpuset_top_mutex);
118 static DEFINE_MUTEX(cpuset_mutex);
119 
120 /*
121  * File level internal variables below follow one of the following exclusion
122  * rules.
123  *
124  * RWCS: Read/write-able by holding either cpus_write_lock (and optionally
125  *	 cpuset_mutex) or both cpus_read_lock and cpuset_mutex.
126  *
127  * CSCB: Readable by holding either cpuset_mutex or callback_lock. Writable
128  *	 by holding both cpuset_mutex and callback_lock.
129  *
130  * T:	 Read/write-able by holding the cpuset_top_mutex.
131  */
132 
133 /*
134  * For local partitions, update to subpartitions_cpus & isolated_cpus is done
135  * in update_parent_effective_cpumask(). For remote partitions, it is done in
136  * the remote_partition_*() and remote_cpus_update() helpers.
137  */
138 /*
139  * Exclusive CPUs distributed out to local or remote sub-partitions of
140  * top_cpuset
141  */
142 static cpumask_var_t	subpartitions_cpus;	/* RWCS */
143 
144 /*
145  * Exclusive CPUs in isolated partitions (shown in cpuset.cpus.isolated)
146  */
147 static cpumask_var_t	isolated_cpus;		/* CSCB */
148 
149 /*
150  * Set if housekeeping cpumasks are to be updated.
151  */
152 static bool		update_housekeeping;	/* RWCS */
153 
154 /*
155  * Copy of isolated_cpus to be passed to housekeeping_update()
156  */
157 static cpumask_var_t	isolated_hk_cpus;	/* T */
158 
159 /*
160  * A flag to force sched domain rebuild at the end of an operation.
161  * It can be set in
162  *  - update_partition_sd_lb()
163  *  - update_cpumasks_hier()
164  *  - cpuset_update_flag()
165  *  - cpuset_hotplug_update_tasks()
166  *  - cpuset_handle_hotplug()
167  *
168  * Protected by cpuset_mutex (with cpus_read_lock held) or cpus_write_lock.
169  *
170  * Note that update_relax_domain_level() in cpuset-v1.c can still call
171  * rebuild_sched_domains_locked() directly without using this flag.
172  */
173 static bool force_sd_rebuild;			/* RWCS */
174 
175 /*
176  * Partition root states:
177  *
178  *   0 - member (not a partition root)
179  *   1 - partition root
180  *   2 - partition root without load balancing (isolated)
181  *  -1 - invalid partition root
182  *  -2 - invalid isolated partition root
183  *
184  *  There are 2 types of partitions - local or remote. Local partitions are
185  *  those whose parents are partition roots themselves. Setting
186  *  cpuset.cpus.exclusive is optional when setting up local partitions.
187  *  Remote partitions are those whose parents are not partition roots. Passing
188  *  down exclusive CPUs by setting cpuset.cpus.exclusive along the ancestor
189  *  nodes is mandatory when creating a remote partition.
190  *
191  *  For simplicity, a local partition can be created under a local or remote
192  *  partition but a remote partition cannot have any partition root in its
193  *  ancestor chain except the cgroup root.
194  *
195  *  A valid partition can be formed by setting exclusive_cpus, or by setting
196  *  cpus_allowed when exclusive_cpus is not set. For a partition with empty
197  *  exclusive_cpus, any conflicting exclusive CPUs specified in the
198  *  following cpumasks of sibling cpusets will be removed from its
199  *  cpus_allowed when determining its effective_xcpus.
200  *  - effective_xcpus
201  *  - exclusive_cpus
202  *
203  *  The "cpuset.cpus.exclusive" control file should be used for setting up
204  *  partition if the users want to get as many CPUs as possible.
205  */
206 #define PRS_MEMBER		0
207 #define PRS_ROOT		1
208 #define PRS_ISOLATED		2
209 #define PRS_INVALID_ROOT	-1
210 #define PRS_INVALID_ISOLATED	-2
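
/*
 * Example (for illustration only): a valid isolated partition has
 * partition_root_state == PRS_ISOLATED (2). When it can no longer be a
 * valid partition, make_partition_invalid() below negates the state to
 * PRS_INVALID_ISOLATED (-2); recovery flips it back to the positive value.
 * is_partition_valid() and is_partition_invalid() are thus simple sign tests.
 */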
211 
212 /*
213  * Temporary cpumasks for working with partitions. They are passed among
214  * functions to avoid memory allocations in inner functions.
215  */
216 struct tmpmasks {
217 	cpumask_var_t addmask, delmask;	/* For partition root */
218 	cpumask_var_t new_cpus;		/* For update_cpumasks_hier() */
219 };
220 
221 void inc_dl_tasks_cs(struct task_struct *p)
222 {
223 	struct cpuset *cs = task_cs(p);
224 
225 	cs->nr_deadline_tasks++;
226 }
227 
228 void dec_dl_tasks_cs(struct task_struct *p)
229 {
230 	struct cpuset *cs = task_cs(p);
231 
232 	cs->nr_deadline_tasks--;
233 }
234 
235 static inline bool is_partition_valid(const struct cpuset *cs)
236 {
237 	return cs->partition_root_state > 0;
238 }
239 
240 static inline bool is_partition_invalid(const struct cpuset *cs)
241 {
242 	return cs->partition_root_state < 0;
243 }
244 
245 static inline bool cs_is_member(const struct cpuset *cs)
246 {
247 	return cs->partition_root_state == PRS_MEMBER;
248 }
249 
250 /*
251  * Callers should hold callback_lock to modify partition_root_state.
252  */
253 static inline void make_partition_invalid(struct cpuset *cs)
254 {
255 	if (cs->partition_root_state > 0)
256 		cs->partition_root_state = -cs->partition_root_state;
257 }
258 
259 /*
260  * Send notification event of whenever partition_root_state changes.
261  */
262 static inline void notify_partition_change(struct cpuset *cs, int old_prs)
263 {
264 	if (old_prs == cs->partition_root_state)
265 		return;
266 	cgroup_file_notify(&cs->partition_file);
267 
268 	/* Reset prs_err if not invalid */
269 	if (is_partition_valid(cs))
270 		WRITE_ONCE(cs->prs_err, PERR_NONE);
271 }
272 
273 /*
274  * The top_cpuset is always synchronized to cpu_active_mask and we should avoid
275  * using cpu_online_mask as much as possible. An active CPU is always an online
276  * CPU, but not vice versa. cpu_active_mask and cpu_online_mask can differ
277  * during hotplug operations. A CPU is marked active at the last stage of CPU
278  * bringup (CPUHP_AP_ACTIVE). It is also the stage where cpuset hotplug code
279  * will be called to update the sched domains so that the scheduler can move
280  * a normal task to a newly active CPU or remove tasks away from a newly
281  * inactivated CPU. The online bit is set much earlier in the CPU bringup
282  * process and cleared much later in CPU teardown.
283  *
284  * If cpu_online_mask is used while a hot-unplug operation is happening in
285  * parallel, we may leave an offline CPU in cpus_allowed or some other masks.
286  */
287 struct cpuset top_cpuset = {
288 	.flags = BIT(CS_CPU_EXCLUSIVE) |
289 		 BIT(CS_MEM_EXCLUSIVE) | BIT(CS_SCHED_LOAD_BALANCE),
290 	.partition_root_state = PRS_ROOT,
291 };
292 
293 /**
294  * cpuset_lock - Acquire the global cpuset mutex
295  *
296  * This locks the global cpuset mutex to prevent modifications to the cpuset
297  * hierarchy and configurations. This helper alone is not enough to make modifications.
298  */
299 void cpuset_lock(void)
300 {
301 	mutex_lock(&cpuset_mutex);
302 }
303 
304 void cpuset_unlock(void)
305 {
306 	mutex_unlock(&cpuset_mutex);
307 }
308 
309 void lockdep_assert_cpuset_lock_held(void)
310 {
311 	lockdep_assert_held(&cpuset_mutex);
312 }
313 
314 /**
315  * cpuset_full_lock - Acquire full protection for cpuset modification
316  *
317  * Takes both CPU hotplug read lock (cpus_read_lock()) and cpuset mutex
318  * to safely modify cpuset data.
319  */
320 void cpuset_full_lock(void)
321 {
322 	mutex_lock(&cpuset_top_mutex);
323 	cpus_read_lock();
324 	mutex_lock(&cpuset_mutex);
325 }
326 
327 void cpuset_full_unlock(void)
328 {
329 	mutex_unlock(&cpuset_mutex);
330 	cpus_read_unlock();
331 	mutex_unlock(&cpuset_top_mutex);
332 }
333 
334 #ifdef CONFIG_LOCKDEP
335 bool lockdep_is_cpuset_held(void)
336 {
337 	return lockdep_is_held(&cpuset_mutex) ||
338 	       lockdep_is_held(&cpuset_top_mutex);
339 }
340 #endif
341 
342 static DEFINE_SPINLOCK(callback_lock);
343 
344 void cpuset_callback_lock_irq(void)
345 {
346 	spin_lock_irq(&callback_lock);
347 }
348 
349 void cpuset_callback_unlock_irq(void)
350 {
351 	spin_unlock_irq(&callback_lock);
352 }
353 
354 static struct workqueue_struct *cpuset_migrate_mm_wq;
355 
356 static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);
357 
358 static inline void check_insane_mems_config(nodemask_t *nodes)
359 {
360 	if (!cpusets_insane_config() &&
361 		movable_only_nodes(nodes)) {
362 		static_branch_enable_cpuslocked(&cpusets_insane_config_key);
363 		pr_info("Unsupported (movable nodes only) cpuset configuration detected (nmask=%*pbl)!\n"
364 			"Cpuset allocations might fail even with a lot of memory available.\n",
365 			nodemask_pr_args(nodes));
366 	}
367 }
368 
369 /*
370  * Decrease cs->attach_in_progress and wake up cpuset_attach_wq
371  * if cs->attach_in_progress drops to 0.
372  */
373 static inline void dec_attach_in_progress_locked(struct cpuset *cs)
374 {
375 	lockdep_assert_cpuset_lock_held();
376 
377 	cs->attach_in_progress--;
378 	if (!cs->attach_in_progress)
379 		wake_up(&cpuset_attach_wq);
380 }
381 
382 static inline void dec_attach_in_progress(struct cpuset *cs)
383 {
384 	mutex_lock(&cpuset_mutex);
385 	dec_attach_in_progress_locked(cs);
386 	mutex_unlock(&cpuset_mutex);
387 }
388 
389 static inline bool cpuset_v2(void)
390 {
391 	return !IS_ENABLED(CONFIG_CPUSETS_V1) ||
392 		cgroup_subsys_on_dfl(cpuset_cgrp_subsys);
393 }
394 
395 /*
396  * Cgroup v2 behavior is used on the "cpus" and "mems" control files when
397  * on default hierarchy or when the cpuset_v2_mode flag is set by mounting
398  * the v1 cpuset cgroup filesystem with the "cpuset_v2_mode" mount option.
399  * With v2 behavior, "cpus" and "mems" are always what the users have
400  * requested and won't be changed by hotplug events. Only the effective
401  * cpus or mems will be affected.
402  */
403 static inline bool is_in_v2_mode(void)
404 {
405 	return cpuset_v2() ||
406 	      (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE);
407 }
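
/*
 * Illustrative example of the v2 behavior described above (values are
 * hypothetical): with "cpuset.cpus" set to 0-3, offlining CPU 3 leaves
 * "cpuset.cpus" reading 0-3 while "cpuset.cpus.effective" shrinks to 0-2;
 * onlining CPU 3 again restores the effective mask without user action.
 */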
408 
409 /**
410  * partition_is_populated - check if partition has tasks
411  * @cs: partition root to be checked
412  * @excluded_child: a child cpuset to be excluded in task checking
413  * Return: true if there are tasks, false otherwise
414  *
415  * @cs should be a valid partition root or going to become a partition root.
416  * @excluded_child should be non-NULL when this cpuset is going to become a
417  * partition itself.
418  *
419  * Note that a remote partition is not allowed underneath a valid local
420  * or remote partition. So if a non-partition root child is populated,
421  * the whole partition is considered populated.
422  */
423 static inline bool partition_is_populated(struct cpuset *cs,
424 					  struct cpuset *excluded_child)
425 {
426 	struct cpuset *cp;
427 	struct cgroup_subsys_state *pos_css;
428 
429 	/*
430 	 * We cannot call cs_is_populated(cs) directly, as
431 	 * nr_populated_domain_children may include populated
432 	 * csets from descendants that are partitions.
433 	 */
434 	if (cs->css.cgroup->nr_populated_csets ||
435 	    cs->attach_in_progress)
436 		return true;
437 
438 	rcu_read_lock();
439 	cpuset_for_each_descendant_pre(cp, pos_css, cs) {
440 		if (cp == cs || cp == excluded_child)
441 			continue;
442 
443 		if (is_partition_valid(cp)) {
444 			pos_css = css_rightmost_descendant(pos_css);
445 			continue;
446 		}
447 
448 		if (cpuset_is_populated(cp)) {
449 			rcu_read_unlock();
450 			return true;
451 		}
452 	}
453 	rcu_read_unlock();
454 	return false;
455 }
456 
457 /*
458  * Return in pmask the portion of a task's cpuset's cpus_allowed that
459  * are active and are capable of running the task.  If none are found,
460  * walk up the cpuset hierarchy until we find one that does have some
461  * appropriate cpus.
462  *
463  * One way or another, we guarantee to return some non-empty subset
464  * of cpu_active_mask.
465  *
466  * Call with callback_lock or cpuset_mutex held.
467  */
468 static void guarantee_active_cpus(struct task_struct *tsk,
469 				  struct cpumask *pmask)
470 {
471 	const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
472 	struct cpuset *cs;
473 
474 	if (WARN_ON(!cpumask_and(pmask, possible_mask, cpu_active_mask)))
475 		cpumask_copy(pmask, cpu_active_mask);
476 
477 	rcu_read_lock();
478 	cs = task_cs(tsk);
479 
480 	while (!cpumask_intersects(cs->effective_cpus, pmask))
481 		cs = parent_cs(cs);
482 
483 	cpumask_and(pmask, pmask, cs->effective_cpus);
484 	rcu_read_unlock();
485 }
486 
487 /*
488  * Return in *pmask the portion of a cpuset's mems_allowed that
489  * are online, with memory.  If none are online with memory, walk
490  * up the cpuset hierarchy until we find one that does have some
491  * online mems.  The top cpuset always has some mems online.
492  *
493  * One way or another, we guarantee to return some non-empty subset
494  * of node_states[N_MEMORY].
495  *
496  * Call with callback_lock or cpuset_mutex held.
497  */
498 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
499 {
500 	while (!nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]))
501 		cs = parent_cs(cs);
502 }
503 
504 /**
505  * alloc_cpumasks - Allocate an array of cpumask variables
506  * @pmasks: Pointer to array of cpumask_var_t pointers
507  * @size: Number of cpumasks to allocate
508  * Return: 0 if successful, -ENOMEM otherwise.
509  *
510  * Allocates @size cpumasks and initializes them to empty. On allocation
511  * failure, any cpumasks that were already allocated by this call are
512  * freed before returning.
513  */
514 static inline int alloc_cpumasks(cpumask_var_t *pmasks[], u32 size)
515 {
516 	int i;
517 
518 	for (i = 0; i < size; i++) {
519 		if (!zalloc_cpumask_var(pmasks[i], GFP_KERNEL)) {
520 			while (--i >= 0)
521 				free_cpumask_var(*pmasks[i]);
522 			return -ENOMEM;
523 		}
524 	}
525 	return 0;
526 }
527 
528 /**
529  * alloc_tmpmasks - Allocate temporary cpumasks for cpuset operations.
530  * @tmp: Pointer to tmpmasks structure to populate
531  * Return: 0 on success, -ENOMEM on allocation failure
532  */
533 static inline int alloc_tmpmasks(struct tmpmasks *tmp)
534 {
535 	/*
536 	 * Array of pointers to the three cpumask_var_t fields in tmpmasks.
537 	 * Note: Array size must match actual number of masks (3)
538 	 */
539 	cpumask_var_t *pmask[3] = {
540 		&tmp->new_cpus,
541 		&tmp->addmask,
542 		&tmp->delmask
543 	};
544 
545 	return alloc_cpumasks(pmask, ARRAY_SIZE(pmask));
546 }
547 
548 /**
549  * free_tmpmasks - free cpumasks in a tmpmasks structure
550  * @tmp: the tmpmasks structure pointer
551  */
552 static inline void free_tmpmasks(struct tmpmasks *tmp)
553 {
554 	if (!tmp)
555 		return;
556 
557 	free_cpumask_var(tmp->new_cpus);
558 	free_cpumask_var(tmp->addmask);
559 	free_cpumask_var(tmp->delmask);
560 }
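
/*
 * Minimal usage sketch (assumed caller pattern, not a function in this
 * file): scratch masks are allocated up front so that no memory allocation
 * happens later while callback_lock is held:
 *
 *	struct tmpmasks tmp;
 *
 *	if (alloc_tmpmasks(&tmp))
 *		return -ENOMEM;
 *	use tmp.addmask, tmp.delmask and tmp.new_cpus under the locks
 *	free_tmpmasks(&tmp);
 */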
561 
562 /**
563  * dup_or_alloc_cpuset - Duplicate or allocate a new cpuset
564  * @cs: Source cpuset to duplicate (NULL for a fresh allocation)
565  *
566  * Creates a new cpuset by either:
567  * 1. Duplicating an existing cpuset (if @cs is non-NULL), or
568  * 2. Allocating a fresh cpuset with zero-initialized masks (if @cs is NULL)
569  *
570  * Return: Pointer to newly allocated cpuset on success, NULL on failure
571  */
572 static struct cpuset *dup_or_alloc_cpuset(struct cpuset *cs)
573 {
574 	struct cpuset *trial;
575 
576 	/* Allocate base structure */
577 	trial = cs ? kmemdup(cs, sizeof(*cs), GFP_KERNEL) :
578 		     kzalloc_obj(*cs);
579 	if (!trial)
580 		return NULL;
581 
582 	/* Setup cpumask pointer array */
583 	cpumask_var_t *pmask[4] = {
584 		&trial->cpus_allowed,
585 		&trial->effective_cpus,
586 		&trial->effective_xcpus,
587 		&trial->exclusive_cpus
588 	};
589 
590 	if (alloc_cpumasks(pmask, ARRAY_SIZE(pmask))) {
591 		kfree(trial);
592 		return NULL;
593 	}
594 
595 	/* Copy masks if duplicating */
596 	if (cs) {
597 		cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
598 		cpumask_copy(trial->effective_cpus, cs->effective_cpus);
599 		cpumask_copy(trial->effective_xcpus, cs->effective_xcpus);
600 		cpumask_copy(trial->exclusive_cpus, cs->exclusive_cpus);
601 	}
602 
603 	return trial;
604 }
605 
606 /**
607  * free_cpuset - free the cpuset
608  * @cs: the cpuset to be freed
609  */
610 static inline void free_cpuset(struct cpuset *cs)
611 {
612 	free_cpumask_var(cs->cpus_allowed);
613 	free_cpumask_var(cs->effective_cpus);
614 	free_cpumask_var(cs->effective_xcpus);
615 	free_cpumask_var(cs->exclusive_cpus);
616 	kfree(cs);
617 }
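
/*
 * Usage sketch (assumption: mirrors how validate_change() callers work):
 * a prospective change is applied to a duplicate before touching the live
 * cpuset. The new_mask and err names below are hypothetical:
 *
 *	struct cpuset *trial = dup_or_alloc_cpuset(cs);
 *
 *	if (!trial)
 *		return -ENOMEM;
 *	cpumask_copy(trial->cpus_allowed, new_mask);
 *	err = validate_change(cs, trial);
 *	apply the change to cs only if err == 0
 *	free_cpuset(trial);
 */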
618 
619 /* Return user specified exclusive CPUs */
620 static inline struct cpumask *user_xcpus(struct cpuset *cs)
621 {
622 	return cpumask_empty(cs->exclusive_cpus) ? cs->cpus_allowed
623 						 : cs->exclusive_cpus;
624 }
625 
626 static inline bool xcpus_empty(struct cpuset *cs)
627 {
628 	return cpumask_empty(cs->cpus_allowed) &&
629 	       cpumask_empty(cs->exclusive_cpus);
630 }
631 
632 /*
633  * cpusets_are_exclusive() - check if two cpusets are exclusive
634  *
635  * Return true if exclusive, false if not
636  */
637 static inline bool cpusets_are_exclusive(struct cpuset *cs1, struct cpuset *cs2)
638 {
639 	struct cpumask *xcpus1 = user_xcpus(cs1);
640 	struct cpumask *xcpus2 = user_xcpus(cs2);
641 
642 	if (cpumask_intersects(xcpus1, xcpus2))
643 		return false;
644 	return true;
645 }
646 
647 /**
648  * cpus_excl_conflict - Check if two cpusets have exclusive CPU conflicts
649  * @trial:	the trial cpuset to be checked
650  * @sibling:	a sibling cpuset to be checked against
651  * @xcpus_changed: set if exclusive_cpus has been changed
652  *
653  * Returns: true if CPU exclusivity conflict exists, false otherwise
654  *
655  * Conflict detection rules:
656  *  o cgroup v1
657  *    See cpuset1_cpus_excl_conflict()
658  *  o cgroup v2
659  *    - The exclusive_cpus values cannot overlap.
660  *    - New exclusive_cpus cannot be a superset of a sibling's cpus_allowed.
661  */
662 static inline bool cpus_excl_conflict(struct cpuset *trial, struct cpuset *sibling,
663 				      bool xcpus_changed)
664 {
665 	if (!cpuset_v2())
666 		return cpuset1_cpus_excl_conflict(trial, sibling);
667 
668 	/* The cpus_allowed of a sibling cpuset cannot be a subset of the new exclusive_cpus */
669 	if (xcpus_changed && !cpumask_empty(sibling->cpus_allowed) &&
670 	    cpumask_subset(sibling->cpus_allowed, trial->exclusive_cpus))
671 		return true;
672 
673 	/* Exclusive_cpus cannot intersect */
674 	return cpumask_intersects(trial->exclusive_cpus, sibling->exclusive_cpus);
675 }
676 
677 static inline bool mems_excl_conflict(struct cpuset *cs1, struct cpuset *cs2)
678 {
679 	if ((is_mem_exclusive(cs1) || is_mem_exclusive(cs2)))
680 		return nodes_intersects(cs1->mems_allowed, cs2->mems_allowed);
681 	return false;
682 }
683 
684 /*
685  * validate_change() - Used to validate that any proposed cpuset change
686  *		       follows the structural rules for cpusets.
687  *
688  * If we replaced the flag and mask values of the current cpuset
689  * (cur) with those values in the trial cpuset (trial), would
690  * our various subset and exclusive rules still be valid?  Presumes
691  * cpuset_mutex held.
692  *
693  * 'cur' is the address of an actual, in-use cpuset.  Operations
694  * such as list traversal that depend on the actual address of the
695  * cpuset in the list must use cur below, not trial.
696  *
697  * 'trial' is the address of bulk structure copy of cur, with
698  * perhaps one or more of the fields cpus_allowed, mems_allowed,
699  * or flags changed to new, trial values.
700  *
701  * Return 0 if valid, -errno if not.
702  */
703 
704 static int validate_change(struct cpuset *cur, struct cpuset *trial)
705 {
706 	struct cgroup_subsys_state *css;
707 	struct cpuset *c, *par;
708 	bool xcpus_changed;
709 	int ret = 0;
710 
711 	rcu_read_lock();
712 
713 	if (!is_in_v2_mode())
714 		ret = cpuset1_validate_change(cur, trial);
715 	if (ret)
716 		goto out;
717 
718 	/* Remaining checks don't apply to root cpuset */
719 	if (cur == &top_cpuset)
720 		goto out;
721 
722 	par = parent_cs(cur);
723 
724 	/*
725 	 * We can't shrink if we won't have enough room for SCHED_DEADLINE
726 	 * tasks. This check is not done when scheduling is disabled as the
727 	 * users should know what they are doing.
728 	 *
729 	 * For v1, effective_cpus == cpus_allowed & user_xcpus() returns
730 	 * cpus_allowed.
731 	 *
732 	 * For v2, is_cpu_exclusive() & is_sched_load_balance() are true only
733 	 * for non-isolated partition root. At this point, the target
734 	 * effective_cpus isn't computed yet. user_xcpus() is the best
735 	 * approximation.
736 	 *
737 	 * TBD: May need to precompute the real effective_cpus here in case
738 	 * incorrect scheduling of SCHED_DEADLINE tasks in a partition
739 	 * becomes an issue.
740 	 */
741 	ret = -EBUSY;
742 	if (is_cpu_exclusive(cur) && is_sched_load_balance(cur) &&
743 	    !cpuset_cpumask_can_shrink(cur->effective_cpus, user_xcpus(trial)))
744 		goto out;
745 
746 	/*
747 	 * If either I or some sibling (!= me) is exclusive, we can't
748 	 * overlap. exclusive_cpus cannot overlap with each other if set.
749 	 */
750 	ret = -EINVAL;
751 	xcpus_changed = !cpumask_equal(cur->exclusive_cpus, trial->exclusive_cpus);
752 	cpuset_for_each_child(c, css, par) {
753 		if (c == cur)
754 			continue;
755 		if (cpus_excl_conflict(trial, c, xcpus_changed))
756 			goto out;
757 		if (mems_excl_conflict(trial, c))
758 			goto out;
759 	}
760 
761 	ret = 0;
762 out:
763 	rcu_read_unlock();
764 	return ret;
765 }
766 
767 #ifdef CONFIG_SMP
768 
769 /*
770  * generate_sched_domains()
771  *
772  * This function builds a partial partition of the system's CPUs.
773  * A 'partial partition' is a set of non-overlapping subsets whose
774  * union is a subset of the system's CPUs.
775  * The output of this function needs to be passed to kernel/sched/core.c
776  * partition_sched_domains() routine, which will rebuild the scheduler's
777  * load balancing domains (sched domains) as specified by that partial
778  * partition.
779  *
780  * See "What is sched_load_balance" in Documentation/admin-guide/cgroup-v1/cpusets.rst
781  * for a background explanation of this.
782  *
783  * Does not return errors, on the theory that the callers of this
784  * routine would rather not worry about failures to rebuild sched
785  * domains when operating in the severe memory shortage situations
786  * that could cause allocation failures below.
787  *
788  * Must be called with cpuset_mutex held.
789  *
790  * The three key local variables below are:
791  *    cp - cpuset pointer, used (together with pos_css) to perform a
792  *	   top-down scan of all cpusets. For our purposes, rebuilding
793  *	   the schedulers sched domains, we can ignore !is_sched_load_
794  *	   the scheduler's sched domains, we can ignore !is_sched_load_
795  *  csa  - (for CpuSet Array) Array of pointers to all the cpusets
796  *	   that need to be load balanced, for convenient iterative
797  *	   access by the subsequent code that finds the best partition,
798  *	   i.e the set of domains (subsets) of CPUs such that the
799  *	   i.e. the set of domains (subsets) of CPUs such that the
800  *	   is a subset of one of these domains, while there are as
801  *	   many such domains as possible, each as small as possible.
802  * doms  - Conversion of 'csa' to an array of cpumasks, for passing to
803  *	   the kernel/sched/core.c routine partition_sched_domains() in a
804  *	   convenient format, that can be easily compared to the prior
805  *	   value to determine what partition elements (sched domains)
806  *	   were changed (added or removed.)
807  */
808 static int generate_sched_domains(cpumask_var_t **domains,
809 			struct sched_domain_attr **attributes)
810 {
811 	struct cpuset *cp;	/* top-down scan of cpusets */
812 	struct cpuset **csa;	/* array of all cpuset ptrs */
813 	int i, j;		/* indices for partition finding loops */
814 	cpumask_var_t *doms;	/* resulting partition; i.e. sched domains */
815 	struct sched_domain_attr *dattr;  /* attributes for custom domains */
816 	int ndoms = 0;		/* number of sched domains in result */
817 	struct cgroup_subsys_state *pos_css;
818 
819 	if (!cpuset_v2())
820 		return cpuset1_generate_sched_domains(domains, attributes);
821 
822 	doms = NULL;
823 	dattr = NULL;
824 	csa = NULL;
825 
826 	/* Special case for the 99% of systems with one, full, sched domain */
827 	if (cpumask_empty(subpartitions_cpus)) {
828 		ndoms = 1;
829 		/* !csa will be checked and can be correctly handled */
830 		goto generate_doms;
831 	}
832 
833 	csa = kmalloc_objs(cp, nr_cpusets());
834 	if (!csa)
835 		goto done;
836 
837 	/* Find how many partitions and cache them to csa[] */
838 	rcu_read_lock();
839 	cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
840 		/*
841 		 * Only valid partition roots that are not isolated and with
842 		 * non-empty effective_cpus will be saved into csa[].
843 		 */
844 		if ((cp->partition_root_state == PRS_ROOT) &&
845 		    !cpumask_empty(cp->effective_cpus))
846 			csa[ndoms++] = cp;
847 
848 		/*
849 		 * Skip @cp's subtree if not a partition root and has no
850 		 * exclusive CPUs to be granted to child cpusets.
851 		 */
852 		if (!is_partition_valid(cp) && cpumask_empty(cp->exclusive_cpus))
853 			pos_css = css_rightmost_descendant(pos_css);
854 	}
855 	rcu_read_unlock();
856 
857 	for (i = 0; i < ndoms; i++) {
858 		for (j = i + 1; j < ndoms; j++) {
859 			if (cpusets_overlap(csa[i], csa[j]))
860 				/*
861 				 * Cgroup v2 shouldn't pass down overlapping
862 				 * partition root cpusets.
863 				 */
864 				WARN_ON_ONCE(1);
865 		}
866 	}
867 
868 generate_doms:
869 	doms = alloc_sched_domains(ndoms);
870 	if (!doms)
871 		goto done;
872 
873 	/*
874 	 * The rest of the code, including the scheduler, can deal with
875 	 * dattr==NULL case. No need to abort if alloc fails.
876 	 */
877 	dattr = kmalloc_objs(struct sched_domain_attr, ndoms);
878 
879 	/*
880 	 * Cgroup v2 doesn't support domain attributes, just set all of them
881 	 * to SD_ATTR_INIT. Also non-isolating partition root CPUs are a
882 	 * subset of HK_TYPE_DOMAIN_BOOT housekeeping CPUs.
883 	 */
884 	for (i = 0; i < ndoms; i++) {
885 		/*
886 		 * The top cpuset may contain some boot time isolated
887 		 * CPUs that need to be excluded from the sched domain.
888 		 */
889 		if (!csa || csa[i] == &top_cpuset)
890 			cpumask_and(doms[i], top_cpuset.effective_cpus,
891 				    housekeeping_cpumask(HK_TYPE_DOMAIN_BOOT));
892 		else
893 			cpumask_copy(doms[i], csa[i]->effective_cpus);
894 		if (dattr)
895 			dattr[i] = SD_ATTR_INIT;
896 	}
897 
898 done:
899 	kfree(csa);
900 
901 	/*
902 	 * Fallback to the default domain if kmalloc() failed.
903 	 * See comments in partition_sched_domains().
904 	 */
905 	if (doms == NULL)
906 		ndoms = 1;
907 
908 	*domains    = doms;
909 	*attributes = dattr;
910 	return ndoms;
911 }
912 
913 static void dl_update_tasks_root_domain(struct cpuset *cs)
914 {
915 	struct css_task_iter it;
916 	struct task_struct *task;
917 
918 	if (cs->nr_deadline_tasks == 0)
919 		return;
920 
921 	css_task_iter_start(&cs->css, 0, &it);
922 
923 	while ((task = css_task_iter_next(&it)))
924 		dl_add_task_root_domain(task);
925 
926 	css_task_iter_end(&it);
927 }
928 
929 void dl_rebuild_rd_accounting(void)
930 {
931 	struct cpuset *cs = NULL;
932 	struct cgroup_subsys_state *pos_css;
933 	int cpu;
934 	u64 cookie = ++dl_cookie;
935 
936 	lockdep_assert_cpuset_lock_held();
937 	lockdep_assert_cpus_held();
938 	lockdep_assert_held(&sched_domains_mutex);
939 
940 	rcu_read_lock();
941 
942 	for_each_possible_cpu(cpu) {
943 		if (dl_bw_visited(cpu, cookie))
944 			continue;
945 
946 		dl_clear_root_domain_cpu(cpu);
947 	}
948 
949 	cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
950 
951 		if (cpumask_empty(cs->effective_cpus)) {
952 			pos_css = css_rightmost_descendant(pos_css);
953 			continue;
954 		}
955 
956 		css_get(&cs->css);
957 
958 		rcu_read_unlock();
959 
960 		dl_update_tasks_root_domain(cs);
961 
962 		rcu_read_lock();
963 		css_put(&cs->css);
964 	}
965 	rcu_read_unlock();
966 }
967 
968 /*
969  * Rebuild scheduler domains.
970  *
971  * If the flag 'sched_load_balance' of any cpuset with non-empty
972  * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
973  * which has that flag enabled, or if any cpuset with a non-empty
974  * 'cpus' is removed, then call this routine to rebuild the
975  * scheduler's dynamic sched domains.
976  *
977  * Call with cpuset_mutex held.  Takes cpus_read_lock().
978  */
979 void rebuild_sched_domains_locked(void)
980 {
981 	struct sched_domain_attr *attr;
982 	cpumask_var_t *doms;
983 	int ndoms;
984 	int i;
985 
986 	lockdep_assert_cpus_held();
987 	lockdep_assert_cpuset_lock_held();
988 	force_sd_rebuild = false;
989 
990 	/* Generate domain masks and attrs */
991 	ndoms = generate_sched_domains(&doms, &attr);
992 
993 	/*
994 	 * Cpuset hotplug handling (cpuset_handle_hotplug()) is now invoked
995 	 * synchronously, so this function should not race with CPU hotplug,
996 	 * and the effective CPUs must not include any offline CPUs. Passing
997 	 * an offline CPU in the doms to partition_sched_domains() will
998 	 * trigger a kernel panic.
999 	 *
1000 	 * We perform a final check here: if the doms contain any offline CPUs,
1001 	 * a warning is emitted and we return directly to prevent the panic.
1002 	 */
1003 	for (i = 0; doms && i < ndoms; i++) {
1004 		if (WARN_ON_ONCE(!cpumask_subset(doms[i], cpu_active_mask)))
1005 			return;
1006 	}
1007 
1008 	/* Have scheduler rebuild the domains */
1009 	partition_sched_domains(ndoms, doms, attr);
1010 }
1011 #else /* !CONFIG_SMP */
1012 void rebuild_sched_domains_locked(void)
1013 {
1014 }
1015 #endif /* CONFIG_SMP */
1016 
1017 static void rebuild_sched_domains_cpuslocked(void)
1018 {
1019 	mutex_lock(&cpuset_mutex);
1020 	rebuild_sched_domains_locked();
1021 	mutex_unlock(&cpuset_mutex);
1022 }
1023 
1024 void rebuild_sched_domains(void)
1025 {
1026 	cpus_read_lock();
1027 	rebuild_sched_domains_cpuslocked();
1028 	cpus_read_unlock();
1029 }
1030 
1031 void cpuset_reset_sched_domains(void)
1032 {
1033 	mutex_lock(&cpuset_mutex);
1034 	partition_sched_domains(1, NULL, NULL);
1035 	mutex_unlock(&cpuset_mutex);
1036 }
1037 
1038 /**
1039  * cpuset_update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
1040  * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
1041  * @new_cpus: the temp variable for the new effective_cpus mask
1042  *
1043  * Iterate through each task of @cs updating its cpus_allowed to the
1044  * effective cpuset's.  As this function is called with cpuset_mutex held,
1045  * cpuset membership stays stable.
1046  *
1047  * For top_cpuset, task_cpu_possible_mask() is used instead of effective_cpus
1048  * to make sure all offline CPUs are also included as hotplug code won't
1049  * update cpumasks for tasks in top_cpuset.
1050  *
1051  * As task_cpu_possible_mask() can be task dependent in arm64, we have to
1052  * do cpu masking per task instead of doing it once for all tasks.
1053  */
1054 void cpuset_update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
1055 {
1056 	struct css_task_iter it;
1057 	struct task_struct *task;
1058 	bool top_cs = cs == &top_cpuset;
1059 
1060 	css_task_iter_start(&cs->css, 0, &it);
1061 	while ((task = css_task_iter_next(&it))) {
1062 		const struct cpumask *possible_mask = task_cpu_possible_mask(task);
1063 
1064 		if (top_cs) {
1065 			/*
1066 			 * PF_KTHREAD tasks are handled by housekeeping.
1067 			 * PF_NO_SETAFFINITY tasks are ignored.
1068 			 */
1069 			if (task->flags & (PF_KTHREAD | PF_NO_SETAFFINITY))
1070 				continue;
1071 			cpumask_andnot(new_cpus, possible_mask, subpartitions_cpus);
1072 		} else {
1073 			cpumask_and(new_cpus, possible_mask, cs->effective_cpus);
1074 		}
1075 		set_cpus_allowed_ptr(task, new_cpus);
1076 	}
1077 	css_task_iter_end(&it);
1078 }
1079 
1080 /**
1081  * compute_effective_cpumask - Compute the effective cpumask of the cpuset
1082  * @new_cpus: the temp variable for the new effective_cpus mask
1083  * @cs: the cpuset that needs to recompute its new effective_cpus mask
1084  * @parent: the parent cpuset
1085  *
1086  * The result is valid only if the given cpuset isn't a partition root.
1087  */
1088 static void compute_effective_cpumask(struct cpumask *new_cpus,
1089 				      struct cpuset *cs, struct cpuset *parent)
1090 {
1091 	cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus);
1092 }
1093 
1094 /*
1095  * Commands for update_parent_effective_cpumask
1096  */
1097 enum partition_cmd {
1098 	partcmd_enable,		/* Enable partition root	  */
1099 	partcmd_enablei,	/* Enable isolated partition root */
1100 	partcmd_disable,	/* Disable partition root	  */
1101 	partcmd_update,		/* Update parent's effective_cpus */
1102 	partcmd_invalidate,	/* Make partition invalid	  */
1103 };
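
/*
 * For illustration (assumed mapping, see update_parent_effective_cpumask()
 * below): writing "root" or "isolated" to cpuset.cpus.partition leads
 * update_prstate() to issue partcmd_enable or partcmd_enablei respectively,
 * and writing "member" issues partcmd_disable. Cpumask updates use
 * partcmd_update (from update_cpumasks_hier() and update_cpumask()) or
 * partcmd_invalidate (from update_cpumask() with a NULL newmask).
 */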
1104 
1105 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
1106 				    struct tmpmasks *tmp);
1107 
1108 /*
1109  * Update partition exclusive flag
1110  *
1111  * Return: 0 if successful, an error code otherwise
1112  */
1113 static int update_partition_exclusive_flag(struct cpuset *cs, int new_prs)
1114 {
1115 	bool exclusive = (new_prs > PRS_MEMBER);
1116 
1117 	if (exclusive && !is_cpu_exclusive(cs)) {
1118 		if (cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, 1))
1119 			return PERR_NOTEXCL;
1120 	} else if (!exclusive && is_cpu_exclusive(cs)) {
1121 		/* Turning off CS_CPU_EXCLUSIVE will not return error */
1122 		cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, 0);
1123 	}
1124 	return 0;
1125 }
1126 
1127 /*
1128  * Update partition load balance flag and/or rebuild sched domain
1129  *
1130  * Changing the load balance flag will force a sched domain rebuild at the
1131  * end of the current operation (via cpuset_force_rebuild()).
1132  * This function is for cgroup v2 only.
1133  */
1134 static void update_partition_sd_lb(struct cpuset *cs, int old_prs)
1135 {
1136 	int new_prs = cs->partition_root_state;
1137 	bool rebuild_domains = (new_prs > 0) || (old_prs > 0);
1138 	bool new_lb;
1139 
1140 	/*
1141 	 * If cs is not a valid partition root, the load balance state
1142 	 * will follow its parent.
1143 	 */
1144 	if (new_prs > 0) {
1145 		new_lb = (new_prs != PRS_ISOLATED);
1146 	} else {
1147 		new_lb = is_sched_load_balance(parent_cs(cs));
1148 	}
1149 	if (new_lb != !!is_sched_load_balance(cs)) {
1150 		rebuild_domains = true;
1151 		if (new_lb)
1152 			set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1153 		else
1154 			clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1155 	}
1156 
1157 	if (rebuild_domains)
1158 		cpuset_force_rebuild();
1159 }
1160 
1161 /*
1162  * tasks_nocpu_error - Return true if tasks will have no effective_cpus
1163  */
1164 static bool tasks_nocpu_error(struct cpuset *parent, struct cpuset *cs,
1165 			      struct cpumask *xcpus)
1166 {
1167 	/*
1168 	 * A populated partition (cs or parent) can't have empty effective_cpus
1169 	 */
1170 	return (cpumask_subset(parent->effective_cpus, xcpus) &&
1171 		partition_is_populated(parent, cs)) ||
1172 	       (!cpumask_intersects(xcpus, cpu_active_mask) &&
1173 		partition_is_populated(cs, NULL));
1174 }
1175 
1176 static void reset_partition_data(struct cpuset *cs)
1177 {
1178 	struct cpuset *parent = parent_cs(cs);
1179 
1180 	if (!cpuset_v2())
1181 		return;
1182 
1183 	lockdep_assert_held(&callback_lock);
1184 
1185 	if (cpumask_empty(cs->exclusive_cpus)) {
1186 		cpumask_clear(cs->effective_xcpus);
1187 		if (is_cpu_exclusive(cs))
1188 			clear_bit(CS_CPU_EXCLUSIVE, &cs->flags);
1189 	}
1190 	if (!cpumask_and(cs->effective_cpus, parent->effective_cpus, cs->cpus_allowed))
1191 		cpumask_copy(cs->effective_cpus, parent->effective_cpus);
1192 }
1193 
1194 /*
1195  * isolated_cpus_update - Update the isolated_cpus mask
1196  * @old_prs: old partition_root_state
1197  * @new_prs: new partition_root_state
1198  * @xcpus: exclusive CPUs with state change
1199  */
1200 static void isolated_cpus_update(int old_prs, int new_prs, struct cpumask *xcpus)
1201 {
1202 	WARN_ON_ONCE(old_prs == new_prs);
1203 	lockdep_assert_held(&callback_lock);
1204 	lockdep_assert_held(&cpuset_mutex);
1205 	if (new_prs == PRS_ISOLATED) {
1206 		if (cpumask_subset(xcpus, isolated_cpus))
1207 			return;
1208 		cpumask_or(isolated_cpus, isolated_cpus, xcpus);
1209 	} else {
1210 		if (!cpumask_intersects(xcpus, isolated_cpus))
1211 			return;
1212 		cpumask_andnot(isolated_cpus, isolated_cpus, xcpus);
1213 	}
1214 	update_housekeeping = true;
1215 }
1216 
1217 /*
1218  * partition_xcpus_add - Add new exclusive CPUs to partition
1219  * @new_prs: new partition_root_state
1220  * @parent: parent cpuset
1221  * @xcpus: exclusive CPUs to be added
1222  *
1223  * Remote partition if parent == NULL
1224  */
1225 static void partition_xcpus_add(int new_prs, struct cpuset *parent,
1226 				struct cpumask *xcpus)
1227 {
1228 	WARN_ON_ONCE(new_prs < 0);
1229 	lockdep_assert_held(&callback_lock);
1230 	if (!parent)
1231 		parent = &top_cpuset;
1232 
1234 	if (parent == &top_cpuset)
1235 		cpumask_or(subpartitions_cpus, subpartitions_cpus, xcpus);
1236 
1237 	if (new_prs != parent->partition_root_state)
1238 		isolated_cpus_update(parent->partition_root_state, new_prs,
1239 				     xcpus);
1240 
1241 	cpumask_andnot(parent->effective_cpus, parent->effective_cpus, xcpus);
1242 }
1243 
1244 /*
1245  * partition_xcpus_del - Remove exclusive CPUs from partition
1246  * @old_prs: old partition_root_state
1247  * @parent: parent cpuset
1248  * @xcpus: exclusive CPUs to be removed
1249  *
1250  * Remote partition if parent == NULL
1251  */
1252 static void partition_xcpus_del(int old_prs, struct cpuset *parent,
1253 				struct cpumask *xcpus)
1254 {
1255 	WARN_ON_ONCE(old_prs < 0);
1256 	lockdep_assert_held(&callback_lock);
1257 	if (!parent)
1258 		parent = &top_cpuset;
1259 
1260 	if (parent == &top_cpuset)
1261 		cpumask_andnot(subpartitions_cpus, subpartitions_cpus, xcpus);
1262 
1263 	if (old_prs != parent->partition_root_state)
1264 		isolated_cpus_update(old_prs, parent->partition_root_state,
1265 				     xcpus);
1266 
1267 	cpumask_or(parent->effective_cpus, parent->effective_cpus, xcpus);
1268 	cpumask_and(parent->effective_cpus, parent->effective_cpus, cpu_active_mask);
1269 }
1270 
1271 /*
1272  * isolated_cpus_can_update - check for isolated & nohz_full conflicts
1273  * @add_cpus: cpu mask for cpus that are going to be isolated
1274  * @del_cpus: cpu mask for cpus that are no longer isolated, can be NULL
1275  * Return: false if there is conflict, true otherwise
1276  *
1277  * If nohz_full is enabled and there are isolated CPUs, their combination
1278  * must still leave at least one housekeeping CPU available.
1279  *
1280  * TBD: Should consider merging this function into
1281  *      prstate_housekeeping_conflict().
1282  */
1283 static bool isolated_cpus_can_update(struct cpumask *add_cpus,
1284 				     struct cpumask *del_cpus)
1285 {
1286 	cpumask_var_t full_hk_cpus;
1287 	int res = true;
1288 
1289 	if (!housekeeping_enabled(HK_TYPE_KERNEL_NOISE))
1290 		return true;
1291 
1292 	if (del_cpus && cpumask_weight_and(del_cpus,
1293 			housekeeping_cpumask(HK_TYPE_KERNEL_NOISE)))
1294 		return true;
1295 
1296 	if (!alloc_cpumask_var(&full_hk_cpus, GFP_KERNEL))
1297 		return false;
1298 
1299 	cpumask_and(full_hk_cpus, housekeeping_cpumask(HK_TYPE_KERNEL_NOISE),
1300 		    housekeeping_cpumask(HK_TYPE_DOMAIN));
1301 	cpumask_andnot(full_hk_cpus, full_hk_cpus, isolated_cpus);
1302 	cpumask_and(full_hk_cpus, full_hk_cpus, cpu_active_mask);
1303 	if (!cpumask_weight_andnot(full_hk_cpus, add_cpus))
1304 		res = false;
1305 
1306 	free_cpumask_var(full_hk_cpus);
1307 	return res;
1308 }
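
/*
 * Worked example (hypothetical configuration): booting with "nohz_full=2-7"
 * leaves CPUs 0-1 as the only HK_TYPE_KERNEL_NOISE housekeeping CPUs.
 * Requesting CPUs 0-1 for an isolated partition (add_cpus = 0-1,
 * del_cpus == NULL) would leave no fully-housekeeping CPU behind, so
 * isolated_cpus_can_update() returns false and the partition request fails
 * with PERR_HKEEPING.
 */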
1309 
1310 /*
1311  * prstate_housekeeping_conflict - check for partition & housekeeping conflicts
1312  * @prstate: partition root state to be checked
1313  * @new_cpus: cpu mask
1314  * Return: true if there is conflict, false otherwise
1315  *
1316  * CPUs outside of HK_TYPE_DOMAIN_BOOT, if defined, can only be used in an
1317  * isolated partition.
1318  */
1319 static bool prstate_housekeeping_conflict(int prstate, struct cpumask *new_cpus)
1320 {
1321 	if (!housekeeping_enabled(HK_TYPE_DOMAIN_BOOT))
1322 		return false;
1323 
1324 	if ((prstate != PRS_ISOLATED) &&
1325 	    !cpumask_subset(new_cpus, housekeeping_cpumask(HK_TYPE_DOMAIN_BOOT)))
1326 		return true;
1327 
1328 	return false;
1329 }
1330 
1331 /*
1332  * cpuset_update_sd_hk_unlock - Rebuild sched domains, update HK & unlock
1333  *
1334  * Update the housekeeping cpumasks and rebuild the sched domains if
1335  * necessary, then release all the locks taken by cpuset_full_lock().
1336  * This should be called at the end of a cpuset operation.
1337  */
1338 static void cpuset_update_sd_hk_unlock(void)
1339 	__releases(&cpuset_mutex)
1340 	__releases(&cpuset_top_mutex)
1341 {
1342 	/* force_sd_rebuild will be cleared in rebuild_sched_domains_locked() */
1343 	if (force_sd_rebuild)
1344 		rebuild_sched_domains_locked();
1345 
1346 	if (update_housekeeping) {
1347 		update_housekeeping = false;
1348 		cpumask_copy(isolated_hk_cpus, isolated_cpus);
1349 
1350 		/*
1351 		 * housekeeping_update() is now called without holding
1352 		 * cpus_read_lock and cpuset_mutex. Only cpuset_top_mutex
1353 		 * is still being held for mutual exclusion.
1354 		 */
1355 		mutex_unlock(&cpuset_mutex);
1356 		cpus_read_unlock();
1357 		WARN_ON_ONCE(housekeeping_update(isolated_hk_cpus));
1358 		mutex_unlock(&cpuset_top_mutex);
1359 	} else {
1360 		cpuset_full_unlock();
1361 	}
1362 }
1363 
1364 /*
1365  * Work function to invoke cpuset_update_sd_hk_unlock()
1366  */
1367 static void hk_sd_workfn(struct work_struct *work)
1368 {
1369 	cpuset_full_lock();
1370 	cpuset_update_sd_hk_unlock();
1371 }
1372 
1373 /**
1374  * rm_siblings_excl_cpus - Remove exclusive CPUs that are used by sibling cpusets
1375  * @parent: Parent cpuset containing all siblings
1376  * @cs: Current cpuset (will be skipped)
1377  * @excpus:  exclusive effective CPU mask to modify
1378  *
1379  * This function ensures the given @excpus mask doesn't include any CPUs that
1380  * are exclusively allocated to sibling cpusets. It walks through all siblings
1381  * of @cs under @parent and removes their exclusive CPUs from @excpus.
1382  */
1383 static int rm_siblings_excl_cpus(struct cpuset *parent, struct cpuset *cs,
1384 					struct cpumask *excpus)
1385 {
1386 	struct cgroup_subsys_state *css;
1387 	struct cpuset *sibling;
1388 	int retval = 0;
1389 
1390 	if (cpumask_empty(excpus))
1391 		return 0;
1392 
1393 	/*
1394 	 * Remove exclusive CPUs from siblings
1395 	 */
1396 	rcu_read_lock();
1397 	cpuset_for_each_child(sibling, css, parent) {
1398 		struct cpumask *sibling_xcpus;
1399 
1400 		if (sibling == cs)
1401 			continue;
1402 
1403 		/*
1404 		 * If exclusive_cpus is defined, effective_xcpus will always
1405 		 * be a subset. Otherwise, effective_xcpus will only be set
1406 		 * in a valid partition root.
1407 		 */
1408 		sibling_xcpus = cpumask_empty(sibling->exclusive_cpus)
1409 			      ? sibling->effective_xcpus
1410 			      : sibling->exclusive_cpus;
1411 
1412 		if (cpumask_intersects(excpus, sibling_xcpus)) {
1413 			cpumask_andnot(excpus, excpus, sibling_xcpus);
1414 			retval++;
1415 		}
1416 	}
1417 	rcu_read_unlock();
1418 
1419 	return retval;
1420 }
1421 
1422 /*
1423  * compute_excpus - compute effective exclusive CPUs
1424  * @cs: cpuset
1425  * @excpus: effective exclusive CPUs value to be set
1426  * Return: 0 if there is no sibling conflict, > 0 otherwise
1427  *
1428  * If exclusive_cpus isn't explicitly set, we have to scan the sibling cpusets
1429  * and exclude their exclusive_cpus or effective_xcpus as well.
1430  */
1431 static int compute_excpus(struct cpuset *cs, struct cpumask *excpus)
1432 {
1433 	struct cpuset *parent = parent_cs(cs);
1434 
1435 	cpumask_and(excpus, user_xcpus(cs), parent->effective_xcpus);
1436 
1437 	if (!cpumask_empty(cs->exclusive_cpus))
1438 		return 0;
1439 
1440 	return rm_siblings_excl_cpus(parent, cs, excpus);
1441 }
1442 
1443 /*
1444  * compute_trialcs_excpus - Compute effective exclusive CPUs for a trial cpuset
1445  * @trialcs: The trial cpuset containing the proposed new configuration
1446  * @cs: The original cpuset that the trial configuration is based on
1447  * Return: 0 if successful with no sibling conflict, >0 if a conflict is found
1448  *
1449  * Computes the effective_xcpus for a trial configuration. @cs is provided
1450  * to represent the real cpuset that @trialcs is based on.
1451  */
1452 static int compute_trialcs_excpus(struct cpuset *trialcs, struct cpuset *cs)
1453 {
1454 	struct cpuset *parent = parent_cs(trialcs);
1455 	struct cpumask *excpus = trialcs->effective_xcpus;
1456 
1457 	/* trialcs is member, cpuset.cpus has no impact to excpus */
1458 	/* cs is a member; cpuset.cpus has no impact on excpus */
1459 		cpumask_and(excpus, trialcs->exclusive_cpus,
1460 				parent->effective_xcpus);
1461 	else
1462 		cpumask_and(excpus, user_xcpus(trialcs), parent->effective_xcpus);
1463 
1464 	return rm_siblings_excl_cpus(parent, cs, excpus);
1465 }
1466 
1467 static inline bool is_remote_partition(struct cpuset *cs)
1468 {
1469 	return cs->remote_partition;
1470 }
1471 
1472 static inline bool is_local_partition(struct cpuset *cs)
1473 {
1474 	return is_partition_valid(cs) && !is_remote_partition(cs);
1475 }
1476 
1477 /*
1478  * remote_partition_enable - Enable current cpuset as a remote partition root
1479  * @cs: the cpuset to update
1480  * @new_prs: new partition_root_state
1481  * @tmp: temporary masks
1482  * Return: 0 if successful, errcode if error
1483  *
1484  * Enable the current cpuset to become a remote partition root taking CPUs
1485  * directly from the top cpuset. cpuset_mutex must be held by the caller.
1486  */
1487 static int remote_partition_enable(struct cpuset *cs, int new_prs,
1488 				   struct tmpmasks *tmp)
1489 {
1490 	/*
1491 	 * The user must have sysadmin privilege.
1492 	 */
1493 	if (!capable(CAP_SYS_ADMIN))
1494 		return PERR_ACCESS;
1495 
1496 	/*
1497 	 * The requested exclusive_cpus must not be allocated to other
1498 	 * partitions and it can't use up all the root's effective_cpus.
1499 	 *
1500 	 * The effective_xcpus mask can contain offline CPUs, but there must
1501  * be at least one online CPU present before it can be enabled.
1502 	 *
1503 	 * Note that creating a remote partition with any local partition root
1504 	 * above it or remote partition root underneath it is not allowed.
1505 	 */
1506 	compute_excpus(cs, tmp->new_cpus);
1507 	WARN_ON_ONCE(cpumask_intersects(tmp->new_cpus, subpartitions_cpus));
1508 	if (!cpumask_intersects(tmp->new_cpus, cpu_active_mask) ||
1509 	    cpumask_subset(top_cpuset.effective_cpus, tmp->new_cpus))
1510 		return PERR_INVCPUS;
1511 	if (((new_prs == PRS_ISOLATED) &&
1512 	     !isolated_cpus_can_update(tmp->new_cpus, NULL)) ||
1513 	    prstate_housekeeping_conflict(new_prs, tmp->new_cpus))
1514 		return PERR_HKEEPING;
1515 
1516 	spin_lock_irq(&callback_lock);
1517 	partition_xcpus_add(new_prs, NULL, tmp->new_cpus);
1518 	cs->remote_partition = true;
1519 	cpumask_copy(cs->effective_xcpus, tmp->new_cpus);
1520 	spin_unlock_irq(&callback_lock);
1521 	cpuset_force_rebuild();
1522 	cs->prs_err = 0;
1523 
1524 	/*
1525 	 * Propagate changes in top_cpuset's effective_cpus down the hierarchy.
1526 	 */
1527 	cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
1528 	update_sibling_cpumasks(&top_cpuset, NULL, tmp);
1529 	return 0;
1530 }
1531 
1532 /*
1533  * remote_partition_disable - Remove current cpuset from remote partition list
1534  * @cs: the cpuset to update
1535  * @tmp: temporary masks
1536  *
1537  * The effective_cpus is also updated.
1538  *
1539  * cpuset_mutex must be held by the caller.
1540  */
1541 static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp)
1542 {
1543 	WARN_ON_ONCE(!is_remote_partition(cs));
1544 	/*
1545 	 * When a CPU is offlined, top_cpuset may end up with no available CPUs,
1546 	 * which should clear subpartitions_cpus. We should not emit a warning for this
1547 	 * scenario: the hierarchy is updated from top to bottom, so subpartitions_cpus
1548 	 * may already be cleared when disabling the partition.
1549 	 */
1550 	WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus) &&
1551 		     !cpumask_empty(subpartitions_cpus));
1552 
1553 	spin_lock_irq(&callback_lock);
1554 	cs->remote_partition = false;
1555 	partition_xcpus_del(cs->partition_root_state, NULL, cs->effective_xcpus);
1556 	if (cs->prs_err)
1557 		cs->partition_root_state = -cs->partition_root_state;
1558 	else
1559 		cs->partition_root_state = PRS_MEMBER;
1560 
1561 	/* effective_xcpus may need to be changed */
1562 	compute_excpus(cs, cs->effective_xcpus);
1563 	reset_partition_data(cs);
1564 	spin_unlock_irq(&callback_lock);
1565 	cpuset_force_rebuild();
1566 
1567 	/*
1568 	 * Propagate changes in top_cpuset's effective_cpus down the hierarchy.
1569 	 */
1570 	cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
1571 	update_sibling_cpumasks(&top_cpuset, NULL, tmp);
1572 }
1573 
1574 /*
1575  * remote_cpus_update - cpuset.cpus.exclusive change of a remote partition
1576  * @cs: the cpuset to be updated
1577  * @xcpus: the new exclusive_cpus mask, if non-NULL
1578  * @excpus: the new effective_xcpus mask
1579  * @tmp: temporary masks
1580  *
1581  * top_cpuset and subpartitions_cpus will be updated or partition can be
1582  * invalidated.
1583  */
1584 static void remote_cpus_update(struct cpuset *cs, struct cpumask *xcpus,
1585 			       struct cpumask *excpus, struct tmpmasks *tmp)
1586 {
1587 	bool adding, deleting;
1588 	int prs = cs->partition_root_state;
1589 
1590 	if (WARN_ON_ONCE(!is_remote_partition(cs)))
1591 		return;
1592 
1593 	WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus));
1594 
1595 	if (cpumask_empty(excpus)) {
1596 		cs->prs_err = PERR_CPUSEMPTY;
1597 		goto invalidate;
1598 	}
1599 
1600 	adding   = cpumask_andnot(tmp->addmask, excpus, cs->effective_xcpus);
1601 	deleting = cpumask_andnot(tmp->delmask, cs->effective_xcpus, excpus);
1602 
1603 	/*
1604 	 * Additions of remote CPUs are only allowed if those CPUs are
1605 	 * not allocated to other partitions and there are effective_cpus
1606 	 * left in the top cpuset.
1607 	 */
1608 	if (adding) {
1609 		WARN_ON_ONCE(cpumask_intersects(tmp->addmask, subpartitions_cpus));
1610 		if (!capable(CAP_SYS_ADMIN))
1611 			cs->prs_err = PERR_ACCESS;
1612 		else if (cpumask_intersects(tmp->addmask, subpartitions_cpus) ||
1613 			 cpumask_subset(top_cpuset.effective_cpus, tmp->addmask))
1614 			cs->prs_err = PERR_NOCPUS;
1615 		else if ((prs == PRS_ISOLATED) &&
1616 			 !isolated_cpus_can_update(tmp->addmask, tmp->delmask))
1617 			cs->prs_err = PERR_HKEEPING;
1618 		if (cs->prs_err)
1619 			goto invalidate;
1620 	}
1621 
1622 	spin_lock_irq(&callback_lock);
1623 	if (adding)
1624 		partition_xcpus_add(prs, NULL, tmp->addmask);
1625 	if (deleting)
1626 		partition_xcpus_del(prs, NULL, tmp->delmask);
1627 	/*
1628 	 * Need to update effective_xcpus and exclusive_cpus now as
1629 	 * update_sibling_cpumasks() below may iterate back to the same cs.
1630 	 */
1631 	cpumask_copy(cs->effective_xcpus, excpus);
1632 	if (xcpus)
1633 		cpumask_copy(cs->exclusive_cpus, xcpus);
1634 	spin_unlock_irq(&callback_lock);
1635 	if (adding || deleting)
1636 		cpuset_force_rebuild();
1637 
1638 	/*
1639 	 * Propagate changes in top_cpuset's effective_cpus down the hierarchy.
1640 	 */
1641 	cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
1642 	update_sibling_cpumasks(&top_cpuset, NULL, tmp);
1643 	return;
1644 
1645 invalidate:
1646 	remote_partition_disable(cs, tmp);
1647 }
1648 
1649 /**
1650  * update_parent_effective_cpumask - update effective_cpus mask of parent cpuset
1651  * @cs:      The cpuset that requests change in partition root state
1652  * @cmd:     Partition root state change command
1653  * @newmask: Optional new cpumask for partcmd_update
1654  * @tmp:     Temporary addmask and delmask
1655  * Return:   0 or a partition root state error code
1656  *
1657  * For partcmd_enable*, the cpuset is being transformed from a non-partition
1658  * root to a partition root. The effective_xcpus (cpus_allowed if
1659  * effective_xcpus not set) mask of the given cpuset will be taken away from
1660  * parent's effective_cpus. The function returns 0 if all the CPUs listed
1661  * in effective_xcpus can be granted; otherwise an error code is returned.
1662  *
1663  * For partcmd_disable, the cpuset is being transformed from a partition
1664  * root back to a non-partition root. Any CPUs in effective_xcpus will be
1665  * given back to parent's effective_cpus. 0 will always be returned.
1666  *
1667  * For partcmd_update, if the optional newmask is specified, the cpu list is
1668  * to be changed from effective_xcpus to newmask. Otherwise, effective_xcpus is
1669  * assumed to remain the same. The cpuset should either be a valid or invalid
1670  * partition root. The partition root state may change from valid to invalid
1671  * or vice versa. An error code will be returned if transitioning from
1672  * invalid to valid violates the exclusivity rule.
1673  *
1674  * For partcmd_invalidate, the current partition will be made invalid.
1675  *
1676  * The partcmd_enable* and partcmd_disable commands are used by
1677  * update_prstate(). An error code may be returned and the caller will check
1678  * for error.
1679  *
1680  * The partcmd_update command is used by update_cpumasks_hier() with newmask
1681  * NULL and update_cpumask() with newmask set. The partcmd_invalidate is used
1682  * by update_cpumask() with NULL newmask. In both cases, the callers won't
1683  * check for error and so partition_root_state and prs_err will be updated
1684  * directly.
1685  */
1686 static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
1687 					   struct cpumask *newmask,
1688 					   struct tmpmasks *tmp)
1689 {
1690 	struct cpuset *parent = parent_cs(cs);
1691 	int adding;	/* Adding cpus to parent's effective_cpus	*/
1692 	int deleting;	/* Deleting cpus from parent's effective_cpus	*/
1693 	int old_prs, new_prs;
1694 	int part_error = PERR_NONE;	/* Partition error? */
1695 	struct cpumask *xcpus = user_xcpus(cs);
1696 	int parent_prs = parent->partition_root_state;
1697 	bool nocpu;
1698 
1699 	lockdep_assert_cpuset_lock_held();
1700 	WARN_ON_ONCE(is_remote_partition(cs));	/* For local partition only */
1701 
1702 	/*
1703 	 * new_prs will only be changed for the partcmd_update and
1704 	 * partcmd_invalidate commands.
1705 	 */
1706 	adding = deleting = false;
1707 	old_prs = new_prs = cs->partition_root_state;
1708 
1709 	if (cmd == partcmd_invalidate) {
1710 		if (is_partition_invalid(cs))
1711 			return 0;
1712 
1713 		/*
1714 		 * Make the current partition invalid.
1715 		 */
1716 		if (is_partition_valid(parent))
1717 			adding = cpumask_and(tmp->addmask,
1718 					     xcpus, parent->effective_xcpus);
1719 		if (old_prs > 0)
1720 			new_prs = -old_prs;
1721 
1722 		goto write_error;
1723 	}
1724 
1725 	/*
1726 	 * The parent must be a partition root.
1727 	 * The new cpumask, if present, or the current cpus_allowed must
1728 	 * not be empty.
1729 	 */
1730 	if (!is_partition_valid(parent)) {
1731 		return is_partition_invalid(parent)
1732 		       ? PERR_INVPARENT : PERR_NOTPART;
1733 	}
1734 	if (!newmask && xcpus_empty(cs))
1735 		return PERR_CPUSEMPTY;
1736 
1737 	nocpu = tasks_nocpu_error(parent, cs, xcpus);
1738 
1739 	if ((cmd == partcmd_enable) || (cmd == partcmd_enablei)) {
1740 		/*
1741 		 * Need to call compute_excpus() in case exclusive_cpus is
1742 		 * not set. A sibling conflict should only happen if
1743 		 * exclusive_cpus isn't set.
1744 		 */
1745 		xcpus = tmp->delmask;
1746 		if (compute_excpus(cs, xcpus))
1747 			WARN_ON_ONCE(!cpumask_empty(cs->exclusive_cpus));
1748 		new_prs = (cmd == partcmd_enable) ? PRS_ROOT : PRS_ISOLATED;
1749 
1750 		/*
1751 		 * Enabling a partition root is not allowed if its
1752 		 * effective_xcpus is empty.
1753 		 */
1754 		if (cpumask_empty(xcpus))
1755 			return PERR_INVCPUS;
1756 
1757 		if (prstate_housekeeping_conflict(new_prs, xcpus))
1758 			return PERR_HKEEPING;
1759 
1760 		if ((new_prs == PRS_ISOLATED) && (new_prs != parent_prs) &&
1761 		    !isolated_cpus_can_update(xcpus, NULL))
1762 			return PERR_HKEEPING;
1763 
1764 		if (tasks_nocpu_error(parent, cs, xcpus))
1765 			return PERR_NOCPUS;
1766 
1767 		/*
1768 		 * This function will only be called when all the preliminary
1769 		 * checks have passed. At this point, the following condition
1770 		 * should hold.
1771 		 *
1772 		 * (cs->effective_xcpus & cpu_active_mask) ⊆ parent->effective_cpus
1773 		 *
1774 		 * Warn if it is not the case.
1775 		 */
1776 		cpumask_and(tmp->new_cpus, xcpus, cpu_active_mask);
1777 		WARN_ON_ONCE(!cpumask_subset(tmp->new_cpus, parent->effective_cpus));
1778 
1779 		deleting = true;
1780 	} else if (cmd == partcmd_disable) {
1781 		/*
1782 		 * May need to add CPUs back to the parent's effective_cpus
1783 		 * (and maybe remove them from subpartitions_cpus/isolated_cpus)
1784 		 * for a valid partition root. xcpus may contain CPUs that
1785 		 * shouldn't be removed from the two global cpumasks.
1786 		 */
1787 		if (is_partition_valid(cs)) {
1788 			cpumask_copy(tmp->addmask, cs->effective_xcpus);
1789 			adding = true;
1790 		}
1791 		new_prs = PRS_MEMBER;
1792 	} else if (newmask) {
1793 		/*
1794 		 * Empty cpumask is not allowed
1795 		 */
1796 		if (cpumask_empty(newmask)) {
1797 			part_error = PERR_CPUSEMPTY;
1798 			goto write_error;
1799 		}
1800 
1801 		/* Check again with newmask whether CPUs are available for parent/cs */
1802 		nocpu |= tasks_nocpu_error(parent, cs, newmask);
1803 
1804 		/*
1805 		 * partcmd_update with newmask:
1806 		 *
1807 		 * Compute add/delete mask to/from effective_cpus
1808 		 *
1809 		 * For valid partition:
1810 		 *   addmask = exclusive_cpus & ~newmask
1811 		 *			      & parent->effective_xcpus
1812 		 *   delmask = newmask & ~exclusive_cpus
1813 		 *		       & parent->effective_xcpus
1814 		 *
1815 		 * For invalid partition:
1816 		 *   delmask = newmask & parent->effective_xcpus
1817 		 *   The partition may become valid soon.
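		 *
		 * Example for the valid-partition case (hypothetical values):
		 * with exclusive_cpus = 2-5, newmask = 4-7 and
		 * parent->effective_xcpus = 0-7, addmask = 2-3 (returned to
		 * the parent) and delmask = 6-7 (taken from the parent).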
1818 		 */
1819 		if (is_partition_invalid(cs)) {
1820 			adding = false;
1821 			deleting = cpumask_and(tmp->delmask,
1822 					newmask, parent->effective_xcpus);
1823 		} else {
1824 			cpumask_andnot(tmp->addmask, xcpus, newmask);
1825 			adding = cpumask_and(tmp->addmask, tmp->addmask,
1826 					     parent->effective_xcpus);
1827 
1828 			cpumask_andnot(tmp->delmask, newmask, xcpus);
1829 			deleting = cpumask_and(tmp->delmask, tmp->delmask,
1830 					       parent->effective_xcpus);
1831 		}
1832 
1833 		/*
1834 		 * TBD: Invalidating a currently valid child root partition may
1835 		 * still break the isolated_cpus_can_update() rule if the parent
1836 		 * is an isolated partition.
1837 		 */
1838 		if (is_partition_valid(cs) && (old_prs != parent_prs)) {
1839 			if ((parent_prs == PRS_ROOT) &&
1840 			    /* Adding to parent means removing isolated CPUs */
1841 			    !isolated_cpus_can_update(tmp->delmask, tmp->addmask))
1842 				part_error = PERR_HKEEPING;
1843 			if ((parent_prs == PRS_ISOLATED) &&
1844 			    /* Adding to parent means adding isolated CPUs */
1845 			    !isolated_cpus_can_update(tmp->addmask, tmp->delmask))
1846 				part_error = PERR_HKEEPING;
1847 		}
1848 
1849 		/*
1850 		 * The CPUs to be removed from the parent's effective_cpus
1851 		 * must actually be present there.
1852 		 */
1853 		if (deleting) {
1854 			cpumask_and(tmp->new_cpus, tmp->delmask, cpu_active_mask);
1855 			WARN_ON_ONCE(!cpumask_subset(tmp->new_cpus, parent->effective_cpus));
1856 		}
1857 
1858 		/*
1859 		 * Make partition invalid if parent's effective_cpus could
1860 		 * become empty and there are tasks in the parent.
1861 		 */
1862 		if (nocpu && (!adding ||
1863 		    !cpumask_intersects(tmp->addmask, cpu_active_mask))) {
1864 			part_error = PERR_NOCPUS;
1865 			deleting = false;
1866 			adding = cpumask_and(tmp->addmask,
1867 					     xcpus, parent->effective_xcpus);
1868 		}
1869 	} else {
1870 		/*
1871 		 * partcmd_update w/o newmask
1872 		 *
1873 		 * delmask = effective_xcpus & parent->effective_cpus
1874 		 *
1875 		 * This can be called from:
1876 		 * 1) update_cpumasks_hier()
1877 		 * 2) cpuset_hotplug_update_tasks()
1878 		 *
1879 		 * Check to see if it can be transitioned from valid to
1880 		 * invalid partition or vice versa.
1881 		 *
1882 		 * A partition error happens when parent has tasks and all
1883 		 * its effective CPUs will have to be distributed out.
1884 		 */
1885 		if (nocpu) {
1886 			part_error = PERR_NOCPUS;
1887 			if (is_partition_valid(cs))
1888 				adding = cpumask_and(tmp->addmask,
1889 						xcpus, parent->effective_xcpus);
1890 		} else if (is_partition_invalid(cs) && !cpumask_empty(xcpus) &&
1891 			   cpumask_subset(xcpus, parent->effective_xcpus)) {
1892 			struct cgroup_subsys_state *css;
1893 			struct cpuset *child;
1894 			bool exclusive = true;
1895 
1896 			/*
1897 			 * Converting an invalid partition to a valid one has
1898 			 * to pass the cpu exclusivity test.
1899 			 */
1900 			rcu_read_lock();
1901 			cpuset_for_each_child(child, css, parent) {
1902 				if (child == cs)
1903 					continue;
1904 				if (!cpusets_are_exclusive(cs, child)) {
1905 					exclusive = false;
1906 					break;
1907 				}
1908 			}
1909 			rcu_read_unlock();
1910 			if (exclusive)
1911 				deleting = cpumask_and(tmp->delmask,
1912 						xcpus, parent->effective_cpus);
1913 			else
1914 				part_error = PERR_NOTEXCL;
1915 		}
1916 	}
1917 
1918 write_error:
1919 	if (part_error)
1920 		WRITE_ONCE(cs->prs_err, part_error);
1921 
1922 	if (cmd == partcmd_update) {
1923 		/*
1924 		 * Check for possible transition between valid and invalid
1925 		 * partition root.
1926 		 */
1927 		switch (cs->partition_root_state) {
1928 		case PRS_ROOT:
1929 		case PRS_ISOLATED:
1930 			if (part_error)
1931 				new_prs = -old_prs;
1932 			break;
1933 		case PRS_INVALID_ROOT:
1934 		case PRS_INVALID_ISOLATED:
1935 			if (!part_error)
1936 				new_prs = -old_prs;
1937 			break;
1938 		}
1939 	}
1940 
1941 	if (!adding && !deleting && (new_prs == old_prs))
1942 		return 0;
1943 
1944 	/*
1945 	 * Transitioning between invalid to valid or vice versa may require
1946 	 * changing CS_CPU_EXCLUSIVE. In the case of partcmd_update,
1947 	 * validate_change() has already been successfully called and
1948 	 * CPU lists in cs haven't been updated yet. So defer it to later.
1949 	 */
1950 	if ((old_prs != new_prs) && (cmd != partcmd_update))  {
1951 		int err = update_partition_exclusive_flag(cs, new_prs);
1952 
1953 		if (err)
1954 			return err;
1955 	}
1956 
1957 	/*
1958 	 * Change the parent's effective_cpus & effective_xcpus (top cpuset
1959 	 * only).
1960 	 *
1961 	 * Newly added CPUs will be removed from effective_cpus and
1962 	 * newly deleted ones will be added back to effective_cpus.
1963 	 */
1964 	spin_lock_irq(&callback_lock);
1965 	if (old_prs != new_prs)
1966 		cs->partition_root_state = new_prs;
1967 
1968 	/*
1969 	 * Adding to parent's effective_cpus means deleting CPUs from cs
1970 	 * and vice versa.
1971 	 */
1972 	if (adding)
1973 		partition_xcpus_del(old_prs, parent, tmp->addmask);
1974 	if (deleting)
1975 		partition_xcpus_add(new_prs, parent, tmp->delmask);
1976 
1977 	spin_unlock_irq(&callback_lock);
1978 
1979 	if ((old_prs != new_prs) && (cmd == partcmd_update))
1980 		update_partition_exclusive_flag(cs, new_prs);
1981 
1982 	if (adding || deleting) {
1983 		cpuset_update_tasks_cpumask(parent, tmp->addmask);
1984 		update_sibling_cpumasks(parent, cs, tmp);
1985 	}
1986 
1987 	/*
1988 	 * For partcmd_update without newmask, it is being called from
1989 	 * cpuset_handle_hotplug(). Update the load balance flag and
1990 	 * scheduling domain accordingly.
1991 	 */
1992 	if ((cmd == partcmd_update) && !newmask)
1993 		update_partition_sd_lb(cs, old_prs);
1994 
1995 	notify_partition_change(cs, old_prs);
1996 	return 0;
1997 }
1998 
1999 /**
2000  * compute_partition_effective_cpumask - compute effective_cpus for partition
2001  * @cs: partition root cpuset
2002  * @new_ecpus: previously computed effective_cpus to be updated
2003  *
2004  * Compute the effective_cpus of a partition root by scanning the
2005  * effective_xcpus of its child partition roots and excluding them.
2006  *
2007  * This has the side effect of invalidating valid child partition roots,
2008  * if necessary. Since it is called from either cpuset_hotplug_update_tasks()
2009  * or update_cpumasks_hier() where parent and children are modified
2010  * successively, we don't need to call update_parent_effective_cpumask()
2011  * and the child's effective_cpus will be updated in later iterations.
2012  *
2013  * Note that rcu_read_lock() is assumed to be held.
2014  */
2015 static void compute_partition_effective_cpumask(struct cpuset *cs,
2016 						struct cpumask *new_ecpus)
2017 {
2018 	struct cgroup_subsys_state *css;
2019 	struct cpuset *child;
2020 	bool populated = partition_is_populated(cs, NULL);
2021 
2022 	/*
2023 	 * Check child partition roots to see if they should be
2024 	 * invalidated when
2025 	 *  1) child effective_xcpus is not a subset of the new
2026 	 *     exclusive_cpus
2027 	 *  2) all the effective_cpus will be used up and cs
2028 	 *     has tasks
2029 	 */
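	/*
	 * Example (hypothetical): with cs->effective_xcpus = 0-7, all of
	 * them active, and a single valid child partition owning
	 * effective_xcpus = 4-5, new_ecpus ends up as 0-3,6-7 after the
	 * loop below.
	 */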
2030 	compute_excpus(cs, new_ecpus);
2031 	cpumask_and(new_ecpus, new_ecpus, cpu_active_mask);
2032 
2033 	rcu_read_lock();
2034 	cpuset_for_each_child(child, css, cs) {
2035 		if (!is_partition_valid(child))
2036 			continue;
2037 
2038 		/*
2039 		 * There shouldn't be a remote partition underneath another
2040 		 * partition root.
2041 		 */
2042 		WARN_ON_ONCE(is_remote_partition(child));
2043 		child->prs_err = 0;
2044 		if (!cpumask_subset(child->effective_xcpus,
2045 				    cs->effective_xcpus))
2046 			child->prs_err = PERR_INVCPUS;
2047 		else if (populated &&
2048 			 cpumask_subset(new_ecpus, child->effective_xcpus))
2049 			child->prs_err = PERR_NOCPUS;
2050 
2051 		if (child->prs_err) {
2052 			int old_prs = child->partition_root_state;
2053 
2054 			/*
2055 			 * Invalidate child partition
2056 			 */
2057 			spin_lock_irq(&callback_lock);
2058 			make_partition_invalid(child);
2059 			spin_unlock_irq(&callback_lock);
2060 			notify_partition_change(child, old_prs);
2061 			continue;
2062 		}
2063 		cpumask_andnot(new_ecpus, new_ecpus,
2064 			       child->effective_xcpus);
2065 	}
2066 	rcu_read_unlock();
2067 }
2068 
2069 /*
2070  * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
2071  * @cs:  the cpuset to consider
2072  * @tmp: temp variables for calculating effective_cpus & partition setup
2073  * @force: don't skip any descendant cpusets if set
2074  *
2075  * When configured cpumask is changed, the effective cpumasks of this cpuset
2076  * and all its descendants need to be updated.
2077  *
2078  * On legacy hierarchy, effective_cpus will be the same as cpus_allowed.
2079  *
2080  * Called with cpuset_mutex held
2081  */
2082 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
2083 				 bool force)
2084 {
2085 	struct cpuset *cp;
2086 	struct cgroup_subsys_state *pos_css;
2087 	int old_prs, new_prs;
2088 
2089 	rcu_read_lock();
2090 	cpuset_for_each_descendant_pre(cp, pos_css, cs) {
2091 		struct cpuset *parent = parent_cs(cp);
2092 		bool remote = is_remote_partition(cp);
2093 		bool update_parent = false;
2094 
2095 		old_prs = new_prs = cp->partition_root_state;
2096 
2097 		/*
2098 		 * For child remote partition root (!= cs), we need to call
2099 		 * remote_cpus_update() if effective_xcpus will be changed.
2100 		 * Otherwise, we can skip the whole subtree.
2101 		 *
2102 		 * remote_cpus_update() will reuse tmp->new_cpus only after
2103 		 * its current value has been processed.
2104 		 */
2105 		if (remote && (cp != cs)) {
2106 			compute_excpus(cp, tmp->new_cpus);
2107 			if (cpumask_equal(cp->effective_xcpus, tmp->new_cpus)) {
2108 				pos_css = css_rightmost_descendant(pos_css);
2109 				continue;
2110 			}
2111 			rcu_read_unlock();
2112 			remote_cpus_update(cp, NULL, tmp->new_cpus, tmp);
2113 			rcu_read_lock();
2114 
2115 			/* Remote partition may be invalidated */
2116 			new_prs = cp->partition_root_state;
2117 			remote = (new_prs == old_prs);
2118 		}
2119 
2120 		if (remote || (is_partition_valid(parent) && is_partition_valid(cp)))
2121 			compute_partition_effective_cpumask(cp, tmp->new_cpus);
2122 		else
2123 			compute_effective_cpumask(tmp->new_cpus, cp, parent);
2124 
2125 		if (remote)
2126 			goto get_css;	/* Ready to update cpuset data */
2127 
2128 		/*
2129 		 * A partition with no effective_cpus is allowed as long as
2130 		 * there is no task associated with it. Call
2131 		 * update_parent_effective_cpumask() to check it.
2132 		 */
2133 		if (is_partition_valid(cp) && cpumask_empty(tmp->new_cpus)) {
2134 			update_parent = true;
2135 			goto update_parent_effective;
2136 		}
2137 
2138 		/*
2139 		 * If it becomes empty, inherit the effective mask of the
2140 		 * parent, which is guaranteed to have some CPUs unless
2141 		 * it is a partition root that has explicitly distributed
2142 		 * out all its CPUs.
2143 		 */
2144 		if (is_in_v2_mode() && !remote && cpumask_empty(tmp->new_cpus))
2145 			cpumask_copy(tmp->new_cpus, parent->effective_cpus);
2146 
2147 		/*
2148 		 * Skip the whole subtree if
2149 		 * 1) the cpumask remains the same,
2150 		 * 2) it has no partition root state,
2151 		 * 3) the force flag is not set, and
2152 		 * 4) on v2, its load balance state is the same as its parent's.
2153 		 */
2154 		if (!cp->partition_root_state && !force &&
2155 		    cpumask_equal(tmp->new_cpus, cp->effective_cpus) &&
2156 		    (!cpuset_v2() ||
2157 		    (is_sched_load_balance(parent) == is_sched_load_balance(cp)))) {
2158 			pos_css = css_rightmost_descendant(pos_css);
2159 			continue;
2160 		}
2161 
2162 update_parent_effective:
2163 		/*
2164 		 * update_parent_effective_cpumask() should have been called
2165 		 * for cs already in update_cpumask(). We should also call
2166 		 * cpuset_update_tasks_cpumask() again for tasks in the parent
2167 		 * cpuset if the parent's effective_cpus changes.
2168 		 */
2169 		if ((cp != cs) && old_prs) {
2170 			switch (parent->partition_root_state) {
2171 			case PRS_ROOT:
2172 			case PRS_ISOLATED:
2173 				update_parent = true;
2174 				break;
2175 
2176 			default:
2177 				/*
2178 				 * When parent is not a partition root or is
2179 				 * invalid, child partition roots become
2180 				 * invalid too.
2181 				 */
2182 				if (is_partition_valid(cp))
2183 					new_prs = -cp->partition_root_state;
2184 				WRITE_ONCE(cp->prs_err,
2185 					   is_partition_invalid(parent)
2186 					   ? PERR_INVPARENT : PERR_NOTPART);
2187 				break;
2188 			}
2189 		}
2190 get_css:
2191 		if (!css_tryget_online(&cp->css))
2192 			continue;
2193 		rcu_read_unlock();
2194 
2195 		if (update_parent) {
2196 			update_parent_effective_cpumask(cp, partcmd_update, NULL, tmp);
2197 			/*
2198 			 * The cpuset partition_root_state may become
2199 			 * invalid. Capture it.
2200 			 */
2201 			new_prs = cp->partition_root_state;
2202 		}
2203 
2204 		spin_lock_irq(&callback_lock);
2205 		cpumask_copy(cp->effective_cpus, tmp->new_cpus);
2206 		cp->partition_root_state = new_prs;
2207 		/*
2208 		 * Need to compute effective_xcpus if either exclusive_cpus
2209 		 * is non-empty or it is a valid partition root.
2210 		 */
2211 		if ((new_prs > 0) || !cpumask_empty(cp->exclusive_cpus))
2212 			compute_excpus(cp, cp->effective_xcpus);
2213 		if (new_prs <= 0)
2214 			reset_partition_data(cp);
2215 		spin_unlock_irq(&callback_lock);
2216 
2217 		notify_partition_change(cp, old_prs);
2218 
2219 		WARN_ON(!is_in_v2_mode() &&
2220 			!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
2221 
2222 		cpuset_update_tasks_cpumask(cp, tmp->new_cpus);
2223 
2224 		/*
2225 		 * On default hierarchy, inherit the CS_SCHED_LOAD_BALANCE
2226 		 * from parent if current cpuset isn't a valid partition root
2227 		 * and their load balance states differ.
2228 		 */
2229 		if (cpuset_v2() && !is_partition_valid(cp) &&
2230 		    (is_sched_load_balance(parent) != is_sched_load_balance(cp))) {
2231 			if (is_sched_load_balance(parent))
2232 				set_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
2233 			else
2234 				clear_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
2235 		}
2236 
2237 		/*
2238 		 * On legacy hierarchy, if the effective cpumask of any non-
2239 		 * empty cpuset is changed, we need to rebuild sched domains.
2240 		 * On default hierarchy, the cpuset needs to be a partition
2241 		 * root as well.
2242 		 */
2243 		if (!cpumask_empty(cp->cpus_allowed) &&
2244 		    is_sched_load_balance(cp) &&
2245 		   (!cpuset_v2() || is_partition_valid(cp)))
2246 			cpuset_force_rebuild();
2247 
2248 		rcu_read_lock();
2249 		css_put(&cp->css);
2250 	}
2251 	rcu_read_unlock();
2252 }
2253 
2254 /**
2255  * update_sibling_cpumasks - Update siblings cpumasks
2256  * @parent:  Parent cpuset
2257  * @cs:      Current cpuset
2258  * @tmp:     Temp variables
2259  */
2260 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
2261 				    struct tmpmasks *tmp)
2262 {
2263 	struct cpuset *sibling;
2264 	struct cgroup_subsys_state *pos_css;
2265 
2266 	lockdep_assert_cpuset_lock_held();
2267 
2268 	/*
2269 	 * Check all its siblings and call update_cpumasks_hier()
2270 	 * if their effective_cpus will need to be changed.
2271 	 *
2272 	 * It is possible that a change in the parent's effective_cpus,
2273 	 * due to a change in a child partition's effective_xcpus, will impact
2274 	 * its siblings even if they do not inherit the parent's effective_cpus
2275 	 * directly. It should not impact valid partitions.
2276 	 *
2277 	 * The update_cpumasks_hier() function may sleep. So we have to
2278 	 * release the RCU read lock before calling it.
2279 	 */
2280 	rcu_read_lock();
2281 	cpuset_for_each_child(sibling, pos_css, parent) {
2282 		if (sibling == cs || is_partition_valid(sibling))
2283 			continue;
2284 
2285 		compute_effective_cpumask(tmp->new_cpus, sibling,
2286 					  parent);
2287 		if (cpumask_equal(tmp->new_cpus, sibling->effective_cpus))
2288 			continue;
2289 
2290 		if (!css_tryget_online(&sibling->css))
2291 			continue;
2292 
2293 		rcu_read_unlock();
2294 		update_cpumasks_hier(sibling, tmp, false);
2295 		rcu_read_lock();
2296 		css_put(&sibling->css);
2297 	}
2298 	rcu_read_unlock();
2299 }
2300 
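/*
 * Parse a cpulist written by userspace into @out_mask, rejecting CPUs
 * outside top_cpuset.cpus_allowed. For example (illustrative), a write
 * of "0-3,8" is accepted only if CPUs 0-3 and 8 are all present in
 * top_cpuset.cpus_allowed; otherwise -EINVAL is returned.
 */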
2301 static int parse_cpuset_cpulist(const char *buf, struct cpumask *out_mask)
2302 {
2303 	int retval;
2304 
2305 	retval = cpulist_parse(buf, out_mask);
2306 	if (retval < 0)
2307 		return retval;
2308 	if (!cpumask_subset(out_mask, top_cpuset.cpus_allowed))
2309 		return -EINVAL;
2310 
2311 	return 0;
2312 }
2313 
2314 /**
2315  * validate_partition - Validate a cpuset partition configuration
2316  * @cs: The cpuset to validate
2317  * @trialcs: The trial cpuset containing proposed configuration changes
2318  *
2319  * If any validation check fails, the appropriate error code is set in the
2320  * cpuset's prs_err field.
2321  *
2322  * Return: PRS error code (0 if valid, non-zero error code if invalid)
2323  */
2324 static enum prs_errcode validate_partition(struct cpuset *cs, struct cpuset *trialcs)
2325 {
2326 	struct cpuset *parent = parent_cs(cs);
2327 
2328 	if (cs_is_member(trialcs))
2329 		return PERR_NONE;
2330 
2331 	if (cpumask_empty(trialcs->effective_xcpus))
2332 		return PERR_INVCPUS;
2333 
2334 	if (prstate_housekeeping_conflict(trialcs->partition_root_state,
2335 					  trialcs->effective_xcpus))
2336 		return PERR_HKEEPING;
2337 
2338 	if (tasks_nocpu_error(parent, cs, trialcs->effective_xcpus))
2339 		return PERR_NOCPUS;
2340 
2341 	return PERR_NONE;
2342 }
2343 
2344 /**
2345  * partition_cpus_change - Handle partition state changes due to CPU mask updates
2346  * @cs: The target cpuset being modified
2347  * @trialcs: The trial cpuset containing proposed configuration changes
2348  * @tmp: Temporary masks for intermediate calculations
2349  *
2350  * This function handles partition state transitions triggered by CPU mask changes.
2351  * CPU modifications may cause a partition to be disabled or require state updates.
2352  */
2353 static void partition_cpus_change(struct cpuset *cs, struct cpuset *trialcs,
2354 					struct tmpmasks *tmp)
2355 {
2356 	enum prs_errcode prs_err;
2357 
2358 	if (cs_is_member(cs))
2359 		return;
2360 
2361 	prs_err = validate_partition(cs, trialcs);
2362 	if (prs_err)
2363 		trialcs->prs_err = cs->prs_err = prs_err;
2364 
2365 	if (is_remote_partition(cs)) {
2366 		if (trialcs->prs_err)
2367 			remote_partition_disable(cs, tmp);
2368 		else
2369 			remote_cpus_update(cs, trialcs->exclusive_cpus,
2370 					   trialcs->effective_xcpus, tmp);
2371 	} else {
2372 		if (trialcs->prs_err)
2373 			update_parent_effective_cpumask(cs, partcmd_invalidate,
2374 							NULL, tmp);
2375 		else
2376 			update_parent_effective_cpumask(cs, partcmd_update,
2377 							trialcs->effective_xcpus, tmp);
2378 	}
2379 }
2380 
2381 /**
2382  * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
2383  * @cs: the cpuset to consider
2384  * @trialcs: trial cpuset
2385  * @buf: buffer of cpu numbers written to this cpuset
2386  */
2387 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
2388 			  const char *buf)
2389 {
2390 	int retval;
2391 	struct tmpmasks tmp;
2392 	bool force = false;
2393 	int old_prs = cs->partition_root_state;
2394 
2395 	retval = parse_cpuset_cpulist(buf, trialcs->cpus_allowed);
2396 	if (retval < 0)
2397 		return retval;
2398 
2399 	/* Nothing to do if the cpus didn't change */
2400 	if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
2401 		return 0;
2402 
2403 	compute_trialcs_excpus(trialcs, cs);
2404 	trialcs->prs_err = PERR_NONE;
2405 
2406 	retval = validate_change(cs, trialcs);
2407 	if (retval < 0)
2408 		return retval;
2409 
2410 	if (alloc_tmpmasks(&tmp))
2411 		return -ENOMEM;
2412 
2413 	/*
2414 	 * Check all the descendants in update_cpumasks_hier() if
2415 	 * effective_xcpus is to be changed.
2416 	 */
2417 	force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus);
2418 
2419 	partition_cpus_change(cs, trialcs, &tmp);
2420 
2421 	spin_lock_irq(&callback_lock);
2422 	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
2423 	cpumask_copy(cs->effective_xcpus, trialcs->effective_xcpus);
2424 	if ((old_prs > 0) && !is_partition_valid(cs))
2425 		reset_partition_data(cs);
2426 	spin_unlock_irq(&callback_lock);
2427 
2428 	/* effective_cpus/effective_xcpus will be updated here */
2429 	update_cpumasks_hier(cs, &tmp, force);
2430 
2431 	/* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */
2432 	if (cs->partition_root_state)
2433 		update_partition_sd_lb(cs, old_prs);
2434 
2435 	free_tmpmasks(&tmp);
2436 	return retval;
2437 }
2438 
2439 /**
2440  * update_exclusive_cpumask - update the exclusive_cpus mask of a cpuset
2441  * @cs: the cpuset to consider
2442  * @trialcs: trial cpuset
2443  * @buf: buffer of cpu numbers written to this cpuset
2444  *
2445  * The tasks' cpumask will be updated if cs is a valid partition root.
2446  */
2447 static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs,
2448 				    const char *buf)
2449 {
2450 	int retval;
2451 	struct tmpmasks tmp;
2452 	bool force = false;
2453 	int old_prs = cs->partition_root_state;
2454 
2455 	retval = parse_cpuset_cpulist(buf, trialcs->exclusive_cpus);
2456 	if (retval < 0)
2457 		return retval;
2458 
2459 	/* Nothing to do if the CPUs didn't change */
2460 	if (cpumask_equal(cs->exclusive_cpus, trialcs->exclusive_cpus))
2461 		return 0;
2462 
2463 	/*
2464 	 * Reject the change if the exclusive CPUs conflict with those
2465 	 * of the siblings.
2466 	 */
2467 	if (compute_trialcs_excpus(trialcs, cs))
2468 		return -EINVAL;
2469 
2470 	/*
2471 	 * Check all the descendants in update_cpumasks_hier() if
2472 	 * effective_xcpus is to be changed.
2473 	 */
2474 	force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus);
2475 
2476 	retval = validate_change(cs, trialcs);
2477 	if (retval)
2478 		return retval;
2479 
2480 	if (alloc_tmpmasks(&tmp))
2481 		return -ENOMEM;
2482 
2483 	trialcs->prs_err = PERR_NONE;
2484 	partition_cpus_change(cs, trialcs, &tmp);
2485 
2486 	spin_lock_irq(&callback_lock);
2487 	cpumask_copy(cs->exclusive_cpus, trialcs->exclusive_cpus);
2488 	cpumask_copy(cs->effective_xcpus, trialcs->effective_xcpus);
2489 	if ((old_prs > 0) && !is_partition_valid(cs))
2490 		reset_partition_data(cs);
2491 	spin_unlock_irq(&callback_lock);
2492 
2493 	/*
2494 	 * Call update_cpumasks_hier() to update effective_cpus/effective_xcpus
2495 	 * of the subtree when it is a valid partition root or effective_xcpus
2496 	 * is updated.
2497 	 */
2498 	if (is_partition_valid(cs) || force)
2499 		update_cpumasks_hier(cs, &tmp, force);
2500 
2501 	/* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */
2502 	if (cs->partition_root_state)
2503 		update_partition_sd_lb(cs, old_prs);
2504 
2505 	free_tmpmasks(&tmp);
2506 	return 0;
2507 }
2508 
2509 /*
2510  * Migrate memory region from one set of nodes to another.  This is
2511  * performed asynchronously as it can be called from the process migration
2512  * path while holding locks involved in process management.  All mm migrations
2513  * are performed in the order queued and can be waited for by flushing
2514  * cpuset_migrate_mm_wq.
2515  */
2516 
2517 struct cpuset_migrate_mm_work {
2518 	struct work_struct	work;
2519 	struct mm_struct	*mm;
2520 	nodemask_t		from;
2521 	nodemask_t		to;
2522 };
2523 
2524 static void cpuset_migrate_mm_workfn(struct work_struct *work)
2525 {
2526 	struct cpuset_migrate_mm_work *mwork =
2527 		container_of(work, struct cpuset_migrate_mm_work, work);
2528 
2529 	/* on a wq worker, no need to worry about %current's mems_allowed */
2530 	do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
2531 	mmput(mwork->mm);
2532 	kfree(mwork);
2533 }
2534 
2535 static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
2536 							const nodemask_t *to)
2537 {
2538 	struct cpuset_migrate_mm_work *mwork;
2539 
2540 	if (nodes_equal(*from, *to)) {
2541 		mmput(mm);
2542 		return;
2543 	}
2544 
2545 	mwork = kzalloc_obj(*mwork);
2546 	if (mwork) {
2547 		mwork->mm = mm;
2548 		mwork->from = *from;
2549 		mwork->to = *to;
2550 		INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
2551 		queue_work(cpuset_migrate_mm_wq, &mwork->work);
2552 	} else {
2553 		mmput(mm);
2554 	}
2555 }
2556 
2557 static void flush_migrate_mm_task_workfn(struct callback_head *head)
2558 {
2559 	flush_workqueue(cpuset_migrate_mm_wq);
2560 	kfree(head);
2561 }
2562 
2563 static void schedule_flush_migrate_mm(void)
2564 {
2565 	struct callback_head *flush_cb;
2566 
2567 	flush_cb = kzalloc_obj(struct callback_head);
2568 	if (!flush_cb)
2569 		return;
2570 
2571 	init_task_work(flush_cb, flush_migrate_mm_task_workfn);
2572 
2573 	if (task_work_add(current, flush_cb, TWA_RESUME))
2574 		kfree(flush_cb);
2575 }
2576 
2577 /*
2578  * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
2579  * @tsk: the task to change
2580  * @newmems: new nodes that the task will be set
2581  *
2582  * We use the mems_allowed_seq seqlock to safely update tsk->mems_allowed
2583  * and rebind the task's mempolicy, if any. If the task is allocating in
2584  * parallel, it might temporarily see an empty intersection, which results in
2585  * a seqlock check and retry before OOM or allocation failure.
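 *
 * Reader side (illustrative sketch, not the exact allocator code):
 *
 *	unsigned int seq;
 *	nodemask_t nodes;
 *
 *	do {
 *		seq = read_seqcount_begin(&tsk->mems_allowed_seq);
 *		nodes = tsk->mems_allowed;
 *	} while (read_seqcount_retry(&tsk->mems_allowed_seq, seq));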
2586  */
2587 static void cpuset_change_task_nodemask(struct task_struct *tsk,
2588 					nodemask_t *newmems)
2589 {
2590 	task_lock(tsk);
2591 
2592 	local_irq_disable();
2593 	write_seqcount_begin(&tsk->mems_allowed_seq);
2594 
2595 	nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
2596 	mpol_rebind_task(tsk, newmems);
2597 	tsk->mems_allowed = *newmems;
2598 
2599 	write_seqcount_end(&tsk->mems_allowed_seq);
2600 	local_irq_enable();
2601 
2602 	task_unlock(tsk);
2603 }
2604 
2605 static void *cpuset_being_rebound;
2606 
2607 /**
2608  * cpuset_update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
2609  * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
2610  *
2611  * Iterate through each task of @cs, updating its mems_allowed to the
2612  * cpuset's effective_mems.  As this function is called with cpuset_mutex held,
2613  * cpuset membership stays stable.
2614  */
2615 void cpuset_update_tasks_nodemask(struct cpuset *cs)
2616 {
2617 	static nodemask_t newmems;	/* protected by cpuset_mutex */
2618 	struct css_task_iter it;
2619 	struct task_struct *task;
2620 
2621 	cpuset_being_rebound = cs;		/* causes mpol_dup() rebind */
2622 
2623 	guarantee_online_mems(cs, &newmems);
2624 
2625 	/*
2626 	 * The mpol_rebind_mm() call takes mmap_lock, which we couldn't
2627 	 * take while holding tasklist_lock.  Forks can happen - the
2628 	 * mpol_dup() cpuset_being_rebound check will catch such forks,
2629 	 * and rebind their vma mempolicies too.  Because we still hold
2630 	 * the global cpuset_mutex, we know that no other rebind effort
2631 	 * will be contending for the global variable cpuset_being_rebound.
2632 	 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
2633 	 * is idempotent.  Also migrate pages in each mm to new nodes.
2634 	 */
2635 	css_task_iter_start(&cs->css, 0, &it);
2636 	while ((task = css_task_iter_next(&it))) {
2637 		struct mm_struct *mm;
2638 		bool migrate;
2639 
2640 		cpuset_change_task_nodemask(task, &newmems);
2641 
2642 		mm = get_task_mm(task);
2643 		if (!mm)
2644 			continue;
2645 
2646 		migrate = is_memory_migrate(cs);
2647 
2648 		mpol_rebind_mm(mm, &cs->mems_allowed);
2649 		if (migrate)
2650 			cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
2651 		else
2652 			mmput(mm);
2653 	}
2654 	css_task_iter_end(&it);
2655 
2656 	/*
2657 	 * All the tasks' nodemasks have been updated, update
2658 	 * cs->old_mems_allowed.
2659 	 */
2660 	cs->old_mems_allowed = newmems;
2661 
2662 	/* We're done rebinding vmas to this cpuset's new mems_allowed. */
2663 	cpuset_being_rebound = NULL;
2664 }
2665 
2666 /*
2667  * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree
2668  * @cs: the cpuset to consider
2669  * @new_mems: a temp variable for calculating new effective_mems
2670  *
2671  * When configured nodemask is changed, the effective nodemasks of this cpuset
2672  * and all its descendants need to be updated.
2673  *
2674  * On legacy hierarchy, effective_mems will be the same with mems_allowed.
2675  *
2676  * Called with cpuset_mutex held
2677  */
2678 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
2679 {
2680 	struct cpuset *cp;
2681 	struct cgroup_subsys_state *pos_css;
2682 
2683 	rcu_read_lock();
2684 	cpuset_for_each_descendant_pre(cp, pos_css, cs) {
2685 		struct cpuset *parent = parent_cs(cp);
2686 
2687 		bool has_mems = nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);
2688 
2689 		/*
2690 		 * If it becomes empty, inherit the effective mask of the
2691 		 * parent, which is guaranteed to have some MEMs.
2692 		 */
2693 		if (is_in_v2_mode() && !has_mems)
2694 			*new_mems = parent->effective_mems;
2695 
2696 		/* Skip the whole subtree if the nodemask remains the same. */
2697 		if (nodes_equal(*new_mems, cp->effective_mems)) {
2698 			pos_css = css_rightmost_descendant(pos_css);
2699 			continue;
2700 		}
2701 
2702 		if (!css_tryget_online(&cp->css))
2703 			continue;
2704 		rcu_read_unlock();
2705 
2706 		spin_lock_irq(&callback_lock);
2707 		cp->effective_mems = *new_mems;
2708 		spin_unlock_irq(&callback_lock);
2709 
2710 		WARN_ON(!is_in_v2_mode() &&
2711 			!nodes_equal(cp->mems_allowed, cp->effective_mems));
2712 
2713 		cpuset_update_tasks_nodemask(cp);
2714 
2715 		rcu_read_lock();
2716 		css_put(&cp->css);
2717 	}
2718 	rcu_read_unlock();
2719 }
2720 
2721 /*
2722  * Handle a user request to change the 'mems' memory placement
2723  * of a cpuset.  Needs to validate the request, update the
2724  * cpuset's mems_allowed, and for each task in the cpuset,
2725  * update its mems_allowed, rebind the task's mempolicy and any vma
2726  * mempolicies, and, if the cpuset is marked 'memory_migrate',
2727  * migrate the task's pages to the new memory.
2728  *
2729  * Call with cpuset_mutex held.  May take callback_lock during call.
2730  * Will take tasklist_lock, scan the tasklist for tasks in cpuset cs,
2731  * lock each such task's mm->mmap_lock, scan its vmas and rebind
2732  * their mempolicies to the cpuset's new mems_allowed.
2733  */
2734 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
2735 			   const char *buf)
2736 {
2737 	int retval;
2738 
2739 	/*
2740 	 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
2741 	 * The validate_change() call ensures that cpusets with tasks have memory.
2742 	 */
2743 	retval = nodelist_parse(buf, trialcs->mems_allowed);
2744 	if (retval < 0)
2745 		return retval;
2746 
2747 	if (!nodes_subset(trialcs->mems_allowed,
2748 			  top_cpuset.mems_allowed))
2749 		return -EINVAL;
2750 
2751 	/* No change? nothing to do */
2752 	if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed))
2753 		return 0;
2754 
2755 	retval = validate_change(cs, trialcs);
2756 	if (retval < 0)
2757 		return retval;
2758 
2759 	check_insane_mems_config(&trialcs->mems_allowed);
2760 
2761 	spin_lock_irq(&callback_lock);
2762 	cs->mems_allowed = trialcs->mems_allowed;
2763 	spin_unlock_irq(&callback_lock);
2764 
2765 	/* use trialcs->mems_allowed as a temp variable */
2766 	update_nodemasks_hier(cs, &trialcs->mems_allowed);
2767 	return 0;
2768 }
2769 
2770 bool current_cpuset_is_being_rebound(void)
2771 {
2772 	bool ret;
2773 
2774 	rcu_read_lock();
2775 	ret = task_cs(current) == cpuset_being_rebound;
2776 	rcu_read_unlock();
2777 
2778 	return ret;
2779 }
2780 
2781 /*
2782  * cpuset_update_flag - read a 0 or a 1 in a file and update associated flag
2783  * bit:		the bit to update (see cpuset_flagbits_t)
2784  * cs:		the cpuset to update
2785  * turning_on: 	whether the flag is being set or cleared
2786  *
2787  * Call with cpuset_mutex held.
2788  */
2789 
2790 int cpuset_update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
2791 		       int turning_on)
2792 {
2793 	struct cpuset *trialcs;
2794 	int balance_flag_changed;
2795 	int spread_flag_changed;
2796 	int err;
2797 
2798 	trialcs = dup_or_alloc_cpuset(cs);
2799 	if (!trialcs)
2800 		return -ENOMEM;
2801 
2802 	if (turning_on)
2803 		set_bit(bit, &trialcs->flags);
2804 	else
2805 		clear_bit(bit, &trialcs->flags);
2806 
2807 	err = validate_change(cs, trialcs);
2808 	if (err < 0)
2809 		goto out;
2810 
2811 	balance_flag_changed = (is_sched_load_balance(cs) !=
2812 				is_sched_load_balance(trialcs));
2813 
2814 	spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
2815 			|| (is_spread_page(cs) != is_spread_page(trialcs)));
2816 
2817 	spin_lock_irq(&callback_lock);
2818 	cs->flags = trialcs->flags;
2819 	spin_unlock_irq(&callback_lock);
2820 
2821 	if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) {
2822 		if (cpuset_v2())
2823 			cpuset_force_rebuild();
2824 		else
2825 			rebuild_sched_domains_locked();
2826 	}
2827 
2828 	if (spread_flag_changed)
2829 		cpuset1_update_tasks_flags(cs);
2830 out:
2831 	free_cpuset(trialcs);
2832 	return err;
2833 }
2834 
2835 /**
2836  * update_prstate - update partition_root_state
2837  * @cs: the cpuset to update
2838  * @new_prs: new partition root state
2839  * Return: 0 if successful, != 0 if error
2840  *
2841  * Call with cpuset_mutex held.
2842  */
2843 static int update_prstate(struct cpuset *cs, int new_prs)
2844 {
2845 	int err = PERR_NONE, old_prs = cs->partition_root_state;
2846 	struct cpuset *parent = parent_cs(cs);
2847 	struct tmpmasks tmpmask;
2848 	bool isolcpus_updated = false;
2849 
2850 	if (old_prs == new_prs)
2851 		return 0;
2852 
2853 	/*
2854 	 * Treat a previously invalid partition root as if it is a "member".
2855 	 */
2856 	if (new_prs && is_partition_invalid(cs))
2857 		old_prs = PRS_MEMBER;
2858 
2859 	if (alloc_tmpmasks(&tmpmask))
2860 		return -ENOMEM;
2861 
2862 	err = update_partition_exclusive_flag(cs, new_prs);
2863 	if (err)
2864 		goto out;
2865 
2866 	if (!old_prs) {
2867 		/*
2868 		 * cpus_allowed and exclusive_cpus cannot both be empty.
2869 		 */
2870 		if (xcpus_empty(cs)) {
2871 			err = PERR_CPUSEMPTY;
2872 			goto out;
2873 		}
2874 
2875 		/*
2876 		 * We don't support the creation of a new local partition with
2877 		 * a remote partition underneath it. This unsupported
2878 		 * setting can happen only if parent is the top_cpuset because
2879 		 * a remote partition cannot be created underneath an existing
2880 		 * local or remote partition.
2881 		 */
2882 		if ((parent == &top_cpuset) &&
2883 		    cpumask_intersects(cs->exclusive_cpus, subpartitions_cpus)) {
2884 			err = PERR_REMOTE;
2885 			goto out;
2886 		}
2887 
2888 		/*
2889 		 * If the parent is a valid partition, enable a local partition.
2890 		 * Otherwise, enable a remote partition.
2891 		 */
2892 		if (is_partition_valid(parent)) {
2893 			enum partition_cmd cmd = (new_prs == PRS_ROOT)
2894 					       ? partcmd_enable : partcmd_enablei;
2895 
2896 			err = update_parent_effective_cpumask(cs, cmd, NULL, &tmpmask);
2897 		} else {
2898 			err = remote_partition_enable(cs, new_prs, &tmpmask);
2899 		}
2900 	} else if (old_prs && new_prs) {
2901 		/*
2902 		 * A change in load balance state only, no change in cpumasks.
2903 		 * Need to update isolated_cpus.
2904 		 */
2905 		if (((new_prs == PRS_ISOLATED) &&
2906 		     !isolated_cpus_can_update(cs->effective_xcpus, NULL)) ||
2907 		    prstate_housekeeping_conflict(new_prs, cs->effective_xcpus))
2908 			err = PERR_HKEEPING;
2909 		else
2910 			isolcpus_updated = true;
2911 	} else {
2912 		/*
2913 		 * Switching back to member is always allowed even if it
2914 		 * disables child partitions.
2915 		 */
2916 		if (is_remote_partition(cs))
2917 			remote_partition_disable(cs, &tmpmask);
2918 		else
2919 			update_parent_effective_cpumask(cs, partcmd_disable,
2920 							NULL, &tmpmask);
2921 
2922 		/*
2923 		 * Invalidation of child partitions will be done in
2924 		 * update_cpumasks_hier().
2925 		 */
2926 	}
2927 out:
2928 	/*
2929 	 * Make partition invalid & disable CS_CPU_EXCLUSIVE if an error
2930 	 * happens.
2931 	 */
2932 	if (err) {
2933 		new_prs = -new_prs;
2934 		update_partition_exclusive_flag(cs, new_prs);
2935 	}
2936 
2937 	spin_lock_irq(&callback_lock);
2938 	cs->partition_root_state = new_prs;
2939 	WRITE_ONCE(cs->prs_err, err);
2940 	if (!is_partition_valid(cs))
2941 		reset_partition_data(cs);
2942 	else if (isolcpus_updated)
2943 		isolated_cpus_update(old_prs, new_prs, cs->effective_xcpus);
2944 	spin_unlock_irq(&callback_lock);
2945 
2946 	/* Force update if switching back to member & update effective_xcpus */
2947 	update_cpumasks_hier(cs, &tmpmask, !new_prs);
2948 
2949 	/* A newly created partition must have effective_xcpus set */
2950 	WARN_ON_ONCE(!old_prs && (new_prs > 0)
2951 			      && cpumask_empty(cs->effective_xcpus));
2952 
2953 	/* Update sched domains and load balance flag */
2954 	update_partition_sd_lb(cs, old_prs);
2955 
2956 	notify_partition_change(cs, old_prs);
2957 	if (force_sd_rebuild)
2958 		rebuild_sched_domains_locked();
2959 	free_tmpmasks(&tmpmask);
2960 	return 0;
2961 }
2962 
2963 static struct cpuset *cpuset_attach_old_cs;
2964 
2965 /*
2966  * Check to see if a cpuset can accept a new task
2967  * For v1, cpus_allowed and mems_allowed can't be empty.
2968  * For v2, effective_cpus can't be empty.
2969  * Note that in v1, effective_cpus = cpus_allowed.
2970  */
2971 static int cpuset_can_attach_check(struct cpuset *cs)
2972 {
2973 	if (cpumask_empty(cs->effective_cpus) ||
2974 	   (!is_in_v2_mode() && nodes_empty(cs->mems_allowed)))
2975 		return -ENOSPC;
2976 	return 0;
2977 }
2978 
2979 static void reset_migrate_dl_data(struct cpuset *cs)
2980 {
2981 	cs->nr_migrate_dl_tasks = 0;
2982 	cs->sum_migrate_dl_bw = 0;
2983 }
2984 
2985 /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
2986 static int cpuset_can_attach(struct cgroup_taskset *tset)
2987 {
2988 	struct cgroup_subsys_state *css;
2989 	struct cpuset *cs, *oldcs;
2990 	struct task_struct *task;
2991 	bool cpus_updated, mems_updated;
2992 	int ret;
2993 
2994 	/* used later by cpuset_attach() */
2995 	cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
2996 	oldcs = cpuset_attach_old_cs;
2997 	cs = css_cs(css);
2998 
2999 	mutex_lock(&cpuset_mutex);
3000 
3001 	/* Check to see if task is allowed in the cpuset */
3002 	ret = cpuset_can_attach_check(cs);
3003 	if (ret)
3004 		goto out_unlock;
3005 
3006 	cpus_updated = !cpumask_equal(cs->effective_cpus, oldcs->effective_cpus);
3007 	mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems);
3008 
3009 	cgroup_taskset_for_each(task, css, tset) {
3010 		ret = task_can_attach(task);
3011 		if (ret)
3012 			goto out_unlock;
3013 
3014 		/*
3015 		 * Skip the rights-over-task check in v2 when nothing changes;
3016 		 * migration permission derives from hierarchy ownership in
3017 		 * cgroup_procs_write_permission().
3018 		 */
3019 		if (!cpuset_v2() || (cpus_updated || mems_updated)) {
3020 			ret = security_task_setscheduler(task);
3021 			if (ret)
3022 				goto out_unlock;
3023 		}
3024 
3025 		if (dl_task(task)) {
3026 			cs->nr_migrate_dl_tasks++;
3027 			cs->sum_migrate_dl_bw += task->dl.dl_bw;
3028 		}
3029 	}
3030 
3031 	if (!cs->nr_migrate_dl_tasks)
3032 		goto out_success;
3033 
3034 	if (!cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus)) {
3035 		int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus);
3036 
3037 		if (unlikely(cpu >= nr_cpu_ids)) {
3038 			reset_migrate_dl_data(cs);
3039 			ret = -EINVAL;
3040 			goto out_unlock;
3041 		}
3042 
3043 		ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw);
3044 		if (ret) {
3045 			reset_migrate_dl_data(cs);
3046 			goto out_unlock;
3047 		}
3048 	}
3049 
3050 out_success:
3051 	/*
3052 	 * Mark that attach is in progress.  This makes validate_change() fail
3053 	 * changes which zero cpus/mems_allowed.
3054 	 */
3055 	cs->attach_in_progress++;
3056 out_unlock:
3057 	mutex_unlock(&cpuset_mutex);
3058 	return ret;
3059 }
3060 
3061 static void cpuset_cancel_attach(struct cgroup_taskset *tset)
3062 {
3063 	struct cgroup_subsys_state *css;
3064 	struct cpuset *cs;
3065 
3066 	cgroup_taskset_first(tset, &css);
3067 	cs = css_cs(css);
3068 
3069 	mutex_lock(&cpuset_mutex);
3070 	dec_attach_in_progress_locked(cs);
3071 
3072 	if (cs->nr_migrate_dl_tasks) {
3073 		int cpu = cpumask_any(cs->effective_cpus);
3074 
3075 		dl_bw_free(cpu, cs->sum_migrate_dl_bw);
3076 		reset_migrate_dl_data(cs);
3077 	}
3078 
3079 	mutex_unlock(&cpuset_mutex);
3080 }
3081 
3082 /*
3083  * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach_task()
3084  * but we can't allocate it dynamically there.  Define it globally and
3085  * allocate from cpuset_init().
3086  */
3087 static cpumask_var_t cpus_attach;
3088 static nodemask_t cpuset_attach_nodemask_to;
3089 
3090 static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task)
3091 {
3092 	lockdep_assert_cpuset_lock_held();
3093 
3094 	if (cs != &top_cpuset)
3095 		guarantee_active_cpus(task, cpus_attach);
3096 	else
3097 		cpumask_andnot(cpus_attach, task_cpu_possible_mask(task),
3098 			       subpartitions_cpus);
3099 	/*
3100 	 * can_attach beforehand should guarantee that this doesn't
3101 	 * fail.  TODO: have a better way to handle failure here
3102 	 */
3103 	WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
3104 
3105 	cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
3106 	cpuset1_update_task_spread_flags(cs, task);
3107 }
3108 
3109 static void cpuset_attach(struct cgroup_taskset *tset)
3110 {
3111 	struct task_struct *task;
3112 	struct task_struct *leader;
3113 	struct cgroup_subsys_state *css;
3114 	struct cpuset *cs;
3115 	struct cpuset *oldcs = cpuset_attach_old_cs;
3116 	bool cpus_updated, mems_updated;
3117 	bool queue_task_work = false;
3118 
3119 	cgroup_taskset_first(tset, &css);
3120 	cs = css_cs(css);
3121 
3122 	lockdep_assert_cpus_held();	/* see cgroup_attach_lock() */
3123 	mutex_lock(&cpuset_mutex);
3124 	cpus_updated = !cpumask_equal(cs->effective_cpus,
3125 				      oldcs->effective_cpus);
3126 	mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems);
3127 
3128 	/*
3129 	 * In the default hierarchy, enabling cpuset in the child cgroups
3130 	 * will trigger a number of cpuset_attach() calls with no change
3131 	 * in effective cpus and mems. In that case, we can optimize out
3132 	 * by skipping the task iteration and update.
3133 	 */
3134 	if (cpuset_v2() && !cpus_updated && !mems_updated) {
3135 		cpuset_attach_nodemask_to = cs->effective_mems;
3136 		goto out;
3137 	}
3138 
3139 	guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
3140 
3141 	cgroup_taskset_for_each(task, css, tset)
3142 		cpuset_attach_task(cs, task);
3143 
3144 	/*
3145 	 * Change mm for all threadgroup leaders. This is expensive and may
3146 	 * sleep and should be moved outside the migration path proper. Skip it
3147 	 * if there is no change in effective_mems and CS_MEMORY_MIGRATE is
3148 	 * not set.
3149 	 */
3150 	cpuset_attach_nodemask_to = cs->effective_mems;
3151 	if (!is_memory_migrate(cs) && !mems_updated)
3152 		goto out;
3153 
3154 	cgroup_taskset_for_each_leader(leader, css, tset) {
3155 		struct mm_struct *mm = get_task_mm(leader);
3156 
3157 		if (mm) {
3158 			mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
3159 
3160 			/*
3161 			 * old_mems_allowed is the same as mems_allowed
3162 			 * here, except if this task is being moved
3163 			 * automatically due to hotplug.  In that case
3164 			 * @mems_allowed has been updated and is empty, so
3165 			 * @old_mems_allowed is the right nodeset to
3166 			 * migrate the mm from.
3167 			 */
3168 			if (is_memory_migrate(cs)) {
3169 				cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
3170 						  &cpuset_attach_nodemask_to);
3171 				queue_task_work = true;
3172 			} else
3173 				mmput(mm);
3174 		}
3175 	}
3176 
3177 out:
3178 	if (queue_task_work)
3179 		schedule_flush_migrate_mm();
3180 	cs->old_mems_allowed = cpuset_attach_nodemask_to;
3181 
3182 	if (cs->nr_migrate_dl_tasks) {
3183 		cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks;
3184 		oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks;
3185 		reset_migrate_dl_data(cs);
3186 	}
3187 
3188 	dec_attach_in_progress_locked(cs);
3189 
3190 	mutex_unlock(&cpuset_mutex);
3191 }
3192 
3193 /*
3194  * Common handling for a write to a "cpus" or "mems" file.
3195  */
3196 ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
3197 				    char *buf, size_t nbytes, loff_t off)
3198 {
3199 	struct cpuset *cs = css_cs(of_css(of));
3200 	struct cpuset *trialcs;
3201 	int retval = -ENODEV;
3202 
3203 	/* root is read-only */
3204 	if (cs == &top_cpuset)
3205 		return -EACCES;
3206 
3207 	buf = strstrip(buf);
3208 	cpuset_full_lock();
3209 	if (!is_cpuset_online(cs))
3210 		goto out_unlock;
3211 
3212 	trialcs = dup_or_alloc_cpuset(cs);
3213 	if (!trialcs) {
3214 		retval = -ENOMEM;
3215 		goto out_unlock;
3216 	}
3217 
3218 	switch (of_cft(of)->private) {
3219 	case FILE_CPULIST:
3220 		retval = update_cpumask(cs, trialcs, buf);
3221 		break;
3222 	case FILE_EXCLUSIVE_CPULIST:
3223 		retval = update_exclusive_cpumask(cs, trialcs, buf);
3224 		break;
3225 	case FILE_MEMLIST:
3226 		retval = update_nodemask(cs, trialcs, buf);
3227 		break;
3228 	default:
3229 		retval = -EINVAL;
3230 		break;
3231 	}
3232 
3233 	free_cpuset(trialcs);
3234 out_unlock:
3235 	cpuset_update_sd_hk_unlock();
3236 	if (of_cft(of)->private == FILE_MEMLIST)
3237 		schedule_flush_migrate_mm();
3238 	return retval ?: nbytes;
3239 }
3240 
3241 /*
3242  * These ascii lists should be read in a single call, by using a user
3243  * buffer large enough to hold the entire map.  If read in smaller
3244  * chunks, there is no guarantee of atomicity.  Since the display format
3245  * used, a list of ranges of sequential numbers, is variable length,
3246  * and since these maps can change value dynamically, one could read
3247  * gibberish by doing partial reads while a list was changing.
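 *
 * For example (illustrative), a cpumask containing CPUs 0-3 and 8 is
 * emitted by the "%*pbl" format below as "0-3,8".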
3248  */
3249 int cpuset_common_seq_show(struct seq_file *sf, void *v)
3250 {
3251 	struct cpuset *cs = css_cs(seq_css(sf));
3252 	cpuset_filetype_t type = seq_cft(sf)->private;
3253 	int ret = 0;
3254 
3255 	spin_lock_irq(&callback_lock);
3256 
3257 	switch (type) {
3258 	case FILE_CPULIST:
3259 		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
3260 		break;
3261 	case FILE_MEMLIST:
3262 		seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
3263 		break;
3264 	case FILE_EFFECTIVE_CPULIST:
3265 		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus));
3266 		break;
3267 	case FILE_EFFECTIVE_MEMLIST:
3268 		seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems));
3269 		break;
3270 	case FILE_EXCLUSIVE_CPULIST:
3271 		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->exclusive_cpus));
3272 		break;
3273 	case FILE_EFFECTIVE_XCPULIST:
3274 		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_xcpus));
3275 		break;
3276 	case FILE_SUBPARTS_CPULIST:
3277 		seq_printf(sf, "%*pbl\n", cpumask_pr_args(subpartitions_cpus));
3278 		break;
3279 	case FILE_ISOLATED_CPULIST:
3280 		seq_printf(sf, "%*pbl\n", cpumask_pr_args(isolated_cpus));
3281 		break;
3282 	default:
3283 		ret = -EINVAL;
3284 	}
3285 
3286 	spin_unlock_irq(&callback_lock);
3287 	return ret;
3288 }
3289 
3290 static int cpuset_partition_show(struct seq_file *seq, void *v)
3291 {
3292 	struct cpuset *cs = css_cs(seq_css(seq));
3293 	const char *err, *type = NULL;
3294 
3295 	switch (cs->partition_root_state) {
3296 	case PRS_ROOT:
3297 		seq_puts(seq, "root\n");
3298 		break;
3299 	case PRS_ISOLATED:
3300 		seq_puts(seq, "isolated\n");
3301 		break;
3302 	case PRS_MEMBER:
3303 		seq_puts(seq, "member\n");
3304 		break;
3305 	case PRS_INVALID_ROOT:
3306 		type = "root";
3307 		fallthrough;
3308 	case PRS_INVALID_ISOLATED:
3309 		if (!type)
3310 			type = "isolated";
3311 		err = perr_strings[READ_ONCE(cs->prs_err)];
3312 		if (err)
3313 			seq_printf(seq, "%s invalid (%s)\n", type, err);
3314 		else
3315 			seq_printf(seq, "%s invalid\n", type);
3316 		break;
3317 	}
3318 	return 0;
3319 }
3320 
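/*
 * Write handler for cpuset.cpus.partition. Illustrative usage from
 * userspace, assuming a cgroup v2 hierarchy mounted at /sys/fs/cgroup:
 *
 *	# echo "2-5" > /sys/fs/cgroup/A/cpuset.cpus
 *	# echo isolated > /sys/fs/cgroup/A/cpuset.cpus.partition
 *
 * The second write reaches update_prstate() with new_prs == PRS_ISOLATED.
 */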
3321 static ssize_t cpuset_partition_write(struct kernfs_open_file *of, char *buf,
3322 				     size_t nbytes, loff_t off)
3323 {
3324 	struct cpuset *cs = css_cs(of_css(of));
3325 	int val;
3326 	int retval = -ENODEV;
3327 
3328 	buf = strstrip(buf);
3329 
3330 	if (!strcmp(buf, "root"))
3331 		val = PRS_ROOT;
3332 	else if (!strcmp(buf, "member"))
3333 		val = PRS_MEMBER;
3334 	else if (!strcmp(buf, "isolated"))
3335 		val = PRS_ISOLATED;
3336 	else
3337 		return -EINVAL;
3338 
3339 	cpuset_full_lock();
3340 	if (is_cpuset_online(cs))
3341 		retval = update_prstate(cs, val);
3342 	cpuset_update_sd_hk_unlock();
3343 	return retval ?: nbytes;
3344 }
3345 
3346 /*
3347  * This is currently a minimal set for the default hierarchy. It can be
3348  * expanded later on by migrating more features and control files from v1.
3349  */
3350 static struct cftype dfl_files[] = {
3351 	{
3352 		.name = "cpus",
3353 		.seq_show = cpuset_common_seq_show,
3354 		.write = cpuset_write_resmask,
3355 		.max_write_len = (100U + 6 * NR_CPUS),
3356 		.private = FILE_CPULIST,
3357 		.flags = CFTYPE_NOT_ON_ROOT,
3358 	},
3359 
3360 	{
3361 		.name = "mems",
3362 		.seq_show = cpuset_common_seq_show,
3363 		.write = cpuset_write_resmask,
3364 		.max_write_len = (100U + 6 * MAX_NUMNODES),
3365 		.private = FILE_MEMLIST,
3366 		.flags = CFTYPE_NOT_ON_ROOT,
3367 	},
3368 
3369 	{
3370 		.name = "cpus.effective",
3371 		.seq_show = cpuset_common_seq_show,
3372 		.private = FILE_EFFECTIVE_CPULIST,
3373 	},
3374 
3375 	{
3376 		.name = "mems.effective",
3377 		.seq_show = cpuset_common_seq_show,
3378 		.private = FILE_EFFECTIVE_MEMLIST,
3379 	},
3380 
3381 	{
3382 		.name = "cpus.partition",
3383 		.seq_show = cpuset_partition_show,
3384 		.write = cpuset_partition_write,
3385 		.private = FILE_PARTITION_ROOT,
3386 		.flags = CFTYPE_NOT_ON_ROOT,
3387 		.file_offset = offsetof(struct cpuset, partition_file),
3388 	},
3389 
3390 	{
3391 		.name = "cpus.exclusive",
3392 		.seq_show = cpuset_common_seq_show,
3393 		.write = cpuset_write_resmask,
3394 		.max_write_len = (100U + 6 * NR_CPUS),
3395 		.private = FILE_EXCLUSIVE_CPULIST,
3396 		.flags = CFTYPE_NOT_ON_ROOT,
3397 	},
3398 
3399 	{
3400 		.name = "cpus.exclusive.effective",
3401 		.seq_show = cpuset_common_seq_show,
3402 		.private = FILE_EFFECTIVE_XCPULIST,
3403 		.flags = CFTYPE_NOT_ON_ROOT,
3404 	},
3405 
3406 	{
3407 		.name = "cpus.subpartitions",
3408 		.seq_show = cpuset_common_seq_show,
3409 		.private = FILE_SUBPARTS_CPULIST,
3410 		.flags = CFTYPE_ONLY_ON_ROOT | CFTYPE_DEBUG,
3411 	},
3412 
3413 	{
3414 		.name = "cpus.isolated",
3415 		.seq_show = cpuset_common_seq_show,
3416 		.private = FILE_ISOLATED_CPULIST,
3417 		.flags = CFTYPE_ONLY_ON_ROOT,
3418 	},
3419 
3420 	{ }	/* terminate */
3421 };
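/*
 * A rough sketch of how the table above surfaces to userspace on the
 * default hierarchy (names are prefixed with the subsystem name; the
 * CFTYPE_DEBUG entry is normally hidden unless cgroup debugging is
 * enabled):
 *
 *	cpuset.cpus				read-write, not on root
 *	cpuset.mems				read-write, not on root
 *	cpuset.cpus.effective			read-only
 *	cpuset.mems.effective			read-only
 *	cpuset.cpus.partition			read-write, not on root
 *	cpuset.cpus.exclusive			read-write, not on root
 *	cpuset.cpus.exclusive.effective		read-only, not on root
 *	cpuset.cpus.isolated			read-only, root cgroup only
 */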
3422 
3423 
3424 /**
3425  * cpuset_css_alloc - Allocate a cpuset css
3426  * @parent_css: Parent css of the control group that the new cpuset will be
3427  *              part of
3428  * Return: cpuset css on success, -ENOMEM on failure.
3429  *
3430  * Allocate and initialize a new cpuset css for a non-NULL @parent_css;
3431  * return the top cpuset css otherwise.
3432  */
3433 static struct cgroup_subsys_state *
3434 cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
3435 {
3436 	struct cpuset *cs;
3437 
3438 	if (!parent_css)
3439 		return &top_cpuset.css;
3440 
3441 	cs = dup_or_alloc_cpuset(NULL);
3442 	if (!cs)
3443 		return ERR_PTR(-ENOMEM);
3444 
3445 	__set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
3446 	cpuset1_init(cs);
3447 
3448 	/* Set CS_MEMORY_MIGRATE for default hierarchy */
3449 	if (cpuset_v2())
3450 		__set_bit(CS_MEMORY_MIGRATE, &cs->flags);
3451 
3452 	return &cs->css;
3453 }
3454 
3455 static int cpuset_css_online(struct cgroup_subsys_state *css)
3456 {
3457 	struct cpuset *cs = css_cs(css);
3458 	struct cpuset *parent = parent_cs(cs);
3459 
3460 	if (!parent)
3461 		return 0;
3462 
3463 	cpuset_full_lock();
3464 	/*
3465 	 * For v2, clear CS_SCHED_LOAD_BALANCE if parent is isolated
3466 	 */
3467 	if (cpuset_v2() && !is_sched_load_balance(parent))
3468 		clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
3469 
3470 	cpuset_inc();
3471 
3472 	spin_lock_irq(&callback_lock);
3473 	if (is_in_v2_mode()) {
3474 		cpumask_copy(cs->effective_cpus, parent->effective_cpus);
3475 		cs->effective_mems = parent->effective_mems;
3476 	}
3477 	spin_unlock_irq(&callback_lock);
3478 	cpuset1_online_css(css);
3479 
3480 	cpuset_full_unlock();
3481 	return 0;
3482 }
3483 
3484 /*
3485  * If the cpuset being removed has its flag 'sched_load_balance'
3486  * enabled, then simulate turning sched_load_balance off, which
3487  * will call rebuild_sched_domains_locked(). That is not needed
3488  * in the default hierarchy where only changes in partition
3489  * will cause repartitioning.
3490  */
3491 static void cpuset_css_offline(struct cgroup_subsys_state *css)
3492 {
3493 	struct cpuset *cs = css_cs(css);
3494 
3495 	cpuset_full_lock();
3496 	if (!cpuset_v2() && is_sched_load_balance(cs))
3497 		cpuset_update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
3498 
3499 	cpuset_dec();
3500 	cpuset_full_unlock();
3501 }
3502 
3503 /*
3504  * If a dying cpuset has 'cpus.partition' enabled, turn it off by changing
3505  * it back to member so that its exclusive CPUs are returned to the pool
3506  * for use by other online cpusets.
3507  */
3508 static void cpuset_css_killed(struct cgroup_subsys_state *css)
3509 {
3510 	struct cpuset *cs = css_cs(css);
3511 
3512 	cpuset_full_lock();
3513 	/* Reset valid partition back to member */
3514 	if (is_partition_valid(cs))
3515 		update_prstate(cs, PRS_MEMBER);
3516 	cpuset_update_sd_hk_unlock();
3517 }
3518 
3519 static void cpuset_css_free(struct cgroup_subsys_state *css)
3520 {
3521 	struct cpuset *cs = css_cs(css);
3522 
3523 	free_cpuset(cs);
3524 }
3525 
3526 static void cpuset_bind(struct cgroup_subsys_state *root_css)
3527 {
3528 	mutex_lock(&cpuset_mutex);
3529 	spin_lock_irq(&callback_lock);
3530 
3531 	if (is_in_v2_mode()) {
3532 		cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
3533 		cpumask_copy(top_cpuset.effective_xcpus, cpu_possible_mask);
3534 		top_cpuset.mems_allowed = node_possible_map;
3535 	} else {
3536 		cpumask_copy(top_cpuset.cpus_allowed,
3537 			     top_cpuset.effective_cpus);
3538 		top_cpuset.mems_allowed = top_cpuset.effective_mems;
3539 	}
3540 
3541 	spin_unlock_irq(&callback_lock);
3542 	mutex_unlock(&cpuset_mutex);
3543 }
3544 
3545 /*
3546  * In case the child is cloned into a cpuset different from its parent,
3547  * additional checks are done to see if the move is allowed.
3548  */
3549 static int cpuset_can_fork(struct task_struct *task, struct css_set *cset)
3550 {
3551 	struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
3552 	bool same_cs;
3553 	int ret;
3554 
3555 	rcu_read_lock();
3556 	same_cs = (cs == task_cs(current));
3557 	rcu_read_unlock();
3558 
3559 	if (same_cs)
3560 		return 0;
3561 
3562 	lockdep_assert_held(&cgroup_mutex);
3563 	mutex_lock(&cpuset_mutex);
3564 
3565 	/* Check to see if task is allowed in the cpuset */
3566 	ret = cpuset_can_attach_check(cs);
3567 	if (ret)
3568 		goto out_unlock;
3569 
3570 	ret = task_can_attach(task);
3571 	if (ret)
3572 		goto out_unlock;
3573 
3574 	ret = security_task_setscheduler(task);
3575 	if (ret)
3576 		goto out_unlock;
3577 
3578 	/*
3579 	 * Mark attach is in progress.  This makes validate_change() fail
3580 	 * changes which zero cpus/mems_allowed.
3581 	 */
3582 	cs->attach_in_progress++;
3583 out_unlock:
3584 	mutex_unlock(&cpuset_mutex);
3585 	return ret;
3586 }
3587 
3588 static void cpuset_cancel_fork(struct task_struct *task, struct css_set *cset)
3589 {
3590 	struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
3591 	bool same_cs;
3592 
3593 	rcu_read_lock();
3594 	same_cs = (cs == task_cs(current));
3595 	rcu_read_unlock();
3596 
3597 	if (same_cs)
3598 		return;
3599 
3600 	dec_attach_in_progress(cs);
3601 }
3602 
3603 /*
3604  * Make sure the new task conforms to the current state of its parent,
3605  * which could have been changed by cpuset just after the task inherited
3606  * that state and before it was added to the cgroup's task list.
3607  */
3608 static void cpuset_fork(struct task_struct *task)
3609 {
3610 	struct cpuset *cs;
3611 	bool same_cs;
3612 
3613 	rcu_read_lock();
3614 	cs = task_cs(task);
3615 	same_cs = (cs == task_cs(current));
3616 	rcu_read_unlock();
3617 
3618 	if (same_cs) {
3619 		if (cs == &top_cpuset)
3620 			return;
3621 
3622 		set_cpus_allowed_ptr(task, current->cpus_ptr);
3623 		task->mems_allowed = current->mems_allowed;
3624 		return;
3625 	}
3626 
3627 	/* CLONE_INTO_CGROUP */
3628 	mutex_lock(&cpuset_mutex);
3629 	guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
3630 	cpuset_attach_task(cs, task);
3631 
3632 	dec_attach_in_progress_locked(cs);
3633 	mutex_unlock(&cpuset_mutex);
3634 }
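/*
 * Minimal userspace sketch (illustrative only; target_cgroup_fd is an
 * assumed file descriptor on the destination cgroup directory) of the
 * CLONE_INTO_CGROUP path that exercises cpuset_can_fork()/cpuset_fork()
 * with a cpuset different from the parent's:
 *
 *	struct clone_args args = {
 *		.flags       = CLONE_INTO_CGROUP,
 *		.exit_signal = SIGCHLD,
 *		.cgroup      = target_cgroup_fd,
 *	};
 *	pid_t pid = syscall(__NR_clone3, &args, sizeof(args));
 *
 * The child starts with the destination cpuset's effective cpumask and
 * nodemask applied via cpuset_attach_task() above.
 */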
3635 
3636 struct cgroup_subsys cpuset_cgrp_subsys = {
3637 	.css_alloc	= cpuset_css_alloc,
3638 	.css_online	= cpuset_css_online,
3639 	.css_offline	= cpuset_css_offline,
3640 	.css_killed	= cpuset_css_killed,
3641 	.css_free	= cpuset_css_free,
3642 	.can_attach	= cpuset_can_attach,
3643 	.cancel_attach	= cpuset_cancel_attach,
3644 	.attach		= cpuset_attach,
3645 	.bind		= cpuset_bind,
3646 	.can_fork	= cpuset_can_fork,
3647 	.cancel_fork	= cpuset_cancel_fork,
3648 	.fork		= cpuset_fork,
3649 #ifdef CONFIG_CPUSETS_V1
3650 	.legacy_cftypes	= cpuset1_files,
3651 #endif
3652 	.dfl_cftypes	= dfl_files,
3653 	.early_init	= true,
3654 	.threaded	= true,
3655 };
3656 
3657 /**
3658  * cpuset_init - initialize cpusets at system boot
3659  *
3660  * Description: Initialize top_cpuset
3661  **/
3662 
3663 int __init cpuset_init(void)
3664 {
3665 	BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL));
3666 	BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL));
3667 	BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_xcpus, GFP_KERNEL));
3668 	BUG_ON(!alloc_cpumask_var(&top_cpuset.exclusive_cpus, GFP_KERNEL));
3669 	BUG_ON(!zalloc_cpumask_var(&subpartitions_cpus, GFP_KERNEL));
3670 	BUG_ON(!zalloc_cpumask_var(&isolated_cpus, GFP_KERNEL));
3671 	BUG_ON(!zalloc_cpumask_var(&isolated_hk_cpus, GFP_KERNEL));
3672 
3673 	cpumask_setall(top_cpuset.cpus_allowed);
3674 	nodes_setall(top_cpuset.mems_allowed);
3675 	cpumask_setall(top_cpuset.effective_cpus);
3676 	cpumask_setall(top_cpuset.effective_xcpus);
3677 	cpumask_setall(top_cpuset.exclusive_cpus);
3678 	nodes_setall(top_cpuset.effective_mems);
3679 
3680 	cpuset1_init(&top_cpuset);
3681 
3682 	BUG_ON(!alloc_cpumask_var(&cpus_attach, GFP_KERNEL));
3683 
3684 	if (housekeeping_enabled(HK_TYPE_DOMAIN_BOOT))
3685 		cpumask_andnot(isolated_cpus, cpu_possible_mask,
3686 			       housekeeping_cpumask(HK_TYPE_DOMAIN_BOOT));
3687 
3688 	return 0;
3689 }
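/*
 * Illustrative example (assuming boot-time "isolcpus=domain,2-3"
 * populates the HK_TYPE_DOMAIN_BOOT housekeeping mask): on an 8-CPU
 * system the cpumask_andnot() above would seed isolated_cpus with CPUs
 * 2-3, so they show up in cpuset.cpus.isolated from the start.
 */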
3690 
3691 static void
3692 hotplug_update_tasks(struct cpuset *cs,
3693 		     struct cpumask *new_cpus, nodemask_t *new_mems,
3694 		     bool cpus_updated, bool mems_updated)
3695 {
3696 	/* A partition root is allowed to have empty effective cpus */
3697 	if (cpumask_empty(new_cpus) && !is_partition_valid(cs))
3698 		cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus);
3699 	if (nodes_empty(*new_mems))
3700 		*new_mems = parent_cs(cs)->effective_mems;
3701 
3702 	spin_lock_irq(&callback_lock);
3703 	cpumask_copy(cs->effective_cpus, new_cpus);
3704 	cs->effective_mems = *new_mems;
3705 	spin_unlock_irq(&callback_lock);
3706 
3707 	if (cpus_updated)
3708 		cpuset_update_tasks_cpumask(cs, new_cpus);
3709 	if (mems_updated)
3710 		cpuset_update_tasks_nodemask(cs);
3711 }
3712 
3713 void cpuset_force_rebuild(void)
3714 {
3715 	force_sd_rebuild = true;
3716 }
3717 
3718 /**
3719  * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
3720  * @cs: cpuset in interest
3721  * @tmp: the tmpmasks structure pointer
3722  *
3723  * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
3724  * offline, update @cs accordingly.  If @cs ends up with no CPU or memory,
3725  * all its tasks are moved to the nearest ancestor with both resources.
3726  */
3727 static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
3728 {
3729 	static cpumask_t new_cpus;
3730 	static nodemask_t new_mems;
3731 	bool cpus_updated;
3732 	bool mems_updated;
3733 	bool remote;
3734 	int partcmd = -1;
3735 	struct cpuset *parent;
3736 retry:
3737 	wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
3738 
3739 	mutex_lock(&cpuset_mutex);
3740 
3741 	/*
3742 	 * We have raced with task attaching. We wait until attaching
3743 	 * is finished, so we won't attach a task to an empty cpuset.
3744 	 */
3745 	if (cs->attach_in_progress) {
3746 		mutex_unlock(&cpuset_mutex);
3747 		goto retry;
3748 	}
3749 
3750 	parent = parent_cs(cs);
3751 	compute_effective_cpumask(&new_cpus, cs, parent);
3752 	nodes_and(new_mems, cs->mems_allowed, parent->effective_mems);
3753 
3754 	if (!tmp || !cs->partition_root_state)
3755 		goto update_tasks;
3756 
3757 	/*
3758 	 * Compute effective_cpus for valid partition root, may invalidate
3759 	 * child partition roots if necessary.
3760 	 */
3761 	remote = is_remote_partition(cs);
3762 	if (remote || (is_partition_valid(cs) && is_partition_valid(parent)))
3763 		compute_partition_effective_cpumask(cs, &new_cpus);
3764 
3765 	if (remote && (cpumask_empty(subpartitions_cpus) ||
3766 			(cpumask_empty(&new_cpus) &&
3767 			 partition_is_populated(cs, NULL)))) {
3768 		cs->prs_err = PERR_HOTPLUG;
3769 		remote_partition_disable(cs, tmp);
3770 		compute_effective_cpumask(&new_cpus, cs, parent);
3771 		remote = false;
3772 	}
3773 
3774 	/*
3775 	 * Force the partition to become invalid if any one of
3776 	 * the following conditions holds:
3777 	 * 1) empty effective cpus but not valid empty partition.
3778 	 * 2) parent is invalid or doesn't grant any cpus to child
3779 	 *    partitions.
3780 	 * 3) subpartitions_cpus is empty.
3781 	 */
3782 	if (is_local_partition(cs) &&
3783 	    (!is_partition_valid(parent) ||
3784 	     tasks_nocpu_error(parent, cs, &new_cpus) ||
3785 	     cpumask_empty(subpartitions_cpus)))
3786 		partcmd = partcmd_invalidate;
3787 	/*
3788 	 * On the other hand, an invalid partition root may be transitioned
3789 	 * back to a regular one with a non-empty effective xcpus.
3790 	 */
3791 	else if (is_partition_valid(parent) && is_partition_invalid(cs) &&
3792 		 !cpumask_empty(cs->effective_xcpus))
3793 		partcmd = partcmd_update;
3794 
3795 	if (partcmd >= 0) {
3796 		update_parent_effective_cpumask(cs, partcmd, NULL, tmp);
3797 		if ((partcmd == partcmd_invalidate) || is_partition_valid(cs)) {
3798 			compute_partition_effective_cpumask(cs, &new_cpus);
3799 			cpuset_force_rebuild();
3800 		}
3801 	}
3802 
3803 update_tasks:
3804 	cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
3805 	mems_updated = !nodes_equal(new_mems, cs->effective_mems);
3806 	if (!cpus_updated && !mems_updated)
3807 		goto unlock;	/* Hotplug doesn't affect this cpuset */
3808 
3809 	if (mems_updated)
3810 		check_insane_mems_config(&new_mems);
3811 
3812 	if (is_in_v2_mode())
3813 		hotplug_update_tasks(cs, &new_cpus, &new_mems,
3814 				     cpus_updated, mems_updated);
3815 	else
3816 		cpuset1_hotplug_update_tasks(cs, &new_cpus, &new_mems,
3817 					    cpus_updated, mems_updated);
3818 
3819 unlock:
3820 	mutex_unlock(&cpuset_mutex);
3821 }
3822 
3823 /**
3824  * cpuset_handle_hotplug - handle CPU/memory hot{,un}plug for a cpuset
3825  *
3826  * This function is called after either CPU or memory configuration has
3827  * changed and updates cpuset accordingly.  The top_cpuset is always
3828  * synchronized to cpu_active_mask and N_MEMORY, which is necessary in
3829  * order to make cpusets transparent (of no effect) on systems that are
3830  * actively using CPU hotplug but making no active use of cpusets.
3831  *
3832  * Non-root cpusets are only affected by offlining.  If any CPUs or memory
3833  * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on
3834  * all descendants.
3835  *
3836  * Note that CPU offlining during suspend is ignored.  We don't modify
3837  * cpusets across suspend/resume cycles at all.
3838  *
3839  * CPU / memory hotplug is handled synchronously.
3840  */
3841 static void cpuset_handle_hotplug(void)
3842 {
3843 	static DECLARE_WORK(hk_sd_work, hk_sd_workfn);
3844 	static cpumask_t new_cpus;
3845 	static nodemask_t new_mems;
3846 	bool cpus_updated, mems_updated;
3847 	bool on_dfl = is_in_v2_mode();
3848 	struct tmpmasks tmp, *ptmp = NULL;
3849 
3850 	if (on_dfl && !alloc_tmpmasks(&tmp))
3851 		ptmp = &tmp;
3852 
3853 	lockdep_assert_cpus_held();
3854 	mutex_lock(&cpuset_mutex);
3855 
3856 	/* fetch the available cpus/mems and find out which changed how */
3857 	cpumask_copy(&new_cpus, cpu_active_mask);
3858 	new_mems = node_states[N_MEMORY];
3859 
3860 	/*
3861 	 * If subpartitions_cpus is populated, the check below is likely to
3862 	 * produce a false positive on cpus_updated even when the cpu list
3863 	 * hasn't changed. That means extra work, but it is better to be safe.
3864 	 */
3865 	cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus) ||
3866 		       !cpumask_empty(subpartitions_cpus);
3867 	mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);
3868 
3869 	/* For v1, synchronize cpus_allowed to cpu_active_mask */
3870 	if (cpus_updated) {
3871 		cpuset_force_rebuild();
3872 		spin_lock_irq(&callback_lock);
3873 		if (!on_dfl)
3874 			cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
3875 		/*
3876 		 * Make sure that CPUs allocated to child partitions
3877 		 * do not show up in effective_cpus. If no CPU is left,
3878 		 * we clear the subpartitions_cpus & let the child partitions
3879 		 * fight for the CPUs again.
3880 		 */
3881 		if (!cpumask_empty(subpartitions_cpus)) {
3882 			if (cpumask_subset(&new_cpus, subpartitions_cpus)) {
3883 				cpumask_clear(subpartitions_cpus);
3884 			} else {
3885 				cpumask_andnot(&new_cpus, &new_cpus,
3886 					       subpartitions_cpus);
3887 			}
3888 		}
3889 		cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
3890 		spin_unlock_irq(&callback_lock);
3891 		/* we don't mess with cpumasks of tasks in top_cpuset */
3892 	}
3893 
3894 	/* synchronize mems_allowed to N_MEMORY */
3895 	if (mems_updated) {
3896 		spin_lock_irq(&callback_lock);
3897 		if (!on_dfl)
3898 			top_cpuset.mems_allowed = new_mems;
3899 		top_cpuset.effective_mems = new_mems;
3900 		spin_unlock_irq(&callback_lock);
3901 		cpuset_update_tasks_nodemask(&top_cpuset);
3902 	}
3903 
3904 	mutex_unlock(&cpuset_mutex);
3905 
3906 	/* if cpus or mems changed, we need to propagate to descendants */
3907 	if (cpus_updated || mems_updated) {
3908 		struct cpuset *cs;
3909 		struct cgroup_subsys_state *pos_css;
3910 
3911 		rcu_read_lock();
3912 		cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
3913 			if (cs == &top_cpuset || !css_tryget_online(&cs->css))
3914 				continue;
3915 			rcu_read_unlock();
3916 
3917 			cpuset_hotplug_update_tasks(cs, ptmp);
3918 
3919 			rcu_read_lock();
3920 			css_put(&cs->css);
3921 		}
3922 		rcu_read_unlock();
3923 	}
3924 
3925 	/*
3926 	 * rebuild_sched_domains() will always be called directly if needed
3927 	 * to make sure that newly added or removed CPUs are reflected in
3928 	 * the sched domains. However, if isolated partition invalidation
3929 	 * or recreation is being done (update_housekeeping set), a work item
3930 	 * will be queued to call housekeeping_update() to update the
3931 	 * corresponding housekeeping cpumasks after some slight delay.
3932 	 *
3933 	 * We rely on WORK_STRUCT_PENDING_BIT to not requeue a work item that
3934 	 * is still pending. Before the pending bit is cleared, the work data
3935 	 * is copied out and work item dequeued. So it is possible to queue
3936 	 * the work again before the hk_sd_workfn() is invoked to process the
3937 	 * previously queued work. Since hk_sd_workfn() doesn't use the work
3938 	 * item at all, this is not a problem.
3939 	 */
3940 	if (force_sd_rebuild)
3941 		rebuild_sched_domains_cpuslocked();
3942 	if (update_housekeeping)
3943 		queue_work(system_dfl_wq, &hk_sd_work);
3944 
3945 	free_tmpmasks(ptmp);
3946 }
3947 
3948 void cpuset_update_active_cpus(void)
3949 {
3950 	/*
3951 	 * We're inside the cpu hotplug critical region which usually nests
3952 	 * inside cgroup synchronization, so hotplug processing is handled
3953 	 * synchronously here while the hotplug lock is held.
3954 	 */
3955 	cpuset_handle_hotplug();
3956 }
3957 
3958 /*
3959  * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
3960  * Call this routine anytime after node_states[N_MEMORY] changes.
3961  * See cpuset_update_active_cpus() for CPU hotplug handling.
3962  */
3963 static int cpuset_track_online_nodes(struct notifier_block *self,
3964 				unsigned long action, void *arg)
3965 {
3966 	cpuset_handle_hotplug();
3967 	return NOTIFY_OK;
3968 }
3969 
3970 /**
3971  * cpuset_init_smp - initialize cpus_allowed
3972  *
3973  * Description: Finish top cpuset after cpu, node maps are initialized
3974  */
3975 void __init cpuset_init_smp(void)
3976 {
3977 	/*
3978 	 * cpus_allowed/mems_allowed are set to v2 values in the initial
3979 	 * cpuset_bind() call and will be reset to v1 values in a later
3980 	 * cpuset_bind() call when a v1 cpuset hierarchy is mounted.
3981 	 */
3982 	top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;
3983 
3984 	cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask);
3985 	top_cpuset.effective_mems = node_states[N_MEMORY];
3986 
3987 	hotplug_node_notifier(cpuset_track_online_nodes, CPUSET_CALLBACK_PRI);
3988 
3989 	cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
3990 	BUG_ON(!cpuset_migrate_mm_wq);
3991 }
3992 
3993 /*
3994  * Return cpus_allowed mask from a task's cpuset.
3995  */
3996 static void __cpuset_cpus_allowed_locked(struct task_struct *tsk, struct cpumask *pmask)
3997 {
3998 	struct cpuset *cs;
3999 
4000 	cs = task_cs(tsk);
4001 	if (cs != &top_cpuset)
4002 		guarantee_active_cpus(tsk, pmask);
4003 	/*
4004 	 * Tasks in the top cpuset won't have their cpumasks updated
4005 	 * when a hotplug online/offline event happens, so we include all
4006 	 * offline cpus in the allowed cpu list.
4007 	 */
4008 	if ((cs == &top_cpuset) || cpumask_empty(pmask)) {
4009 		const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
4010 
4011 		/*
4012 		 * We first exclude cpus allocated to partitions. If there is no
4013 		 * allowable online cpu left, we fall back to all possible cpus.
4014 		 */
4015 		cpumask_andnot(pmask, possible_mask, subpartitions_cpus);
4016 		if (!cpumask_intersects(pmask, cpu_active_mask))
4017 			cpumask_copy(pmask, possible_mask);
4018 	}
4019 }
4020 
4021 /**
4022  * cpuset_cpus_allowed_locked - return cpus_allowed mask from a task's cpuset.
4023  * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
4024  * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
4025  *
4026  * Similar to cpuset_cpus_allowed() except that the caller must have acquired
4027  * cpuset_mutex.
4028  */
4029 void cpuset_cpus_allowed_locked(struct task_struct *tsk, struct cpumask *pmask)
4030 {
4031 	lockdep_assert_cpuset_lock_held();
4032 	__cpuset_cpus_allowed_locked(tsk, pmask);
4033 }
4034 
4035 /**
4036  * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
4037  * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
4038  * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
4039  *
4040  * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
4041  * attached to the specified @tsk.  Guaranteed to return some non-empty
4042  * subset of cpu_active_mask, even if this means going outside the
4043  * tasks cpuset, except when the task is in the top cpuset.
4044  **/
4045 
4046 void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
4047 {
4048 	unsigned long flags;
4049 
4050 	spin_lock_irqsave(&callback_lock, flags);
4051 	__cpuset_cpus_allowed_locked(tsk, pmask);
4052 	spin_unlock_irqrestore(&callback_lock, flags);
4053 }
4054 
4055 /**
4056  * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
4057  * @tsk: pointer to task_struct with which the scheduler is struggling
4058  *
4059  * Description: In the case that the scheduler cannot find an allowed cpu in
4060  * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy
4061  * mode however, this value is the same as task_cs(tsk)->effective_cpus,
4062  * which will not contain a sane cpumask during cases such as cpu hotplugging.
4063  * This is the absolute last resort for the scheduler and it is only used if
4064  * _every_ other avenue has been traveled.
4065  *
4066  * Returns true if the affinity of @tsk was changed, false otherwise.
4067  **/
4068 
4069 bool cpuset_cpus_allowed_fallback(struct task_struct *tsk)
4070 {
4071 	const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
4072 	const struct cpumask *cs_mask;
4073 	bool changed = false;
4074 
4075 	rcu_read_lock();
4076 	cs_mask = task_cs(tsk)->cpus_allowed;
4077 	if (is_in_v2_mode() && cpumask_subset(cs_mask, possible_mask)) {
4078 		set_cpus_allowed_force(tsk, cs_mask);
4079 		changed = true;
4080 	}
4081 	rcu_read_unlock();
4082 
4083 	/*
4084 	 * We own tsk->cpus_allowed, nobody can change it under us.
4085 	 *
4086 	 * But we read cs and cs->cpus_allowed locklessly and thus can
4087 	 * race with cgroup_attach_task() or update_cpumask() and get
4088 	 * the wrong tsk->cpus_allowed. However, both cases imply the
4089 	 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
4090 	 * which takes task_rq_lock().
4091 	 *
4092 	 * If we are called after it dropped the lock we must see all
4093 	 * changes in task_cs()->cpus_allowed. Otherwise we can temporarily
4094 	 * set any mask even if it is not right from the task_cs() point of
4095 	 * view; the pending set_cpus_allowed_ptr() will fix things.
4096 	 *
4097 	 * select_fallback_rq() will fix things up and set cpu_possible_mask
4098 	 * if required.
4099 	 */
4100 	return changed;
4101 }
4102 
4103 void __init cpuset_init_current_mems_allowed(void)
4104 {
4105 	nodes_setall(current->mems_allowed);
4106 }
4107 
4108 /**
4109  * cpuset_mems_allowed - return mems_allowed mask from a tasks cpuset.
4110  * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
4111  *
4112  * Description: Returns the nodemask_t mems_allowed of the cpuset
4113  * attached to the specified @tsk.  Guaranteed to return some non-empty
4114  * subset of node_states[N_MEMORY], even if this means going outside the
4115  * tasks cpuset.
4116  **/
4117 
4118 nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
4119 {
4120 	nodemask_t mask;
4121 	unsigned long flags;
4122 
4123 	spin_lock_irqsave(&callback_lock, flags);
4124 	guarantee_online_mems(task_cs(tsk), &mask);
4125 	spin_unlock_irqrestore(&callback_lock, flags);
4126 
4127 	return mask;
4128 }
4129 
4130 /**
4131  * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
4132  * @nodemask: the nodemask to be checked
4133  *
4134  * Are any of the nodes in the nodemask allowed in current->mems_allowed?
4135  */
4136 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
4137 {
4138 	return nodes_intersects(*nodemask, current->mems_allowed);
4139 }
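/*
 * Example (illustrative): with current->mems_allowed covering nodes 0-1,
 * a @nodemask of 1-2 is accepted because the masks share node 1, whereas
 * a @nodemask of 2-3 is not.
 */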
4140 
4141 /*
4142  * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
4143  * mem_hardwall ancestor to the specified cpuset.  Call holding
4144  * callback_lock.  If no ancestor is mem_exclusive or mem_hardwall
4145  * (an unusual configuration), then returns the root cpuset.
4146  */
4147 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
4148 {
4149 	while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
4150 		cs = parent_cs(cs);
4151 	return cs;
4152 }
4153 
4154 /*
4155  * cpuset_current_node_allowed - Can current task allocate on a memory node?
4156  * @node: is this an allowed node?
4157  * @gfp_mask: memory allocation flags
4158  *
4159  * If we're in interrupt, yes, we can always allocate.  If @node is set in
4160  * current's mems_allowed, yes.  If it's not a __GFP_HARDWALL request and this
4161  * node is set in the nearest hardwalled cpuset ancestor to current's cpuset,
4162  * yes.  If current has access to memory reserves as an oom victim, yes.
4163  * Otherwise, no.
4164  *
4165  * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
4166  * and do not allow allocations outside the current tasks cpuset
4167  * unless the task has been OOM killed.
4168  * GFP_KERNEL allocations are not so marked, so can escape to the
4169  * nearest enclosing hardwalled ancestor cpuset.
4170  *
4171  * Scanning up parent cpusets requires callback_lock.  The
4172  * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
4173  * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
4174  * current tasks mems_allowed came up empty on the first pass over
4175  * the zonelist.  So only GFP_KERNEL allocations, if all nodes in the
4176  * cpuset are short of memory, might require taking the callback_lock.
4177  *
4178  * The first call here from mm/page_alloc:get_page_from_freelist()
4179  * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
4180  * so no allocation on a node outside the cpuset is allowed (unless
4181  * in interrupt, of course).
4182  *
4183  * The second pass through get_page_from_freelist() doesn't even call
4184  * here for GFP_ATOMIC calls.  For those calls, the __alloc_pages()
4185  * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
4186  * in alloc_flags.  That logic and the checks below have the combined
4187  * effect that:
4188  *	in_interrupt - any node ok (current task context irrelevant)
4189  *	GFP_ATOMIC   - any node ok
4190  *	tsk_is_oom_victim   - any node ok
4191  *	GFP_KERNEL   - any node in enclosing hardwalled cpuset ok
4192  *	GFP_USER     - only nodes in current tasks mems allowed ok.
4193  */
4194 bool cpuset_current_node_allowed(int node, gfp_t gfp_mask)
4195 {
4196 	struct cpuset *cs;		/* current cpuset ancestors */
4197 	bool allowed;			/* is allocation in zone z allowed? */
4198 	unsigned long flags;
4199 
4200 	if (in_interrupt())
4201 		return true;
4202 	if (node_isset(node, current->mems_allowed))
4203 		return true;
4204 	/*
4205 	 * Allow tasks that have access to memory reserves because they have
4206 	 * been OOM killed to get memory anywhere.
4207 	 */
4208 	if (unlikely(tsk_is_oom_victim(current)))
4209 		return true;
4210 	if (gfp_mask & __GFP_HARDWALL)	/* If hardwall request, stop here */
4211 		return false;
4212 
4213 	if (current->flags & PF_EXITING) /* Let dying task have memory */
4214 		return true;
4215 
4216 	/* Not hardwall and node outside mems_allowed: scan up cpusets */
4217 	spin_lock_irqsave(&callback_lock, flags);
4218 
4219 	cs = nearest_hardwall_ancestor(task_cs(current));
4220 	allowed = node_isset(node, cs->mems_allowed);
4221 
4222 	spin_unlock_irqrestore(&callback_lock, flags);
4223 	return allowed;
4224 }
4225 
4226 /**
4227  * cpuset_nodes_allowed - return effective_mems mask from a cgroup cpuset.
4228  * @cgroup: pointer to struct cgroup.
4229  * @mask: pointer to struct nodemask_t to be returned.
4230  *
4231  * Returns effective_mems mask from a cgroup cpuset if it is cgroup v2 and
4232  * has cpuset subsys. Otherwise, returns node_states[N_MEMORY].
4233  *
4234  * This function intentionally avoids taking the cpuset_mutex or callback_lock
4235  * when accessing effective_mems. This is because the obtained effective_mems
4236  * is stale immediately after the query anyway (e.g., effective_mems is updated
4237  * immediately after releasing the lock but before returning).
4238  *
4239  * As a result, returned @mask may be empty because cs->effective_mems can be
4240  * rebound during this call. Besides, nodes in @mask are not guaranteed to be
4241  * online due to hot plugins. Callers should check the mask for validity on
4242  * online due to memory hotplug. Callers should check the mask for validity on
4243  **/
4244 void cpuset_nodes_allowed(struct cgroup *cgroup, nodemask_t *mask)
4245 {
4246 	struct cgroup_subsys_state *css;
4247 	struct cpuset *cs;
4248 
4249 	/*
4250 	 * In v1, mem_cgroup and cpuset are unlikely to be in the same hierarchy
4251 	 * and mems_allowed is likely to be empty even if we could get to it,
4252 	 * so return directly to avoid taking a global lock just for the empty check.
4253 	 */
4254 	if (!cgroup || !cpuset_v2()) {
4255 		nodes_copy(*mask, node_states[N_MEMORY]);
4256 		return;
4257 	}
4258 
4259 	css = cgroup_get_e_css(cgroup, &cpuset_cgrp_subsys);
4260 	if (!css) {
4261 		nodes_copy(*mask, node_states[N_MEMORY]);
4262 		return;
4263 	}
4264 
4265 	/*
4266 	 * The reference taken via cgroup_get_e_css is sufficient to
4267 	 * protect css, but it does not imply safe accesses to effective_mems.
4268 	 *
4269 	 * Normally, accessing effective_mems would require the cpuset_mutex
4270 	 * or callback_lock - but the correctness of this information is stale
4271 	 * or callback_lock - but this information can become stale
4272 	 * immediately after the query anyway. We do not acquire the lock
4273 	 * against mems_allowed rebinds.
4274 	 */
4275 	cs = container_of(css, struct cpuset, css);
4276 	nodes_copy(*mask, cs->effective_mems);
4277 	css_put(css);
4278 }
4279 
4280 /**
4281  * cpuset_spread_node() - On which node to begin search for a page
4282  * @rotor: round robin rotor
4283  *
4284  * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
4285  * tasks in a cpuset with is_spread_page or is_spread_slab set),
4286  * and if the memory allocation used cpuset_mem_spread_node()
4287  * to determine on which node to start looking, as it will for
4288  * certain page cache or slab cache pages such as used for file
4289  * system buffers and inode caches, then instead of starting on the
4290  * local node to look for a free page, rather spread the starting
4291  * node around the tasks mems_allowed nodes.
4292  *
4293  * We don't have to worry about the returned node being offline
4294  * because "it can't happen", and even if it did, it would be ok.
4295  *
4296  * The routines calling guarantee_online_mems() are careful to
4297  * only set nodes in task->mems_allowed that are online.  So it
4298  * should not be possible for the following code to return an
4299  * offline node.  But if it did, that would be ok, as this routine
4300  * is not returning the node where the allocation must be, only
4301  * the node where the search should start.  The zonelist passed to
4302  * __alloc_pages() will include all nodes.  If the slab allocator
4303  * is passed an offline node, it will fall back to the local node.
4304  * See kmem_cache_alloc_node().
4305  */
4306 static int cpuset_spread_node(int *rotor)
4307 {
4308 	return *rotor = next_node_in(*rotor, current->mems_allowed);
4309 }
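/*
 * Example (illustrative): with current->mems_allowed = {0,2} and the
 * rotor currently at node 0, successive calls return 2, 0, 2, 0, ...
 * because next_node_in() wraps around within the allowed nodemask.
 */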
4310 
4311 /**
4312  * cpuset_mem_spread_node() - On which node to begin search for a file page
4313  */
4314 int cpuset_mem_spread_node(void)
4315 {
4316 	if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
4317 		current->cpuset_mem_spread_rotor =
4318 			node_random(&current->mems_allowed);
4319 
4320 	return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
4321 }
4322 
4323 /**
4324  * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
4325  * @tsk1: pointer to task_struct of some task.
4326  * @tsk2: pointer to task_struct of some other task.
4327  *
4328  * Description: Return true if @tsk1's mems_allowed intersects the
4329  * mems_allowed of @tsk2.  Used by the OOM killer to determine if
4330  * one of the task's memory usage might impact the memory available
4331  * to the other.
4332  **/
4333 
4334 int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
4335 				   const struct task_struct *tsk2)
4336 {
4337 	return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
4338 }
4339 
4340 /**
4341  * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed
4342  *
4343  * Description: Prints current's name, cpuset name, and cached copy of its
4344  * mems_allowed to the kernel log.
4345  */
4346 void cpuset_print_current_mems_allowed(void)
4347 {
4348 	struct cgroup *cgrp;
4349 
4350 	rcu_read_lock();
4351 
4352 	cgrp = task_cs(current)->css.cgroup;
4353 	pr_cont(",cpuset=");
4354 	pr_cont_cgroup_name(cgrp);
4355 	pr_cont(",mems_allowed=%*pbl",
4356 		nodemask_pr_args(&current->mems_allowed));
4357 
4358 	rcu_read_unlock();
4359 }
4360 
4361 /* Display task mems_allowed in /proc/<pid>/status file. */
4362 void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
4363 {
4364 	seq_printf(m, "Mems_allowed:\t%*pb\n",
4365 		   nodemask_pr_args(&task->mems_allowed));
4366 	seq_printf(m, "Mems_allowed_list:\t%*pbl\n",
4367 		   nodemask_pr_args(&task->mems_allowed));
4368 }
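/*
 * Illustrative /proc/<pid>/status excerpt (the width of the hex mask
 * depends on MAX_NUMNODES; this assumes a 64-node build with nodes 0-1
 * allowed):
 *
 *	Mems_allowed:	00000000,00000003
 *	Mems_allowed_list:	0-1
 */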
4369