xref: /linux/kernel/cgroup/cpuset.c (revision 4cff5c05e076d2ee4e34122aa956b84a2eaac587)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  kernel/cpuset.c
4  *
5  *  Processor and Memory placement constraints for sets of tasks.
6  *
7  *  Copyright (C) 2003 BULL SA.
8  *  Copyright (C) 2004-2007 Silicon Graphics, Inc.
9  *  Copyright (C) 2006 Google, Inc
10  *
11  *  Portions derived from Patrick Mochel's sysfs code.
12  *  sysfs is Copyright (c) 2001-3 Patrick Mochel
13  *
14  *  2003-10-10 Written by Simon Derr.
15  *  2003-10-22 Updates by Stephen Hemminger.
16  *  2004 May-July Rework by Paul Jackson.
17  *  2006 Rework by Paul Menage to use generic cgroups
18  *  2008 Rework of the scheduler domains and CPU hotplug handling
19  *       by Max Krasnyansky
20  */
21 #include "cpuset-internal.h"
22 
23 #include <linux/init.h>
24 #include <linux/interrupt.h>
25 #include <linux/kernel.h>
26 #include <linux/mempolicy.h>
27 #include <linux/mm.h>
28 #include <linux/memory.h>
29 #include <linux/rcupdate.h>
30 #include <linux/sched.h>
31 #include <linux/sched/deadline.h>
32 #include <linux/sched/mm.h>
33 #include <linux/sched/task.h>
34 #include <linux/security.h>
35 #include <linux/oom.h>
36 #include <linux/sched/isolation.h>
37 #include <linux/wait.h>
38 #include <linux/workqueue.h>
39 #include <linux/task_work.h>
40 
41 DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
42 DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
43 
44 /*
45  * There could be abnormal cpuset configurations for cpu or memory
46  * node binding, add this key to provide a quick low-cost judgment
47  * of the situation.
48  */
49 DEFINE_STATIC_KEY_FALSE(cpusets_insane_config_key);
50 
51 static const char * const perr_strings[] = {
52 	[PERR_INVCPUS]   = "Invalid cpu list in cpuset.cpus.exclusive",
53 	[PERR_INVPARENT] = "Parent is an invalid partition root",
54 	[PERR_NOTPART]   = "Parent is not a partition root",
55 	[PERR_NOTEXCL]   = "Cpu list in cpuset.cpus not exclusive",
56 	[PERR_NOCPUS]    = "Parent unable to distribute cpu downstream",
57 	[PERR_HOTPLUG]   = "No cpu available due to hotplug",
58 	[PERR_CPUSEMPTY] = "cpuset.cpus and cpuset.cpus.exclusive are empty",
59 	[PERR_HKEEPING]  = "partition config conflicts with housekeeping setup",
60 	[PERR_ACCESS]    = "Enable partition not permitted",
61 	[PERR_REMOTE]    = "Have remote partition underneath",
62 };
63 
64 /*
65  * For local partitions, update to subpartitions_cpus & isolated_cpus is done
66  * in update_parent_effective_cpumask(). For remote partitions, it is done in
67  * the remote_partition_*() and remote_cpus_update() helpers.
68  */
69 /*
70  * Exclusive CPUs distributed out to local or remote sub-partitions of
71  * top_cpuset
72  */
73 static cpumask_var_t	subpartitions_cpus;
74 
75 /*
76  * Exclusive CPUs in isolated partitions
77  */
78 static cpumask_var_t	isolated_cpus;
79 
80 /*
81  * isolated_cpus updating flag (protected by cpuset_mutex)
82  * Set if isolated_cpus is going to be updated in the current
83  * cpuset_mutex crtical section.
84  */
85 static bool isolated_cpus_updating;
86 
87 /*
88  * A flag to force sched domain rebuild at the end of an operation.
89  * It can be set in
90  *  - update_partition_sd_lb()
91  *  - update_cpumasks_hier()
92  *  - cpuset_update_flag()
93  *  - cpuset_hotplug_update_tasks()
94  *  - cpuset_handle_hotplug()
95  *
96  * Protected by cpuset_mutex (with cpus_read_lock held) or cpus_write_lock.
97  *
98  * Note that update_relax_domain_level() in cpuset-v1.c can still call
99  * rebuild_sched_domains_locked() directly without using this flag.
100  */
101 static bool force_sd_rebuild;
102 
103 /*
104  * Partition root states:
105  *
106  *   0 - member (not a partition root)
107  *   1 - partition root
108  *   2 - partition root without load balancing (isolated)
109  *  -1 - invalid partition root
110  *  -2 - invalid isolated partition root
111  *
112  *  There are 2 types of partitions - local or remote. Local partitions are
113  *  those whose parents are partition root themselves. Setting of
114  *  cpuset.cpus.exclusive are optional in setting up local partitions.
115  *  Remote partitions are those whose parents are not partition roots. Passing
116  *  down exclusive CPUs by setting cpuset.cpus.exclusive along its ancestor
117  *  nodes are mandatory in creating a remote partition.
118  *
119  *  For simplicity, a local partition can be created under a local or remote
120  *  partition but a remote partition cannot have any partition root in its
121  *  ancestor chain except the cgroup root.
122  *
123  *  A valid partition can be formed by setting exclusive_cpus or cpus_allowed
124  *  if exclusive_cpus is not set. In the case of partition with empty
125  *  exclusive_cpus, all the conflicting exclusive CPUs specified in the
126  *  following cpumasks of sibling cpusets will be removed from its
127  *  cpus_allowed in determining its effective_xcpus.
128  *  - effective_xcpus
129  *  - exclusive_cpus
130  *
131  *  The "cpuset.cpus.exclusive" control file should be used for setting up
132  *  partition if the users want to get as many CPUs as possible.
133  */
134 #define PRS_MEMBER		0
135 #define PRS_ROOT		1
136 #define PRS_ISOLATED		2
137 #define PRS_INVALID_ROOT	-1
138 #define PRS_INVALID_ISOLATED	-2
139 
140 /*
141  * Temporary cpumasks for working with partitions that are passed among
142  * functions to avoid memory allocation in inner functions.
143  */
144 struct tmpmasks {
145 	cpumask_var_t addmask, delmask;	/* For partition root */
146 	cpumask_var_t new_cpus;		/* For update_cpumasks_hier() */
147 };
148 
149 void inc_dl_tasks_cs(struct task_struct *p)
150 {
151 	struct cpuset *cs = task_cs(p);
152 
153 	cs->nr_deadline_tasks++;
154 }
155 
156 void dec_dl_tasks_cs(struct task_struct *p)
157 {
158 	struct cpuset *cs = task_cs(p);
159 
160 	cs->nr_deadline_tasks--;
161 }
162 
163 static inline bool is_partition_valid(const struct cpuset *cs)
164 {
165 	return cs->partition_root_state > 0;
166 }
167 
168 static inline bool is_partition_invalid(const struct cpuset *cs)
169 {
170 	return cs->partition_root_state < 0;
171 }
172 
173 static inline bool cs_is_member(const struct cpuset *cs)
174 {
175 	return cs->partition_root_state == PRS_MEMBER;
176 }
177 
178 /*
179  * Callers should hold callback_lock to modify partition_root_state.
180  */
181 static inline void make_partition_invalid(struct cpuset *cs)
182 {
183 	if (cs->partition_root_state > 0)
184 		cs->partition_root_state = -cs->partition_root_state;
185 }
186 
187 /*
188  * Send notification event of whenever partition_root_state changes.
189  */
190 static inline void notify_partition_change(struct cpuset *cs, int old_prs)
191 {
192 	if (old_prs == cs->partition_root_state)
193 		return;
194 	cgroup_file_notify(&cs->partition_file);
195 
196 	/* Reset prs_err if not invalid */
197 	if (is_partition_valid(cs))
198 		WRITE_ONCE(cs->prs_err, PERR_NONE);
199 }
200 
201 /*
202  * The top_cpuset is always synchronized to cpu_active_mask and we should avoid
203  * using cpu_online_mask as much as possible. An active CPU is always an online
204  * CPU, but not vice versa. cpu_active_mask and cpu_online_mask can differ
205  * during hotplug operations. A CPU is marked active at the last stage of CPU
206  * bringup (CPUHP_AP_ACTIVE). It is also the stage where cpuset hotplug code
207  * will be called to update the sched domains so that the scheduler can move
208  * a normal task to a newly active CPU or remove tasks away from a newly
209  * inactivated CPU. The online bit is set much earlier in the CPU bringup
210  * process and cleared much later in CPU teardown.
211  *
212  * If cpu_online_mask is used while a hotunplug operation is happening in
213  * parallel, we may leave an offline CPU in cpu_allowed or some other masks.
214  */
215 struct cpuset top_cpuset = {
216 	.flags = BIT(CS_CPU_EXCLUSIVE) |
217 		 BIT(CS_MEM_EXCLUSIVE) | BIT(CS_SCHED_LOAD_BALANCE),
218 	.partition_root_state = PRS_ROOT,
219 };
220 
221 /*
222  * There are two global locks guarding cpuset structures - cpuset_mutex and
223  * callback_lock. The cpuset code uses only cpuset_mutex. Other kernel
224  * subsystems can use cpuset_lock()/cpuset_unlock() to prevent change to cpuset
225  * structures. Note that cpuset_mutex needs to be a mutex as it is used in
226  * paths that rely on priority inheritance (e.g. scheduler - on RT) for
227  * correctness.
228  *
229  * A task must hold both locks to modify cpusets.  If a task holds
230  * cpuset_mutex, it blocks others, ensuring that it is the only task able to
231  * also acquire callback_lock and be able to modify cpusets.  It can perform
232  * various checks on the cpuset structure first, knowing nothing will change.
233  * It can also allocate memory while just holding cpuset_mutex.  While it is
234  * performing these checks, various callback routines can briefly acquire
235  * callback_lock to query cpusets.  Once it is ready to make the changes, it
236  * takes callback_lock, blocking everyone else.
237  *
238  * Calls to the kernel memory allocator can not be made while holding
239  * callback_lock, as that would risk double tripping on callback_lock
240  * from one of the callbacks into the cpuset code from within
241  * __alloc_pages().
242  *
243  * If a task is only holding callback_lock, then it has read-only
244  * access to cpusets.
245  *
246  * Now, the task_struct fields mems_allowed and mempolicy may be changed
247  * by other task, we use alloc_lock in the task_struct fields to protect
248  * them.
249  *
250  * The cpuset_common_seq_show() handlers only hold callback_lock across
251  * small pieces of code, such as when reading out possibly multi-word
252  * cpumasks and nodemasks.
253  */
254 
255 static DEFINE_MUTEX(cpuset_mutex);
256 
257 /**
258  * cpuset_lock - Acquire the global cpuset mutex
259  *
260  * This locks the global cpuset mutex to prevent modifications to cpuset
261  * hierarchy and configurations. This helper is not enough to make modification.
262  */
263 void cpuset_lock(void)
264 {
265 	mutex_lock(&cpuset_mutex);
266 }
267 
268 void cpuset_unlock(void)
269 {
270 	mutex_unlock(&cpuset_mutex);
271 }
272 
273 void lockdep_assert_cpuset_lock_held(void)
274 {
275 	lockdep_assert_held(&cpuset_mutex);
276 }
277 
278 /**
279  * cpuset_full_lock - Acquire full protection for cpuset modification
280  *
281  * Takes both CPU hotplug read lock (cpus_read_lock()) and cpuset mutex
282  * to safely modify cpuset data.
283  */
284 void cpuset_full_lock(void)
285 {
286 	cpus_read_lock();
287 	mutex_lock(&cpuset_mutex);
288 }
289 
290 void cpuset_full_unlock(void)
291 {
292 	mutex_unlock(&cpuset_mutex);
293 	cpus_read_unlock();
294 }
295 
296 #ifdef CONFIG_LOCKDEP
297 bool lockdep_is_cpuset_held(void)
298 {
299 	return lockdep_is_held(&cpuset_mutex);
300 }
301 #endif
302 
303 static DEFINE_SPINLOCK(callback_lock);
304 
305 void cpuset_callback_lock_irq(void)
306 {
307 	spin_lock_irq(&callback_lock);
308 }
309 
310 void cpuset_callback_unlock_irq(void)
311 {
312 	spin_unlock_irq(&callback_lock);
313 }
314 
315 static struct workqueue_struct *cpuset_migrate_mm_wq;
316 
317 static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);
318 
319 static inline void check_insane_mems_config(nodemask_t *nodes)
320 {
321 	if (!cpusets_insane_config() &&
322 		movable_only_nodes(nodes)) {
323 		static_branch_enable_cpuslocked(&cpusets_insane_config_key);
324 		pr_info("Unsupported (movable nodes only) cpuset configuration detected (nmask=%*pbl)!\n"
325 			"Cpuset allocations might fail even with a lot of memory available.\n",
326 			nodemask_pr_args(nodes));
327 	}
328 }
329 
330 /*
331  * decrease cs->attach_in_progress.
332  * wake_up cpuset_attach_wq if cs->attach_in_progress==0.
333  */
334 static inline void dec_attach_in_progress_locked(struct cpuset *cs)
335 {
336 	lockdep_assert_cpuset_lock_held();
337 
338 	cs->attach_in_progress--;
339 	if (!cs->attach_in_progress)
340 		wake_up(&cpuset_attach_wq);
341 }
342 
343 static inline void dec_attach_in_progress(struct cpuset *cs)
344 {
345 	mutex_lock(&cpuset_mutex);
346 	dec_attach_in_progress_locked(cs);
347 	mutex_unlock(&cpuset_mutex);
348 }
349 
350 static inline bool cpuset_v2(void)
351 {
352 	return !IS_ENABLED(CONFIG_CPUSETS_V1) ||
353 		cgroup_subsys_on_dfl(cpuset_cgrp_subsys);
354 }
355 
356 /*
357  * Cgroup v2 behavior is used on the "cpus" and "mems" control files when
358  * on default hierarchy or when the cpuset_v2_mode flag is set by mounting
359  * the v1 cpuset cgroup filesystem with the "cpuset_v2_mode" mount option.
360  * With v2 behavior, "cpus" and "mems" are always what the users have
361  * requested and won't be changed by hotplug events. Only the effective
362  * cpus or mems will be affected.
363  */
364 static inline bool is_in_v2_mode(void)
365 {
366 	return cpuset_v2() ||
367 	      (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE);
368 }
369 
370 /**
371  * partition_is_populated - check if partition has tasks
372  * @cs: partition root to be checked
373  * @excluded_child: a child cpuset to be excluded in task checking
374  * Return: true if there are tasks, false otherwise
375  *
376  * @cs should be a valid partition root or going to become a partition root.
377  * @excluded_child should be non-NULL when this cpuset is going to become a
378  * partition itself.
379  *
380  * Note that a remote partition is not allowed underneath a valid local
381  * or remote partition. So if a non-partition root child is populated,
382  * the whole partition is considered populated.
383  */
384 static inline bool partition_is_populated(struct cpuset *cs,
385 					  struct cpuset *excluded_child)
386 {
387 	struct cpuset *cp;
388 	struct cgroup_subsys_state *pos_css;
389 
390 	/*
391 	 * We cannot call cs_is_populated(cs) directly, as
392 	 * nr_populated_domain_children may include populated
393 	 * csets from descendants that are partitions.
394 	 */
395 	if (cs->css.cgroup->nr_populated_csets ||
396 	    cs->attach_in_progress)
397 		return true;
398 
399 	rcu_read_lock();
400 	cpuset_for_each_descendant_pre(cp, pos_css, cs) {
401 		if (cp == cs || cp == excluded_child)
402 			continue;
403 
404 		if (is_partition_valid(cp)) {
405 			pos_css = css_rightmost_descendant(pos_css);
406 			continue;
407 		}
408 
409 		if (cpuset_is_populated(cp)) {
410 			rcu_read_unlock();
411 			return true;
412 		}
413 	}
414 	rcu_read_unlock();
415 	return false;
416 }
417 
418 /*
419  * Return in pmask the portion of a task's cpusets's cpus_allowed that
420  * are online and are capable of running the task.  If none are found,
421  * walk up the cpuset hierarchy until we find one that does have some
422  * appropriate cpus.
423  *
424  * One way or another, we guarantee to return some non-empty subset
425  * of cpu_active_mask.
426  *
427  * Call with callback_lock or cpuset_mutex held.
428  */
429 static void guarantee_active_cpus(struct task_struct *tsk,
430 				  struct cpumask *pmask)
431 {
432 	const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
433 	struct cpuset *cs;
434 
435 	if (WARN_ON(!cpumask_and(pmask, possible_mask, cpu_active_mask)))
436 		cpumask_copy(pmask, cpu_active_mask);
437 
438 	rcu_read_lock();
439 	cs = task_cs(tsk);
440 
441 	while (!cpumask_intersects(cs->effective_cpus, pmask))
442 		cs = parent_cs(cs);
443 
444 	cpumask_and(pmask, pmask, cs->effective_cpus);
445 	rcu_read_unlock();
446 }
447 
448 /*
449  * Return in *pmask the portion of a cpusets's mems_allowed that
450  * are online, with memory.  If none are online with memory, walk
451  * up the cpuset hierarchy until we find one that does have some
452  * online mems.  The top cpuset always has some mems online.
453  *
454  * One way or another, we guarantee to return some non-empty subset
455  * of node_states[N_MEMORY].
456  *
457  * Call with callback_lock or cpuset_mutex held.
458  */
459 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
460 {
461 	while (!nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]))
462 		cs = parent_cs(cs);
463 }
464 
465 /**
466  * alloc_cpumasks - Allocate an array of cpumask variables
467  * @pmasks: Pointer to array of cpumask_var_t pointers
468  * @size: Number of cpumasks to allocate
469  * Return: 0 if successful, -ENOMEM otherwise.
470  *
471  * Allocates @size cpumasks and initializes them to empty. Returns 0 on
472  * success, -ENOMEM on allocation failure. On failure, any previously
473  * allocated cpumasks are freed.
474  */
475 static inline int alloc_cpumasks(cpumask_var_t *pmasks[], u32 size)
476 {
477 	int i;
478 
479 	for (i = 0; i < size; i++) {
480 		if (!zalloc_cpumask_var(pmasks[i], GFP_KERNEL)) {
481 			while (--i >= 0)
482 				free_cpumask_var(*pmasks[i]);
483 			return -ENOMEM;
484 		}
485 	}
486 	return 0;
487 }
488 
489 /**
490  * alloc_tmpmasks - Allocate temporary cpumasks for cpuset operations.
491  * @tmp: Pointer to tmpmasks structure to populate
492  * Return: 0 on success, -ENOMEM on allocation failure
493  */
494 static inline int alloc_tmpmasks(struct tmpmasks *tmp)
495 {
496 	/*
497 	 * Array of pointers to the three cpumask_var_t fields in tmpmasks.
498 	 * Note: Array size must match actual number of masks (3)
499 	 */
500 	cpumask_var_t *pmask[3] = {
501 		&tmp->new_cpus,
502 		&tmp->addmask,
503 		&tmp->delmask
504 	};
505 
506 	return alloc_cpumasks(pmask, ARRAY_SIZE(pmask));
507 }
508 
509 /**
510  * free_tmpmasks - free cpumasks in a tmpmasks structure
511  * @tmp: the tmpmasks structure pointer
512  */
513 static inline void free_tmpmasks(struct tmpmasks *tmp)
514 {
515 	if (!tmp)
516 		return;
517 
518 	free_cpumask_var(tmp->new_cpus);
519 	free_cpumask_var(tmp->addmask);
520 	free_cpumask_var(tmp->delmask);
521 }
522 
523 /**
524  * dup_or_alloc_cpuset - Duplicate or allocate a new cpuset
525  * @cs: Source cpuset to duplicate (NULL for a fresh allocation)
526  *
527  * Creates a new cpuset by either:
528  * 1. Duplicating an existing cpuset (if @cs is non-NULL), or
529  * 2. Allocating a fresh cpuset with zero-initialized masks (if @cs is NULL)
530  *
531  * Return: Pointer to newly allocated cpuset on success, NULL on failure
532  */
533 static struct cpuset *dup_or_alloc_cpuset(struct cpuset *cs)
534 {
535 	struct cpuset *trial;
536 
537 	/* Allocate base structure */
538 	trial = cs ? kmemdup(cs, sizeof(*cs), GFP_KERNEL) :
539 		     kzalloc(sizeof(*cs), GFP_KERNEL);
540 	if (!trial)
541 		return NULL;
542 
543 	/* Setup cpumask pointer array */
544 	cpumask_var_t *pmask[4] = {
545 		&trial->cpus_allowed,
546 		&trial->effective_cpus,
547 		&trial->effective_xcpus,
548 		&trial->exclusive_cpus
549 	};
550 
551 	if (alloc_cpumasks(pmask, ARRAY_SIZE(pmask))) {
552 		kfree(trial);
553 		return NULL;
554 	}
555 
556 	/* Copy masks if duplicating */
557 	if (cs) {
558 		cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
559 		cpumask_copy(trial->effective_cpus, cs->effective_cpus);
560 		cpumask_copy(trial->effective_xcpus, cs->effective_xcpus);
561 		cpumask_copy(trial->exclusive_cpus, cs->exclusive_cpus);
562 	}
563 
564 	return trial;
565 }
566 
567 /**
568  * free_cpuset - free the cpuset
569  * @cs: the cpuset to be freed
570  */
571 static inline void free_cpuset(struct cpuset *cs)
572 {
573 	free_cpumask_var(cs->cpus_allowed);
574 	free_cpumask_var(cs->effective_cpus);
575 	free_cpumask_var(cs->effective_xcpus);
576 	free_cpumask_var(cs->exclusive_cpus);
577 	kfree(cs);
578 }
579 
580 /* Return user specified exclusive CPUs */
581 static inline struct cpumask *user_xcpus(struct cpuset *cs)
582 {
583 	return cpumask_empty(cs->exclusive_cpus) ? cs->cpus_allowed
584 						 : cs->exclusive_cpus;
585 }
586 
587 static inline bool xcpus_empty(struct cpuset *cs)
588 {
589 	return cpumask_empty(cs->cpus_allowed) &&
590 	       cpumask_empty(cs->exclusive_cpus);
591 }
592 
593 /*
594  * cpusets_are_exclusive() - check if two cpusets are exclusive
595  *
596  * Return true if exclusive, false if not
597  */
598 static inline bool cpusets_are_exclusive(struct cpuset *cs1, struct cpuset *cs2)
599 {
600 	struct cpumask *xcpus1 = user_xcpus(cs1);
601 	struct cpumask *xcpus2 = user_xcpus(cs2);
602 
603 	if (cpumask_intersects(xcpus1, xcpus2))
604 		return false;
605 	return true;
606 }
607 
608 /**
609  * cpus_excl_conflict - Check if two cpusets have exclusive CPU conflicts
610  * @trial:	the trial cpuset to be checked
611  * @sibling:	a sibling cpuset to be checked against
612  * @xcpus_changed: set if exclusive_cpus has been set
613  *
614  * Returns: true if CPU exclusivity conflict exists, false otherwise
615  *
616  * Conflict detection rules:
617  *  o cgroup v1
618  *    See cpuset1_cpus_excl_conflict()
619  *  o cgroup v2
620  *    - The exclusive_cpus values cannot overlap.
621  *    - New exclusive_cpus cannot be a superset of a sibling's cpus_allowed.
622  */
623 static inline bool cpus_excl_conflict(struct cpuset *trial, struct cpuset *sibling,
624 				      bool xcpus_changed)
625 {
626 	if (!cpuset_v2())
627 		return cpuset1_cpus_excl_conflict(trial, sibling);
628 
629 	/* The cpus_allowed of a sibling cpuset cannot be a subset of the new exclusive_cpus */
630 	if (xcpus_changed && !cpumask_empty(sibling->cpus_allowed) &&
631 	    cpumask_subset(sibling->cpus_allowed, trial->exclusive_cpus))
632 		return true;
633 
634 	/* Exclusive_cpus cannot intersect */
635 	return cpumask_intersects(trial->exclusive_cpus, sibling->exclusive_cpus);
636 }
637 
638 static inline bool mems_excl_conflict(struct cpuset *cs1, struct cpuset *cs2)
639 {
640 	if ((is_mem_exclusive(cs1) || is_mem_exclusive(cs2)))
641 		return nodes_intersects(cs1->mems_allowed, cs2->mems_allowed);
642 	return false;
643 }
644 
645 /*
646  * validate_change() - Used to validate that any proposed cpuset change
647  *		       follows the structural rules for cpusets.
648  *
649  * If we replaced the flag and mask values of the current cpuset
650  * (cur) with those values in the trial cpuset (trial), would
651  * our various subset and exclusive rules still be valid?  Presumes
652  * cpuset_mutex held.
653  *
654  * 'cur' is the address of an actual, in-use cpuset.  Operations
655  * such as list traversal that depend on the actual address of the
656  * cpuset in the list must use cur below, not trial.
657  *
658  * 'trial' is the address of bulk structure copy of cur, with
659  * perhaps one or more of the fields cpus_allowed, mems_allowed,
660  * or flags changed to new, trial values.
661  *
662  * Return 0 if valid, -errno if not.
663  */
664 
665 static int validate_change(struct cpuset *cur, struct cpuset *trial)
666 {
667 	struct cgroup_subsys_state *css;
668 	struct cpuset *c, *par;
669 	bool xcpus_changed;
670 	int ret = 0;
671 
672 	rcu_read_lock();
673 
674 	if (!is_in_v2_mode())
675 		ret = cpuset1_validate_change(cur, trial);
676 	if (ret)
677 		goto out;
678 
679 	/* Remaining checks don't apply to root cpuset */
680 	if (cur == &top_cpuset)
681 		goto out;
682 
683 	par = parent_cs(cur);
684 
685 	/*
686 	 * We can't shrink if we won't have enough room for SCHED_DEADLINE
687 	 * tasks. This check is not done when scheduling is disabled as the
688 	 * users should know what they are doing.
689 	 *
690 	 * For v1, effective_cpus == cpus_allowed & user_xcpus() returns
691 	 * cpus_allowed.
692 	 *
693 	 * For v2, is_cpu_exclusive() & is_sched_load_balance() are true only
694 	 * for non-isolated partition root. At this point, the target
695 	 * effective_cpus isn't computed yet. user_xcpus() is the best
696 	 * approximation.
697 	 *
698 	 * TBD: May need to precompute the real effective_cpus here in case
699 	 * incorrect scheduling of SCHED_DEADLINE tasks in a partition
700 	 * becomes an issue.
701 	 */
702 	ret = -EBUSY;
703 	if (is_cpu_exclusive(cur) && is_sched_load_balance(cur) &&
704 	    !cpuset_cpumask_can_shrink(cur->effective_cpus, user_xcpus(trial)))
705 		goto out;
706 
707 	/*
708 	 * If either I or some sibling (!= me) is exclusive, we can't
709 	 * overlap. exclusive_cpus cannot overlap with each other if set.
710 	 */
711 	ret = -EINVAL;
712 	xcpus_changed = !cpumask_equal(cur->exclusive_cpus, trial->exclusive_cpus);
713 	cpuset_for_each_child(c, css, par) {
714 		if (c == cur)
715 			continue;
716 		if (cpus_excl_conflict(trial, c, xcpus_changed))
717 			goto out;
718 		if (mems_excl_conflict(trial, c))
719 			goto out;
720 	}
721 
722 	ret = 0;
723 out:
724 	rcu_read_unlock();
725 	return ret;
726 }
727 
728 #ifdef CONFIG_SMP
729 
730 /*
731  * generate_sched_domains()
732  *
733  * This function builds a partial partition of the systems CPUs
734  * A 'partial partition' is a set of non-overlapping subsets whose
735  * union is a subset of that set.
736  * The output of this function needs to be passed to kernel/sched/core.c
737  * partition_sched_domains() routine, which will rebuild the scheduler's
738  * load balancing domains (sched domains) as specified by that partial
739  * partition.
740  *
741  * See "What is sched_load_balance" in Documentation/admin-guide/cgroup-v1/cpusets.rst
742  * for a background explanation of this.
743  *
744  * Does not return errors, on the theory that the callers of this
745  * routine would rather not worry about failures to rebuild sched
746  * domains when operating in the severe memory shortage situations
747  * that could cause allocation failures below.
748  *
749  * Must be called with cpuset_mutex held.
750  *
751  * The three key local variables below are:
752  *    cp - cpuset pointer, used (together with pos_css) to perform a
753  *	   top-down scan of all cpusets. For our purposes, rebuilding
754  *	   the schedulers sched domains, we can ignore !is_sched_load_
755  *	   balance cpusets.
756  *  csa  - (for CpuSet Array) Array of pointers to all the cpusets
757  *	   that need to be load balanced, for convenient iterative
758  *	   access by the subsequent code that finds the best partition,
759  *	   i.e the set of domains (subsets) of CPUs such that the
760  *	   cpus_allowed of every cpuset marked is_sched_load_balance
761  *	   is a subset of one of these domains, while there are as
762  *	   many such domains as possible, each as small as possible.
763  * doms  - Conversion of 'csa' to an array of cpumasks, for passing to
764  *	   the kernel/sched/core.c routine partition_sched_domains() in a
765  *	   convenient format, that can be easily compared to the prior
766  *	   value to determine what partition elements (sched domains)
767  *	   were changed (added or removed.)
768  */
769 static int generate_sched_domains(cpumask_var_t **domains,
770 			struct sched_domain_attr **attributes)
771 {
772 	struct cpuset *cp;	/* top-down scan of cpusets */
773 	struct cpuset **csa;	/* array of all cpuset ptrs */
774 	int i, j;		/* indices for partition finding loops */
775 	cpumask_var_t *doms;	/* resulting partition; i.e. sched domains */
776 	struct sched_domain_attr *dattr;  /* attributes for custom domains */
777 	int ndoms = 0;		/* number of sched domains in result */
778 	struct cgroup_subsys_state *pos_css;
779 
780 	if (!cpuset_v2())
781 		return cpuset1_generate_sched_domains(domains, attributes);
782 
783 	doms = NULL;
784 	dattr = NULL;
785 	csa = NULL;
786 
787 	/* Special case for the 99% of systems with one, full, sched domain */
788 	if (cpumask_empty(subpartitions_cpus)) {
789 		ndoms = 1;
790 		/* !csa will be checked and can be correctly handled */
791 		goto generate_doms;
792 	}
793 
794 	csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL);
795 	if (!csa)
796 		goto done;
797 
798 	/* Find how many partitions and cache them to csa[] */
799 	rcu_read_lock();
800 	cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
801 		/*
802 		 * Only valid partition roots that are not isolated and with
803 		 * non-empty effective_cpus will be saved into csa[].
804 		 */
805 		if ((cp->partition_root_state == PRS_ROOT) &&
806 		    !cpumask_empty(cp->effective_cpus))
807 			csa[ndoms++] = cp;
808 
809 		/*
810 		 * Skip @cp's subtree if not a partition root and has no
811 		 * exclusive CPUs to be granted to child cpusets.
812 		 */
813 		if (!is_partition_valid(cp) && cpumask_empty(cp->exclusive_cpus))
814 			pos_css = css_rightmost_descendant(pos_css);
815 	}
816 	rcu_read_unlock();
817 
818 	for (i = 0; i < ndoms; i++) {
819 		for (j = i + 1; j < ndoms; j++) {
820 			if (cpusets_overlap(csa[i], csa[j]))
821 				/*
822 				 * Cgroup v2 shouldn't pass down overlapping
823 				 * partition root cpusets.
824 				 */
825 				WARN_ON_ONCE(1);
826 		}
827 	}
828 
829 generate_doms:
830 	doms = alloc_sched_domains(ndoms);
831 	if (!doms)
832 		goto done;
833 
834 	/*
835 	 * The rest of the code, including the scheduler, can deal with
836 	 * dattr==NULL case. No need to abort if alloc fails.
837 	 */
838 	dattr = kmalloc_array(ndoms, sizeof(struct sched_domain_attr),
839 			      GFP_KERNEL);
840 
841 	/*
842 	 * Cgroup v2 doesn't support domain attributes, just set all of them
843 	 * to SD_ATTR_INIT. Also non-isolating partition root CPUs are a
844 	 * subset of HK_TYPE_DOMAIN housekeeping CPUs.
845 	 */
846 	for (i = 0; i < ndoms; i++) {
847 		/*
848 		 * The top cpuset may contain some boot time isolated
849 		 * CPUs that need to be excluded from the sched domain.
850 		 */
851 		if (!csa || csa[i] == &top_cpuset)
852 			cpumask_and(doms[i], top_cpuset.effective_cpus,
853 				    housekeeping_cpumask(HK_TYPE_DOMAIN));
854 		else
855 			cpumask_copy(doms[i], csa[i]->effective_cpus);
856 		if (dattr)
857 			dattr[i] = SD_ATTR_INIT;
858 	}
859 
860 done:
861 	kfree(csa);
862 
863 	/*
864 	 * Fallback to the default domain if kmalloc() failed.
865 	 * See comments in partition_sched_domains().
866 	 */
867 	if (doms == NULL)
868 		ndoms = 1;
869 
870 	*domains    = doms;
871 	*attributes = dattr;
872 	return ndoms;
873 }
874 
875 static void dl_update_tasks_root_domain(struct cpuset *cs)
876 {
877 	struct css_task_iter it;
878 	struct task_struct *task;
879 
880 	if (cs->nr_deadline_tasks == 0)
881 		return;
882 
883 	css_task_iter_start(&cs->css, 0, &it);
884 
885 	while ((task = css_task_iter_next(&it)))
886 		dl_add_task_root_domain(task);
887 
888 	css_task_iter_end(&it);
889 }
890 
891 void dl_rebuild_rd_accounting(void)
892 {
893 	struct cpuset *cs = NULL;
894 	struct cgroup_subsys_state *pos_css;
895 	int cpu;
896 	u64 cookie = ++dl_cookie;
897 
898 	lockdep_assert_cpuset_lock_held();
899 	lockdep_assert_cpus_held();
900 	lockdep_assert_held(&sched_domains_mutex);
901 
902 	rcu_read_lock();
903 
904 	for_each_possible_cpu(cpu) {
905 		if (dl_bw_visited(cpu, cookie))
906 			continue;
907 
908 		dl_clear_root_domain_cpu(cpu);
909 	}
910 
911 	cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
912 
913 		if (cpumask_empty(cs->effective_cpus)) {
914 			pos_css = css_rightmost_descendant(pos_css);
915 			continue;
916 		}
917 
918 		css_get(&cs->css);
919 
920 		rcu_read_unlock();
921 
922 		dl_update_tasks_root_domain(cs);
923 
924 		rcu_read_lock();
925 		css_put(&cs->css);
926 	}
927 	rcu_read_unlock();
928 }
929 
930 /*
931  * Rebuild scheduler domains.
932  *
933  * If the flag 'sched_load_balance' of any cpuset with non-empty
934  * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
935  * which has that flag enabled, or if any cpuset with a non-empty
936  * 'cpus' is removed, then call this routine to rebuild the
937  * scheduler's dynamic sched domains.
938  *
939  * Call with cpuset_mutex held.  Takes cpus_read_lock().
940  */
941 void rebuild_sched_domains_locked(void)
942 {
943 	struct sched_domain_attr *attr;
944 	cpumask_var_t *doms;
945 	int ndoms;
946 	int i;
947 
948 	lockdep_assert_cpus_held();
949 	lockdep_assert_cpuset_lock_held();
950 	force_sd_rebuild = false;
951 
952 	/* Generate domain masks and attrs */
953 	ndoms = generate_sched_domains(&doms, &attr);
954 
955 	/*
956 	* cpuset_hotplug_workfn is invoked synchronously now, thus this
957 	* function should not race with CPU hotplug. And the effective CPUs
958 	* must not include any offline CPUs. Passing an offline CPU in the
959 	* doms to partition_sched_domains() will trigger a kernel panic.
960 	*
961 	* We perform a final check here: if the doms contains any
962 	* offline CPUs, a warning is emitted and we return directly to
963 	* prevent the panic.
964 	*/
965 	for (i = 0; i < ndoms; ++i) {
966 		if (WARN_ON_ONCE(!cpumask_subset(doms[i], cpu_active_mask)))
967 			return;
968 	}
969 
970 	/* Have scheduler rebuild the domains */
971 	partition_sched_domains(ndoms, doms, attr);
972 }
973 #else /* !CONFIG_SMP */
974 void rebuild_sched_domains_locked(void)
975 {
976 }
977 #endif /* CONFIG_SMP */
978 
979 static void rebuild_sched_domains_cpuslocked(void)
980 {
981 	mutex_lock(&cpuset_mutex);
982 	rebuild_sched_domains_locked();
983 	mutex_unlock(&cpuset_mutex);
984 }
985 
986 void rebuild_sched_domains(void)
987 {
988 	cpus_read_lock();
989 	rebuild_sched_domains_cpuslocked();
990 	cpus_read_unlock();
991 }
992 
993 void cpuset_reset_sched_domains(void)
994 {
995 	mutex_lock(&cpuset_mutex);
996 	partition_sched_domains(1, NULL, NULL);
997 	mutex_unlock(&cpuset_mutex);
998 }
999 
1000 /**
1001  * cpuset_update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
1002  * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
1003  * @new_cpus: the temp variable for the new effective_cpus mask
1004  *
1005  * Iterate through each task of @cs updating its cpus_allowed to the
1006  * effective cpuset's.  As this function is called with cpuset_mutex held,
1007  * cpuset membership stays stable.
1008  *
1009  * For top_cpuset, task_cpu_possible_mask() is used instead of effective_cpus
1010  * to make sure all offline CPUs are also included as hotplug code won't
1011  * update cpumasks for tasks in top_cpuset.
1012  *
1013  * As task_cpu_possible_mask() can be task dependent in arm64, we have to
1014  * do cpu masking per task instead of doing it once for all.
1015  */
1016 void cpuset_update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
1017 {
1018 	struct css_task_iter it;
1019 	struct task_struct *task;
1020 	bool top_cs = cs == &top_cpuset;
1021 
1022 	css_task_iter_start(&cs->css, 0, &it);
1023 	while ((task = css_task_iter_next(&it))) {
1024 		const struct cpumask *possible_mask = task_cpu_possible_mask(task);
1025 
1026 		if (top_cs) {
1027 			/*
1028 			 * PF_KTHREAD tasks are handled by housekeeping.
1029 			 * PF_NO_SETAFFINITY tasks are ignored.
1030 			 */
1031 			if (task->flags & (PF_KTHREAD | PF_NO_SETAFFINITY))
1032 				continue;
1033 			cpumask_andnot(new_cpus, possible_mask, subpartitions_cpus);
1034 		} else {
1035 			cpumask_and(new_cpus, possible_mask, cs->effective_cpus);
1036 		}
1037 		set_cpus_allowed_ptr(task, new_cpus);
1038 	}
1039 	css_task_iter_end(&it);
1040 }
1041 
1042 /**
1043  * compute_effective_cpumask - Compute the effective cpumask of the cpuset
1044  * @new_cpus: the temp variable for the new effective_cpus mask
1045  * @cs: the cpuset the need to recompute the new effective_cpus mask
1046  * @parent: the parent cpuset
1047  *
1048  * The result is valid only if the given cpuset isn't a partition root.
1049  */
1050 static void compute_effective_cpumask(struct cpumask *new_cpus,
1051 				      struct cpuset *cs, struct cpuset *parent)
1052 {
1053 	cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus);
1054 }
1055 
1056 /*
1057  * Commands for update_parent_effective_cpumask
1058  */
1059 enum partition_cmd {
1060 	partcmd_enable,		/* Enable partition root	  */
1061 	partcmd_enablei,	/* Enable isolated partition root */
1062 	partcmd_disable,	/* Disable partition root	  */
1063 	partcmd_update,		/* Update parent's effective_cpus */
1064 	partcmd_invalidate,	/* Make partition invalid	  */
1065 };
1066 
1067 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
1068 				    struct tmpmasks *tmp);
1069 
1070 /*
1071  * Update partition exclusive flag
1072  *
1073  * Return: 0 if successful, an error code otherwise
1074  */
1075 static int update_partition_exclusive_flag(struct cpuset *cs, int new_prs)
1076 {
1077 	bool exclusive = (new_prs > PRS_MEMBER);
1078 
1079 	if (exclusive && !is_cpu_exclusive(cs)) {
1080 		if (cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, 1))
1081 			return PERR_NOTEXCL;
1082 	} else if (!exclusive && is_cpu_exclusive(cs)) {
1083 		/* Turning off CS_CPU_EXCLUSIVE will not return error */
1084 		cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, 0);
1085 	}
1086 	return 0;
1087 }
1088 
1089 /*
1090  * Update partition load balance flag and/or rebuild sched domain
1091  *
1092  * Changing load balance flag will automatically call
1093  * rebuild_sched_domains_locked().
1094  * This function is for cgroup v2 only.
1095  */
1096 static void update_partition_sd_lb(struct cpuset *cs, int old_prs)
1097 {
1098 	int new_prs = cs->partition_root_state;
1099 	bool rebuild_domains = (new_prs > 0) || (old_prs > 0);
1100 	bool new_lb;
1101 
1102 	/*
1103 	 * If cs is not a valid partition root, the load balance state
1104 	 * will follow its parent.
1105 	 */
1106 	if (new_prs > 0) {
1107 		new_lb = (new_prs != PRS_ISOLATED);
1108 	} else {
1109 		new_lb = is_sched_load_balance(parent_cs(cs));
1110 	}
1111 	if (new_lb != !!is_sched_load_balance(cs)) {
1112 		rebuild_domains = true;
1113 		if (new_lb)
1114 			set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1115 		else
1116 			clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1117 	}
1118 
1119 	if (rebuild_domains)
1120 		cpuset_force_rebuild();
1121 }
1122 
1123 /*
1124  * tasks_nocpu_error - Return true if tasks will have no effective_cpus
1125  */
1126 static bool tasks_nocpu_error(struct cpuset *parent, struct cpuset *cs,
1127 			      struct cpumask *xcpus)
1128 {
1129 	/*
1130 	 * A populated partition (cs or parent) can't have empty effective_cpus
1131 	 */
1132 	return (cpumask_subset(parent->effective_cpus, xcpus) &&
1133 		partition_is_populated(parent, cs)) ||
1134 	       (!cpumask_intersects(xcpus, cpu_active_mask) &&
1135 		partition_is_populated(cs, NULL));
1136 }
1137 
1138 static void reset_partition_data(struct cpuset *cs)
1139 {
1140 	struct cpuset *parent = parent_cs(cs);
1141 
1142 	if (!cpuset_v2())
1143 		return;
1144 
1145 	lockdep_assert_held(&callback_lock);
1146 
1147 	if (cpumask_empty(cs->exclusive_cpus)) {
1148 		cpumask_clear(cs->effective_xcpus);
1149 		if (is_cpu_exclusive(cs))
1150 			clear_bit(CS_CPU_EXCLUSIVE, &cs->flags);
1151 	}
1152 	if (!cpumask_and(cs->effective_cpus, parent->effective_cpus, cs->cpus_allowed))
1153 		cpumask_copy(cs->effective_cpus, parent->effective_cpus);
1154 }
1155 
1156 /*
1157  * isolated_cpus_update - Update the isolated_cpus mask
1158  * @old_prs: old partition_root_state
1159  * @new_prs: new partition_root_state
1160  * @xcpus: exclusive CPUs with state change
1161  */
1162 static void isolated_cpus_update(int old_prs, int new_prs, struct cpumask *xcpus)
1163 {
1164 	WARN_ON_ONCE(old_prs == new_prs);
1165 	if (new_prs == PRS_ISOLATED)
1166 		cpumask_or(isolated_cpus, isolated_cpus, xcpus);
1167 	else
1168 		cpumask_andnot(isolated_cpus, isolated_cpus, xcpus);
1169 
1170 	isolated_cpus_updating = true;
1171 }
1172 
1173 /*
1174  * partition_xcpus_add - Add new exclusive CPUs to partition
1175  * @new_prs: new partition_root_state
1176  * @parent: parent cpuset
1177  * @xcpus: exclusive CPUs to be added
1178  *
1179  * Remote partition if parent == NULL
1180  */
1181 static void partition_xcpus_add(int new_prs, struct cpuset *parent,
1182 				struct cpumask *xcpus)
1183 {
1184 	WARN_ON_ONCE(new_prs < 0);
1185 	lockdep_assert_held(&callback_lock);
1186 	if (!parent)
1187 		parent = &top_cpuset;
1188 
1189 
1190 	if (parent == &top_cpuset)
1191 		cpumask_or(subpartitions_cpus, subpartitions_cpus, xcpus);
1192 
1193 	if (new_prs != parent->partition_root_state)
1194 		isolated_cpus_update(parent->partition_root_state, new_prs,
1195 				     xcpus);
1196 
1197 	cpumask_andnot(parent->effective_cpus, parent->effective_cpus, xcpus);
1198 }
1199 
1200 /*
1201  * partition_xcpus_del - Remove exclusive CPUs from partition
1202  * @old_prs: old partition_root_state
1203  * @parent: parent cpuset
1204  * @xcpus: exclusive CPUs to be removed
1205  *
1206  * Remote partition if parent == NULL
1207  */
1208 static void partition_xcpus_del(int old_prs, struct cpuset *parent,
1209 				struct cpumask *xcpus)
1210 {
1211 	WARN_ON_ONCE(old_prs < 0);
1212 	lockdep_assert_held(&callback_lock);
1213 	if (!parent)
1214 		parent = &top_cpuset;
1215 
1216 	if (parent == &top_cpuset)
1217 		cpumask_andnot(subpartitions_cpus, subpartitions_cpus, xcpus);
1218 
1219 	if (old_prs != parent->partition_root_state)
1220 		isolated_cpus_update(old_prs, parent->partition_root_state,
1221 				     xcpus);
1222 
1223 	cpumask_and(xcpus, xcpus, cpu_active_mask);
1224 	cpumask_or(parent->effective_cpus, parent->effective_cpus, xcpus);
1225 }
1226 
1227 /*
1228  * isolated_cpus_can_update - check for isolated & nohz_full conflicts
1229  * @add_cpus: cpu mask for cpus that are going to be isolated
1230  * @del_cpus: cpu mask for cpus that are no longer isolated, can be NULL
1231  * Return: false if there is conflict, true otherwise
1232  *
1233  * If nohz_full is enabled and we have isolated CPUs, their combination must
1234  * still leave housekeeping CPUs.
1235  *
1236  * TBD: Should consider merging this function into
1237  *      prstate_housekeeping_conflict().
1238  */
1239 static bool isolated_cpus_can_update(struct cpumask *add_cpus,
1240 				     struct cpumask *del_cpus)
1241 {
1242 	cpumask_var_t full_hk_cpus;
1243 	int res = true;
1244 
1245 	if (!housekeeping_enabled(HK_TYPE_KERNEL_NOISE))
1246 		return true;
1247 
1248 	if (del_cpus && cpumask_weight_and(del_cpus,
1249 			housekeeping_cpumask(HK_TYPE_KERNEL_NOISE)))
1250 		return true;
1251 
1252 	if (!alloc_cpumask_var(&full_hk_cpus, GFP_KERNEL))
1253 		return false;
1254 
1255 	cpumask_and(full_hk_cpus, housekeeping_cpumask(HK_TYPE_KERNEL_NOISE),
1256 		    housekeeping_cpumask(HK_TYPE_DOMAIN));
1257 	cpumask_andnot(full_hk_cpus, full_hk_cpus, isolated_cpus);
1258 	cpumask_and(full_hk_cpus, full_hk_cpus, cpu_active_mask);
1259 	if (!cpumask_weight_andnot(full_hk_cpus, add_cpus))
1260 		res = false;
1261 
1262 	free_cpumask_var(full_hk_cpus);
1263 	return res;
1264 }
1265 
1266 /*
1267  * prstate_housekeeping_conflict - check for partition & housekeeping conflicts
1268  * @prstate: partition root state to be checked
1269  * @new_cpus: cpu mask
1270  * Return: true if there is conflict, false otherwise
1271  *
1272  * CPUs outside of HK_TYPE_DOMAIN_BOOT, if defined, can only be used in an
1273  * isolated partition.
1274  */
1275 static bool prstate_housekeeping_conflict(int prstate, struct cpumask *new_cpus)
1276 {
1277 	if (!housekeeping_enabled(HK_TYPE_DOMAIN_BOOT))
1278 		return false;
1279 
1280 	if ((prstate != PRS_ISOLATED) &&
1281 	    !cpumask_subset(new_cpus, housekeeping_cpumask(HK_TYPE_DOMAIN_BOOT)))
1282 		return true;
1283 
1284 	return false;
1285 }
1286 
1287 /*
1288  * update_isolation_cpumasks - Update external isolation related CPU masks
1289  *
1290  * The following external CPU masks will be updated if necessary:
1291  * - workqueue unbound cpumask
1292  */
1293 static void update_isolation_cpumasks(void)
1294 {
1295 	int ret;
1296 
1297 	if (!isolated_cpus_updating)
1298 		return;
1299 
1300 	ret = housekeeping_update(isolated_cpus);
1301 	WARN_ON_ONCE(ret < 0);
1302 
1303 	isolated_cpus_updating = false;
1304 }
1305 
1306 /**
1307  * rm_siblings_excl_cpus - Remove exclusive CPUs that are used by sibling cpusets
1308  * @parent: Parent cpuset containing all siblings
1309  * @cs: Current cpuset (will be skipped)
1310  * @excpus:  exclusive effective CPU mask to modify
1311  *
1312  * This function ensures the given @excpus mask doesn't include any CPUs that
1313  * are exclusively allocated to sibling cpusets. It walks through all siblings
1314  * of @cs under @parent and removes their exclusive CPUs from @excpus.
1315  */
1316 static int rm_siblings_excl_cpus(struct cpuset *parent, struct cpuset *cs,
1317 					struct cpumask *excpus)
1318 {
1319 	struct cgroup_subsys_state *css;
1320 	struct cpuset *sibling;
1321 	int retval = 0;
1322 
1323 	if (cpumask_empty(excpus))
1324 		return 0;
1325 
1326 	/*
1327 	 * Remove exclusive CPUs from siblings
1328 	 */
1329 	rcu_read_lock();
1330 	cpuset_for_each_child(sibling, css, parent) {
1331 		struct cpumask *sibling_xcpus;
1332 
1333 		if (sibling == cs)
1334 			continue;
1335 
1336 		/*
1337 		 * If exclusive_cpus is defined, effective_xcpus will always
1338 		 * be a subset. Otherwise, effective_xcpus will only be set
1339 		 * in a valid partition root.
1340 		 */
1341 		sibling_xcpus = cpumask_empty(sibling->exclusive_cpus)
1342 			      ? sibling->effective_xcpus
1343 			      : sibling->exclusive_cpus;
1344 
1345 		if (cpumask_intersects(excpus, sibling_xcpus)) {
1346 			cpumask_andnot(excpus, excpus, sibling_xcpus);
1347 			retval++;
1348 		}
1349 	}
1350 	rcu_read_unlock();
1351 
1352 	return retval;
1353 }
1354 
1355 /*
1356  * compute_excpus - compute effective exclusive CPUs
1357  * @cs: cpuset
1358  * @xcpus: effective exclusive CPUs value to be set
1359  * Return: 0 if there is no sibling conflict, > 0 otherwise
1360  *
1361  * If exclusive_cpus isn't explicitly set , we have to scan the sibling cpusets
1362  * and exclude their exclusive_cpus or effective_xcpus as well.
1363  */
1364 static int compute_excpus(struct cpuset *cs, struct cpumask *excpus)
1365 {
1366 	struct cpuset *parent = parent_cs(cs);
1367 
1368 	cpumask_and(excpus, user_xcpus(cs), parent->effective_xcpus);
1369 
1370 	if (!cpumask_empty(cs->exclusive_cpus))
1371 		return 0;
1372 
1373 	return rm_siblings_excl_cpus(parent, cs, excpus);
1374 }
1375 
1376 /*
1377  * compute_trialcs_excpus - Compute effective exclusive CPUs for a trial cpuset
1378  * @trialcs: The trial cpuset containing the proposed new configuration
1379  * @cs: The original cpuset that the trial configuration is based on
1380  * Return: 0 if successful with no sibling conflict, >0 if a conflict is found
1381  *
1382  * Computes the effective_xcpus for a trial configuration. @cs is provided to represent
1383  * the real cs.
1384  */
1385 static int compute_trialcs_excpus(struct cpuset *trialcs, struct cpuset *cs)
1386 {
1387 	struct cpuset *parent = parent_cs(trialcs);
1388 	struct cpumask *excpus = trialcs->effective_xcpus;
1389 
1390 	/* trialcs is member, cpuset.cpus has no impact to excpus */
1391 	if (cs_is_member(cs))
1392 		cpumask_and(excpus, trialcs->exclusive_cpus,
1393 				parent->effective_xcpus);
1394 	else
1395 		cpumask_and(excpus, user_xcpus(trialcs), parent->effective_xcpus);
1396 
1397 	return rm_siblings_excl_cpus(parent, cs, excpus);
1398 }
1399 
1400 static inline bool is_remote_partition(struct cpuset *cs)
1401 {
1402 	return cs->remote_partition;
1403 }
1404 
1405 static inline bool is_local_partition(struct cpuset *cs)
1406 {
1407 	return is_partition_valid(cs) && !is_remote_partition(cs);
1408 }
1409 
1410 /*
1411  * remote_partition_enable - Enable current cpuset as a remote partition root
1412  * @cs: the cpuset to update
1413  * @new_prs: new partition_root_state
1414  * @tmp: temporary masks
1415  * Return: 0 if successful, errcode if error
1416  *
1417  * Enable the current cpuset to become a remote partition root taking CPUs
1418  * directly from the top cpuset. cpuset_mutex must be held by the caller.
1419  */
1420 static int remote_partition_enable(struct cpuset *cs, int new_prs,
1421 				   struct tmpmasks *tmp)
1422 {
1423 	/*
1424 	 * The user must have sysadmin privilege.
1425 	 */
1426 	if (!capable(CAP_SYS_ADMIN))
1427 		return PERR_ACCESS;
1428 
1429 	/*
1430 	 * The requested exclusive_cpus must not be allocated to other
1431 	 * partitions and it can't use up all the root's effective_cpus.
1432 	 *
1433 	 * The effective_xcpus mask can contain offline CPUs, but there must
1434 	 * be at least one or more online CPUs present before it can be enabled.
1435 	 *
1436 	 * Note that creating a remote partition with any local partition root
1437 	 * above it or remote partition root underneath it is not allowed.
1438 	 */
1439 	compute_excpus(cs, tmp->new_cpus);
1440 	WARN_ON_ONCE(cpumask_intersects(tmp->new_cpus, subpartitions_cpus));
1441 	if (!cpumask_intersects(tmp->new_cpus, cpu_active_mask) ||
1442 	    cpumask_subset(top_cpuset.effective_cpus, tmp->new_cpus))
1443 		return PERR_INVCPUS;
1444 	if (((new_prs == PRS_ISOLATED) &&
1445 	     !isolated_cpus_can_update(tmp->new_cpus, NULL)) ||
1446 	    prstate_housekeeping_conflict(new_prs, tmp->new_cpus))
1447 		return PERR_HKEEPING;
1448 
1449 	spin_lock_irq(&callback_lock);
1450 	partition_xcpus_add(new_prs, NULL, tmp->new_cpus);
1451 	cs->remote_partition = true;
1452 	cpumask_copy(cs->effective_xcpus, tmp->new_cpus);
1453 	spin_unlock_irq(&callback_lock);
1454 	update_isolation_cpumasks();
1455 	cpuset_force_rebuild();
1456 	cs->prs_err = 0;
1457 
1458 	/*
1459 	 * Propagate changes in top_cpuset's effective_cpus down the hierarchy.
1460 	 */
1461 	cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
1462 	update_sibling_cpumasks(&top_cpuset, NULL, tmp);
1463 	return 0;
1464 }
1465 
1466 /*
1467  * remote_partition_disable - Remove current cpuset from remote partition list
1468  * @cs: the cpuset to update
1469  * @tmp: temporary masks
1470  *
1471  * The effective_cpus is also updated.
1472  *
1473  * cpuset_mutex must be held by the caller.
1474  */
1475 static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp)
1476 {
1477 	WARN_ON_ONCE(!is_remote_partition(cs));
1478 	/*
1479 	 * When a CPU is offlined, top_cpuset may end up with no available CPUs,
1480 	 * which should clear subpartitions_cpus. We should not emit a warning for this
1481 	 * scenario: the hierarchy is updated from top to bottom, so subpartitions_cpus
1482 	 * may already be cleared when disabling the partition.
1483 	 */
1484 	WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus) &&
1485 		     !cpumask_empty(subpartitions_cpus));
1486 
1487 	spin_lock_irq(&callback_lock);
1488 	cs->remote_partition = false;
1489 	partition_xcpus_del(cs->partition_root_state, NULL, cs->effective_xcpus);
1490 	if (cs->prs_err)
1491 		cs->partition_root_state = -cs->partition_root_state;
1492 	else
1493 		cs->partition_root_state = PRS_MEMBER;
1494 
1495 	/* effective_xcpus may need to be changed */
1496 	compute_excpus(cs, cs->effective_xcpus);
1497 	reset_partition_data(cs);
1498 	spin_unlock_irq(&callback_lock);
1499 	update_isolation_cpumasks();
1500 	cpuset_force_rebuild();
1501 
1502 	/*
1503 	 * Propagate changes in top_cpuset's effective_cpus down the hierarchy.
1504 	 */
1505 	cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
1506 	update_sibling_cpumasks(&top_cpuset, NULL, tmp);
1507 }
1508 
1509 /*
1510  * remote_cpus_update - cpus_exclusive change of remote partition
1511  * @cs: the cpuset to be updated
1512  * @xcpus: the new exclusive_cpus mask, if non-NULL
1513  * @excpus: the new effective_xcpus mask
1514  * @tmp: temporary masks
1515  *
1516  * top_cpuset and subpartitions_cpus will be updated or partition can be
1517  * invalidated.
1518  */
1519 static void remote_cpus_update(struct cpuset *cs, struct cpumask *xcpus,
1520 			       struct cpumask *excpus, struct tmpmasks *tmp)
1521 {
1522 	bool adding, deleting;
1523 	int prs = cs->partition_root_state;
1524 
1525 	if (WARN_ON_ONCE(!is_remote_partition(cs)))
1526 		return;
1527 
1528 	WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus));
1529 
1530 	if (cpumask_empty(excpus)) {
1531 		cs->prs_err = PERR_CPUSEMPTY;
1532 		goto invalidate;
1533 	}
1534 
1535 	adding   = cpumask_andnot(tmp->addmask, excpus, cs->effective_xcpus);
1536 	deleting = cpumask_andnot(tmp->delmask, cs->effective_xcpus, excpus);
1537 
1538 	/*
1539 	 * Additions of remote CPUs is only allowed if those CPUs are
1540 	 * not allocated to other partitions and there are effective_cpus
1541 	 * left in the top cpuset.
1542 	 */
1543 	if (adding) {
1544 		WARN_ON_ONCE(cpumask_intersects(tmp->addmask, subpartitions_cpus));
1545 		if (!capable(CAP_SYS_ADMIN))
1546 			cs->prs_err = PERR_ACCESS;
1547 		else if (cpumask_intersects(tmp->addmask, subpartitions_cpus) ||
1548 			 cpumask_subset(top_cpuset.effective_cpus, tmp->addmask))
1549 			cs->prs_err = PERR_NOCPUS;
1550 		else if ((prs == PRS_ISOLATED) &&
1551 			 !isolated_cpus_can_update(tmp->addmask, tmp->delmask))
1552 			cs->prs_err = PERR_HKEEPING;
1553 		if (cs->prs_err)
1554 			goto invalidate;
1555 	}
1556 
1557 	spin_lock_irq(&callback_lock);
1558 	if (adding)
1559 		partition_xcpus_add(prs, NULL, tmp->addmask);
1560 	if (deleting)
1561 		partition_xcpus_del(prs, NULL, tmp->delmask);
1562 	/*
1563 	 * Need to update effective_xcpus and exclusive_cpus now as
1564 	 * update_sibling_cpumasks() below may iterate back to the same cs.
1565 	 */
1566 	cpumask_copy(cs->effective_xcpus, excpus);
1567 	if (xcpus)
1568 		cpumask_copy(cs->exclusive_cpus, xcpus);
1569 	spin_unlock_irq(&callback_lock);
1570 	update_isolation_cpumasks();
1571 	if (adding || deleting)
1572 		cpuset_force_rebuild();
1573 
1574 	/*
1575 	 * Propagate changes in top_cpuset's effective_cpus down the hierarchy.
1576 	 */
1577 	cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
1578 	update_sibling_cpumasks(&top_cpuset, NULL, tmp);
1579 	return;
1580 
1581 invalidate:
1582 	remote_partition_disable(cs, tmp);
1583 }
1584 
1585 /**
1586  * update_parent_effective_cpumask - update effective_cpus mask of parent cpuset
1587  * @cs:      The cpuset that requests change in partition root state
1588  * @cmd:     Partition root state change command
1589  * @newmask: Optional new cpumask for partcmd_update
1590  * @tmp:     Temporary addmask and delmask
1591  * Return:   0 or a partition root state error code
1592  *
1593  * For partcmd_enable*, the cpuset is being transformed from a non-partition
1594  * root to a partition root. The effective_xcpus (cpus_allowed if
1595  * effective_xcpus not set) mask of the given cpuset will be taken away from
1596  * parent's effective_cpus. The function will return 0 if all the CPUs listed
1597  * in effective_xcpus can be granted or an error code will be returned.
1598  *
1599  * For partcmd_disable, the cpuset is being transformed from a partition
1600  * root back to a non-partition root. Any CPUs in effective_xcpus will be
1601  * given back to parent's effective_cpus. 0 will always be returned.
1602  *
1603  * For partcmd_update, if the optional newmask is specified, the cpu list is
1604  * to be changed from effective_xcpus to newmask. Otherwise, effective_xcpus is
1605  * assumed to remain the same. The cpuset should either be a valid or invalid
1606  * partition root. The partition root state may change from valid to invalid
1607  * or vice versa. An error code will be returned if transitioning from
1608  * invalid to valid violates the exclusivity rule.
1609  *
1610  * For partcmd_invalidate, the current partition will be made invalid.
1611  *
1612  * The partcmd_enable* and partcmd_disable commands are used by
1613  * update_prstate(). An error code may be returned and the caller will check
1614  * for error.
1615  *
1616  * The partcmd_update command is used by update_cpumasks_hier() with newmask
1617  * NULL and update_cpumask() with newmask set. The partcmd_invalidate is used
1618  * by update_cpumask() with NULL newmask. In both cases, the callers won't
1619  * check for error and so partition_root_state and prs_err will be updated
1620  * directly.
1621  */
1622 static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
1623 					   struct cpumask *newmask,
1624 					   struct tmpmasks *tmp)
1625 {
1626 	struct cpuset *parent = parent_cs(cs);
1627 	int adding;	/* Adding cpus to parent's effective_cpus	*/
1628 	int deleting;	/* Deleting cpus from parent's effective_cpus	*/
1629 	int old_prs, new_prs;
1630 	int part_error = PERR_NONE;	/* Partition error? */
1631 	struct cpumask *xcpus = user_xcpus(cs);
1632 	int parent_prs = parent->partition_root_state;
1633 	bool nocpu;
1634 
1635 	lockdep_assert_cpuset_lock_held();
1636 	WARN_ON_ONCE(is_remote_partition(cs));	/* For local partition only */
1637 
1638 	/*
1639 	 * new_prs will only be changed for the partcmd_update and
1640 	 * partcmd_invalidate commands.
1641 	 */
1642 	adding = deleting = false;
1643 	old_prs = new_prs = cs->partition_root_state;
1644 
1645 	if (cmd == partcmd_invalidate) {
1646 		if (is_partition_invalid(cs))
1647 			return 0;
1648 
1649 		/*
1650 		 * Make the current partition invalid.
1651 		 */
1652 		if (is_partition_valid(parent))
1653 			adding = cpumask_and(tmp->addmask,
1654 					     xcpus, parent->effective_xcpus);
1655 		if (old_prs > 0)
1656 			new_prs = -old_prs;
1657 
1658 		goto write_error;
1659 	}
1660 
1661 	/*
1662 	 * The parent must be a partition root.
1663 	 * The new cpumask, if present, or the current cpus_allowed must
1664 	 * not be empty.
1665 	 */
1666 	if (!is_partition_valid(parent)) {
1667 		return is_partition_invalid(parent)
1668 		       ? PERR_INVPARENT : PERR_NOTPART;
1669 	}
1670 	if (!newmask && xcpus_empty(cs))
1671 		return PERR_CPUSEMPTY;
1672 
1673 	nocpu = tasks_nocpu_error(parent, cs, xcpus);
1674 
1675 	if ((cmd == partcmd_enable) || (cmd == partcmd_enablei)) {
1676 		/*
1677 		 * Need to call compute_excpus() in case
1678 		 * exclusive_cpus not set. Sibling conflict should only happen
1679 		 * if exclusive_cpus isn't set.
1680 		 */
1681 		xcpus = tmp->delmask;
1682 		if (compute_excpus(cs, xcpus))
1683 			WARN_ON_ONCE(!cpumask_empty(cs->exclusive_cpus));
1684 		new_prs = (cmd == partcmd_enable) ? PRS_ROOT : PRS_ISOLATED;
1685 
1686 		/*
1687 		 * Enabling partition root is not allowed if its
1688 		 * effective_xcpus is empty.
1689 		 */
1690 		if (cpumask_empty(xcpus))
1691 			return PERR_INVCPUS;
1692 
1693 		if (prstate_housekeeping_conflict(new_prs, xcpus))
1694 			return PERR_HKEEPING;
1695 
1696 		if ((new_prs == PRS_ISOLATED) && (new_prs != parent_prs) &&
1697 		    !isolated_cpus_can_update(xcpus, NULL))
1698 			return PERR_HKEEPING;
1699 
1700 		if (tasks_nocpu_error(parent, cs, xcpus))
1701 			return PERR_NOCPUS;
1702 
1703 		/*
1704 		 * This function will only be called when all the preliminary
1705 		 * checks have passed. At this point, the following condition
1706 		 * should hold.
1707 		 *
1708 		 * (cs->effective_xcpus & cpu_active_mask) ⊆ parent->effective_cpus
1709 		 *
1710 		 * Warn if it is not the case.
1711 		 */
1712 		cpumask_and(tmp->new_cpus, xcpus, cpu_active_mask);
1713 		WARN_ON_ONCE(!cpumask_subset(tmp->new_cpus, parent->effective_cpus));
1714 
1715 		deleting = true;
1716 	} else if (cmd == partcmd_disable) {
1717 		/*
1718 		 * May need to add cpus back to parent's effective_cpus
1719 		 * (and maybe removed from subpartitions_cpus/isolated_cpus)
1720 		 * for valid partition root. xcpus may contain CPUs that
1721 		 * shouldn't be removed from the two global cpumasks.
1722 		 */
1723 		if (is_partition_valid(cs)) {
1724 			cpumask_copy(tmp->addmask, cs->effective_xcpus);
1725 			adding = true;
1726 		}
1727 		new_prs = PRS_MEMBER;
1728 	} else if (newmask) {
1729 		/*
1730 		 * Empty cpumask is not allowed
1731 		 */
1732 		if (cpumask_empty(newmask)) {
1733 			part_error = PERR_CPUSEMPTY;
1734 			goto write_error;
1735 		}
1736 
1737 		/* Check newmask again, whether cpus are available for parent/cs */
1738 		nocpu |= tasks_nocpu_error(parent, cs, newmask);
1739 
1740 		/*
1741 		 * partcmd_update with newmask:
1742 		 *
1743 		 * Compute add/delete mask to/from effective_cpus
1744 		 *
1745 		 * For valid partition:
1746 		 *   addmask = exclusive_cpus & ~newmask
1747 		 *			      & parent->effective_xcpus
1748 		 *   delmask = newmask & ~exclusive_cpus
1749 		 *		       & parent->effective_xcpus
1750 		 *
1751 		 * For invalid partition:
1752 		 *   delmask = newmask & parent->effective_xcpus
1753 		 *   The partition may become valid soon.
1754 		 */
1755 		if (is_partition_invalid(cs)) {
1756 			adding = false;
1757 			deleting = cpumask_and(tmp->delmask,
1758 					newmask, parent->effective_xcpus);
1759 		} else {
1760 			cpumask_andnot(tmp->addmask, xcpus, newmask);
1761 			adding = cpumask_and(tmp->addmask, tmp->addmask,
1762 					     parent->effective_xcpus);
1763 
1764 			cpumask_andnot(tmp->delmask, newmask, xcpus);
1765 			deleting = cpumask_and(tmp->delmask, tmp->delmask,
1766 					       parent->effective_xcpus);
1767 		}
1768 
1769 		/*
1770 		 * TBD: Invalidate a currently valid child root partition may
1771 		 * still break isolated_cpus_can_update() rule if parent is an
1772 		 * isolated partition.
1773 		 */
1774 		if (is_partition_valid(cs) && (old_prs != parent_prs)) {
1775 			if ((parent_prs == PRS_ROOT) &&
1776 			    /* Adding to parent means removing isolated CPUs */
1777 			    !isolated_cpus_can_update(tmp->delmask, tmp->addmask))
1778 				part_error = PERR_HKEEPING;
1779 			if ((parent_prs == PRS_ISOLATED) &&
1780 			    /* Adding to parent means adding isolated CPUs */
1781 			    !isolated_cpus_can_update(tmp->addmask, tmp->delmask))
1782 				part_error = PERR_HKEEPING;
1783 		}
1784 
1785 		/*
1786 		 * The new CPUs to be removed from parent's effective CPUs
1787 		 * must be present.
1788 		 */
1789 		if (deleting) {
1790 			cpumask_and(tmp->new_cpus, tmp->delmask, cpu_active_mask);
1791 			WARN_ON_ONCE(!cpumask_subset(tmp->new_cpus, parent->effective_cpus));
1792 		}
1793 
1794 		/*
1795 		 * Make partition invalid if parent's effective_cpus could
1796 		 * become empty and there are tasks in the parent.
1797 		 */
1798 		if (nocpu && (!adding ||
1799 		    !cpumask_intersects(tmp->addmask, cpu_active_mask))) {
1800 			part_error = PERR_NOCPUS;
1801 			deleting = false;
1802 			adding = cpumask_and(tmp->addmask,
1803 					     xcpus, parent->effective_xcpus);
1804 		}
1805 	} else {
1806 		/*
1807 		 * partcmd_update w/o newmask
1808 		 *
1809 		 * delmask = effective_xcpus & parent->effective_cpus
1810 		 *
1811 		 * This can be called from:
1812 		 * 1) update_cpumasks_hier()
1813 		 * 2) cpuset_hotplug_update_tasks()
1814 		 *
1815 		 * Check to see if it can be transitioned from valid to
1816 		 * invalid partition or vice versa.
1817 		 *
1818 		 * A partition error happens when parent has tasks and all
1819 		 * its effective CPUs will have to be distributed out.
1820 		 */
1821 		if (nocpu) {
1822 			part_error = PERR_NOCPUS;
1823 			if (is_partition_valid(cs))
1824 				adding = cpumask_and(tmp->addmask,
1825 						xcpus, parent->effective_xcpus);
1826 		} else if (is_partition_invalid(cs) && !cpumask_empty(xcpus) &&
1827 			   cpumask_subset(xcpus, parent->effective_xcpus)) {
1828 			struct cgroup_subsys_state *css;
1829 			struct cpuset *child;
1830 			bool exclusive = true;
1831 
1832 			/*
1833 			 * Convert invalid partition to valid has to
1834 			 * pass the cpu exclusivity test.
1835 			 */
1836 			rcu_read_lock();
1837 			cpuset_for_each_child(child, css, parent) {
1838 				if (child == cs)
1839 					continue;
1840 				if (!cpusets_are_exclusive(cs, child)) {
1841 					exclusive = false;
1842 					break;
1843 				}
1844 			}
1845 			rcu_read_unlock();
1846 			if (exclusive)
1847 				deleting = cpumask_and(tmp->delmask,
1848 						xcpus, parent->effective_cpus);
1849 			else
1850 				part_error = PERR_NOTEXCL;
1851 		}
1852 	}
1853 
1854 write_error:
1855 	if (part_error)
1856 		WRITE_ONCE(cs->prs_err, part_error);
1857 
1858 	if (cmd == partcmd_update) {
1859 		/*
1860 		 * Check for possible transition between valid and invalid
1861 		 * partition root.
1862 		 */
1863 		switch (cs->partition_root_state) {
1864 		case PRS_ROOT:
1865 		case PRS_ISOLATED:
1866 			if (part_error)
1867 				new_prs = -old_prs;
1868 			break;
1869 		case PRS_INVALID_ROOT:
1870 		case PRS_INVALID_ISOLATED:
1871 			if (!part_error)
1872 				new_prs = -old_prs;
1873 			break;
1874 		}
1875 	}
1876 
1877 	if (!adding && !deleting && (new_prs == old_prs))
1878 		return 0;
1879 
1880 	/*
1881 	 * Transitioning between invalid to valid or vice versa may require
1882 	 * changing CS_CPU_EXCLUSIVE. In the case of partcmd_update,
1883 	 * validate_change() has already been successfully called and
1884 	 * CPU lists in cs haven't been updated yet. So defer it to later.
1885 	 */
1886 	if ((old_prs != new_prs) && (cmd != partcmd_update))  {
1887 		int err = update_partition_exclusive_flag(cs, new_prs);
1888 
1889 		if (err)
1890 			return err;
1891 	}
1892 
1893 	/*
1894 	 * Change the parent's effective_cpus & effective_xcpus (top cpuset
1895 	 * only).
1896 	 *
1897 	 * Newly added CPUs will be removed from effective_cpus and
1898 	 * newly deleted ones will be added back to effective_cpus.
1899 	 */
1900 	spin_lock_irq(&callback_lock);
1901 	if (old_prs != new_prs)
1902 		cs->partition_root_state = new_prs;
1903 
1904 	/*
1905 	 * Adding to parent's effective_cpus means deletion CPUs from cs
1906 	 * and vice versa.
1907 	 */
1908 	if (adding)
1909 		partition_xcpus_del(old_prs, parent, tmp->addmask);
1910 	if (deleting)
1911 		partition_xcpus_add(new_prs, parent, tmp->delmask);
1912 
1913 	spin_unlock_irq(&callback_lock);
1914 	update_isolation_cpumasks();
1915 
1916 	if ((old_prs != new_prs) && (cmd == partcmd_update))
1917 		update_partition_exclusive_flag(cs, new_prs);
1918 
1919 	if (adding || deleting) {
1920 		cpuset_update_tasks_cpumask(parent, tmp->addmask);
1921 		update_sibling_cpumasks(parent, cs, tmp);
1922 	}
1923 
1924 	/*
1925 	 * For partcmd_update without newmask, it is being called from
1926 	 * cpuset_handle_hotplug(). Update the load balance flag and
1927 	 * scheduling domain accordingly.
1928 	 */
1929 	if ((cmd == partcmd_update) && !newmask)
1930 		update_partition_sd_lb(cs, old_prs);
1931 
1932 	notify_partition_change(cs, old_prs);
1933 	return 0;
1934 }
1935 
1936 /**
1937  * compute_partition_effective_cpumask - compute effective_cpus for partition
1938  * @cs: partition root cpuset
1939  * @new_ecpus: previously computed effective_cpus to be updated
1940  *
1941  * Compute the effective_cpus of a partition root by scanning effective_xcpus
1942  * of child partition roots and excluding their effective_xcpus.
1943  *
1944  * This has the side effect of invalidating valid child partition roots,
1945  * if necessary. Since it is called from either cpuset_hotplug_update_tasks()
1946  * or update_cpumasks_hier() where parent and children are modified
1947  * successively, we don't need to call update_parent_effective_cpumask()
1948  * and the child's effective_cpus will be updated in later iterations.
1949  *
1950  * Note that rcu_read_lock() is assumed to be held.
1951  */
1952 static void compute_partition_effective_cpumask(struct cpuset *cs,
1953 						struct cpumask *new_ecpus)
1954 {
1955 	struct cgroup_subsys_state *css;
1956 	struct cpuset *child;
1957 	bool populated = partition_is_populated(cs, NULL);
1958 
1959 	/*
1960 	 * Check child partition roots to see if they should be
1961 	 * invalidated when
1962 	 *  1) child effective_xcpus not a subset of new
1963 	 *     excluisve_cpus
1964 	 *  2) All the effective_cpus will be used up and cp
1965 	 *     has tasks
1966 	 */
1967 	compute_excpus(cs, new_ecpus);
1968 	cpumask_and(new_ecpus, new_ecpus, cpu_active_mask);
1969 
1970 	rcu_read_lock();
1971 	cpuset_for_each_child(child, css, cs) {
1972 		if (!is_partition_valid(child))
1973 			continue;
1974 
1975 		/*
1976 		 * There shouldn't be a remote partition underneath another
1977 		 * partition root.
1978 		 */
1979 		WARN_ON_ONCE(is_remote_partition(child));
1980 		child->prs_err = 0;
1981 		if (!cpumask_subset(child->effective_xcpus,
1982 				    cs->effective_xcpus))
1983 			child->prs_err = PERR_INVCPUS;
1984 		else if (populated &&
1985 			 cpumask_subset(new_ecpus, child->effective_xcpus))
1986 			child->prs_err = PERR_NOCPUS;
1987 
1988 		if (child->prs_err) {
1989 			int old_prs = child->partition_root_state;
1990 
1991 			/*
1992 			 * Invalidate child partition
1993 			 */
1994 			spin_lock_irq(&callback_lock);
1995 			make_partition_invalid(child);
1996 			spin_unlock_irq(&callback_lock);
1997 			notify_partition_change(child, old_prs);
1998 			continue;
1999 		}
2000 		cpumask_andnot(new_ecpus, new_ecpus,
2001 			       child->effective_xcpus);
2002 	}
2003 	rcu_read_unlock();
2004 }
2005 
2006 /*
2007  * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
2008  * @cs:  the cpuset to consider
2009  * @tmp: temp variables for calculating effective_cpus & partition setup
2010  * @force: don't skip any descendant cpusets if set
2011  *
2012  * When configured cpumask is changed, the effective cpumasks of this cpuset
2013  * and all its descendants need to be updated.
2014  *
2015  * On legacy hierarchy, effective_cpus will be the same with cpu_allowed.
2016  *
2017  * Called with cpuset_mutex held
2018  */
2019 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
2020 				 bool force)
2021 {
2022 	struct cpuset *cp;
2023 	struct cgroup_subsys_state *pos_css;
2024 	int old_prs, new_prs;
2025 
2026 	rcu_read_lock();
2027 	cpuset_for_each_descendant_pre(cp, pos_css, cs) {
2028 		struct cpuset *parent = parent_cs(cp);
2029 		bool remote = is_remote_partition(cp);
2030 		bool update_parent = false;
2031 
2032 		old_prs = new_prs = cp->partition_root_state;
2033 
2034 		/*
2035 		 * For child remote partition root (!= cs), we need to call
2036 		 * remote_cpus_update() if effective_xcpus will be changed.
2037 		 * Otherwise, we can skip the whole subtree.
2038 		 *
2039 		 * remote_cpus_update() will reuse tmp->new_cpus only after
2040 		 * its value is being processed.
2041 		 */
2042 		if (remote && (cp != cs)) {
2043 			compute_excpus(cp, tmp->new_cpus);
2044 			if (cpumask_equal(cp->effective_xcpus, tmp->new_cpus)) {
2045 				pos_css = css_rightmost_descendant(pos_css);
2046 				continue;
2047 			}
2048 			rcu_read_unlock();
2049 			remote_cpus_update(cp, NULL, tmp->new_cpus, tmp);
2050 			rcu_read_lock();
2051 
2052 			/* Remote partition may be invalidated */
2053 			new_prs = cp->partition_root_state;
2054 			remote = (new_prs == old_prs);
2055 		}
2056 
2057 		if (remote || (is_partition_valid(parent) && is_partition_valid(cp)))
2058 			compute_partition_effective_cpumask(cp, tmp->new_cpus);
2059 		else
2060 			compute_effective_cpumask(tmp->new_cpus, cp, parent);
2061 
2062 		if (remote)
2063 			goto get_css;	/* Ready to update cpuset data */
2064 
2065 		/*
2066 		 * A partition with no effective_cpus is allowed as long as
2067 		 * there is no task associated with it. Call
2068 		 * update_parent_effective_cpumask() to check it.
2069 		 */
2070 		if (is_partition_valid(cp) && cpumask_empty(tmp->new_cpus)) {
2071 			update_parent = true;
2072 			goto update_parent_effective;
2073 		}
2074 
2075 		/*
2076 		 * If it becomes empty, inherit the effective mask of the
2077 		 * parent, which is guaranteed to have some CPUs unless
2078 		 * it is a partition root that has explicitly distributed
2079 		 * out all its CPUs.
2080 		 */
2081 		if (is_in_v2_mode() && !remote && cpumask_empty(tmp->new_cpus))
2082 			cpumask_copy(tmp->new_cpus, parent->effective_cpus);
2083 
2084 		/*
2085 		 * Skip the whole subtree if
2086 		 * 1) the cpumask remains the same,
2087 		 * 2) has no partition root state,
2088 		 * 3) force flag not set, and
2089 		 * 4) for v2 load balance state same as its parent.
2090 		 */
2091 		if (!cp->partition_root_state && !force &&
2092 		    cpumask_equal(tmp->new_cpus, cp->effective_cpus) &&
2093 		    (!cpuset_v2() ||
2094 		    (is_sched_load_balance(parent) == is_sched_load_balance(cp)))) {
2095 			pos_css = css_rightmost_descendant(pos_css);
2096 			continue;
2097 		}
2098 
2099 update_parent_effective:
2100 		/*
2101 		 * update_parent_effective_cpumask() should have been called
2102 		 * for cs already in update_cpumask(). We should also call
2103 		 * cpuset_update_tasks_cpumask() again for tasks in the parent
2104 		 * cpuset if the parent's effective_cpus changes.
2105 		 */
2106 		if ((cp != cs) && old_prs) {
2107 			switch (parent->partition_root_state) {
2108 			case PRS_ROOT:
2109 			case PRS_ISOLATED:
2110 				update_parent = true;
2111 				break;
2112 
2113 			default:
2114 				/*
2115 				 * When parent is not a partition root or is
2116 				 * invalid, child partition roots become
2117 				 * invalid too.
2118 				 */
2119 				if (is_partition_valid(cp))
2120 					new_prs = -cp->partition_root_state;
2121 				WRITE_ONCE(cp->prs_err,
2122 					   is_partition_invalid(parent)
2123 					   ? PERR_INVPARENT : PERR_NOTPART);
2124 				break;
2125 			}
2126 		}
2127 get_css:
2128 		if (!css_tryget_online(&cp->css))
2129 			continue;
2130 		rcu_read_unlock();
2131 
2132 		if (update_parent) {
2133 			update_parent_effective_cpumask(cp, partcmd_update, NULL, tmp);
2134 			/*
2135 			 * The cpuset partition_root_state may become
2136 			 * invalid. Capture it.
2137 			 */
2138 			new_prs = cp->partition_root_state;
2139 		}
2140 
2141 		spin_lock_irq(&callback_lock);
2142 		cpumask_copy(cp->effective_cpus, tmp->new_cpus);
2143 		cp->partition_root_state = new_prs;
2144 		/*
2145 		 * Need to compute effective_xcpus if either exclusive_cpus
2146 		 * is non-empty or it is a valid partition root.
2147 		 */
2148 		if ((new_prs > 0) || !cpumask_empty(cp->exclusive_cpus))
2149 			compute_excpus(cp, cp->effective_xcpus);
2150 		if (new_prs <= 0)
2151 			reset_partition_data(cp);
2152 		spin_unlock_irq(&callback_lock);
2153 
2154 		notify_partition_change(cp, old_prs);
2155 
2156 		WARN_ON(!is_in_v2_mode() &&
2157 			!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
2158 
2159 		cpuset_update_tasks_cpumask(cp, cp->effective_cpus);
2160 
2161 		/*
2162 		 * On default hierarchy, inherit the CS_SCHED_LOAD_BALANCE
2163 		 * from parent if current cpuset isn't a valid partition root
2164 		 * and their load balance states differ.
2165 		 */
2166 		if (cpuset_v2() && !is_partition_valid(cp) &&
2167 		    (is_sched_load_balance(parent) != is_sched_load_balance(cp))) {
2168 			if (is_sched_load_balance(parent))
2169 				set_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
2170 			else
2171 				clear_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
2172 		}
2173 
2174 		/*
2175 		 * On legacy hierarchy, if the effective cpumask of any non-
2176 		 * empty cpuset is changed, we need to rebuild sched domains.
2177 		 * On default hierarchy, the cpuset needs to be a partition
2178 		 * root as well.
2179 		 */
2180 		if (!cpumask_empty(cp->cpus_allowed) &&
2181 		    is_sched_load_balance(cp) &&
2182 		   (!cpuset_v2() || is_partition_valid(cp)))
2183 			cpuset_force_rebuild();
2184 
2185 		rcu_read_lock();
2186 		css_put(&cp->css);
2187 	}
2188 	rcu_read_unlock();
2189 }
2190 
2191 /**
2192  * update_sibling_cpumasks - Update siblings cpumasks
2193  * @parent:  Parent cpuset
2194  * @cs:      Current cpuset
2195  * @tmp:     Temp variables
2196  */
2197 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
2198 				    struct tmpmasks *tmp)
2199 {
2200 	struct cpuset *sibling;
2201 	struct cgroup_subsys_state *pos_css;
2202 
2203 	lockdep_assert_cpuset_lock_held();
2204 
2205 	/*
2206 	 * Check all its siblings and call update_cpumasks_hier()
2207 	 * if their effective_cpus will need to be changed.
2208 	 *
2209 	 * It is possible a change in parent's effective_cpus
2210 	 * due to a change in a child partition's effective_xcpus will impact
2211 	 * its siblings even if they do not inherit parent's effective_cpus
2212 	 * directly. It should not impact valid partition.
2213 	 *
2214 	 * The update_cpumasks_hier() function may sleep. So we have to
2215 	 * release the RCU read lock before calling it.
2216 	 */
2217 	rcu_read_lock();
2218 	cpuset_for_each_child(sibling, pos_css, parent) {
2219 		if (sibling == cs || is_partition_valid(sibling))
2220 			continue;
2221 
2222 		compute_effective_cpumask(tmp->new_cpus, sibling,
2223 					  parent);
2224 		if (cpumask_equal(tmp->new_cpus, sibling->effective_cpus))
2225 			continue;
2226 
2227 		if (!css_tryget_online(&sibling->css))
2228 			continue;
2229 
2230 		rcu_read_unlock();
2231 		update_cpumasks_hier(sibling, tmp, false);
2232 		rcu_read_lock();
2233 		css_put(&sibling->css);
2234 	}
2235 	rcu_read_unlock();
2236 }
2237 
2238 static int parse_cpuset_cpulist(const char *buf, struct cpumask *out_mask)
2239 {
2240 	int retval;
2241 
2242 	retval = cpulist_parse(buf, out_mask);
2243 	if (retval < 0)
2244 		return retval;
2245 	if (!cpumask_subset(out_mask, top_cpuset.cpus_allowed))
2246 		return -EINVAL;
2247 
2248 	return 0;
2249 }
2250 
2251 /**
2252  * validate_partition - Validate a cpuset partition configuration
2253  * @cs: The cpuset to validate
2254  * @trialcs: The trial cpuset containing proposed configuration changes
2255  *
2256  * If any validation check fails, the appropriate error code is set in the
2257  * cpuset's prs_err field.
2258  *
2259  * Return: PRS error code (0 if valid, non-zero error code if invalid)
2260  */
2261 static enum prs_errcode validate_partition(struct cpuset *cs, struct cpuset *trialcs)
2262 {
2263 	struct cpuset *parent = parent_cs(cs);
2264 
2265 	if (cs_is_member(trialcs))
2266 		return PERR_NONE;
2267 
2268 	if (cpumask_empty(trialcs->effective_xcpus))
2269 		return PERR_INVCPUS;
2270 
2271 	if (prstate_housekeeping_conflict(trialcs->partition_root_state,
2272 					  trialcs->effective_xcpus))
2273 		return PERR_HKEEPING;
2274 
2275 	if (tasks_nocpu_error(parent, cs, trialcs->effective_xcpus))
2276 		return PERR_NOCPUS;
2277 
2278 	return PERR_NONE;
2279 }
2280 
2281 /**
2282  * partition_cpus_change - Handle partition state changes due to CPU mask updates
2283  * @cs: The target cpuset being modified
2284  * @trialcs: The trial cpuset containing proposed configuration changes
2285  * @tmp: Temporary masks for intermediate calculations
2286  *
2287  * This function handles partition state transitions triggered by CPU mask changes.
2288  * CPU modifications may cause a partition to be disabled or require state updates.
2289  */
2290 static void partition_cpus_change(struct cpuset *cs, struct cpuset *trialcs,
2291 					struct tmpmasks *tmp)
2292 {
2293 	enum prs_errcode prs_err;
2294 
2295 	if (cs_is_member(cs))
2296 		return;
2297 
2298 	prs_err = validate_partition(cs, trialcs);
2299 	if (prs_err)
2300 		trialcs->prs_err = cs->prs_err = prs_err;
2301 
2302 	if (is_remote_partition(cs)) {
2303 		if (trialcs->prs_err)
2304 			remote_partition_disable(cs, tmp);
2305 		else
2306 			remote_cpus_update(cs, trialcs->exclusive_cpus,
2307 					   trialcs->effective_xcpus, tmp);
2308 	} else {
2309 		if (trialcs->prs_err)
2310 			update_parent_effective_cpumask(cs, partcmd_invalidate,
2311 							NULL, tmp);
2312 		else
2313 			update_parent_effective_cpumask(cs, partcmd_update,
2314 							trialcs->effective_xcpus, tmp);
2315 	}
2316 }
2317 
2318 /**
2319  * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
2320  * @cs: the cpuset to consider
2321  * @trialcs: trial cpuset
2322  * @buf: buffer of cpu numbers written to this cpuset
2323  */
2324 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
2325 			  const char *buf)
2326 {
2327 	int retval;
2328 	struct tmpmasks tmp;
2329 	bool force = false;
2330 	int old_prs = cs->partition_root_state;
2331 
2332 	retval = parse_cpuset_cpulist(buf, trialcs->cpus_allowed);
2333 	if (retval < 0)
2334 		return retval;
2335 
2336 	/* Nothing to do if the cpus didn't change */
2337 	if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
2338 		return 0;
2339 
2340 	compute_trialcs_excpus(trialcs, cs);
2341 	trialcs->prs_err = PERR_NONE;
2342 
2343 	retval = validate_change(cs, trialcs);
2344 	if (retval < 0)
2345 		return retval;
2346 
2347 	if (alloc_tmpmasks(&tmp))
2348 		return -ENOMEM;
2349 
2350 	/*
2351 	 * Check all the descendants in update_cpumasks_hier() if
2352 	 * effective_xcpus is to be changed.
2353 	 */
2354 	force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus);
2355 
2356 	partition_cpus_change(cs, trialcs, &tmp);
2357 
2358 	spin_lock_irq(&callback_lock);
2359 	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
2360 	cpumask_copy(cs->effective_xcpus, trialcs->effective_xcpus);
2361 	if ((old_prs > 0) && !is_partition_valid(cs))
2362 		reset_partition_data(cs);
2363 	spin_unlock_irq(&callback_lock);
2364 
2365 	/* effective_cpus/effective_xcpus will be updated here */
2366 	update_cpumasks_hier(cs, &tmp, force);
2367 
2368 	/* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */
2369 	if (cs->partition_root_state)
2370 		update_partition_sd_lb(cs, old_prs);
2371 
2372 	free_tmpmasks(&tmp);
2373 	return retval;
2374 }
2375 
2376 /**
2377  * update_exclusive_cpumask - update the exclusive_cpus mask of a cpuset
2378  * @cs: the cpuset to consider
2379  * @trialcs: trial cpuset
2380  * @buf: buffer of cpu numbers written to this cpuset
2381  *
2382  * The tasks' cpumask will be updated if cs is a valid partition root.
2383  */
2384 static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs,
2385 				    const char *buf)
2386 {
2387 	int retval;
2388 	struct tmpmasks tmp;
2389 	bool force = false;
2390 	int old_prs = cs->partition_root_state;
2391 
2392 	retval = parse_cpuset_cpulist(buf, trialcs->exclusive_cpus);
2393 	if (retval < 0)
2394 		return retval;
2395 
2396 	/* Nothing to do if the CPUs didn't change */
2397 	if (cpumask_equal(cs->exclusive_cpus, trialcs->exclusive_cpus))
2398 		return 0;
2399 
2400 	/*
2401 	 * Reject the change if there is exclusive CPUs conflict with
2402 	 * the siblings.
2403 	 */
2404 	if (compute_trialcs_excpus(trialcs, cs))
2405 		return -EINVAL;
2406 
2407 	/*
2408 	 * Check all the descendants in update_cpumasks_hier() if
2409 	 * effective_xcpus is to be changed.
2410 	 */
2411 	force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus);
2412 
2413 	retval = validate_change(cs, trialcs);
2414 	if (retval)
2415 		return retval;
2416 
2417 	if (alloc_tmpmasks(&tmp))
2418 		return -ENOMEM;
2419 
2420 	trialcs->prs_err = PERR_NONE;
2421 	partition_cpus_change(cs, trialcs, &tmp);
2422 
2423 	spin_lock_irq(&callback_lock);
2424 	cpumask_copy(cs->exclusive_cpus, trialcs->exclusive_cpus);
2425 	cpumask_copy(cs->effective_xcpus, trialcs->effective_xcpus);
2426 	if ((old_prs > 0) && !is_partition_valid(cs))
2427 		reset_partition_data(cs);
2428 	spin_unlock_irq(&callback_lock);
2429 
2430 	/*
2431 	 * Call update_cpumasks_hier() to update effective_cpus/effective_xcpus
2432 	 * of the subtree when it is a valid partition root or effective_xcpus
2433 	 * is updated.
2434 	 */
2435 	if (is_partition_valid(cs) || force)
2436 		update_cpumasks_hier(cs, &tmp, force);
2437 
2438 	/* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */
2439 	if (cs->partition_root_state)
2440 		update_partition_sd_lb(cs, old_prs);
2441 
2442 	free_tmpmasks(&tmp);
2443 	return 0;
2444 }
2445 
2446 /*
2447  * Migrate memory region from one set of nodes to another.  This is
2448  * performed asynchronously as it can be called from process migration path
2449  * holding locks involved in process management.  All mm migrations are
2450  * performed in the queued order and can be waited for by flushing
2451  * cpuset_migrate_mm_wq.
2452  */
2453 
2454 struct cpuset_migrate_mm_work {
2455 	struct work_struct	work;
2456 	struct mm_struct	*mm;
2457 	nodemask_t		from;
2458 	nodemask_t		to;
2459 };
2460 
2461 static void cpuset_migrate_mm_workfn(struct work_struct *work)
2462 {
2463 	struct cpuset_migrate_mm_work *mwork =
2464 		container_of(work, struct cpuset_migrate_mm_work, work);
2465 
2466 	/* on a wq worker, no need to worry about %current's mems_allowed */
2467 	do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
2468 	mmput(mwork->mm);
2469 	kfree(mwork);
2470 }
2471 
2472 static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
2473 							const nodemask_t *to)
2474 {
2475 	struct cpuset_migrate_mm_work *mwork;
2476 
2477 	if (nodes_equal(*from, *to)) {
2478 		mmput(mm);
2479 		return;
2480 	}
2481 
2482 	mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
2483 	if (mwork) {
2484 		mwork->mm = mm;
2485 		mwork->from = *from;
2486 		mwork->to = *to;
2487 		INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
2488 		queue_work(cpuset_migrate_mm_wq, &mwork->work);
2489 	} else {
2490 		mmput(mm);
2491 	}
2492 }
2493 
2494 static void flush_migrate_mm_task_workfn(struct callback_head *head)
2495 {
2496 	flush_workqueue(cpuset_migrate_mm_wq);
2497 	kfree(head);
2498 }
2499 
2500 static void schedule_flush_migrate_mm(void)
2501 {
2502 	struct callback_head *flush_cb;
2503 
2504 	flush_cb = kzalloc(sizeof(struct callback_head), GFP_KERNEL);
2505 	if (!flush_cb)
2506 		return;
2507 
2508 	init_task_work(flush_cb, flush_migrate_mm_task_workfn);
2509 
2510 	if (task_work_add(current, flush_cb, TWA_RESUME))
2511 		kfree(flush_cb);
2512 }
2513 
2514 /*
2515  * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
2516  * @tsk: the task to change
2517  * @newmems: new nodes that the task will be set
2518  *
2519  * We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed
2520  * and rebind an eventual tasks' mempolicy. If the task is allocating in
2521  * parallel, it might temporarily see an empty intersection, which results in
2522  * a seqlock check and retry before OOM or allocation failure.
2523  */
2524 static void cpuset_change_task_nodemask(struct task_struct *tsk,
2525 					nodemask_t *newmems)
2526 {
2527 	task_lock(tsk);
2528 
2529 	local_irq_disable();
2530 	write_seqcount_begin(&tsk->mems_allowed_seq);
2531 
2532 	nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
2533 	mpol_rebind_task(tsk, newmems);
2534 	tsk->mems_allowed = *newmems;
2535 
2536 	write_seqcount_end(&tsk->mems_allowed_seq);
2537 	local_irq_enable();
2538 
2539 	task_unlock(tsk);
2540 }
2541 
2542 static void *cpuset_being_rebound;
2543 
2544 /**
2545  * cpuset_update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
2546  * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
2547  *
2548  * Iterate through each task of @cs updating its mems_allowed to the
2549  * effective cpuset's.  As this function is called with cpuset_mutex held,
2550  * cpuset membership stays stable.
2551  */
2552 void cpuset_update_tasks_nodemask(struct cpuset *cs)
2553 {
2554 	static nodemask_t newmems;	/* protected by cpuset_mutex */
2555 	struct css_task_iter it;
2556 	struct task_struct *task;
2557 
2558 	cpuset_being_rebound = cs;		/* causes mpol_dup() rebind */
2559 
2560 	guarantee_online_mems(cs, &newmems);
2561 
2562 	/*
2563 	 * The mpol_rebind_mm() call takes mmap_lock, which we couldn't
2564 	 * take while holding tasklist_lock.  Forks can happen - the
2565 	 * mpol_dup() cpuset_being_rebound check will catch such forks,
2566 	 * and rebind their vma mempolicies too.  Because we still hold
2567 	 * the global cpuset_mutex, we know that no other rebind effort
2568 	 * will be contending for the global variable cpuset_being_rebound.
2569 	 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
2570 	 * is idempotent.  Also migrate pages in each mm to new nodes.
2571 	 */
2572 	css_task_iter_start(&cs->css, 0, &it);
2573 	while ((task = css_task_iter_next(&it))) {
2574 		struct mm_struct *mm;
2575 		bool migrate;
2576 
2577 		cpuset_change_task_nodemask(task, &newmems);
2578 
2579 		mm = get_task_mm(task);
2580 		if (!mm)
2581 			continue;
2582 
2583 		migrate = is_memory_migrate(cs);
2584 
2585 		mpol_rebind_mm(mm, &cs->mems_allowed);
2586 		if (migrate)
2587 			cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
2588 		else
2589 			mmput(mm);
2590 	}
2591 	css_task_iter_end(&it);
2592 
2593 	/*
2594 	 * All the tasks' nodemasks have been updated, update
2595 	 * cs->old_mems_allowed.
2596 	 */
2597 	cs->old_mems_allowed = newmems;
2598 
2599 	/* We're done rebinding vmas to this cpuset's new mems_allowed. */
2600 	cpuset_being_rebound = NULL;
2601 }
2602 
2603 /*
2604  * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree
2605  * @cs: the cpuset to consider
2606  * @new_mems: a temp variable for calculating new effective_mems
2607  *
2608  * When configured nodemask is changed, the effective nodemasks of this cpuset
2609  * and all its descendants need to be updated.
2610  *
2611  * On legacy hierarchy, effective_mems will be the same with mems_allowed.
2612  *
2613  * Called with cpuset_mutex held
2614  */
2615 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
2616 {
2617 	struct cpuset *cp;
2618 	struct cgroup_subsys_state *pos_css;
2619 
2620 	rcu_read_lock();
2621 	cpuset_for_each_descendant_pre(cp, pos_css, cs) {
2622 		struct cpuset *parent = parent_cs(cp);
2623 
2624 		bool has_mems = nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);
2625 
2626 		/*
2627 		 * If it becomes empty, inherit the effective mask of the
2628 		 * parent, which is guaranteed to have some MEMs.
2629 		 */
2630 		if (is_in_v2_mode() && !has_mems)
2631 			*new_mems = parent->effective_mems;
2632 
2633 		/* Skip the whole subtree if the nodemask remains the same. */
2634 		if (nodes_equal(*new_mems, cp->effective_mems)) {
2635 			pos_css = css_rightmost_descendant(pos_css);
2636 			continue;
2637 		}
2638 
2639 		if (!css_tryget_online(&cp->css))
2640 			continue;
2641 		rcu_read_unlock();
2642 
2643 		spin_lock_irq(&callback_lock);
2644 		cp->effective_mems = *new_mems;
2645 		spin_unlock_irq(&callback_lock);
2646 
2647 		WARN_ON(!is_in_v2_mode() &&
2648 			!nodes_equal(cp->mems_allowed, cp->effective_mems));
2649 
2650 		cpuset_update_tasks_nodemask(cp);
2651 
2652 		rcu_read_lock();
2653 		css_put(&cp->css);
2654 	}
2655 	rcu_read_unlock();
2656 }
2657 
2658 /*
2659  * Handle user request to change the 'mems' memory placement
2660  * of a cpuset.  Needs to validate the request, update the
2661  * cpusets mems_allowed, and for each task in the cpuset,
2662  * update mems_allowed and rebind task's mempolicy and any vma
2663  * mempolicies and if the cpuset is marked 'memory_migrate',
2664  * migrate the tasks pages to the new memory.
2665  *
2666  * Call with cpuset_mutex held. May take callback_lock during call.
2667  * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
2668  * lock each such tasks mm->mmap_lock, scan its vma's and rebind
2669  * their mempolicies to the cpusets new mems_allowed.
2670  */
2671 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
2672 			   const char *buf)
2673 {
2674 	int retval;
2675 
2676 	/*
2677 	 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
2678 	 * The validate_change() call ensures that cpusets with tasks have memory.
2679 	 */
2680 	retval = nodelist_parse(buf, trialcs->mems_allowed);
2681 	if (retval < 0)
2682 		return retval;
2683 
2684 	if (!nodes_subset(trialcs->mems_allowed,
2685 			  top_cpuset.mems_allowed))
2686 		return -EINVAL;
2687 
2688 	/* No change? nothing to do */
2689 	if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed))
2690 		return 0;
2691 
2692 	retval = validate_change(cs, trialcs);
2693 	if (retval < 0)
2694 		return retval;
2695 
2696 	check_insane_mems_config(&trialcs->mems_allowed);
2697 
2698 	spin_lock_irq(&callback_lock);
2699 	cs->mems_allowed = trialcs->mems_allowed;
2700 	spin_unlock_irq(&callback_lock);
2701 
2702 	/* use trialcs->mems_allowed as a temp variable */
2703 	update_nodemasks_hier(cs, &trialcs->mems_allowed);
2704 	return 0;
2705 }
2706 
2707 bool current_cpuset_is_being_rebound(void)
2708 {
2709 	bool ret;
2710 
2711 	rcu_read_lock();
2712 	ret = task_cs(current) == cpuset_being_rebound;
2713 	rcu_read_unlock();
2714 
2715 	return ret;
2716 }
2717 
2718 /*
2719  * cpuset_update_flag - read a 0 or a 1 in a file and update associated flag
2720  * bit:		the bit to update (see cpuset_flagbits_t)
2721  * cs:		the cpuset to update
2722  * turning_on: 	whether the flag is being set or cleared
2723  *
2724  * Call with cpuset_mutex held.
2725  */
2726 
2727 int cpuset_update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
2728 		       int turning_on)
2729 {
2730 	struct cpuset *trialcs;
2731 	int balance_flag_changed;
2732 	int spread_flag_changed;
2733 	int err;
2734 
2735 	trialcs = dup_or_alloc_cpuset(cs);
2736 	if (!trialcs)
2737 		return -ENOMEM;
2738 
2739 	if (turning_on)
2740 		set_bit(bit, &trialcs->flags);
2741 	else
2742 		clear_bit(bit, &trialcs->flags);
2743 
2744 	err = validate_change(cs, trialcs);
2745 	if (err < 0)
2746 		goto out;
2747 
2748 	balance_flag_changed = (is_sched_load_balance(cs) !=
2749 				is_sched_load_balance(trialcs));
2750 
2751 	spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
2752 			|| (is_spread_page(cs) != is_spread_page(trialcs)));
2753 
2754 	spin_lock_irq(&callback_lock);
2755 	cs->flags = trialcs->flags;
2756 	spin_unlock_irq(&callback_lock);
2757 
2758 	if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) {
2759 		if (cpuset_v2())
2760 			cpuset_force_rebuild();
2761 		else
2762 			rebuild_sched_domains_locked();
2763 	}
2764 
2765 	if (spread_flag_changed)
2766 		cpuset1_update_tasks_flags(cs);
2767 out:
2768 	free_cpuset(trialcs);
2769 	return err;
2770 }
2771 
2772 /**
2773  * update_prstate - update partition_root_state
2774  * @cs: the cpuset to update
2775  * @new_prs: new partition root state
2776  * Return: 0 if successful, != 0 if error
2777  *
2778  * Call with cpuset_mutex held.
2779  */
2780 static int update_prstate(struct cpuset *cs, int new_prs)
2781 {
2782 	int err = PERR_NONE, old_prs = cs->partition_root_state;
2783 	struct cpuset *parent = parent_cs(cs);
2784 	struct tmpmasks tmpmask;
2785 	bool isolcpus_updated = false;
2786 
2787 	if (old_prs == new_prs)
2788 		return 0;
2789 
2790 	/*
2791 	 * Treat a previously invalid partition root as if it is a "member".
2792 	 */
2793 	if (new_prs && is_partition_invalid(cs))
2794 		old_prs = PRS_MEMBER;
2795 
2796 	if (alloc_tmpmasks(&tmpmask))
2797 		return -ENOMEM;
2798 
2799 	err = update_partition_exclusive_flag(cs, new_prs);
2800 	if (err)
2801 		goto out;
2802 
2803 	if (!old_prs) {
2804 		/*
2805 		 * cpus_allowed and exclusive_cpus cannot be both empty.
2806 		 */
2807 		if (xcpus_empty(cs)) {
2808 			err = PERR_CPUSEMPTY;
2809 			goto out;
2810 		}
2811 
2812 		/*
2813 		 * We don't support the creation of a new local partition with
2814 		 * a remote partition underneath it. This unsupported
2815 		 * setting can happen only if parent is the top_cpuset because
2816 		 * a remote partition cannot be created underneath an existing
2817 		 * local or remote partition.
2818 		 */
2819 		if ((parent == &top_cpuset) &&
2820 		    cpumask_intersects(cs->exclusive_cpus, subpartitions_cpus)) {
2821 			err = PERR_REMOTE;
2822 			goto out;
2823 		}
2824 
2825 		/*
2826 		 * If parent is valid partition, enable local partiion.
2827 		 * Otherwise, enable a remote partition.
2828 		 */
2829 		if (is_partition_valid(parent)) {
2830 			enum partition_cmd cmd = (new_prs == PRS_ROOT)
2831 					       ? partcmd_enable : partcmd_enablei;
2832 
2833 			err = update_parent_effective_cpumask(cs, cmd, NULL, &tmpmask);
2834 		} else {
2835 			err = remote_partition_enable(cs, new_prs, &tmpmask);
2836 		}
2837 	} else if (old_prs && new_prs) {
2838 		/*
2839 		 * A change in load balance state only, no change in cpumasks.
2840 		 * Need to update isolated_cpus.
2841 		 */
2842 		if (((new_prs == PRS_ISOLATED) &&
2843 		     !isolated_cpus_can_update(cs->effective_xcpus, NULL)) ||
2844 		    prstate_housekeeping_conflict(new_prs, cs->effective_xcpus))
2845 			err = PERR_HKEEPING;
2846 		else
2847 			isolcpus_updated = true;
2848 	} else {
2849 		/*
2850 		 * Switching back to member is always allowed even if it
2851 		 * disables child partitions.
2852 		 */
2853 		if (is_remote_partition(cs))
2854 			remote_partition_disable(cs, &tmpmask);
2855 		else
2856 			update_parent_effective_cpumask(cs, partcmd_disable,
2857 							NULL, &tmpmask);
2858 
2859 		/*
2860 		 * Invalidation of child partitions will be done in
2861 		 * update_cpumasks_hier().
2862 		 */
2863 	}
2864 out:
2865 	/*
2866 	 * Make partition invalid & disable CS_CPU_EXCLUSIVE if an error
2867 	 * happens.
2868 	 */
2869 	if (err) {
2870 		new_prs = -new_prs;
2871 		update_partition_exclusive_flag(cs, new_prs);
2872 	}
2873 
2874 	spin_lock_irq(&callback_lock);
2875 	cs->partition_root_state = new_prs;
2876 	WRITE_ONCE(cs->prs_err, err);
2877 	if (!is_partition_valid(cs))
2878 		reset_partition_data(cs);
2879 	else if (isolcpus_updated)
2880 		isolated_cpus_update(old_prs, new_prs, cs->effective_xcpus);
2881 	spin_unlock_irq(&callback_lock);
2882 	update_isolation_cpumasks();
2883 
2884 	/* Force update if switching back to member & update effective_xcpus */
2885 	update_cpumasks_hier(cs, &tmpmask, !new_prs);
2886 
2887 	/* A newly created partition must have effective_xcpus set */
2888 	WARN_ON_ONCE(!old_prs && (new_prs > 0)
2889 			      && cpumask_empty(cs->effective_xcpus));
2890 
2891 	/* Update sched domains and load balance flag */
2892 	update_partition_sd_lb(cs, old_prs);
2893 
2894 	notify_partition_change(cs, old_prs);
2895 	if (force_sd_rebuild)
2896 		rebuild_sched_domains_locked();
2897 	free_tmpmasks(&tmpmask);
2898 	return 0;
2899 }
2900 
2901 static struct cpuset *cpuset_attach_old_cs;
2902 
2903 /*
2904  * Check to see if a cpuset can accept a new task
2905  * For v1, cpus_allowed and mems_allowed can't be empty.
2906  * For v2, effective_cpus can't be empty.
2907  * Note that in v1, effective_cpus = cpus_allowed.
2908  */
2909 static int cpuset_can_attach_check(struct cpuset *cs)
2910 {
2911 	if (cpumask_empty(cs->effective_cpus) ||
2912 	   (!is_in_v2_mode() && nodes_empty(cs->mems_allowed)))
2913 		return -ENOSPC;
2914 	return 0;
2915 }
2916 
2917 static void reset_migrate_dl_data(struct cpuset *cs)
2918 {
2919 	cs->nr_migrate_dl_tasks = 0;
2920 	cs->sum_migrate_dl_bw = 0;
2921 }
2922 
2923 /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
2924 static int cpuset_can_attach(struct cgroup_taskset *tset)
2925 {
2926 	struct cgroup_subsys_state *css;
2927 	struct cpuset *cs, *oldcs;
2928 	struct task_struct *task;
2929 	bool cpus_updated, mems_updated;
2930 	int ret;
2931 
2932 	/* used later by cpuset_attach() */
2933 	cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
2934 	oldcs = cpuset_attach_old_cs;
2935 	cs = css_cs(css);
2936 
2937 	mutex_lock(&cpuset_mutex);
2938 
2939 	/* Check to see if task is allowed in the cpuset */
2940 	ret = cpuset_can_attach_check(cs);
2941 	if (ret)
2942 		goto out_unlock;
2943 
2944 	cpus_updated = !cpumask_equal(cs->effective_cpus, oldcs->effective_cpus);
2945 	mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems);
2946 
2947 	cgroup_taskset_for_each(task, css, tset) {
2948 		ret = task_can_attach(task);
2949 		if (ret)
2950 			goto out_unlock;
2951 
2952 		/*
2953 		 * Skip rights over task check in v2 when nothing changes,
2954 		 * migration permission derives from hierarchy ownership in
2955 		 * cgroup_procs_write_permission()).
2956 		 */
2957 		if (!cpuset_v2() || (cpus_updated || mems_updated)) {
2958 			ret = security_task_setscheduler(task);
2959 			if (ret)
2960 				goto out_unlock;
2961 		}
2962 
2963 		if (dl_task(task)) {
2964 			cs->nr_migrate_dl_tasks++;
2965 			cs->sum_migrate_dl_bw += task->dl.dl_bw;
2966 		}
2967 	}
2968 
2969 	if (!cs->nr_migrate_dl_tasks)
2970 		goto out_success;
2971 
2972 	if (!cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus)) {
2973 		int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus);
2974 
2975 		if (unlikely(cpu >= nr_cpu_ids)) {
2976 			reset_migrate_dl_data(cs);
2977 			ret = -EINVAL;
2978 			goto out_unlock;
2979 		}
2980 
2981 		ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw);
2982 		if (ret) {
2983 			reset_migrate_dl_data(cs);
2984 			goto out_unlock;
2985 		}
2986 	}
2987 
2988 out_success:
2989 	/*
2990 	 * Mark attach is in progress.  This makes validate_change() fail
2991 	 * changes which zero cpus/mems_allowed.
2992 	 */
2993 	cs->attach_in_progress++;
2994 out_unlock:
2995 	mutex_unlock(&cpuset_mutex);
2996 	return ret;
2997 }
2998 
2999 static void cpuset_cancel_attach(struct cgroup_taskset *tset)
3000 {
3001 	struct cgroup_subsys_state *css;
3002 	struct cpuset *cs;
3003 
3004 	cgroup_taskset_first(tset, &css);
3005 	cs = css_cs(css);
3006 
3007 	mutex_lock(&cpuset_mutex);
3008 	dec_attach_in_progress_locked(cs);
3009 
3010 	if (cs->nr_migrate_dl_tasks) {
3011 		int cpu = cpumask_any(cs->effective_cpus);
3012 
3013 		dl_bw_free(cpu, cs->sum_migrate_dl_bw);
3014 		reset_migrate_dl_data(cs);
3015 	}
3016 
3017 	mutex_unlock(&cpuset_mutex);
3018 }
3019 
3020 /*
3021  * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach_task()
3022  * but we can't allocate it dynamically there.  Define it global and
3023  * allocate from cpuset_init().
3024  */
3025 static cpumask_var_t cpus_attach;
3026 static nodemask_t cpuset_attach_nodemask_to;
3027 
3028 static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task)
3029 {
3030 	lockdep_assert_cpuset_lock_held();
3031 
3032 	if (cs != &top_cpuset)
3033 		guarantee_active_cpus(task, cpus_attach);
3034 	else
3035 		cpumask_andnot(cpus_attach, task_cpu_possible_mask(task),
3036 			       subpartitions_cpus);
3037 	/*
3038 	 * can_attach beforehand should guarantee that this doesn't
3039 	 * fail.  TODO: have a better way to handle failure here
3040 	 */
3041 	WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
3042 
3043 	cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
3044 	cpuset1_update_task_spread_flags(cs, task);
3045 }
3046 
3047 static void cpuset_attach(struct cgroup_taskset *tset)
3048 {
3049 	struct task_struct *task;
3050 	struct task_struct *leader;
3051 	struct cgroup_subsys_state *css;
3052 	struct cpuset *cs;
3053 	struct cpuset *oldcs = cpuset_attach_old_cs;
3054 	bool cpus_updated, mems_updated;
3055 	bool queue_task_work = false;
3056 
3057 	cgroup_taskset_first(tset, &css);
3058 	cs = css_cs(css);
3059 
3060 	lockdep_assert_cpus_held();	/* see cgroup_attach_lock() */
3061 	mutex_lock(&cpuset_mutex);
3062 	cpus_updated = !cpumask_equal(cs->effective_cpus,
3063 				      oldcs->effective_cpus);
3064 	mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems);
3065 
3066 	/*
3067 	 * In the default hierarchy, enabling cpuset in the child cgroups
3068 	 * will trigger a number of cpuset_attach() calls with no change
3069 	 * in effective cpus and mems. In that case, we can optimize out
3070 	 * by skipping the task iteration and update.
3071 	 */
3072 	if (cpuset_v2() && !cpus_updated && !mems_updated) {
3073 		cpuset_attach_nodemask_to = cs->effective_mems;
3074 		goto out;
3075 	}
3076 
3077 	guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
3078 
3079 	cgroup_taskset_for_each(task, css, tset)
3080 		cpuset_attach_task(cs, task);
3081 
3082 	/*
3083 	 * Change mm for all threadgroup leaders. This is expensive and may
3084 	 * sleep and should be moved outside migration path proper. Skip it
3085 	 * if there is no change in effective_mems and CS_MEMORY_MIGRATE is
3086 	 * not set.
3087 	 */
3088 	cpuset_attach_nodemask_to = cs->effective_mems;
3089 	if (!is_memory_migrate(cs) && !mems_updated)
3090 		goto out;
3091 
3092 	cgroup_taskset_for_each_leader(leader, css, tset) {
3093 		struct mm_struct *mm = get_task_mm(leader);
3094 
3095 		if (mm) {
3096 			mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
3097 
3098 			/*
3099 			 * old_mems_allowed is the same with mems_allowed
3100 			 * here, except if this task is being moved
3101 			 * automatically due to hotplug.  In that case
3102 			 * @mems_allowed has been updated and is empty, so
3103 			 * @old_mems_allowed is the right nodesets that we
3104 			 * migrate mm from.
3105 			 */
3106 			if (is_memory_migrate(cs)) {
3107 				cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
3108 						  &cpuset_attach_nodemask_to);
3109 				queue_task_work = true;
3110 			} else
3111 				mmput(mm);
3112 		}
3113 	}
3114 
3115 out:
3116 	if (queue_task_work)
3117 		schedule_flush_migrate_mm();
3118 	cs->old_mems_allowed = cpuset_attach_nodemask_to;
3119 
3120 	if (cs->nr_migrate_dl_tasks) {
3121 		cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks;
3122 		oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks;
3123 		reset_migrate_dl_data(cs);
3124 	}
3125 
3126 	dec_attach_in_progress_locked(cs);
3127 
3128 	mutex_unlock(&cpuset_mutex);
3129 }
3130 
3131 /*
3132  * Common handling for a write to a "cpus" or "mems" file.
3133  */
3134 ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
3135 				    char *buf, size_t nbytes, loff_t off)
3136 {
3137 	struct cpuset *cs = css_cs(of_css(of));
3138 	struct cpuset *trialcs;
3139 	int retval = -ENODEV;
3140 
3141 	/* root is read-only */
3142 	if (cs == &top_cpuset)
3143 		return -EACCES;
3144 
3145 	buf = strstrip(buf);
3146 	cpuset_full_lock();
3147 	if (!is_cpuset_online(cs))
3148 		goto out_unlock;
3149 
3150 	trialcs = dup_or_alloc_cpuset(cs);
3151 	if (!trialcs) {
3152 		retval = -ENOMEM;
3153 		goto out_unlock;
3154 	}
3155 
3156 	switch (of_cft(of)->private) {
3157 	case FILE_CPULIST:
3158 		retval = update_cpumask(cs, trialcs, buf);
3159 		break;
3160 	case FILE_EXCLUSIVE_CPULIST:
3161 		retval = update_exclusive_cpumask(cs, trialcs, buf);
3162 		break;
3163 	case FILE_MEMLIST:
3164 		retval = update_nodemask(cs, trialcs, buf);
3165 		break;
3166 	default:
3167 		retval = -EINVAL;
3168 		break;
3169 	}
3170 
3171 	free_cpuset(trialcs);
3172 	if (force_sd_rebuild)
3173 		rebuild_sched_domains_locked();
3174 out_unlock:
3175 	cpuset_full_unlock();
3176 	if (of_cft(of)->private == FILE_MEMLIST)
3177 		schedule_flush_migrate_mm();
3178 	return retval ?: nbytes;
3179 }
3180 
3181 /*
3182  * These ascii lists should be read in a single call, by using a user
3183  * buffer large enough to hold the entire map.  If read in smaller
3184  * chunks, there is no guarantee of atomicity.  Since the display format
3185  * used, list of ranges of sequential numbers, is variable length,
3186  * and since these maps can change value dynamically, one could read
3187  * gibberish by doing partial reads while a list was changing.
3188  */
3189 int cpuset_common_seq_show(struct seq_file *sf, void *v)
3190 {
3191 	struct cpuset *cs = css_cs(seq_css(sf));
3192 	cpuset_filetype_t type = seq_cft(sf)->private;
3193 	int ret = 0;
3194 
3195 	spin_lock_irq(&callback_lock);
3196 
3197 	switch (type) {
3198 	case FILE_CPULIST:
3199 		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
3200 		break;
3201 	case FILE_MEMLIST:
3202 		seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
3203 		break;
3204 	case FILE_EFFECTIVE_CPULIST:
3205 		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus));
3206 		break;
3207 	case FILE_EFFECTIVE_MEMLIST:
3208 		seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems));
3209 		break;
3210 	case FILE_EXCLUSIVE_CPULIST:
3211 		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->exclusive_cpus));
3212 		break;
3213 	case FILE_EFFECTIVE_XCPULIST:
3214 		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_xcpus));
3215 		break;
3216 	case FILE_SUBPARTS_CPULIST:
3217 		seq_printf(sf, "%*pbl\n", cpumask_pr_args(subpartitions_cpus));
3218 		break;
3219 	case FILE_ISOLATED_CPULIST:
3220 		seq_printf(sf, "%*pbl\n", cpumask_pr_args(isolated_cpus));
3221 		break;
3222 	default:
3223 		ret = -EINVAL;
3224 	}
3225 
3226 	spin_unlock_irq(&callback_lock);
3227 	return ret;
3228 }
3229 
3230 static int cpuset_partition_show(struct seq_file *seq, void *v)
3231 {
3232 	struct cpuset *cs = css_cs(seq_css(seq));
3233 	const char *err, *type = NULL;
3234 
3235 	switch (cs->partition_root_state) {
3236 	case PRS_ROOT:
3237 		seq_puts(seq, "root\n");
3238 		break;
3239 	case PRS_ISOLATED:
3240 		seq_puts(seq, "isolated\n");
3241 		break;
3242 	case PRS_MEMBER:
3243 		seq_puts(seq, "member\n");
3244 		break;
3245 	case PRS_INVALID_ROOT:
3246 		type = "root";
3247 		fallthrough;
3248 	case PRS_INVALID_ISOLATED:
3249 		if (!type)
3250 			type = "isolated";
3251 		err = perr_strings[READ_ONCE(cs->prs_err)];
3252 		if (err)
3253 			seq_printf(seq, "%s invalid (%s)\n", type, err);
3254 		else
3255 			seq_printf(seq, "%s invalid\n", type);
3256 		break;
3257 	}
3258 	return 0;
3259 }
3260 
3261 static ssize_t cpuset_partition_write(struct kernfs_open_file *of, char *buf,
3262 				     size_t nbytes, loff_t off)
3263 {
3264 	struct cpuset *cs = css_cs(of_css(of));
3265 	int val;
3266 	int retval = -ENODEV;
3267 
3268 	buf = strstrip(buf);
3269 
3270 	if (!strcmp(buf, "root"))
3271 		val = PRS_ROOT;
3272 	else if (!strcmp(buf, "member"))
3273 		val = PRS_MEMBER;
3274 	else if (!strcmp(buf, "isolated"))
3275 		val = PRS_ISOLATED;
3276 	else
3277 		return -EINVAL;
3278 
3279 	cpuset_full_lock();
3280 	if (is_cpuset_online(cs))
3281 		retval = update_prstate(cs, val);
3282 	cpuset_full_unlock();
3283 	return retval ?: nbytes;
3284 }
3285 
3286 /*
3287  * This is currently a minimal set for the default hierarchy. It can be
3288  * expanded later on by migrating more features and control files from v1.
3289  */
3290 static struct cftype dfl_files[] = {
3291 	{
3292 		.name = "cpus",
3293 		.seq_show = cpuset_common_seq_show,
3294 		.write = cpuset_write_resmask,
3295 		.max_write_len = (100U + 6 * NR_CPUS),
3296 		.private = FILE_CPULIST,
3297 		.flags = CFTYPE_NOT_ON_ROOT,
3298 	},
3299 
3300 	{
3301 		.name = "mems",
3302 		.seq_show = cpuset_common_seq_show,
3303 		.write = cpuset_write_resmask,
3304 		.max_write_len = (100U + 6 * MAX_NUMNODES),
3305 		.private = FILE_MEMLIST,
3306 		.flags = CFTYPE_NOT_ON_ROOT,
3307 	},
3308 
3309 	{
3310 		.name = "cpus.effective",
3311 		.seq_show = cpuset_common_seq_show,
3312 		.private = FILE_EFFECTIVE_CPULIST,
3313 	},
3314 
3315 	{
3316 		.name = "mems.effective",
3317 		.seq_show = cpuset_common_seq_show,
3318 		.private = FILE_EFFECTIVE_MEMLIST,
3319 	},
3320 
3321 	{
3322 		.name = "cpus.partition",
3323 		.seq_show = cpuset_partition_show,
3324 		.write = cpuset_partition_write,
3325 		.private = FILE_PARTITION_ROOT,
3326 		.flags = CFTYPE_NOT_ON_ROOT,
3327 		.file_offset = offsetof(struct cpuset, partition_file),
3328 	},
3329 
3330 	{
3331 		.name = "cpus.exclusive",
3332 		.seq_show = cpuset_common_seq_show,
3333 		.write = cpuset_write_resmask,
3334 		.max_write_len = (100U + 6 * NR_CPUS),
3335 		.private = FILE_EXCLUSIVE_CPULIST,
3336 		.flags = CFTYPE_NOT_ON_ROOT,
3337 	},
3338 
3339 	{
3340 		.name = "cpus.exclusive.effective",
3341 		.seq_show = cpuset_common_seq_show,
3342 		.private = FILE_EFFECTIVE_XCPULIST,
3343 		.flags = CFTYPE_NOT_ON_ROOT,
3344 	},
3345 
3346 	{
3347 		.name = "cpus.subpartitions",
3348 		.seq_show = cpuset_common_seq_show,
3349 		.private = FILE_SUBPARTS_CPULIST,
3350 		.flags = CFTYPE_ONLY_ON_ROOT | CFTYPE_DEBUG,
3351 	},
3352 
3353 	{
3354 		.name = "cpus.isolated",
3355 		.seq_show = cpuset_common_seq_show,
3356 		.private = FILE_ISOLATED_CPULIST,
3357 		.flags = CFTYPE_ONLY_ON_ROOT,
3358 	},
3359 
3360 	{ }	/* terminate */
3361 };
3362 
3363 
3364 /**
3365  * cpuset_css_alloc - Allocate a cpuset css
3366  * @parent_css: Parent css of the control group that the new cpuset will be
3367  *              part of
3368  * Return: cpuset css on success, -ENOMEM on failure.
3369  *
3370  * Allocate and initialize a new cpuset css, for non-NULL @parent_css, return
3371  * top cpuset css otherwise.
3372  */
3373 static struct cgroup_subsys_state *
3374 cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
3375 {
3376 	struct cpuset *cs;
3377 
3378 	if (!parent_css)
3379 		return &top_cpuset.css;
3380 
3381 	cs = dup_or_alloc_cpuset(NULL);
3382 	if (!cs)
3383 		return ERR_PTR(-ENOMEM);
3384 
3385 	__set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
3386 	cpuset1_init(cs);
3387 
3388 	/* Set CS_MEMORY_MIGRATE for default hierarchy */
3389 	if (cpuset_v2())
3390 		__set_bit(CS_MEMORY_MIGRATE, &cs->flags);
3391 
3392 	return &cs->css;
3393 }
3394 
3395 static int cpuset_css_online(struct cgroup_subsys_state *css)
3396 {
3397 	struct cpuset *cs = css_cs(css);
3398 	struct cpuset *parent = parent_cs(cs);
3399 
3400 	if (!parent)
3401 		return 0;
3402 
3403 	cpuset_full_lock();
3404 	/*
3405 	 * For v2, clear CS_SCHED_LOAD_BALANCE if parent is isolated
3406 	 */
3407 	if (cpuset_v2() && !is_sched_load_balance(parent))
3408 		clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
3409 
3410 	cpuset_inc();
3411 
3412 	spin_lock_irq(&callback_lock);
3413 	if (is_in_v2_mode()) {
3414 		cpumask_copy(cs->effective_cpus, parent->effective_cpus);
3415 		cs->effective_mems = parent->effective_mems;
3416 	}
3417 	spin_unlock_irq(&callback_lock);
3418 	cpuset1_online_css(css);
3419 
3420 	cpuset_full_unlock();
3421 	return 0;
3422 }
3423 
3424 /*
3425  * If the cpuset being removed has its flag 'sched_load_balance'
3426  * enabled, then simulate turning sched_load_balance off, which
3427  * will call rebuild_sched_domains_locked(). That is not needed
3428  * in the default hierarchy where only changes in partition
3429  * will cause repartitioning.
3430  */
3431 static void cpuset_css_offline(struct cgroup_subsys_state *css)
3432 {
3433 	struct cpuset *cs = css_cs(css);
3434 
3435 	cpuset_full_lock();
3436 	if (!cpuset_v2() && is_sched_load_balance(cs))
3437 		cpuset_update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
3438 
3439 	cpuset_dec();
3440 	cpuset_full_unlock();
3441 }
3442 
3443 /*
3444  * If a dying cpuset has the 'cpus.partition' enabled, turn it off by
3445  * changing it back to member to free its exclusive CPUs back to the pool to
3446  * be used by other online cpusets.
3447  */
3448 static void cpuset_css_killed(struct cgroup_subsys_state *css)
3449 {
3450 	struct cpuset *cs = css_cs(css);
3451 
3452 	cpuset_full_lock();
3453 	/* Reset valid partition back to member */
3454 	if (is_partition_valid(cs))
3455 		update_prstate(cs, PRS_MEMBER);
3456 	cpuset_full_unlock();
3457 }
3458 
3459 static void cpuset_css_free(struct cgroup_subsys_state *css)
3460 {
3461 	struct cpuset *cs = css_cs(css);
3462 
3463 	free_cpuset(cs);
3464 }
3465 
3466 static void cpuset_bind(struct cgroup_subsys_state *root_css)
3467 {
3468 	mutex_lock(&cpuset_mutex);
3469 	spin_lock_irq(&callback_lock);
3470 
3471 	if (is_in_v2_mode()) {
3472 		cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
3473 		cpumask_copy(top_cpuset.effective_xcpus, cpu_possible_mask);
3474 		top_cpuset.mems_allowed = node_possible_map;
3475 	} else {
3476 		cpumask_copy(top_cpuset.cpus_allowed,
3477 			     top_cpuset.effective_cpus);
3478 		top_cpuset.mems_allowed = top_cpuset.effective_mems;
3479 	}
3480 
3481 	spin_unlock_irq(&callback_lock);
3482 	mutex_unlock(&cpuset_mutex);
3483 }
3484 
3485 /*
3486  * In case the child is cloned into a cpuset different from its parent,
3487  * additional checks are done to see if the move is allowed.
3488  */
3489 static int cpuset_can_fork(struct task_struct *task, struct css_set *cset)
3490 {
3491 	struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
3492 	bool same_cs;
3493 	int ret;
3494 
3495 	rcu_read_lock();
3496 	same_cs = (cs == task_cs(current));
3497 	rcu_read_unlock();
3498 
3499 	if (same_cs)
3500 		return 0;
3501 
3502 	lockdep_assert_held(&cgroup_mutex);
3503 	mutex_lock(&cpuset_mutex);
3504 
3505 	/* Check to see if task is allowed in the cpuset */
3506 	ret = cpuset_can_attach_check(cs);
3507 	if (ret)
3508 		goto out_unlock;
3509 
3510 	ret = task_can_attach(task);
3511 	if (ret)
3512 		goto out_unlock;
3513 
3514 	ret = security_task_setscheduler(task);
3515 	if (ret)
3516 		goto out_unlock;
3517 
3518 	/*
3519 	 * Mark attach is in progress.  This makes validate_change() fail
3520 	 * changes which zero cpus/mems_allowed.
3521 	 */
3522 	cs->attach_in_progress++;
3523 out_unlock:
3524 	mutex_unlock(&cpuset_mutex);
3525 	return ret;
3526 }
3527 
3528 static void cpuset_cancel_fork(struct task_struct *task, struct css_set *cset)
3529 {
3530 	struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
3531 	bool same_cs;
3532 
3533 	rcu_read_lock();
3534 	same_cs = (cs == task_cs(current));
3535 	rcu_read_unlock();
3536 
3537 	if (same_cs)
3538 		return;
3539 
3540 	dec_attach_in_progress(cs);
3541 }
3542 
3543 /*
3544  * Make sure the new task conform to the current state of its parent,
3545  * which could have been changed by cpuset just after it inherits the
3546  * state from the parent and before it sits on the cgroup's task list.
3547  */
3548 static void cpuset_fork(struct task_struct *task)
3549 {
3550 	struct cpuset *cs;
3551 	bool same_cs;
3552 
3553 	rcu_read_lock();
3554 	cs = task_cs(task);
3555 	same_cs = (cs == task_cs(current));
3556 	rcu_read_unlock();
3557 
3558 	if (same_cs) {
3559 		if (cs == &top_cpuset)
3560 			return;
3561 
3562 		set_cpus_allowed_ptr(task, current->cpus_ptr);
3563 		task->mems_allowed = current->mems_allowed;
3564 		return;
3565 	}
3566 
3567 	/* CLONE_INTO_CGROUP */
3568 	mutex_lock(&cpuset_mutex);
3569 	guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
3570 	cpuset_attach_task(cs, task);
3571 
3572 	dec_attach_in_progress_locked(cs);
3573 	mutex_unlock(&cpuset_mutex);
3574 }
3575 
3576 struct cgroup_subsys cpuset_cgrp_subsys = {
3577 	.css_alloc	= cpuset_css_alloc,
3578 	.css_online	= cpuset_css_online,
3579 	.css_offline	= cpuset_css_offline,
3580 	.css_killed	= cpuset_css_killed,
3581 	.css_free	= cpuset_css_free,
3582 	.can_attach	= cpuset_can_attach,
3583 	.cancel_attach	= cpuset_cancel_attach,
3584 	.attach		= cpuset_attach,
3585 	.bind		= cpuset_bind,
3586 	.can_fork	= cpuset_can_fork,
3587 	.cancel_fork	= cpuset_cancel_fork,
3588 	.fork		= cpuset_fork,
3589 #ifdef CONFIG_CPUSETS_V1
3590 	.legacy_cftypes	= cpuset1_files,
3591 #endif
3592 	.dfl_cftypes	= dfl_files,
3593 	.early_init	= true,
3594 	.threaded	= true,
3595 };
3596 
3597 /**
3598  * cpuset_init - initialize cpusets at system boot
3599  *
3600  * Description: Initialize top_cpuset
3601  **/
3602 
3603 int __init cpuset_init(void)
3604 {
3605 	BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL));
3606 	BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL));
3607 	BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_xcpus, GFP_KERNEL));
3608 	BUG_ON(!alloc_cpumask_var(&top_cpuset.exclusive_cpus, GFP_KERNEL));
3609 	BUG_ON(!zalloc_cpumask_var(&subpartitions_cpus, GFP_KERNEL));
3610 	BUG_ON(!zalloc_cpumask_var(&isolated_cpus, GFP_KERNEL));
3611 
3612 	cpumask_setall(top_cpuset.cpus_allowed);
3613 	nodes_setall(top_cpuset.mems_allowed);
3614 	cpumask_setall(top_cpuset.effective_cpus);
3615 	cpumask_setall(top_cpuset.effective_xcpus);
3616 	cpumask_setall(top_cpuset.exclusive_cpus);
3617 	nodes_setall(top_cpuset.effective_mems);
3618 
3619 	cpuset1_init(&top_cpuset);
3620 
3621 	BUG_ON(!alloc_cpumask_var(&cpus_attach, GFP_KERNEL));
3622 
3623 	if (housekeeping_enabled(HK_TYPE_DOMAIN_BOOT))
3624 		cpumask_andnot(isolated_cpus, cpu_possible_mask,
3625 			       housekeeping_cpumask(HK_TYPE_DOMAIN_BOOT));
3626 
3627 	return 0;
3628 }
3629 
3630 static void
3631 hotplug_update_tasks(struct cpuset *cs,
3632 		     struct cpumask *new_cpus, nodemask_t *new_mems,
3633 		     bool cpus_updated, bool mems_updated)
3634 {
3635 	/* A partition root is allowed to have empty effective cpus */
3636 	if (cpumask_empty(new_cpus) && !is_partition_valid(cs))
3637 		cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus);
3638 	if (nodes_empty(*new_mems))
3639 		*new_mems = parent_cs(cs)->effective_mems;
3640 
3641 	spin_lock_irq(&callback_lock);
3642 	cpumask_copy(cs->effective_cpus, new_cpus);
3643 	cs->effective_mems = *new_mems;
3644 	spin_unlock_irq(&callback_lock);
3645 
3646 	if (cpus_updated)
3647 		cpuset_update_tasks_cpumask(cs, new_cpus);
3648 	if (mems_updated)
3649 		cpuset_update_tasks_nodemask(cs);
3650 }
3651 
3652 void cpuset_force_rebuild(void)
3653 {
3654 	force_sd_rebuild = true;
3655 }
3656 
3657 /**
3658  * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
3659  * @cs: cpuset in interest
3660  * @tmp: the tmpmasks structure pointer
3661  *
3662  * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
3663  * offline, update @cs accordingly.  If @cs ends up with no CPU or memory,
3664  * all its tasks are moved to the nearest ancestor with both resources.
3665  */
3666 static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
3667 {
3668 	static cpumask_t new_cpus;
3669 	static nodemask_t new_mems;
3670 	bool cpus_updated;
3671 	bool mems_updated;
3672 	bool remote;
3673 	int partcmd = -1;
3674 	struct cpuset *parent;
3675 retry:
3676 	wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
3677 
3678 	mutex_lock(&cpuset_mutex);
3679 
3680 	/*
3681 	 * We have raced with task attaching. We wait until attaching
3682 	 * is finished, so we won't attach a task to an empty cpuset.
3683 	 */
3684 	if (cs->attach_in_progress) {
3685 		mutex_unlock(&cpuset_mutex);
3686 		goto retry;
3687 	}
3688 
3689 	parent = parent_cs(cs);
3690 	compute_effective_cpumask(&new_cpus, cs, parent);
3691 	nodes_and(new_mems, cs->mems_allowed, parent->effective_mems);
3692 
3693 	if (!tmp || !cs->partition_root_state)
3694 		goto update_tasks;
3695 
3696 	/*
3697 	 * Compute effective_cpus for valid partition root, may invalidate
3698 	 * child partition roots if necessary.
3699 	 */
3700 	remote = is_remote_partition(cs);
3701 	if (remote || (is_partition_valid(cs) && is_partition_valid(parent)))
3702 		compute_partition_effective_cpumask(cs, &new_cpus);
3703 
3704 	if (remote && (cpumask_empty(subpartitions_cpus) ||
3705 			(cpumask_empty(&new_cpus) &&
3706 			 partition_is_populated(cs, NULL)))) {
3707 		cs->prs_err = PERR_HOTPLUG;
3708 		remote_partition_disable(cs, tmp);
3709 		compute_effective_cpumask(&new_cpus, cs, parent);
3710 		remote = false;
3711 	}
3712 
3713 	/*
3714 	 * Force the partition to become invalid if either one of
3715 	 * the following conditions hold:
3716 	 * 1) empty effective cpus but not valid empty partition.
3717 	 * 2) parent is invalid or doesn't grant any cpus to child
3718 	 *    partitions.
3719 	 * 3) subpartitions_cpus is empty.
3720 	 */
3721 	if (is_local_partition(cs) &&
3722 	    (!is_partition_valid(parent) ||
3723 	     tasks_nocpu_error(parent, cs, &new_cpus) ||
3724 	     cpumask_empty(subpartitions_cpus)))
3725 		partcmd = partcmd_invalidate;
3726 	/*
3727 	 * On the other hand, an invalid partition root may be transitioned
3728 	 * back to a regular one with a non-empty effective xcpus.
3729 	 */
3730 	else if (is_partition_valid(parent) && is_partition_invalid(cs) &&
3731 		 !cpumask_empty(cs->effective_xcpus))
3732 		partcmd = partcmd_update;
3733 
3734 	if (partcmd >= 0) {
3735 		update_parent_effective_cpumask(cs, partcmd, NULL, tmp);
3736 		if ((partcmd == partcmd_invalidate) || is_partition_valid(cs)) {
3737 			compute_partition_effective_cpumask(cs, &new_cpus);
3738 			cpuset_force_rebuild();
3739 		}
3740 	}
3741 
3742 update_tasks:
3743 	cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
3744 	mems_updated = !nodes_equal(new_mems, cs->effective_mems);
3745 	if (!cpus_updated && !mems_updated)
3746 		goto unlock;	/* Hotplug doesn't affect this cpuset */
3747 
3748 	if (mems_updated)
3749 		check_insane_mems_config(&new_mems);
3750 
3751 	if (is_in_v2_mode())
3752 		hotplug_update_tasks(cs, &new_cpus, &new_mems,
3753 				     cpus_updated, mems_updated);
3754 	else
3755 		cpuset1_hotplug_update_tasks(cs, &new_cpus, &new_mems,
3756 					    cpus_updated, mems_updated);
3757 
3758 unlock:
3759 	mutex_unlock(&cpuset_mutex);
3760 }
3761 
3762 /**
3763  * cpuset_handle_hotplug - handle CPU/memory hot{,un}plug for a cpuset
3764  *
3765  * This function is called after either CPU or memory configuration has
3766  * changed and updates cpuset accordingly.  The top_cpuset is always
3767  * synchronized to cpu_active_mask and N_MEMORY, which is necessary in
3768  * order to make cpusets transparent (of no affect) on systems that are
3769  * actively using CPU hotplug but making no active use of cpusets.
3770  *
3771  * Non-root cpusets are only affected by offlining.  If any CPUs or memory
3772  * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on
3773  * all descendants.
3774  *
3775  * Note that CPU offlining during suspend is ignored.  We don't modify
3776  * cpusets across suspend/resume cycles at all.
3777  *
3778  * CPU / memory hotplug is handled synchronously.
3779  */
3780 static void cpuset_handle_hotplug(void)
3781 {
3782 	static cpumask_t new_cpus;
3783 	static nodemask_t new_mems;
3784 	bool cpus_updated, mems_updated;
3785 	bool on_dfl = is_in_v2_mode();
3786 	struct tmpmasks tmp, *ptmp = NULL;
3787 
3788 	if (on_dfl && !alloc_tmpmasks(&tmp))
3789 		ptmp = &tmp;
3790 
3791 	lockdep_assert_cpus_held();
3792 	mutex_lock(&cpuset_mutex);
3793 
3794 	/* fetch the available cpus/mems and find out which changed how */
3795 	cpumask_copy(&new_cpus, cpu_active_mask);
3796 	new_mems = node_states[N_MEMORY];
3797 
3798 	/*
3799 	 * If subpartitions_cpus is populated, it is likely that the check
3800 	 * below will produce a false positive on cpus_updated when the cpu
3801 	 * list isn't changed. It is extra work, but it is better to be safe.
3802 	 */
3803 	cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus) ||
3804 		       !cpumask_empty(subpartitions_cpus);
3805 	mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);
3806 
3807 	/* For v1, synchronize cpus_allowed to cpu_active_mask */
3808 	if (cpus_updated) {
3809 		cpuset_force_rebuild();
3810 		spin_lock_irq(&callback_lock);
3811 		if (!on_dfl)
3812 			cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
3813 		/*
3814 		 * Make sure that CPUs allocated to child partitions
3815 		 * do not show up in effective_cpus. If no CPU is left,
3816 		 * we clear the subpartitions_cpus & let the child partitions
3817 		 * fight for the CPUs again.
3818 		 */
3819 		if (!cpumask_empty(subpartitions_cpus)) {
3820 			if (cpumask_subset(&new_cpus, subpartitions_cpus)) {
3821 				cpumask_clear(subpartitions_cpus);
3822 			} else {
3823 				cpumask_andnot(&new_cpus, &new_cpus,
3824 					       subpartitions_cpus);
3825 			}
3826 		}
3827 		cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
3828 		spin_unlock_irq(&callback_lock);
3829 		/* we don't mess with cpumasks of tasks in top_cpuset */
3830 	}
3831 
3832 	/* synchronize mems_allowed to N_MEMORY */
3833 	if (mems_updated) {
3834 		spin_lock_irq(&callback_lock);
3835 		if (!on_dfl)
3836 			top_cpuset.mems_allowed = new_mems;
3837 		top_cpuset.effective_mems = new_mems;
3838 		spin_unlock_irq(&callback_lock);
3839 		cpuset_update_tasks_nodemask(&top_cpuset);
3840 	}
3841 
3842 	mutex_unlock(&cpuset_mutex);
3843 
3844 	/* if cpus or mems changed, we need to propagate to descendants */
3845 	if (cpus_updated || mems_updated) {
3846 		struct cpuset *cs;
3847 		struct cgroup_subsys_state *pos_css;
3848 
3849 		rcu_read_lock();
3850 		cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
3851 			if (cs == &top_cpuset || !css_tryget_online(&cs->css))
3852 				continue;
3853 			rcu_read_unlock();
3854 
3855 			cpuset_hotplug_update_tasks(cs, ptmp);
3856 
3857 			rcu_read_lock();
3858 			css_put(&cs->css);
3859 		}
3860 		rcu_read_unlock();
3861 	}
3862 
3863 	/* rebuild sched domains if necessary */
3864 	if (force_sd_rebuild)
3865 		rebuild_sched_domains_cpuslocked();
3866 
3867 	free_tmpmasks(ptmp);
3868 }
3869 
3870 void cpuset_update_active_cpus(void)
3871 {
3872 	/*
3873 	 * We're inside cpu hotplug critical region which usually nests
3874 	 * inside cgroup synchronization.  Bounce actual hotplug processing
3875 	 * to a work item to avoid reverse locking order.
3876 	 */
3877 	cpuset_handle_hotplug();
3878 }
3879 
3880 /*
3881  * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
3882  * Call this routine anytime after node_states[N_MEMORY] changes.
3883  * See cpuset_update_active_cpus() for CPU hotplug handling.
3884  */
3885 static int cpuset_track_online_nodes(struct notifier_block *self,
3886 				unsigned long action, void *arg)
3887 {
3888 	cpuset_handle_hotplug();
3889 	return NOTIFY_OK;
3890 }
3891 
3892 /**
3893  * cpuset_init_smp - initialize cpus_allowed
3894  *
3895  * Description: Finish top cpuset after cpu, node maps are initialized
3896  */
3897 void __init cpuset_init_smp(void)
3898 {
3899 	/*
3900 	 * cpus_allowd/mems_allowed set to v2 values in the initial
3901 	 * cpuset_bind() call will be reset to v1 values in another
3902 	 * cpuset_bind() call when v1 cpuset is mounted.
3903 	 */
3904 	top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;
3905 
3906 	cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask);
3907 	top_cpuset.effective_mems = node_states[N_MEMORY];
3908 
3909 	hotplug_node_notifier(cpuset_track_online_nodes, CPUSET_CALLBACK_PRI);
3910 
3911 	cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
3912 	BUG_ON(!cpuset_migrate_mm_wq);
3913 }
3914 
3915 /*
3916  * Return cpus_allowed mask from a task's cpuset.
3917  */
3918 static void __cpuset_cpus_allowed_locked(struct task_struct *tsk, struct cpumask *pmask)
3919 {
3920 	struct cpuset *cs;
3921 
3922 	cs = task_cs(tsk);
3923 	if (cs != &top_cpuset)
3924 		guarantee_active_cpus(tsk, pmask);
3925 	/*
3926 	 * Tasks in the top cpuset won't get update to their cpumasks
3927 	 * when a hotplug online/offline event happens. So we include all
3928 	 * offline cpus in the allowed cpu list.
3929 	 */
3930 	if ((cs == &top_cpuset) || cpumask_empty(pmask)) {
3931 		const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
3932 
3933 		/*
3934 		 * We first exclude cpus allocated to partitions. If there is no
3935 		 * allowable online cpu left, we fall back to all possible cpus.
3936 		 */
3937 		cpumask_andnot(pmask, possible_mask, subpartitions_cpus);
3938 		if (!cpumask_intersects(pmask, cpu_active_mask))
3939 			cpumask_copy(pmask, possible_mask);
3940 	}
3941 }
3942 
3943 /**
3944  * cpuset_cpus_allowed_locked - return cpus_allowed mask from a task's cpuset.
3945  * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
3946  * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
3947  *
3948  * Similir to cpuset_cpus_allowed() except that the caller must have acquired
3949  * cpuset_mutex.
3950  */
3951 void cpuset_cpus_allowed_locked(struct task_struct *tsk, struct cpumask *pmask)
3952 {
3953 	lockdep_assert_cpuset_lock_held();
3954 	__cpuset_cpus_allowed_locked(tsk, pmask);
3955 }
3956 
3957 /**
3958  * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
3959  * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
3960  * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
3961  *
3962  * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
3963  * attached to the specified @tsk.  Guaranteed to return some non-empty
3964  * subset of cpu_active_mask, even if this means going outside the
3965  * tasks cpuset, except when the task is in the top cpuset.
3966  **/
3967 
3968 void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
3969 {
3970 	unsigned long flags;
3971 
3972 	spin_lock_irqsave(&callback_lock, flags);
3973 	__cpuset_cpus_allowed_locked(tsk, pmask);
3974 	spin_unlock_irqrestore(&callback_lock, flags);
3975 }
3976 
3977 /**
3978  * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
3979  * @tsk: pointer to task_struct with which the scheduler is struggling
3980  *
3981  * Description: In the case that the scheduler cannot find an allowed cpu in
3982  * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy
3983  * mode however, this value is the same as task_cs(tsk)->effective_cpus,
3984  * which will not contain a sane cpumask during cases such as cpu hotplugging.
3985  * This is the absolute last resort for the scheduler and it is only used if
3986  * _every_ other avenue has been traveled.
3987  *
3988  * Returns true if the affinity of @tsk was changed, false otherwise.
3989  **/
3990 
3991 bool cpuset_cpus_allowed_fallback(struct task_struct *tsk)
3992 {
3993 	const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
3994 	const struct cpumask *cs_mask;
3995 	bool changed = false;
3996 
3997 	rcu_read_lock();
3998 	cs_mask = task_cs(tsk)->cpus_allowed;
3999 	if (is_in_v2_mode() && cpumask_subset(cs_mask, possible_mask)) {
4000 		set_cpus_allowed_force(tsk, cs_mask);
4001 		changed = true;
4002 	}
4003 	rcu_read_unlock();
4004 
4005 	/*
4006 	 * We own tsk->cpus_allowed, nobody can change it under us.
4007 	 *
4008 	 * But we used cs && cs->cpus_allowed lockless and thus can
4009 	 * race with cgroup_attach_task() or update_cpumask() and get
4010 	 * the wrong tsk->cpus_allowed. However, both cases imply the
4011 	 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
4012 	 * which takes task_rq_lock().
4013 	 *
4014 	 * If we are called after it dropped the lock we must see all
4015 	 * changes in tsk_cs()->cpus_allowed. Otherwise we can temporary
4016 	 * set any mask even if it is not right from task_cs() pov,
4017 	 * the pending set_cpus_allowed_ptr() will fix things.
4018 	 *
4019 	 * select_fallback_rq() will fix things ups and set cpu_possible_mask
4020 	 * if required.
4021 	 */
4022 	return changed;
4023 }
4024 
4025 void __init cpuset_init_current_mems_allowed(void)
4026 {
4027 	nodes_setall(current->mems_allowed);
4028 }
4029 
4030 /**
4031  * cpuset_mems_allowed - return mems_allowed mask from a tasks cpuset.
4032  * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
4033  *
4034  * Description: Returns the nodemask_t mems_allowed of the cpuset
4035  * attached to the specified @tsk.  Guaranteed to return some non-empty
4036  * subset of node_states[N_MEMORY], even if this means going outside the
4037  * tasks cpuset.
4038  **/
4039 
4040 nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
4041 {
4042 	nodemask_t mask;
4043 	unsigned long flags;
4044 
4045 	spin_lock_irqsave(&callback_lock, flags);
4046 	guarantee_online_mems(task_cs(tsk), &mask);
4047 	spin_unlock_irqrestore(&callback_lock, flags);
4048 
4049 	return mask;
4050 }
4051 
4052 /**
4053  * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
4054  * @nodemask: the nodemask to be checked
4055  *
4056  * Are any of the nodes in the nodemask allowed in current->mems_allowed?
4057  */
4058 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
4059 {
4060 	return nodes_intersects(*nodemask, current->mems_allowed);
4061 }
4062 
4063 /*
4064  * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
4065  * mem_hardwall ancestor to the specified cpuset.  Call holding
4066  * callback_lock.  If no ancestor is mem_exclusive or mem_hardwall
4067  * (an unusual configuration), then returns the root cpuset.
4068  */
4069 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
4070 {
4071 	while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
4072 		cs = parent_cs(cs);
4073 	return cs;
4074 }
4075 
4076 /*
4077  * cpuset_current_node_allowed - Can current task allocate on a memory node?
4078  * @node: is this an allowed node?
4079  * @gfp_mask: memory allocation flags
4080  *
4081  * If we're in interrupt, yes, we can always allocate.  If @node is set in
4082  * current's mems_allowed, yes.  If it's not a __GFP_HARDWALL request and this
4083  * node is set in the nearest hardwalled cpuset ancestor to current's cpuset,
4084  * yes.  If current has access to memory reserves as an oom victim, yes.
4085  * Otherwise, no.
4086  *
4087  * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
4088  * and do not allow allocations outside the current tasks cpuset
4089  * unless the task has been OOM killed.
4090  * GFP_KERNEL allocations are not so marked, so can escape to the
4091  * nearest enclosing hardwalled ancestor cpuset.
4092  *
4093  * Scanning up parent cpusets requires callback_lock.  The
4094  * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
4095  * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
4096  * current tasks mems_allowed came up empty on the first pass over
4097  * the zonelist.  So only GFP_KERNEL allocations, if all nodes in the
4098  * cpuset are short of memory, might require taking the callback_lock.
4099  *
4100  * The first call here from mm/page_alloc:get_page_from_freelist()
4101  * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
4102  * so no allocation on a node outside the cpuset is allowed (unless
4103  * in interrupt, of course).
4104  *
4105  * The second pass through get_page_from_freelist() doesn't even call
4106  * here for GFP_ATOMIC calls.  For those calls, the __alloc_pages()
4107  * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
4108  * in alloc_flags.  That logic and the checks below have the combined
4109  * affect that:
4110  *	in_interrupt - any node ok (current task context irrelevant)
4111  *	GFP_ATOMIC   - any node ok
4112  *	tsk_is_oom_victim   - any node ok
4113  *	GFP_KERNEL   - any node in enclosing hardwalled cpuset ok
4114  *	GFP_USER     - only nodes in current tasks mems allowed ok.
4115  */
4116 bool cpuset_current_node_allowed(int node, gfp_t gfp_mask)
4117 {
4118 	struct cpuset *cs;		/* current cpuset ancestors */
4119 	bool allowed;			/* is allocation in zone z allowed? */
4120 	unsigned long flags;
4121 
4122 	if (in_interrupt())
4123 		return true;
4124 	if (node_isset(node, current->mems_allowed))
4125 		return true;
4126 	/*
4127 	 * Allow tasks that have access to memory reserves because they have
4128 	 * been OOM killed to get memory anywhere.
4129 	 */
4130 	if (unlikely(tsk_is_oom_victim(current)))
4131 		return true;
4132 	if (gfp_mask & __GFP_HARDWALL)	/* If hardwall request, stop here */
4133 		return false;
4134 
4135 	if (current->flags & PF_EXITING) /* Let dying task have memory */
4136 		return true;
4137 
4138 	/* Not hardwall and node outside mems_allowed: scan up cpusets */
4139 	spin_lock_irqsave(&callback_lock, flags);
4140 
4141 	cs = nearest_hardwall_ancestor(task_cs(current));
4142 	allowed = node_isset(node, cs->mems_allowed);
4143 
4144 	spin_unlock_irqrestore(&callback_lock, flags);
4145 	return allowed;
4146 }
4147 
4148 bool cpuset_node_allowed(struct cgroup *cgroup, int nid)
4149 {
4150 	struct cgroup_subsys_state *css;
4151 	struct cpuset *cs;
4152 	bool allowed;
4153 
4154 	/*
4155 	 * In v1, mem_cgroup and cpuset are unlikely in the same hierarchy
4156 	 * and mems_allowed is likely to be empty even if we could get to it,
4157 	 * so return true to avoid taking a global lock on the empty check.
4158 	 */
4159 	if (!cpuset_v2())
4160 		return true;
4161 
4162 	css = cgroup_get_e_css(cgroup, &cpuset_cgrp_subsys);
4163 	if (!css)
4164 		return true;
4165 
4166 	/*
4167 	 * Normally, accessing effective_mems would require the cpuset_mutex
4168 	 * or callback_lock - but node_isset is atomic and the reference
4169 	 * taken via cgroup_get_e_css is sufficient to protect css.
4170 	 *
4171 	 * Since this interface is intended for use by migration paths, we
4172 	 * relax locking here to avoid taking global locks - while accepting
4173 	 * there may be rare scenarios where the result may be innaccurate.
4174 	 *
4175 	 * Reclaim and migration are subject to these same race conditions, and
4176 	 * cannot make strong isolation guarantees, so this is acceptable.
4177 	 */
4178 	cs = container_of(css, struct cpuset, css);
4179 	allowed = node_isset(nid, cs->effective_mems);
4180 	css_put(css);
4181 	return allowed;
4182 }
4183 
4184 /**
4185  * cpuset_spread_node() - On which node to begin search for a page
4186  * @rotor: round robin rotor
4187  *
4188  * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
4189  * tasks in a cpuset with is_spread_page or is_spread_slab set),
4190  * and if the memory allocation used cpuset_mem_spread_node()
4191  * to determine on which node to start looking, as it will for
4192  * certain page cache or slab cache pages such as used for file
4193  * system buffers and inode caches, then instead of starting on the
4194  * local node to look for a free page, rather spread the starting
4195  * node around the tasks mems_allowed nodes.
4196  *
4197  * We don't have to worry about the returned node being offline
4198  * because "it can't happen", and even if it did, it would be ok.
4199  *
4200  * The routines calling guarantee_online_mems() are careful to
4201  * only set nodes in task->mems_allowed that are online.  So it
4202  * should not be possible for the following code to return an
4203  * offline node.  But if it did, that would be ok, as this routine
4204  * is not returning the node where the allocation must be, only
4205  * the node where the search should start.  The zonelist passed to
4206  * __alloc_pages() will include all nodes.  If the slab allocator
4207  * is passed an offline node, it will fall back to the local node.
4208  * See kmem_cache_alloc_node().
4209  */
4210 static int cpuset_spread_node(int *rotor)
4211 {
4212 	return *rotor = next_node_in(*rotor, current->mems_allowed);
4213 }
4214 
4215 /**
4216  * cpuset_mem_spread_node() - On which node to begin search for a file page
4217  */
4218 int cpuset_mem_spread_node(void)
4219 {
4220 	if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
4221 		current->cpuset_mem_spread_rotor =
4222 			node_random(&current->mems_allowed);
4223 
4224 	return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
4225 }
4226 
4227 /**
4228  * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
4229  * @tsk1: pointer to task_struct of some task.
4230  * @tsk2: pointer to task_struct of some other task.
4231  *
4232  * Description: Return true if @tsk1's mems_allowed intersects the
4233  * mems_allowed of @tsk2.  Used by the OOM killer to determine if
4234  * one of the task's memory usage might impact the memory available
4235  * to the other.
4236  **/
4237 
4238 int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
4239 				   const struct task_struct *tsk2)
4240 {
4241 	return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
4242 }
4243 
4244 /**
4245  * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed
4246  *
4247  * Description: Prints current's name, cpuset name, and cached copy of its
4248  * mems_allowed to the kernel log.
4249  */
4250 void cpuset_print_current_mems_allowed(void)
4251 {
4252 	struct cgroup *cgrp;
4253 
4254 	rcu_read_lock();
4255 
4256 	cgrp = task_cs(current)->css.cgroup;
4257 	pr_cont(",cpuset=");
4258 	pr_cont_cgroup_name(cgrp);
4259 	pr_cont(",mems_allowed=%*pbl",
4260 		nodemask_pr_args(&current->mems_allowed));
4261 
4262 	rcu_read_unlock();
4263 }
4264 
4265 /* Display task mems_allowed in /proc/<pid>/status file. */
4266 void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
4267 {
4268 	seq_printf(m, "Mems_allowed:\t%*pb\n",
4269 		   nodemask_pr_args(&task->mems_allowed));
4270 	seq_printf(m, "Mems_allowed_list:\t%*pbl\n",
4271 		   nodemask_pr_args(&task->mems_allowed));
4272 }
4273