xref: /linux/kernel/cgroup/cpuset.c (revision 3b3bea6d4b9c162f9e555905d96b8c1da67ecd5b)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  kernel/cpuset.c
4  *
5  *  Processor and Memory placement constraints for sets of tasks.
6  *
7  *  Copyright (C) 2003 BULL SA.
8  *  Copyright (C) 2004-2007 Silicon Graphics, Inc.
9  *  Copyright (C) 2006 Google, Inc
10  *
11  *  Portions derived from Patrick Mochel's sysfs code.
12  *  sysfs is Copyright (c) 2001-3 Patrick Mochel
13  *
14  *  2003-10-10 Written by Simon Derr.
15  *  2003-10-22 Updates by Stephen Hemminger.
16  *  2004 May-July Rework by Paul Jackson.
17  *  2006 Rework by Paul Menage to use generic cgroups
18  *  2008 Rework of the scheduler domains and CPU hotplug handling
19  *       by Max Krasnyansky
20  */
21 #include "cpuset-internal.h"
22 
23 #include <linux/init.h>
24 #include <linux/interrupt.h>
25 #include <linux/kernel.h>
26 #include <linux/mempolicy.h>
27 #include <linux/mm.h>
28 #include <linux/memory.h>
29 #include <linux/rcupdate.h>
30 #include <linux/sched.h>
31 #include <linux/sched/deadline.h>
32 #include <linux/sched/mm.h>
33 #include <linux/sched/task.h>
34 #include <linux/security.h>
35 #include <linux/oom.h>
36 #include <linux/sched/isolation.h>
37 #include <linux/wait.h>
38 #include <linux/workqueue.h>
39 #include <linux/task_work.h>
40 
41 DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
42 DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
43 
44 /*
45  * There could be abnormal cpuset configurations for cpu or memory
46  * node binding, add this key to provide a quick low-cost judgment
47  * of the situation.
48  */
49 DEFINE_STATIC_KEY_FALSE(cpusets_insane_config_key);
50 
51 static const char * const perr_strings[] = {
52 	[PERR_INVCPUS]   = "Invalid cpu list in cpuset.cpus.exclusive",
53 	[PERR_INVPARENT] = "Parent is an invalid partition root",
54 	[PERR_NOTPART]   = "Parent is not a partition root",
55 	[PERR_NOTEXCL]   = "Cpu list in cpuset.cpus not exclusive",
56 	[PERR_NOCPUS]    = "Parent unable to distribute cpu downstream",
57 	[PERR_HOTPLUG]   = "No cpu available due to hotplug",
58 	[PERR_CPUSEMPTY] = "cpuset.cpus and cpuset.cpus.exclusive are empty",
59 	[PERR_HKEEPING]  = "partition config conflicts with housekeeping setup",
60 	[PERR_ACCESS]    = "Enable partition not permitted",
61 	[PERR_REMOTE]    = "Have remote partition underneath",
62 };
63 
64 /*
65  * CPUSET Locking Convention
66  * -------------------------
67  *
68  * Below are the four global/local locks guarding cpuset structures in lock
69  * acquisition order:
70  *  - cpuset_top_mutex
71  *  - cpu_hotplug_lock (cpus_read_lock/cpus_write_lock)
72  *  - cpuset_mutex
73  *  - callback_lock (raw spinlock)
74  *
75  * As cpuset will now indirectly flush a number of different workqueues in
76  * housekeeping_update() to update housekeeping cpumasks when the set of
77  * isolated CPUs is going to be changed, it may be vulnerable to deadlock
78  * if we hold cpus_read_lock while calling into housekeeping_update().
79  *
80  * The first cpuset_top_mutex will be held except when calling into
81  * cpuset_handle_hotplug() from the CPU hotplug code where cpus_write_lock
82  * and cpuset_mutex will be held instead. The main purpose of this mutex
83  * is to prevent regular cpuset control file write actions from interfering
84  * with the call to housekeeping_update(), though CPU hotplug operation can
85  * still happen in parallel. This mutex also provides protection for some
86  * internal variables.
87  *
88  * A task must hold all the remaining three locks to modify externally visible
89  * or used fields of cpusets, though some of the internally used cpuset fields
90  * and internal variables can be modified without holding callback_lock. If only
91  * reliable read access of the externally used fields are needed, a task can
92  * hold either cpuset_mutex or callback_lock which are exposed to other
93  * external subsystems.
94  *
95  * If a task holds cpu_hotplug_lock and cpuset_mutex, it blocks others,
96  * ensuring that it is the only task able to also acquire callback_lock and
97  * be able to modify cpusets.  It can perform various checks on the cpuset
98  * structure first, knowing nothing will change. It can also allocate memory
99  * without holding callback_lock. While it is performing these checks, various
100  * callback routines can briefly acquire callback_lock to query cpusets.  Once
101  * it is ready to make the changes, it takes callback_lock, blocking everyone
102  * else.
103  *
104  * Calls to the kernel memory allocator cannot be made while holding
105  * callback_lock which is a spinlock, as the memory allocator may sleep or
106  * call back into cpuset code and acquire callback_lock.
107  *
108  * Now, the task_struct fields mems_allowed and mempolicy may be changed
109  * by other task, we use alloc_lock in the task_struct fields to protect
110  * them.
111  *
112  * The cpuset_common_seq_show() handlers only hold callback_lock across
113  * small pieces of code, such as when reading out possibly multi-word
114  * cpumasks and nodemasks.
115  */
116 
117 static DEFINE_MUTEX(cpuset_top_mutex);
118 static DEFINE_MUTEX(cpuset_mutex);
119 
120 /*
121  * File level internal variables below follow one of the following exclusion
122  * rules.
123  *
124  * RWCS: Read/write-able by holding either cpus_write_lock (and optionally
125  *	 cpuset_mutex) or both cpus_read_lock and cpuset_mutex.
126  *
127  * CSCB: Readable by holding either cpuset_mutex or callback_lock. Writable
128  *	 by holding both cpuset_mutex and callback_lock.
129  *
130  * T:	 Read/write-able by holding the cpuset_top_mutex.
131  */
132 
133 /*
134  * For local partitions, update to subpartitions_cpus & isolated_cpus is done
135  * in update_parent_effective_cpumask(). For remote partitions, it is done in
136  * the remote_partition_*() and remote_cpus_update() helpers.
137  */
138 /*
139  * Exclusive CPUs distributed out to local or remote sub-partitions of
140  * top_cpuset
141  */
142 static cpumask_var_t	subpartitions_cpus;	/* RWCS */
143 
144 /*
145  * Exclusive CPUs in isolated partitions (shown in cpuset.cpus.isolated)
146  */
147 static cpumask_var_t	isolated_cpus;		/* CSCB */
148 
149 /*
150  * Set if housekeeping cpumasks are to be updated.
151  */
152 static bool		update_housekeeping;	/* RWCS */
153 
154 /*
155  * Copy of isolated_cpus to be passed to housekeeping_update()
156  */
157 static cpumask_var_t	isolated_hk_cpus;	/* T */
158 
159 /*
160  * A flag to force sched domain rebuild at the end of an operation.
161  * It can be set in
162  *  - update_partition_sd_lb()
163  *  - update_cpumasks_hier()
164  *  - cpuset_update_flag()
165  *  - cpuset_hotplug_update_tasks()
166  *  - cpuset_handle_hotplug()
167  *
168  * Protected by cpuset_mutex (with cpus_read_lock held) or cpus_write_lock.
169  *
170  * Note that update_relax_domain_level() in cpuset-v1.c can still call
171  * rebuild_sched_domains_locked() directly without using this flag.
172  */
173 static bool force_sd_rebuild;			/* RWCS */
174 
175 /*
176  * Partition root states:
177  *
178  *   0 - member (not a partition root)
179  *   1 - partition root
180  *   2 - partition root without load balancing (isolated)
181  *  -1 - invalid partition root
182  *  -2 - invalid isolated partition root
183  *
184  *  There are 2 types of partitions - local or remote. Local partitions are
185  *  those whose parents are partition root themselves. Setting of
186  *  cpuset.cpus.exclusive are optional in setting up local partitions.
187  *  Remote partitions are those whose parents are not partition roots. Passing
188  *  down exclusive CPUs by setting cpuset.cpus.exclusive along its ancestor
189  *  nodes are mandatory in creating a remote partition.
190  *
191  *  For simplicity, a local partition can be created under a local or remote
192  *  partition but a remote partition cannot have any partition root in its
193  *  ancestor chain except the cgroup root.
194  *
195  *  A valid partition can be formed by setting exclusive_cpus or cpus_allowed
196  *  if exclusive_cpus is not set. In the case of partition with empty
197  *  exclusive_cpus, all the conflicting exclusive CPUs specified in the
198  *  following cpumasks of sibling cpusets will be removed from its
199  *  cpus_allowed in determining its effective_xcpus.
200  *  - effective_xcpus
201  *  - exclusive_cpus
202  *
203  *  The "cpuset.cpus.exclusive" control file should be used for setting up
204  *  partition if the users want to get as many CPUs as possible.
205  */
206 #define PRS_MEMBER		0
207 #define PRS_ROOT		1
208 #define PRS_ISOLATED		2
209 #define PRS_INVALID_ROOT	-1
210 #define PRS_INVALID_ISOLATED	-2
211 
212 /*
213  * Temporary cpumasks for working with partitions that are passed among
214  * functions to avoid memory allocation in inner functions.
215  */
216 struct tmpmasks {
217 	cpumask_var_t addmask, delmask;	/* For partition root */
218 	cpumask_var_t new_cpus;		/* For update_cpumasks_hier() */
219 };
220 
inc_dl_tasks_cs(struct task_struct * p)221 void inc_dl_tasks_cs(struct task_struct *p)
222 {
223 	struct cpuset *cs = task_cs(p);
224 
225 	cs->nr_deadline_tasks++;
226 }
227 
dec_dl_tasks_cs(struct task_struct * p)228 void dec_dl_tasks_cs(struct task_struct *p)
229 {
230 	struct cpuset *cs = task_cs(p);
231 
232 	cs->nr_deadline_tasks--;
233 }
234 
is_partition_valid(const struct cpuset * cs)235 static inline bool is_partition_valid(const struct cpuset *cs)
236 {
237 	return cs->partition_root_state > 0;
238 }
239 
is_partition_invalid(const struct cpuset * cs)240 static inline bool is_partition_invalid(const struct cpuset *cs)
241 {
242 	return cs->partition_root_state < 0;
243 }
244 
cs_is_member(const struct cpuset * cs)245 static inline bool cs_is_member(const struct cpuset *cs)
246 {
247 	return cs->partition_root_state == PRS_MEMBER;
248 }
249 
250 /*
251  * Callers should hold callback_lock to modify partition_root_state.
252  */
make_partition_invalid(struct cpuset * cs)253 static inline void make_partition_invalid(struct cpuset *cs)
254 {
255 	if (cs->partition_root_state > 0)
256 		cs->partition_root_state = -cs->partition_root_state;
257 }
258 
259 /*
260  * Send notification event of whenever partition_root_state changes.
261  */
notify_partition_change(struct cpuset * cs,int old_prs)262 static inline void notify_partition_change(struct cpuset *cs, int old_prs)
263 {
264 	if (old_prs == cs->partition_root_state)
265 		return;
266 	cgroup_file_notify(&cs->partition_file);
267 
268 	/* Reset prs_err if not invalid */
269 	if (is_partition_valid(cs))
270 		WRITE_ONCE(cs->prs_err, PERR_NONE);
271 }
272 
273 /*
274  * The top_cpuset is always synchronized to cpu_active_mask and we should avoid
275  * using cpu_online_mask as much as possible. An active CPU is always an online
276  * CPU, but not vice versa. cpu_active_mask and cpu_online_mask can differ
277  * during hotplug operations. A CPU is marked active at the last stage of CPU
278  * bringup (CPUHP_AP_ACTIVE). It is also the stage where cpuset hotplug code
279  * will be called to update the sched domains so that the scheduler can move
280  * a normal task to a newly active CPU or remove tasks away from a newly
281  * inactivated CPU. The online bit is set much earlier in the CPU bringup
282  * process and cleared much later in CPU teardown.
283  *
284  * If cpu_online_mask is used while a hotunplug operation is happening in
285  * parallel, we may leave an offline CPU in cpu_allowed or some other masks.
286  */
287 struct cpuset top_cpuset = {
288 	.flags = BIT(CS_CPU_EXCLUSIVE) |
289 		 BIT(CS_MEM_EXCLUSIVE) | BIT(CS_SCHED_LOAD_BALANCE),
290 	.partition_root_state = PRS_ROOT,
291 	.dl_bw_cpu = -1,
292 };
293 
294 /**
295  * cpuset_lock - Acquire the global cpuset mutex
296  *
297  * This locks the global cpuset mutex to prevent modifications to cpuset
298  * hierarchy and configurations. This helper is not enough to make modification.
299  */
cpuset_lock(void)300 void cpuset_lock(void)
301 {
302 	mutex_lock(&cpuset_mutex);
303 }
304 
cpuset_unlock(void)305 void cpuset_unlock(void)
306 {
307 	mutex_unlock(&cpuset_mutex);
308 }
309 
lockdep_assert_cpuset_lock_held(void)310 void lockdep_assert_cpuset_lock_held(void)
311 {
312 	lockdep_assert_held(&cpuset_mutex);
313 }
314 
315 /**
316  * cpuset_full_lock - Acquire full protection for cpuset modification
317  *
318  * Takes both CPU hotplug read lock (cpus_read_lock()) and cpuset mutex
319  * to safely modify cpuset data.
320  */
cpuset_full_lock(void)321 void cpuset_full_lock(void)
322 {
323 	mutex_lock(&cpuset_top_mutex);
324 	cpus_read_lock();
325 	mutex_lock(&cpuset_mutex);
326 }
327 
cpuset_full_unlock(void)328 void cpuset_full_unlock(void)
329 {
330 	mutex_unlock(&cpuset_mutex);
331 	cpus_read_unlock();
332 	mutex_unlock(&cpuset_top_mutex);
333 }
334 
335 #ifdef CONFIG_LOCKDEP
lockdep_is_cpuset_held(void)336 bool lockdep_is_cpuset_held(void)
337 {
338 	return lockdep_is_held(&cpuset_mutex) ||
339 	       lockdep_is_held(&cpuset_top_mutex);
340 }
341 #endif
342 
343 static DEFINE_SPINLOCK(callback_lock);
344 
cpuset_callback_lock_irq(void)345 void cpuset_callback_lock_irq(void)
346 {
347 	spin_lock_irq(&callback_lock);
348 }
349 
cpuset_callback_unlock_irq(void)350 void cpuset_callback_unlock_irq(void)
351 {
352 	spin_unlock_irq(&callback_lock);
353 }
354 
355 static struct workqueue_struct *cpuset_migrate_mm_wq;
356 
357 static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);
358 
check_insane_mems_config(nodemask_t * nodes)359 static inline void check_insane_mems_config(nodemask_t *nodes)
360 {
361 	if (!cpusets_insane_config() &&
362 		movable_only_nodes(nodes)) {
363 		static_branch_enable_cpuslocked(&cpusets_insane_config_key);
364 		pr_info("Unsupported (movable nodes only) cpuset configuration detected (nmask=%*pbl)!\n"
365 			"Cpuset allocations might fail even with a lot of memory available.\n",
366 			nodemask_pr_args(nodes));
367 	}
368 }
369 
370 /*
371  * decrease cs->attach_in_progress.
372  * wake_up cpuset_attach_wq if cs->attach_in_progress==0.
373  */
dec_attach_in_progress_locked(struct cpuset * cs)374 static inline void dec_attach_in_progress_locked(struct cpuset *cs)
375 {
376 	lockdep_assert_cpuset_lock_held();
377 
378 	cs->attach_in_progress--;
379 	if (!cs->attach_in_progress)
380 		wake_up(&cpuset_attach_wq);
381 }
382 
dec_attach_in_progress(struct cpuset * cs)383 static inline void dec_attach_in_progress(struct cpuset *cs)
384 {
385 	mutex_lock(&cpuset_mutex);
386 	dec_attach_in_progress_locked(cs);
387 	mutex_unlock(&cpuset_mutex);
388 }
389 
cpuset_v2(void)390 static inline bool cpuset_v2(void)
391 {
392 	return !IS_ENABLED(CONFIG_CPUSETS_V1) ||
393 		cgroup_subsys_on_dfl(cpuset_cgrp_subsys);
394 }
395 
396 /*
397  * Cgroup v2 behavior is used on the "cpus" and "mems" control files when
398  * on default hierarchy or when the cpuset_v2_mode flag is set by mounting
399  * the v1 cpuset cgroup filesystem with the "cpuset_v2_mode" mount option.
400  * With v2 behavior, "cpus" and "mems" are always what the users have
401  * requested and won't be changed by hotplug events. Only the effective
402  * cpus or mems will be affected.
403  */
is_in_v2_mode(void)404 static inline bool is_in_v2_mode(void)
405 {
406 	return cpuset_v2() ||
407 	      (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE);
408 }
409 
410 /**
411  * partition_is_populated - check if partition has tasks
412  * @cs: partition root to be checked
413  * @excluded_child: a child cpuset to be excluded in task checking
414  * Return: true if there are tasks, false otherwise
415  *
416  * @cs should be a valid partition root or going to become a partition root.
417  * @excluded_child should be non-NULL when this cpuset is going to become a
418  * partition itself.
419  *
420  * Note that a remote partition is not allowed underneath a valid local
421  * or remote partition. So if a non-partition root child is populated,
422  * the whole partition is considered populated.
423  */
partition_is_populated(struct cpuset * cs,struct cpuset * excluded_child)424 static inline bool partition_is_populated(struct cpuset *cs,
425 					  struct cpuset *excluded_child)
426 {
427 	struct cpuset *cp;
428 	struct cgroup_subsys_state *pos_css;
429 
430 	/*
431 	 * We cannot call cs_is_populated(cs) directly, as
432 	 * nr_populated_domain_children may include populated
433 	 * csets from descendants that are partitions.
434 	 */
435 	if (cs->css.cgroup->nr_populated_csets ||
436 	    cs->attach_in_progress)
437 		return true;
438 
439 	rcu_read_lock();
440 	cpuset_for_each_descendant_pre(cp, pos_css, cs) {
441 		if (cp == cs || cp == excluded_child)
442 			continue;
443 
444 		if (is_partition_valid(cp)) {
445 			pos_css = css_rightmost_descendant(pos_css);
446 			continue;
447 		}
448 
449 		if (cpuset_is_populated(cp)) {
450 			rcu_read_unlock();
451 			return true;
452 		}
453 	}
454 	rcu_read_unlock();
455 	return false;
456 }
457 
458 /*
459  * Return in pmask the portion of a task's cpusets's cpus_allowed that
460  * are online and are capable of running the task.  If none are found,
461  * walk up the cpuset hierarchy until we find one that does have some
462  * appropriate cpus.
463  *
464  * One way or another, we guarantee to return some non-empty subset
465  * of cpu_active_mask.
466  *
467  * Call with callback_lock or cpuset_mutex held.
468  */
guarantee_active_cpus(struct task_struct * tsk,struct cpumask * pmask)469 static void guarantee_active_cpus(struct task_struct *tsk,
470 				  struct cpumask *pmask)
471 {
472 	const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
473 	struct cpuset *cs;
474 
475 	if (WARN_ON(!cpumask_and(pmask, possible_mask, cpu_active_mask)))
476 		cpumask_copy(pmask, cpu_active_mask);
477 
478 	rcu_read_lock();
479 	cs = task_cs(tsk);
480 
481 	while (!cpumask_intersects(cs->effective_cpus, pmask))
482 		cs = parent_cs(cs);
483 
484 	cpumask_and(pmask, pmask, cs->effective_cpus);
485 	rcu_read_unlock();
486 }
487 
488 /*
489  * Return in *pmask the portion of a cpusets's mems_allowed that
490  * are online, with memory.  If none are online with memory, walk
491  * up the cpuset hierarchy until we find one that does have some
492  * online mems.  The top cpuset always has some mems online.
493  *
494  * One way or another, we guarantee to return some non-empty subset
495  * of node_states[N_MEMORY].
496  *
497  * Call with callback_lock or cpuset_mutex held.
498  */
guarantee_online_mems(struct cpuset * cs,nodemask_t * pmask)499 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
500 {
501 	while (!nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]))
502 		cs = parent_cs(cs);
503 }
504 
505 /**
506  * alloc_cpumasks - Allocate an array of cpumask variables
507  * @pmasks: Pointer to array of cpumask_var_t pointers
508  * @size: Number of cpumasks to allocate
509  * Return: 0 if successful, -ENOMEM otherwise.
510  *
511  * Allocates @size cpumasks and initializes them to empty. Returns 0 on
512  * success, -ENOMEM on allocation failure. On failure, any previously
513  * allocated cpumasks are freed.
514  */
alloc_cpumasks(cpumask_var_t * pmasks[],u32 size)515 static inline int alloc_cpumasks(cpumask_var_t *pmasks[], u32 size)
516 {
517 	int i;
518 
519 	for (i = 0; i < size; i++) {
520 		if (!zalloc_cpumask_var(pmasks[i], GFP_KERNEL)) {
521 			while (--i >= 0)
522 				free_cpumask_var(*pmasks[i]);
523 			return -ENOMEM;
524 		}
525 	}
526 	return 0;
527 }
528 
529 /**
530  * alloc_tmpmasks - Allocate temporary cpumasks for cpuset operations.
531  * @tmp: Pointer to tmpmasks structure to populate
532  * Return: 0 on success, -ENOMEM on allocation failure
533  */
alloc_tmpmasks(struct tmpmasks * tmp)534 static inline int alloc_tmpmasks(struct tmpmasks *tmp)
535 {
536 	/*
537 	 * Array of pointers to the three cpumask_var_t fields in tmpmasks.
538 	 * Note: Array size must match actual number of masks (3)
539 	 */
540 	cpumask_var_t *pmask[3] = {
541 		&tmp->new_cpus,
542 		&tmp->addmask,
543 		&tmp->delmask
544 	};
545 
546 	return alloc_cpumasks(pmask, ARRAY_SIZE(pmask));
547 }
548 
549 /**
550  * free_tmpmasks - free cpumasks in a tmpmasks structure
551  * @tmp: the tmpmasks structure pointer
552  */
free_tmpmasks(struct tmpmasks * tmp)553 static inline void free_tmpmasks(struct tmpmasks *tmp)
554 {
555 	if (!tmp)
556 		return;
557 
558 	free_cpumask_var(tmp->new_cpus);
559 	free_cpumask_var(tmp->addmask);
560 	free_cpumask_var(tmp->delmask);
561 }
562 
563 /**
564  * dup_or_alloc_cpuset - Duplicate or allocate a new cpuset
565  * @cs: Source cpuset to duplicate (NULL for a fresh allocation)
566  *
567  * Creates a new cpuset by either:
568  * 1. Duplicating an existing cpuset (if @cs is non-NULL), or
569  * 2. Allocating a fresh cpuset with zero-initialized masks (if @cs is NULL)
570  *
571  * Return: Pointer to newly allocated cpuset on success, NULL on failure
572  */
dup_or_alloc_cpuset(struct cpuset * cs)573 static struct cpuset *dup_or_alloc_cpuset(struct cpuset *cs)
574 {
575 	struct cpuset *trial;
576 
577 	/* Allocate base structure */
578 	trial = cs ? kmemdup(cs, sizeof(*cs), GFP_KERNEL) :
579 		     kzalloc_obj(*cs);
580 	if (!trial)
581 		return NULL;
582 
583 	trial->dl_bw_cpu = -1;
584 
585 	/* Setup cpumask pointer array */
586 	cpumask_var_t *pmask[4] = {
587 		&trial->cpus_allowed,
588 		&trial->effective_cpus,
589 		&trial->effective_xcpus,
590 		&trial->exclusive_cpus
591 	};
592 
593 	if (alloc_cpumasks(pmask, ARRAY_SIZE(pmask))) {
594 		kfree(trial);
595 		return NULL;
596 	}
597 
598 	/* Copy masks if duplicating */
599 	if (cs) {
600 		cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
601 		cpumask_copy(trial->effective_cpus, cs->effective_cpus);
602 		cpumask_copy(trial->effective_xcpus, cs->effective_xcpus);
603 		cpumask_copy(trial->exclusive_cpus, cs->exclusive_cpus);
604 	}
605 
606 	return trial;
607 }
608 
609 /**
610  * free_cpuset - free the cpuset
611  * @cs: the cpuset to be freed
612  */
free_cpuset(struct cpuset * cs)613 static inline void free_cpuset(struct cpuset *cs)
614 {
615 	free_cpumask_var(cs->cpus_allowed);
616 	free_cpumask_var(cs->effective_cpus);
617 	free_cpumask_var(cs->effective_xcpus);
618 	free_cpumask_var(cs->exclusive_cpus);
619 	kfree(cs);
620 }
621 
622 /* Return user specified exclusive CPUs */
user_xcpus(struct cpuset * cs)623 static inline struct cpumask *user_xcpus(struct cpuset *cs)
624 {
625 	return cpumask_empty(cs->exclusive_cpus) ? cs->cpus_allowed
626 						 : cs->exclusive_cpus;
627 }
628 
xcpus_empty(struct cpuset * cs)629 static inline bool xcpus_empty(struct cpuset *cs)
630 {
631 	return cpumask_empty(cs->cpus_allowed) &&
632 	       cpumask_empty(cs->exclusive_cpus);
633 }
634 
635 /*
636  * cpusets_are_exclusive() - check if two cpusets are exclusive
637  *
638  * Return true if exclusive, false if not
639  */
cpusets_are_exclusive(struct cpuset * cs1,struct cpuset * cs2)640 static inline bool cpusets_are_exclusive(struct cpuset *cs1, struct cpuset *cs2)
641 {
642 	struct cpumask *xcpus1 = user_xcpus(cs1);
643 	struct cpumask *xcpus2 = user_xcpus(cs2);
644 
645 	if (cpumask_intersects(xcpus1, xcpus2))
646 		return false;
647 	return true;
648 }
649 
650 /**
651  * cpus_excl_conflict - Check if two cpusets have exclusive CPU conflicts
652  * @trial:	the trial cpuset to be checked
653  * @sibling:	a sibling cpuset to be checked against
654  * @xcpus_changed: set if exclusive_cpus has been set
655  *
656  * Returns: true if CPU exclusivity conflict exists, false otherwise
657  *
658  * Conflict detection rules:
659  *  o cgroup v1
660  *    See cpuset1_cpus_excl_conflict()
661  *  o cgroup v2
662  *    - The exclusive_cpus values cannot overlap.
663  *    - New exclusive_cpus cannot be a superset of a sibling's cpus_allowed.
664  */
cpus_excl_conflict(struct cpuset * trial,struct cpuset * sibling,bool xcpus_changed)665 static inline bool cpus_excl_conflict(struct cpuset *trial, struct cpuset *sibling,
666 				      bool xcpus_changed)
667 {
668 	if (!cpuset_v2())
669 		return cpuset1_cpus_excl_conflict(trial, sibling);
670 
671 	/* The cpus_allowed of a sibling cpuset cannot be a subset of the new exclusive_cpus */
672 	if (xcpus_changed && !cpumask_empty(sibling->cpus_allowed) &&
673 	    cpumask_subset(sibling->cpus_allowed, trial->exclusive_cpus))
674 		return true;
675 
676 	/* Exclusive_cpus cannot intersect */
677 	return cpumask_intersects(trial->exclusive_cpus, sibling->exclusive_cpus);
678 }
679 
mems_excl_conflict(struct cpuset * cs1,struct cpuset * cs2)680 static inline bool mems_excl_conflict(struct cpuset *cs1, struct cpuset *cs2)
681 {
682 	if ((is_mem_exclusive(cs1) || is_mem_exclusive(cs2)))
683 		return nodes_intersects(cs1->mems_allowed, cs2->mems_allowed);
684 	return false;
685 }
686 
687 /*
688  * validate_change() - Used to validate that any proposed cpuset change
689  *		       follows the structural rules for cpusets.
690  *
691  * If we replaced the flag and mask values of the current cpuset
692  * (cur) with those values in the trial cpuset (trial), would
693  * our various subset and exclusive rules still be valid?  Presumes
694  * cpuset_mutex held.
695  *
696  * 'cur' is the address of an actual, in-use cpuset.  Operations
697  * such as list traversal that depend on the actual address of the
698  * cpuset in the list must use cur below, not trial.
699  *
700  * 'trial' is the address of bulk structure copy of cur, with
701  * perhaps one or more of the fields cpus_allowed, mems_allowed,
702  * or flags changed to new, trial values.
703  *
704  * Return 0 if valid, -errno if not.
705  */
706 
validate_change(struct cpuset * cur,struct cpuset * trial)707 static int validate_change(struct cpuset *cur, struct cpuset *trial)
708 {
709 	struct cgroup_subsys_state *css;
710 	struct cpuset *c, *par;
711 	bool xcpus_changed;
712 	int ret = 0;
713 
714 	rcu_read_lock();
715 
716 	if (!is_in_v2_mode())
717 		ret = cpuset1_validate_change(cur, trial);
718 	if (ret)
719 		goto out;
720 
721 	/* Remaining checks don't apply to root cpuset */
722 	if (cur == &top_cpuset)
723 		goto out;
724 
725 	par = parent_cs(cur);
726 
727 	/*
728 	 * We can't shrink if we won't have enough room for SCHED_DEADLINE
729 	 * tasks. This check is not done when scheduling is disabled as the
730 	 * users should know what they are doing.
731 	 *
732 	 * For v1, effective_cpus == cpus_allowed & user_xcpus() returns
733 	 * cpus_allowed.
734 	 *
735 	 * For v2, is_cpu_exclusive() & is_sched_load_balance() are true only
736 	 * for non-isolated partition root. At this point, the target
737 	 * effective_cpus isn't computed yet. user_xcpus() is the best
738 	 * approximation.
739 	 *
740 	 * TBD: May need to precompute the real effective_cpus here in case
741 	 * incorrect scheduling of SCHED_DEADLINE tasks in a partition
742 	 * becomes an issue.
743 	 */
744 	ret = -EBUSY;
745 	if (is_cpu_exclusive(cur) && is_sched_load_balance(cur) &&
746 	    !cpuset_cpumask_can_shrink(cur->effective_cpus, user_xcpus(trial)))
747 		goto out;
748 
749 	/*
750 	 * If either I or some sibling (!= me) is exclusive, we can't
751 	 * overlap. exclusive_cpus cannot overlap with each other if set.
752 	 */
753 	ret = -EINVAL;
754 	xcpus_changed = !cpumask_equal(cur->exclusive_cpus, trial->exclusive_cpus);
755 	cpuset_for_each_child(c, css, par) {
756 		if (c == cur)
757 			continue;
758 		if (cpus_excl_conflict(trial, c, xcpus_changed))
759 			goto out;
760 		if (mems_excl_conflict(trial, c))
761 			goto out;
762 	}
763 
764 	ret = 0;
765 out:
766 	rcu_read_unlock();
767 	return ret;
768 }
769 
770 #ifdef CONFIG_SMP
771 
772 /*
773  * generate_sched_domains()
774  *
775  * This function builds a partial partition of the systems CPUs
776  * A 'partial partition' is a set of non-overlapping subsets whose
777  * union is a subset of that set.
778  * The output of this function needs to be passed to kernel/sched/core.c
779  * partition_sched_domains() routine, which will rebuild the scheduler's
780  * load balancing domains (sched domains) as specified by that partial
781  * partition.
782  *
783  * See "What is sched_load_balance" in Documentation/admin-guide/cgroup-v1/cpusets.rst
784  * for a background explanation of this.
785  *
786  * Does not return errors, on the theory that the callers of this
787  * routine would rather not worry about failures to rebuild sched
788  * domains when operating in the severe memory shortage situations
789  * that could cause allocation failures below.
790  *
791  * Must be called with cpuset_mutex held.
792  *
793  * The three key local variables below are:
794  *    cp - cpuset pointer, used (together with pos_css) to perform a
795  *	   top-down scan of all cpusets. For our purposes, rebuilding
796  *	   the schedulers sched domains, we can ignore !is_sched_load_
797  *	   balance cpusets.
798  *  csa  - (for CpuSet Array) Array of pointers to all the cpusets
799  *	   that need to be load balanced, for convenient iterative
800  *	   access by the subsequent code that finds the best partition,
801  *	   i.e the set of domains (subsets) of CPUs such that the
802  *	   cpus_allowed of every cpuset marked is_sched_load_balance
803  *	   is a subset of one of these domains, while there are as
804  *	   many such domains as possible, each as small as possible.
805  * doms  - Conversion of 'csa' to an array of cpumasks, for passing to
806  *	   the kernel/sched/core.c routine partition_sched_domains() in a
807  *	   convenient format, that can be easily compared to the prior
808  *	   value to determine what partition elements (sched domains)
809  *	   were changed (added or removed.)
810  */
generate_sched_domains(cpumask_var_t ** domains,struct sched_domain_attr ** attributes)811 static int generate_sched_domains(cpumask_var_t **domains,
812 			struct sched_domain_attr **attributes)
813 {
814 	struct cpuset *cp;	/* top-down scan of cpusets */
815 	struct cpuset **csa;	/* array of all cpuset ptrs */
816 	int i, j;		/* indices for partition finding loops */
817 	cpumask_var_t *doms;	/* resulting partition; i.e. sched domains */
818 	struct sched_domain_attr *dattr;  /* attributes for custom domains */
819 	int ndoms = 0;		/* number of sched domains in result */
820 	struct cgroup_subsys_state *pos_css;
821 
822 	if (!cpuset_v2())
823 		return cpuset1_generate_sched_domains(domains, attributes);
824 
825 	doms = NULL;
826 	dattr = NULL;
827 	csa = NULL;
828 
829 	/* Special case for the 99% of systems with one, full, sched domain */
830 	if (cpumask_empty(subpartitions_cpus)) {
831 		ndoms = 1;
832 		/* !csa will be checked and can be correctly handled */
833 		goto generate_doms;
834 	}
835 
836 	csa = kmalloc_objs(cp, nr_cpusets());
837 	if (!csa)
838 		goto done;
839 
840 	/* Find how many partitions and cache them to csa[] */
841 	rcu_read_lock();
842 	cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
843 		/*
844 		 * Only valid partition roots that are not isolated and with
845 		 * non-empty effective_cpus will be saved into csa[].
846 		 */
847 		if ((cp->partition_root_state == PRS_ROOT) &&
848 		    !cpumask_empty(cp->effective_cpus))
849 			csa[ndoms++] = cp;
850 
851 		/*
852 		 * Skip @cp's subtree if not a partition root and has no
853 		 * exclusive CPUs to be granted to child cpusets.
854 		 */
855 		if (!is_partition_valid(cp) && cpumask_empty(cp->exclusive_cpus))
856 			pos_css = css_rightmost_descendant(pos_css);
857 	}
858 	rcu_read_unlock();
859 
860 	for (i = 0; i < ndoms; i++) {
861 		for (j = i + 1; j < ndoms; j++) {
862 			if (cpusets_overlap(csa[i], csa[j]))
863 				/*
864 				 * Cgroup v2 shouldn't pass down overlapping
865 				 * partition root cpusets.
866 				 */
867 				WARN_ON_ONCE(1);
868 		}
869 	}
870 
871 generate_doms:
872 	doms = alloc_sched_domains(ndoms);
873 	if (!doms)
874 		goto done;
875 
876 	/*
877 	 * The rest of the code, including the scheduler, can deal with
878 	 * dattr==NULL case. No need to abort if alloc fails.
879 	 */
880 	dattr = kmalloc_objs(struct sched_domain_attr, ndoms);
881 
882 	/*
883 	 * Cgroup v2 doesn't support domain attributes, just set all of them
884 	 * to SD_ATTR_INIT. Also non-isolating partition root CPUs are a
885 	 * subset of HK_TYPE_DOMAIN_BOOT housekeeping CPUs.
886 	 */
887 	for (i = 0; i < ndoms; i++) {
888 		/*
889 		 * The top cpuset may contain some boot time isolated
890 		 * CPUs that need to be excluded from the sched domain.
891 		 */
892 		if (!csa || csa[i] == &top_cpuset)
893 			cpumask_and(doms[i], top_cpuset.effective_cpus,
894 				    housekeeping_cpumask(HK_TYPE_DOMAIN_BOOT));
895 		else
896 			cpumask_copy(doms[i], csa[i]->effective_cpus);
897 		if (dattr)
898 			dattr[i] = SD_ATTR_INIT;
899 	}
900 
901 done:
902 	kfree(csa);
903 
904 	/*
905 	 * Fallback to the default domain if kmalloc() failed.
906 	 * See comments in partition_sched_domains().
907 	 */
908 	if (doms == NULL)
909 		ndoms = 1;
910 
911 	*domains    = doms;
912 	*attributes = dattr;
913 	return ndoms;
914 }
915 
dl_update_tasks_root_domain(struct cpuset * cs)916 static void dl_update_tasks_root_domain(struct cpuset *cs)
917 {
918 	struct css_task_iter it;
919 	struct task_struct *task;
920 
921 	if (cs->nr_deadline_tasks == 0)
922 		return;
923 
924 	css_task_iter_start(&cs->css, 0, &it);
925 
926 	while ((task = css_task_iter_next(&it)))
927 		dl_add_task_root_domain(task);
928 
929 	css_task_iter_end(&it);
930 }
931 
dl_rebuild_rd_accounting(void)932 void dl_rebuild_rd_accounting(void)
933 {
934 	struct cpuset *cs = NULL;
935 	struct cgroup_subsys_state *pos_css;
936 	int cpu;
937 	u64 cookie = ++dl_cookie;
938 
939 	lockdep_assert_cpuset_lock_held();
940 	lockdep_assert_cpus_held();
941 	lockdep_assert_held(&sched_domains_mutex);
942 
943 	rcu_read_lock();
944 
945 	for_each_possible_cpu(cpu) {
946 		if (dl_bw_visited(cpu, cookie))
947 			continue;
948 
949 		dl_clear_root_domain_cpu(cpu);
950 	}
951 
952 	cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
953 
954 		if (cpumask_empty(cs->effective_cpus)) {
955 			pos_css = css_rightmost_descendant(pos_css);
956 			continue;
957 		}
958 
959 		css_get(&cs->css);
960 
961 		rcu_read_unlock();
962 
963 		dl_update_tasks_root_domain(cs);
964 
965 		rcu_read_lock();
966 		css_put(&cs->css);
967 	}
968 	rcu_read_unlock();
969 }
970 
971 /*
972  * Rebuild scheduler domains.
973  *
974  * If the flag 'sched_load_balance' of any cpuset with non-empty
975  * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
976  * which has that flag enabled, or if any cpuset with a non-empty
977  * 'cpus' is removed, then call this routine to rebuild the
978  * scheduler's dynamic sched domains.
979  *
980  * Call with cpuset_mutex held.  Takes cpus_read_lock().
981  */
rebuild_sched_domains_locked(void)982 void rebuild_sched_domains_locked(void)
983 {
984 	struct sched_domain_attr *attr;
985 	cpumask_var_t *doms;
986 	int ndoms;
987 	int i;
988 
989 	lockdep_assert_cpus_held();
990 	lockdep_assert_cpuset_lock_held();
991 	force_sd_rebuild = false;
992 
993 	/* Generate domain masks and attrs */
994 	ndoms = generate_sched_domains(&doms, &attr);
995 
996 	/*
997 	* cpuset_hotplug_workfn is invoked synchronously now, thus this
998 	* function should not race with CPU hotplug. And the effective CPUs
999 	* must not include any offline CPUs. Passing an offline CPU in the
1000 	* doms to partition_sched_domains() will trigger a kernel panic.
1001 	*
1002 	* We perform a final check here: if the doms contains any
1003 	* offline CPUs, a warning is emitted and we return directly to
1004 	* prevent the panic.
1005 	*/
1006 	for (i = 0; doms && i < ndoms; i++) {
1007 		if (WARN_ON_ONCE(!cpumask_subset(doms[i], cpu_active_mask)))
1008 			return;
1009 	}
1010 
1011 	/* Have scheduler rebuild the domains */
1012 	partition_sched_domains(ndoms, doms, attr);
1013 }
1014 #else /* !CONFIG_SMP */
rebuild_sched_domains_locked(void)1015 void rebuild_sched_domains_locked(void)
1016 {
1017 }
1018 #endif /* CONFIG_SMP */
1019 
rebuild_sched_domains_cpuslocked(void)1020 static void rebuild_sched_domains_cpuslocked(void)
1021 {
1022 	mutex_lock(&cpuset_mutex);
1023 	rebuild_sched_domains_locked();
1024 	mutex_unlock(&cpuset_mutex);
1025 }
1026 
rebuild_sched_domains(void)1027 void rebuild_sched_domains(void)
1028 {
1029 	cpus_read_lock();
1030 	rebuild_sched_domains_cpuslocked();
1031 	cpus_read_unlock();
1032 }
1033 
cpuset_reset_sched_domains(void)1034 void cpuset_reset_sched_domains(void)
1035 {
1036 	mutex_lock(&cpuset_mutex);
1037 	partition_sched_domains(1, NULL, NULL);
1038 	mutex_unlock(&cpuset_mutex);
1039 }
1040 
1041 /**
1042  * cpuset_update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
1043  * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
1044  * @new_cpus: the temp variable for the new effective_cpus mask
1045  *
1046  * Iterate through each task of @cs updating its cpus_allowed to the
1047  * effective cpuset's.  As this function is called with cpuset_mutex held,
1048  * cpuset membership stays stable.
1049  *
1050  * For top_cpuset, task_cpu_possible_mask() is used instead of effective_cpus
1051  * to make sure all offline CPUs are also included as hotplug code won't
1052  * update cpumasks for tasks in top_cpuset.
1053  *
1054  * As task_cpu_possible_mask() can be task dependent in arm64, we have to
1055  * do cpu masking per task instead of doing it once for all.
1056  */
cpuset_update_tasks_cpumask(struct cpuset * cs,struct cpumask * new_cpus)1057 void cpuset_update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
1058 {
1059 	struct css_task_iter it;
1060 	struct task_struct *task;
1061 	bool top_cs = cs == &top_cpuset;
1062 
1063 	css_task_iter_start(&cs->css, 0, &it);
1064 	while ((task = css_task_iter_next(&it))) {
1065 		const struct cpumask *possible_mask = task_cpu_possible_mask(task);
1066 
1067 		if (top_cs) {
1068 			/*
1069 			 * PF_KTHREAD tasks are handled by housekeeping.
1070 			 * PF_NO_SETAFFINITY tasks are ignored.
1071 			 */
1072 			if (task->flags & (PF_KTHREAD | PF_NO_SETAFFINITY))
1073 				continue;
1074 			cpumask_andnot(new_cpus, possible_mask, subpartitions_cpus);
1075 		} else {
1076 			cpumask_and(new_cpus, possible_mask, cs->effective_cpus);
1077 		}
1078 		set_cpus_allowed_ptr(task, new_cpus);
1079 	}
1080 	css_task_iter_end(&it);
1081 }
1082 
1083 /**
1084  * compute_effective_cpumask - Compute the effective cpumask of the cpuset
1085  * @new_cpus: the temp variable for the new effective_cpus mask
1086  * @cs: the cpuset the need to recompute the new effective_cpus mask
1087  * @parent: the parent cpuset
1088  *
1089  * The result is valid only if the given cpuset isn't a partition root.
1090  */
compute_effective_cpumask(struct cpumask * new_cpus,struct cpuset * cs,struct cpuset * parent)1091 static void compute_effective_cpumask(struct cpumask *new_cpus,
1092 				      struct cpuset *cs, struct cpuset *parent)
1093 {
1094 	cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus);
1095 }
1096 
1097 /*
1098  * Commands for update_parent_effective_cpumask
1099  */
1100 enum partition_cmd {
1101 	partcmd_enable,		/* Enable partition root	  */
1102 	partcmd_enablei,	/* Enable isolated partition root */
1103 	partcmd_disable,	/* Disable partition root	  */
1104 	partcmd_update,		/* Update parent's effective_cpus */
1105 	partcmd_invalidate,	/* Make partition invalid	  */
1106 };
1107 
1108 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
1109 				    struct tmpmasks *tmp);
1110 
1111 /*
1112  * Update partition exclusive flag
1113  *
1114  * Return: 0 if successful, an error code otherwise
1115  */
update_partition_exclusive_flag(struct cpuset * cs,int new_prs)1116 static int update_partition_exclusive_flag(struct cpuset *cs, int new_prs)
1117 {
1118 	bool exclusive = (new_prs > PRS_MEMBER);
1119 
1120 	if (exclusive && !is_cpu_exclusive(cs)) {
1121 		if (cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, 1))
1122 			return PERR_NOTEXCL;
1123 	} else if (!exclusive && is_cpu_exclusive(cs)) {
1124 		/* Turning off CS_CPU_EXCLUSIVE will not return error */
1125 		cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, 0);
1126 	}
1127 	return 0;
1128 }
1129 
1130 /*
1131  * Update partition load balance flag and/or rebuild sched domain
1132  *
1133  * Changing load balance flag will automatically call
1134  * rebuild_sched_domains_locked().
1135  * This function is for cgroup v2 only.
1136  */
update_partition_sd_lb(struct cpuset * cs,int old_prs)1137 static void update_partition_sd_lb(struct cpuset *cs, int old_prs)
1138 {
1139 	int new_prs = cs->partition_root_state;
1140 	bool rebuild_domains = (new_prs > 0) || (old_prs > 0);
1141 	bool new_lb;
1142 
1143 	/*
1144 	 * If cs is not a valid partition root, the load balance state
1145 	 * will follow its parent.
1146 	 */
1147 	if (new_prs > 0) {
1148 		new_lb = (new_prs != PRS_ISOLATED);
1149 	} else {
1150 		new_lb = is_sched_load_balance(parent_cs(cs));
1151 	}
1152 	if (new_lb != !!is_sched_load_balance(cs)) {
1153 		rebuild_domains = true;
1154 		if (new_lb)
1155 			set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1156 		else
1157 			clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1158 	}
1159 
1160 	if (rebuild_domains)
1161 		cpuset_force_rebuild();
1162 }
1163 
1164 /*
1165  * tasks_nocpu_error - Return true if tasks will have no effective_cpus
1166  */
tasks_nocpu_error(struct cpuset * parent,struct cpuset * cs,struct cpumask * xcpus)1167 static bool tasks_nocpu_error(struct cpuset *parent, struct cpuset *cs,
1168 			      struct cpumask *xcpus)
1169 {
1170 	/*
1171 	 * A populated partition (cs or parent) can't have empty effective_cpus
1172 	 */
1173 	return (cpumask_subset(parent->effective_cpus, xcpus) &&
1174 		partition_is_populated(parent, cs)) ||
1175 	       (!cpumask_intersects(xcpus, cpu_active_mask) &&
1176 		partition_is_populated(cs, NULL));
1177 }
1178 
reset_partition_data(struct cpuset * cs)1179 static void reset_partition_data(struct cpuset *cs)
1180 {
1181 	struct cpuset *parent = parent_cs(cs);
1182 
1183 	if (!cpuset_v2())
1184 		return;
1185 
1186 	lockdep_assert_held(&callback_lock);
1187 
1188 	if (cpumask_empty(cs->exclusive_cpus)) {
1189 		cpumask_clear(cs->effective_xcpus);
1190 		if (is_cpu_exclusive(cs))
1191 			clear_bit(CS_CPU_EXCLUSIVE, &cs->flags);
1192 	}
1193 	if (!cpumask_and(cs->effective_cpus, parent->effective_cpus, cs->cpus_allowed))
1194 		cpumask_copy(cs->effective_cpus, parent->effective_cpus);
1195 }
1196 
1197 /*
1198  * isolated_cpus_update - Update the isolated_cpus mask
1199  * @old_prs: old partition_root_state
1200  * @new_prs: new partition_root_state
1201  * @xcpus: exclusive CPUs with state change
1202  */
isolated_cpus_update(int old_prs,int new_prs,struct cpumask * xcpus)1203 static void isolated_cpus_update(int old_prs, int new_prs, struct cpumask *xcpus)
1204 {
1205 	WARN_ON_ONCE(old_prs == new_prs);
1206 	lockdep_assert_held(&callback_lock);
1207 	lockdep_assert_held(&cpuset_mutex);
1208 	if (new_prs == PRS_ISOLATED) {
1209 		if (cpumask_subset(xcpus, isolated_cpus))
1210 			return;
1211 		cpumask_or(isolated_cpus, isolated_cpus, xcpus);
1212 	} else {
1213 		if (!cpumask_intersects(xcpus, isolated_cpus))
1214 			return;
1215 		cpumask_andnot(isolated_cpus, isolated_cpus, xcpus);
1216 	}
1217 	update_housekeeping = true;
1218 }
1219 
1220 /*
1221  * partition_xcpus_add - Add new exclusive CPUs to partition
1222  * @new_prs: new partition_root_state
1223  * @parent: parent cpuset
1224  * @xcpus: exclusive CPUs to be added
1225  *
1226  * Remote partition if parent == NULL
1227  */
partition_xcpus_add(int new_prs,struct cpuset * parent,struct cpumask * xcpus)1228 static void partition_xcpus_add(int new_prs, struct cpuset *parent,
1229 				struct cpumask *xcpus)
1230 {
1231 	WARN_ON_ONCE(new_prs < 0);
1232 	lockdep_assert_held(&callback_lock);
1233 	if (!parent)
1234 		parent = &top_cpuset;
1235 
1236 
1237 	if (parent == &top_cpuset)
1238 		cpumask_or(subpartitions_cpus, subpartitions_cpus, xcpus);
1239 
1240 	if (new_prs != parent->partition_root_state)
1241 		isolated_cpus_update(parent->partition_root_state, new_prs,
1242 				     xcpus);
1243 
1244 	cpumask_andnot(parent->effective_cpus, parent->effective_cpus, xcpus);
1245 }
1246 
1247 /*
1248  * partition_xcpus_del - Remove exclusive CPUs from partition
1249  * @old_prs: old partition_root_state
1250  * @parent: parent cpuset
1251  * @xcpus: exclusive CPUs to be removed
1252  *
1253  * Remote partition if parent == NULL
1254  */
partition_xcpus_del(int old_prs,struct cpuset * parent,struct cpumask * xcpus)1255 static void partition_xcpus_del(int old_prs, struct cpuset *parent,
1256 				struct cpumask *xcpus)
1257 {
1258 	WARN_ON_ONCE(old_prs < 0);
1259 	lockdep_assert_held(&callback_lock);
1260 	if (!parent)
1261 		parent = &top_cpuset;
1262 
1263 	if (parent == &top_cpuset)
1264 		cpumask_andnot(subpartitions_cpus, subpartitions_cpus, xcpus);
1265 
1266 	if (old_prs != parent->partition_root_state)
1267 		isolated_cpus_update(old_prs, parent->partition_root_state,
1268 				     xcpus);
1269 
1270 	cpumask_or(parent->effective_cpus, parent->effective_cpus, xcpus);
1271 	cpumask_and(parent->effective_cpus, parent->effective_cpus, cpu_active_mask);
1272 }
1273 
1274 /*
1275  * isolated_cpus_can_update - check for isolated & nohz_full conflicts
1276  * @add_cpus: cpu mask for cpus that are going to be isolated
1277  * @del_cpus: cpu mask for cpus that are no longer isolated, can be NULL
1278  * Return: false if there is conflict, true otherwise
1279  *
1280  * If nohz_full is enabled and we have isolated CPUs, their combination must
1281  * still leave housekeeping CPUs.
1282  *
1283  * TBD: Should consider merging this function into
1284  *      prstate_housekeeping_conflict().
1285  */
isolated_cpus_can_update(struct cpumask * add_cpus,struct cpumask * del_cpus)1286 static bool isolated_cpus_can_update(struct cpumask *add_cpus,
1287 				     struct cpumask *del_cpus)
1288 {
1289 	cpumask_var_t full_hk_cpus;
1290 	int res = true;
1291 
1292 	if (!housekeeping_enabled(HK_TYPE_KERNEL_NOISE))
1293 		return true;
1294 
1295 	if (del_cpus && cpumask_weight_and(del_cpus,
1296 			housekeeping_cpumask(HK_TYPE_KERNEL_NOISE)))
1297 		return true;
1298 
1299 	if (!alloc_cpumask_var(&full_hk_cpus, GFP_KERNEL))
1300 		return false;
1301 
1302 	cpumask_and(full_hk_cpus, housekeeping_cpumask(HK_TYPE_KERNEL_NOISE),
1303 		    housekeeping_cpumask(HK_TYPE_DOMAIN));
1304 	cpumask_andnot(full_hk_cpus, full_hk_cpus, isolated_cpus);
1305 	cpumask_and(full_hk_cpus, full_hk_cpus, cpu_active_mask);
1306 	if (!cpumask_weight_andnot(full_hk_cpus, add_cpus))
1307 		res = false;
1308 
1309 	free_cpumask_var(full_hk_cpus);
1310 	return res;
1311 }
1312 
1313 /*
1314  * prstate_housekeeping_conflict - check for partition & housekeeping conflicts
1315  * @prstate: partition root state to be checked
1316  * @new_cpus: cpu mask
1317  * Return: true if there is conflict, false otherwise
1318  *
1319  * CPUs outside of HK_TYPE_DOMAIN_BOOT, if defined, can only be used in an
1320  * isolated partition.
1321  */
prstate_housekeeping_conflict(int prstate,struct cpumask * new_cpus)1322 static bool prstate_housekeeping_conflict(int prstate, struct cpumask *new_cpus)
1323 {
1324 	if (!housekeeping_enabled(HK_TYPE_DOMAIN_BOOT))
1325 		return false;
1326 
1327 	if ((prstate != PRS_ISOLATED) &&
1328 	    !cpumask_subset(new_cpus, housekeeping_cpumask(HK_TYPE_DOMAIN_BOOT)))
1329 		return true;
1330 
1331 	return false;
1332 }
1333 
1334 /*
1335  * cpuset_update_sd_hk_unlock - Rebuild sched domains, update HK & unlock
1336  *
1337  * Update housekeeping cpumasks and rebuild sched domains if necessary and
1338  * then do a cpuset_full_unlock().
1339  * This should be called at the end of cpuset operation.
1340  */
cpuset_update_sd_hk_unlock(void)1341 static void cpuset_update_sd_hk_unlock(void)
1342 	__releases(&cpuset_mutex)
1343 	__releases(&cpuset_top_mutex)
1344 {
1345 	/* force_sd_rebuild will be cleared in rebuild_sched_domains_locked() */
1346 	if (force_sd_rebuild)
1347 		rebuild_sched_domains_locked();
1348 
1349 	if (update_housekeeping) {
1350 		update_housekeeping = false;
1351 		cpumask_copy(isolated_hk_cpus, isolated_cpus);
1352 
1353 		/*
1354 		 * housekeeping_update() is now called without holding
1355 		 * cpus_read_lock and cpuset_mutex. Only cpuset_top_mutex
1356 		 * is still being held for mutual exclusion.
1357 		 */
1358 		mutex_unlock(&cpuset_mutex);
1359 		cpus_read_unlock();
1360 		WARN_ON_ONCE(housekeeping_update(isolated_hk_cpus));
1361 		mutex_unlock(&cpuset_top_mutex);
1362 	} else {
1363 		cpuset_full_unlock();
1364 	}
1365 }
1366 
1367 /*
1368  * Work function to invoke cpuset_update_sd_hk_unlock()
1369  */
hk_sd_workfn(struct work_struct * work)1370 static void hk_sd_workfn(struct work_struct *work)
1371 {
1372 	cpuset_full_lock();
1373 	cpuset_update_sd_hk_unlock();
1374 }
1375 
1376 /**
1377  * rm_siblings_excl_cpus - Remove exclusive CPUs that are used by sibling cpusets
1378  * @parent: Parent cpuset containing all siblings
1379  * @cs: Current cpuset (will be skipped)
1380  * @excpus:  exclusive effective CPU mask to modify
1381  *
1382  * This function ensures the given @excpus mask doesn't include any CPUs that
1383  * are exclusively allocated to sibling cpusets. It walks through all siblings
1384  * of @cs under @parent and removes their exclusive CPUs from @excpus.
1385  */
rm_siblings_excl_cpus(struct cpuset * parent,struct cpuset * cs,struct cpumask * excpus)1386 static int rm_siblings_excl_cpus(struct cpuset *parent, struct cpuset *cs,
1387 					struct cpumask *excpus)
1388 {
1389 	struct cgroup_subsys_state *css;
1390 	struct cpuset *sibling;
1391 	int retval = 0;
1392 
1393 	if (cpumask_empty(excpus))
1394 		return 0;
1395 
1396 	/*
1397 	 * Remove exclusive CPUs from siblings
1398 	 */
1399 	rcu_read_lock();
1400 	cpuset_for_each_child(sibling, css, parent) {
1401 		struct cpumask *sibling_xcpus;
1402 
1403 		if (sibling == cs)
1404 			continue;
1405 
1406 		/*
1407 		 * If exclusive_cpus is defined, effective_xcpus will always
1408 		 * be a subset. Otherwise, effective_xcpus will only be set
1409 		 * in a valid partition root.
1410 		 */
1411 		sibling_xcpus = cpumask_empty(sibling->exclusive_cpus)
1412 			      ? sibling->effective_xcpus
1413 			      : sibling->exclusive_cpus;
1414 
1415 		if (cpumask_intersects(excpus, sibling_xcpus)) {
1416 			cpumask_andnot(excpus, excpus, sibling_xcpus);
1417 			retval++;
1418 		}
1419 	}
1420 	rcu_read_unlock();
1421 
1422 	return retval;
1423 }
1424 
1425 /*
1426  * compute_excpus - compute effective exclusive CPUs
1427  * @cs: cpuset
1428  * @xcpus: effective exclusive CPUs value to be set
1429  * Return: 0 if there is no sibling conflict, > 0 otherwise
1430  *
1431  * If exclusive_cpus isn't explicitly set , we have to scan the sibling cpusets
1432  * and exclude their exclusive_cpus or effective_xcpus as well.
1433  */
compute_excpus(struct cpuset * cs,struct cpumask * excpus)1434 static int compute_excpus(struct cpuset *cs, struct cpumask *excpus)
1435 {
1436 	struct cpuset *parent = parent_cs(cs);
1437 
1438 	cpumask_and(excpus, user_xcpus(cs), parent->effective_xcpus);
1439 
1440 	if (!cpumask_empty(cs->exclusive_cpus))
1441 		return 0;
1442 
1443 	return rm_siblings_excl_cpus(parent, cs, excpus);
1444 }
1445 
1446 /*
1447  * compute_trialcs_excpus - Compute effective exclusive CPUs for a trial cpuset
1448  * @trialcs: The trial cpuset containing the proposed new configuration
1449  * @cs: The original cpuset that the trial configuration is based on
1450  * Return: 0 if successful with no sibling conflict, >0 if a conflict is found
1451  *
1452  * Computes the effective_xcpus for a trial configuration. @cs is provided to represent
1453  * the real cs.
1454  */
compute_trialcs_excpus(struct cpuset * trialcs,struct cpuset * cs)1455 static int compute_trialcs_excpus(struct cpuset *trialcs, struct cpuset *cs)
1456 {
1457 	struct cpuset *parent = parent_cs(trialcs);
1458 	struct cpumask *excpus = trialcs->effective_xcpus;
1459 
1460 	/* trialcs is member, cpuset.cpus has no impact to excpus */
1461 	if (cs_is_member(cs))
1462 		cpumask_and(excpus, trialcs->exclusive_cpus,
1463 				parent->effective_xcpus);
1464 	else
1465 		cpumask_and(excpus, user_xcpus(trialcs), parent->effective_xcpus);
1466 
1467 	return rm_siblings_excl_cpus(parent, cs, excpus);
1468 }
1469 
is_remote_partition(struct cpuset * cs)1470 static inline bool is_remote_partition(struct cpuset *cs)
1471 {
1472 	return cs->remote_partition;
1473 }
1474 
is_local_partition(struct cpuset * cs)1475 static inline bool is_local_partition(struct cpuset *cs)
1476 {
1477 	return is_partition_valid(cs) && !is_remote_partition(cs);
1478 }
1479 
1480 /*
1481  * remote_partition_enable - Enable current cpuset as a remote partition root
1482  * @cs: the cpuset to update
1483  * @new_prs: new partition_root_state
1484  * @tmp: temporary masks
1485  * Return: 0 if successful, errcode if error
1486  *
1487  * Enable the current cpuset to become a remote partition root taking CPUs
1488  * directly from the top cpuset. cpuset_mutex must be held by the caller.
1489  */
remote_partition_enable(struct cpuset * cs,int new_prs,struct tmpmasks * tmp)1490 static int remote_partition_enable(struct cpuset *cs, int new_prs,
1491 				   struct tmpmasks *tmp)
1492 {
1493 	/*
1494 	 * The user must have sysadmin privilege.
1495 	 */
1496 	if (!capable(CAP_SYS_ADMIN))
1497 		return PERR_ACCESS;
1498 
1499 	/*
1500 	 * The requested exclusive_cpus must not be allocated to other
1501 	 * partitions and it can't use up all the root's effective_cpus.
1502 	 *
1503 	 * The effective_xcpus mask can contain offline CPUs, but there must
1504 	 * be at least one or more online CPUs present before it can be enabled.
1505 	 *
1506 	 * Note that creating a remote partition with any local partition root
1507 	 * above it or remote partition root underneath it is not allowed.
1508 	 */
1509 	compute_excpus(cs, tmp->new_cpus);
1510 	WARN_ON_ONCE(cpumask_intersects(tmp->new_cpus, subpartitions_cpus));
1511 	if (!cpumask_intersects(tmp->new_cpus, cpu_active_mask) ||
1512 	    cpumask_subset(top_cpuset.effective_cpus, tmp->new_cpus))
1513 		return PERR_INVCPUS;
1514 	if (((new_prs == PRS_ISOLATED) &&
1515 	     !isolated_cpus_can_update(tmp->new_cpus, NULL)) ||
1516 	    prstate_housekeeping_conflict(new_prs, tmp->new_cpus))
1517 		return PERR_HKEEPING;
1518 
1519 	spin_lock_irq(&callback_lock);
1520 	partition_xcpus_add(new_prs, NULL, tmp->new_cpus);
1521 	cs->remote_partition = true;
1522 	cpumask_copy(cs->effective_xcpus, tmp->new_cpus);
1523 	spin_unlock_irq(&callback_lock);
1524 	cpuset_force_rebuild();
1525 	cs->prs_err = 0;
1526 
1527 	/*
1528 	 * Propagate changes in top_cpuset's effective_cpus down the hierarchy.
1529 	 */
1530 	cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
1531 	update_sibling_cpumasks(&top_cpuset, NULL, tmp);
1532 	return 0;
1533 }
1534 
1535 /*
1536  * remote_partition_disable - Remove current cpuset from remote partition list
1537  * @cs: the cpuset to update
1538  * @tmp: temporary masks
1539  *
1540  * The effective_cpus is also updated.
1541  *
1542  * cpuset_mutex must be held by the caller.
1543  */
remote_partition_disable(struct cpuset * cs,struct tmpmasks * tmp)1544 static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp)
1545 {
1546 	WARN_ON_ONCE(!is_remote_partition(cs));
1547 	/*
1548 	 * When a CPU is offlined, top_cpuset may end up with no available CPUs,
1549 	 * which should clear subpartitions_cpus. We should not emit a warning for this
1550 	 * scenario: the hierarchy is updated from top to bottom, so subpartitions_cpus
1551 	 * may already be cleared when disabling the partition.
1552 	 */
1553 	WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus) &&
1554 		     !cpumask_empty(subpartitions_cpus));
1555 
1556 	spin_lock_irq(&callback_lock);
1557 	cs->remote_partition = false;
1558 	partition_xcpus_del(cs->partition_root_state, NULL, cs->effective_xcpus);
1559 	if (cs->prs_err)
1560 		cs->partition_root_state = -cs->partition_root_state;
1561 	else
1562 		cs->partition_root_state = PRS_MEMBER;
1563 
1564 	/* effective_xcpus may need to be changed */
1565 	compute_excpus(cs, cs->effective_xcpus);
1566 	reset_partition_data(cs);
1567 	spin_unlock_irq(&callback_lock);
1568 	cpuset_force_rebuild();
1569 
1570 	/*
1571 	 * Propagate changes in top_cpuset's effective_cpus down the hierarchy.
1572 	 */
1573 	cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
1574 	update_sibling_cpumasks(&top_cpuset, NULL, tmp);
1575 }
1576 
1577 /*
1578  * remote_cpus_update - cpus_exclusive change of remote partition
1579  * @cs: the cpuset to be updated
1580  * @xcpus: the new exclusive_cpus mask, if non-NULL
1581  * @excpus: the new effective_xcpus mask
1582  * @tmp: temporary masks
1583  *
1584  * top_cpuset and subpartitions_cpus will be updated or partition can be
1585  * invalidated.
1586  */
remote_cpus_update(struct cpuset * cs,struct cpumask * xcpus,struct cpumask * excpus,struct tmpmasks * tmp)1587 static void remote_cpus_update(struct cpuset *cs, struct cpumask *xcpus,
1588 			       struct cpumask *excpus, struct tmpmasks *tmp)
1589 {
1590 	bool adding, deleting;
1591 	int prs = cs->partition_root_state;
1592 
1593 	if (WARN_ON_ONCE(!is_remote_partition(cs)))
1594 		return;
1595 
1596 	WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus));
1597 
1598 	if (cpumask_empty(excpus)) {
1599 		cs->prs_err = PERR_CPUSEMPTY;
1600 		goto invalidate;
1601 	}
1602 
1603 	adding   = cpumask_andnot(tmp->addmask, excpus, cs->effective_xcpus);
1604 	deleting = cpumask_andnot(tmp->delmask, cs->effective_xcpus, excpus);
1605 
1606 	/*
1607 	 * Additions of remote CPUs is only allowed if those CPUs are
1608 	 * not allocated to other partitions and there are effective_cpus
1609 	 * left in the top cpuset.
1610 	 */
1611 	if (adding) {
1612 		WARN_ON_ONCE(cpumask_intersects(tmp->addmask, subpartitions_cpus));
1613 		if (!capable(CAP_SYS_ADMIN))
1614 			cs->prs_err = PERR_ACCESS;
1615 		else if (cpumask_intersects(tmp->addmask, subpartitions_cpus) ||
1616 			 cpumask_subset(top_cpuset.effective_cpus, tmp->addmask))
1617 			cs->prs_err = PERR_NOCPUS;
1618 		else if ((prs == PRS_ISOLATED) &&
1619 			 !isolated_cpus_can_update(tmp->addmask, tmp->delmask))
1620 			cs->prs_err = PERR_HKEEPING;
1621 		if (cs->prs_err)
1622 			goto invalidate;
1623 	}
1624 
1625 	spin_lock_irq(&callback_lock);
1626 	if (adding)
1627 		partition_xcpus_add(prs, NULL, tmp->addmask);
1628 	if (deleting)
1629 		partition_xcpus_del(prs, NULL, tmp->delmask);
1630 	/*
1631 	 * Need to update effective_xcpus and exclusive_cpus now as
1632 	 * update_sibling_cpumasks() below may iterate back to the same cs.
1633 	 */
1634 	cpumask_copy(cs->effective_xcpus, excpus);
1635 	if (xcpus)
1636 		cpumask_copy(cs->exclusive_cpus, xcpus);
1637 	spin_unlock_irq(&callback_lock);
1638 	if (adding || deleting)
1639 		cpuset_force_rebuild();
1640 
1641 	/*
1642 	 * Propagate changes in top_cpuset's effective_cpus down the hierarchy.
1643 	 */
1644 	cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
1645 	update_sibling_cpumasks(&top_cpuset, NULL, tmp);
1646 	return;
1647 
1648 invalidate:
1649 	remote_partition_disable(cs, tmp);
1650 }
1651 
1652 /**
1653  * update_parent_effective_cpumask - update effective_cpus mask of parent cpuset
1654  * @cs:      The cpuset that requests change in partition root state
1655  * @cmd:     Partition root state change command
1656  * @newmask: Optional new cpumask for partcmd_update
1657  * @tmp:     Temporary addmask and delmask
1658  * Return:   0 or a partition root state error code
1659  *
1660  * For partcmd_enable*, the cpuset is being transformed from a non-partition
1661  * root to a partition root. The effective_xcpus (cpus_allowed if
1662  * effective_xcpus not set) mask of the given cpuset will be taken away from
1663  * parent's effective_cpus. The function will return 0 if all the CPUs listed
1664  * in effective_xcpus can be granted or an error code will be returned.
1665  *
1666  * For partcmd_disable, the cpuset is being transformed from a partition
1667  * root back to a non-partition root. Any CPUs in effective_xcpus will be
1668  * given back to parent's effective_cpus. 0 will always be returned.
1669  *
1670  * For partcmd_update, if the optional newmask is specified, the cpu list is
1671  * to be changed from effective_xcpus to newmask. Otherwise, effective_xcpus is
1672  * assumed to remain the same. The cpuset should either be a valid or invalid
1673  * partition root. The partition root state may change from valid to invalid
1674  * or vice versa. An error code will be returned if transitioning from
1675  * invalid to valid violates the exclusivity rule.
1676  *
1677  * For partcmd_invalidate, the current partition will be made invalid.
1678  *
1679  * The partcmd_enable* and partcmd_disable commands are used by
1680  * update_prstate(). An error code may be returned and the caller will check
1681  * for error.
1682  *
1683  * The partcmd_update command is used by update_cpumasks_hier() with newmask
1684  * NULL and update_cpumask() with newmask set. The partcmd_invalidate is used
1685  * by update_cpumask() with NULL newmask. In both cases, the callers won't
1686  * check for error and so partition_root_state and prs_err will be updated
1687  * directly.
1688  */
update_parent_effective_cpumask(struct cpuset * cs,int cmd,struct cpumask * newmask,struct tmpmasks * tmp)1689 static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
1690 					   struct cpumask *newmask,
1691 					   struct tmpmasks *tmp)
1692 {
1693 	struct cpuset *parent = parent_cs(cs);
1694 	int adding;	/* Adding cpus to parent's effective_cpus	*/
1695 	int deleting;	/* Deleting cpus from parent's effective_cpus	*/
1696 	int old_prs, new_prs;
1697 	int part_error = PERR_NONE;	/* Partition error? */
1698 	struct cpumask *xcpus = user_xcpus(cs);
1699 	int parent_prs = parent->partition_root_state;
1700 	bool nocpu;
1701 
1702 	lockdep_assert_cpuset_lock_held();
1703 	WARN_ON_ONCE(is_remote_partition(cs));	/* For local partition only */
1704 
1705 	/*
1706 	 * new_prs will only be changed for the partcmd_update and
1707 	 * partcmd_invalidate commands.
1708 	 */
1709 	adding = deleting = false;
1710 	old_prs = new_prs = cs->partition_root_state;
1711 
1712 	if (cmd == partcmd_invalidate) {
1713 		if (is_partition_invalid(cs))
1714 			return 0;
1715 
1716 		/*
1717 		 * Make the current partition invalid.
1718 		 */
1719 		if (is_partition_valid(parent))
1720 			adding = cpumask_and(tmp->addmask,
1721 					     xcpus, parent->effective_xcpus);
1722 		if (old_prs > 0)
1723 			new_prs = -old_prs;
1724 
1725 		goto write_error;
1726 	}
1727 
1728 	/*
1729 	 * The parent must be a partition root.
1730 	 * The new cpumask, if present, or the current cpus_allowed must
1731 	 * not be empty.
1732 	 */
1733 	if (!is_partition_valid(parent)) {
1734 		return is_partition_invalid(parent)
1735 		       ? PERR_INVPARENT : PERR_NOTPART;
1736 	}
1737 	if (!newmask && xcpus_empty(cs))
1738 		return PERR_CPUSEMPTY;
1739 
1740 	nocpu = tasks_nocpu_error(parent, cs, xcpus);
1741 
1742 	if ((cmd == partcmd_enable) || (cmd == partcmd_enablei)) {
1743 		/*
1744 		 * Need to call compute_excpus() in case
1745 		 * exclusive_cpus not set. Sibling conflict should only happen
1746 		 * if exclusive_cpus isn't set.
1747 		 */
1748 		xcpus = tmp->delmask;
1749 		if (compute_excpus(cs, xcpus))
1750 			WARN_ON_ONCE(!cpumask_empty(cs->exclusive_cpus));
1751 		new_prs = (cmd == partcmd_enable) ? PRS_ROOT : PRS_ISOLATED;
1752 
1753 		/*
1754 		 * Enabling partition root is not allowed if its
1755 		 * effective_xcpus is empty.
1756 		 */
1757 		if (cpumask_empty(xcpus))
1758 			return PERR_INVCPUS;
1759 
1760 		if (prstate_housekeeping_conflict(new_prs, xcpus))
1761 			return PERR_HKEEPING;
1762 
1763 		if ((new_prs == PRS_ISOLATED) && (new_prs != parent_prs) &&
1764 		    !isolated_cpus_can_update(xcpus, NULL))
1765 			return PERR_HKEEPING;
1766 
1767 		if (tasks_nocpu_error(parent, cs, xcpus))
1768 			return PERR_NOCPUS;
1769 
1770 		/*
1771 		 * This function will only be called when all the preliminary
1772 		 * checks have passed. At this point, the following condition
1773 		 * should hold.
1774 		 *
1775 		 * (cs->effective_xcpus & cpu_active_mask) ⊆ parent->effective_cpus
1776 		 *
1777 		 * Warn if it is not the case.
1778 		 */
1779 		cpumask_and(tmp->new_cpus, xcpus, cpu_active_mask);
1780 		WARN_ON_ONCE(!cpumask_subset(tmp->new_cpus, parent->effective_cpus));
1781 
1782 		deleting = true;
1783 	} else if (cmd == partcmd_disable) {
1784 		/*
1785 		 * May need to add cpus back to parent's effective_cpus
1786 		 * (and maybe removed from subpartitions_cpus/isolated_cpus)
1787 		 * for valid partition root. xcpus may contain CPUs that
1788 		 * shouldn't be removed from the two global cpumasks.
1789 		 */
1790 		if (is_partition_valid(cs)) {
1791 			cpumask_copy(tmp->addmask, cs->effective_xcpus);
1792 			adding = true;
1793 		}
1794 		new_prs = PRS_MEMBER;
1795 	} else if (newmask) {
1796 		/*
1797 		 * Empty cpumask is not allowed
1798 		 */
1799 		if (cpumask_empty(newmask)) {
1800 			part_error = PERR_CPUSEMPTY;
1801 			goto write_error;
1802 		}
1803 
1804 		/* Check newmask again, whether cpus are available for parent/cs */
1805 		nocpu |= tasks_nocpu_error(parent, cs, newmask);
1806 
1807 		/*
1808 		 * partcmd_update with newmask:
1809 		 *
1810 		 * Compute add/delete mask to/from effective_cpus
1811 		 *
1812 		 * For valid partition:
1813 		 *   addmask = exclusive_cpus & ~newmask
1814 		 *			      & parent->effective_xcpus
1815 		 *   delmask = newmask & ~exclusive_cpus
1816 		 *		       & parent->effective_xcpus
1817 		 *
1818 		 * For invalid partition:
1819 		 *   delmask = newmask & parent->effective_xcpus
1820 		 *   The partition may become valid soon.
1821 		 */
1822 		if (is_partition_invalid(cs)) {
1823 			adding = false;
1824 			deleting = cpumask_and(tmp->delmask,
1825 					newmask, parent->effective_xcpus);
1826 		} else {
1827 			cpumask_andnot(tmp->addmask, xcpus, newmask);
1828 			adding = cpumask_and(tmp->addmask, tmp->addmask,
1829 					     parent->effective_xcpus);
1830 
1831 			cpumask_andnot(tmp->delmask, newmask, xcpus);
1832 			deleting = cpumask_and(tmp->delmask, tmp->delmask,
1833 					       parent->effective_xcpus);
1834 		}
1835 
1836 		/*
1837 		 * TBD: Invalidate a currently valid child root partition may
1838 		 * still break isolated_cpus_can_update() rule if parent is an
1839 		 * isolated partition.
1840 		 */
1841 		if (is_partition_valid(cs) && (old_prs != parent_prs)) {
1842 			if ((parent_prs == PRS_ROOT) &&
1843 			    /* Adding to parent means removing isolated CPUs */
1844 			    !isolated_cpus_can_update(tmp->delmask, tmp->addmask))
1845 				part_error = PERR_HKEEPING;
1846 			if ((parent_prs == PRS_ISOLATED) &&
1847 			    /* Adding to parent means adding isolated CPUs */
1848 			    !isolated_cpus_can_update(tmp->addmask, tmp->delmask))
1849 				part_error = PERR_HKEEPING;
1850 		}
1851 
1852 		/*
1853 		 * The new CPUs to be removed from parent's effective CPUs
1854 		 * must be present.
1855 		 */
1856 		if (deleting) {
1857 			cpumask_and(tmp->new_cpus, tmp->delmask, cpu_active_mask);
1858 			WARN_ON_ONCE(!cpumask_subset(tmp->new_cpus, parent->effective_cpus));
1859 		}
1860 
1861 		/*
1862 		 * Make partition invalid if parent's effective_cpus could
1863 		 * become empty and there are tasks in the parent.
1864 		 */
1865 		if (nocpu && (!adding ||
1866 		    !cpumask_intersects(tmp->addmask, cpu_active_mask))) {
1867 			part_error = PERR_NOCPUS;
1868 			deleting = false;
1869 			adding = cpumask_and(tmp->addmask,
1870 					     xcpus, parent->effective_xcpus);
1871 		}
1872 	} else {
1873 		/*
1874 		 * partcmd_update w/o newmask
1875 		 *
1876 		 * delmask = effective_xcpus & parent->effective_cpus
1877 		 *
1878 		 * This can be called from:
1879 		 * 1) update_cpumasks_hier()
1880 		 * 2) cpuset_hotplug_update_tasks()
1881 		 *
1882 		 * Check to see if it can be transitioned from valid to
1883 		 * invalid partition or vice versa.
1884 		 *
1885 		 * A partition error happens when parent has tasks and all
1886 		 * its effective CPUs will have to be distributed out.
1887 		 */
1888 		if (nocpu) {
1889 			part_error = PERR_NOCPUS;
1890 			if (is_partition_valid(cs))
1891 				adding = cpumask_and(tmp->addmask,
1892 						xcpus, parent->effective_xcpus);
1893 		} else if (is_partition_invalid(cs) && !cpumask_empty(xcpus) &&
1894 			   cpumask_subset(xcpus, parent->effective_xcpus)) {
1895 			struct cgroup_subsys_state *css;
1896 			struct cpuset *child;
1897 			bool exclusive = true;
1898 
1899 			/*
1900 			 * Convert invalid partition to valid has to
1901 			 * pass the cpu exclusivity test.
1902 			 */
1903 			rcu_read_lock();
1904 			cpuset_for_each_child(child, css, parent) {
1905 				if (child == cs)
1906 					continue;
1907 				if (!cpusets_are_exclusive(cs, child)) {
1908 					exclusive = false;
1909 					break;
1910 				}
1911 			}
1912 			rcu_read_unlock();
1913 			if (exclusive)
1914 				deleting = cpumask_and(tmp->delmask,
1915 						xcpus, parent->effective_cpus);
1916 			else
1917 				part_error = PERR_NOTEXCL;
1918 		}
1919 	}
1920 
1921 write_error:
1922 	if (part_error)
1923 		WRITE_ONCE(cs->prs_err, part_error);
1924 
1925 	if (cmd == partcmd_update) {
1926 		/*
1927 		 * Check for possible transition between valid and invalid
1928 		 * partition root.
1929 		 */
1930 		switch (cs->partition_root_state) {
1931 		case PRS_ROOT:
1932 		case PRS_ISOLATED:
1933 			if (part_error)
1934 				new_prs = -old_prs;
1935 			break;
1936 		case PRS_INVALID_ROOT:
1937 		case PRS_INVALID_ISOLATED:
1938 			if (!part_error)
1939 				new_prs = -old_prs;
1940 			break;
1941 		}
1942 	}
1943 
1944 	if (!adding && !deleting && (new_prs == old_prs))
1945 		return 0;
1946 
1947 	/*
1948 	 * Transitioning between invalid to valid or vice versa may require
1949 	 * changing CS_CPU_EXCLUSIVE. In the case of partcmd_update,
1950 	 * validate_change() has already been successfully called and
1951 	 * CPU lists in cs haven't been updated yet. So defer it to later.
1952 	 */
1953 	if ((old_prs != new_prs) && (cmd != partcmd_update))  {
1954 		int err = update_partition_exclusive_flag(cs, new_prs);
1955 
1956 		if (err)
1957 			return err;
1958 	}
1959 
1960 	/*
1961 	 * Change the parent's effective_cpus & effective_xcpus (top cpuset
1962 	 * only).
1963 	 *
1964 	 * Newly added CPUs will be removed from effective_cpus and
1965 	 * newly deleted ones will be added back to effective_cpus.
1966 	 */
1967 	spin_lock_irq(&callback_lock);
1968 	if (old_prs != new_prs)
1969 		cs->partition_root_state = new_prs;
1970 
1971 	/*
1972 	 * Adding to parent's effective_cpus means deletion CPUs from cs
1973 	 * and vice versa.
1974 	 */
1975 	if (adding)
1976 		partition_xcpus_del(old_prs, parent, tmp->addmask);
1977 	if (deleting)
1978 		partition_xcpus_add(new_prs, parent, tmp->delmask);
1979 
1980 	spin_unlock_irq(&callback_lock);
1981 
1982 	if ((old_prs != new_prs) && (cmd == partcmd_update))
1983 		update_partition_exclusive_flag(cs, new_prs);
1984 
1985 	if (adding || deleting) {
1986 		cpuset_update_tasks_cpumask(parent, tmp->addmask);
1987 		update_sibling_cpumasks(parent, cs, tmp);
1988 	}
1989 
1990 	/*
1991 	 * For partcmd_update without newmask, it is being called from
1992 	 * cpuset_handle_hotplug(). Update the load balance flag and
1993 	 * scheduling domain accordingly.
1994 	 */
1995 	if ((cmd == partcmd_update) && !newmask)
1996 		update_partition_sd_lb(cs, old_prs);
1997 
1998 	notify_partition_change(cs, old_prs);
1999 	return 0;
2000 }
2001 
2002 /**
2003  * compute_partition_effective_cpumask - compute effective_cpus for partition
2004  * @cs: partition root cpuset
2005  * @new_ecpus: previously computed effective_cpus to be updated
2006  *
2007  * Compute the effective_cpus of a partition root by scanning effective_xcpus
2008  * of child partition roots and excluding their effective_xcpus.
2009  *
2010  * This has the side effect of invalidating valid child partition roots,
2011  * if necessary. Since it is called from either cpuset_hotplug_update_tasks()
2012  * or update_cpumasks_hier() where parent and children are modified
2013  * successively, we don't need to call update_parent_effective_cpumask()
2014  * and the child's effective_cpus will be updated in later iterations.
2015  *
2016  * Note that rcu_read_lock() is assumed to be held.
2017  */
compute_partition_effective_cpumask(struct cpuset * cs,struct cpumask * new_ecpus)2018 static void compute_partition_effective_cpumask(struct cpuset *cs,
2019 						struct cpumask *new_ecpus)
2020 {
2021 	struct cgroup_subsys_state *css;
2022 	struct cpuset *child;
2023 	bool populated = partition_is_populated(cs, NULL);
2024 
2025 	/*
2026 	 * Check child partition roots to see if they should be
2027 	 * invalidated when
2028 	 *  1) child effective_xcpus not a subset of new
2029 	 *     excluisve_cpus
2030 	 *  2) All the effective_cpus will be used up and cp
2031 	 *     has tasks
2032 	 */
2033 	compute_excpus(cs, new_ecpus);
2034 	cpumask_and(new_ecpus, new_ecpus, cpu_active_mask);
2035 
2036 	rcu_read_lock();
2037 	cpuset_for_each_child(child, css, cs) {
2038 		if (!is_partition_valid(child))
2039 			continue;
2040 
2041 		/*
2042 		 * There shouldn't be a remote partition underneath another
2043 		 * partition root.
2044 		 */
2045 		WARN_ON_ONCE(is_remote_partition(child));
2046 		child->prs_err = 0;
2047 		if (!cpumask_subset(child->effective_xcpus,
2048 				    cs->effective_xcpus))
2049 			child->prs_err = PERR_INVCPUS;
2050 		else if (populated &&
2051 			 cpumask_subset(new_ecpus, child->effective_xcpus))
2052 			child->prs_err = PERR_NOCPUS;
2053 
2054 		if (child->prs_err) {
2055 			int old_prs = child->partition_root_state;
2056 
2057 			/*
2058 			 * Invalidate child partition
2059 			 */
2060 			spin_lock_irq(&callback_lock);
2061 			make_partition_invalid(child);
2062 			spin_unlock_irq(&callback_lock);
2063 			notify_partition_change(child, old_prs);
2064 			continue;
2065 		}
2066 		cpumask_andnot(new_ecpus, new_ecpus,
2067 			       child->effective_xcpus);
2068 	}
2069 	rcu_read_unlock();
2070 }
2071 
2072 /*
2073  * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
2074  * @cs:  the cpuset to consider
2075  * @tmp: temp variables for calculating effective_cpus & partition setup
2076  * @force: don't skip any descendant cpusets if set
2077  *
2078  * When configured cpumask is changed, the effective cpumasks of this cpuset
2079  * and all its descendants need to be updated.
2080  *
2081  * On legacy hierarchy, effective_cpus will be the same with cpu_allowed.
2082  *
2083  * Called with cpuset_mutex held
2084  */
update_cpumasks_hier(struct cpuset * cs,struct tmpmasks * tmp,bool force)2085 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
2086 				 bool force)
2087 {
2088 	struct cpuset *cp;
2089 	struct cgroup_subsys_state *pos_css;
2090 	int old_prs, new_prs;
2091 
2092 	rcu_read_lock();
2093 	cpuset_for_each_descendant_pre(cp, pos_css, cs) {
2094 		struct cpuset *parent = parent_cs(cp);
2095 		bool remote = is_remote_partition(cp);
2096 		bool update_parent = false;
2097 
2098 		old_prs = new_prs = cp->partition_root_state;
2099 
2100 		/*
2101 		 * For child remote partition root (!= cs), we need to call
2102 		 * remote_cpus_update() if effective_xcpus will be changed.
2103 		 * Otherwise, we can skip the whole subtree.
2104 		 *
2105 		 * remote_cpus_update() will reuse tmp->new_cpus only after
2106 		 * its value is being processed.
2107 		 */
2108 		if (remote && (cp != cs)) {
2109 			compute_excpus(cp, tmp->new_cpus);
2110 			if (cpumask_equal(cp->effective_xcpus, tmp->new_cpus)) {
2111 				pos_css = css_rightmost_descendant(pos_css);
2112 				continue;
2113 			}
2114 			rcu_read_unlock();
2115 			remote_cpus_update(cp, NULL, tmp->new_cpus, tmp);
2116 			rcu_read_lock();
2117 
2118 			/* Remote partition may be invalidated */
2119 			new_prs = cp->partition_root_state;
2120 			remote = (new_prs == old_prs);
2121 		}
2122 
2123 		if (remote || (is_partition_valid(parent) && is_partition_valid(cp)))
2124 			compute_partition_effective_cpumask(cp, tmp->new_cpus);
2125 		else
2126 			compute_effective_cpumask(tmp->new_cpus, cp, parent);
2127 
2128 		if (remote)
2129 			goto get_css;	/* Ready to update cpuset data */
2130 
2131 		/*
2132 		 * A partition with no effective_cpus is allowed as long as
2133 		 * there is no task associated with it. Call
2134 		 * update_parent_effective_cpumask() to check it.
2135 		 */
2136 		if (is_partition_valid(cp) && cpumask_empty(tmp->new_cpus)) {
2137 			update_parent = true;
2138 			goto update_parent_effective;
2139 		}
2140 
2141 		/*
2142 		 * If it becomes empty, inherit the effective mask of the
2143 		 * parent, which is guaranteed to have some CPUs unless
2144 		 * it is a partition root that has explicitly distributed
2145 		 * out all its CPUs.
2146 		 */
2147 		if (is_in_v2_mode() && !remote && cpumask_empty(tmp->new_cpus))
2148 			cpumask_copy(tmp->new_cpus, parent->effective_cpus);
2149 
2150 		/*
2151 		 * Skip the whole subtree if
2152 		 * 1) the cpumask remains the same,
2153 		 * 2) has no partition root state,
2154 		 * 3) force flag not set, and
2155 		 * 4) for v2 load balance state same as its parent.
2156 		 */
2157 		if (!cp->partition_root_state && !force &&
2158 		    cpumask_equal(tmp->new_cpus, cp->effective_cpus) &&
2159 		    (!cpuset_v2() ||
2160 		    (is_sched_load_balance(parent) == is_sched_load_balance(cp)))) {
2161 			pos_css = css_rightmost_descendant(pos_css);
2162 			continue;
2163 		}
2164 
2165 update_parent_effective:
2166 		/*
2167 		 * update_parent_effective_cpumask() should have been called
2168 		 * for cs already in update_cpumask(). We should also call
2169 		 * cpuset_update_tasks_cpumask() again for tasks in the parent
2170 		 * cpuset if the parent's effective_cpus changes.
2171 		 */
2172 		if ((cp != cs) && old_prs) {
2173 			switch (parent->partition_root_state) {
2174 			case PRS_ROOT:
2175 			case PRS_ISOLATED:
2176 				update_parent = true;
2177 				break;
2178 
2179 			default:
2180 				/*
2181 				 * When parent is not a partition root or is
2182 				 * invalid, child partition roots become
2183 				 * invalid too.
2184 				 */
2185 				if (is_partition_valid(cp))
2186 					new_prs = -cp->partition_root_state;
2187 				WRITE_ONCE(cp->prs_err,
2188 					   is_partition_invalid(parent)
2189 					   ? PERR_INVPARENT : PERR_NOTPART);
2190 				break;
2191 			}
2192 		}
2193 get_css:
2194 		if (!css_tryget_online(&cp->css))
2195 			continue;
2196 		rcu_read_unlock();
2197 
2198 		if (update_parent) {
2199 			update_parent_effective_cpumask(cp, partcmd_update, NULL, tmp);
2200 			/*
2201 			 * The cpuset partition_root_state may become
2202 			 * invalid. Capture it.
2203 			 */
2204 			new_prs = cp->partition_root_state;
2205 		}
2206 
2207 		spin_lock_irq(&callback_lock);
2208 		cpumask_copy(cp->effective_cpus, tmp->new_cpus);
2209 		cp->partition_root_state = new_prs;
2210 		/*
2211 		 * Need to compute effective_xcpus if either exclusive_cpus
2212 		 * is non-empty or it is a valid partition root.
2213 		 */
2214 		if ((new_prs > 0) || !cpumask_empty(cp->exclusive_cpus))
2215 			compute_excpus(cp, cp->effective_xcpus);
2216 		if (new_prs <= 0)
2217 			reset_partition_data(cp);
2218 		spin_unlock_irq(&callback_lock);
2219 
2220 		notify_partition_change(cp, old_prs);
2221 
2222 		WARN_ON(!is_in_v2_mode() &&
2223 			!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
2224 
2225 		cpuset_update_tasks_cpumask(cp, tmp->new_cpus);
2226 
2227 		/*
2228 		 * On default hierarchy, inherit the CS_SCHED_LOAD_BALANCE
2229 		 * from parent if current cpuset isn't a valid partition root
2230 		 * and their load balance states differ.
2231 		 */
2232 		if (cpuset_v2() && !is_partition_valid(cp) &&
2233 		    (is_sched_load_balance(parent) != is_sched_load_balance(cp))) {
2234 			if (is_sched_load_balance(parent))
2235 				set_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
2236 			else
2237 				clear_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
2238 		}
2239 
2240 		/*
2241 		 * On legacy hierarchy, if the effective cpumask of any non-
2242 		 * empty cpuset is changed, we need to rebuild sched domains.
2243 		 * On default hierarchy, the cpuset needs to be a partition
2244 		 * root as well.
2245 		 */
2246 		if (!cpumask_empty(cp->cpus_allowed) &&
2247 		    is_sched_load_balance(cp) &&
2248 		   (!cpuset_v2() || is_partition_valid(cp)))
2249 			cpuset_force_rebuild();
2250 
2251 		rcu_read_lock();
2252 		css_put(&cp->css);
2253 	}
2254 	rcu_read_unlock();
2255 }
2256 
2257 /**
2258  * update_sibling_cpumasks - Update siblings cpumasks
2259  * @parent:  Parent cpuset
2260  * @cs:      Current cpuset
2261  * @tmp:     Temp variables
2262  */
update_sibling_cpumasks(struct cpuset * parent,struct cpuset * cs,struct tmpmasks * tmp)2263 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
2264 				    struct tmpmasks *tmp)
2265 {
2266 	struct cpuset *sibling;
2267 	struct cgroup_subsys_state *pos_css;
2268 
2269 	lockdep_assert_cpuset_lock_held();
2270 
2271 	/*
2272 	 * Check all its siblings and call update_cpumasks_hier()
2273 	 * if their effective_cpus will need to be changed.
2274 	 *
2275 	 * It is possible a change in parent's effective_cpus
2276 	 * due to a change in a child partition's effective_xcpus will impact
2277 	 * its siblings even if they do not inherit parent's effective_cpus
2278 	 * directly. It should not impact valid partition.
2279 	 *
2280 	 * The update_cpumasks_hier() function may sleep. So we have to
2281 	 * release the RCU read lock before calling it.
2282 	 */
2283 	rcu_read_lock();
2284 	cpuset_for_each_child(sibling, pos_css, parent) {
2285 		if (sibling == cs || is_partition_valid(sibling))
2286 			continue;
2287 
2288 		compute_effective_cpumask(tmp->new_cpus, sibling,
2289 					  parent);
2290 		if (cpumask_equal(tmp->new_cpus, sibling->effective_cpus))
2291 			continue;
2292 
2293 		if (!css_tryget_online(&sibling->css))
2294 			continue;
2295 
2296 		rcu_read_unlock();
2297 		update_cpumasks_hier(sibling, tmp, false);
2298 		rcu_read_lock();
2299 		css_put(&sibling->css);
2300 	}
2301 	rcu_read_unlock();
2302 }
2303 
parse_cpuset_cpulist(const char * buf,struct cpumask * out_mask)2304 static int parse_cpuset_cpulist(const char *buf, struct cpumask *out_mask)
2305 {
2306 	int retval;
2307 
2308 	retval = cpulist_parse(buf, out_mask);
2309 	if (retval < 0)
2310 		return retval;
2311 	if (!cpumask_subset(out_mask, top_cpuset.cpus_allowed))
2312 		return -EINVAL;
2313 
2314 	return 0;
2315 }
2316 
2317 /**
2318  * validate_partition - Validate a cpuset partition configuration
2319  * @cs: The cpuset to validate
2320  * @trialcs: The trial cpuset containing proposed configuration changes
2321  *
2322  * If any validation check fails, the appropriate error code is set in the
2323  * cpuset's prs_err field.
2324  *
2325  * Return: PRS error code (0 if valid, non-zero error code if invalid)
2326  */
validate_partition(struct cpuset * cs,struct cpuset * trialcs)2327 static enum prs_errcode validate_partition(struct cpuset *cs, struct cpuset *trialcs)
2328 {
2329 	struct cpuset *parent = parent_cs(cs);
2330 
2331 	if (cs_is_member(trialcs))
2332 		return PERR_NONE;
2333 
2334 	if (cpumask_empty(trialcs->effective_xcpus))
2335 		return PERR_INVCPUS;
2336 
2337 	if (prstate_housekeeping_conflict(trialcs->partition_root_state,
2338 					  trialcs->effective_xcpus))
2339 		return PERR_HKEEPING;
2340 
2341 	if (tasks_nocpu_error(parent, cs, trialcs->effective_xcpus))
2342 		return PERR_NOCPUS;
2343 
2344 	return PERR_NONE;
2345 }
2346 
2347 /**
2348  * partition_cpus_change - Handle partition state changes due to CPU mask updates
2349  * @cs: The target cpuset being modified
2350  * @trialcs: The trial cpuset containing proposed configuration changes
2351  * @tmp: Temporary masks for intermediate calculations
2352  *
2353  * This function handles partition state transitions triggered by CPU mask changes.
2354  * CPU modifications may cause a partition to be disabled or require state updates.
2355  */
partition_cpus_change(struct cpuset * cs,struct cpuset * trialcs,struct tmpmasks * tmp)2356 static void partition_cpus_change(struct cpuset *cs, struct cpuset *trialcs,
2357 					struct tmpmasks *tmp)
2358 {
2359 	enum prs_errcode prs_err;
2360 
2361 	if (cs_is_member(cs))
2362 		return;
2363 
2364 	prs_err = validate_partition(cs, trialcs);
2365 	if (prs_err)
2366 		trialcs->prs_err = cs->prs_err = prs_err;
2367 
2368 	if (is_remote_partition(cs)) {
2369 		if (trialcs->prs_err)
2370 			remote_partition_disable(cs, tmp);
2371 		else
2372 			remote_cpus_update(cs, trialcs->exclusive_cpus,
2373 					   trialcs->effective_xcpus, tmp);
2374 	} else {
2375 		if (trialcs->prs_err)
2376 			update_parent_effective_cpumask(cs, partcmd_invalidate,
2377 							NULL, tmp);
2378 		else
2379 			update_parent_effective_cpumask(cs, partcmd_update,
2380 							trialcs->effective_xcpus, tmp);
2381 	}
2382 }
2383 
2384 /**
2385  * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
2386  * @cs: the cpuset to consider
2387  * @trialcs: trial cpuset
2388  * @buf: buffer of cpu numbers written to this cpuset
2389  */
update_cpumask(struct cpuset * cs,struct cpuset * trialcs,const char * buf)2390 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
2391 			  const char *buf)
2392 {
2393 	int retval;
2394 	struct tmpmasks tmp;
2395 	bool force = false;
2396 	int old_prs = cs->partition_root_state;
2397 
2398 	retval = parse_cpuset_cpulist(buf, trialcs->cpus_allowed);
2399 	if (retval < 0)
2400 		return retval;
2401 
2402 	/* Nothing to do if the cpus didn't change */
2403 	if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
2404 		return 0;
2405 
2406 	compute_trialcs_excpus(trialcs, cs);
2407 	trialcs->prs_err = PERR_NONE;
2408 
2409 	retval = validate_change(cs, trialcs);
2410 	if (retval < 0)
2411 		return retval;
2412 
2413 	if (alloc_tmpmasks(&tmp))
2414 		return -ENOMEM;
2415 
2416 	/*
2417 	 * Check all the descendants in update_cpumasks_hier() if
2418 	 * effective_xcpus is to be changed.
2419 	 */
2420 	force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus);
2421 
2422 	partition_cpus_change(cs, trialcs, &tmp);
2423 
2424 	spin_lock_irq(&callback_lock);
2425 	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
2426 	cpumask_copy(cs->effective_xcpus, trialcs->effective_xcpus);
2427 	if ((old_prs > 0) && !is_partition_valid(cs))
2428 		reset_partition_data(cs);
2429 	spin_unlock_irq(&callback_lock);
2430 
2431 	/* effective_cpus/effective_xcpus will be updated here */
2432 	update_cpumasks_hier(cs, &tmp, force);
2433 
2434 	/* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */
2435 	if (cs->partition_root_state)
2436 		update_partition_sd_lb(cs, old_prs);
2437 
2438 	free_tmpmasks(&tmp);
2439 	return retval;
2440 }
2441 
2442 /**
2443  * update_exclusive_cpumask - update the exclusive_cpus mask of a cpuset
2444  * @cs: the cpuset to consider
2445  * @trialcs: trial cpuset
2446  * @buf: buffer of cpu numbers written to this cpuset
2447  *
2448  * The tasks' cpumask will be updated if cs is a valid partition root.
2449  */
update_exclusive_cpumask(struct cpuset * cs,struct cpuset * trialcs,const char * buf)2450 static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs,
2451 				    const char *buf)
2452 {
2453 	int retval;
2454 	struct tmpmasks tmp;
2455 	bool force = false;
2456 	int old_prs = cs->partition_root_state;
2457 
2458 	retval = parse_cpuset_cpulist(buf, trialcs->exclusive_cpus);
2459 	if (retval < 0)
2460 		return retval;
2461 
2462 	/* Nothing to do if the CPUs didn't change */
2463 	if (cpumask_equal(cs->exclusive_cpus, trialcs->exclusive_cpus))
2464 		return 0;
2465 
2466 	/*
2467 	 * Reject the change if there is exclusive CPUs conflict with
2468 	 * the siblings.
2469 	 */
2470 	if (compute_trialcs_excpus(trialcs, cs))
2471 		return -EINVAL;
2472 
2473 	/*
2474 	 * Check all the descendants in update_cpumasks_hier() if
2475 	 * effective_xcpus is to be changed.
2476 	 */
2477 	force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus);
2478 
2479 	retval = validate_change(cs, trialcs);
2480 	if (retval)
2481 		return retval;
2482 
2483 	if (alloc_tmpmasks(&tmp))
2484 		return -ENOMEM;
2485 
2486 	trialcs->prs_err = PERR_NONE;
2487 	partition_cpus_change(cs, trialcs, &tmp);
2488 
2489 	spin_lock_irq(&callback_lock);
2490 	cpumask_copy(cs->exclusive_cpus, trialcs->exclusive_cpus);
2491 	cpumask_copy(cs->effective_xcpus, trialcs->effective_xcpus);
2492 	if ((old_prs > 0) && !is_partition_valid(cs))
2493 		reset_partition_data(cs);
2494 	spin_unlock_irq(&callback_lock);
2495 
2496 	/*
2497 	 * Call update_cpumasks_hier() to update effective_cpus/effective_xcpus
2498 	 * of the subtree when it is a valid partition root or effective_xcpus
2499 	 * is updated.
2500 	 */
2501 	if (is_partition_valid(cs) || force)
2502 		update_cpumasks_hier(cs, &tmp, force);
2503 
2504 	/* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */
2505 	if (cs->partition_root_state)
2506 		update_partition_sd_lb(cs, old_prs);
2507 
2508 	free_tmpmasks(&tmp);
2509 	return 0;
2510 }
2511 
2512 /*
2513  * Migrate memory region from one set of nodes to another.  This is
2514  * performed asynchronously as it can be called from process migration path
2515  * holding locks involved in process management.  All mm migrations are
2516  * performed in the queued order and can be waited for by flushing
2517  * cpuset_migrate_mm_wq.
2518  */
2519 
2520 struct cpuset_migrate_mm_work {
2521 	struct work_struct	work;
2522 	struct mm_struct	*mm;
2523 	nodemask_t		from;
2524 	nodemask_t		to;
2525 };
2526 
cpuset_migrate_mm_workfn(struct work_struct * work)2527 static void cpuset_migrate_mm_workfn(struct work_struct *work)
2528 {
2529 	struct cpuset_migrate_mm_work *mwork =
2530 		container_of(work, struct cpuset_migrate_mm_work, work);
2531 
2532 	/* on a wq worker, no need to worry about %current's mems_allowed */
2533 	do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
2534 	mmput(mwork->mm);
2535 	kfree(mwork);
2536 }
2537 
cpuset_migrate_mm(struct mm_struct * mm,const nodemask_t * from,const nodemask_t * to)2538 static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
2539 							const nodemask_t *to)
2540 {
2541 	struct cpuset_migrate_mm_work *mwork;
2542 
2543 	if (nodes_equal(*from, *to)) {
2544 		mmput(mm);
2545 		return;
2546 	}
2547 
2548 	mwork = kzalloc_obj(*mwork);
2549 	if (mwork) {
2550 		mwork->mm = mm;
2551 		mwork->from = *from;
2552 		mwork->to = *to;
2553 		INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
2554 		queue_work(cpuset_migrate_mm_wq, &mwork->work);
2555 	} else {
2556 		mmput(mm);
2557 	}
2558 }
2559 
flush_migrate_mm_task_workfn(struct callback_head * head)2560 static void flush_migrate_mm_task_workfn(struct callback_head *head)
2561 {
2562 	flush_workqueue(cpuset_migrate_mm_wq);
2563 	kfree(head);
2564 }
2565 
schedule_flush_migrate_mm(void)2566 static void schedule_flush_migrate_mm(void)
2567 {
2568 	struct callback_head *flush_cb;
2569 
2570 	flush_cb = kzalloc_obj(struct callback_head);
2571 	if (!flush_cb)
2572 		return;
2573 
2574 	init_task_work(flush_cb, flush_migrate_mm_task_workfn);
2575 
2576 	if (task_work_add(current, flush_cb, TWA_RESUME))
2577 		kfree(flush_cb);
2578 }
2579 
2580 /*
2581  * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
2582  * @tsk: the task to change
2583  * @newmems: new nodes that the task will be set
2584  *
2585  * We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed
2586  * and rebind an eventual tasks' mempolicy. If the task is allocating in
2587  * parallel, it might temporarily see an empty intersection, which results in
2588  * a seqlock check and retry before OOM or allocation failure.
2589  */
cpuset_change_task_nodemask(struct task_struct * tsk,nodemask_t * newmems)2590 static void cpuset_change_task_nodemask(struct task_struct *tsk,
2591 					nodemask_t *newmems)
2592 {
2593 	task_lock(tsk);
2594 
2595 	local_irq_disable();
2596 	write_seqcount_begin(&tsk->mems_allowed_seq);
2597 
2598 	nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
2599 	mpol_rebind_task(tsk, newmems);
2600 	tsk->mems_allowed = *newmems;
2601 
2602 	write_seqcount_end(&tsk->mems_allowed_seq);
2603 	local_irq_enable();
2604 
2605 	task_unlock(tsk);
2606 }
2607 
2608 static void *cpuset_being_rebound;
2609 
2610 /**
2611  * cpuset_update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
2612  * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
2613  *
2614  * Iterate through each task of @cs updating its mems_allowed to the
2615  * effective cpuset's.  As this function is called with cpuset_mutex held,
2616  * cpuset membership stays stable.
2617  */
cpuset_update_tasks_nodemask(struct cpuset * cs)2618 void cpuset_update_tasks_nodemask(struct cpuset *cs)
2619 {
2620 	static nodemask_t newmems;	/* protected by cpuset_mutex */
2621 	struct css_task_iter it;
2622 	struct task_struct *task;
2623 
2624 	cpuset_being_rebound = cs;		/* causes mpol_dup() rebind */
2625 
2626 	guarantee_online_mems(cs, &newmems);
2627 
2628 	/*
2629 	 * The mpol_rebind_mm() call takes mmap_lock, which we couldn't
2630 	 * take while holding tasklist_lock.  Forks can happen - the
2631 	 * mpol_dup() cpuset_being_rebound check will catch such forks,
2632 	 * and rebind their vma mempolicies too.  Because we still hold
2633 	 * the global cpuset_mutex, we know that no other rebind effort
2634 	 * will be contending for the global variable cpuset_being_rebound.
2635 	 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
2636 	 * is idempotent.  Also migrate pages in each mm to new nodes.
2637 	 */
2638 	css_task_iter_start(&cs->css, 0, &it);
2639 	while ((task = css_task_iter_next(&it))) {
2640 		struct mm_struct *mm;
2641 		bool migrate;
2642 
2643 		cpuset_change_task_nodemask(task, &newmems);
2644 
2645 		mm = get_task_mm(task);
2646 		if (!mm)
2647 			continue;
2648 
2649 		migrate = is_memory_migrate(cs);
2650 
2651 		mpol_rebind_mm(mm, &cs->mems_allowed);
2652 		if (migrate)
2653 			cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
2654 		else
2655 			mmput(mm);
2656 	}
2657 	css_task_iter_end(&it);
2658 
2659 	/*
2660 	 * All the tasks' nodemasks have been updated, update
2661 	 * cs->old_mems_allowed.
2662 	 */
2663 	cs->old_mems_allowed = newmems;
2664 
2665 	/* We're done rebinding vmas to this cpuset's new mems_allowed. */
2666 	cpuset_being_rebound = NULL;
2667 }
2668 
2669 /*
2670  * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree
2671  * @cs: the cpuset to consider
2672  * @new_mems: a temp variable for calculating new effective_mems
2673  *
2674  * When configured nodemask is changed, the effective nodemasks of this cpuset
2675  * and all its descendants need to be updated.
2676  *
2677  * On legacy hierarchy, effective_mems will be the same with mems_allowed.
2678  *
2679  * Called with cpuset_mutex held
2680  */
update_nodemasks_hier(struct cpuset * cs,nodemask_t * new_mems)2681 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
2682 {
2683 	struct cpuset *cp;
2684 	struct cgroup_subsys_state *pos_css;
2685 
2686 	rcu_read_lock();
2687 	cpuset_for_each_descendant_pre(cp, pos_css, cs) {
2688 		struct cpuset *parent = parent_cs(cp);
2689 
2690 		bool has_mems = nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);
2691 
2692 		/*
2693 		 * If it becomes empty, inherit the effective mask of the
2694 		 * parent, which is guaranteed to have some MEMs.
2695 		 */
2696 		if (is_in_v2_mode() && !has_mems)
2697 			*new_mems = parent->effective_mems;
2698 
2699 		/* Skip the whole subtree if the nodemask remains the same. */
2700 		if (nodes_equal(*new_mems, cp->effective_mems)) {
2701 			pos_css = css_rightmost_descendant(pos_css);
2702 			continue;
2703 		}
2704 
2705 		if (!css_tryget_online(&cp->css))
2706 			continue;
2707 		rcu_read_unlock();
2708 
2709 		spin_lock_irq(&callback_lock);
2710 		cp->effective_mems = *new_mems;
2711 		spin_unlock_irq(&callback_lock);
2712 
2713 		WARN_ON(!is_in_v2_mode() &&
2714 			!nodes_equal(cp->mems_allowed, cp->effective_mems));
2715 
2716 		cpuset_update_tasks_nodemask(cp);
2717 
2718 		rcu_read_lock();
2719 		css_put(&cp->css);
2720 	}
2721 	rcu_read_unlock();
2722 }
2723 
2724 /*
2725  * Handle user request to change the 'mems' memory placement
2726  * of a cpuset.  Needs to validate the request, update the
2727  * cpusets mems_allowed, and for each task in the cpuset,
2728  * update mems_allowed and rebind task's mempolicy and any vma
2729  * mempolicies and if the cpuset is marked 'memory_migrate',
2730  * migrate the tasks pages to the new memory.
2731  *
2732  * Call with cpuset_mutex held. May take callback_lock during call.
2733  * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
2734  * lock each such tasks mm->mmap_lock, scan its vma's and rebind
2735  * their mempolicies to the cpusets new mems_allowed.
2736  */
update_nodemask(struct cpuset * cs,struct cpuset * trialcs,const char * buf)2737 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
2738 			   const char *buf)
2739 {
2740 	int retval;
2741 
2742 	/*
2743 	 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
2744 	 * The validate_change() call ensures that cpusets with tasks have memory.
2745 	 */
2746 	retval = nodelist_parse(buf, trialcs->mems_allowed);
2747 	if (retval < 0)
2748 		return retval;
2749 
2750 	if (!nodes_subset(trialcs->mems_allowed,
2751 			  top_cpuset.mems_allowed))
2752 		return -EINVAL;
2753 
2754 	/* No change? nothing to do */
2755 	if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed))
2756 		return 0;
2757 
2758 	retval = validate_change(cs, trialcs);
2759 	if (retval < 0)
2760 		return retval;
2761 
2762 	check_insane_mems_config(&trialcs->mems_allowed);
2763 
2764 	spin_lock_irq(&callback_lock);
2765 	cs->mems_allowed = trialcs->mems_allowed;
2766 	spin_unlock_irq(&callback_lock);
2767 
2768 	/* use trialcs->mems_allowed as a temp variable */
2769 	update_nodemasks_hier(cs, &trialcs->mems_allowed);
2770 	return 0;
2771 }
2772 
current_cpuset_is_being_rebound(void)2773 bool current_cpuset_is_being_rebound(void)
2774 {
2775 	bool ret;
2776 
2777 	rcu_read_lock();
2778 	ret = task_cs(current) == cpuset_being_rebound;
2779 	rcu_read_unlock();
2780 
2781 	return ret;
2782 }
2783 
2784 /*
2785  * cpuset_update_flag - read a 0 or a 1 in a file and update associated flag
2786  * bit:		the bit to update (see cpuset_flagbits_t)
2787  * cs:		the cpuset to update
2788  * turning_on: 	whether the flag is being set or cleared
2789  *
2790  * Call with cpuset_mutex held.
2791  */
2792 
cpuset_update_flag(cpuset_flagbits_t bit,struct cpuset * cs,int turning_on)2793 int cpuset_update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
2794 		       int turning_on)
2795 {
2796 	struct cpuset *trialcs;
2797 	int balance_flag_changed;
2798 	int spread_flag_changed;
2799 	int err;
2800 
2801 	trialcs = dup_or_alloc_cpuset(cs);
2802 	if (!trialcs)
2803 		return -ENOMEM;
2804 
2805 	if (turning_on)
2806 		set_bit(bit, &trialcs->flags);
2807 	else
2808 		clear_bit(bit, &trialcs->flags);
2809 
2810 	err = validate_change(cs, trialcs);
2811 	if (err < 0)
2812 		goto out;
2813 
2814 	balance_flag_changed = (is_sched_load_balance(cs) !=
2815 				is_sched_load_balance(trialcs));
2816 
2817 	spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
2818 			|| (is_spread_page(cs) != is_spread_page(trialcs)));
2819 
2820 	spin_lock_irq(&callback_lock);
2821 	cs->flags = trialcs->flags;
2822 	spin_unlock_irq(&callback_lock);
2823 
2824 	if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) {
2825 		if (cpuset_v2())
2826 			cpuset_force_rebuild();
2827 		else
2828 			rebuild_sched_domains_locked();
2829 	}
2830 
2831 	if (spread_flag_changed)
2832 		cpuset1_update_tasks_flags(cs);
2833 out:
2834 	free_cpuset(trialcs);
2835 	return err;
2836 }
2837 
2838 /**
2839  * update_prstate - update partition_root_state
2840  * @cs: the cpuset to update
2841  * @new_prs: new partition root state
2842  * Return: 0 if successful, != 0 if error
2843  *
2844  * Call with cpuset_mutex held.
2845  */
update_prstate(struct cpuset * cs,int new_prs)2846 static int update_prstate(struct cpuset *cs, int new_prs)
2847 {
2848 	int err = PERR_NONE, old_prs = cs->partition_root_state;
2849 	struct cpuset *parent = parent_cs(cs);
2850 	struct tmpmasks tmpmask;
2851 	bool isolcpus_updated = false;
2852 
2853 	if (old_prs == new_prs)
2854 		return 0;
2855 
2856 	/*
2857 	 * Treat a previously invalid partition root as if it is a "member".
2858 	 */
2859 	if (new_prs && is_partition_invalid(cs))
2860 		old_prs = PRS_MEMBER;
2861 
2862 	if (alloc_tmpmasks(&tmpmask))
2863 		return -ENOMEM;
2864 
2865 	err = update_partition_exclusive_flag(cs, new_prs);
2866 	if (err)
2867 		goto out;
2868 
2869 	if (!old_prs) {
2870 		/*
2871 		 * cpus_allowed and exclusive_cpus cannot be both empty.
2872 		 */
2873 		if (xcpus_empty(cs)) {
2874 			err = PERR_CPUSEMPTY;
2875 			goto out;
2876 		}
2877 
2878 		/*
2879 		 * We don't support the creation of a new local partition with
2880 		 * a remote partition underneath it. This unsupported
2881 		 * setting can happen only if parent is the top_cpuset because
2882 		 * a remote partition cannot be created underneath an existing
2883 		 * local or remote partition.
2884 		 */
2885 		if ((parent == &top_cpuset) &&
2886 		    cpumask_intersects(cs->exclusive_cpus, subpartitions_cpus)) {
2887 			err = PERR_REMOTE;
2888 			goto out;
2889 		}
2890 
2891 		/*
2892 		 * If parent is valid partition, enable local partiion.
2893 		 * Otherwise, enable a remote partition.
2894 		 */
2895 		if (is_partition_valid(parent)) {
2896 			enum partition_cmd cmd = (new_prs == PRS_ROOT)
2897 					       ? partcmd_enable : partcmd_enablei;
2898 
2899 			err = update_parent_effective_cpumask(cs, cmd, NULL, &tmpmask);
2900 		} else {
2901 			err = remote_partition_enable(cs, new_prs, &tmpmask);
2902 		}
2903 	} else if (old_prs && new_prs) {
2904 		/*
2905 		 * A change in load balance state only, no change in cpumasks.
2906 		 * Need to update isolated_cpus.
2907 		 */
2908 		if (((new_prs == PRS_ISOLATED) &&
2909 		     !isolated_cpus_can_update(cs->effective_xcpus, NULL)) ||
2910 		    prstate_housekeeping_conflict(new_prs, cs->effective_xcpus))
2911 			err = PERR_HKEEPING;
2912 		else
2913 			isolcpus_updated = true;
2914 	} else {
2915 		/*
2916 		 * Switching back to member is always allowed even if it
2917 		 * disables child partitions.
2918 		 */
2919 		if (is_remote_partition(cs))
2920 			remote_partition_disable(cs, &tmpmask);
2921 		else
2922 			update_parent_effective_cpumask(cs, partcmd_disable,
2923 							NULL, &tmpmask);
2924 
2925 		/*
2926 		 * Invalidation of child partitions will be done in
2927 		 * update_cpumasks_hier().
2928 		 */
2929 	}
2930 out:
2931 	/*
2932 	 * Make partition invalid & disable CS_CPU_EXCLUSIVE if an error
2933 	 * happens.
2934 	 */
2935 	if (err) {
2936 		new_prs = -new_prs;
2937 		update_partition_exclusive_flag(cs, new_prs);
2938 	}
2939 
2940 	spin_lock_irq(&callback_lock);
2941 	cs->partition_root_state = new_prs;
2942 	WRITE_ONCE(cs->prs_err, err);
2943 	if (!is_partition_valid(cs))
2944 		reset_partition_data(cs);
2945 	else if (isolcpus_updated)
2946 		isolated_cpus_update(old_prs, new_prs, cs->effective_xcpus);
2947 	spin_unlock_irq(&callback_lock);
2948 
2949 	/* Force update if switching back to member & update effective_xcpus */
2950 	update_cpumasks_hier(cs, &tmpmask, !new_prs);
2951 
2952 	/* A newly created partition must have effective_xcpus set */
2953 	WARN_ON_ONCE(!old_prs && (new_prs > 0)
2954 			      && cpumask_empty(cs->effective_xcpus));
2955 
2956 	/* Update sched domains and load balance flag */
2957 	update_partition_sd_lb(cs, old_prs);
2958 
2959 	notify_partition_change(cs, old_prs);
2960 	if (force_sd_rebuild)
2961 		rebuild_sched_domains_locked();
2962 	free_tmpmasks(&tmpmask);
2963 	return 0;
2964 }
2965 
2966 static struct cpuset *cpuset_attach_old_cs;
2967 
2968 /*
2969  * Check to see if a cpuset can accept a new task
2970  * For v1, cpus_allowed and mems_allowed can't be empty.
2971  * For v2, effective_cpus can't be empty.
2972  * Note that in v1, effective_cpus = cpus_allowed.
2973  */
cpuset_can_attach_check(struct cpuset * cs)2974 static int cpuset_can_attach_check(struct cpuset *cs)
2975 {
2976 	if (cpumask_empty(cs->effective_cpus) ||
2977 	   (!is_in_v2_mode() && nodes_empty(cs->mems_allowed)))
2978 		return -ENOSPC;
2979 	return 0;
2980 }
2981 
reset_migrate_dl_data(struct cpuset * cs)2982 static void reset_migrate_dl_data(struct cpuset *cs)
2983 {
2984 	cs->nr_migrate_dl_tasks = 0;
2985 	cs->sum_migrate_dl_bw = 0;
2986 	cs->dl_bw_cpu = -1;
2987 }
2988 
2989 /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
cpuset_can_attach(struct cgroup_taskset * tset)2990 static int cpuset_can_attach(struct cgroup_taskset *tset)
2991 {
2992 	struct cgroup_subsys_state *css;
2993 	struct cpuset *cs, *oldcs;
2994 	struct task_struct *task;
2995 	bool setsched_check;
2996 	int ret;
2997 
2998 	/* used later by cpuset_attach() */
2999 	cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
3000 	oldcs = cpuset_attach_old_cs;
3001 	cs = css_cs(css);
3002 
3003 	mutex_lock(&cpuset_mutex);
3004 
3005 	/* Check to see if task is allowed in the cpuset */
3006 	ret = cpuset_can_attach_check(cs);
3007 	if (ret)
3008 		goto out_unlock;
3009 
3010 	/*
3011 	 * Skip rights over task setsched check in v2 when nothing changes,
3012 	 * migration permission derives from hierarchy ownership in
3013 	 * cgroup_procs_write_permission()).
3014 	 */
3015 	setsched_check = !cpuset_v2() ||
3016 		!cpumask_equal(cs->effective_cpus, oldcs->effective_cpus) ||
3017 		!nodes_equal(cs->effective_mems, oldcs->effective_mems);
3018 
3019 	/*
3020 	 * A v1 cpuset with tasks will have no CPU left only when CPU hotplug
3021 	 * brings the last online CPU offline as users are not allowed to empty
3022 	 * cpuset.cpus when there are active tasks inside. When that happens,
3023 	 * we should allow tasks to migrate out without security check to make
3024 	 * sure they will be able to run after migration.
3025 	 */
3026 	if (!is_in_v2_mode() && cpumask_empty(oldcs->effective_cpus))
3027 		setsched_check = false;
3028 
3029 	cgroup_taskset_for_each(task, css, tset) {
3030 		ret = task_can_attach(task);
3031 		if (ret)
3032 			goto out_unlock;
3033 
3034 		if (setsched_check) {
3035 			ret = security_task_setscheduler(task);
3036 			if (ret)
3037 				goto out_unlock;
3038 		}
3039 
3040 		if (dl_task(task)) {
3041 			cs->nr_migrate_dl_tasks++;
3042 			cs->sum_migrate_dl_bw += task->dl.dl_bw;
3043 		}
3044 	}
3045 
3046 	if (!cs->nr_migrate_dl_tasks)
3047 		goto out_success;
3048 
3049 	if (!cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus)) {
3050 		int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus);
3051 
3052 		if (unlikely(cpu >= nr_cpu_ids)) {
3053 			reset_migrate_dl_data(cs);
3054 			ret = -EINVAL;
3055 			goto out_unlock;
3056 		}
3057 
3058 		ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw);
3059 		if (ret) {
3060 			reset_migrate_dl_data(cs);
3061 			goto out_unlock;
3062 		}
3063 
3064 		cs->dl_bw_cpu = cpu;
3065 	}
3066 
3067 out_success:
3068 	/*
3069 	 * Mark attach is in progress.  This makes validate_change() fail
3070 	 * changes which zero cpus/mems_allowed.
3071 	 */
3072 	cs->attach_in_progress++;
3073 out_unlock:
3074 	mutex_unlock(&cpuset_mutex);
3075 	return ret;
3076 }
3077 
cpuset_cancel_attach(struct cgroup_taskset * tset)3078 static void cpuset_cancel_attach(struct cgroup_taskset *tset)
3079 {
3080 	struct cgroup_subsys_state *css;
3081 	struct cpuset *cs;
3082 
3083 	cgroup_taskset_first(tset, &css);
3084 	cs = css_cs(css);
3085 
3086 	mutex_lock(&cpuset_mutex);
3087 	dec_attach_in_progress_locked(cs);
3088 
3089 	if (cs->dl_bw_cpu >= 0)
3090 		dl_bw_free(cs->dl_bw_cpu, cs->sum_migrate_dl_bw);
3091 
3092 	if (cs->nr_migrate_dl_tasks)
3093 		reset_migrate_dl_data(cs);
3094 
3095 	mutex_unlock(&cpuset_mutex);
3096 }
3097 
3098 /*
3099  * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach_task()
3100  * but we can't allocate it dynamically there.  Define it global and
3101  * allocate from cpuset_init().
3102  */
3103 static cpumask_var_t cpus_attach;
3104 static nodemask_t cpuset_attach_nodemask_to;
3105 
cpuset_attach_task(struct cpuset * cs,struct task_struct * task)3106 static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task)
3107 {
3108 	lockdep_assert_cpuset_lock_held();
3109 
3110 	if (cs != &top_cpuset)
3111 		guarantee_active_cpus(task, cpus_attach);
3112 	else
3113 		cpumask_andnot(cpus_attach, task_cpu_possible_mask(task),
3114 			       subpartitions_cpus);
3115 	/*
3116 	 * can_attach beforehand should guarantee that this doesn't
3117 	 * fail.  TODO: have a better way to handle failure here
3118 	 */
3119 	WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
3120 
3121 	cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
3122 	cpuset1_update_task_spread_flags(cs, task);
3123 }
3124 
cpuset_attach(struct cgroup_taskset * tset)3125 static void cpuset_attach(struct cgroup_taskset *tset)
3126 {
3127 	struct task_struct *task;
3128 	struct task_struct *leader;
3129 	struct cgroup_subsys_state *css;
3130 	struct cpuset *cs;
3131 	struct cpuset *oldcs = cpuset_attach_old_cs;
3132 	bool cpus_updated, mems_updated;
3133 	bool queue_task_work = false;
3134 
3135 	cgroup_taskset_first(tset, &css);
3136 	cs = css_cs(css);
3137 
3138 	lockdep_assert_cpus_held();	/* see cgroup_attach_lock() */
3139 	mutex_lock(&cpuset_mutex);
3140 	cpus_updated = !cpumask_equal(cs->effective_cpus,
3141 				      oldcs->effective_cpus);
3142 	mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems);
3143 
3144 	/*
3145 	 * In the default hierarchy, enabling cpuset in the child cgroups
3146 	 * will trigger a number of cpuset_attach() calls with no change
3147 	 * in effective cpus and mems. In that case, we can optimize out
3148 	 * by skipping the task iteration and update.
3149 	 */
3150 	if (cpuset_v2() && !cpus_updated && !mems_updated) {
3151 		cpuset_attach_nodemask_to = cs->effective_mems;
3152 		goto out;
3153 	}
3154 
3155 	guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
3156 
3157 	cgroup_taskset_for_each(task, css, tset)
3158 		cpuset_attach_task(cs, task);
3159 
3160 	/*
3161 	 * Change mm for all threadgroup leaders. This is expensive and may
3162 	 * sleep and should be moved outside migration path proper. Skip it
3163 	 * if there is no change in effective_mems and CS_MEMORY_MIGRATE is
3164 	 * not set.
3165 	 */
3166 	cpuset_attach_nodemask_to = cs->effective_mems;
3167 	if (!is_memory_migrate(cs) && !mems_updated)
3168 		goto out;
3169 
3170 	cgroup_taskset_for_each_leader(leader, css, tset) {
3171 		struct mm_struct *mm = get_task_mm(leader);
3172 
3173 		if (mm) {
3174 			mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
3175 
3176 			/*
3177 			 * old_mems_allowed is the same with mems_allowed
3178 			 * here, except if this task is being moved
3179 			 * automatically due to hotplug.  In that case
3180 			 * @mems_allowed has been updated and is empty, so
3181 			 * @old_mems_allowed is the right nodesets that we
3182 			 * migrate mm from.
3183 			 */
3184 			if (is_memory_migrate(cs)) {
3185 				cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
3186 						  &cpuset_attach_nodemask_to);
3187 				queue_task_work = true;
3188 			} else
3189 				mmput(mm);
3190 		}
3191 	}
3192 
3193 out:
3194 	if (queue_task_work)
3195 		schedule_flush_migrate_mm();
3196 	cs->old_mems_allowed = cpuset_attach_nodemask_to;
3197 
3198 	if (cs->nr_migrate_dl_tasks) {
3199 		cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks;
3200 		oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks;
3201 		reset_migrate_dl_data(cs);
3202 	}
3203 
3204 	dec_attach_in_progress_locked(cs);
3205 
3206 	mutex_unlock(&cpuset_mutex);
3207 }
3208 
3209 /*
3210  * Common handling for a write to a "cpus" or "mems" file.
3211  */
cpuset_write_resmask(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)3212 ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
3213 				    char *buf, size_t nbytes, loff_t off)
3214 {
3215 	struct cpuset *cs = css_cs(of_css(of));
3216 	struct cpuset *trialcs;
3217 	int retval = -ENODEV;
3218 
3219 	/* root is read-only */
3220 	if (cs == &top_cpuset)
3221 		return -EACCES;
3222 
3223 	buf = strstrip(buf);
3224 	cpuset_full_lock();
3225 	if (!is_cpuset_online(cs))
3226 		goto out_unlock;
3227 
3228 	trialcs = dup_or_alloc_cpuset(cs);
3229 	if (!trialcs) {
3230 		retval = -ENOMEM;
3231 		goto out_unlock;
3232 	}
3233 
3234 	switch (of_cft(of)->private) {
3235 	case FILE_CPULIST:
3236 		retval = update_cpumask(cs, trialcs, buf);
3237 		break;
3238 	case FILE_EXCLUSIVE_CPULIST:
3239 		retval = update_exclusive_cpumask(cs, trialcs, buf);
3240 		break;
3241 	case FILE_MEMLIST:
3242 		retval = update_nodemask(cs, trialcs, buf);
3243 		break;
3244 	default:
3245 		retval = -EINVAL;
3246 		break;
3247 	}
3248 
3249 	free_cpuset(trialcs);
3250 out_unlock:
3251 	cpuset_update_sd_hk_unlock();
3252 	if (of_cft(of)->private == FILE_MEMLIST)
3253 		schedule_flush_migrate_mm();
3254 	return retval ?: nbytes;
3255 }
3256 
3257 /*
3258  * These ascii lists should be read in a single call, by using a user
3259  * buffer large enough to hold the entire map.  If read in smaller
3260  * chunks, there is no guarantee of atomicity.  Since the display format
3261  * used, list of ranges of sequential numbers, is variable length,
3262  * and since these maps can change value dynamically, one could read
3263  * gibberish by doing partial reads while a list was changing.
3264  */
cpuset_common_seq_show(struct seq_file * sf,void * v)3265 int cpuset_common_seq_show(struct seq_file *sf, void *v)
3266 {
3267 	struct cpuset *cs = css_cs(seq_css(sf));
3268 	cpuset_filetype_t type = seq_cft(sf)->private;
3269 	int ret = 0;
3270 
3271 	spin_lock_irq(&callback_lock);
3272 
3273 	switch (type) {
3274 	case FILE_CPULIST:
3275 		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
3276 		break;
3277 	case FILE_MEMLIST:
3278 		seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
3279 		break;
3280 	case FILE_EFFECTIVE_CPULIST:
3281 		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus));
3282 		break;
3283 	case FILE_EFFECTIVE_MEMLIST:
3284 		seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems));
3285 		break;
3286 	case FILE_EXCLUSIVE_CPULIST:
3287 		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->exclusive_cpus));
3288 		break;
3289 	case FILE_EFFECTIVE_XCPULIST:
3290 		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_xcpus));
3291 		break;
3292 	case FILE_SUBPARTS_CPULIST:
3293 		seq_printf(sf, "%*pbl\n", cpumask_pr_args(subpartitions_cpus));
3294 		break;
3295 	case FILE_ISOLATED_CPULIST:
3296 		seq_printf(sf, "%*pbl\n", cpumask_pr_args(isolated_cpus));
3297 		break;
3298 	default:
3299 		ret = -EINVAL;
3300 	}
3301 
3302 	spin_unlock_irq(&callback_lock);
3303 	return ret;
3304 }
3305 
cpuset_partition_show(struct seq_file * seq,void * v)3306 static int cpuset_partition_show(struct seq_file *seq, void *v)
3307 {
3308 	struct cpuset *cs = css_cs(seq_css(seq));
3309 	const char *err, *type = NULL;
3310 
3311 	switch (cs->partition_root_state) {
3312 	case PRS_ROOT:
3313 		seq_puts(seq, "root\n");
3314 		break;
3315 	case PRS_ISOLATED:
3316 		seq_puts(seq, "isolated\n");
3317 		break;
3318 	case PRS_MEMBER:
3319 		seq_puts(seq, "member\n");
3320 		break;
3321 	case PRS_INVALID_ROOT:
3322 		type = "root";
3323 		fallthrough;
3324 	case PRS_INVALID_ISOLATED:
3325 		if (!type)
3326 			type = "isolated";
3327 		err = perr_strings[READ_ONCE(cs->prs_err)];
3328 		if (err)
3329 			seq_printf(seq, "%s invalid (%s)\n", type, err);
3330 		else
3331 			seq_printf(seq, "%s invalid\n", type);
3332 		break;
3333 	}
3334 	return 0;
3335 }
3336 
cpuset_partition_write(struct kernfs_open_file * of,char * buf,size_t nbytes,loff_t off)3337 static ssize_t cpuset_partition_write(struct kernfs_open_file *of, char *buf,
3338 				     size_t nbytes, loff_t off)
3339 {
3340 	struct cpuset *cs = css_cs(of_css(of));
3341 	int val;
3342 	int retval = -ENODEV;
3343 
3344 	buf = strstrip(buf);
3345 
3346 	if (!strcmp(buf, "root"))
3347 		val = PRS_ROOT;
3348 	else if (!strcmp(buf, "member"))
3349 		val = PRS_MEMBER;
3350 	else if (!strcmp(buf, "isolated"))
3351 		val = PRS_ISOLATED;
3352 	else
3353 		return -EINVAL;
3354 
3355 	cpuset_full_lock();
3356 	if (is_cpuset_online(cs))
3357 		retval = update_prstate(cs, val);
3358 	cpuset_update_sd_hk_unlock();
3359 	return retval ?: nbytes;
3360 }
3361 
3362 /*
3363  * This is currently a minimal set for the default hierarchy. It can be
3364  * expanded later on by migrating more features and control files from v1.
3365  */
3366 static struct cftype dfl_files[] = {
3367 	{
3368 		.name = "cpus",
3369 		.seq_show = cpuset_common_seq_show,
3370 		.write = cpuset_write_resmask,
3371 		.max_write_len = (100U + 6 * NR_CPUS),
3372 		.private = FILE_CPULIST,
3373 		.flags = CFTYPE_NOT_ON_ROOT,
3374 	},
3375 
3376 	{
3377 		.name = "mems",
3378 		.seq_show = cpuset_common_seq_show,
3379 		.write = cpuset_write_resmask,
3380 		.max_write_len = (100U + 6 * MAX_NUMNODES),
3381 		.private = FILE_MEMLIST,
3382 		.flags = CFTYPE_NOT_ON_ROOT,
3383 	},
3384 
3385 	{
3386 		.name = "cpus.effective",
3387 		.seq_show = cpuset_common_seq_show,
3388 		.private = FILE_EFFECTIVE_CPULIST,
3389 	},
3390 
3391 	{
3392 		.name = "mems.effective",
3393 		.seq_show = cpuset_common_seq_show,
3394 		.private = FILE_EFFECTIVE_MEMLIST,
3395 	},
3396 
3397 	{
3398 		.name = "cpus.partition",
3399 		.seq_show = cpuset_partition_show,
3400 		.write = cpuset_partition_write,
3401 		.private = FILE_PARTITION_ROOT,
3402 		.flags = CFTYPE_NOT_ON_ROOT,
3403 		.file_offset = offsetof(struct cpuset, partition_file),
3404 	},
3405 
3406 	{
3407 		.name = "cpus.exclusive",
3408 		.seq_show = cpuset_common_seq_show,
3409 		.write = cpuset_write_resmask,
3410 		.max_write_len = (100U + 6 * NR_CPUS),
3411 		.private = FILE_EXCLUSIVE_CPULIST,
3412 		.flags = CFTYPE_NOT_ON_ROOT,
3413 	},
3414 
3415 	{
3416 		.name = "cpus.exclusive.effective",
3417 		.seq_show = cpuset_common_seq_show,
3418 		.private = FILE_EFFECTIVE_XCPULIST,
3419 		.flags = CFTYPE_NOT_ON_ROOT,
3420 	},
3421 
3422 	{
3423 		.name = "cpus.subpartitions",
3424 		.seq_show = cpuset_common_seq_show,
3425 		.private = FILE_SUBPARTS_CPULIST,
3426 		.flags = CFTYPE_ONLY_ON_ROOT | CFTYPE_DEBUG,
3427 	},
3428 
3429 	{
3430 		.name = "cpus.isolated",
3431 		.seq_show = cpuset_common_seq_show,
3432 		.private = FILE_ISOLATED_CPULIST,
3433 		.flags = CFTYPE_ONLY_ON_ROOT,
3434 	},
3435 
3436 	{ }	/* terminate */
3437 };
3438 
3439 
3440 /**
3441  * cpuset_css_alloc - Allocate a cpuset css
3442  * @parent_css: Parent css of the control group that the new cpuset will be
3443  *              part of
3444  * Return: cpuset css on success, -ENOMEM on failure.
3445  *
3446  * Allocate and initialize a new cpuset css, for non-NULL @parent_css, return
3447  * top cpuset css otherwise.
3448  */
3449 static struct cgroup_subsys_state *
cpuset_css_alloc(struct cgroup_subsys_state * parent_css)3450 cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
3451 {
3452 	struct cpuset *cs;
3453 
3454 	if (!parent_css)
3455 		return &top_cpuset.css;
3456 
3457 	cs = dup_or_alloc_cpuset(NULL);
3458 	if (!cs)
3459 		return ERR_PTR(-ENOMEM);
3460 
3461 	__set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
3462 	cpuset1_init(cs);
3463 
3464 	/* Set CS_MEMORY_MIGRATE for default hierarchy */
3465 	if (cpuset_v2())
3466 		__set_bit(CS_MEMORY_MIGRATE, &cs->flags);
3467 
3468 	return &cs->css;
3469 }
3470 
cpuset_css_online(struct cgroup_subsys_state * css)3471 static int cpuset_css_online(struct cgroup_subsys_state *css)
3472 {
3473 	struct cpuset *cs = css_cs(css);
3474 	struct cpuset *parent = parent_cs(cs);
3475 
3476 	if (!parent)
3477 		return 0;
3478 
3479 	cpuset_full_lock();
3480 	/*
3481 	 * For v2, clear CS_SCHED_LOAD_BALANCE if parent is isolated
3482 	 */
3483 	if (cpuset_v2() && !is_sched_load_balance(parent))
3484 		clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
3485 
3486 	cpuset_inc();
3487 
3488 	spin_lock_irq(&callback_lock);
3489 	if (is_in_v2_mode()) {
3490 		cpumask_copy(cs->effective_cpus, parent->effective_cpus);
3491 		cs->effective_mems = parent->effective_mems;
3492 	}
3493 	spin_unlock_irq(&callback_lock);
3494 	cpuset1_online_css(css);
3495 
3496 	cpuset_full_unlock();
3497 	return 0;
3498 }
3499 
3500 /*
3501  * If the cpuset being removed has its flag 'sched_load_balance'
3502  * enabled, then simulate turning sched_load_balance off, which
3503  * will call rebuild_sched_domains_locked(). That is not needed
3504  * in the default hierarchy where only changes in partition
3505  * will cause repartitioning.
3506  */
cpuset_css_offline(struct cgroup_subsys_state * css)3507 static void cpuset_css_offline(struct cgroup_subsys_state *css)
3508 {
3509 	struct cpuset *cs = css_cs(css);
3510 
3511 	cpuset_full_lock();
3512 	if (!cpuset_v2() && is_sched_load_balance(cs))
3513 		cpuset_update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
3514 
3515 	cpuset_dec();
3516 	cpuset_full_unlock();
3517 }
3518 
3519 /*
3520  * If a dying cpuset has the 'cpus.partition' enabled, turn it off by
3521  * changing it back to member to free its exclusive CPUs back to the pool to
3522  * be used by other online cpusets.
3523  */
cpuset_css_killed(struct cgroup_subsys_state * css)3524 static void cpuset_css_killed(struct cgroup_subsys_state *css)
3525 {
3526 	struct cpuset *cs = css_cs(css);
3527 
3528 	cpuset_full_lock();
3529 	/* Reset valid partition back to member */
3530 	if (is_partition_valid(cs))
3531 		update_prstate(cs, PRS_MEMBER);
3532 	cpuset_update_sd_hk_unlock();
3533 }
3534 
cpuset_css_free(struct cgroup_subsys_state * css)3535 static void cpuset_css_free(struct cgroup_subsys_state *css)
3536 {
3537 	struct cpuset *cs = css_cs(css);
3538 
3539 	free_cpuset(cs);
3540 }
3541 
cpuset_bind(struct cgroup_subsys_state * root_css)3542 static void cpuset_bind(struct cgroup_subsys_state *root_css)
3543 {
3544 	mutex_lock(&cpuset_mutex);
3545 	spin_lock_irq(&callback_lock);
3546 
3547 	if (is_in_v2_mode()) {
3548 		cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
3549 		cpumask_copy(top_cpuset.effective_xcpus, cpu_possible_mask);
3550 		top_cpuset.mems_allowed = node_possible_map;
3551 	} else {
3552 		cpumask_copy(top_cpuset.cpus_allowed,
3553 			     top_cpuset.effective_cpus);
3554 		top_cpuset.mems_allowed = top_cpuset.effective_mems;
3555 	}
3556 
3557 	spin_unlock_irq(&callback_lock);
3558 	mutex_unlock(&cpuset_mutex);
3559 }
3560 
3561 /*
3562  * In case the child is cloned into a cpuset different from its parent,
3563  * additional checks are done to see if the move is allowed.
3564  */
cpuset_can_fork(struct task_struct * task,struct css_set * cset)3565 static int cpuset_can_fork(struct task_struct *task, struct css_set *cset)
3566 {
3567 	struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
3568 	bool same_cs;
3569 	int ret;
3570 
3571 	rcu_read_lock();
3572 	same_cs = (cs == task_cs(current));
3573 	rcu_read_unlock();
3574 
3575 	if (same_cs)
3576 		return 0;
3577 
3578 	lockdep_assert_held(&cgroup_mutex);
3579 	mutex_lock(&cpuset_mutex);
3580 
3581 	/* Check to see if task is allowed in the cpuset */
3582 	ret = cpuset_can_attach_check(cs);
3583 	if (ret)
3584 		goto out_unlock;
3585 
3586 	ret = task_can_attach(task);
3587 	if (ret)
3588 		goto out_unlock;
3589 
3590 	ret = security_task_setscheduler(task);
3591 	if (ret)
3592 		goto out_unlock;
3593 
3594 	/*
3595 	 * Mark attach is in progress.  This makes validate_change() fail
3596 	 * changes which zero cpus/mems_allowed.
3597 	 */
3598 	cs->attach_in_progress++;
3599 out_unlock:
3600 	mutex_unlock(&cpuset_mutex);
3601 	return ret;
3602 }
3603 
cpuset_cancel_fork(struct task_struct * task,struct css_set * cset)3604 static void cpuset_cancel_fork(struct task_struct *task, struct css_set *cset)
3605 {
3606 	struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
3607 	bool same_cs;
3608 
3609 	rcu_read_lock();
3610 	same_cs = (cs == task_cs(current));
3611 	rcu_read_unlock();
3612 
3613 	if (same_cs)
3614 		return;
3615 
3616 	dec_attach_in_progress(cs);
3617 }
3618 
3619 /*
3620  * Make sure the new task conform to the current state of its parent,
3621  * which could have been changed by cpuset just after it inherits the
3622  * state from the parent and before it sits on the cgroup's task list.
3623  */
cpuset_fork(struct task_struct * task)3624 static void cpuset_fork(struct task_struct *task)
3625 {
3626 	struct cpuset *cs;
3627 	bool same_cs;
3628 
3629 	rcu_read_lock();
3630 	cs = task_cs(task);
3631 	same_cs = (cs == task_cs(current));
3632 	rcu_read_unlock();
3633 
3634 	if (same_cs) {
3635 		if (cs == &top_cpuset)
3636 			return;
3637 
3638 		set_cpus_allowed_ptr(task, current->cpus_ptr);
3639 		task->mems_allowed = current->mems_allowed;
3640 		return;
3641 	}
3642 
3643 	/* CLONE_INTO_CGROUP */
3644 	mutex_lock(&cpuset_mutex);
3645 	guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
3646 	cpuset_attach_task(cs, task);
3647 
3648 	dec_attach_in_progress_locked(cs);
3649 	mutex_unlock(&cpuset_mutex);
3650 }
3651 
3652 struct cgroup_subsys cpuset_cgrp_subsys = {
3653 	.css_alloc	= cpuset_css_alloc,
3654 	.css_online	= cpuset_css_online,
3655 	.css_offline	= cpuset_css_offline,
3656 	.css_killed	= cpuset_css_killed,
3657 	.css_free	= cpuset_css_free,
3658 	.can_attach	= cpuset_can_attach,
3659 	.cancel_attach	= cpuset_cancel_attach,
3660 	.attach		= cpuset_attach,
3661 	.bind		= cpuset_bind,
3662 	.can_fork	= cpuset_can_fork,
3663 	.cancel_fork	= cpuset_cancel_fork,
3664 	.fork		= cpuset_fork,
3665 #ifdef CONFIG_CPUSETS_V1
3666 	.legacy_cftypes	= cpuset1_files,
3667 #endif
3668 	.dfl_cftypes	= dfl_files,
3669 	.early_init	= true,
3670 	.threaded	= true,
3671 };
3672 
3673 /**
3674  * cpuset_init - initialize cpusets at system boot
3675  *
3676  * Description: Initialize top_cpuset
3677  **/
3678 
cpuset_init(void)3679 int __init cpuset_init(void)
3680 {
3681 	BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL));
3682 	BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL));
3683 	BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_xcpus, GFP_KERNEL));
3684 	BUG_ON(!alloc_cpumask_var(&top_cpuset.exclusive_cpus, GFP_KERNEL));
3685 	BUG_ON(!zalloc_cpumask_var(&subpartitions_cpus, GFP_KERNEL));
3686 	BUG_ON(!zalloc_cpumask_var(&isolated_cpus, GFP_KERNEL));
3687 	BUG_ON(!zalloc_cpumask_var(&isolated_hk_cpus, GFP_KERNEL));
3688 
3689 	cpumask_setall(top_cpuset.cpus_allowed);
3690 	nodes_setall(top_cpuset.mems_allowed);
3691 	cpumask_setall(top_cpuset.effective_cpus);
3692 	cpumask_setall(top_cpuset.effective_xcpus);
3693 	cpumask_setall(top_cpuset.exclusive_cpus);
3694 	nodes_setall(top_cpuset.effective_mems);
3695 
3696 	cpuset1_init(&top_cpuset);
3697 
3698 	BUG_ON(!alloc_cpumask_var(&cpus_attach, GFP_KERNEL));
3699 
3700 	if (housekeeping_enabled(HK_TYPE_DOMAIN_BOOT))
3701 		cpumask_andnot(isolated_cpus, cpu_possible_mask,
3702 			       housekeeping_cpumask(HK_TYPE_DOMAIN_BOOT));
3703 
3704 	return 0;
3705 }
3706 
3707 static void
hotplug_update_tasks(struct cpuset * cs,struct cpumask * new_cpus,nodemask_t * new_mems,bool cpus_updated,bool mems_updated)3708 hotplug_update_tasks(struct cpuset *cs,
3709 		     struct cpumask *new_cpus, nodemask_t *new_mems,
3710 		     bool cpus_updated, bool mems_updated)
3711 {
3712 	/* A partition root is allowed to have empty effective cpus */
3713 	if (cpumask_empty(new_cpus) && !is_partition_valid(cs))
3714 		cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus);
3715 	if (nodes_empty(*new_mems))
3716 		*new_mems = parent_cs(cs)->effective_mems;
3717 
3718 	spin_lock_irq(&callback_lock);
3719 	cpumask_copy(cs->effective_cpus, new_cpus);
3720 	cs->effective_mems = *new_mems;
3721 	spin_unlock_irq(&callback_lock);
3722 
3723 	if (cpus_updated)
3724 		cpuset_update_tasks_cpumask(cs, new_cpus);
3725 	if (mems_updated)
3726 		cpuset_update_tasks_nodemask(cs);
3727 }
3728 
cpuset_force_rebuild(void)3729 void cpuset_force_rebuild(void)
3730 {
3731 	force_sd_rebuild = true;
3732 }
3733 
3734 /**
3735  * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
3736  * @cs: cpuset in interest
3737  * @tmp: the tmpmasks structure pointer
3738  *
3739  * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
3740  * offline, update @cs accordingly.  If @cs ends up with no CPU or memory,
3741  * all its tasks are moved to the nearest ancestor with both resources.
3742  */
cpuset_hotplug_update_tasks(struct cpuset * cs,struct tmpmasks * tmp)3743 static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
3744 {
3745 	static cpumask_t new_cpus;
3746 	static nodemask_t new_mems;
3747 	bool cpus_updated;
3748 	bool mems_updated;
3749 	bool remote;
3750 	int partcmd = -1;
3751 	struct cpuset *parent;
3752 retry:
3753 	wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
3754 
3755 	mutex_lock(&cpuset_mutex);
3756 
3757 	/*
3758 	 * We have raced with task attaching. We wait until attaching
3759 	 * is finished, so we won't attach a task to an empty cpuset.
3760 	 */
3761 	if (cs->attach_in_progress) {
3762 		mutex_unlock(&cpuset_mutex);
3763 		goto retry;
3764 	}
3765 
3766 	parent = parent_cs(cs);
3767 	compute_effective_cpumask(&new_cpus, cs, parent);
3768 	nodes_and(new_mems, cs->mems_allowed, parent->effective_mems);
3769 
3770 	if (!tmp || !cs->partition_root_state)
3771 		goto update_tasks;
3772 
3773 	/*
3774 	 * Compute effective_cpus for valid partition root, may invalidate
3775 	 * child partition roots if necessary.
3776 	 */
3777 	remote = is_remote_partition(cs);
3778 	if (remote || (is_partition_valid(cs) && is_partition_valid(parent)))
3779 		compute_partition_effective_cpumask(cs, &new_cpus);
3780 
3781 	if (remote && (cpumask_empty(subpartitions_cpus) ||
3782 			(cpumask_empty(&new_cpus) &&
3783 			 partition_is_populated(cs, NULL)))) {
3784 		cs->prs_err = PERR_HOTPLUG;
3785 		remote_partition_disable(cs, tmp);
3786 		compute_effective_cpumask(&new_cpus, cs, parent);
3787 		remote = false;
3788 	}
3789 
3790 	/*
3791 	 * Force the partition to become invalid if either one of
3792 	 * the following conditions hold:
3793 	 * 1) empty effective cpus but not valid empty partition.
3794 	 * 2) parent is invalid or doesn't grant any cpus to child
3795 	 *    partitions.
3796 	 * 3) subpartitions_cpus is empty.
3797 	 */
3798 	if (is_local_partition(cs) &&
3799 	    (!is_partition_valid(parent) ||
3800 	     tasks_nocpu_error(parent, cs, &new_cpus) ||
3801 	     cpumask_empty(subpartitions_cpus)))
3802 		partcmd = partcmd_invalidate;
3803 	/*
3804 	 * On the other hand, an invalid partition root may be transitioned
3805 	 * back to a regular one with a non-empty effective xcpus.
3806 	 */
3807 	else if (is_partition_valid(parent) && is_partition_invalid(cs) &&
3808 		 !cpumask_empty(cs->effective_xcpus))
3809 		partcmd = partcmd_update;
3810 
3811 	if (partcmd >= 0) {
3812 		update_parent_effective_cpumask(cs, partcmd, NULL, tmp);
3813 		if ((partcmd == partcmd_invalidate) || is_partition_valid(cs)) {
3814 			compute_partition_effective_cpumask(cs, &new_cpus);
3815 			cpuset_force_rebuild();
3816 		}
3817 	}
3818 
3819 update_tasks:
3820 	cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
3821 	mems_updated = !nodes_equal(new_mems, cs->effective_mems);
3822 	if (!cpus_updated && !mems_updated)
3823 		goto unlock;	/* Hotplug doesn't affect this cpuset */
3824 
3825 	if (mems_updated)
3826 		check_insane_mems_config(&new_mems);
3827 
3828 	if (is_in_v2_mode())
3829 		hotplug_update_tasks(cs, &new_cpus, &new_mems,
3830 				     cpus_updated, mems_updated);
3831 	else
3832 		cpuset1_hotplug_update_tasks(cs, &new_cpus, &new_mems,
3833 					    cpus_updated, mems_updated);
3834 
3835 unlock:
3836 	mutex_unlock(&cpuset_mutex);
3837 }
3838 
3839 /**
3840  * cpuset_handle_hotplug - handle CPU/memory hot{,un}plug for a cpuset
3841  *
3842  * This function is called after either CPU or memory configuration has
3843  * changed and updates cpuset accordingly.  The top_cpuset is always
3844  * synchronized to cpu_active_mask and N_MEMORY, which is necessary in
3845  * order to make cpusets transparent (of no affect) on systems that are
3846  * actively using CPU hotplug but making no active use of cpusets.
3847  *
3848  * Non-root cpusets are only affected by offlining.  If any CPUs or memory
3849  * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on
3850  * all descendants.
3851  *
3852  * Note that CPU offlining during suspend is ignored.  We don't modify
3853  * cpusets across suspend/resume cycles at all.
3854  *
3855  * CPU / memory hotplug is handled synchronously.
3856  */
cpuset_handle_hotplug(void)3857 static void cpuset_handle_hotplug(void)
3858 {
3859 	static DECLARE_WORK(hk_sd_work, hk_sd_workfn);
3860 	static cpumask_t new_cpus;
3861 	static nodemask_t new_mems;
3862 	bool cpus_updated, mems_updated;
3863 	bool on_dfl = is_in_v2_mode();
3864 	struct tmpmasks tmp, *ptmp = NULL;
3865 
3866 	if (on_dfl && !alloc_tmpmasks(&tmp))
3867 		ptmp = &tmp;
3868 
3869 	lockdep_assert_cpus_held();
3870 	mutex_lock(&cpuset_mutex);
3871 
3872 	/* fetch the available cpus/mems and find out which changed how */
3873 	cpumask_copy(&new_cpus, cpu_active_mask);
3874 	new_mems = node_states[N_MEMORY];
3875 
3876 	/*
3877 	 * If subpartitions_cpus is populated, it is likely that the check
3878 	 * below will produce a false positive on cpus_updated when the cpu
3879 	 * list isn't changed. It is extra work, but it is better to be safe.
3880 	 */
3881 	cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus) ||
3882 		       !cpumask_empty(subpartitions_cpus);
3883 	mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);
3884 
3885 	/* For v1, synchronize cpus_allowed to cpu_active_mask */
3886 	if (cpus_updated) {
3887 		cpuset_force_rebuild();
3888 		spin_lock_irq(&callback_lock);
3889 		if (!on_dfl)
3890 			cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
3891 		/*
3892 		 * Make sure that CPUs allocated to child partitions
3893 		 * do not show up in effective_cpus. If no CPU is left,
3894 		 * we clear the subpartitions_cpus & let the child partitions
3895 		 * fight for the CPUs again.
3896 		 */
3897 		if (!cpumask_empty(subpartitions_cpus)) {
3898 			if (cpumask_subset(&new_cpus, subpartitions_cpus)) {
3899 				cpumask_clear(subpartitions_cpus);
3900 			} else {
3901 				cpumask_andnot(&new_cpus, &new_cpus,
3902 					       subpartitions_cpus);
3903 			}
3904 		}
3905 		cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
3906 		spin_unlock_irq(&callback_lock);
3907 		/* we don't mess with cpumasks of tasks in top_cpuset */
3908 	}
3909 
3910 	/* synchronize mems_allowed to N_MEMORY */
3911 	if (mems_updated) {
3912 		spin_lock_irq(&callback_lock);
3913 		if (!on_dfl)
3914 			top_cpuset.mems_allowed = new_mems;
3915 		top_cpuset.effective_mems = new_mems;
3916 		spin_unlock_irq(&callback_lock);
3917 		cpuset_update_tasks_nodemask(&top_cpuset);
3918 	}
3919 
3920 	mutex_unlock(&cpuset_mutex);
3921 
3922 	/* if cpus or mems changed, we need to propagate to descendants */
3923 	if (cpus_updated || mems_updated) {
3924 		struct cpuset *cs;
3925 		struct cgroup_subsys_state *pos_css;
3926 
3927 		rcu_read_lock();
3928 		cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
3929 			if (cs == &top_cpuset || !css_tryget_online(&cs->css))
3930 				continue;
3931 			rcu_read_unlock();
3932 
3933 			cpuset_hotplug_update_tasks(cs, ptmp);
3934 
3935 			rcu_read_lock();
3936 			css_put(&cs->css);
3937 		}
3938 		rcu_read_unlock();
3939 	}
3940 
3941 	/*
3942 	 * rebuild_sched_domains() will always be called directly if needed
3943 	 * to make sure that newly added or removed CPU will be reflected in
3944 	 * the sched domains. However, if isolated partition invalidation
3945 	 * or recreation is being done (update_housekeeping set), a work item
3946 	 * will be queued to call housekeeping_update() to update the
3947 	 * corresponding housekeeping cpumasks after some slight delay.
3948 	 *
3949 	 * We rely on WORK_STRUCT_PENDING_BIT to not requeue a work item that
3950 	 * is still pending. Before the pending bit is cleared, the work data
3951 	 * is copied out and work item dequeued. So it is possible to queue
3952 	 * the work again before the hk_sd_workfn() is invoked to process the
3953 	 * previously queued work. Since hk_sd_workfn() doesn't use the work
3954 	 * item at all, this is not a problem.
3955 	 */
3956 	if (force_sd_rebuild)
3957 		rebuild_sched_domains_cpuslocked();
3958 	if (update_housekeeping)
3959 		queue_work(system_dfl_wq, &hk_sd_work);
3960 
3961 	free_tmpmasks(ptmp);
3962 }
3963 
cpuset_update_active_cpus(void)3964 void cpuset_update_active_cpus(void)
3965 {
3966 	/*
3967 	 * We're inside cpu hotplug critical region which usually nests
3968 	 * inside cgroup synchronization.  Bounce actual hotplug processing
3969 	 * to a work item to avoid reverse locking order.
3970 	 */
3971 	cpuset_handle_hotplug();
3972 }
3973 
3974 /*
3975  * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
3976  * Call this routine anytime after node_states[N_MEMORY] changes.
3977  * See cpuset_update_active_cpus() for CPU hotplug handling.
3978  */
cpuset_track_online_nodes(struct notifier_block * self,unsigned long action,void * arg)3979 static int cpuset_track_online_nodes(struct notifier_block *self,
3980 				unsigned long action, void *arg)
3981 {
3982 	cpuset_handle_hotplug();
3983 	return NOTIFY_OK;
3984 }
3985 
3986 /**
3987  * cpuset_init_smp - initialize cpus_allowed
3988  *
3989  * Description: Finish top cpuset after cpu, node maps are initialized
3990  */
cpuset_init_smp(void)3991 void __init cpuset_init_smp(void)
3992 {
3993 	/*
3994 	 * cpus_allowd/mems_allowed set to v2 values in the initial
3995 	 * cpuset_bind() call will be reset to v1 values in another
3996 	 * cpuset_bind() call when v1 cpuset is mounted.
3997 	 */
3998 	top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;
3999 
4000 	cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask);
4001 	top_cpuset.effective_mems = node_states[N_MEMORY];
4002 
4003 	hotplug_node_notifier(cpuset_track_online_nodes, CPUSET_CALLBACK_PRI);
4004 
4005 	cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
4006 	BUG_ON(!cpuset_migrate_mm_wq);
4007 }
4008 
4009 /*
4010  * Return cpus_allowed mask from a task's cpuset.
4011  */
__cpuset_cpus_allowed_locked(struct task_struct * tsk,struct cpumask * pmask)4012 static void __cpuset_cpus_allowed_locked(struct task_struct *tsk, struct cpumask *pmask)
4013 {
4014 	struct cpuset *cs;
4015 
4016 	cs = task_cs(tsk);
4017 	if (cs != &top_cpuset)
4018 		guarantee_active_cpus(tsk, pmask);
4019 	/*
4020 	 * Tasks in the top cpuset won't get update to their cpumasks
4021 	 * when a hotplug online/offline event happens. So we include all
4022 	 * offline cpus in the allowed cpu list.
4023 	 */
4024 	if ((cs == &top_cpuset) || cpumask_empty(pmask)) {
4025 		const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
4026 
4027 		/*
4028 		 * We first exclude cpus allocated to partitions. If there is no
4029 		 * allowable online cpu left, we fall back to all possible cpus.
4030 		 */
4031 		cpumask_andnot(pmask, possible_mask, subpartitions_cpus);
4032 		if (!cpumask_intersects(pmask, cpu_active_mask))
4033 			cpumask_copy(pmask, possible_mask);
4034 	}
4035 }
4036 
4037 /**
4038  * cpuset_cpus_allowed_locked - return cpus_allowed mask from a task's cpuset.
4039  * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
4040  * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
4041  *
4042  * Similir to cpuset_cpus_allowed() except that the caller must have acquired
4043  * cpuset_mutex.
4044  */
cpuset_cpus_allowed_locked(struct task_struct * tsk,struct cpumask * pmask)4045 void cpuset_cpus_allowed_locked(struct task_struct *tsk, struct cpumask *pmask)
4046 {
4047 	lockdep_assert_cpuset_lock_held();
4048 	__cpuset_cpus_allowed_locked(tsk, pmask);
4049 }
4050 
4051 /**
4052  * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
4053  * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
4054  * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
4055  *
4056  * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
4057  * attached to the specified @tsk.  Guaranteed to return some non-empty
4058  * subset of cpu_active_mask, even if this means going outside the
4059  * tasks cpuset, except when the task is in the top cpuset.
4060  **/
4061 
cpuset_cpus_allowed(struct task_struct * tsk,struct cpumask * pmask)4062 void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
4063 {
4064 	unsigned long flags;
4065 
4066 	spin_lock_irqsave(&callback_lock, flags);
4067 	__cpuset_cpus_allowed_locked(tsk, pmask);
4068 	spin_unlock_irqrestore(&callback_lock, flags);
4069 }
4070 
4071 /**
4072  * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
4073  * @tsk: pointer to task_struct with which the scheduler is struggling
4074  *
4075  * Description: In the case that the scheduler cannot find an allowed cpu in
4076  * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy
4077  * mode however, this value is the same as task_cs(tsk)->effective_cpus,
4078  * which will not contain a sane cpumask during cases such as cpu hotplugging.
4079  * This is the absolute last resort for the scheduler and it is only used if
4080  * _every_ other avenue has been traveled.
4081  *
4082  * Returns true if the affinity of @tsk was changed, false otherwise.
4083  **/
4084 
cpuset_cpus_allowed_fallback(struct task_struct * tsk)4085 bool cpuset_cpus_allowed_fallback(struct task_struct *tsk)
4086 {
4087 	const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
4088 	const struct cpumask *cs_mask;
4089 	bool changed = false;
4090 
4091 	rcu_read_lock();
4092 	cs_mask = task_cs(tsk)->cpus_allowed;
4093 	if (is_in_v2_mode() && cpumask_subset(cs_mask, possible_mask)) {
4094 		set_cpus_allowed_force(tsk, cs_mask);
4095 		changed = true;
4096 	}
4097 	rcu_read_unlock();
4098 
4099 	/*
4100 	 * We own tsk->cpus_allowed, nobody can change it under us.
4101 	 *
4102 	 * But we used cs && cs->cpus_allowed lockless and thus can
4103 	 * race with cgroup_attach_task() or update_cpumask() and get
4104 	 * the wrong tsk->cpus_allowed. However, both cases imply the
4105 	 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
4106 	 * which takes task_rq_lock().
4107 	 *
4108 	 * If we are called after it dropped the lock we must see all
4109 	 * changes in tsk_cs()->cpus_allowed. Otherwise we can temporary
4110 	 * set any mask even if it is not right from task_cs() pov,
4111 	 * the pending set_cpus_allowed_ptr() will fix things.
4112 	 *
4113 	 * select_fallback_rq() will fix things ups and set cpu_possible_mask
4114 	 * if required.
4115 	 */
4116 	return changed;
4117 }
4118 
cpuset_init_current_mems_allowed(void)4119 void __init cpuset_init_current_mems_allowed(void)
4120 {
4121 	nodes_setall(current->mems_allowed);
4122 }
4123 
4124 /**
4125  * cpuset_mems_allowed - return mems_allowed mask from a tasks cpuset.
4126  * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
4127  *
4128  * Description: Returns the nodemask_t mems_allowed of the cpuset
4129  * attached to the specified @tsk.  Guaranteed to return some non-empty
4130  * subset of node_states[N_MEMORY], even if this means going outside the
4131  * tasks cpuset.
4132  **/
4133 
cpuset_mems_allowed(struct task_struct * tsk)4134 nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
4135 {
4136 	nodemask_t mask;
4137 	unsigned long flags;
4138 
4139 	spin_lock_irqsave(&callback_lock, flags);
4140 	guarantee_online_mems(task_cs(tsk), &mask);
4141 	spin_unlock_irqrestore(&callback_lock, flags);
4142 
4143 	return mask;
4144 }
4145 
4146 /**
4147  * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
4148  * @nodemask: the nodemask to be checked
4149  *
4150  * Are any of the nodes in the nodemask allowed in current->mems_allowed?
4151  */
cpuset_nodemask_valid_mems_allowed(nodemask_t * nodemask)4152 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
4153 {
4154 	return nodes_intersects(*nodemask, current->mems_allowed);
4155 }
4156 
4157 /*
4158  * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
4159  * mem_hardwall ancestor to the specified cpuset.  Call holding
4160  * callback_lock.  If no ancestor is mem_exclusive or mem_hardwall
4161  * (an unusual configuration), then returns the root cpuset.
4162  */
nearest_hardwall_ancestor(struct cpuset * cs)4163 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
4164 {
4165 	while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
4166 		cs = parent_cs(cs);
4167 	return cs;
4168 }
4169 
4170 /*
4171  * cpuset_current_node_allowed - Can current task allocate on a memory node?
4172  * @node: is this an allowed node?
4173  * @gfp_mask: memory allocation flags
4174  *
4175  * If we're in interrupt, yes, we can always allocate.  If @node is set in
4176  * current's mems_allowed, yes.  If it's not a __GFP_HARDWALL request and this
4177  * node is set in the nearest hardwalled cpuset ancestor to current's cpuset,
4178  * yes.  If current has access to memory reserves as an oom victim, yes.
4179  * Otherwise, no.
4180  *
4181  * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
4182  * and do not allow allocations outside the current tasks cpuset
4183  * unless the task has been OOM killed.
4184  * GFP_KERNEL allocations are not so marked, so can escape to the
4185  * nearest enclosing hardwalled ancestor cpuset.
4186  *
4187  * Scanning up parent cpusets requires callback_lock.  The
4188  * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
4189  * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
4190  * current tasks mems_allowed came up empty on the first pass over
4191  * the zonelist.  So only GFP_KERNEL allocations, if all nodes in the
4192  * cpuset are short of memory, might require taking the callback_lock.
4193  *
4194  * The first call here from mm/page_alloc:get_page_from_freelist()
4195  * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
4196  * so no allocation on a node outside the cpuset is allowed (unless
4197  * in interrupt, of course).
4198  *
4199  * The second pass through get_page_from_freelist() doesn't even call
4200  * here for GFP_ATOMIC calls.  For those calls, the __alloc_pages()
4201  * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
4202  * in alloc_flags.  That logic and the checks below have the combined
4203  * affect that:
4204  *	in_interrupt - any node ok (current task context irrelevant)
4205  *	GFP_ATOMIC   - any node ok
4206  *	tsk_is_oom_victim   - any node ok
4207  *	GFP_KERNEL   - any node in enclosing hardwalled cpuset ok
4208  *	GFP_USER     - only nodes in current tasks mems allowed ok.
4209  */
cpuset_current_node_allowed(int node,gfp_t gfp_mask)4210 bool cpuset_current_node_allowed(int node, gfp_t gfp_mask)
4211 {
4212 	struct cpuset *cs;		/* current cpuset ancestors */
4213 	bool allowed;			/* is allocation in zone z allowed? */
4214 	unsigned long flags;
4215 
4216 	if (in_interrupt())
4217 		return true;
4218 	if (node_isset(node, current->mems_allowed))
4219 		return true;
4220 	/*
4221 	 * Allow tasks that have access to memory reserves because they have
4222 	 * been OOM killed to get memory anywhere.
4223 	 */
4224 	if (unlikely(tsk_is_oom_victim(current)))
4225 		return true;
4226 	if (gfp_mask & __GFP_HARDWALL)	/* If hardwall request, stop here */
4227 		return false;
4228 
4229 	if (current->flags & PF_EXITING) /* Let dying task have memory */
4230 		return true;
4231 
4232 	/* Not hardwall and node outside mems_allowed: scan up cpusets */
4233 	spin_lock_irqsave(&callback_lock, flags);
4234 
4235 	cs = nearest_hardwall_ancestor(task_cs(current));
4236 	allowed = node_isset(node, cs->mems_allowed);
4237 
4238 	spin_unlock_irqrestore(&callback_lock, flags);
4239 	return allowed;
4240 }
4241 
4242 /**
4243  * cpuset_nodes_allowed - return effective_mems mask from a cgroup cpuset.
4244  * @cgroup: pointer to struct cgroup.
4245  * @mask: pointer to struct nodemask_t to be returned.
4246  *
4247  * Returns effective_mems mask from a cgroup cpuset if it is cgroup v2 and
4248  * has cpuset subsys. Otherwise, returns node_states[N_MEMORY].
4249  *
4250  * This function intentionally avoids taking the cpuset_mutex or callback_lock
4251  * when accessing effective_mems. This is because the obtained effective_mems
4252  * is stale immediately after the query anyway (e.g., effective_mems is updated
4253  * immediately after releasing the lock but before returning).
4254  *
4255  * As a result, returned @mask may be empty because cs->effective_mems can be
4256  * rebound during this call. Besides, nodes in @mask are not guaranteed to be
4257  * online due to hot plugins. Callers should check the mask for validity on
4258  * return based on its subsequent use.
4259  **/
cpuset_nodes_allowed(struct cgroup * cgroup,nodemask_t * mask)4260 void cpuset_nodes_allowed(struct cgroup *cgroup, nodemask_t *mask)
4261 {
4262 	struct cgroup_subsys_state *css;
4263 	struct cpuset *cs;
4264 
4265 	/*
4266 	 * In v1, mem_cgroup and cpuset are unlikely in the same hierarchy
4267 	 * and mems_allowed is likely to be empty even if we could get to it,
4268 	 * so return directly to avoid taking a global lock on the empty check.
4269 	 */
4270 	if (!cgroup || !cpuset_v2()) {
4271 		nodes_copy(*mask, node_states[N_MEMORY]);
4272 		return;
4273 	}
4274 
4275 	css = cgroup_get_e_css(cgroup, &cpuset_cgrp_subsys);
4276 	if (!css) {
4277 		nodes_copy(*mask, node_states[N_MEMORY]);
4278 		return;
4279 	}
4280 
4281 	/*
4282 	 * The reference taken via cgroup_get_e_css is sufficient to
4283 	 * protect css, but it does not imply safe accesses to effective_mems.
4284 	 *
4285 	 * Normally, accessing effective_mems would require the cpuset_mutex
4286 	 * or callback_lock - but the correctness of this information is stale
4287 	 * immediately after the query anyway. We do not acquire the lock
4288 	 * during this process to save lock contention in exchange for racing
4289 	 * against mems_allowed rebinds.
4290 	 */
4291 	cs = container_of(css, struct cpuset, css);
4292 	nodes_copy(*mask, cs->effective_mems);
4293 	css_put(css);
4294 }
4295 
4296 /**
4297  * cpuset_spread_node() - On which node to begin search for a page
4298  * @rotor: round robin rotor
4299  *
4300  * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
4301  * tasks in a cpuset with is_spread_page or is_spread_slab set),
4302  * and if the memory allocation used cpuset_mem_spread_node()
4303  * to determine on which node to start looking, as it will for
4304  * certain page cache or slab cache pages such as used for file
4305  * system buffers and inode caches, then instead of starting on the
4306  * local node to look for a free page, rather spread the starting
4307  * node around the tasks mems_allowed nodes.
4308  *
4309  * We don't have to worry about the returned node being offline
4310  * because "it can't happen", and even if it did, it would be ok.
4311  *
4312  * The routines calling guarantee_online_mems() are careful to
4313  * only set nodes in task->mems_allowed that are online.  So it
4314  * should not be possible for the following code to return an
4315  * offline node.  But if it did, that would be ok, as this routine
4316  * is not returning the node where the allocation must be, only
4317  * the node where the search should start.  The zonelist passed to
4318  * __alloc_pages() will include all nodes.  If the slab allocator
4319  * is passed an offline node, it will fall back to the local node.
4320  * See kmem_cache_alloc_node().
4321  */
cpuset_spread_node(int * rotor)4322 static int cpuset_spread_node(int *rotor)
4323 {
4324 	return *rotor = next_node_in(*rotor, current->mems_allowed);
4325 }
4326 
4327 /**
4328  * cpuset_mem_spread_node() - On which node to begin search for a file page
4329  */
cpuset_mem_spread_node(void)4330 int cpuset_mem_spread_node(void)
4331 {
4332 	if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
4333 		current->cpuset_mem_spread_rotor =
4334 			node_random(&current->mems_allowed);
4335 
4336 	return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
4337 }
4338 
4339 /**
4340  * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
4341  * @tsk1: pointer to task_struct of some task.
4342  * @tsk2: pointer to task_struct of some other task.
4343  *
4344  * Description: Return true if @tsk1's mems_allowed intersects the
4345  * mems_allowed of @tsk2.  Used by the OOM killer to determine if
4346  * one of the task's memory usage might impact the memory available
4347  * to the other.
4348  **/
4349 
cpuset_mems_allowed_intersects(const struct task_struct * tsk1,const struct task_struct * tsk2)4350 int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
4351 				   const struct task_struct *tsk2)
4352 {
4353 	return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
4354 }
4355 
4356 /**
4357  * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed
4358  *
4359  * Description: Prints current's name, cpuset name, and cached copy of its
4360  * mems_allowed to the kernel log.
4361  */
cpuset_print_current_mems_allowed(void)4362 void cpuset_print_current_mems_allowed(void)
4363 {
4364 	struct cgroup *cgrp;
4365 
4366 	rcu_read_lock();
4367 
4368 	cgrp = task_cs(current)->css.cgroup;
4369 	pr_cont(",cpuset=");
4370 	pr_cont_cgroup_name(cgrp);
4371 	pr_cont(",mems_allowed=%*pbl",
4372 		nodemask_pr_args(&current->mems_allowed));
4373 
4374 	rcu_read_unlock();
4375 }
4376 
4377 /* Display task mems_allowed in /proc/<pid>/status file. */
cpuset_task_status_allowed(struct seq_file * m,struct task_struct * task)4378 void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
4379 {
4380 	seq_printf(m, "Mems_allowed:\t%*pb\n",
4381 		   nodemask_pr_args(&task->mems_allowed));
4382 	seq_printf(m, "Mems_allowed_list:\t%*pbl\n",
4383 		   nodemask_pr_args(&task->mems_allowed));
4384 }
4385