1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * kernel/cpuset.c
4 *
5 * Processor and Memory placement constraints for sets of tasks.
6 *
7 * Copyright (C) 2003 BULL SA.
8 * Copyright (C) 2004-2007 Silicon Graphics, Inc.
9 * Copyright (C) 2006 Google, Inc
10 *
11 * Portions derived from Patrick Mochel's sysfs code.
12 * sysfs is Copyright (c) 2001-3 Patrick Mochel
13 *
14 * 2003-10-10 Written by Simon Derr.
15 * 2003-10-22 Updates by Stephen Hemminger.
16 * 2004 May-July Rework by Paul Jackson.
17 * 2006 Rework by Paul Menage to use generic cgroups
18 * 2008 Rework of the scheduler domains and CPU hotplug handling
19 * by Max Krasnyansky
20 */
21 #include "cpuset-internal.h"
22
23 #include <linux/init.h>
24 #include <linux/interrupt.h>
25 #include <linux/kernel.h>
26 #include <linux/mempolicy.h>
27 #include <linux/mm.h>
28 #include <linux/memory.h>
29 #include <linux/rcupdate.h>
30 #include <linux/sched.h>
31 #include <linux/sched/deadline.h>
32 #include <linux/sched/mm.h>
33 #include <linux/sched/task.h>
34 #include <linux/security.h>
35 #include <linux/oom.h>
36 #include <linux/sched/isolation.h>
37 #include <linux/wait.h>
38 #include <linux/workqueue.h>
39 #include <linux/task_work.h>
40
41 DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
42 DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
43
44 /*
45 * There could be abnormal cpuset configurations for cpu or memory
46 * node binding; this key provides a quick, low-cost check for that
47 * situation.
48 */
49 DEFINE_STATIC_KEY_FALSE(cpusets_insane_config_key);
50
51 static const char * const perr_strings[] = {
52 [PERR_INVCPUS] = "Invalid cpu list in cpuset.cpus.exclusive",
53 [PERR_INVPARENT] = "Parent is an invalid partition root",
54 [PERR_NOTPART] = "Parent is not a partition root",
55 [PERR_NOTEXCL] = "Cpu list in cpuset.cpus not exclusive",
56 [PERR_NOCPUS] = "Parent unable to distribute cpu downstream",
57 [PERR_HOTPLUG] = "No cpu available due to hotplug",
58 [PERR_CPUSEMPTY] = "cpuset.cpus and cpuset.cpus.exclusive are empty",
59 [PERR_HKEEPING] = "partition config conflicts with housekeeping setup",
60 [PERR_ACCESS] = "Enable partition not permitted",
61 [PERR_REMOTE] = "Have remote partition underneath",
62 };
63
64 /*
65 * CPUSET Locking Convention
66 * -------------------------
67 *
68 * Below are the four global/local locks guarding cpuset structures in lock
69 * acquisition order:
70 * - cpuset_top_mutex
71 * - cpu_hotplug_lock (cpus_read_lock/cpus_write_lock)
72 * - cpuset_mutex
73 * - callback_lock (raw spinlock)
74 *
75 * As cpuset will now indirectly flush a number of different workqueues in
76 * housekeeping_update() to update housekeeping cpumasks when the set of
77 * isolated CPUs is going to be changed, it may be vulnerable to deadlock
78 * if we hold cpus_read_lock while calling into housekeeping_update().
79 *
80 * The first cpuset_top_mutex will be held except when calling into
81 * cpuset_handle_hotplug() from the CPU hotplug code where cpus_write_lock
82 * and cpuset_mutex will be held instead. The main purpose of this mutex
83 * is to prevent regular cpuset control file write actions from interfering
84 * with the call to housekeeping_update(), though CPU hotplug operation can
85 * still happen in parallel. This mutex also provides protection for some
86 * internal variables.
87 *
88 * A task must hold all three of the remaining locks to modify externally
89 * visible or used fields of cpusets, though some of the internally used
90 * cpuset fields and internal variables can be modified without holding
91 * callback_lock. If only reliable read access to the externally used fields
92 * is needed, a task can hold either cpuset_mutex or callback_lock, both of
93 * which are exposed to other external subsystems.
94 *
95 * If a task holds cpu_hotplug_lock and cpuset_mutex, it blocks others,
96 * ensuring that it is the only task able to also acquire callback_lock and
97 * be able to modify cpusets. It can perform various checks on the cpuset
98 * structure first, knowing nothing will change. It can also allocate memory
99 * without holding callback_lock. While it is performing these checks, various
100 * callback routines can briefly acquire callback_lock to query cpusets. Once
101 * it is ready to make the changes, it takes callback_lock, blocking everyone
102 * else.
103 *
104 * Calls to the kernel memory allocator cannot be made while holding
105 * callback_lock which is a spinlock, as the memory allocator may sleep or
106 * call back into cpuset code and acquire callback_lock.
107 *
108 * The task_struct fields mems_allowed and mempolicy may be changed by
109 * another task, so we use alloc_lock in the task_struct to protect
110 * them.
111 *
112 * The cpuset_common_seq_show() handlers only hold callback_lock across
113 * small pieces of code, such as when reading out possibly multi-word
114 * cpumasks and nodemasks.
115 */
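/*
 * Illustrative sketch (not part of the code): a typical control-file
 * write path follows the documented lock order by bracketing its work
 * with cpuset_full_lock()/cpuset_full_unlock():
 *
 *	cpuset_full_lock();   - cpuset_top_mutex, cpus_read_lock, cpuset_mutex
 *	...validate and prepare changes, allocating memory as needed...
 *	spin_lock_irq(&callback_lock);
 *	...publish the new cpumasks/nodemasks...
 *	spin_unlock_irq(&callback_lock);
 *	cpuset_full_unlock();
 */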
116
117 static DEFINE_MUTEX(cpuset_top_mutex);
118 static DEFINE_MUTEX(cpuset_mutex);
119
120 /*
121 * File level internal variables below follow one of the following exclusion
122 * rules.
123 *
124 * RWCS: Read/write-able by holding either cpus_write_lock (and optionally
125 * cpuset_mutex) or both cpus_read_lock and cpuset_mutex.
126 *
127 * CSCB: Readable by holding either cpuset_mutex or callback_lock. Writable
128 * by holding both cpuset_mutex and callback_lock.
129 *
130 * T: Read/write-able by holding the cpuset_top_mutex.
131 */
132
133 /*
134 * For local partitions, update to subpartitions_cpus & isolated_cpus is done
135 * in update_parent_effective_cpumask(). For remote partitions, it is done in
136 * the remote_partition_*() and remote_cpus_update() helpers.
137 */
138 /*
139 * Exclusive CPUs distributed out to local or remote sub-partitions of
140 * top_cpuset
141 */
142 static cpumask_var_t subpartitions_cpus; /* RWCS */
143
144 /*
145 * Exclusive CPUs in isolated partitions (shown in cpuset.cpus.isolated)
146 */
147 static cpumask_var_t isolated_cpus; /* CSCB */
148
149 /*
150 * Set if housekeeping cpumasks are to be updated.
151 */
152 static bool update_housekeeping; /* RWCS */
153
154 /*
155 * Copy of isolated_cpus to be passed to housekeeping_update()
156 */
157 static cpumask_var_t isolated_hk_cpus; /* T */
158
159 /*
160 * A flag to force sched domain rebuild at the end of an operation.
161 * It can be set in
162 * - update_partition_sd_lb()
163 * - update_cpumasks_hier()
164 * - cpuset_update_flag()
165 * - cpuset_hotplug_update_tasks()
166 * - cpuset_handle_hotplug()
167 *
168 * Protected by cpuset_mutex (with cpus_read_lock held) or cpus_write_lock.
169 *
170 * Note that update_relax_domain_level() in cpuset-v1.c can still call
171 * rebuild_sched_domains_locked() directly without using this flag.
172 */
173 static bool force_sd_rebuild; /* RWCS */
174
175 /*
176 * Partition root states:
177 *
178 * 0 - member (not a partition root)
179 * 1 - partition root
180 * 2 - partition root without load balancing (isolated)
181 * -1 - invalid partition root
182 * -2 - invalid isolated partition root
183 *
184 * There are 2 types of partitions - local or remote. Local partitions are
185 * those whose parents are partition roots themselves. Setting
186 * cpuset.cpus.exclusive is optional when setting up local partitions.
187 * Remote partitions are those whose parents are not partition roots. Passing
188 * down exclusive CPUs by setting cpuset.cpus.exclusive along the ancestor
189 * nodes is mandatory when creating a remote partition.
190 *
191 * For simplicity, a local partition can be created under a local or remote
192 * partition but a remote partition cannot have any partition root in its
193 * ancestor chain except the cgroup root.
194 *
195 * A valid partition can be formed by setting exclusive_cpus, or cpus_allowed
196 * if exclusive_cpus is not set. For a partition with empty
197 * exclusive_cpus, any conflicting exclusive CPUs specified in the
198 * following cpumasks of sibling cpusets will be removed from its
199 * cpus_allowed when determining its effective_xcpus.
200 * - effective_xcpus
201 * - exclusive_cpus
202 *
203 * The "cpuset.cpus.exclusive" control file should be used for setting up a
204 * partition if the user wants to get as many CPUs as possible.
205 */
206 #define PRS_MEMBER 0
207 #define PRS_ROOT 1
208 #define PRS_ISOLATED 2
209 #define PRS_INVALID_ROOT -1
210 #define PRS_INVALID_ISOLATED -2
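/*
 * Illustrative example (assuming a cgroup v2 mount at /sys/fs/cgroup):
 * a local partition is typically created by writing "root" or "isolated"
 * to the child's cpuset.cpus.partition file after populating cpuset.cpus
 * (and optionally cpuset.cpus.exclusive); a remote partition additionally
 * requires cpuset.cpus.exclusive to be set along the ancestor chain.
 */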
211
212 /*
213 * Temporary cpumasks for working with partitions that are passed among
214 * functions to avoid memory allocation in inner functions.
215 */
216 struct tmpmasks {
217 cpumask_var_t addmask, delmask; /* For partition root */
218 cpumask_var_t new_cpus; /* For update_cpumasks_hier() */
219 };
220
221 void inc_dl_tasks_cs(struct task_struct *p)
222 {
223 struct cpuset *cs = task_cs(p);
224
225 cs->nr_deadline_tasks++;
226 }
227
228 void dec_dl_tasks_cs(struct task_struct *p)
229 {
230 struct cpuset *cs = task_cs(p);
231
232 cs->nr_deadline_tasks--;
233 }
234
235 static inline bool is_partition_valid(const struct cpuset *cs)
236 {
237 return cs->partition_root_state > 0;
238 }
239
240 static inline bool is_partition_invalid(const struct cpuset *cs)
241 {
242 return cs->partition_root_state < 0;
243 }
244
245 static inline bool cs_is_member(const struct cpuset *cs)
246 {
247 return cs->partition_root_state == PRS_MEMBER;
248 }
249
250 /*
251 * Callers should hold callback_lock to modify partition_root_state.
252 */
253 static inline void make_partition_invalid(struct cpuset *cs)
254 {
255 if (cs->partition_root_state > 0)
256 cs->partition_root_state = -cs->partition_root_state;
257 }
258
259 /*
260 * Send a notification event whenever partition_root_state changes.
261 */
262 static inline void notify_partition_change(struct cpuset *cs, int old_prs)
263 {
264 if (old_prs == cs->partition_root_state)
265 return;
266 cgroup_file_notify(&cs->partition_file);
267
268 /* Reset prs_err if not invalid */
269 if (is_partition_valid(cs))
270 WRITE_ONCE(cs->prs_err, PERR_NONE);
271 }
272
273 /*
274 * The top_cpuset is always synchronized to cpu_active_mask and we should avoid
275 * using cpu_online_mask as much as possible. An active CPU is always an online
276 * CPU, but not vice versa. cpu_active_mask and cpu_online_mask can differ
277 * during hotplug operations. A CPU is marked active at the last stage of CPU
278 * bringup (CPUHP_AP_ACTIVE). It is also the stage where cpuset hotplug code
279 * will be called to update the sched domains so that the scheduler can move
280 * a normal task to a newly active CPU or remove tasks away from a newly
281 * inactivated CPU. The online bit is set much earlier in the CPU bringup
282 * process and cleared much later in CPU teardown.
283 *
284 * If cpu_online_mask is used while a hotunplug operation is happening in
285 * parallel, we may leave an offline CPU in cpus_allowed or some other masks.
286 */
287 struct cpuset top_cpuset = {
288 .flags = BIT(CS_CPU_EXCLUSIVE) |
289 BIT(CS_MEM_EXCLUSIVE) | BIT(CS_SCHED_LOAD_BALANCE),
290 .partition_root_state = PRS_ROOT,
291 };
292
293 /**
294 * cpuset_lock - Acquire the global cpuset mutex
295 *
296 * This locks the global cpuset mutex to prevent modifications to the cpuset
297 * hierarchy and configuration. This helper alone is not sufficient to make modifications.
298 */
299 void cpuset_lock(void)
300 {
301 mutex_lock(&cpuset_mutex);
302 }
303
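/**
 * cpuset_unlock - Release the global cpuset mutex taken by cpuset_lock()
 */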
304 void cpuset_unlock(void)
305 {
306 mutex_unlock(&cpuset_mutex);
307 }
308
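/* Assert (via lockdep) that the global cpuset_mutex is currently held */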
309 void lockdep_assert_cpuset_lock_held(void)
310 {
311 lockdep_assert_held(&cpuset_mutex);
312 }
313
314 /**
315 * cpuset_full_lock - Acquire full protection for cpuset modification
316 *
317 * Takes cpuset_top_mutex, the CPU hotplug read lock (cpus_read_lock()) and
318 * cpuset_mutex to safely modify cpuset data.
319 */
320 void cpuset_full_lock(void)
321 {
322 mutex_lock(&cpuset_top_mutex);
323 cpus_read_lock();
324 mutex_lock(&cpuset_mutex);
325 }
326
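/**
 * cpuset_full_unlock - Release the locks taken by cpuset_full_lock() in reverse order
 */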
327 void cpuset_full_unlock(void)
328 {
329 mutex_unlock(&cpuset_mutex);
330 cpus_read_unlock();
331 mutex_unlock(&cpuset_top_mutex);
332 }
333
334 #ifdef CONFIG_LOCKDEP
335 bool lockdep_is_cpuset_held(void)
336 {
337 return lockdep_is_held(&cpuset_mutex) ||
338 lockdep_is_held(&cpuset_top_mutex);
339 }
340 #endif
341
342 static DEFINE_SPINLOCK(callback_lock);
343
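/* Acquire/release callback_lock with interrupts disabled for callers querying cpusets */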
344 void cpuset_callback_lock_irq(void)
345 {
346 spin_lock_irq(&callback_lock);
347 }
348
349 void cpuset_callback_unlock_irq(void)
350 {
351 spin_unlock_irq(&callback_lock);
352 }
353
354 static struct workqueue_struct *cpuset_migrate_mm_wq;
355
356 static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);
357
358 static inline void check_insane_mems_config(nodemask_t *nodes)
359 {
360 if (!cpusets_insane_config() &&
361 movable_only_nodes(nodes)) {
362 static_branch_enable_cpuslocked(&cpusets_insane_config_key);
363 pr_info("Unsupported (movable nodes only) cpuset configuration detected (nmask=%*pbl)!\n"
364 "Cpuset allocations might fail even with a lot of memory available.\n",
365 nodemask_pr_args(nodes));
366 }
367 }
368
369 /*
370 * decrease cs->attach_in_progress.
371 * wake_up cpuset_attach_wq if cs->attach_in_progress==0.
372 */
373 static inline void dec_attach_in_progress_locked(struct cpuset *cs)
374 {
375 lockdep_assert_cpuset_lock_held();
376
377 cs->attach_in_progress--;
378 if (!cs->attach_in_progress)
379 wake_up(&cpuset_attach_wq);
380 }
381
382 static inline void dec_attach_in_progress(struct cpuset *cs)
383 {
384 mutex_lock(&cpuset_mutex);
385 dec_attach_in_progress_locked(cs);
386 mutex_unlock(&cpuset_mutex);
387 }
388
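/*
 * Return true if v1 cpuset support is not built in or the cpuset controller
 * is attached to the default (v2) hierarchy.
 */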
389 static inline bool cpuset_v2(void)
390 {
391 return !IS_ENABLED(CONFIG_CPUSETS_V1) ||
392 cgroup_subsys_on_dfl(cpuset_cgrp_subsys);
393 }
394
395 /*
396 * Cgroup v2 behavior is used on the "cpus" and "mems" control files when
397 * on default hierarchy or when the cpuset_v2_mode flag is set by mounting
398 * the v1 cpuset cgroup filesystem with the "cpuset_v2_mode" mount option.
399 * With v2 behavior, "cpus" and "mems" are always what the users have
400 * requested and won't be changed by hotplug events. Only the effective
401 * cpus or mems will be affected.
402 */
403 static inline bool is_in_v2_mode(void)
404 {
405 return cpuset_v2() ||
406 (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE);
407 }
408
409 /**
410 * partition_is_populated - check if partition has tasks
411 * @cs: partition root to be checked
412 * @excluded_child: a child cpuset to be excluded in task checking
413 * Return: true if there are tasks, false otherwise
414 *
415 * @cs should be a valid partition root or going to become a partition root.
416 * @excluded_child should be non-NULL when this cpuset is going to become a
417 * partition itself.
418 *
419 * Note that a remote partition is not allowed underneath a valid local
420 * or remote partition. So if a non-partition root child is populated,
421 * the whole partition is considered populated.
422 */
423 static inline bool partition_is_populated(struct cpuset *cs,
424 struct cpuset *excluded_child)
425 {
426 struct cpuset *cp;
427 struct cgroup_subsys_state *pos_css;
428
429 /*
430 * We cannot call cs_is_populated(cs) directly, as
431 * nr_populated_domain_children may include populated
432 * csets from descendants that are partitions.
433 */
434 if (cs->css.cgroup->nr_populated_csets ||
435 cs->attach_in_progress)
436 return true;
437
438 rcu_read_lock();
439 cpuset_for_each_descendant_pre(cp, pos_css, cs) {
440 if (cp == cs || cp == excluded_child)
441 continue;
442
443 if (is_partition_valid(cp)) {
444 pos_css = css_rightmost_descendant(pos_css);
445 continue;
446 }
447
448 if (cpuset_is_populated(cp)) {
449 rcu_read_unlock();
450 return true;
451 }
452 }
453 rcu_read_unlock();
454 return false;
455 }
456
457 /*
458 * Return in pmask the portion of a task's cpuset's cpus_allowed that
459 * are online and are capable of running the task. If none are found,
460 * walk up the cpuset hierarchy until we find one that does have some
461 * appropriate cpus.
462 *
463 * One way or another, we guarantee to return some non-empty subset
464 * of cpu_active_mask.
465 *
466 * Call with callback_lock or cpuset_mutex held.
467 */
468 static void guarantee_active_cpus(struct task_struct *tsk,
469 struct cpumask *pmask)
470 {
471 const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
472 struct cpuset *cs;
473
474 if (WARN_ON(!cpumask_and(pmask, possible_mask, cpu_active_mask)))
475 cpumask_copy(pmask, cpu_active_mask);
476
477 rcu_read_lock();
478 cs = task_cs(tsk);
479
480 while (!cpumask_intersects(cs->effective_cpus, pmask))
481 cs = parent_cs(cs);
482
483 cpumask_and(pmask, pmask, cs->effective_cpus);
484 rcu_read_unlock();
485 }
486
487 /*
488 * Return in *pmask the portion of a cpuset's mems_allowed that
489 * are online, with memory. If none are online with memory, walk
490 * up the cpuset hierarchy until we find one that does have some
491 * online mems. The top cpuset always has some mems online.
492 *
493 * One way or another, we guarantee to return some non-empty subset
494 * of node_states[N_MEMORY].
495 *
496 * Call with callback_lock or cpuset_mutex held.
497 */
498 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
499 {
500 while (!nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]))
501 cs = parent_cs(cs);
502 }
503
504 /**
505 * alloc_cpumasks - Allocate an array of cpumask variables
506 * @pmasks: Pointer to array of cpumask_var_t pointers
507 * @size: Number of cpumasks to allocate
508 * Return: 0 if successful, -ENOMEM otherwise.
509 *
510 * Allocates @size cpumasks and initializes them to empty. Returns 0 on
511 * success, -ENOMEM on allocation failure. On failure, any previously
512 * allocated cpumasks are freed.
513 */
514 static inline int alloc_cpumasks(cpumask_var_t *pmasks[], u32 size)
515 {
516 int i;
517
518 for (i = 0; i < size; i++) {
519 if (!zalloc_cpumask_var(pmasks[i], GFP_KERNEL)) {
520 while (--i >= 0)
521 free_cpumask_var(*pmasks[i]);
522 return -ENOMEM;
523 }
524 }
525 return 0;
526 }
527
528 /**
529 * alloc_tmpmasks - Allocate temporary cpumasks for cpuset operations.
530 * @tmp: Pointer to tmpmasks structure to populate
531 * Return: 0 on success, -ENOMEM on allocation failure
532 */
533 static inline int alloc_tmpmasks(struct tmpmasks *tmp)
534 {
535 /*
536 * Array of pointers to the three cpumask_var_t fields in tmpmasks.
537 * Note: Array size must match actual number of masks (3)
538 */
539 cpumask_var_t *pmask[3] = {
540 &tmp->new_cpus,
541 &tmp->addmask,
542 &tmp->delmask
543 };
544
545 return alloc_cpumasks(pmask, ARRAY_SIZE(pmask));
546 }
547
548 /**
549 * free_tmpmasks - free cpumasks in a tmpmasks structure
550 * @tmp: the tmpmasks structure pointer
551 */
552 static inline void free_tmpmasks(struct tmpmasks *tmp)
553 {
554 if (!tmp)
555 return;
556
557 free_cpumask_var(tmp->new_cpus);
558 free_cpumask_var(tmp->addmask);
559 free_cpumask_var(tmp->delmask);
560 }
561
562 /**
563 * dup_or_alloc_cpuset - Duplicate or allocate a new cpuset
564 * @cs: Source cpuset to duplicate (NULL for a fresh allocation)
565 *
566 * Creates a new cpuset by either:
567 * 1. Duplicating an existing cpuset (if @cs is non-NULL), or
568 * 2. Allocating a fresh cpuset with zero-initialized masks (if @cs is NULL)
569 *
570 * Return: Pointer to newly allocated cpuset on success, NULL on failure
571 */
572 static struct cpuset *dup_or_alloc_cpuset(struct cpuset *cs)
573 {
574 struct cpuset *trial;
575
576 /* Allocate base structure */
577 trial = cs ? kmemdup(cs, sizeof(*cs), GFP_KERNEL) :
578 kzalloc_obj(*cs);
579 if (!trial)
580 return NULL;
581
582 /* Setup cpumask pointer array */
583 cpumask_var_t *pmask[4] = {
584 &trial->cpus_allowed,
585 &trial->effective_cpus,
586 &trial->effective_xcpus,
587 &trial->exclusive_cpus
588 };
589
590 if (alloc_cpumasks(pmask, ARRAY_SIZE(pmask))) {
591 kfree(trial);
592 return NULL;
593 }
594
595 /* Copy masks if duplicating */
596 if (cs) {
597 cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
598 cpumask_copy(trial->effective_cpus, cs->effective_cpus);
599 cpumask_copy(trial->effective_xcpus, cs->effective_xcpus);
600 cpumask_copy(trial->exclusive_cpus, cs->exclusive_cpus);
601 }
602
603 return trial;
604 }
605
606 /**
607 * free_cpuset - free the cpuset
608 * @cs: the cpuset to be freed
609 */
610 static inline void free_cpuset(struct cpuset *cs)
611 {
612 free_cpumask_var(cs->cpus_allowed);
613 free_cpumask_var(cs->effective_cpus);
614 free_cpumask_var(cs->effective_xcpus);
615 free_cpumask_var(cs->exclusive_cpus);
616 kfree(cs);
617 }
618
619 /* Return user specified exclusive CPUs */
620 static inline struct cpumask *user_xcpus(struct cpuset *cs)
621 {
622 return cpumask_empty(cs->exclusive_cpus) ? cs->cpus_allowed
623 : cs->exclusive_cpus;
624 }
625
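/* Return true if both cpus_allowed and exclusive_cpus are empty */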
626 static inline bool xcpus_empty(struct cpuset *cs)
627 {
628 return cpumask_empty(cs->cpus_allowed) &&
629 cpumask_empty(cs->exclusive_cpus);
630 }
631
632 /*
633 * cpusets_are_exclusive() - check if two cpusets are exclusive
634 *
635 * Return true if exclusive, false if not
636 */
637 static inline bool cpusets_are_exclusive(struct cpuset *cs1, struct cpuset *cs2)
638 {
639 struct cpumask *xcpus1 = user_xcpus(cs1);
640 struct cpumask *xcpus2 = user_xcpus(cs2);
641
642 if (cpumask_intersects(xcpus1, xcpus2))
643 return false;
644 return true;
645 }
646
647 /**
648 * cpus_excl_conflict - Check if two cpusets have exclusive CPU conflicts
649 * @trial: the trial cpuset to be checked
650 * @sibling: a sibling cpuset to be checked against
651 * @xcpus_changed: set if exclusive_cpus has been changed
652 *
653 * Returns: true if CPU exclusivity conflict exists, false otherwise
654 *
655 * Conflict detection rules:
656 * o cgroup v1
657 * See cpuset1_cpus_excl_conflict()
658 * o cgroup v2
659 * - The exclusive_cpus values cannot overlap.
660 * - New exclusive_cpus cannot be a superset of a sibling's cpus_allowed.
661 */
662 static inline bool cpus_excl_conflict(struct cpuset *trial, struct cpuset *sibling,
663 bool xcpus_changed)
664 {
665 if (!cpuset_v2())
666 return cpuset1_cpus_excl_conflict(trial, sibling);
667
668 /* The cpus_allowed of a sibling cpuset cannot be a subset of the new exclusive_cpus */
669 if (xcpus_changed && !cpumask_empty(sibling->cpus_allowed) &&
670 cpumask_subset(sibling->cpus_allowed, trial->exclusive_cpus))
671 return true;
672
673 /* Exclusive_cpus cannot intersect */
674 return cpumask_intersects(trial->exclusive_cpus, sibling->exclusive_cpus);
675 }
676
677 static inline bool mems_excl_conflict(struct cpuset *cs1, struct cpuset *cs2)
678 {
679 if ((is_mem_exclusive(cs1) || is_mem_exclusive(cs2)))
680 return nodes_intersects(cs1->mems_allowed, cs2->mems_allowed);
681 return false;
682 }
683
684 /*
685 * validate_change() - Used to validate that any proposed cpuset change
686 * follows the structural rules for cpusets.
687 *
688 * If we replaced the flag and mask values of the current cpuset
689 * (cur) with those values in the trial cpuset (trial), would
690 * our various subset and exclusive rules still be valid? Presumes
691 * cpuset_mutex held.
692 *
693 * 'cur' is the address of an actual, in-use cpuset. Operations
694 * such as list traversal that depend on the actual address of the
695 * cpuset in the list must use cur below, not trial.
696 *
697 * 'trial' is the address of a bulk structure copy of cur, with
698 * perhaps one or more of the fields cpus_allowed, mems_allowed,
699 * or flags changed to new, trial values.
700 *
701 * Return 0 if valid, -errno if not.
702 */
703
704 static int validate_change(struct cpuset *cur, struct cpuset *trial)
705 {
706 struct cgroup_subsys_state *css;
707 struct cpuset *c, *par;
708 bool xcpus_changed;
709 int ret = 0;
710
711 rcu_read_lock();
712
713 if (!is_in_v2_mode())
714 ret = cpuset1_validate_change(cur, trial);
715 if (ret)
716 goto out;
717
718 /* Remaining checks don't apply to root cpuset */
719 if (cur == &top_cpuset)
720 goto out;
721
722 par = parent_cs(cur);
723
724 /*
725 * We can't shrink if we won't have enough room for SCHED_DEADLINE
726 * tasks. This check is not done when scheduling is disabled as the
727 * users should know what they are doing.
728 *
729 * For v1, effective_cpus == cpus_allowed & user_xcpus() returns
730 * cpus_allowed.
731 *
732 * For v2, is_cpu_exclusive() & is_sched_load_balance() are true only
733 * for non-isolated partition root. At this point, the target
734 * effective_cpus isn't computed yet. user_xcpus() is the best
735 * approximation.
736 *
737 * TBD: May need to precompute the real effective_cpus here in case
738 * incorrect scheduling of SCHED_DEADLINE tasks in a partition
739 * becomes an issue.
740 */
741 ret = -EBUSY;
742 if (is_cpu_exclusive(cur) && is_sched_load_balance(cur) &&
743 !cpuset_cpumask_can_shrink(cur->effective_cpus, user_xcpus(trial)))
744 goto out;
745
746 /*
747 * If either I or some sibling (!= me) is exclusive, we can't
748 * overlap. exclusive_cpus cannot overlap with each other if set.
749 */
750 ret = -EINVAL;
751 xcpus_changed = !cpumask_equal(cur->exclusive_cpus, trial->exclusive_cpus);
752 cpuset_for_each_child(c, css, par) {
753 if (c == cur)
754 continue;
755 if (cpus_excl_conflict(trial, c, xcpus_changed))
756 goto out;
757 if (mems_excl_conflict(trial, c))
758 goto out;
759 }
760
761 ret = 0;
762 out:
763 rcu_read_unlock();
764 return ret;
765 }
766
767 #ifdef CONFIG_SMP
768
769 /*
770 * generate_sched_domains()
771 *
772 * This function builds a partial partition of the system's CPUs.
773 * A 'partial partition' is a set of non-overlapping subsets whose
774 * union is a subset of that set.
775 * The output of this function needs to be passed to kernel/sched/core.c
776 * partition_sched_domains() routine, which will rebuild the scheduler's
777 * load balancing domains (sched domains) as specified by that partial
778 * partition.
779 *
780 * See "What is sched_load_balance" in Documentation/admin-guide/cgroup-v1/cpusets.rst
781 * for a background explanation of this.
782 *
783 * Does not return errors, on the theory that the callers of this
784 * routine would rather not worry about failures to rebuild sched
785 * domains when operating in the severe memory shortage situations
786 * that could cause allocation failures below.
787 *
788 * Must be called with cpuset_mutex held.
789 *
790 * The three key local variables below are:
791 * cp - cpuset pointer, used (together with pos_css) to perform a
792 * top-down scan of all cpusets. For our purposes, rebuilding
793 * the schedulers sched domains, we can ignore !is_sched_load_
794 * balance cpusets.
795 * csa - (for CpuSet Array) Array of pointers to all the cpusets
796 * that need to be load balanced, for convenient iterative
797 * access by the subsequent code that finds the best partition,
798 * i.e. the set of domains (subsets) of CPUs such that the
799 * cpus_allowed of every cpuset marked is_sched_load_balance
800 * is a subset of one of these domains, while there are as
801 * many such domains as possible, each as small as possible.
802 * doms - Conversion of 'csa' to an array of cpumasks, for passing to
803 * the kernel/sched/core.c routine partition_sched_domains() in a
804 * convenient format, that can be easily compared to the prior
805 * value to determine what partition elements (sched domains)
806 * were changed (added or removed.)
807 */
808 static int generate_sched_domains(cpumask_var_t **domains,
809 struct sched_domain_attr **attributes)
810 {
811 struct cpuset *cp; /* top-down scan of cpusets */
812 struct cpuset **csa; /* array of all cpuset ptrs */
813 int i, j; /* indices for partition finding loops */
814 cpumask_var_t *doms; /* resulting partition; i.e. sched domains */
815 struct sched_domain_attr *dattr; /* attributes for custom domains */
816 int ndoms = 0; /* number of sched domains in result */
817 struct cgroup_subsys_state *pos_css;
818
819 if (!cpuset_v2())
820 return cpuset1_generate_sched_domains(domains, attributes);
821
822 doms = NULL;
823 dattr = NULL;
824 csa = NULL;
825
826 /* Special case for the 99% of systems with one, full, sched domain */
827 if (cpumask_empty(subpartitions_cpus)) {
828 ndoms = 1;
829 /* !csa will be checked and can be correctly handled */
830 goto generate_doms;
831 }
832
833 csa = kmalloc_objs(cp, nr_cpusets());
834 if (!csa)
835 goto done;
836
837 /* Find how many partitions and cache them to csa[] */
838 rcu_read_lock();
839 cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
840 /*
841 * Only valid partition roots that are not isolated and with
842 * non-empty effective_cpus will be saved into csa[].
843 */
844 if ((cp->partition_root_state == PRS_ROOT) &&
845 !cpumask_empty(cp->effective_cpus))
846 csa[ndoms++] = cp;
847
848 /*
849 * Skip @cp's subtree if not a partition root and has no
850 * exclusive CPUs to be granted to child cpusets.
851 */
852 if (!is_partition_valid(cp) && cpumask_empty(cp->exclusive_cpus))
853 pos_css = css_rightmost_descendant(pos_css);
854 }
855 rcu_read_unlock();
856
857 for (i = 0; i < ndoms; i++) {
858 for (j = i + 1; j < ndoms; j++) {
859 if (cpusets_overlap(csa[i], csa[j]))
860 /*
861 * Cgroup v2 shouldn't pass down overlapping
862 * partition root cpusets.
863 */
864 WARN_ON_ONCE(1);
865 }
866 }
867
868 generate_doms:
869 doms = alloc_sched_domains(ndoms);
870 if (!doms)
871 goto done;
872
873 /*
874 * The rest of the code, including the scheduler, can deal with
875 * dattr==NULL case. No need to abort if alloc fails.
876 */
877 dattr = kmalloc_objs(struct sched_domain_attr, ndoms);
878
879 /*
880 * Cgroup v2 doesn't support domain attributes, just set all of them
881 * to SD_ATTR_INIT. Also non-isolating partition root CPUs are a
882 * subset of HK_TYPE_DOMAIN housekeeping CPUs.
883 */
884 for (i = 0; i < ndoms; i++) {
885 /*
886 * The top cpuset may contain some boot time isolated
887 * CPUs that need to be excluded from the sched domain.
888 */
889 if (!csa || csa[i] == &top_cpuset)
890 cpumask_and(doms[i], top_cpuset.effective_cpus,
891 housekeeping_cpumask(HK_TYPE_DOMAIN));
892 else
893 cpumask_copy(doms[i], csa[i]->effective_cpus);
894 if (dattr)
895 dattr[i] = SD_ATTR_INIT;
896 }
897
898 done:
899 kfree(csa);
900
901 /*
902 * Fallback to the default domain if kmalloc() failed.
903 * See comments in partition_sched_domains().
904 */
905 if (doms == NULL)
906 ndoms = 1;
907
908 *domains = doms;
909 *attributes = dattr;
910 return ndoms;
911 }
912
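/*
 * Re-attach the bandwidth of all SCHED_DEADLINE tasks in @cs to their
 * root domain, if the cpuset has any such tasks.
 */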
913 static void dl_update_tasks_root_domain(struct cpuset *cs)
914 {
915 struct css_task_iter it;
916 struct task_struct *task;
917
918 if (cs->nr_deadline_tasks == 0)
919 return;
920
921 css_task_iter_start(&cs->css, 0, &it);
922
923 while ((task = css_task_iter_next(&it)))
924 dl_add_task_root_domain(task);
925
926 css_task_iter_end(&it);
927 }
928
929 void dl_rebuild_rd_accounting(void)
930 {
931 struct cpuset *cs = NULL;
932 struct cgroup_subsys_state *pos_css;
933 int cpu;
934 u64 cookie = ++dl_cookie;
935
936 lockdep_assert_cpuset_lock_held();
937 lockdep_assert_cpus_held();
938 lockdep_assert_held(&sched_domains_mutex);
939
940 rcu_read_lock();
941
942 for_each_possible_cpu(cpu) {
943 if (dl_bw_visited(cpu, cookie))
944 continue;
945
946 dl_clear_root_domain_cpu(cpu);
947 }
948
949 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
950
951 if (cpumask_empty(cs->effective_cpus)) {
952 pos_css = css_rightmost_descendant(pos_css);
953 continue;
954 }
955
956 css_get(&cs->css);
957
958 rcu_read_unlock();
959
960 dl_update_tasks_root_domain(cs);
961
962 rcu_read_lock();
963 css_put(&cs->css);
964 }
965 rcu_read_unlock();
966 }
967
968 /*
969 * Rebuild scheduler domains.
970 *
971 * If the flag 'sched_load_balance' of any cpuset with non-empty
972 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
973 * which has that flag enabled, or if any cpuset with a non-empty
974 * 'cpus' is removed, then call this routine to rebuild the
975 * scheduler's dynamic sched domains.
976 *
977 * Call with cpuset_mutex held. Takes cpus_read_lock().
978 */
979 void rebuild_sched_domains_locked(void)
980 {
981 struct sched_domain_attr *attr;
982 cpumask_var_t *doms;
983 int ndoms;
984 int i;
985
986 lockdep_assert_cpus_held();
987 lockdep_assert_cpuset_lock_held();
988 force_sd_rebuild = false;
989
990 /* Generate domain masks and attrs */
991 ndoms = generate_sched_domains(&doms, &attr);
992
993 /*
994 * cpuset_hotplug_workfn is invoked synchronously now, thus this
995 * function should not race with CPU hotplug. And the effective CPUs
996 * must not include any offline CPUs. Passing an offline CPU in the
997 * doms to partition_sched_domains() will trigger a kernel panic.
998 *
999 * We perform a final check here: if the doms contains any
1000 * offline CPUs, a warning is emitted and we return directly to
1001 * prevent the panic.
1002 */
1003 for (i = 0; doms && i < ndoms; i++) {
1004 if (WARN_ON_ONCE(!cpumask_subset(doms[i], cpu_active_mask)))
1005 return;
1006 }
1007
1008 /* Have scheduler rebuild the domains */
1009 partition_sched_domains(ndoms, doms, attr);
1010 }
1011 #else /* !CONFIG_SMP */
1012 void rebuild_sched_domains_locked(void)
1013 {
1014 }
1015 #endif /* CONFIG_SMP */
1016
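/* Rebuild sched domains with cpus_read_lock already held by the caller */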
1017 static void rebuild_sched_domains_cpuslocked(void)
1018 {
1019 mutex_lock(&cpuset_mutex);
1020 rebuild_sched_domains_locked();
1021 mutex_unlock(&cpuset_mutex);
1022 }
1023
1024 void rebuild_sched_domains(void)
1025 {
1026 cpus_read_lock();
1027 rebuild_sched_domains_cpuslocked();
1028 cpus_read_unlock();
1029 }
1030
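/* Reset the sched domains back to the single default domain */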
1031 void cpuset_reset_sched_domains(void)
1032 {
1033 mutex_lock(&cpuset_mutex);
1034 partition_sched_domains(1, NULL, NULL);
1035 mutex_unlock(&cpuset_mutex);
1036 }
1037
1038 /**
1039 * cpuset_update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
1040 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
1041 * @new_cpus: the temp variable for the new effective_cpus mask
1042 *
1043 * Iterate through each task of @cs updating its cpus_allowed to the
1044 * effective cpuset's. As this function is called with cpuset_mutex held,
1045 * cpuset membership stays stable.
1046 *
1047 * For top_cpuset, task_cpu_possible_mask() is used instead of effective_cpus
1048 * to make sure all offline CPUs are also included as hotplug code won't
1049 * update cpumasks for tasks in top_cpuset.
1050 *
1051 * As task_cpu_possible_mask() can be task dependent in arm64, we have to
1052 * do cpu masking per task instead of doing it once for all.
1053 */
1054 void cpuset_update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
1055 {
1056 struct css_task_iter it;
1057 struct task_struct *task;
1058 bool top_cs = cs == &top_cpuset;
1059
1060 css_task_iter_start(&cs->css, 0, &it);
1061 while ((task = css_task_iter_next(&it))) {
1062 const struct cpumask *possible_mask = task_cpu_possible_mask(task);
1063
1064 if (top_cs) {
1065 /*
1066 * PF_KTHREAD tasks are handled by housekeeping.
1067 * PF_NO_SETAFFINITY tasks are ignored.
1068 */
1069 if (task->flags & (PF_KTHREAD | PF_NO_SETAFFINITY))
1070 continue;
1071 cpumask_andnot(new_cpus, possible_mask, subpartitions_cpus);
1072 } else {
1073 cpumask_and(new_cpus, possible_mask, cs->effective_cpus);
1074 }
1075 set_cpus_allowed_ptr(task, new_cpus);
1076 }
1077 css_task_iter_end(&it);
1078 }
1079
1080 /**
1081 * compute_effective_cpumask - Compute the effective cpumask of the cpuset
1082 * @new_cpus: the temp variable for the new effective_cpus mask
1083 * @cs: the cpuset that needs its new effective_cpus mask recomputed
1084 * @parent: the parent cpuset
1085 *
1086 * The result is valid only if the given cpuset isn't a partition root.
1087 */
1088 static void compute_effective_cpumask(struct cpumask *new_cpus,
1089 struct cpuset *cs, struct cpuset *parent)
1090 {
1091 cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus);
1092 }
1093
1094 /*
1095 * Commands for update_parent_effective_cpumask
1096 */
1097 enum partition_cmd {
1098 partcmd_enable, /* Enable partition root */
1099 partcmd_enablei, /* Enable isolated partition root */
1100 partcmd_disable, /* Disable partition root */
1101 partcmd_update, /* Update parent's effective_cpus */
1102 partcmd_invalidate, /* Make partition invalid */
1103 };
1104
1105 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
1106 struct tmpmasks *tmp);
1107
1108 /*
1109 * Update partition exclusive flag
1110 *
1111 * Return: 0 if successful, an error code otherwise
1112 */
1113 static int update_partition_exclusive_flag(struct cpuset *cs, int new_prs)
1114 {
1115 bool exclusive = (new_prs > PRS_MEMBER);
1116
1117 if (exclusive && !is_cpu_exclusive(cs)) {
1118 if (cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, 1))
1119 return PERR_NOTEXCL;
1120 } else if (!exclusive && is_cpu_exclusive(cs)) {
1121 /* Turning off CS_CPU_EXCLUSIVE will not return error */
1122 cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, 0);
1123 }
1124 return 0;
1125 }
1126
1127 /*
1128 * Update partition load balance flag and/or rebuild sched domain
1129 *
1130 * Changing load balance flag will automatically call
1131 * rebuild_sched_domains_locked().
1132 * This function is for cgroup v2 only.
1133 */
1134 static void update_partition_sd_lb(struct cpuset *cs, int old_prs)
1135 {
1136 int new_prs = cs->partition_root_state;
1137 bool rebuild_domains = (new_prs > 0) || (old_prs > 0);
1138 bool new_lb;
1139
1140 /*
1141 * If cs is not a valid partition root, the load balance state
1142 * will follow its parent.
1143 */
1144 if (new_prs > 0) {
1145 new_lb = (new_prs != PRS_ISOLATED);
1146 } else {
1147 new_lb = is_sched_load_balance(parent_cs(cs));
1148 }
1149 if (new_lb != !!is_sched_load_balance(cs)) {
1150 rebuild_domains = true;
1151 if (new_lb)
1152 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1153 else
1154 clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1155 }
1156
1157 if (rebuild_domains)
1158 cpuset_force_rebuild();
1159 }
1160
1161 /*
1162 * tasks_nocpu_error - Return true if tasks will have no effective_cpus
1163 */
1164 static bool tasks_nocpu_error(struct cpuset *parent, struct cpuset *cs,
1165 struct cpumask *xcpus)
1166 {
1167 /*
1168 * A populated partition (cs or parent) can't have empty effective_cpus
1169 */
1170 return (cpumask_subset(parent->effective_cpus, xcpus) &&
1171 partition_is_populated(parent, cs)) ||
1172 (!cpumask_intersects(xcpus, cpu_active_mask) &&
1173 partition_is_populated(cs, NULL));
1174 }
1175
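/*
 * Reset the partition-related data of @cs when it is no longer a valid
 * partition root (cgroup v2 only). Caller must hold callback_lock.
 */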
1176 static void reset_partition_data(struct cpuset *cs)
1177 {
1178 struct cpuset *parent = parent_cs(cs);
1179
1180 if (!cpuset_v2())
1181 return;
1182
1183 lockdep_assert_held(&callback_lock);
1184
1185 if (cpumask_empty(cs->exclusive_cpus)) {
1186 cpumask_clear(cs->effective_xcpus);
1187 if (is_cpu_exclusive(cs))
1188 clear_bit(CS_CPU_EXCLUSIVE, &cs->flags);
1189 }
1190 if (!cpumask_and(cs->effective_cpus, parent->effective_cpus, cs->cpus_allowed))
1191 cpumask_copy(cs->effective_cpus, parent->effective_cpus);
1192 }
1193
1194 /*
1195 * isolated_cpus_update - Update the isolated_cpus mask
1196 * @old_prs: old partition_root_state
1197 * @new_prs: new partition_root_state
1198 * @xcpus: exclusive CPUs with state change
1199 */
1200 static void isolated_cpus_update(int old_prs, int new_prs, struct cpumask *xcpus)
1201 {
1202 WARN_ON_ONCE(old_prs == new_prs);
1203 lockdep_assert_held(&callback_lock);
1204 lockdep_assert_held(&cpuset_mutex);
1205 if (new_prs == PRS_ISOLATED) {
1206 if (cpumask_subset(xcpus, isolated_cpus))
1207 return;
1208 cpumask_or(isolated_cpus, isolated_cpus, xcpus);
1209 } else {
1210 if (!cpumask_intersects(xcpus, isolated_cpus))
1211 return;
1212 cpumask_andnot(isolated_cpus, isolated_cpus, xcpus);
1213 }
1214 update_housekeeping = true;
1215 }
1216
1217 /*
1218 * partition_xcpus_add - Add new exclusive CPUs to partition
1219 * @new_prs: new partition_root_state
1220 * @parent: parent cpuset
1221 * @xcpus: exclusive CPUs to be added
1222 *
1223 * Remote partition if parent == NULL
1224 */
1225 static void partition_xcpus_add(int new_prs, struct cpuset *parent,
1226 struct cpumask *xcpus)
1227 {
1228 WARN_ON_ONCE(new_prs < 0);
1229 lockdep_assert_held(&callback_lock);
1230 if (!parent)
1231 parent = &top_cpuset;
1232
1233
1234 if (parent == &top_cpuset)
1235 cpumask_or(subpartitions_cpus, subpartitions_cpus, xcpus);
1236
1237 if (new_prs != parent->partition_root_state)
1238 isolated_cpus_update(parent->partition_root_state, new_prs,
1239 xcpus);
1240
1241 cpumask_andnot(parent->effective_cpus, parent->effective_cpus, xcpus);
1242 }
1243
1244 /*
1245 * partition_xcpus_del - Remove exclusive CPUs from partition
1246 * @old_prs: old partition_root_state
1247 * @parent: parent cpuset
1248 * @xcpus: exclusive CPUs to be removed
1249 *
1250 * Remote partition if parent == NULL
1251 */
1252 static void partition_xcpus_del(int old_prs, struct cpuset *parent,
1253 struct cpumask *xcpus)
1254 {
1255 WARN_ON_ONCE(old_prs < 0);
1256 lockdep_assert_held(&callback_lock);
1257 if (!parent)
1258 parent = &top_cpuset;
1259
1260 if (parent == &top_cpuset)
1261 cpumask_andnot(subpartitions_cpus, subpartitions_cpus, xcpus);
1262
1263 if (old_prs != parent->partition_root_state)
1264 isolated_cpus_update(old_prs, parent->partition_root_state,
1265 xcpus);
1266
1267 cpumask_or(parent->effective_cpus, parent->effective_cpus, xcpus);
1268 cpumask_and(parent->effective_cpus, parent->effective_cpus, cpu_active_mask);
1269 }
1270
1271 /*
1272 * isolated_cpus_can_update - check for isolated & nohz_full conflicts
1273 * @add_cpus: cpu mask for cpus that are going to be isolated
1274 * @del_cpus: cpu mask for cpus that are no longer isolated, can be NULL
1275 * Return: false if there is conflict, true otherwise
1276 *
1277 * If nohz_full is enabled and we have isolated CPUs, their combination must
1278 * still leave housekeeping CPUs.
1279 *
1280 * TBD: Should consider merging this function into
1281 * prstate_housekeeping_conflict().
1282 */
1283 static bool isolated_cpus_can_update(struct cpumask *add_cpus,
1284 struct cpumask *del_cpus)
1285 {
1286 cpumask_var_t full_hk_cpus;
1287 int res = true;
1288
1289 if (!housekeeping_enabled(HK_TYPE_KERNEL_NOISE))
1290 return true;
1291
1292 if (del_cpus && cpumask_weight_and(del_cpus,
1293 housekeeping_cpumask(HK_TYPE_KERNEL_NOISE)))
1294 return true;
1295
1296 if (!alloc_cpumask_var(&full_hk_cpus, GFP_KERNEL))
1297 return false;
1298
1299 cpumask_and(full_hk_cpus, housekeeping_cpumask(HK_TYPE_KERNEL_NOISE),
1300 housekeeping_cpumask(HK_TYPE_DOMAIN));
1301 cpumask_andnot(full_hk_cpus, full_hk_cpus, isolated_cpus);
1302 cpumask_and(full_hk_cpus, full_hk_cpus, cpu_active_mask);
1303 if (!cpumask_weight_andnot(full_hk_cpus, add_cpus))
1304 res = false;
1305
1306 free_cpumask_var(full_hk_cpus);
1307 return res;
1308 }
1309
1310 /*
1311 * prstate_housekeeping_conflict - check for partition & housekeeping conflicts
1312 * @prstate: partition root state to be checked
1313 * @new_cpus: cpu mask
1314 * Return: true if there is conflict, false otherwise
1315 *
1316 * CPUs outside of HK_TYPE_DOMAIN_BOOT, if defined, can only be used in an
1317 * isolated partition.
1318 */
1319 static bool prstate_housekeeping_conflict(int prstate, struct cpumask *new_cpus)
1320 {
1321 if (!housekeeping_enabled(HK_TYPE_DOMAIN_BOOT))
1322 return false;
1323
1324 if ((prstate != PRS_ISOLATED) &&
1325 !cpumask_subset(new_cpus, housekeeping_cpumask(HK_TYPE_DOMAIN_BOOT)))
1326 return true;
1327
1328 return false;
1329 }
1330
1331 /*
1332 * update_hk_sched_domains - Update HK cpumasks & rebuild sched domains
1333 *
1334 * Update housekeeping cpumasks and rebuild sched domains if necessary.
1335 * This should be called at the end of cpuset or hotplug actions.
1336 */
1337 static void update_hk_sched_domains(void)
1338 {
1339 if (update_housekeeping) {
1340 /* Updating HK cpumasks implies rebuild sched domains */
1341 update_housekeeping = false;
1342 force_sd_rebuild = true;
1343 cpumask_copy(isolated_hk_cpus, isolated_cpus);
1344
1345 /*
1346 * housekeeping_update() is now called without holding
1347 * cpus_read_lock and cpuset_mutex. Only cpuset_top_mutex
1348 * is still being held for mutual exclusion.
1349 */
1350 mutex_unlock(&cpuset_mutex);
1351 cpus_read_unlock();
1352 WARN_ON_ONCE(housekeeping_update(isolated_hk_cpus));
1353 cpus_read_lock();
1354 mutex_lock(&cpuset_mutex);
1355 }
1356 /* force_sd_rebuild will be cleared in rebuild_sched_domains_locked() */
1357 if (force_sd_rebuild)
1358 rebuild_sched_domains_locked();
1359 }
1360
1361 /*
1362 * Work function to invoke update_hk_sched_domains()
1363 */
1364 static void hk_sd_workfn(struct work_struct *work)
1365 {
1366 cpuset_full_lock();
1367 update_hk_sched_domains();
1368 cpuset_full_unlock();
1369 }
1370
1371 /**
1372 * rm_siblings_excl_cpus - Remove exclusive CPUs that are used by sibling cpusets
1373 * @parent: Parent cpuset containing all siblings
1374 * @cs: Current cpuset (will be skipped)
1375 * @excpus: exclusive effective CPU mask to modify
1376 *
1377 * This function ensures the given @excpus mask doesn't include any CPUs that
1378 * are exclusively allocated to sibling cpusets. It walks through all siblings
1379 * of @cs under @parent and removes their exclusive CPUs from @excpus.
1380 */
1381 static int rm_siblings_excl_cpus(struct cpuset *parent, struct cpuset *cs,
1382 struct cpumask *excpus)
1383 {
1384 struct cgroup_subsys_state *css;
1385 struct cpuset *sibling;
1386 int retval = 0;
1387
1388 if (cpumask_empty(excpus))
1389 return 0;
1390
1391 /*
1392 * Remove exclusive CPUs from siblings
1393 */
1394 rcu_read_lock();
1395 cpuset_for_each_child(sibling, css, parent) {
1396 struct cpumask *sibling_xcpus;
1397
1398 if (sibling == cs)
1399 continue;
1400
1401 /*
1402 * If exclusive_cpus is defined, effective_xcpus will always
1403 * be a subset. Otherwise, effective_xcpus will only be set
1404 * in a valid partition root.
1405 */
1406 sibling_xcpus = cpumask_empty(sibling->exclusive_cpus)
1407 ? sibling->effective_xcpus
1408 : sibling->exclusive_cpus;
1409
1410 if (cpumask_intersects(excpus, sibling_xcpus)) {
1411 cpumask_andnot(excpus, excpus, sibling_xcpus);
1412 retval++;
1413 }
1414 }
1415 rcu_read_unlock();
1416
1417 return retval;
1418 }
1419
1420 /*
1421 * compute_excpus - compute effective exclusive CPUs
1422 * @cs: cpuset
1423 * @xcpus: effective exclusive CPUs value to be set
1424 * Return: 0 if there is no sibling conflict, > 0 otherwise
1425 *
1426 * If exclusive_cpus isn't explicitly set, we have to scan the sibling cpusets
1427 * and exclude their exclusive_cpus or effective_xcpus as well.
1428 */
1429 static int compute_excpus(struct cpuset *cs, struct cpumask *excpus)
1430 {
1431 struct cpuset *parent = parent_cs(cs);
1432
1433 cpumask_and(excpus, user_xcpus(cs), parent->effective_xcpus);
1434
1435 if (!cpumask_empty(cs->exclusive_cpus))
1436 return 0;
1437
1438 return rm_siblings_excl_cpus(parent, cs, excpus);
1439 }
1440
1441 /*
1442 * compute_trialcs_excpus - Compute effective exclusive CPUs for a trial cpuset
1443 * @trialcs: The trial cpuset containing the proposed new configuration
1444 * @cs: The original cpuset that the trial configuration is based on
1445 * Return: 0 if successful with no sibling conflict, >0 if a conflict is found
1446 *
1447 * Computes the effective_xcpus for a trial configuration. @cs is provided to represent
1448 * the real cs.
1449 */
1450 static int compute_trialcs_excpus(struct cpuset *trialcs, struct cpuset *cs)
1451 {
1452 struct cpuset *parent = parent_cs(trialcs);
1453 struct cpumask *excpus = trialcs->effective_xcpus;
1454
1455 /* trialcs is a member; cpuset.cpus has no impact on excpus */
1456 if (cs_is_member(cs))
1457 cpumask_and(excpus, trialcs->exclusive_cpus,
1458 parent->effective_xcpus);
1459 else
1460 cpumask_and(excpus, user_xcpus(trialcs), parent->effective_xcpus);
1461
1462 return rm_siblings_excl_cpus(parent, cs, excpus);
1463 }
1464
1465 static inline bool is_remote_partition(struct cpuset *cs)
1466 {
1467 return cs->remote_partition;
1468 }
1469
1470 static inline bool is_local_partition(struct cpuset *cs)
1471 {
1472 return is_partition_valid(cs) && !is_remote_partition(cs);
1473 }
1474
1475 /*
1476 * remote_partition_enable - Enable current cpuset as a remote partition root
1477 * @cs: the cpuset to update
1478 * @new_prs: new partition_root_state
1479 * @tmp: temporary masks
1480 * Return: 0 if successful, errcode if error
1481 *
1482 * Enable the current cpuset to become a remote partition root taking CPUs
1483 * directly from the top cpuset. cpuset_mutex must be held by the caller.
1484 */
1485 static int remote_partition_enable(struct cpuset *cs, int new_prs,
1486 struct tmpmasks *tmp)
1487 {
1488 /*
1489 * The user must have sysadmin privilege.
1490 */
1491 if (!capable(CAP_SYS_ADMIN))
1492 return PERR_ACCESS;
1493
1494 /*
1495 * The requested exclusive_cpus must not be allocated to other
1496 * partitions and it can't use up all the root's effective_cpus.
1497 *
1498 * The effective_xcpus mask can contain offline CPUs, but there must
1499 * be at least one online CPU present before it can be enabled.
1500 *
1501 * Note that creating a remote partition with any local partition root
1502 * above it or remote partition root underneath it is not allowed.
1503 */
1504 compute_excpus(cs, tmp->new_cpus);
1505 WARN_ON_ONCE(cpumask_intersects(tmp->new_cpus, subpartitions_cpus));
1506 if (!cpumask_intersects(tmp->new_cpus, cpu_active_mask) ||
1507 cpumask_subset(top_cpuset.effective_cpus, tmp->new_cpus))
1508 return PERR_INVCPUS;
1509 if (((new_prs == PRS_ISOLATED) &&
1510 !isolated_cpus_can_update(tmp->new_cpus, NULL)) ||
1511 prstate_housekeeping_conflict(new_prs, tmp->new_cpus))
1512 return PERR_HKEEPING;
1513
1514 spin_lock_irq(&callback_lock);
1515 partition_xcpus_add(new_prs, NULL, tmp->new_cpus);
1516 cs->remote_partition = true;
1517 cpumask_copy(cs->effective_xcpus, tmp->new_cpus);
1518 spin_unlock_irq(&callback_lock);
1519 cpuset_force_rebuild();
1520 cs->prs_err = 0;
1521
1522 /*
1523 * Propagate changes in top_cpuset's effective_cpus down the hierarchy.
1524 */
1525 cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
1526 update_sibling_cpumasks(&top_cpuset, NULL, tmp);
1527 return 0;
1528 }
1529
1530 /*
1531 * remote_partition_disable - Remove current cpuset from remote partition list
1532 * @cs: the cpuset to update
1533 * @tmp: temporary masks
1534 *
1535 * The effective_cpus is also updated.
1536 *
1537 * cpuset_mutex must be held by the caller.
1538 */
1539 static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp)
1540 {
1541 WARN_ON_ONCE(!is_remote_partition(cs));
1542 /*
1543 * When a CPU is offlined, top_cpuset may end up with no available CPUs,
1544 * which should clear subpartitions_cpus. We should not emit a warning for this
1545 * scenario: the hierarchy is updated from top to bottom, so subpartitions_cpus
1546 * may already be cleared when disabling the partition.
1547 */
1548 WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus) &&
1549 !cpumask_empty(subpartitions_cpus));
1550
1551 spin_lock_irq(&callback_lock);
1552 cs->remote_partition = false;
1553 partition_xcpus_del(cs->partition_root_state, NULL, cs->effective_xcpus);
1554 if (cs->prs_err)
1555 cs->partition_root_state = -cs->partition_root_state;
1556 else
1557 cs->partition_root_state = PRS_MEMBER;
1558
1559 /* effective_xcpus may need to be changed */
1560 compute_excpus(cs, cs->effective_xcpus);
1561 reset_partition_data(cs);
1562 spin_unlock_irq(&callback_lock);
1563 cpuset_force_rebuild();
1564
1565 /*
1566 * Propagate changes in top_cpuset's effective_cpus down the hierarchy.
1567 */
1568 cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
1569 update_sibling_cpumasks(&top_cpuset, NULL, tmp);
1570 }
1571
1572 /*
1573 * remote_cpus_update - cpus_exclusive change of remote partition
1574 * @cs: the cpuset to be updated
1575 * @xcpus: the new exclusive_cpus mask, if non-NULL
1576 * @excpus: the new effective_xcpus mask
1577 * @tmp: temporary masks
1578 *
1579 * top_cpuset and subpartitions_cpus will be updated or partition can be
1580 * invalidated.
1581 */
1582 static void remote_cpus_update(struct cpuset *cs, struct cpumask *xcpus,
1583 struct cpumask *excpus, struct tmpmasks *tmp)
1584 {
1585 bool adding, deleting;
1586 int prs = cs->partition_root_state;
1587
1588 if (WARN_ON_ONCE(!is_remote_partition(cs)))
1589 return;
1590
1591 WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus));
1592
1593 if (cpumask_empty(excpus)) {
1594 cs->prs_err = PERR_CPUSEMPTY;
1595 goto invalidate;
1596 }
1597
1598 adding = cpumask_andnot(tmp->addmask, excpus, cs->effective_xcpus);
1599 deleting = cpumask_andnot(tmp->delmask, cs->effective_xcpus, excpus);
1600
1601 /*
1602 * Addition of remote CPUs is only allowed if those CPUs are
1603 * not allocated to other partitions and there are effective_cpus
1604 * left in the top cpuset.
1605 */
1606 if (adding) {
1607 WARN_ON_ONCE(cpumask_intersects(tmp->addmask, subpartitions_cpus));
1608 if (!capable(CAP_SYS_ADMIN))
1609 cs->prs_err = PERR_ACCESS;
1610 else if (cpumask_intersects(tmp->addmask, subpartitions_cpus) ||
1611 cpumask_subset(top_cpuset.effective_cpus, tmp->addmask))
1612 cs->prs_err = PERR_NOCPUS;
1613 else if ((prs == PRS_ISOLATED) &&
1614 !isolated_cpus_can_update(tmp->addmask, tmp->delmask))
1615 cs->prs_err = PERR_HKEEPING;
1616 if (cs->prs_err)
1617 goto invalidate;
1618 }
1619
1620 spin_lock_irq(&callback_lock);
1621 if (adding)
1622 partition_xcpus_add(prs, NULL, tmp->addmask);
1623 if (deleting)
1624 partition_xcpus_del(prs, NULL, tmp->delmask);
1625 /*
1626 * Need to update effective_xcpus and exclusive_cpus now as
1627 * update_sibling_cpumasks() below may iterate back to the same cs.
1628 */
1629 cpumask_copy(cs->effective_xcpus, excpus);
1630 if (xcpus)
1631 cpumask_copy(cs->exclusive_cpus, xcpus);
1632 spin_unlock_irq(&callback_lock);
1633 if (adding || deleting)
1634 cpuset_force_rebuild();
1635
1636 /*
1637 * Propagate changes in top_cpuset's effective_cpus down the hierarchy.
1638 */
1639 cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
1640 update_sibling_cpumasks(&top_cpuset, NULL, tmp);
1641 return;
1642
1643 invalidate:
1644 remote_partition_disable(cs, tmp);
1645 }
1646
1647 /**
1648 * update_parent_effective_cpumask - update effective_cpus mask of parent cpuset
1649 * @cs: The cpuset that requests change in partition root state
1650 * @cmd: Partition root state change command
1651 * @newmask: Optional new cpumask for partcmd_update
1652 * @tmp: Temporary addmask and delmask
1653 * Return: 0 or a partition root state error code
1654 *
1655 * For partcmd_enable*, the cpuset is being transformed from a non-partition
1656 * root to a partition root. The effective_xcpus (cpus_allowed if
1657 * effective_xcpus not set) mask of the given cpuset will be taken away from
1658 * parent's effective_cpus. The function returns 0 if all the CPUs listed
1659 * in effective_xcpus can be granted, or an error code otherwise.
1660 *
1661 * For partcmd_disable, the cpuset is being transformed from a partition
1662 * root back to a non-partition root. Any CPUs in effective_xcpus will be
1663 * given back to parent's effective_cpus. 0 will always be returned.
1664 *
1665 * For partcmd_update, if the optional newmask is specified, the cpu list is
1666 * to be changed from effective_xcpus to newmask. Otherwise, effective_xcpus is
1667 * assumed to remain the same. The cpuset should either be a valid or invalid
1668 * partition root. The partition root state may change from valid to invalid
1669 * or vice versa. An error code will be returned if transitioning from
1670 * invalid to valid violates the exclusivity rule.
1671 *
1672 * For partcmd_invalidate, the current partition will be made invalid.
1673 *
1674 * The partcmd_enable* and partcmd_disable commands are used by
1675 * update_prstate(). An error code may be returned and the caller will check
1676 * for error.
1677 *
1678 * The partcmd_update command is used by update_cpumasks_hier() with newmask
1679 * NULL and update_cpumask() with newmask set. The partcmd_invalidate is used
1680 * by update_cpumask() with NULL newmask. In both cases, the callers won't
1681 * check for error and so partition_root_state and prs_err will be updated
1682 * directly.
1683 */
1684 static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
1685 struct cpumask *newmask,
1686 struct tmpmasks *tmp)
1687 {
1688 struct cpuset *parent = parent_cs(cs);
1689 int adding; /* Adding cpus to parent's effective_cpus */
1690 int deleting; /* Deleting cpus from parent's effective_cpus */
1691 int old_prs, new_prs;
1692 int part_error = PERR_NONE; /* Partition error? */
1693 struct cpumask *xcpus = user_xcpus(cs);
1694 int parent_prs = parent->partition_root_state;
1695 bool nocpu;
1696
1697 lockdep_assert_cpuset_lock_held();
1698 WARN_ON_ONCE(is_remote_partition(cs)); /* For local partition only */
1699
1700 /*
1701 * new_prs will only be changed for the partcmd_update and
1702 * partcmd_invalidate commands.
1703 */
1704 adding = deleting = false;
1705 old_prs = new_prs = cs->partition_root_state;
1706
1707 if (cmd == partcmd_invalidate) {
1708 if (is_partition_invalid(cs))
1709 return 0;
1710
1711 /*
1712 * Make the current partition invalid.
1713 */
1714 if (is_partition_valid(parent))
1715 adding = cpumask_and(tmp->addmask,
1716 xcpus, parent->effective_xcpus);
1717 if (old_prs > 0)
1718 new_prs = -old_prs;
1719
1720 goto write_error;
1721 }
1722
1723 /*
1724 * The parent must be a partition root.
1725 * The new cpumask, if present, or the current cpus_allowed must
1726 * not be empty.
1727 */
1728 if (!is_partition_valid(parent)) {
1729 return is_partition_invalid(parent)
1730 ? PERR_INVPARENT : PERR_NOTPART;
1731 }
1732 if (!newmask && xcpus_empty(cs))
1733 return PERR_CPUSEMPTY;
1734
1735 nocpu = tasks_nocpu_error(parent, cs, xcpus);
1736
1737 if ((cmd == partcmd_enable) || (cmd == partcmd_enablei)) {
1738 /*
1739 * Need to call compute_excpus() in case
1740 * exclusive_cpus is not set. A sibling conflict should only happen
1741 * if exclusive_cpus isn't set.
1742 */
1743 xcpus = tmp->delmask;
1744 if (compute_excpus(cs, xcpus))
1745 WARN_ON_ONCE(!cpumask_empty(cs->exclusive_cpus));
1746 new_prs = (cmd == partcmd_enable) ? PRS_ROOT : PRS_ISOLATED;
1747
1748 /*
1749 * Enabling partition root is not allowed if its
1750 * effective_xcpus is empty.
1751 */
1752 if (cpumask_empty(xcpus))
1753 return PERR_INVCPUS;
1754
1755 if (prstate_housekeeping_conflict(new_prs, xcpus))
1756 return PERR_HKEEPING;
1757
1758 if ((new_prs == PRS_ISOLATED) && (new_prs != parent_prs) &&
1759 !isolated_cpus_can_update(xcpus, NULL))
1760 return PERR_HKEEPING;
1761
1762 if (tasks_nocpu_error(parent, cs, xcpus))
1763 return PERR_NOCPUS;
1764
1765 /*
1766 * This function will only be called when all the preliminary
1767 * checks have passed. At this point, the following condition
1768 * should hold.
1769 *
1770 * (cs->effective_xcpus & cpu_active_mask) ⊆ parent->effective_cpus
1771 *
1772 * Warn if it is not the case.
1773 */
1774 cpumask_and(tmp->new_cpus, xcpus, cpu_active_mask);
1775 WARN_ON_ONCE(!cpumask_subset(tmp->new_cpus, parent->effective_cpus));
1776
1777 deleting = true;
1778 } else if (cmd == partcmd_disable) {
1779 /*
1780 * May need to add cpus back to parent's effective_cpus
1781 * (and maybe removed from subpartitions_cpus/isolated_cpus)
1782 * for valid partition root. xcpus may contain CPUs that
1783 * shouldn't be removed from the two global cpumasks.
1784 */
1785 if (is_partition_valid(cs)) {
1786 cpumask_copy(tmp->addmask, cs->effective_xcpus);
1787 adding = true;
1788 }
1789 new_prs = PRS_MEMBER;
1790 } else if (newmask) {
1791 /*
1792 * Empty cpumask is not allowed
1793 */
1794 if (cpumask_empty(newmask)) {
1795 part_error = PERR_CPUSEMPTY;
1796 goto write_error;
1797 }
1798
1799 /* Check newmask again, whether cpus are available for parent/cs */
1800 nocpu |= tasks_nocpu_error(parent, cs, newmask);
1801
1802 /*
1803 * partcmd_update with newmask:
1804 *
1805 * Compute add/delete mask to/from effective_cpus
1806 *
1807 * For valid partition:
1808 * addmask = exclusive_cpus & ~newmask
1809 * & parent->effective_xcpus
1810 * delmask = newmask & ~exclusive_cpus
1811 * & parent->effective_xcpus
1812 *
1813 * For invalid partition:
1814 * delmask = newmask & parent->effective_xcpus
1815 * The partition may become valid soon.
1816 */
1817 if (is_partition_invalid(cs)) {
1818 adding = false;
1819 deleting = cpumask_and(tmp->delmask,
1820 newmask, parent->effective_xcpus);
1821 } else {
1822 cpumask_andnot(tmp->addmask, xcpus, newmask);
1823 adding = cpumask_and(tmp->addmask, tmp->addmask,
1824 parent->effective_xcpus);
1825
1826 cpumask_andnot(tmp->delmask, newmask, xcpus);
1827 deleting = cpumask_and(tmp->delmask, tmp->delmask,
1828 parent->effective_xcpus);
1829 }
1830
1831 /*
1832 * TBD: Invalidating a currently valid child root partition may
1833 * still break the isolated_cpus_can_update() rule if the parent is
1834 * an isolated partition.
1835 */
1836 if (is_partition_valid(cs) && (old_prs != parent_prs)) {
1837 if ((parent_prs == PRS_ROOT) &&
1838 /* Adding to parent means removing isolated CPUs */
1839 !isolated_cpus_can_update(tmp->delmask, tmp->addmask))
1840 part_error = PERR_HKEEPING;
1841 if ((parent_prs == PRS_ISOLATED) &&
1842 /* Adding to parent means adding isolated CPUs */
1843 !isolated_cpus_can_update(tmp->addmask, tmp->delmask))
1844 part_error = PERR_HKEEPING;
1845 }
1846
1847 /*
1848 * The new CPUs to be removed from parent's effective CPUs
1849 * must be present.
1850 */
1851 if (deleting) {
1852 cpumask_and(tmp->new_cpus, tmp->delmask, cpu_active_mask);
1853 WARN_ON_ONCE(!cpumask_subset(tmp->new_cpus, parent->effective_cpus));
1854 }
1855
1856 /*
1857 * Make partition invalid if parent's effective_cpus could
1858 * become empty and there are tasks in the parent.
1859 */
1860 if (nocpu && (!adding ||
1861 !cpumask_intersects(tmp->addmask, cpu_active_mask))) {
1862 part_error = PERR_NOCPUS;
1863 deleting = false;
1864 adding = cpumask_and(tmp->addmask,
1865 xcpus, parent->effective_xcpus);
1866 }
1867 } else {
1868 /*
1869 * partcmd_update w/o newmask
1870 *
1871 * delmask = effective_xcpus & parent->effective_cpus
1872 *
1873 * This can be called from:
1874 * 1) update_cpumasks_hier()
1875 * 2) cpuset_hotplug_update_tasks()
1876 *
1877 * Check to see if it can be transitioned from valid to
1878 * invalid partition or vice versa.
1879 *
1880 * A partition error happens when parent has tasks and all
1881 * its effective CPUs will have to be distributed out.
1882 */
1883 if (nocpu) {
1884 part_error = PERR_NOCPUS;
1885 if (is_partition_valid(cs))
1886 adding = cpumask_and(tmp->addmask,
1887 xcpus, parent->effective_xcpus);
1888 } else if (is_partition_invalid(cs) && !cpumask_empty(xcpus) &&
1889 cpumask_subset(xcpus, parent->effective_xcpus)) {
1890 struct cgroup_subsys_state *css;
1891 struct cpuset *child;
1892 bool exclusive = true;
1893
1894 /*
1895 * Converting an invalid partition to a valid one has to
1896 * pass the cpu exclusivity test.
1897 */
1898 rcu_read_lock();
1899 cpuset_for_each_child(child, css, parent) {
1900 if (child == cs)
1901 continue;
1902 if (!cpusets_are_exclusive(cs, child)) {
1903 exclusive = false;
1904 break;
1905 }
1906 }
1907 rcu_read_unlock();
1908 if (exclusive)
1909 deleting = cpumask_and(tmp->delmask,
1910 xcpus, parent->effective_cpus);
1911 else
1912 part_error = PERR_NOTEXCL;
1913 }
1914 }
1915
1916 write_error:
1917 if (part_error)
1918 WRITE_ONCE(cs->prs_err, part_error);
1919
1920 if (cmd == partcmd_update) {
1921 /*
1922 * Check for possible transition between valid and invalid
1923 * partition root.
1924 */
1925 switch (cs->partition_root_state) {
1926 case PRS_ROOT:
1927 case PRS_ISOLATED:
1928 if (part_error)
1929 new_prs = -old_prs;
1930 break;
1931 case PRS_INVALID_ROOT:
1932 case PRS_INVALID_ISOLATED:
1933 if (!part_error)
1934 new_prs = -old_prs;
1935 break;
1936 }
1937 }
1938
1939 if (!adding && !deleting && (new_prs == old_prs))
1940 return 0;
1941
1942 /*
1943 * Transitioning between invalid to valid or vice versa may require
1944 * changing CS_CPU_EXCLUSIVE. In the case of partcmd_update,
1945 * validate_change() has already been successfully called and
1946 * CPU lists in cs haven't been updated yet. So defer it to later.
1947 */
1948 if ((old_prs != new_prs) && (cmd != partcmd_update)) {
1949 int err = update_partition_exclusive_flag(cs, new_prs);
1950
1951 if (err)
1952 return err;
1953 }
1954
1955 /*
1956 * Change the parent's effective_cpus & effective_xcpus (top cpuset
1957 * only).
1958 *
1959 * Newly added CPUs will be removed from effective_cpus and
1960 * newly deleted ones will be added back to effective_cpus.
1961 */
1962 spin_lock_irq(&callback_lock);
1963 if (old_prs != new_prs)
1964 cs->partition_root_state = new_prs;
1965
1966 /*
1967 * Adding to parent's effective_cpus means deleting CPUs from cs
1968 * and vice versa.
1969 */
1970 if (adding)
1971 partition_xcpus_del(old_prs, parent, tmp->addmask);
1972 if (deleting)
1973 partition_xcpus_add(new_prs, parent, tmp->delmask);
1974
1975 spin_unlock_irq(&callback_lock);
1976
1977 if ((old_prs != new_prs) && (cmd == partcmd_update))
1978 update_partition_exclusive_flag(cs, new_prs);
1979
1980 if (adding || deleting) {
1981 cpuset_update_tasks_cpumask(parent, tmp->addmask);
1982 update_sibling_cpumasks(parent, cs, tmp);
1983 }
1984
1985 /*
1986 * For partcmd_update without newmask, it is being called from
1987 * cpuset_handle_hotplug(). Update the load balance flag and
1988 * scheduling domain accordingly.
1989 */
1990 if ((cmd == partcmd_update) && !newmask)
1991 update_partition_sd_lb(cs, old_prs);
1992
1993 notify_partition_change(cs, old_prs);
1994 return 0;
1995 }
1996
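/*
 * Worked example (illustrative, using the formulas in the partcmd_update
 * comment block above): with exclusive_cpus = 0-3,
 * parent->effective_xcpus = 0-7 and newmask = 2-5 on a valid partition,
 * addmask = 0-1 (CPUs returned to the parent) and delmask = 4-5 (CPUs
 * taken away from the parent). The CPU numbers here are hypothetical.
 */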
1997 /**
1998 * compute_partition_effective_cpumask - compute effective_cpus for partition
1999 * @cs: partition root cpuset
2000 * @new_ecpus: previously computed effective_cpus to be updated
2001 *
2002 * Compute the effective_cpus of a partition root by scanning effective_xcpus
2003 * of child partition roots and excluding their effective_xcpus.
2004 *
2005 * This has the side effect of invalidating valid child partition roots,
2006 * if necessary. Since it is called from either cpuset_hotplug_update_tasks()
2007 * or update_cpumasks_hier() where parent and children are modified
2008 * successively, we don't need to call update_parent_effective_cpumask()
2009 * and the child's effective_cpus will be updated in later iterations.
2010 *
2011 * Note that rcu_read_lock() is assumed to be held.
2012 */
2013 static void compute_partition_effective_cpumask(struct cpuset *cs,
2014 struct cpumask *new_ecpus)
2015 {
2016 struct cgroup_subsys_state *css;
2017 struct cpuset *child;
2018 bool populated = partition_is_populated(cs, NULL);
2019
2020 /*
2021 * Check child partition roots to see if they should be
2022 * invalidated when
2023 * 1) child effective_xcpus is not a subset of the new
2024 * exclusive_cpus
2025 * 2) all the effective_cpus will be used up and cs
2026 * has tasks
2027 */
2028 compute_excpus(cs, new_ecpus);
2029 cpumask_and(new_ecpus, new_ecpus, cpu_active_mask);
2030
2031 rcu_read_lock();
2032 cpuset_for_each_child(child, css, cs) {
2033 if (!is_partition_valid(child))
2034 continue;
2035
2036 /*
2037 * There shouldn't be a remote partition underneath another
2038 * partition root.
2039 */
2040 WARN_ON_ONCE(is_remote_partition(child));
2041 child->prs_err = 0;
2042 if (!cpumask_subset(child->effective_xcpus,
2043 cs->effective_xcpus))
2044 child->prs_err = PERR_INVCPUS;
2045 else if (populated &&
2046 cpumask_subset(new_ecpus, child->effective_xcpus))
2047 child->prs_err = PERR_NOCPUS;
2048
2049 if (child->prs_err) {
2050 int old_prs = child->partition_root_state;
2051
2052 /*
2053 * Invalidate child partition
2054 */
2055 spin_lock_irq(&callback_lock);
2056 make_partition_invalid(child);
2057 spin_unlock_irq(&callback_lock);
2058 notify_partition_change(child, old_prs);
2059 continue;
2060 }
2061 cpumask_andnot(new_ecpus, new_ecpus,
2062 child->effective_xcpus);
2063 }
2064 rcu_read_unlock();
2065 }
2066
2067 /*
2068 * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
2069 * @cs: the cpuset to consider
2070 * @tmp: temp variables for calculating effective_cpus & partition setup
2071 * @force: don't skip any descendant cpusets if set
2072 *
2073 * When configured cpumask is changed, the effective cpumasks of this cpuset
2074 * and all its descendants need to be updated.
2075 *
2076 * On the legacy hierarchy, effective_cpus will be the same as cpus_allowed.
2077 *
2078 * Called with cpuset_mutex held
2079 */
2080 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
2081 bool force)
2082 {
2083 struct cpuset *cp;
2084 struct cgroup_subsys_state *pos_css;
2085 int old_prs, new_prs;
2086
2087 rcu_read_lock();
2088 cpuset_for_each_descendant_pre(cp, pos_css, cs) {
2089 struct cpuset *parent = parent_cs(cp);
2090 bool remote = is_remote_partition(cp);
2091 bool update_parent = false;
2092
2093 old_prs = new_prs = cp->partition_root_state;
2094
2095 /*
2096 * For child remote partition root (!= cs), we need to call
2097 * remote_cpus_update() if effective_xcpus will be changed.
2098 * Otherwise, we can skip the whole subtree.
2099 *
2100 * remote_cpus_update() will reuse tmp->new_cpus only after
2101 * its value has been processed.
2102 */
2103 if (remote && (cp != cs)) {
2104 compute_excpus(cp, tmp->new_cpus);
2105 if (cpumask_equal(cp->effective_xcpus, tmp->new_cpus)) {
2106 pos_css = css_rightmost_descendant(pos_css);
2107 continue;
2108 }
2109 rcu_read_unlock();
2110 remote_cpus_update(cp, NULL, tmp->new_cpus, tmp);
2111 rcu_read_lock();
2112
2113 /* Remote partition may be invalidated */
2114 new_prs = cp->partition_root_state;
2115 remote = (new_prs == old_prs);
2116 }
2117
2118 if (remote || (is_partition_valid(parent) && is_partition_valid(cp)))
2119 compute_partition_effective_cpumask(cp, tmp->new_cpus);
2120 else
2121 compute_effective_cpumask(tmp->new_cpus, cp, parent);
2122
2123 if (remote)
2124 goto get_css; /* Ready to update cpuset data */
2125
2126 /*
2127 * A partition with no effective_cpus is allowed as long as
2128 * there is no task associated with it. Call
2129 * update_parent_effective_cpumask() to check it.
2130 */
2131 if (is_partition_valid(cp) && cpumask_empty(tmp->new_cpus)) {
2132 update_parent = true;
2133 goto update_parent_effective;
2134 }
2135
2136 /*
2137 * If it becomes empty, inherit the effective mask of the
2138 * parent, which is guaranteed to have some CPUs unless
2139 * it is a partition root that has explicitly distributed
2140 * out all its CPUs.
2141 */
2142 if (is_in_v2_mode() && !remote && cpumask_empty(tmp->new_cpus))
2143 cpumask_copy(tmp->new_cpus, parent->effective_cpus);
2144
2145 /*
2146 * Skip the whole subtree if
2147 * 1) the cpumask remains the same,
2148 * 2) it has no partition root state,
2149 * 3) the force flag is not set, and
2150 * 4) (v2 only) its load balance state is the same as its parent's.
2151 */
2152 if (!cp->partition_root_state && !force &&
2153 cpumask_equal(tmp->new_cpus, cp->effective_cpus) &&
2154 (!cpuset_v2() ||
2155 (is_sched_load_balance(parent) == is_sched_load_balance(cp)))) {
2156 pos_css = css_rightmost_descendant(pos_css);
2157 continue;
2158 }
2159
2160 update_parent_effective:
2161 /*
2162 * update_parent_effective_cpumask() should have been called
2163 * for cs already in update_cpumask(). We should also call
2164 * cpuset_update_tasks_cpumask() again for tasks in the parent
2165 * cpuset if the parent's effective_cpus changes.
2166 */
2167 if ((cp != cs) && old_prs) {
2168 switch (parent->partition_root_state) {
2169 case PRS_ROOT:
2170 case PRS_ISOLATED:
2171 update_parent = true;
2172 break;
2173
2174 default:
2175 /*
2176 * When parent is not a partition root or is
2177 * invalid, child partition roots become
2178 * invalid too.
2179 */
2180 if (is_partition_valid(cp))
2181 new_prs = -cp->partition_root_state;
2182 WRITE_ONCE(cp->prs_err,
2183 is_partition_invalid(parent)
2184 ? PERR_INVPARENT : PERR_NOTPART);
2185 break;
2186 }
2187 }
2188 get_css:
2189 if (!css_tryget_online(&cp->css))
2190 continue;
2191 rcu_read_unlock();
2192
2193 if (update_parent) {
2194 update_parent_effective_cpumask(cp, partcmd_update, NULL, tmp);
2195 /*
2196 * The cpuset partition_root_state may become
2197 * invalid. Capture it.
2198 */
2199 new_prs = cp->partition_root_state;
2200 }
2201
2202 spin_lock_irq(&callback_lock);
2203 cpumask_copy(cp->effective_cpus, tmp->new_cpus);
2204 cp->partition_root_state = new_prs;
2205 /*
2206 * Need to compute effective_xcpus if either exclusive_cpus
2207 * is non-empty or it is a valid partition root.
2208 */
2209 if ((new_prs > 0) || !cpumask_empty(cp->exclusive_cpus))
2210 compute_excpus(cp, cp->effective_xcpus);
2211 if (new_prs <= 0)
2212 reset_partition_data(cp);
2213 spin_unlock_irq(&callback_lock);
2214
2215 notify_partition_change(cp, old_prs);
2216
2217 WARN_ON(!is_in_v2_mode() &&
2218 !cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
2219
2220 cpuset_update_tasks_cpumask(cp, tmp->new_cpus);
2221
2222 /*
2223 * On default hierarchy, inherit the CS_SCHED_LOAD_BALANCE
2224 * from parent if current cpuset isn't a valid partition root
2225 * and their load balance states differ.
2226 */
2227 if (cpuset_v2() && !is_partition_valid(cp) &&
2228 (is_sched_load_balance(parent) != is_sched_load_balance(cp))) {
2229 if (is_sched_load_balance(parent))
2230 set_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
2231 else
2232 clear_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
2233 }
2234
2235 /*
2236 * On legacy hierarchy, if the effective cpumask of any non-
2237 * empty cpuset is changed, we need to rebuild sched domains.
2238 * On default hierarchy, the cpuset needs to be a partition
2239 * root as well.
2240 */
2241 if (!cpumask_empty(cp->cpus_allowed) &&
2242 is_sched_load_balance(cp) &&
2243 (!cpuset_v2() || is_partition_valid(cp)))
2244 cpuset_force_rebuild();
2245
2246 rcu_read_lock();
2247 css_put(&cp->css);
2248 }
2249 rcu_read_unlock();
2250 }
2251
2252 /**
2253 * update_sibling_cpumasks - Update siblings cpumasks
2254 * @parent: Parent cpuset
2255 * @cs: Current cpuset
2256 * @tmp: Temp variables
2257 */
2258 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
2259 struct tmpmasks *tmp)
2260 {
2261 struct cpuset *sibling;
2262 struct cgroup_subsys_state *pos_css;
2263
2264 lockdep_assert_cpuset_lock_held();
2265
2266 /*
2267 * Check all its siblings and call update_cpumasks_hier()
2268 * if their effective_cpus will need to be changed.
2269 *
2270 * It is possible a change in parent's effective_cpus
2271 * due to a change in a child partition's effective_xcpus will impact
2272 * its siblings even if they do not inherit parent's effective_cpus
2273 * directly. It should not impact a valid partition.
2274 *
2275 * The update_cpumasks_hier() function may sleep. So we have to
2276 * release the RCU read lock before calling it.
2277 */
2278 rcu_read_lock();
2279 cpuset_for_each_child(sibling, pos_css, parent) {
2280 if (sibling == cs || is_partition_valid(sibling))
2281 continue;
2282
2283 compute_effective_cpumask(tmp->new_cpus, sibling,
2284 parent);
2285 if (cpumask_equal(tmp->new_cpus, sibling->effective_cpus))
2286 continue;
2287
2288 if (!css_tryget_online(&sibling->css))
2289 continue;
2290
2291 rcu_read_unlock();
2292 update_cpumasks_hier(sibling, tmp, false);
2293 rcu_read_lock();
2294 css_put(&sibling->css);
2295 }
2296 rcu_read_unlock();
2297 }
2298
2299 static int parse_cpuset_cpulist(const char *buf, struct cpumask *out_mask)
2300 {
2301 int retval;
2302
2303 retval = cpulist_parse(buf, out_mask);
2304 if (retval < 0)
2305 return retval;
2306 if (!cpumask_subset(out_mask, top_cpuset.cpus_allowed))
2307 return -EINVAL;
2308
2309 return 0;
2310 }
2311
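/*
 * For illustration: cpulist_parse() accepts the usual cpulist syntax,
 * e.g. "0-3,8" or "0,2,4-7". A list naming CPUs outside
 * top_cpuset.cpus_allowed is rejected with -EINVAL above.
 */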
2312 /**
2313 * validate_partition - Validate a cpuset partition configuration
2314 * @cs: The cpuset to validate
2315 * @trialcs: The trial cpuset containing proposed configuration changes
2316 *
2317 * If any validation check fails, the appropriate error code is set in the
2318 * cpuset's prs_err field.
2319 *
2320 * Return: PRS error code (0 if valid, non-zero error code if invalid)
2321 */
2322 static enum prs_errcode validate_partition(struct cpuset *cs, struct cpuset *trialcs)
2323 {
2324 struct cpuset *parent = parent_cs(cs);
2325
2326 if (cs_is_member(trialcs))
2327 return PERR_NONE;
2328
2329 if (cpumask_empty(trialcs->effective_xcpus))
2330 return PERR_INVCPUS;
2331
2332 if (prstate_housekeeping_conflict(trialcs->partition_root_state,
2333 trialcs->effective_xcpus))
2334 return PERR_HKEEPING;
2335
2336 if (tasks_nocpu_error(parent, cs, trialcs->effective_xcpus))
2337 return PERR_NOCPUS;
2338
2339 return PERR_NONE;
2340 }
2341
2342 /**
2343 * partition_cpus_change - Handle partition state changes due to CPU mask updates
2344 * @cs: The target cpuset being modified
2345 * @trialcs: The trial cpuset containing proposed configuration changes
2346 * @tmp: Temporary masks for intermediate calculations
2347 *
2348 * This function handles partition state transitions triggered by CPU mask changes.
2349 * CPU modifications may cause a partition to be disabled or require state updates.
2350 */
2351 static void partition_cpus_change(struct cpuset *cs, struct cpuset *trialcs,
2352 struct tmpmasks *tmp)
2353 {
2354 enum prs_errcode prs_err;
2355
2356 if (cs_is_member(cs))
2357 return;
2358
2359 prs_err = validate_partition(cs, trialcs);
2360 if (prs_err)
2361 trialcs->prs_err = cs->prs_err = prs_err;
2362
2363 if (is_remote_partition(cs)) {
2364 if (trialcs->prs_err)
2365 remote_partition_disable(cs, tmp);
2366 else
2367 remote_cpus_update(cs, trialcs->exclusive_cpus,
2368 trialcs->effective_xcpus, tmp);
2369 } else {
2370 if (trialcs->prs_err)
2371 update_parent_effective_cpumask(cs, partcmd_invalidate,
2372 NULL, tmp);
2373 else
2374 update_parent_effective_cpumask(cs, partcmd_update,
2375 trialcs->effective_xcpus, tmp);
2376 }
2377 }
2378
2379 /**
2380 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
2381 * @cs: the cpuset to consider
2382 * @trialcs: trial cpuset
2383 * @buf: buffer of cpu numbers written to this cpuset
2384 */
2385 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
2386 const char *buf)
2387 {
2388 int retval;
2389 struct tmpmasks tmp;
2390 bool force = false;
2391 int old_prs = cs->partition_root_state;
2392
2393 retval = parse_cpuset_cpulist(buf, trialcs->cpus_allowed);
2394 if (retval < 0)
2395 return retval;
2396
2397 /* Nothing to do if the cpus didn't change */
2398 if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
2399 return 0;
2400
2401 compute_trialcs_excpus(trialcs, cs);
2402 trialcs->prs_err = PERR_NONE;
2403
2404 retval = validate_change(cs, trialcs);
2405 if (retval < 0)
2406 return retval;
2407
2408 if (alloc_tmpmasks(&tmp))
2409 return -ENOMEM;
2410
2411 /*
2412 * Check all the descendants in update_cpumasks_hier() if
2413 * effective_xcpus is to be changed.
2414 */
2415 force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus);
2416
2417 partition_cpus_change(cs, trialcs, &tmp);
2418
2419 spin_lock_irq(&callback_lock);
2420 cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
2421 cpumask_copy(cs->effective_xcpus, trialcs->effective_xcpus);
2422 if ((old_prs > 0) && !is_partition_valid(cs))
2423 reset_partition_data(cs);
2424 spin_unlock_irq(&callback_lock);
2425
2426 /* effective_cpus/effective_xcpus will be updated here */
2427 update_cpumasks_hier(cs, &tmp, force);
2428
2429 /* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */
2430 if (cs->partition_root_state)
2431 update_partition_sd_lb(cs, old_prs);
2432
2433 free_tmpmasks(&tmp);
2434 return retval;
2435 }
2436
2437 /**
2438 * update_exclusive_cpumask - update the exclusive_cpus mask of a cpuset
2439 * @cs: the cpuset to consider
2440 * @trialcs: trial cpuset
2441 * @buf: buffer of cpu numbers written to this cpuset
2442 *
2443 * The tasks' cpumask will be updated if cs is a valid partition root.
2444 */
2445 static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs,
2446 const char *buf)
2447 {
2448 int retval;
2449 struct tmpmasks tmp;
2450 bool force = false;
2451 int old_prs = cs->partition_root_state;
2452
2453 retval = parse_cpuset_cpulist(buf, trialcs->exclusive_cpus);
2454 if (retval < 0)
2455 return retval;
2456
2457 /* Nothing to do if the CPUs didn't change */
2458 if (cpumask_equal(cs->exclusive_cpus, trialcs->exclusive_cpus))
2459 return 0;
2460
2461 /*
2462 * Reject the change if the exclusive CPUs conflict with
2463 * those of the siblings.
2464 */
2465 if (compute_trialcs_excpus(trialcs, cs))
2466 return -EINVAL;
2467
2468 /*
2469 * Check all the descendants in update_cpumasks_hier() if
2470 * effective_xcpus is to be changed.
2471 */
2472 force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus);
2473
2474 retval = validate_change(cs, trialcs);
2475 if (retval)
2476 return retval;
2477
2478 if (alloc_tmpmasks(&tmp))
2479 return -ENOMEM;
2480
2481 trialcs->prs_err = PERR_NONE;
2482 partition_cpus_change(cs, trialcs, &tmp);
2483
2484 spin_lock_irq(&callback_lock);
2485 cpumask_copy(cs->exclusive_cpus, trialcs->exclusive_cpus);
2486 cpumask_copy(cs->effective_xcpus, trialcs->effective_xcpus);
2487 if ((old_prs > 0) && !is_partition_valid(cs))
2488 reset_partition_data(cs);
2489 spin_unlock_irq(&callback_lock);
2490
2491 /*
2492 * Call update_cpumasks_hier() to update effective_cpus/effective_xcpus
2493 * of the subtree when it is a valid partition root or effective_xcpus
2494 * is updated.
2495 */
2496 if (is_partition_valid(cs) || force)
2497 update_cpumasks_hier(cs, &tmp, force);
2498
2499 /* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */
2500 if (cs->partition_root_state)
2501 update_partition_sd_lb(cs, old_prs);
2502
2503 free_tmpmasks(&tmp);
2504 return 0;
2505 }
2506
2507 /*
2508 * Migrate memory region from one set of nodes to another. This is
2509 * performed asynchronously as it can be called from process migration path
2510 * holding locks involved in process management. All mm migrations are
2511 * performed in the queued order and can be waited for by flushing
2512 * cpuset_migrate_mm_wq.
2513 */
2514
2515 struct cpuset_migrate_mm_work {
2516 struct work_struct work;
2517 struct mm_struct *mm;
2518 nodemask_t from;
2519 nodemask_t to;
2520 };
2521
2522 static void cpuset_migrate_mm_workfn(struct work_struct *work)
2523 {
2524 struct cpuset_migrate_mm_work *mwork =
2525 container_of(work, struct cpuset_migrate_mm_work, work);
2526
2527 /* on a wq worker, no need to worry about %current's mems_allowed */
2528 do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
2529 mmput(mwork->mm);
2530 kfree(mwork);
2531 }
2532
2533 static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
2534 const nodemask_t *to)
2535 {
2536 struct cpuset_migrate_mm_work *mwork;
2537
2538 if (nodes_equal(*from, *to)) {
2539 mmput(mm);
2540 return;
2541 }
2542
2543 mwork = kzalloc_obj(*mwork);
2544 if (mwork) {
2545 mwork->mm = mm;
2546 mwork->from = *from;
2547 mwork->to = *to;
2548 INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
2549 queue_work(cpuset_migrate_mm_wq, &mwork->work);
2550 } else {
2551 mmput(mm);
2552 }
2553 }
2554
2555 static void flush_migrate_mm_task_workfn(struct callback_head *head)
2556 {
2557 flush_workqueue(cpuset_migrate_mm_wq);
2558 kfree(head);
2559 }
2560
2561 static void schedule_flush_migrate_mm(void)
2562 {
2563 struct callback_head *flush_cb;
2564
2565 flush_cb = kzalloc_obj(struct callback_head);
2566 if (!flush_cb)
2567 return;
2568
2569 init_task_work(flush_cb, flush_migrate_mm_task_workfn);
2570
2571 if (task_work_add(current, flush_cb, TWA_RESUME))
2572 kfree(flush_cb);
2573 }
2574
2575 /*
2576 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
2577 * @tsk: the task to change
2578 * @newmems: new nodes that the task will be set
2579 *
2580 * We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed
2581 * and rebind the task's mempolicy, if any. If the task is allocating in
2582 * parallel, it might temporarily see an empty intersection, which results in
2583 * a seqlock check and retry before OOM or allocation failure.
2584 */
2585 static void cpuset_change_task_nodemask(struct task_struct *tsk,
2586 nodemask_t *newmems)
2587 {
2588 task_lock(tsk);
2589
2590 local_irq_disable();
2591 write_seqcount_begin(&tsk->mems_allowed_seq);
2592
2593 nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
2594 mpol_rebind_task(tsk, newmems);
2595 tsk->mems_allowed = *newmems;
2596
2597 write_seqcount_end(&tsk->mems_allowed_seq);
2598 local_irq_enable();
2599
2600 task_unlock(tsk);
2601 }
2602
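/*
 * Note (added for clarity): allocator-side readers sample mems_allowed
 * under read_mems_allowed_begin()/read_mems_allowed_retry(), so a
 * reader racing with the update above retries instead of acting on a
 * transiently inconsistent nodemask.
 */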
2603 static void *cpuset_being_rebound;
2604
2605 /**
2606 * cpuset_update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
2607 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
2608 *
2609 * Iterate through each task of @cs updating its mems_allowed to the
2610 * effective cpuset's. As this function is called with cpuset_mutex held,
2611 * cpuset membership stays stable.
2612 */
2613 void cpuset_update_tasks_nodemask(struct cpuset *cs)
2614 {
2615 static nodemask_t newmems; /* protected by cpuset_mutex */
2616 struct css_task_iter it;
2617 struct task_struct *task;
2618
2619 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */
2620
2621 guarantee_online_mems(cs, &newmems);
2622
2623 /*
2624 * The mpol_rebind_mm() call takes mmap_lock, which we couldn't
2625 * take while holding tasklist_lock. Forks can happen - the
2626 * mpol_dup() cpuset_being_rebound check will catch such forks,
2627 * and rebind their vma mempolicies too. Because we still hold
2628 * the global cpuset_mutex, we know that no other rebind effort
2629 * will be contending for the global variable cpuset_being_rebound.
2630 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
2631 * is idempotent. Also migrate pages in each mm to new nodes.
2632 */
2633 css_task_iter_start(&cs->css, 0, &it);
2634 while ((task = css_task_iter_next(&it))) {
2635 struct mm_struct *mm;
2636 bool migrate;
2637
2638 cpuset_change_task_nodemask(task, &newmems);
2639
2640 mm = get_task_mm(task);
2641 if (!mm)
2642 continue;
2643
2644 migrate = is_memory_migrate(cs);
2645
2646 mpol_rebind_mm(mm, &cs->mems_allowed);
2647 if (migrate)
2648 cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
2649 else
2650 mmput(mm);
2651 }
2652 css_task_iter_end(&it);
2653
2654 /*
2655 * All the tasks' nodemasks have been updated, update
2656 * cs->old_mems_allowed.
2657 */
2658 cs->old_mems_allowed = newmems;
2659
2660 /* We're done rebinding vmas to this cpuset's new mems_allowed. */
2661 cpuset_being_rebound = NULL;
2662 }
2663
2664 /*
2665 * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree
2666 * @cs: the cpuset to consider
2667 * @new_mems: a temp variable for calculating new effective_mems
2668 *
2669 * When configured nodemask is changed, the effective nodemasks of this cpuset
2670 * and all its descendants need to be updated.
2671 *
2672 * On the legacy hierarchy, effective_mems will be the same as mems_allowed.
2673 *
2674 * Called with cpuset_mutex held
2675 */
2676 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
2677 {
2678 struct cpuset *cp;
2679 struct cgroup_subsys_state *pos_css;
2680
2681 rcu_read_lock();
2682 cpuset_for_each_descendant_pre(cp, pos_css, cs) {
2683 struct cpuset *parent = parent_cs(cp);
2684
2685 bool has_mems = nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);
2686
2687 /*
2688 * If it becomes empty, inherit the effective mask of the
2689 * parent, which is guaranteed to have some MEMs.
2690 */
2691 if (is_in_v2_mode() && !has_mems)
2692 *new_mems = parent->effective_mems;
2693
2694 /* Skip the whole subtree if the nodemask remains the same. */
2695 if (nodes_equal(*new_mems, cp->effective_mems)) {
2696 pos_css = css_rightmost_descendant(pos_css);
2697 continue;
2698 }
2699
2700 if (!css_tryget_online(&cp->css))
2701 continue;
2702 rcu_read_unlock();
2703
2704 spin_lock_irq(&callback_lock);
2705 cp->effective_mems = *new_mems;
2706 spin_unlock_irq(&callback_lock);
2707
2708 WARN_ON(!is_in_v2_mode() &&
2709 !nodes_equal(cp->mems_allowed, cp->effective_mems));
2710
2711 cpuset_update_tasks_nodemask(cp);
2712
2713 rcu_read_lock();
2714 css_put(&cp->css);
2715 }
2716 rcu_read_unlock();
2717 }
2718
2719 /*
2720 * Handle user request to change the 'mems' memory placement
2721 * of a cpuset. Needs to validate the request, update the
2722 * cpuset's mems_allowed and, for each task in the cpuset,
2723 * update mems_allowed, rebind the task's mempolicy and any vma
2724 * mempolicies, and, if the cpuset is marked 'memory_migrate',
2725 * migrate the task's pages to the new memory.
2726 *
2727 * Call with cpuset_mutex held. May take callback_lock during call.
2728 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
2729 * lock each such tasks mm->mmap_lock, scan its vma's and rebind
2730 * their mempolicies to the cpusets new mems_allowed.
2731 */
2732 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
2733 const char *buf)
2734 {
2735 int retval;
2736
2737 /*
2738 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
2739 * The validate_change() call ensures that cpusets with tasks have memory.
2740 */
2741 retval = nodelist_parse(buf, trialcs->mems_allowed);
2742 if (retval < 0)
2743 return retval;
2744
2745 if (!nodes_subset(trialcs->mems_allowed,
2746 top_cpuset.mems_allowed))
2747 return -EINVAL;
2748
2749 /* No change? nothing to do */
2750 if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed))
2751 return 0;
2752
2753 retval = validate_change(cs, trialcs);
2754 if (retval < 0)
2755 return retval;
2756
2757 check_insane_mems_config(&trialcs->mems_allowed);
2758
2759 spin_lock_irq(&callback_lock);
2760 cs->mems_allowed = trialcs->mems_allowed;
2761 spin_unlock_irq(&callback_lock);
2762
2763 /* use trialcs->mems_allowed as a temp variable */
2764 update_nodemasks_hier(cs, &trialcs->mems_allowed);
2765 return 0;
2766 }
2767
2768 bool current_cpuset_is_being_rebound(void)
2769 {
2770 bool ret;
2771
2772 rcu_read_lock();
2773 ret = task_cs(current) == cpuset_being_rebound;
2774 rcu_read_unlock();
2775
2776 return ret;
2777 }
2778
2779 /*
2780 * cpuset_update_flag - read a 0 or a 1 in a file and update associated flag
2781 * bit: the bit to update (see cpuset_flagbits_t)
2782 * cs: the cpuset to update
2783 * turning_on: whether the flag is being set or cleared
2784 *
2785 * Call with cpuset_mutex held.
2786 */
2787
2788 int cpuset_update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
2789 int turning_on)
2790 {
2791 struct cpuset *trialcs;
2792 int balance_flag_changed;
2793 int spread_flag_changed;
2794 int err;
2795
2796 trialcs = dup_or_alloc_cpuset(cs);
2797 if (!trialcs)
2798 return -ENOMEM;
2799
2800 if (turning_on)
2801 set_bit(bit, &trialcs->flags);
2802 else
2803 clear_bit(bit, &trialcs->flags);
2804
2805 err = validate_change(cs, trialcs);
2806 if (err < 0)
2807 goto out;
2808
2809 balance_flag_changed = (is_sched_load_balance(cs) !=
2810 is_sched_load_balance(trialcs));
2811
2812 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
2813 || (is_spread_page(cs) != is_spread_page(trialcs)));
2814
2815 spin_lock_irq(&callback_lock);
2816 cs->flags = trialcs->flags;
2817 spin_unlock_irq(&callback_lock);
2818
2819 if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) {
2820 if (cpuset_v2())
2821 cpuset_force_rebuild();
2822 else
2823 rebuild_sched_domains_locked();
2824 }
2825
2826 if (spread_flag_changed)
2827 cpuset1_update_tasks_flags(cs);
2828 out:
2829 free_cpuset(trialcs);
2830 return err;
2831 }
2832
2833 /**
2834 * update_prstate - update partition_root_state
2835 * @cs: the cpuset to update
2836 * @new_prs: new partition root state
2837 * Return: 0 if successful, != 0 if error
2838 *
2839 * Call with cpuset_mutex held.
2840 */
2841 static int update_prstate(struct cpuset *cs, int new_prs)
2842 {
2843 int err = PERR_NONE, old_prs = cs->partition_root_state;
2844 struct cpuset *parent = parent_cs(cs);
2845 struct tmpmasks tmpmask;
2846 bool isolcpus_updated = false;
2847
2848 if (old_prs == new_prs)
2849 return 0;
2850
2851 /*
2852 * Treat a previously invalid partition root as if it is a "member".
2853 */
2854 if (new_prs && is_partition_invalid(cs))
2855 old_prs = PRS_MEMBER;
2856
2857 if (alloc_tmpmasks(&tmpmask))
2858 return -ENOMEM;
2859
2860 err = update_partition_exclusive_flag(cs, new_prs);
2861 if (err)
2862 goto out;
2863
2864 if (!old_prs) {
2865 /*
2866 * cpus_allowed and exclusive_cpus cannot be both empty.
2867 */
2868 if (xcpus_empty(cs)) {
2869 err = PERR_CPUSEMPTY;
2870 goto out;
2871 }
2872
2873 /*
2874 * We don't support the creation of a new local partition with
2875 * a remote partition underneath it. This unsupported
2876 * setting can happen only if parent is the top_cpuset because
2877 * a remote partition cannot be created underneath an existing
2878 * local or remote partition.
2879 */
2880 if ((parent == &top_cpuset) &&
2881 cpumask_intersects(cs->exclusive_cpus, subpartitions_cpus)) {
2882 err = PERR_REMOTE;
2883 goto out;
2884 }
2885
2886 /*
2887 * If the parent is a valid partition, enable a local partition.
2888 * Otherwise, enable a remote partition.
2889 */
2890 if (is_partition_valid(parent)) {
2891 enum partition_cmd cmd = (new_prs == PRS_ROOT)
2892 ? partcmd_enable : partcmd_enablei;
2893
2894 err = update_parent_effective_cpumask(cs, cmd, NULL, &tmpmask);
2895 } else {
2896 err = remote_partition_enable(cs, new_prs, &tmpmask);
2897 }
2898 } else if (old_prs && new_prs) {
2899 /*
2900 * A change in load balance state only, no change in cpumasks.
2901 * Need to update isolated_cpus.
2902 */
2903 if (((new_prs == PRS_ISOLATED) &&
2904 !isolated_cpus_can_update(cs->effective_xcpus, NULL)) ||
2905 prstate_housekeeping_conflict(new_prs, cs->effective_xcpus))
2906 err = PERR_HKEEPING;
2907 else
2908 isolcpus_updated = true;
2909 } else {
2910 /*
2911 * Switching back to member is always allowed even if it
2912 * disables child partitions.
2913 */
2914 if (is_remote_partition(cs))
2915 remote_partition_disable(cs, &tmpmask);
2916 else
2917 update_parent_effective_cpumask(cs, partcmd_disable,
2918 NULL, &tmpmask);
2919
2920 /*
2921 * Invalidation of child partitions will be done in
2922 * update_cpumasks_hier().
2923 */
2924 }
2925 out:
2926 /*
2927 * Make partition invalid & disable CS_CPU_EXCLUSIVE if an error
2928 * happens.
2929 */
2930 if (err) {
2931 new_prs = -new_prs;
2932 update_partition_exclusive_flag(cs, new_prs);
2933 }
2934
2935 spin_lock_irq(&callback_lock);
2936 cs->partition_root_state = new_prs;
2937 WRITE_ONCE(cs->prs_err, err);
2938 if (!is_partition_valid(cs))
2939 reset_partition_data(cs);
2940 else if (isolcpus_updated)
2941 isolated_cpus_update(old_prs, new_prs, cs->effective_xcpus);
2942 spin_unlock_irq(&callback_lock);
2943
2944 /* Force update if switching back to member & update effective_xcpus */
2945 update_cpumasks_hier(cs, &tmpmask, !new_prs);
2946
2947 /* A newly created partition must have effective_xcpus set */
2948 WARN_ON_ONCE(!old_prs && (new_prs > 0)
2949 && cpumask_empty(cs->effective_xcpus));
2950
2951 /* Update sched domains and load balance flag */
2952 update_partition_sd_lb(cs, old_prs);
2953
2954 notify_partition_change(cs, old_prs);
2955 if (force_sd_rebuild)
2956 rebuild_sched_domains_locked();
2957 free_tmpmasks(&tmpmask);
2958 return 0;
2959 }
2960
2961 static struct cpuset *cpuset_attach_old_cs;
2962
2963 /*
2964 * Check to see if a cpuset can accept a new task
2965 * For v1, cpus_allowed and mems_allowed can't be empty.
2966 * For v2, effective_cpus can't be empty.
2967 * Note that in v1, effective_cpus = cpus_allowed.
2968 */
2969 static int cpuset_can_attach_check(struct cpuset *cs)
2970 {
2971 if (cpumask_empty(cs->effective_cpus) ||
2972 (!is_in_v2_mode() && nodes_empty(cs->mems_allowed)))
2973 return -ENOSPC;
2974 return 0;
2975 }
2976
2977 static void reset_migrate_dl_data(struct cpuset *cs)
2978 {
2979 cs->nr_migrate_dl_tasks = 0;
2980 cs->sum_migrate_dl_bw = 0;
2981 }
2982
2983 /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
2984 static int cpuset_can_attach(struct cgroup_taskset *tset)
2985 {
2986 struct cgroup_subsys_state *css;
2987 struct cpuset *cs, *oldcs;
2988 struct task_struct *task;
2989 bool cpus_updated, mems_updated;
2990 int ret;
2991
2992 /* used later by cpuset_attach() */
2993 cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
2994 oldcs = cpuset_attach_old_cs;
2995 cs = css_cs(css);
2996
2997 mutex_lock(&cpuset_mutex);
2998
2999 /* Check to see if task is allowed in the cpuset */
3000 ret = cpuset_can_attach_check(cs);
3001 if (ret)
3002 goto out_unlock;
3003
3004 cpus_updated = !cpumask_equal(cs->effective_cpus, oldcs->effective_cpus);
3005 mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems);
3006
3007 cgroup_taskset_for_each(task, css, tset) {
3008 ret = task_can_attach(task);
3009 if (ret)
3010 goto out_unlock;
3011
3012 /*
3013 * Skip the rights-over-task check in v2 when nothing changes;
3014 * migration permission derives from hierarchy ownership in
3015 * cgroup_procs_write_permission().
3016 */
3017 if (!cpuset_v2() || (cpus_updated || mems_updated)) {
3018 ret = security_task_setscheduler(task);
3019 if (ret)
3020 goto out_unlock;
3021 }
3022
3023 if (dl_task(task)) {
3024 cs->nr_migrate_dl_tasks++;
3025 cs->sum_migrate_dl_bw += task->dl.dl_bw;
3026 }
3027 }
3028
3029 if (!cs->nr_migrate_dl_tasks)
3030 goto out_success;
3031
3032 if (!cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus)) {
3033 int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus);
3034
3035 if (unlikely(cpu >= nr_cpu_ids)) {
3036 reset_migrate_dl_data(cs);
3037 ret = -EINVAL;
3038 goto out_unlock;
3039 }
3040
3041 ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw);
3042 if (ret) {
3043 reset_migrate_dl_data(cs);
3044 goto out_unlock;
3045 }
3046 }
3047
3048 out_success:
3049 /*
3050 * Mark attach is in progress. This makes validate_change() fail
3051 * changes which zero cpus/mems_allowed.
3052 */
3053 cs->attach_in_progress++;
3054 out_unlock:
3055 mutex_unlock(&cpuset_mutex);
3056 return ret;
3057 }
3058
3059 static void cpuset_cancel_attach(struct cgroup_taskset *tset)
3060 {
3061 struct cgroup_subsys_state *css;
3062 struct cpuset *cs;
3063
3064 cgroup_taskset_first(tset, &css);
3065 cs = css_cs(css);
3066
3067 mutex_lock(&cpuset_mutex);
3068 dec_attach_in_progress_locked(cs);
3069
3070 if (cs->nr_migrate_dl_tasks) {
3071 int cpu = cpumask_any(cs->effective_cpus);
3072
3073 dl_bw_free(cpu, cs->sum_migrate_dl_bw);
3074 reset_migrate_dl_data(cs);
3075 }
3076
3077 mutex_unlock(&cpuset_mutex);
3078 }
3079
3080 /*
3081 * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach_task()
3082 * but we can't allocate it dynamically there. Define it globally and
3083 * allocate it from cpuset_init().
3084 */
3085 static cpumask_var_t cpus_attach;
3086 static nodemask_t cpuset_attach_nodemask_to;
3087
3088 static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task)
3089 {
3090 lockdep_assert_cpuset_lock_held();
3091
3092 if (cs != &top_cpuset)
3093 guarantee_active_cpus(task, cpus_attach);
3094 else
3095 cpumask_andnot(cpus_attach, task_cpu_possible_mask(task),
3096 subpartitions_cpus);
3097 /*
3098 * can_attach beforehand should guarantee that this doesn't
3099 * fail. TODO: have a better way to handle failure here
3100 */
3101 WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
3102
3103 cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
3104 cpuset1_update_task_spread_flags(cs, task);
3105 }
3106
3107 static void cpuset_attach(struct cgroup_taskset *tset)
3108 {
3109 struct task_struct *task;
3110 struct task_struct *leader;
3111 struct cgroup_subsys_state *css;
3112 struct cpuset *cs;
3113 struct cpuset *oldcs = cpuset_attach_old_cs;
3114 bool cpus_updated, mems_updated;
3115 bool queue_task_work = false;
3116
3117 cgroup_taskset_first(tset, &css);
3118 cs = css_cs(css);
3119
3120 lockdep_assert_cpus_held(); /* see cgroup_attach_lock() */
3121 mutex_lock(&cpuset_mutex);
3122 cpus_updated = !cpumask_equal(cs->effective_cpus,
3123 oldcs->effective_cpus);
3124 mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems);
3125
3126 /*
3127 * In the default hierarchy, enabling cpuset in the child cgroups
3128 * will trigger a number of cpuset_attach() calls with no change
3129 * in effective cpus and mems. In that case, we can optimize out
3130 * by skipping the task iteration and update.
3131 */
3132 if (cpuset_v2() && !cpus_updated && !mems_updated) {
3133 cpuset_attach_nodemask_to = cs->effective_mems;
3134 goto out;
3135 }
3136
3137 guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
3138
3139 cgroup_taskset_for_each(task, css, tset)
3140 cpuset_attach_task(cs, task);
3141
3142 /*
3143 * Change mm for all threadgroup leaders. This is expensive and may
3144 * sleep and should be moved outside migration path proper. Skip it
3145 * if there is no change in effective_mems and CS_MEMORY_MIGRATE is
3146 * not set.
3147 */
3148 cpuset_attach_nodemask_to = cs->effective_mems;
3149 if (!is_memory_migrate(cs) && !mems_updated)
3150 goto out;
3151
3152 cgroup_taskset_for_each_leader(leader, css, tset) {
3153 struct mm_struct *mm = get_task_mm(leader);
3154
3155 if (mm) {
3156 mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
3157
3158 /*
3159 * old_mems_allowed is the same as mems_allowed
3160 * here, except if this task is being moved
3161 * automatically due to hotplug. In that case
3162 * @mems_allowed has been updated and is empty, so
3163 * @old_mems_allowed is the right nodeset to
3164 * migrate mm from.
3165 */
3166 if (is_memory_migrate(cs)) {
3167 cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
3168 &cpuset_attach_nodemask_to);
3169 queue_task_work = true;
3170 } else
3171 mmput(mm);
3172 }
3173 }
3174
3175 out:
3176 if (queue_task_work)
3177 schedule_flush_migrate_mm();
3178 cs->old_mems_allowed = cpuset_attach_nodemask_to;
3179
3180 if (cs->nr_migrate_dl_tasks) {
3181 cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks;
3182 oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks;
3183 reset_migrate_dl_data(cs);
3184 }
3185
3186 dec_attach_in_progress_locked(cs);
3187
3188 mutex_unlock(&cpuset_mutex);
3189 }
3190
3191 /*
3192 * Common handling for a write to a "cpus" or "mems" file.
3193 */
3194 ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
3195 char *buf, size_t nbytes, loff_t off)
3196 {
3197 struct cpuset *cs = css_cs(of_css(of));
3198 struct cpuset *trialcs;
3199 int retval = -ENODEV;
3200
3201 /* root is read-only */
3202 if (cs == &top_cpuset)
3203 return -EACCES;
3204
3205 buf = strstrip(buf);
3206 cpuset_full_lock();
3207 if (!is_cpuset_online(cs))
3208 goto out_unlock;
3209
3210 trialcs = dup_or_alloc_cpuset(cs);
3211 if (!trialcs) {
3212 retval = -ENOMEM;
3213 goto out_unlock;
3214 }
3215
3216 switch (of_cft(of)->private) {
3217 case FILE_CPULIST:
3218 retval = update_cpumask(cs, trialcs, buf);
3219 break;
3220 case FILE_EXCLUSIVE_CPULIST:
3221 retval = update_exclusive_cpumask(cs, trialcs, buf);
3222 break;
3223 case FILE_MEMLIST:
3224 retval = update_nodemask(cs, trialcs, buf);
3225 break;
3226 default:
3227 retval = -EINVAL;
3228 break;
3229 }
3230
3231 free_cpuset(trialcs);
3232 out_unlock:
3233 update_hk_sched_domains();
3234 cpuset_full_unlock();
3235 if (of_cft(of)->private == FILE_MEMLIST)
3236 schedule_flush_migrate_mm();
3237 return retval ?: nbytes;
3238 }
3239
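/*
 * Usage sketch (illustrative, paths are examples only):
 *
 *   # echo "0-3,8" > /sys/fs/cgroup/<grp>/cpuset.cpus
 *   # echo "0-1"   > /sys/fs/cgroup/<grp>/cpuset.mems
 *
 * Both writes are routed through cpuset_write_resmask() above, with
 * of_cft(of)->private selecting the FILE_CPULIST, FILE_EXCLUSIVE_CPULIST
 * or FILE_MEMLIST handler.
 */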
3240 /*
3241 * These ascii lists should be read in a single call, by using a user
3242 * buffer large enough to hold the entire map. If read in smaller
3243 * chunks, there is no guarantee of atomicity. Since the display format
3244 * used, list of ranges of sequential numbers, is variable length,
3245 * and since these maps can change value dynamically, one could read
3246 * gibberish by doing partial reads while a list was changing.
3247 */
3248 int cpuset_common_seq_show(struct seq_file *sf, void *v)
3249 {
3250 struct cpuset *cs = css_cs(seq_css(sf));
3251 cpuset_filetype_t type = seq_cft(sf)->private;
3252 int ret = 0;
3253
3254 spin_lock_irq(&callback_lock);
3255
3256 switch (type) {
3257 case FILE_CPULIST:
3258 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
3259 break;
3260 case FILE_MEMLIST:
3261 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
3262 break;
3263 case FILE_EFFECTIVE_CPULIST:
3264 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus));
3265 break;
3266 case FILE_EFFECTIVE_MEMLIST:
3267 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems));
3268 break;
3269 case FILE_EXCLUSIVE_CPULIST:
3270 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->exclusive_cpus));
3271 break;
3272 case FILE_EFFECTIVE_XCPULIST:
3273 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_xcpus));
3274 break;
3275 case FILE_SUBPARTS_CPULIST:
3276 seq_printf(sf, "%*pbl\n", cpumask_pr_args(subpartitions_cpus));
3277 break;
3278 case FILE_ISOLATED_CPULIST:
3279 seq_printf(sf, "%*pbl\n", cpumask_pr_args(isolated_cpus));
3280 break;
3281 default:
3282 ret = -EINVAL;
3283 }
3284
3285 spin_unlock_irq(&callback_lock);
3286 return ret;
3287 }
3288
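/*
 * Note (added): the "%*pbl" format prints a bitmap as a ranged list,
 * so reads of these files return strings such as "0-3,8" rather than a
 * hexadecimal mask.
 */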
3289 static int cpuset_partition_show(struct seq_file *seq, void *v)
3290 {
3291 struct cpuset *cs = css_cs(seq_css(seq));
3292 const char *err, *type = NULL;
3293
3294 switch (cs->partition_root_state) {
3295 case PRS_ROOT:
3296 seq_puts(seq, "root\n");
3297 break;
3298 case PRS_ISOLATED:
3299 seq_puts(seq, "isolated\n");
3300 break;
3301 case PRS_MEMBER:
3302 seq_puts(seq, "member\n");
3303 break;
3304 case PRS_INVALID_ROOT:
3305 type = "root";
3306 fallthrough;
3307 case PRS_INVALID_ISOLATED:
3308 if (!type)
3309 type = "isolated";
3310 err = perr_strings[READ_ONCE(cs->prs_err)];
3311 if (err)
3312 seq_printf(seq, "%s invalid (%s)\n", type, err);
3313 else
3314 seq_printf(seq, "%s invalid\n", type);
3315 break;
3316 }
3317 return 0;
3318 }
3319
3320 static ssize_t cpuset_partition_write(struct kernfs_open_file *of, char *buf,
3321 size_t nbytes, loff_t off)
3322 {
3323 struct cpuset *cs = css_cs(of_css(of));
3324 int val;
3325 int retval = -ENODEV;
3326
3327 buf = strstrip(buf);
3328
3329 if (!strcmp(buf, "root"))
3330 val = PRS_ROOT;
3331 else if (!strcmp(buf, "member"))
3332 val = PRS_MEMBER;
3333 else if (!strcmp(buf, "isolated"))
3334 val = PRS_ISOLATED;
3335 else
3336 return -EINVAL;
3337
3338 cpuset_full_lock();
3339 if (is_cpuset_online(cs))
3340 retval = update_prstate(cs, val);
3341 update_hk_sched_domains();
3342 cpuset_full_unlock();
3343 return retval ?: nbytes;
3344 }
3345
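/*
 * Usage sketch (illustrative): a cgroup becomes a partition root with
 *
 *   # echo root > cpuset.cpus.partition
 *
 * "isolated" additionally requests no scheduling domain for the
 * partition's CPUs, and "member" reverts it. An invalid partition is
 * reported by cpuset_partition_show() above as "<type> invalid (<reason>)".
 */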
3346 /*
3347 * This is currently a minimal set for the default hierarchy. It can be
3348 * expanded later on by migrating more features and control files from v1.
3349 */
3350 static struct cftype dfl_files[] = {
3351 {
3352 .name = "cpus",
3353 .seq_show = cpuset_common_seq_show,
3354 .write = cpuset_write_resmask,
3355 .max_write_len = (100U + 6 * NR_CPUS),
3356 .private = FILE_CPULIST,
3357 .flags = CFTYPE_NOT_ON_ROOT,
3358 },
3359
3360 {
3361 .name = "mems",
3362 .seq_show = cpuset_common_seq_show,
3363 .write = cpuset_write_resmask,
3364 .max_write_len = (100U + 6 * MAX_NUMNODES),
3365 .private = FILE_MEMLIST,
3366 .flags = CFTYPE_NOT_ON_ROOT,
3367 },
3368
3369 {
3370 .name = "cpus.effective",
3371 .seq_show = cpuset_common_seq_show,
3372 .private = FILE_EFFECTIVE_CPULIST,
3373 },
3374
3375 {
3376 .name = "mems.effective",
3377 .seq_show = cpuset_common_seq_show,
3378 .private = FILE_EFFECTIVE_MEMLIST,
3379 },
3380
3381 {
3382 .name = "cpus.partition",
3383 .seq_show = cpuset_partition_show,
3384 .write = cpuset_partition_write,
3385 .private = FILE_PARTITION_ROOT,
3386 .flags = CFTYPE_NOT_ON_ROOT,
3387 .file_offset = offsetof(struct cpuset, partition_file),
3388 },
3389
3390 {
3391 .name = "cpus.exclusive",
3392 .seq_show = cpuset_common_seq_show,
3393 .write = cpuset_write_resmask,
3394 .max_write_len = (100U + 6 * NR_CPUS),
3395 .private = FILE_EXCLUSIVE_CPULIST,
3396 .flags = CFTYPE_NOT_ON_ROOT,
3397 },
3398
3399 {
3400 .name = "cpus.exclusive.effective",
3401 .seq_show = cpuset_common_seq_show,
3402 .private = FILE_EFFECTIVE_XCPULIST,
3403 .flags = CFTYPE_NOT_ON_ROOT,
3404 },
3405
3406 {
3407 .name = "cpus.subpartitions",
3408 .seq_show = cpuset_common_seq_show,
3409 .private = FILE_SUBPARTS_CPULIST,
3410 .flags = CFTYPE_ONLY_ON_ROOT | CFTYPE_DEBUG,
3411 },
3412
3413 {
3414 .name = "cpus.isolated",
3415 .seq_show = cpuset_common_seq_show,
3416 .private = FILE_ISOLATED_CPULIST,
3417 .flags = CFTYPE_ONLY_ON_ROOT,
3418 },
3419
3420 { } /* terminate */
3421 };
3422
3423
3424 /**
3425 * cpuset_css_alloc - Allocate a cpuset css
3426 * @parent_css: Parent css of the control group that the new cpuset will be
3427 * part of
3428 * Return: cpuset css on success, ERR_PTR(-ENOMEM) on failure.
3429 *
3430 * Allocate and initialize a new cpuset css for a non-NULL @parent_css;
3431 * return the top cpuset css otherwise.
3432 */
3433 static struct cgroup_subsys_state *
3434 cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
3435 {
3436 struct cpuset *cs;
3437
3438 if (!parent_css)
3439 return &top_cpuset.css;
3440
3441 cs = dup_or_alloc_cpuset(NULL);
3442 if (!cs)
3443 return ERR_PTR(-ENOMEM);
3444
3445 __set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
3446 cpuset1_init(cs);
3447
3448 /* Set CS_MEMORY_MIGRATE for default hierarchy */
3449 if (cpuset_v2())
3450 __set_bit(CS_MEMORY_MIGRATE, &cs->flags);
3451
3452 return &cs->css;
3453 }
3454
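/*
 * Called when a new cpuset css goes online. Bumps the cpuset count and,
 * in v2 mode, lets the child start out with its parent's effective cpus
 * and mems.
 */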
3455 static int cpuset_css_online(struct cgroup_subsys_state *css)
3456 {
3457 struct cpuset *cs = css_cs(css);
3458 struct cpuset *parent = parent_cs(cs);
3459
3460 if (!parent)
3461 return 0;
3462
3463 cpuset_full_lock();
3464 /*
3465 * For v2, clear CS_SCHED_LOAD_BALANCE if parent is isolated
3466 */
3467 if (cpuset_v2() && !is_sched_load_balance(parent))
3468 clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
3469
3470 cpuset_inc();
3471
3472 spin_lock_irq(&callback_lock);
3473 if (is_in_v2_mode()) {
3474 cpumask_copy(cs->effective_cpus, parent->effective_cpus);
3475 cs->effective_mems = parent->effective_mems;
3476 }
3477 spin_unlock_irq(&callback_lock);
3478 cpuset1_online_css(css);
3479
3480 cpuset_full_unlock();
3481 return 0;
3482 }
3483
3484 /*
3485 * If the cpuset being removed has its flag 'sched_load_balance'
3486 * enabled, then simulate turning sched_load_balance off, which
3487 * will call rebuild_sched_domains_locked(). That is not needed
3488 * in the default hierarchy where only changes in partition
3489 * will cause repartitioning.
3490 */
3491 static void cpuset_css_offline(struct cgroup_subsys_state *css)
3492 {
3493 struct cpuset *cs = css_cs(css);
3494
3495 cpuset_full_lock();
3496 if (!cpuset_v2() && is_sched_load_balance(cs))
3497 cpuset_update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
3498
3499 cpuset_dec();
3500 cpuset_full_unlock();
3501 }
3502
3503 /*
3504 * If a dying cpuset has the 'cpus.partition' enabled, turn it off by
3505 * changing it back to member to free its exclusive CPUs back to the pool to
3506 * be used by other online cpusets.
3507 */
3508 static void cpuset_css_killed(struct cgroup_subsys_state *css)
3509 {
3510 struct cpuset *cs = css_cs(css);
3511
3512 cpuset_full_lock();
3513 /* Reset valid partition back to member */
3514 if (is_partition_valid(cs))
3515 update_prstate(cs, PRS_MEMBER);
3516 update_hk_sched_domains();
3517 cpuset_full_unlock();
3518 }
3519
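/* Final destruction of a cpuset css: free the cpuset and its cpumasks. */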
3520 static void cpuset_css_free(struct cgroup_subsys_state *css)
3521 {
3522 struct cpuset *cs = css_cs(css);
3523
3524 free_cpuset(cs);
3525 }
3526
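/*
 * Called when the cpuset controller is bound to a hierarchy. Reset
 * top_cpuset's allowed masks: the full possible masks in v2 mode, or the
 * current effective masks when a v1 hierarchy is mounted.
 */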
3527 static void cpuset_bind(struct cgroup_subsys_state *root_css)
3528 {
3529 mutex_lock(&cpuset_mutex);
3530 spin_lock_irq(&callback_lock);
3531
3532 if (is_in_v2_mode()) {
3533 cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
3534 cpumask_copy(top_cpuset.effective_xcpus, cpu_possible_mask);
3535 top_cpuset.mems_allowed = node_possible_map;
3536 } else {
3537 cpumask_copy(top_cpuset.cpus_allowed,
3538 top_cpuset.effective_cpus);
3539 top_cpuset.mems_allowed = top_cpuset.effective_mems;
3540 }
3541
3542 spin_unlock_irq(&callback_lock);
3543 mutex_unlock(&cpuset_mutex);
3544 }
3545
3546 /*
3547 * In case the child is cloned into a cpuset different from its parent,
3548 * additional checks are done to see if the move is allowed.
3549 */
3550 static int cpuset_can_fork(struct task_struct *task, struct css_set *cset)
3551 {
3552 struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
3553 bool same_cs;
3554 int ret;
3555
3556 rcu_read_lock();
3557 same_cs = (cs == task_cs(current));
3558 rcu_read_unlock();
3559
3560 if (same_cs)
3561 return 0;
3562
3563 lockdep_assert_held(&cgroup_mutex);
3564 mutex_lock(&cpuset_mutex);
3565
3566 /* Check to see if task is allowed in the cpuset */
3567 ret = cpuset_can_attach_check(cs);
3568 if (ret)
3569 goto out_unlock;
3570
3571 ret = task_can_attach(task);
3572 if (ret)
3573 goto out_unlock;
3574
3575 ret = security_task_setscheduler(task);
3576 if (ret)
3577 goto out_unlock;
3578
3579 /*
3580 * Mark attach is in progress. This makes validate_change() fail
3581 * changes which zero cpus/mems_allowed.
3582 */
3583 cs->attach_in_progress++;
3584 out_unlock:
3585 mutex_unlock(&cpuset_mutex);
3586 return ret;
3587 }
3588
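/*
 * Undo cpuset_can_fork() when a CLONE_INTO_CGROUP fork is aborted by
 * dropping the attach_in_progress count taken on the target cpuset.
 */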
3589 static void cpuset_cancel_fork(struct task_struct *task, struct css_set *cset)
3590 {
3591 struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
3592 bool same_cs;
3593
3594 rcu_read_lock();
3595 same_cs = (cs == task_cs(current));
3596 rcu_read_unlock();
3597
3598 if (same_cs)
3599 return;
3600
3601 dec_attach_in_progress(cs);
3602 }
3603
3604 /*
3605 * Make sure the new task conforms to the current state of its parent,
3606 * which could have been changed by cpuset just after it inherits the
3607 * state from the parent and before it sits on the cgroup's task list.
3608 */
3609 static void cpuset_fork(struct task_struct *task)
3610 {
3611 struct cpuset *cs;
3612 bool same_cs;
3613
3614 rcu_read_lock();
3615 cs = task_cs(task);
3616 same_cs = (cs == task_cs(current));
3617 rcu_read_unlock();
3618
3619 if (same_cs) {
3620 if (cs == &top_cpuset)
3621 return;
3622
3623 set_cpus_allowed_ptr(task, current->cpus_ptr);
3624 task->mems_allowed = current->mems_allowed;
3625 return;
3626 }
3627
3628 /* CLONE_INTO_CGROUP */
3629 mutex_lock(&cpuset_mutex);
3630 guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
3631 cpuset_attach_task(cs, task);
3632
3633 dec_attach_in_progress_locked(cs);
3634 mutex_unlock(&cpuset_mutex);
3635 }
3636
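/* cgroup framework callbacks for the cpuset controller */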
3637 struct cgroup_subsys cpuset_cgrp_subsys = {
3638 .css_alloc = cpuset_css_alloc,
3639 .css_online = cpuset_css_online,
3640 .css_offline = cpuset_css_offline,
3641 .css_killed = cpuset_css_killed,
3642 .css_free = cpuset_css_free,
3643 .can_attach = cpuset_can_attach,
3644 .cancel_attach = cpuset_cancel_attach,
3645 .attach = cpuset_attach,
3646 .bind = cpuset_bind,
3647 .can_fork = cpuset_can_fork,
3648 .cancel_fork = cpuset_cancel_fork,
3649 .fork = cpuset_fork,
3650 #ifdef CONFIG_CPUSETS_V1
3651 .legacy_cftypes = cpuset1_files,
3652 #endif
3653 .dfl_cftypes = dfl_files,
3654 .early_init = true,
3655 .threaded = true,
3656 };
3657
3658 /**
3659 * cpuset_init - initialize cpusets at system boot
3660 *
3661 * Description: Initialize top_cpuset
3662 **/
3663
3664 int __init cpuset_init(void)
3665 {
3666 BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL));
3667 BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL));
3668 BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_xcpus, GFP_KERNEL));
3669 BUG_ON(!alloc_cpumask_var(&top_cpuset.exclusive_cpus, GFP_KERNEL));
3670 BUG_ON(!zalloc_cpumask_var(&subpartitions_cpus, GFP_KERNEL));
3671 BUG_ON(!zalloc_cpumask_var(&isolated_cpus, GFP_KERNEL));
3672 BUG_ON(!zalloc_cpumask_var(&isolated_hk_cpus, GFP_KERNEL));
3673
3674 cpumask_setall(top_cpuset.cpus_allowed);
3675 nodes_setall(top_cpuset.mems_allowed);
3676 cpumask_setall(top_cpuset.effective_cpus);
3677 cpumask_setall(top_cpuset.effective_xcpus);
3678 cpumask_setall(top_cpuset.exclusive_cpus);
3679 nodes_setall(top_cpuset.effective_mems);
3680
3681 cpuset1_init(&top_cpuset);
3682
3683 BUG_ON(!alloc_cpumask_var(&cpus_attach, GFP_KERNEL));
3684
3685 if (housekeeping_enabled(HK_TYPE_DOMAIN_BOOT))
3686 cpumask_andnot(isolated_cpus, cpu_possible_mask,
3687 housekeeping_cpumask(HK_TYPE_DOMAIN_BOOT));
3688
3689 return 0;
3690 }
3691
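/*
 * Apply post-hotplug cpu and memory masks to a v2-mode cpuset. An empty
 * mask falls back to the parent's effective value, except that a valid
 * partition root may keep an empty effective cpumask. The cpuset's tasks
 * are then updated to match.
 */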
3692 static void
3693 hotplug_update_tasks(struct cpuset *cs,
3694 struct cpumask *new_cpus, nodemask_t *new_mems,
3695 bool cpus_updated, bool mems_updated)
3696 {
3697 /* A partition root is allowed to have empty effective cpus */
3698 if (cpumask_empty(new_cpus) && !is_partition_valid(cs))
3699 cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus);
3700 if (nodes_empty(*new_mems))
3701 *new_mems = parent_cs(cs)->effective_mems;
3702
3703 spin_lock_irq(&callback_lock);
3704 cpumask_copy(cs->effective_cpus, new_cpus);
3705 cs->effective_mems = *new_mems;
3706 spin_unlock_irq(&callback_lock);
3707
3708 if (cpus_updated)
3709 cpuset_update_tasks_cpumask(cs, new_cpus);
3710 if (mems_updated)
3711 cpuset_update_tasks_nodemask(cs);
3712 }
3713
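/* Flag that the scheduler domains must be rebuilt. */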
3714 void cpuset_force_rebuild(void)
3715 {
3716 force_sd_rebuild = true;
3717 }
3718
3719 /**
3720 * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
3721 * @cs: cpuset in interest
3722 * @tmp: the tmpmasks structure pointer
3723 *
3724 * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
3725 * offline, update @cs accordingly. If @cs ends up with no CPU or memory,
3726 * all its tasks are moved to the nearest ancestor with both resources.
3727 */
3728 static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
3729 {
3730 static cpumask_t new_cpus;
3731 static nodemask_t new_mems;
3732 bool cpus_updated;
3733 bool mems_updated;
3734 bool remote;
3735 int partcmd = -1;
3736 struct cpuset *parent;
3737 retry:
3738 wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
3739
3740 mutex_lock(&cpuset_mutex);
3741
3742 /*
3743 * We have raced with task attaching. We wait until attaching
3744 * is finished, so we won't attach a task to an empty cpuset.
3745 */
3746 if (cs->attach_in_progress) {
3747 mutex_unlock(&cpuset_mutex);
3748 goto retry;
3749 }
3750
3751 parent = parent_cs(cs);
3752 compute_effective_cpumask(&new_cpus, cs, parent);
3753 nodes_and(new_mems, cs->mems_allowed, parent->effective_mems);
3754
3755 if (!tmp || !cs->partition_root_state)
3756 goto update_tasks;
3757
3758 /*
3759 * Compute effective_cpus for valid partition root, may invalidate
3760 * child partition roots if necessary.
3761 */
3762 remote = is_remote_partition(cs);
3763 if (remote || (is_partition_valid(cs) && is_partition_valid(parent)))
3764 compute_partition_effective_cpumask(cs, &new_cpus);
3765
3766 if (remote && (cpumask_empty(subpartitions_cpus) ||
3767 (cpumask_empty(&new_cpus) &&
3768 partition_is_populated(cs, NULL)))) {
3769 cs->prs_err = PERR_HOTPLUG;
3770 remote_partition_disable(cs, tmp);
3771 compute_effective_cpumask(&new_cpus, cs, parent);
3772 remote = false;
3773 }
3774
3775 /*
3776 * Force the partition to become invalid if either one of
3777 * the following conditions hold:
3778 * 1) empty effective cpus but not valid empty partition.
3779 * 2) parent is invalid or doesn't grant any cpus to child
3780 * partitions.
3781 * 3) subpartitions_cpus is empty.
3782 */
3783 if (is_local_partition(cs) &&
3784 (!is_partition_valid(parent) ||
3785 tasks_nocpu_error(parent, cs, &new_cpus) ||
3786 cpumask_empty(subpartitions_cpus)))
3787 partcmd = partcmd_invalidate;
3788 /*
3789 * On the other hand, an invalid partition root may be transitioned
3790 * back to a regular one with a non-empty effective xcpus.
3791 */
3792 else if (is_partition_valid(parent) && is_partition_invalid(cs) &&
3793 !cpumask_empty(cs->effective_xcpus))
3794 partcmd = partcmd_update;
3795
3796 if (partcmd >= 0) {
3797 update_parent_effective_cpumask(cs, partcmd, NULL, tmp);
3798 if ((partcmd == partcmd_invalidate) || is_partition_valid(cs)) {
3799 compute_partition_effective_cpumask(cs, &new_cpus);
3800 cpuset_force_rebuild();
3801 }
3802 }
3803
3804 update_tasks:
3805 cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
3806 mems_updated = !nodes_equal(new_mems, cs->effective_mems);
3807 if (!cpus_updated && !mems_updated)
3808 goto unlock; /* Hotplug doesn't affect this cpuset */
3809
3810 if (mems_updated)
3811 check_insane_mems_config(&new_mems);
3812
3813 if (is_in_v2_mode())
3814 hotplug_update_tasks(cs, &new_cpus, &new_mems,
3815 cpus_updated, mems_updated);
3816 else
3817 cpuset1_hotplug_update_tasks(cs, &new_cpus, &new_mems,
3818 cpus_updated, mems_updated);
3819
3820 unlock:
3821 mutex_unlock(&cpuset_mutex);
3822 }
3823
3824 /**
3825 * cpuset_handle_hotplug - handle CPU/memory hot{,un}plug for a cpuset
3826 *
3827 * This function is called after either CPU or memory configuration has
3828 * changed and updates cpuset accordingly. The top_cpuset is always
3829 * synchronized to cpu_active_mask and N_MEMORY, which is necessary in
3830 * order to make cpusets transparent (of no effect) on systems that are
3831 * actively using CPU hotplug but making no active use of cpusets.
3832 *
3833 * Non-root cpusets are only affected by offlining. If any CPUs or memory
3834 * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on
3835 * all descendants.
3836 *
3837 * Note that CPU offlining during suspend is ignored. We don't modify
3838 * cpusets across suspend/resume cycles at all.
3839 *
3840 * CPU / memory hotplug is handled synchronously.
3841 */
3842 static void cpuset_handle_hotplug(void)
3843 {
3844 static DECLARE_WORK(hk_sd_work, hk_sd_workfn);
3845 static cpumask_t new_cpus;
3846 static nodemask_t new_mems;
3847 bool cpus_updated, mems_updated;
3848 bool on_dfl = is_in_v2_mode();
3849 struct tmpmasks tmp, *ptmp = NULL;
3850
3851 if (on_dfl && !alloc_tmpmasks(&tmp))
3852 ptmp = &tmp;
3853
3854 lockdep_assert_cpus_held();
3855 mutex_lock(&cpuset_mutex);
3856
3857 /* fetch the available cpus/mems and find out which changed how */
3858 cpumask_copy(&new_cpus, cpu_active_mask);
3859 new_mems = node_states[N_MEMORY];
3860
3861 /*
3862 * If subpartitions_cpus is populated, it is likely that the check
3863 * below will produce a false positive on cpus_updated when the cpu
3864 * list isn't changed. It is extra work, but it is better to be safe.
3865 */
3866 cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus) ||
3867 !cpumask_empty(subpartitions_cpus);
3868 mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);
3869
3870 /* For v1, synchronize cpus_allowed to cpu_active_mask */
3871 if (cpus_updated) {
3872 cpuset_force_rebuild();
3873 spin_lock_irq(&callback_lock);
3874 if (!on_dfl)
3875 cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
3876 /*
3877 * Make sure that CPUs allocated to child partitions
3878 * do not show up in effective_cpus. If no CPU is left,
3879 * we clear the subpartitions_cpus & let the child partitions
3880 * fight for the CPUs again.
3881 */
3882 if (!cpumask_empty(subpartitions_cpus)) {
3883 if (cpumask_subset(&new_cpus, subpartitions_cpus)) {
3884 cpumask_clear(subpartitions_cpus);
3885 } else {
3886 cpumask_andnot(&new_cpus, &new_cpus,
3887 subpartitions_cpus);
3888 }
3889 }
3890 cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
3891 spin_unlock_irq(&callback_lock);
3892 /* we don't mess with cpumasks of tasks in top_cpuset */
3893 }
3894
3895 /* synchronize mems_allowed to N_MEMORY */
3896 if (mems_updated) {
3897 spin_lock_irq(&callback_lock);
3898 if (!on_dfl)
3899 top_cpuset.mems_allowed = new_mems;
3900 top_cpuset.effective_mems = new_mems;
3901 spin_unlock_irq(&callback_lock);
3902 cpuset_update_tasks_nodemask(&top_cpuset);
3903 }
3904
3905 mutex_unlock(&cpuset_mutex);
3906
3907 /* if cpus or mems changed, we need to propagate to descendants */
3908 if (cpus_updated || mems_updated) {
3909 struct cpuset *cs;
3910 struct cgroup_subsys_state *pos_css;
3911
3912 rcu_read_lock();
3913 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
3914 if (cs == &top_cpuset || !css_tryget_online(&cs->css))
3915 continue;
3916 rcu_read_unlock();
3917
3918 cpuset_hotplug_update_tasks(cs, ptmp);
3919
3920 rcu_read_lock();
3921 css_put(&cs->css);
3922 }
3923 rcu_read_unlock();
3924 }
3925
3926
3927 /*
3928 * Queue a work to call housekeeping_update() & rebuild_sched_domains()
3929 * There will be a slight delay before the HK_TYPE_DOMAIN housekeeping
3930 * cpumask can correctly reflect what is in isolated_cpus.
3931 *
3932 * We rely on WORK_STRUCT_PENDING_BIT to not requeue a work item that
3933 * is still pending. Before the pending bit is cleared, the work data
3934 * is copied out and work item dequeued. So it is possible to queue
3935 * the work again before the hk_sd_workfn() is invoked to process the
3936 * previously queued work. Since hk_sd_workfn() doesn't use the work
3937 * item at all, this is not a problem.
3938 */
3939 if (update_housekeeping || force_sd_rebuild)
3940 queue_work(system_unbound_wq, &hk_sd_work);
3941
3942 free_tmpmasks(ptmp);
3943 }
3944
3945 void cpuset_update_active_cpus(void)
3946 {
3947 /*
3948 * We're inside the cpu hotplug critical region, which usually nests
3949 * inside cgroup synchronization. Hotplug processing is handled
3950 * synchronously here by cpuset_handle_hotplug().
3951 */
3952 cpuset_handle_hotplug();
3953 }
3954
3955 /*
3956 * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
3957 * Call this routine anytime after node_states[N_MEMORY] changes.
3958 * See cpuset_update_active_cpus() for CPU hotplug handling.
3959 */
3960 static int cpuset_track_online_nodes(struct notifier_block *self,
3961 unsigned long action, void *arg)
3962 {
3963 cpuset_handle_hotplug();
3964 return NOTIFY_OK;
3965 }
3966
3967 /**
3968 * cpuset_init_smp - initialize cpus_allowed
3969 *
3970 * Description: Finish top cpuset after cpu, node maps are initialized
3971 */
3972 void __init cpuset_init_smp(void)
3973 {
3974 /*
3975 * cpus_allowed/mems_allowed set to v2 values in the initial
3976 * cpuset_bind() call will be reset to v1 values in another
3977 * cpuset_bind() call when v1 cpuset is mounted.
3978 */
3979 top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;
3980
3981 cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask);
3982 top_cpuset.effective_mems = node_states[N_MEMORY];
3983
3984 hotplug_node_notifier(cpuset_track_online_nodes, CPUSET_CALLBACK_PRI);
3985
3986 cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
3987 BUG_ON(!cpuset_migrate_mm_wq);
3988 }
3989
3990 /*
3991 * Return cpus_allowed mask from a task's cpuset.
3992 */
3993 static void __cpuset_cpus_allowed_locked(struct task_struct *tsk, struct cpumask *pmask)
3994 {
3995 struct cpuset *cs;
3996
3997 cs = task_cs(tsk);
3998 if (cs != &top_cpuset)
3999 guarantee_active_cpus(tsk, pmask);
4000 /*
4001 * Tasks in the top cpuset won't get their cpumasks updated
4002 * when a hotplug online/offline event happens. So we include all
4003 * offline cpus in the allowed cpu list.
4004 */
4005 if ((cs == &top_cpuset) || cpumask_empty(pmask)) {
4006 const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
4007
4008 /*
4009 * We first exclude cpus allocated to partitions. If there is no
4010 * allowable online cpu left, we fall back to all possible cpus.
4011 */
4012 cpumask_andnot(pmask, possible_mask, subpartitions_cpus);
4013 if (!cpumask_intersects(pmask, cpu_active_mask))
4014 cpumask_copy(pmask, possible_mask);
4015 }
4016 }
4017
4018 /**
4019 * cpuset_cpus_allowed_locked - return cpus_allowed mask from a task's cpuset.
4020 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
4021 * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
4022 *
4023 * Similar to cpuset_cpus_allowed() except that the caller must have acquired
4024 * cpuset_mutex.
4025 */
4026 void cpuset_cpus_allowed_locked(struct task_struct *tsk, struct cpumask *pmask)
4027 {
4028 lockdep_assert_cpuset_lock_held();
4029 __cpuset_cpus_allowed_locked(tsk, pmask);
4030 }
4031
4032 /**
4033 * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
4034 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
4035 * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
4036 *
4037 * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
4038 * attached to the specified @tsk. Guaranteed to return some non-empty
4039 * subset of cpu_active_mask, even if this means going outside the
4040 * tasks cpuset, except when the task is in the top cpuset.
4041 **/
4042
4043 void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
4044 {
4045 unsigned long flags;
4046
4047 spin_lock_irqsave(&callback_lock, flags);
4048 __cpuset_cpus_allowed_locked(tsk, pmask);
4049 spin_unlock_irqrestore(&callback_lock, flags);
4050 }
4051
4052 /**
4053 * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
4054 * @tsk: pointer to task_struct with which the scheduler is struggling
4055 *
4056 * Description: In the case that the scheduler cannot find an allowed cpu in
4057 * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy
4058 * mode however, this value is the same as task_cs(tsk)->effective_cpus,
4059 * which will not contain a sane cpumask during cases such as cpu hotplugging.
4060 * This is the absolute last resort for the scheduler and it is only used if
4061 * _every_ other avenue has been traveled.
4062 *
4063 * Returns true if the affinity of @tsk was changed, false otherwise.
4064 **/
4065
4066 bool cpuset_cpus_allowed_fallback(struct task_struct *tsk)
4067 {
4068 const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
4069 const struct cpumask *cs_mask;
4070 bool changed = false;
4071
4072 rcu_read_lock();
4073 cs_mask = task_cs(tsk)->cpus_allowed;
4074 if (is_in_v2_mode() && cpumask_subset(cs_mask, possible_mask)) {
4075 set_cpus_allowed_force(tsk, cs_mask);
4076 changed = true;
4077 }
4078 rcu_read_unlock();
4079
4080 /*
4081 * We own tsk->cpus_allowed, nobody can change it under us.
4082 *
4083 * But we used cs && cs->cpus_allowed lockless and thus can
4084 * race with cgroup_attach_task() or update_cpumask() and get
4085 * the wrong tsk->cpus_allowed. However, both cases imply the
4086 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
4087 * which takes task_rq_lock().
4088 *
4089 * If we are called after it dropped the lock we must see all
4090 * changes in task_cs()->cpus_allowed. Otherwise we can temporarily
4091 * set any mask even if it is not right from task_cs() pov,
4092 * the pending set_cpus_allowed_ptr() will fix things.
4093 *
4094 * select_fallback_rq() will fix things up and set cpu_possible_mask
4095 * if required.
4096 */
4097 return changed;
4098 }
4099
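/* Allow the current (boot-time) task to allocate on any memory node. */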
4100 void __init cpuset_init_current_mems_allowed(void)
4101 {
4102 nodes_setall(current->mems_allowed);
4103 }
4104
4105 /**
4106 * cpuset_mems_allowed - return mems_allowed mask from a tasks cpuset.
4107 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
4108 *
4109 * Description: Returns the nodemask_t mems_allowed of the cpuset
4110 * attached to the specified @tsk. Guaranteed to return some non-empty
4111 * subset of node_states[N_MEMORY], even if this means going outside the
4112 * tasks cpuset.
4113 **/
4114
4115 nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
4116 {
4117 nodemask_t mask;
4118 unsigned long flags;
4119
4120 spin_lock_irqsave(&callback_lock, flags);
4121 guarantee_online_mems(task_cs(tsk), &mask);
4122 spin_unlock_irqrestore(&callback_lock, flags);
4123
4124 return mask;
4125 }
4126
4127 /**
4128 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
4129 * @nodemask: the nodemask to be checked
4130 *
4131 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
4132 */
4133 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
4134 {
4135 return nodes_intersects(*nodemask, current->mems_allowed);
4136 }
4137
4138 /*
4139 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
4140 * mem_hardwall ancestor to the specified cpuset. Call holding
4141 * callback_lock. If no ancestor is mem_exclusive or mem_hardwall
4142 * (an unusual configuration), then returns the root cpuset.
4143 */
4144 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
4145 {
4146 while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
4147 cs = parent_cs(cs);
4148 return cs;
4149 }
4150
4151 /*
4152 * cpuset_current_node_allowed - Can current task allocate on a memory node?
4153 * @node: is this an allowed node?
4154 * @gfp_mask: memory allocation flags
4155 *
4156 * If we're in interrupt, yes, we can always allocate. If @node is set in
4157 * current's mems_allowed, yes. If it's not a __GFP_HARDWALL request and this
4158 * node is set in the nearest hardwalled cpuset ancestor to current's cpuset,
4159 * yes. If current has access to memory reserves as an oom victim, yes.
4160 * Otherwise, no.
4161 *
4162 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
4163 * and do not allow allocations outside the current tasks cpuset
4164 * unless the task has been OOM killed.
4165 * GFP_KERNEL allocations are not so marked, so can escape to the
4166 * nearest enclosing hardwalled ancestor cpuset.
4167 *
4168 * Scanning up parent cpusets requires callback_lock. The
4169 * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
4170 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
4171 * current tasks mems_allowed came up empty on the first pass over
4172 * the zonelist. So only GFP_KERNEL allocations, if all nodes in the
4173 * cpuset are short of memory, might require taking the callback_lock.
4174 *
4175 * The first call here from mm/page_alloc:get_page_from_freelist()
4176 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
4177 * so no allocation on a node outside the cpuset is allowed (unless
4178 * in interrupt, of course).
4179 *
4180 * The second pass through get_page_from_freelist() doesn't even call
4181 * here for GFP_ATOMIC calls. For those calls, the __alloc_pages()
4182 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
4183 * in alloc_flags. That logic and the checks below have the combined
4184 * effect that:
4185 * in_interrupt - any node ok (current task context irrelevant)
4186 * GFP_ATOMIC - any node ok
4187 * tsk_is_oom_victim - any node ok
4188 * GFP_KERNEL - any node in enclosing hardwalled cpuset ok
4189 * GFP_USER - only nodes in current tasks mems allowed ok.
4190 */
4191 bool cpuset_current_node_allowed(int node, gfp_t gfp_mask)
4192 {
4193 struct cpuset *cs; /* current cpuset ancestors */
4194 bool allowed; /* is allocation on this node allowed? */
4195 unsigned long flags;
4196
4197 if (in_interrupt())
4198 return true;
4199 if (node_isset(node, current->mems_allowed))
4200 return true;
4201 /*
4202 * Allow tasks that have access to memory reserves because they have
4203 * been OOM killed to get memory anywhere.
4204 */
4205 if (unlikely(tsk_is_oom_victim(current)))
4206 return true;
4207 if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */
4208 return false;
4209
4210 if (current->flags & PF_EXITING) /* Let dying task have memory */
4211 return true;
4212
4213 /* Not hardwall and node outside mems_allowed: scan up cpusets */
4214 spin_lock_irqsave(&callback_lock, flags);
4215
4216 cs = nearest_hardwall_ancestor(task_cs(current));
4217 allowed = node_isset(node, cs->mems_allowed);
4218
4219 spin_unlock_irqrestore(&callback_lock, flags);
4220 return allowed;
4221 }
4222
4223 /**
4224 * cpuset_nodes_allowed - return effective_mems mask from a cgroup cpuset.
4225 * @cgroup: pointer to struct cgroup.
4226 * @mask: pointer to struct nodemask_t to be returned.
4227 *
4228 * Returns effective_mems mask from a cgroup cpuset if it is cgroup v2 and
4229 * has cpuset subsys. Otherwise, returns node_states[N_MEMORY].
4230 *
4231 * This function intentionally avoids taking the cpuset_mutex or callback_lock
4232 * when accessing effective_mems. This is because the obtained effective_mems
4233 * is stale immediately after the query anyway (e.g., effective_mems is updated
4234 * immediately after releasing the lock but before returning).
4235 *
4236 * As a result, returned @mask may be empty because cs->effective_mems can be
4237 * rebound during this call. Besides, nodes in @mask are not guaranteed to be
4238 * online due to hot plugins. Callers should check the mask for validity on
4239 * return based on its subsequent use.
4240 **/
4241 void cpuset_nodes_allowed(struct cgroup *cgroup, nodemask_t *mask)
4242 {
4243 struct cgroup_subsys_state *css;
4244 struct cpuset *cs;
4245
4246 /*
4247 * In v1, mem_cgroup and cpuset are unlikely to be in the same hierarchy
4248 * and mems_allowed is likely to be empty even if we could get to it,
4249 * so return directly to avoid taking a global lock on the empty check.
4250 */
4251 if (!cgroup || !cpuset_v2()) {
4252 nodes_copy(*mask, node_states[N_MEMORY]);
4253 return;
4254 }
4255
4256 css = cgroup_get_e_css(cgroup, &cpuset_cgrp_subsys);
4257 if (!css) {
4258 nodes_copy(*mask, node_states[N_MEMORY]);
4259 return;
4260 }
4261
4262 /*
4263 * The reference taken via cgroup_get_e_css is sufficient to
4264 * protect css, but it does not imply safe accesses to effective_mems.
4265 *
4266 * Normally, accessing effective_mems would require the cpuset_mutex
4267 * or callback_lock - but the returned information can already be stale
4268 * immediately after the query anyway. We do not acquire the lock
4269 * during this process to save lock contention in exchange for racing
4270 * against mems_allowed rebinds.
4271 */
4272 cs = container_of(css, struct cpuset, css);
4273 nodes_copy(*mask, cs->effective_mems);
4274 css_put(css);
4275 }
4276
4277 /**
4278 * cpuset_spread_node() - On which node to begin search for a page
4279 * @rotor: round robin rotor
4280 *
4281 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
4282 * tasks in a cpuset with is_spread_page or is_spread_slab set),
4283 * and if the memory allocation used cpuset_mem_spread_node()
4284 * to determine on which node to start looking, as it will for
4285 * certain page cache or slab cache pages such as used for file
4286 * system buffers and inode caches, then instead of starting on the
4287 * local node to look for a free page, rather spread the starting
4288 * node around the tasks mems_allowed nodes.
4289 *
4290 * We don't have to worry about the returned node being offline
4291 * because "it can't happen", and even if it did, it would be ok.
4292 *
4293 * The routines calling guarantee_online_mems() are careful to
4294 * only set nodes in task->mems_allowed that are online. So it
4295 * should not be possible for the following code to return an
4296 * offline node. But if it did, that would be ok, as this routine
4297 * is not returning the node where the allocation must be, only
4298 * the node where the search should start. The zonelist passed to
4299 * __alloc_pages() will include all nodes. If the slab allocator
4300 * is passed an offline node, it will fall back to the local node.
4301 * See kmem_cache_alloc_node().
4302 */
4303 static int cpuset_spread_node(int *rotor)
4304 {
4305 return *rotor = next_node_in(*rotor, current->mems_allowed);
4306 }
4307
4308 /**
4309 * cpuset_mem_spread_node() - On which node to begin search for a file page
4310 */
4311 int cpuset_mem_spread_node(void)
4312 {
4313 if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
4314 current->cpuset_mem_spread_rotor =
4315 node_random(&current->mems_allowed);
4316
4317 return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
4318 }
4319
4320 /**
4321 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
4322 * @tsk1: pointer to task_struct of some task.
4323 * @tsk2: pointer to task_struct of some other task.
4324 *
4325 * Description: Return true if @tsk1's mems_allowed intersects the
4326 * mems_allowed of @tsk2. Used by the OOM killer to determine if
4327 * one of the task's memory usage might impact the memory available
4328 * to the other.
4329 **/
4330
4331 int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
4332 const struct task_struct *tsk2)
4333 {
4334 return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
4335 }
4336
4337 /**
4338 * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed
4339 *
4340 * Description: Prints current's name, cpuset name, and cached copy of its
4341 * mems_allowed to the kernel log.
4342 */
4343 void cpuset_print_current_mems_allowed(void)
4344 {
4345 struct cgroup *cgrp;
4346
4347 rcu_read_lock();
4348
4349 cgrp = task_cs(current)->css.cgroup;
4350 pr_cont(",cpuset=");
4351 pr_cont_cgroup_name(cgrp);
4352 pr_cont(",mems_allowed=%*pbl",
4353 nodemask_pr_args(&current->mems_allowed));
4354
4355 rcu_read_unlock();
4356 }
4357
4358 /* Display task mems_allowed in /proc/<pid>/status file. */
4359 void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
4360 {
4361 seq_printf(m, "Mems_allowed:\t%*pb\n",
4362 nodemask_pr_args(&task->mems_allowed));
4363 seq_printf(m, "Mems_allowed_list:\t%*pbl\n",
4364 nodemask_pr_args(&task->mems_allowed));
4365 }
4366