1 /*
2 * kernel/cpuset.c
3 *
4 * Processor and Memory placement constraints for sets of tasks.
5 *
6 * Copyright (C) 2003 BULL SA.
7 * Copyright (C) 2004-2007 Silicon Graphics, Inc.
8 * Copyright (C) 2006 Google, Inc
9 *
10 * Portions derived from Patrick Mochel's sysfs code.
11 * sysfs is Copyright (c) 2001-3 Patrick Mochel
12 *
13 * 2003-10-10 Written by Simon Derr.
14 * 2003-10-22 Updates by Stephen Hemminger.
15 * 2004 May-July Rework by Paul Jackson.
16 * 2006 Rework by Paul Menage to use generic cgroups
17 * 2008 Rework of the scheduler domains and CPU hotplug handling
18 * by Max Krasnyansky
19 *
20 * This file is subject to the terms and conditions of the GNU General Public
21 * License. See the file COPYING in the main directory of the Linux
22 * distribution for more details.
23 */
24 #include "cgroup-internal.h"
25 #include "cpuset-internal.h"
26
27 #include <linux/init.h>
28 #include <linux/interrupt.h>
29 #include <linux/kernel.h>
30 #include <linux/mempolicy.h>
31 #include <linux/mm.h>
32 #include <linux/memory.h>
33 #include <linux/export.h>
34 #include <linux/rcupdate.h>
35 #include <linux/sched.h>
36 #include <linux/sched/deadline.h>
37 #include <linux/sched/mm.h>
38 #include <linux/sched/task.h>
39 #include <linux/security.h>
40 #include <linux/oom.h>
41 #include <linux/sched/isolation.h>
42 #include <linux/wait.h>
43 #include <linux/workqueue.h>
44
45 DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
46 DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
47
48 /*
49  * There could be abnormal cpuset configurations for cpu or memory
50  * node binding; this key provides a quick, low-cost check for that
51  * situation.
52 */
53 DEFINE_STATIC_KEY_FALSE(cpusets_insane_config_key);
54
55 static const char * const perr_strings[] = {
56 [PERR_INVCPUS] = "Invalid cpu list in cpuset.cpus.exclusive",
57 [PERR_INVPARENT] = "Parent is an invalid partition root",
58 [PERR_NOTPART] = "Parent is not a partition root",
59 [PERR_NOTEXCL] = "Cpu list in cpuset.cpus not exclusive",
60 [PERR_NOCPUS] = "Parent unable to distribute cpu downstream",
61 [PERR_HOTPLUG] = "No cpu available due to hotplug",
62 [PERR_CPUSEMPTY] = "cpuset.cpus and cpuset.cpus.exclusive are empty",
63 [PERR_HKEEPING] = "partition config conflicts with housekeeping setup",
64 [PERR_ACCESS] = "Enable partition not permitted",
65 };
66
67 /*
68 * Exclusive CPUs distributed out to sub-partitions of top_cpuset
69 */
70 static cpumask_var_t subpartitions_cpus;
71
72 /*
73 * Exclusive CPUs in isolated partitions
74 */
75 static cpumask_var_t isolated_cpus;
76
77 /*
78 * Housekeeping (HK_TYPE_DOMAIN) CPUs at boot
79 */
80 static cpumask_var_t boot_hk_cpus;
81 static bool have_boot_isolcpus;
82
83 /* List of remote partition root children */
84 static struct list_head remote_children;
85
86 /*
87  * A flag that, when set, inhibits sched domain rebuilds during the
88  * intermediate stages of an operation and forces a single rebuild at the
89  * end. Currently it is only set in hotplug code.
90 */
91 static bool force_sd_rebuild;
92
93 /*
94 * Partition root states:
95 *
96 * 0 - member (not a partition root)
97 * 1 - partition root
98 * 2 - partition root without load balancing (isolated)
99 * -1 - invalid partition root
100 * -2 - invalid isolated partition root
101 *
102  * There are 2 types of partitions - local or remote. Local partitions are
103  * those whose parents are partition roots themselves. Setting
104  * cpuset.cpus.exclusive is optional when setting up a local partition.
105  * Remote partitions are those whose parents are not partition roots. Passing
106  * down exclusive CPUs by setting cpuset.cpus.exclusive along the ancestor
107  * nodes is mandatory when creating a remote partition.
108 *
109 * For simplicity, a local partition can be created under a local or remote
110 * partition but a remote partition cannot have any partition root in its
111 * ancestor chain except the cgroup root.
112 */
113 #define PRS_MEMBER 0
114 #define PRS_ROOT 1
115 #define PRS_ISOLATED 2
116 #define PRS_INVALID_ROOT -1
117 #define PRS_INVALID_ISOLATED -2
118
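/*
 * Illustrative note (editorial example, not original code): a valid
 * partition that has to be invalidated keeps its type but flips the sign,
 * e.g. PRS_ROOT (1) becomes PRS_INVALID_ROOT (-1) and PRS_ISOLATED (2)
 * becomes PRS_INVALID_ISOLATED (-2). is_prs_invalid() below is therefore
 * just a sign test, and make_partition_invalid() simply negates the state.
 */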
119 static inline bool is_prs_invalid(int prs_state)
120 {
121 return prs_state < 0;
122 }
123
124 /*
125 * Temporary cpumasks for working with partitions that are passed among
126 * functions to avoid memory allocation in inner functions.
127 */
128 struct tmpmasks {
129 cpumask_var_t addmask, delmask; /* For partition root */
130 cpumask_var_t new_cpus; /* For update_cpumasks_hier() */
131 };
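
/*
 * Minimal usage sketch (illustrative only): callers typically keep a
 * struct tmpmasks on the stack and let alloc_cpumasks()/free_cpumasks()
 * below manage the three temporary cpumasks:
 *
 *	struct tmpmasks tmp;
 *
 *	if (alloc_cpumasks(NULL, &tmp))
 *		return -ENOMEM;
 *	... use tmp.new_cpus, tmp.addmask and tmp.delmask ...
 *	free_cpumasks(NULL, &tmp);
 */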
132
133 void inc_dl_tasks_cs(struct task_struct *p)
134 {
135 struct cpuset *cs = task_cs(p);
136
137 cs->nr_deadline_tasks++;
138 }
139
140 void dec_dl_tasks_cs(struct task_struct *p)
141 {
142 struct cpuset *cs = task_cs(p);
143
144 cs->nr_deadline_tasks--;
145 }
146
147 static inline int is_partition_valid(const struct cpuset *cs)
148 {
149 return cs->partition_root_state > 0;
150 }
151
152 static inline int is_partition_invalid(const struct cpuset *cs)
153 {
154 return cs->partition_root_state < 0;
155 }
156
157 /*
158 * Callers should hold callback_lock to modify partition_root_state.
159 */
160 static inline void make_partition_invalid(struct cpuset *cs)
161 {
162 if (cs->partition_root_state > 0)
163 cs->partition_root_state = -cs->partition_root_state;
164 }
165
166 /*
167  * Send a notification event whenever partition_root_state changes.
168 */
169 static inline void notify_partition_change(struct cpuset *cs, int old_prs)
170 {
171 if (old_prs == cs->partition_root_state)
172 return;
173 cgroup_file_notify(&cs->partition_file);
174
175 /* Reset prs_err if not invalid */
176 if (is_partition_valid(cs))
177 WRITE_ONCE(cs->prs_err, PERR_NONE);
178 }
179
180 static struct cpuset top_cpuset = {
181 .flags = BIT(CS_ONLINE) | BIT(CS_CPU_EXCLUSIVE) |
182 BIT(CS_MEM_EXCLUSIVE) | BIT(CS_SCHED_LOAD_BALANCE),
183 .partition_root_state = PRS_ROOT,
184 .relax_domain_level = -1,
185 .remote_sibling = LIST_HEAD_INIT(top_cpuset.remote_sibling),
186 };
187
188 /*
189 * There are two global locks guarding cpuset structures - cpuset_mutex and
190 * callback_lock. We also require taking task_lock() when dereferencing a
191 * task's cpuset pointer. See "The task_lock() exception", at the end of this
192 * comment. The cpuset code uses only cpuset_mutex. Other kernel subsystems
193 * can use cpuset_lock()/cpuset_unlock() to prevent change to cpuset
194 * structures. Note that cpuset_mutex needs to be a mutex as it is used in
195 * paths that rely on priority inheritance (e.g. scheduler - on RT) for
196 * correctness.
197 *
198 * A task must hold both locks to modify cpusets. If a task holds
199 * cpuset_mutex, it blocks others, ensuring that it is the only task able to
200 * also acquire callback_lock and be able to modify cpusets. It can perform
201 * various checks on the cpuset structure first, knowing nothing will change.
202 * It can also allocate memory while just holding cpuset_mutex. While it is
203 * performing these checks, various callback routines can briefly acquire
204 * callback_lock to query cpusets. Once it is ready to make the changes, it
205 * takes callback_lock, blocking everyone else.
206 *
207 * Calls to the kernel memory allocator can not be made while holding
208 * callback_lock, as that would risk double tripping on callback_lock
209 * from one of the callbacks into the cpuset code from within
210 * __alloc_pages().
211 *
212 * If a task is only holding callback_lock, then it has read-only
213 * access to cpusets.
214 *
215  * The task_struct fields mems_allowed and mempolicy may be changed
216  * by another task, so we use alloc_lock in the task_struct to protect
217  * them.
218 *
219 * The cpuset_common_seq_show() handlers only hold callback_lock across
220 * small pieces of code, such as when reading out possibly multi-word
221 * cpumasks and nodemasks.
222 *
223 * Accessing a task's cpuset should be done in accordance with the
224 * guidelines for accessing subsystem state in kernel/cgroup.c
225 */
226
227 static DEFINE_MUTEX(cpuset_mutex);
228
229 void cpuset_lock(void)
230 {
231 mutex_lock(&cpuset_mutex);
232 }
233
234 void cpuset_unlock(void)
235 {
236 mutex_unlock(&cpuset_mutex);
237 }
238
239 static DEFINE_SPINLOCK(callback_lock);
240
241 void cpuset_callback_lock_irq(void)
242 {
243 spin_lock_irq(&callback_lock);
244 }
245
246 void cpuset_callback_unlock_irq(void)
247 {
248 spin_unlock_irq(&callback_lock);
249 }
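
/*
 * Illustrative sketch of the write-side locking order described above
 * (an assumed, condensed pattern; the real update paths appear later in
 * this file):
 *
 *	mutex_lock(&cpuset_mutex);
 *	... validate the change, allocate any memory needed ...
 *	spin_lock_irq(&callback_lock);
 *	... publish the new cpumasks/nodemasks ...
 *	spin_unlock_irq(&callback_lock);
 *	mutex_unlock(&cpuset_mutex);
 */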
250
251 static struct workqueue_struct *cpuset_migrate_mm_wq;
252
253 static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);
254
255 static inline void check_insane_mems_config(nodemask_t *nodes)
256 {
257 if (!cpusets_insane_config() &&
258 movable_only_nodes(nodes)) {
259 static_branch_enable(&cpusets_insane_config_key);
260 pr_info("Unsupported (movable nodes only) cpuset configuration detected (nmask=%*pbl)!\n"
261 "Cpuset allocations might fail even with a lot of memory available.\n",
262 nodemask_pr_args(nodes));
263 }
264 }
265
266 /*
267 * decrease cs->attach_in_progress.
268 * wake_up cpuset_attach_wq if cs->attach_in_progress==0.
269 */
270 static inline void dec_attach_in_progress_locked(struct cpuset *cs)
271 {
272 lockdep_assert_held(&cpuset_mutex);
273
274 cs->attach_in_progress--;
275 if (!cs->attach_in_progress)
276 wake_up(&cpuset_attach_wq);
277 }
278
279 static inline void dec_attach_in_progress(struct cpuset *cs)
280 {
281 mutex_lock(&cpuset_mutex);
282 dec_attach_in_progress_locked(cs);
283 mutex_unlock(&cpuset_mutex);
284 }
285
286 /*
287 * Cgroup v2 behavior is used on the "cpus" and "mems" control files when
288 * on default hierarchy or when the cpuset_v2_mode flag is set by mounting
289 * the v1 cpuset cgroup filesystem with the "cpuset_v2_mode" mount option.
290 * With v2 behavior, "cpus" and "mems" are always what the users have
291 * requested and won't be changed by hotplug events. Only the effective
292 * cpus or mems will be affected.
293 */
294 static inline bool is_in_v2_mode(void)
295 {
296 return cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
297 (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE);
298 }
299
300 /**
301 * partition_is_populated - check if partition has tasks
302 * @cs: partition root to be checked
303 * @excluded_child: a child cpuset to be excluded in task checking
304 * Return: true if there are tasks, false otherwise
305 *
306 * It is assumed that @cs is a valid partition root. @excluded_child should
307 * be non-NULL when this cpuset is going to become a partition itself.
308 */
309 static inline bool partition_is_populated(struct cpuset *cs,
310 struct cpuset *excluded_child)
311 {
312 struct cgroup_subsys_state *css;
313 struct cpuset *child;
314
315 if (cs->css.cgroup->nr_populated_csets)
316 return true;
317 if (!excluded_child && !cs->nr_subparts)
318 return cgroup_is_populated(cs->css.cgroup);
319
320 rcu_read_lock();
321 cpuset_for_each_child(child, css, cs) {
322 if (child == excluded_child)
323 continue;
324 if (is_partition_valid(child))
325 continue;
326 if (cgroup_is_populated(child->css.cgroup)) {
327 rcu_read_unlock();
328 return true;
329 }
330 }
331 rcu_read_unlock();
332 return false;
333 }
334
335 /*
336  * Return in pmask the portion of a task's cpuset's cpus_allowed that
337 * are online and are capable of running the task. If none are found,
338 * walk up the cpuset hierarchy until we find one that does have some
339 * appropriate cpus.
340 *
341 * One way or another, we guarantee to return some non-empty subset
342 * of cpu_online_mask.
343 *
344 * Call with callback_lock or cpuset_mutex held.
345 */
346 static void guarantee_online_cpus(struct task_struct *tsk,
347 struct cpumask *pmask)
348 {
349 const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
350 struct cpuset *cs;
351
352 if (WARN_ON(!cpumask_and(pmask, possible_mask, cpu_online_mask)))
353 cpumask_copy(pmask, cpu_online_mask);
354
355 rcu_read_lock();
356 cs = task_cs(tsk);
357
358 while (!cpumask_intersects(cs->effective_cpus, pmask))
359 cs = parent_cs(cs);
360
361 cpumask_and(pmask, pmask, cs->effective_cpus);
362 rcu_read_unlock();
363 }
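
/*
 * Illustrative caller sketch (an editorial assumption, not a function in
 * this file), following the locking rule stated above:
 *
 *	spin_lock_irqsave(&callback_lock, flags);
 *	guarantee_online_cpus(tsk, pmask);
 *	spin_unlock_irqrestore(&callback_lock, flags);
 *	(pmask is now a non-empty subset of cpu_online_mask)
 */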
364
365 /*
366  * Return in *pmask the portion of a cpuset's mems_allowed that
367 * are online, with memory. If none are online with memory, walk
368 * up the cpuset hierarchy until we find one that does have some
369 * online mems. The top cpuset always has some mems online.
370 *
371 * One way or another, we guarantee to return some non-empty subset
372 * of node_states[N_MEMORY].
373 *
374 * Call with callback_lock or cpuset_mutex held.
375 */
376 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
377 {
378 while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY]))
379 cs = parent_cs(cs);
380 nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);
381 }
382
383 /**
384  * alloc_cpumasks - allocate cpumasks for a cpuset or a tmpmasks structure
385  * @cs: the cpuset that has cpumasks to be allocated.
386 * @tmp: the tmpmasks structure pointer
387 * Return: 0 if successful, -ENOMEM otherwise.
388 *
389 * Only one of the two input arguments should be non-NULL.
390 */
391 static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
392 {
393 cpumask_var_t *pmask1, *pmask2, *pmask3, *pmask4;
394
395 if (cs) {
396 pmask1 = &cs->cpus_allowed;
397 pmask2 = &cs->effective_cpus;
398 pmask3 = &cs->effective_xcpus;
399 pmask4 = &cs->exclusive_cpus;
400 } else {
401 pmask1 = &tmp->new_cpus;
402 pmask2 = &tmp->addmask;
403 pmask3 = &tmp->delmask;
404 pmask4 = NULL;
405 }
406
407 if (!zalloc_cpumask_var(pmask1, GFP_KERNEL))
408 return -ENOMEM;
409
410 if (!zalloc_cpumask_var(pmask2, GFP_KERNEL))
411 goto free_one;
412
413 if (!zalloc_cpumask_var(pmask3, GFP_KERNEL))
414 goto free_two;
415
416 if (pmask4 && !zalloc_cpumask_var(pmask4, GFP_KERNEL))
417 goto free_three;
418
419
420 return 0;
421
422 free_three:
423 free_cpumask_var(*pmask3);
424 free_two:
425 free_cpumask_var(*pmask2);
426 free_one:
427 free_cpumask_var(*pmask1);
428 return -ENOMEM;
429 }
430
431 /**
432  * free_cpumasks - free cpumasks in a cpuset and/or a tmpmasks structure
433  * @cs: the cpuset that has cpumasks to be freed.
434 * @tmp: the tmpmasks structure pointer
435 */
436 static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
437 {
438 if (cs) {
439 free_cpumask_var(cs->cpus_allowed);
440 free_cpumask_var(cs->effective_cpus);
441 free_cpumask_var(cs->effective_xcpus);
442 free_cpumask_var(cs->exclusive_cpus);
443 }
444 if (tmp) {
445 free_cpumask_var(tmp->new_cpus);
446 free_cpumask_var(tmp->addmask);
447 free_cpumask_var(tmp->delmask);
448 }
449 }
450
451 /**
452 * alloc_trial_cpuset - allocate a trial cpuset
453 * @cs: the cpuset that the trial cpuset duplicates
454 */
455 static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
456 {
457 struct cpuset *trial;
458
459 trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
460 if (!trial)
461 return NULL;
462
463 if (alloc_cpumasks(trial, NULL)) {
464 kfree(trial);
465 return NULL;
466 }
467
468 cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
469 cpumask_copy(trial->effective_cpus, cs->effective_cpus);
470 cpumask_copy(trial->effective_xcpus, cs->effective_xcpus);
471 cpumask_copy(trial->exclusive_cpus, cs->exclusive_cpus);
472 return trial;
473 }
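
/*
 * Typical trial-cpuset pattern (a condensed sketch of how the update paths
 * in this file use it, with new_mask standing in for the user-requested
 * cpumask; not a standalone function):
 *
 *	trial = alloc_trial_cpuset(cs);
 *	if (!trial)
 *		return -ENOMEM;
 *	cpumask_copy(trial->cpus_allowed, new_mask);
 *	err = validate_change(cs, trial);
 *	if (!err) {
 *		spin_lock_irq(&callback_lock);
 *		cpumask_copy(cs->cpus_allowed, trial->cpus_allowed);
 *		spin_unlock_irq(&callback_lock);
 *	}
 *	free_cpuset(trial);
 */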
474
475 /**
476 * free_cpuset - free the cpuset
477 * @cs: the cpuset to be freed
478 */
479 static inline void free_cpuset(struct cpuset *cs)
480 {
481 free_cpumasks(cs, NULL);
482 kfree(cs);
483 }
484
485 /* Return user specified exclusive CPUs */
486 static inline struct cpumask *user_xcpus(struct cpuset *cs)
487 {
488 return cpumask_empty(cs->exclusive_cpus) ? cs->cpus_allowed
489 : cs->exclusive_cpus;
490 }
491
492 static inline bool xcpus_empty(struct cpuset *cs)
493 {
494 return cpumask_empty(cs->cpus_allowed) &&
495 cpumask_empty(cs->exclusive_cpus);
496 }
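
/*
 * Worked example (illustrative): if a cpuset has cpus_allowed = 0-3 and an
 * empty cpuset.cpus.exclusive, user_xcpus() falls back to cpus_allowed
 * (0-3) and xcpus_empty() is false. Only when both masks are empty does
 * xcpus_empty() return true.
 */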
497
498 /*
499 * cpusets_are_exclusive() - check if two cpusets are exclusive
500 *
501 * Return true if exclusive, false if not
502 */
503 static inline bool cpusets_are_exclusive(struct cpuset *cs1, struct cpuset *cs2)
504 {
505 struct cpumask *xcpus1 = user_xcpus(cs1);
506 struct cpumask *xcpus2 = user_xcpus(cs2);
507
508 if (cpumask_intersects(xcpus1, xcpus2))
509 return false;
510 return true;
511 }
512
513 /*
514 * validate_change() - Used to validate that any proposed cpuset change
515 * follows the structural rules for cpusets.
516 *
517 * If we replaced the flag and mask values of the current cpuset
518 * (cur) with those values in the trial cpuset (trial), would
519 * our various subset and exclusive rules still be valid? Presumes
520 * cpuset_mutex held.
521 *
522 * 'cur' is the address of an actual, in-use cpuset. Operations
523 * such as list traversal that depend on the actual address of the
524 * cpuset in the list must use cur below, not trial.
525 *
526 * 'trial' is the address of bulk structure copy of cur, with
527 * perhaps one or more of the fields cpus_allowed, mems_allowed,
528 * or flags changed to new, trial values.
529 *
530 * Return 0 if valid, -errno if not.
531 */
532
533 static int validate_change(struct cpuset *cur, struct cpuset *trial)
534 {
535 struct cgroup_subsys_state *css;
536 struct cpuset *c, *par;
537 int ret = 0;
538
539 rcu_read_lock();
540
541 if (!is_in_v2_mode())
542 ret = cpuset1_validate_change(cur, trial);
543 if (ret)
544 goto out;
545
546 /* Remaining checks don't apply to root cpuset */
547 if (cur == &top_cpuset)
548 goto out;
549
550 par = parent_cs(cur);
551
552 /*
553 * Cpusets with tasks - existing or newly being attached - can't
554 * be changed to have empty cpus_allowed or mems_allowed.
555 */
556 ret = -ENOSPC;
557 if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) {
558 if (!cpumask_empty(cur->cpus_allowed) &&
559 cpumask_empty(trial->cpus_allowed))
560 goto out;
561 if (!nodes_empty(cur->mems_allowed) &&
562 nodes_empty(trial->mems_allowed))
563 goto out;
564 }
565
566 /*
567 * We can't shrink if we won't have enough room for SCHED_DEADLINE
568 * tasks.
569 */
570 ret = -EBUSY;
571 if (is_cpu_exclusive(cur) &&
572 !cpuset_cpumask_can_shrink(cur->cpus_allowed,
573 trial->cpus_allowed))
574 goto out;
575
576 /*
577 * If either I or some sibling (!= me) is exclusive, we can't
578 * overlap. exclusive_cpus cannot overlap with each other if set.
579 */
580 ret = -EINVAL;
581 cpuset_for_each_child(c, css, par) {
582 bool txset, cxset; /* Are exclusive_cpus set? */
583
584 if (c == cur)
585 continue;
586
587 txset = !cpumask_empty(trial->exclusive_cpus);
588 cxset = !cpumask_empty(c->exclusive_cpus);
589 if (is_cpu_exclusive(trial) || is_cpu_exclusive(c) ||
590 (txset && cxset)) {
591 if (!cpusets_are_exclusive(trial, c))
592 goto out;
593 } else if (txset || cxset) {
594 struct cpumask *xcpus, *acpus;
595
596 /*
597  * When just one of the two exclusive_cpus masks is set,
598  * the cpus_allowed of the other cpuset, if non-empty, cannot
599  * be a subset of it, or none of those CPUs will be
600  * available once these exclusive CPUs are activated.
601 */
602 if (txset) {
603 xcpus = trial->exclusive_cpus;
604 acpus = c->cpus_allowed;
605 } else {
606 xcpus = c->exclusive_cpus;
607 acpus = trial->cpus_allowed;
608 }
609 if (!cpumask_empty(acpus) && cpumask_subset(acpus, xcpus))
610 goto out;
611 }
612 if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
613 nodes_intersects(trial->mems_allowed, c->mems_allowed))
614 goto out;
615 }
616
617 ret = 0;
618 out:
619 rcu_read_unlock();
620 return ret;
621 }
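
/*
 * Worked example of the sibling exclusivity rules above (illustrative):
 * with a parent having two children A and B, if A has exclusive_cpus = 0-1
 * then a trial value of B with cpus_allowed = 0-1 is rejected, because
 * cpus_allowed would be a subset of A's exclusive CPUs, while
 * cpus_allowed = 0-3 is accepted since CPUs 2-3 remain available.
 */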
622
623 #ifdef CONFIG_SMP
624 /*
625 * Helper routine for generate_sched_domains().
626 * Do cpusets a, b have overlapping effective cpus_allowed masks?
627 */
628 static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
629 {
630 return cpumask_intersects(a->effective_cpus, b->effective_cpus);
631 }
632
633 static void
634 update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
635 {
636 if (dattr->relax_domain_level < c->relax_domain_level)
637 dattr->relax_domain_level = c->relax_domain_level;
638 return;
639 }
640
641 static void update_domain_attr_tree(struct sched_domain_attr *dattr,
642 struct cpuset *root_cs)
643 {
644 struct cpuset *cp;
645 struct cgroup_subsys_state *pos_css;
646
647 rcu_read_lock();
648 cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
649 /* skip the whole subtree if @cp doesn't have any CPU */
650 if (cpumask_empty(cp->cpus_allowed)) {
651 pos_css = css_rightmost_descendant(pos_css);
652 continue;
653 }
654
655 if (is_sched_load_balance(cp))
656 update_domain_attr(dattr, cp);
657 }
658 rcu_read_unlock();
659 }
660
661 /* Must be called with cpuset_mutex held. */
662 static inline int nr_cpusets(void)
663 {
664 /* jump label reference count + the top-level cpuset */
665 return static_key_count(&cpusets_enabled_key.key) + 1;
666 }
667
668 /*
669 * generate_sched_domains()
670 *
671  * This function builds a partial partition of the system's CPUs.
672  * A 'partial partition' is a set of non-overlapping subsets of CPUs whose
673  * union is a subset of the system's CPUs.
674 * The output of this function needs to be passed to kernel/sched/core.c
675 * partition_sched_domains() routine, which will rebuild the scheduler's
676 * load balancing domains (sched domains) as specified by that partial
677 * partition.
678 *
679 * See "What is sched_load_balance" in Documentation/admin-guide/cgroup-v1/cpusets.rst
680 * for a background explanation of this.
681 *
682 * Does not return errors, on the theory that the callers of this
683 * routine would rather not worry about failures to rebuild sched
684 * domains when operating in the severe memory shortage situations
685 * that could cause allocation failures below.
686 *
687 * Must be called with cpuset_mutex held.
688 *
689 * The three key local variables below are:
690 * cp - cpuset pointer, used (together with pos_css) to perform a
691 * top-down scan of all cpusets. For our purposes, rebuilding
692  * the scheduler's sched domains, we can ignore !is_sched_load_
693 * balance cpusets.
694 * csa - (for CpuSet Array) Array of pointers to all the cpusets
695 * that need to be load balanced, for convenient iterative
696 * access by the subsequent code that finds the best partition,
697  * i.e. the set of domains (subsets) of CPUs such that the
698 * cpus_allowed of every cpuset marked is_sched_load_balance
699 * is a subset of one of these domains, while there are as
700 * many such domains as possible, each as small as possible.
701 * doms - Conversion of 'csa' to an array of cpumasks, for passing to
702 * the kernel/sched/core.c routine partition_sched_domains() in a
703 * convenient format, that can be easily compared to the prior
704 * value to determine what partition elements (sched domains)
705 * were changed (added or removed.)
706 *
707 * Finding the best partition (set of domains):
708 * The double nested loops below over i, j scan over the load
709 * balanced cpusets (using the array of cpuset pointers in csa[])
710 * looking for pairs of cpusets that have overlapping cpus_allowed
711 * and merging them using a union-find algorithm.
712 *
713 * The union of the cpus_allowed masks from the set of all cpusets
714 * having the same root then form the one element of the partition
715 * (one sched domain) to be passed to partition_sched_domains().
716 *
717  */
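
/*
 * Worked example (illustrative): with load balancing disabled in the top
 * cpuset and two v1 child cpusets A (cpus 0-3) and B (cpus 2-5) that both
 * have sched_load_balance set, the union-find merge below places them in
 * the same root, so one sched domain spanning CPUs 0-5 is generated. If B
 * instead used cpus 4-7, two disjoint domains (0-3 and 4-7) would result.
 */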
718 static int generate_sched_domains(cpumask_var_t **domains,
719 struct sched_domain_attr **attributes)
720 {
721 struct cpuset *cp; /* top-down scan of cpusets */
722 struct cpuset **csa; /* array of all cpuset ptrs */
723 int csn; /* how many cpuset ptrs in csa so far */
724 int i, j; /* indices for partition finding loops */
725 cpumask_var_t *doms; /* resulting partition; i.e. sched domains */
726 struct sched_domain_attr *dattr; /* attributes for custom domains */
727 int ndoms = 0; /* number of sched domains in result */
728 int nslot; /* next empty doms[] struct cpumask slot */
729 struct cgroup_subsys_state *pos_css;
730 bool root_load_balance = is_sched_load_balance(&top_cpuset);
731 bool cgrpv2 = cgroup_subsys_on_dfl(cpuset_cgrp_subsys);
732 int nslot_update;
733
734 doms = NULL;
735 dattr = NULL;
736 csa = NULL;
737
738 /* Special case for the 99% of systems with one, full, sched domain */
739 if (root_load_balance && cpumask_empty(subpartitions_cpus)) {
740 single_root_domain:
741 ndoms = 1;
742 doms = alloc_sched_domains(ndoms);
743 if (!doms)
744 goto done;
745
746 dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
747 if (dattr) {
748 *dattr = SD_ATTR_INIT;
749 update_domain_attr_tree(dattr, &top_cpuset);
750 }
751 cpumask_and(doms[0], top_cpuset.effective_cpus,
752 housekeeping_cpumask(HK_TYPE_DOMAIN));
753
754 goto done;
755 }
756
757 csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL);
758 if (!csa)
759 goto done;
760 csn = 0;
761
762 rcu_read_lock();
763 if (root_load_balance)
764 csa[csn++] = &top_cpuset;
765 cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
766 if (cp == &top_cpuset)
767 continue;
768
769 if (cgrpv2)
770 goto v2;
771
772 /*
773 * v1:
774 * Continue traversing beyond @cp iff @cp has some CPUs and
775 * isn't load balancing. The former is obvious. The
776 * latter: All child cpusets contain a subset of the
777 * parent's cpus, so just skip them, and then we call
778 * update_domain_attr_tree() to calc relax_domain_level of
779 * the corresponding sched domain.
780 */
781 if (!cpumask_empty(cp->cpus_allowed) &&
782 !(is_sched_load_balance(cp) &&
783 cpumask_intersects(cp->cpus_allowed,
784 housekeeping_cpumask(HK_TYPE_DOMAIN))))
785 continue;
786
787 if (is_sched_load_balance(cp) &&
788 !cpumask_empty(cp->effective_cpus))
789 csa[csn++] = cp;
790
791 /* skip @cp's subtree */
792 pos_css = css_rightmost_descendant(pos_css);
793 continue;
794
795 v2:
796 /*
797 * Only valid partition roots that are not isolated and with
798  * non-empty effective_cpus will be saved into csa[].
799 */
800 if ((cp->partition_root_state == PRS_ROOT) &&
801 !cpumask_empty(cp->effective_cpus))
802 csa[csn++] = cp;
803
804 /*
805 * Skip @cp's subtree if not a partition root and has no
806 * exclusive CPUs to be granted to child cpusets.
807 */
808 if (!is_partition_valid(cp) && cpumask_empty(cp->exclusive_cpus))
809 pos_css = css_rightmost_descendant(pos_css);
810 }
811 rcu_read_unlock();
812
813 /*
814 * If there are only isolated partitions underneath the cgroup root,
815 * we can optimize out unneeded sched domains scanning.
816 */
817 if (root_load_balance && (csn == 1))
818 goto single_root_domain;
819
820 for (i = 0; i < csn; i++)
821 uf_node_init(&csa[i]->node);
822
823 /* Merge overlapping cpusets */
824 for (i = 0; i < csn; i++) {
825 for (j = i + 1; j < csn; j++) {
826 if (cpusets_overlap(csa[i], csa[j])) {
827 /*
828 * Cgroup v2 shouldn't pass down overlapping
829 * partition root cpusets.
830 */
831 WARN_ON_ONCE(cgrpv2);
832 uf_union(&csa[i]->node, &csa[j]->node);
833 }
834 }
835 }
836
837 /* Count the total number of domains */
838 for (i = 0; i < csn; i++) {
839 if (uf_find(&csa[i]->node) == &csa[i]->node)
840 ndoms++;
841 }
842
843 /*
844 * Now we know how many domains to create.
845 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
846 */
847 doms = alloc_sched_domains(ndoms);
848 if (!doms)
849 goto done;
850
851 /*
852 * The rest of the code, including the scheduler, can deal with
853 * dattr==NULL case. No need to abort if alloc fails.
854 */
855 dattr = kmalloc_array(ndoms, sizeof(struct sched_domain_attr),
856 GFP_KERNEL);
857
858 /*
859 * Cgroup v2 doesn't support domain attributes, just set all of them
860 * to SD_ATTR_INIT. Also non-isolating partition root CPUs are a
861 * subset of HK_TYPE_DOMAIN housekeeping CPUs.
862 */
863 if (cgrpv2) {
864 for (i = 0; i < ndoms; i++) {
865 cpumask_copy(doms[i], csa[i]->effective_cpus);
866 if (dattr)
867 dattr[i] = SD_ATTR_INIT;
868 }
869 goto done;
870 }
871
872 for (nslot = 0, i = 0; i < csn; i++) {
873 nslot_update = 0;
874 for (j = i; j < csn; j++) {
875 if (uf_find(&csa[j]->node) == &csa[i]->node) {
876 struct cpumask *dp = doms[nslot];
877
878 if (i == j) {
879 nslot_update = 1;
880 cpumask_clear(dp);
881 if (dattr)
882 *(dattr + nslot) = SD_ATTR_INIT;
883 }
884 cpumask_or(dp, dp, csa[j]->effective_cpus);
885 cpumask_and(dp, dp, housekeeping_cpumask(HK_TYPE_DOMAIN));
886 if (dattr)
887 update_domain_attr_tree(dattr + nslot, csa[j]);
888 }
889 }
890 if (nslot_update)
891 nslot++;
892 }
893 BUG_ON(nslot != ndoms);
894
895 done:
896 kfree(csa);
897
898 /*
899 * Fallback to the default domain if kmalloc() failed.
900 * See comments in partition_sched_domains().
901 */
902 if (doms == NULL)
903 ndoms = 1;
904
905 *domains = doms;
906 *attributes = dattr;
907 return ndoms;
908 }
909
910 static void dl_update_tasks_root_domain(struct cpuset *cs)
911 {
912 struct css_task_iter it;
913 struct task_struct *task;
914
915 if (cs->nr_deadline_tasks == 0)
916 return;
917
918 css_task_iter_start(&cs->css, 0, &it);
919
920 while ((task = css_task_iter_next(&it)))
921 dl_add_task_root_domain(task);
922
923 css_task_iter_end(&it);
924 }
925
926 static void dl_rebuild_rd_accounting(void)
927 {
928 struct cpuset *cs = NULL;
929 struct cgroup_subsys_state *pos_css;
930
931 lockdep_assert_held(&cpuset_mutex);
932 lockdep_assert_cpus_held();
933 lockdep_assert_held(&sched_domains_mutex);
934
935 rcu_read_lock();
936
937 /*
938 * Clear default root domain DL accounting, it will be computed again
939 * if a task belongs to it.
940 */
941 dl_clear_root_domain(&def_root_domain);
942
943 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
944
945 if (cpumask_empty(cs->effective_cpus)) {
946 pos_css = css_rightmost_descendant(pos_css);
947 continue;
948 }
949
950 css_get(&cs->css);
951
952 rcu_read_unlock();
953
954 dl_update_tasks_root_domain(cs);
955
956 rcu_read_lock();
957 css_put(&cs->css);
958 }
959 rcu_read_unlock();
960 }
961
962 static void
963 partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
964 struct sched_domain_attr *dattr_new)
965 {
966 mutex_lock(&sched_domains_mutex);
967 partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
968 dl_rebuild_rd_accounting();
969 mutex_unlock(&sched_domains_mutex);
970 }
971
972 /*
973 * Rebuild scheduler domains.
974 *
975 * If the flag 'sched_load_balance' of any cpuset with non-empty
976 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
977 * which has that flag enabled, or if any cpuset with a non-empty
978 * 'cpus' is removed, then call this routine to rebuild the
979 * scheduler's dynamic sched domains.
980 *
981 * Call with cpuset_mutex held. Takes cpus_read_lock().
982 */
983 void rebuild_sched_domains_locked(void)
984 {
985 struct cgroup_subsys_state *pos_css;
986 struct sched_domain_attr *attr;
987 cpumask_var_t *doms;
988 struct cpuset *cs;
989 int ndoms;
990
991 lockdep_assert_cpus_held();
992 lockdep_assert_held(&cpuset_mutex);
993
994 /*
995 * If we have raced with CPU hotplug, return early to avoid
996 * passing doms with offlined cpu to partition_sched_domains().
997  * Anyway, cpuset_handle_hotplug() will rebuild sched domains.
998 *
999 * With no CPUs in any subpartitions, top_cpuset's effective CPUs
1000 * should be the same as the active CPUs, so checking only top_cpuset
1001 * is enough to detect racing CPU offlines.
1002 */
1003 if (cpumask_empty(subpartitions_cpus) &&
1004 !cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
1005 return;
1006
1007 /*
1008 * With subpartition CPUs, however, the effective CPUs of a partition
1009 * root should be only a subset of the active CPUs. Since a CPU in any
1010 * partition root could be offlined, all must be checked.
1011 */
1012 if (!cpumask_empty(subpartitions_cpus)) {
1013 rcu_read_lock();
1014 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
1015 if (!is_partition_valid(cs)) {
1016 pos_css = css_rightmost_descendant(pos_css);
1017 continue;
1018 }
1019 if (!cpumask_subset(cs->effective_cpus,
1020 cpu_active_mask)) {
1021 rcu_read_unlock();
1022 return;
1023 }
1024 }
1025 rcu_read_unlock();
1026 }
1027
1028 /* Generate domain masks and attrs */
1029 ndoms = generate_sched_domains(&doms, &attr);
1030
1031 /* Have scheduler rebuild the domains */
1032 partition_and_rebuild_sched_domains(ndoms, doms, attr);
1033 }
1034 #else /* !CONFIG_SMP */
1035 void rebuild_sched_domains_locked(void)
1036 {
1037 }
1038 #endif /* CONFIG_SMP */
1039
1040 static void rebuild_sched_domains_cpuslocked(void)
1041 {
1042 mutex_lock(&cpuset_mutex);
1043 rebuild_sched_domains_locked();
1044 mutex_unlock(&cpuset_mutex);
1045 }
1046
1047 void rebuild_sched_domains(void)
1048 {
1049 cpus_read_lock();
1050 rebuild_sched_domains_cpuslocked();
1051 cpus_read_unlock();
1052 }
1053
1054 /**
1055 * cpuset_update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
1056 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
1057 * @new_cpus: the temp variable for the new effective_cpus mask
1058 *
1059 * Iterate through each task of @cs updating its cpus_allowed to the
1060 * effective cpuset's. As this function is called with cpuset_mutex held,
1061 * cpuset membership stays stable. For top_cpuset, task_cpu_possible_mask()
1062 * is used instead of effective_cpus to make sure all offline CPUs are also
1063 * included as hotplug code won't update cpumasks for tasks in top_cpuset.
1064 */
1065 void cpuset_update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
1066 {
1067 struct css_task_iter it;
1068 struct task_struct *task;
1069 bool top_cs = cs == &top_cpuset;
1070
1071 css_task_iter_start(&cs->css, 0, &it);
1072 while ((task = css_task_iter_next(&it))) {
1073 const struct cpumask *possible_mask = task_cpu_possible_mask(task);
1074
1075 if (top_cs) {
1076 /*
1077 * Percpu kthreads in top_cpuset are ignored
1078 */
1079 if (kthread_is_per_cpu(task))
1080 continue;
1081 cpumask_andnot(new_cpus, possible_mask, subpartitions_cpus);
1082 } else {
1083 cpumask_and(new_cpus, possible_mask, cs->effective_cpus);
1084 }
1085 set_cpus_allowed_ptr(task, new_cpus);
1086 }
1087 css_task_iter_end(&it);
1088 }
1089
1090 /**
1091 * compute_effective_cpumask - Compute the effective cpumask of the cpuset
1092 * @new_cpus: the temp variable for the new effective_cpus mask
1093  * @cs: the cpuset that needs to recompute the new effective_cpus mask
1094 * @parent: the parent cpuset
1095 *
1096 * The result is valid only if the given cpuset isn't a partition root.
1097 */
1098 static void compute_effective_cpumask(struct cpumask *new_cpus,
1099 struct cpuset *cs, struct cpuset *parent)
1100 {
1101 cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus);
1102 }
1103
1104 /*
1105 * Commands for update_parent_effective_cpumask
1106 */
1107 enum partition_cmd {
1108 partcmd_enable, /* Enable partition root */
1109 partcmd_enablei, /* Enable isolated partition root */
1110 partcmd_disable, /* Disable partition root */
1111 partcmd_update, /* Update parent's effective_cpus */
1112 partcmd_invalidate, /* Make partition invalid */
1113 };
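
/*
 * Mapping from user actions to commands (assumed from the cgroup v2 cpuset
 * interface, for illustration): writing "root" to cpuset.cpus.partition
 * leads to partcmd_enable, "isolated" to partcmd_enablei and "member" to
 * partcmd_disable via update_prstate(), while changes to cpuset.cpus of an
 * existing partition use partcmd_update or partcmd_invalidate.
 */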
1114
1115 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
1116 struct tmpmasks *tmp);
1117
1118 /*
1119 * Update partition exclusive flag
1120 *
1121 * Return: 0 if successful, an error code otherwise
1122 */
1123 static int update_partition_exclusive(struct cpuset *cs, int new_prs)
1124 {
1125 bool exclusive = (new_prs > PRS_MEMBER);
1126
1127 if (exclusive && !is_cpu_exclusive(cs)) {
1128 if (cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, 1))
1129 return PERR_NOTEXCL;
1130 } else if (!exclusive && is_cpu_exclusive(cs)) {
1131 /* Turning off CS_CPU_EXCLUSIVE will not return error */
1132 cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, 0);
1133 }
1134 return 0;
1135 }
1136
1137 /*
1138 * Update partition load balance flag and/or rebuild sched domain
1139 *
1140 * Changing load balance flag will automatically call
1141 * rebuild_sched_domains_locked().
1142 * This function is for cgroup v2 only.
1143 */
1144 static void update_partition_sd_lb(struct cpuset *cs, int old_prs)
1145 {
1146 int new_prs = cs->partition_root_state;
1147 bool rebuild_domains = (new_prs > 0) || (old_prs > 0);
1148 bool new_lb;
1149
1150 /*
1151 * If cs is not a valid partition root, the load balance state
1152 * will follow its parent.
1153 */
1154 if (new_prs > 0) {
1155 new_lb = (new_prs != PRS_ISOLATED);
1156 } else {
1157 new_lb = is_sched_load_balance(parent_cs(cs));
1158 }
1159 if (new_lb != !!is_sched_load_balance(cs)) {
1160 rebuild_domains = true;
1161 if (new_lb)
1162 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1163 else
1164 clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1165 }
1166
1167 if (rebuild_domains && !force_sd_rebuild)
1168 rebuild_sched_domains_locked();
1169 }
1170
1171 /*
1172 * tasks_nocpu_error - Return true if tasks will have no effective_cpus
1173 */
1174 static bool tasks_nocpu_error(struct cpuset *parent, struct cpuset *cs,
1175 struct cpumask *xcpus)
1176 {
1177 /*
1178 * A populated partition (cs or parent) can't have empty effective_cpus
1179 */
1180 return (cpumask_subset(parent->effective_cpus, xcpus) &&
1181 partition_is_populated(parent, cs)) ||
1182 (!cpumask_intersects(xcpus, cpu_active_mask) &&
1183 partition_is_populated(cs, NULL));
1184 }
1185
1186 static void reset_partition_data(struct cpuset *cs)
1187 {
1188 struct cpuset *parent = parent_cs(cs);
1189
1190 if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
1191 return;
1192
1193 lockdep_assert_held(&callback_lock);
1194
1195 cs->nr_subparts = 0;
1196 if (cpumask_empty(cs->exclusive_cpus)) {
1197 cpumask_clear(cs->effective_xcpus);
1198 if (is_cpu_exclusive(cs))
1199 clear_bit(CS_CPU_EXCLUSIVE, &cs->flags);
1200 }
1201 if (!cpumask_and(cs->effective_cpus, parent->effective_cpus, cs->cpus_allowed))
1202 cpumask_copy(cs->effective_cpus, parent->effective_cpus);
1203 }
1204
1205 /*
1206 * partition_xcpus_newstate - Exclusive CPUs state change
1207 * @old_prs: old partition_root_state
1208 * @new_prs: new partition_root_state
1209 * @xcpus: exclusive CPUs with state change
1210 */
1211 static void partition_xcpus_newstate(int old_prs, int new_prs, struct cpumask *xcpus)
1212 {
1213 WARN_ON_ONCE(old_prs == new_prs);
1214 if (new_prs == PRS_ISOLATED)
1215 cpumask_or(isolated_cpus, isolated_cpus, xcpus);
1216 else
1217 cpumask_andnot(isolated_cpus, isolated_cpus, xcpus);
1218 }
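
/*
 * Example (illustrative): when exclusive CPUs 2-3 are granted to an
 * isolated partition (new_prs == PRS_ISOLATED) under a load-balanced
 * parent, they are added to isolated_cpus; when the same CPUs go to a
 * load-balanced partition root instead, they are removed from it.
 */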
1219
1220 /*
1221 * partition_xcpus_add - Add new exclusive CPUs to partition
1222 * @new_prs: new partition_root_state
1223 * @parent: parent cpuset
1224 * @xcpus: exclusive CPUs to be added
1225 * Return: true if isolated_cpus modified, false otherwise
1226 *
1227 * Remote partition if parent == NULL
1228 */
1229 static bool partition_xcpus_add(int new_prs, struct cpuset *parent,
1230 struct cpumask *xcpus)
1231 {
1232 bool isolcpus_updated;
1233
1234 WARN_ON_ONCE(new_prs < 0);
1235 lockdep_assert_held(&callback_lock);
1236 if (!parent)
1237 parent = &top_cpuset;
1238
1239
1240 if (parent == &top_cpuset)
1241 cpumask_or(subpartitions_cpus, subpartitions_cpus, xcpus);
1242
1243 isolcpus_updated = (new_prs != parent->partition_root_state);
1244 if (isolcpus_updated)
1245 partition_xcpus_newstate(parent->partition_root_state, new_prs,
1246 xcpus);
1247
1248 cpumask_andnot(parent->effective_cpus, parent->effective_cpus, xcpus);
1249 return isolcpus_updated;
1250 }
1251
1252 /*
1253 * partition_xcpus_del - Remove exclusive CPUs from partition
1254 * @old_prs: old partition_root_state
1255 * @parent: parent cpuset
1256 * @xcpus: exclusive CPUs to be removed
1257 * Return: true if isolated_cpus modified, false otherwise
1258 *
1259 * Remote partition if parent == NULL
1260 */
1261 static bool partition_xcpus_del(int old_prs, struct cpuset *parent,
1262 struct cpumask *xcpus)
1263 {
1264 bool isolcpus_updated;
1265
1266 WARN_ON_ONCE(old_prs < 0);
1267 lockdep_assert_held(&callback_lock);
1268 if (!parent)
1269 parent = &top_cpuset;
1270
1271 if (parent == &top_cpuset)
1272 cpumask_andnot(subpartitions_cpus, subpartitions_cpus, xcpus);
1273
1274 isolcpus_updated = (old_prs != parent->partition_root_state);
1275 if (isolcpus_updated)
1276 partition_xcpus_newstate(old_prs, parent->partition_root_state,
1277 xcpus);
1278
1279 cpumask_and(xcpus, xcpus, cpu_active_mask);
1280 cpumask_or(parent->effective_cpus, parent->effective_cpus, xcpus);
1281 return isolcpus_updated;
1282 }
1283
1284 static void update_unbound_workqueue_cpumask(bool isolcpus_updated)
1285 {
1286 int ret;
1287
1288 lockdep_assert_cpus_held();
1289
1290 if (!isolcpus_updated)
1291 return;
1292
1293 ret = workqueue_unbound_exclude_cpumask(isolated_cpus);
1294 WARN_ON_ONCE(ret < 0);
1295 }
1296
1297 /**
1298 * cpuset_cpu_is_isolated - Check if the given CPU is isolated
1299 * @cpu: the CPU number to be checked
1300 * Return: true if CPU is used in an isolated partition, false otherwise
1301 */
1302 bool cpuset_cpu_is_isolated(int cpu)
1303 {
1304 return cpumask_test_cpu(cpu, isolated_cpus);
1305 }
1306 EXPORT_SYMBOL_GPL(cpuset_cpu_is_isolated);
1307
1308 /*
1309 * compute_effective_exclusive_cpumask - compute effective exclusive CPUs
1310 * @cs: cpuset
1311 * @xcpus: effective exclusive CPUs value to be set
1312 * Return: true if xcpus is not empty, false otherwise.
1313 *
1314 * Starting with exclusive_cpus (cpus_allowed if exclusive_cpus is not set),
1315 * it must be a subset of parent's effective_xcpus.
1316 */
1317 static bool compute_effective_exclusive_cpumask(struct cpuset *cs,
1318 struct cpumask *xcpus)
1319 {
1320 struct cpuset *parent = parent_cs(cs);
1321
1322 if (!xcpus)
1323 xcpus = cs->effective_xcpus;
1324
1325 return cpumask_and(xcpus, user_xcpus(cs), parent->effective_xcpus);
1326 }
1327
1328 static inline bool is_remote_partition(struct cpuset *cs)
1329 {
1330 return !list_empty(&cs->remote_sibling);
1331 }
1332
1333 static inline bool is_local_partition(struct cpuset *cs)
1334 {
1335 return is_partition_valid(cs) && !is_remote_partition(cs);
1336 }
1337
1338 /*
1339 * remote_partition_enable - Enable current cpuset as a remote partition root
1340 * @cs: the cpuset to update
1341 * @new_prs: new partition_root_state
1342  * @tmp: temporary masks
1343 * Return: 0 if successful, errcode if error
1344 *
1345 * Enable the current cpuset to become a remote partition root taking CPUs
1346 * directly from the top cpuset. cpuset_mutex must be held by the caller.
1347 */
1348 static int remote_partition_enable(struct cpuset *cs, int new_prs,
1349 struct tmpmasks *tmp)
1350 {
1351 bool isolcpus_updated;
1352
1353 /*
1354 * The user must have sysadmin privilege.
1355 */
1356 if (!capable(CAP_SYS_ADMIN))
1357 return PERR_ACCESS;
1358
1359 /*
1360 * The requested exclusive_cpus must not be allocated to other
1361 * partitions and it can't use up all the root's effective_cpus.
1362 *
1363 * Note that if there is any local partition root above it or
1364 * remote partition root underneath it, its exclusive_cpus must
1365 * have overlapped with subpartitions_cpus.
1366 */
1367 compute_effective_exclusive_cpumask(cs, tmp->new_cpus);
1368 if (cpumask_empty(tmp->new_cpus) ||
1369 cpumask_intersects(tmp->new_cpus, subpartitions_cpus) ||
1370 cpumask_subset(top_cpuset.effective_cpus, tmp->new_cpus))
1371 return PERR_INVCPUS;
1372
1373 spin_lock_irq(&callback_lock);
1374 isolcpus_updated = partition_xcpus_add(new_prs, NULL, tmp->new_cpus);
1375 list_add(&cs->remote_sibling, &remote_children);
1376 spin_unlock_irq(&callback_lock);
1377 update_unbound_workqueue_cpumask(isolcpus_updated);
1378
1379 /*
1380  * Propagate changes in top_cpuset's effective_cpus down the hierarchy.
1381 */
1382 cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
1383 update_sibling_cpumasks(&top_cpuset, NULL, tmp);
1384 return 0;
1385 }
1386
1387 /*
1388 * remote_partition_disable - Remove current cpuset from remote partition list
1389 * @cs: the cpuset to update
1390  * @tmp: temporary masks
1391 *
1392 * The effective_cpus is also updated.
1393 *
1394 * cpuset_mutex must be held by the caller.
1395 */
1396 static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp)
1397 {
1398 bool isolcpus_updated;
1399
1400 compute_effective_exclusive_cpumask(cs, tmp->new_cpus);
1401 WARN_ON_ONCE(!is_remote_partition(cs));
1402 WARN_ON_ONCE(!cpumask_subset(tmp->new_cpus, subpartitions_cpus));
1403
1404 spin_lock_irq(&callback_lock);
1405 list_del_init(&cs->remote_sibling);
1406 isolcpus_updated = partition_xcpus_del(cs->partition_root_state,
1407 NULL, tmp->new_cpus);
1408 cs->partition_root_state = -cs->partition_root_state;
1409 if (!cs->prs_err)
1410 cs->prs_err = PERR_INVCPUS;
1411 reset_partition_data(cs);
1412 spin_unlock_irq(&callback_lock);
1413 update_unbound_workqueue_cpumask(isolcpus_updated);
1414
1415 /*
1416  * Propagate changes in top_cpuset's effective_cpus down the hierarchy.
1417 */
1418 cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
1419 update_sibling_cpumasks(&top_cpuset, NULL, tmp);
1420 }
1421
1422 /*
1423 * remote_cpus_update - cpus_exclusive change of remote partition
1424 * @cs: the cpuset to be updated
1425 * @newmask: the new effective_xcpus mask
1426  * @tmp: temporary masks
1427 *
1428 * top_cpuset and subpartitions_cpus will be updated or partition can be
1429 * invalidated.
1430 */
1431 static void remote_cpus_update(struct cpuset *cs, struct cpumask *newmask,
1432 struct tmpmasks *tmp)
1433 {
1434 bool adding, deleting;
1435 int prs = cs->partition_root_state;
1436 int isolcpus_updated = 0;
1437
1438 if (WARN_ON_ONCE(!is_remote_partition(cs)))
1439 return;
1440
1441 WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus));
1442
1443 if (cpumask_empty(newmask))
1444 goto invalidate;
1445
1446 adding = cpumask_andnot(tmp->addmask, newmask, cs->effective_xcpus);
1447 deleting = cpumask_andnot(tmp->delmask, cs->effective_xcpus, newmask);
1448
1449 /*
1450  * Adding remote CPUs is only allowed if those CPUs are
1451 * not allocated to other partitions and there are effective_cpus
1452 * left in the top cpuset.
1453 */
1454 if (adding && (!capable(CAP_SYS_ADMIN) ||
1455 cpumask_intersects(tmp->addmask, subpartitions_cpus) ||
1456 cpumask_subset(top_cpuset.effective_cpus, tmp->addmask)))
1457 goto invalidate;
1458
1459 spin_lock_irq(&callback_lock);
1460 if (adding)
1461 isolcpus_updated += partition_xcpus_add(prs, NULL, tmp->addmask);
1462 if (deleting)
1463 isolcpus_updated += partition_xcpus_del(prs, NULL, tmp->delmask);
1464 spin_unlock_irq(&callback_lock);
1465 update_unbound_workqueue_cpumask(isolcpus_updated);
1466
1467 /*
1468  * Propagate changes in top_cpuset's effective_cpus down the hierarchy.
1469 */
1470 cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
1471 update_sibling_cpumasks(&top_cpuset, NULL, tmp);
1472 return;
1473
1474 invalidate:
1475 remote_partition_disable(cs, tmp);
1476 }
1477
1478 /*
1479 * remote_partition_check - check if a child remote partition needs update
1480 * @cs: the cpuset to be updated
1481 * @newmask: the new effective_xcpus mask
1482 * @delmask: temporary mask for deletion (not in tmp)
1483  * @tmp: temporary masks
1484 *
1485 * This should be called before the given cs has updated its cpus_allowed
1486 * and/or effective_xcpus.
1487 */
1488 static void remote_partition_check(struct cpuset *cs, struct cpumask *newmask,
1489 struct cpumask *delmask, struct tmpmasks *tmp)
1490 {
1491 struct cpuset *child, *next;
1492 int disable_cnt = 0;
1493
1494 /*
1495 * Compute the effective exclusive CPUs that will be deleted.
1496 */
1497 if (!cpumask_andnot(delmask, cs->effective_xcpus, newmask) ||
1498 !cpumask_intersects(delmask, subpartitions_cpus))
1499 return; /* No deletion of exclusive CPUs in partitions */
1500
1501 /*
1502 * Searching the remote children list to look for those that will
1503 * be impacted by the deletion of exclusive CPUs.
1504 *
1505  * Since a cpuset must be removed from the remote children list
1506  * before it can go offline, and holding cpuset_mutex prevents
1507  * any change in cpuset status, the RCU read lock isn't needed.
1508 */
1509 lockdep_assert_held(&cpuset_mutex);
1510 list_for_each_entry_safe(child, next, &remote_children, remote_sibling)
1511 if (cpumask_intersects(child->effective_cpus, delmask)) {
1512 remote_partition_disable(child, tmp);
1513 disable_cnt++;
1514 }
1515 if (disable_cnt && !force_sd_rebuild)
1516 rebuild_sched_domains_locked();
1517 }
1518
1519 /*
1520 * prstate_housekeeping_conflict - check for partition & housekeeping conflicts
1521 * @prstate: partition root state to be checked
1522 * @new_cpus: cpu mask
1523 * Return: true if there is conflict, false otherwise
1524 *
1525 * CPUs outside of boot_hk_cpus, if defined, can only be used in an
1526 * isolated partition.
1527 */
1528 static bool prstate_housekeeping_conflict(int prstate, struct cpumask *new_cpus)
1529 {
1530 if (!have_boot_isolcpus)
1531 return false;
1532
1533 if ((prstate != PRS_ISOLATED) && !cpumask_subset(new_cpus, boot_hk_cpus))
1534 return true;
1535
1536 return false;
1537 }
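
/*
 * Example (illustrative, assuming an 8-CPU system booted with isolcpus=4-7
 * so that boot_hk_cpus is 0-3): requesting a load-balanced "root" partition
 * that includes CPU 5 conflicts with the boot-time housekeeping setup and
 * the caller reports PERR_HKEEPING, while an isolated partition using the
 * same CPUs is allowed.
 */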
1538
1539 /**
1540 * update_parent_effective_cpumask - update effective_cpus mask of parent cpuset
1541 * @cs: The cpuset that requests change in partition root state
1542 * @cmd: Partition root state change command
1543 * @newmask: Optional new cpumask for partcmd_update
1544 * @tmp: Temporary addmask and delmask
1545 * Return: 0 or a partition root state error code
1546 *
1547 * For partcmd_enable*, the cpuset is being transformed from a non-partition
1548 * root to a partition root. The effective_xcpus (cpus_allowed if
1549 * effective_xcpus not set) mask of the given cpuset will be taken away from
1550 * parent's effective_cpus. The function will return 0 if all the CPUs listed
1551 * in effective_xcpus can be granted or an error code will be returned.
1552 *
1553 * For partcmd_disable, the cpuset is being transformed from a partition
1554 * root back to a non-partition root. Any CPUs in effective_xcpus will be
1555 * given back to parent's effective_cpus. 0 will always be returned.
1556 *
1557 * For partcmd_update, if the optional newmask is specified, the cpu list is
1558 * to be changed from effective_xcpus to newmask. Otherwise, effective_xcpus is
1559 * assumed to remain the same. The cpuset should either be a valid or invalid
1560 * partition root. The partition root state may change from valid to invalid
1561 * or vice versa. An error code will be returned if transitioning from
1562 * invalid to valid violates the exclusivity rule.
1563 *
1564 * For partcmd_invalidate, the current partition will be made invalid.
1565 *
1566 * The partcmd_enable* and partcmd_disable commands are used by
1567 * update_prstate(). An error code may be returned and the caller will check
1568 * for error.
1569 *
1570 * The partcmd_update command is used by update_cpumasks_hier() with newmask
1571 * NULL and update_cpumask() with newmask set. The partcmd_invalidate is used
1572 * by update_cpumask() with NULL newmask. In both cases, the callers won't
1573 * check for error and so partition_root_state and prs_error will be updated
1574 * directly.
1575 */
1576 static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
1577 struct cpumask *newmask,
1578 struct tmpmasks *tmp)
1579 {
1580 struct cpuset *parent = parent_cs(cs);
1581 int adding; /* Adding cpus to parent's effective_cpus */
1582 int deleting; /* Deleting cpus from parent's effective_cpus */
1583 int old_prs, new_prs;
1584 int part_error = PERR_NONE; /* Partition error? */
1585 int subparts_delta = 0;
1586 struct cpumask *xcpus; /* cs effective_xcpus */
1587 int isolcpus_updated = 0;
1588 bool nocpu;
1589
1590 lockdep_assert_held(&cpuset_mutex);
1591
1592 /*
1593 * new_prs will only be changed for the partcmd_update and
1594 * partcmd_invalidate commands.
1595 */
1596 adding = deleting = false;
1597 old_prs = new_prs = cs->partition_root_state;
1598 xcpus = user_xcpus(cs);
1599
1600 if (cmd == partcmd_invalidate) {
1601 if (is_prs_invalid(old_prs))
1602 return 0;
1603
1604 /*
1605 * Make the current partition invalid.
1606 */
1607 if (is_partition_valid(parent))
1608 adding = cpumask_and(tmp->addmask,
1609 xcpus, parent->effective_xcpus);
1610 if (old_prs > 0) {
1611 new_prs = -old_prs;
1612 subparts_delta--;
1613 }
1614 goto write_error;
1615 }
1616
1617 /*
1618 * The parent must be a partition root.
1619 * The new cpumask, if present, or the current cpus_allowed must
1620 * not be empty.
1621 */
1622 if (!is_partition_valid(parent)) {
1623 return is_partition_invalid(parent)
1624 ? PERR_INVPARENT : PERR_NOTPART;
1625 }
1626 if (!newmask && xcpus_empty(cs))
1627 return PERR_CPUSEMPTY;
1628
1629 nocpu = tasks_nocpu_error(parent, cs, xcpus);
1630
1631 if ((cmd == partcmd_enable) || (cmd == partcmd_enablei)) {
1632 /*
1633 * Enabling partition root is not allowed if its
1634 * effective_xcpus is empty or doesn't overlap with
1635 * parent's effective_xcpus.
1636 */
1637 if (cpumask_empty(xcpus) ||
1638 !cpumask_intersects(xcpus, parent->effective_xcpus))
1639 return PERR_INVCPUS;
1640
1641 if (prstate_housekeeping_conflict(new_prs, xcpus))
1642 return PERR_HKEEPING;
1643
1644 /*
1645 * A parent can be left with no CPU as long as there is no
1646 * task directly associated with the parent partition.
1647 */
1648 if (nocpu)
1649 return PERR_NOCPUS;
1650
1651 cpumask_copy(tmp->delmask, xcpus);
1652 deleting = true;
1653 subparts_delta++;
1654 new_prs = (cmd == partcmd_enable) ? PRS_ROOT : PRS_ISOLATED;
1655 } else if (cmd == partcmd_disable) {
1656 /*
1657 * May need to add cpus to parent's effective_cpus for
1658 * valid partition root.
1659 */
1660 adding = !is_prs_invalid(old_prs) &&
1661 cpumask_and(tmp->addmask, xcpus, parent->effective_xcpus);
1662 if (adding)
1663 subparts_delta--;
1664 new_prs = PRS_MEMBER;
1665 } else if (newmask) {
1666 /*
1667 * Empty cpumask is not allowed
1668 */
1669 if (cpumask_empty(newmask)) {
1670 part_error = PERR_CPUSEMPTY;
1671 goto write_error;
1672 }
1673 /* Check newmask again, whether cpus are available for parent/cs */
1674 nocpu |= tasks_nocpu_error(parent, cs, newmask);
1675
1676 /*
1677 * partcmd_update with newmask:
1678 *
1679 * Compute add/delete mask to/from effective_cpus
1680 *
1681 * For valid partition:
1682 * addmask = exclusive_cpus & ~newmask
1683 * & parent->effective_xcpus
1684 * delmask = newmask & ~exclusive_cpus
1685 * & parent->effective_xcpus
1686 *
1687 * For invalid partition:
1688 * delmask = newmask & parent->effective_xcpus
1689 */
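/*
 * Illustrative example (values chosen for illustration only): with
 * parent->effective_xcpus = 0-7, exclusive_cpus = 0-3 and
 * newmask = 2-5 on a valid partition, the masks above work out to
 *   addmask = 0-3 & ~(2-5) & 0-7 = 0-1  (returned to the parent)
 *   delmask = 2-5 & ~(0-3) & 0-7 = 4-5  (taken from the parent)
 */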
1690 if (is_prs_invalid(old_prs)) {
1691 adding = false;
1692 deleting = cpumask_and(tmp->delmask,
1693 newmask, parent->effective_xcpus);
1694 } else {
1695 cpumask_andnot(tmp->addmask, xcpus, newmask);
1696 adding = cpumask_and(tmp->addmask, tmp->addmask,
1697 parent->effective_xcpus);
1698
1699 cpumask_andnot(tmp->delmask, newmask, xcpus);
1700 deleting = cpumask_and(tmp->delmask, tmp->delmask,
1701 parent->effective_xcpus);
1702 }
1703 /*
1704 * Make partition invalid if parent's effective_cpus could
1705 * become empty and there are tasks in the parent.
1706 */
1707 if (nocpu && (!adding ||
1708 !cpumask_intersects(tmp->addmask, cpu_active_mask))) {
1709 part_error = PERR_NOCPUS;
1710 deleting = false;
1711 adding = cpumask_and(tmp->addmask,
1712 xcpus, parent->effective_xcpus);
1713 }
1714 } else {
1715 /*
1716 * partcmd_update w/o newmask
1717 *
1718 * delmask = effective_xcpus & parent->effective_cpus
1719 *
1720 * This can be called from:
1721 * 1) update_cpumasks_hier()
1722 * 2) cpuset_hotplug_update_tasks()
1723 *
1724 * Check to see if it can be transitioned from valid to
1725 * invalid partition or vice versa.
1726 *
1727 * A partition error happens when parent has tasks and all
1728 * its effective CPUs will have to be distributed out.
1729 */
1730 WARN_ON_ONCE(!is_partition_valid(parent));
1731 if (nocpu) {
1732 part_error = PERR_NOCPUS;
1733 if (is_partition_valid(cs))
1734 adding = cpumask_and(tmp->addmask,
1735 xcpus, parent->effective_xcpus);
1736 } else if (is_partition_invalid(cs) &&
1737 cpumask_subset(xcpus, parent->effective_xcpus)) {
1738 struct cgroup_subsys_state *css;
1739 struct cpuset *child;
1740 bool exclusive = true;
1741
1742 /*
1743 * Converting an invalid partition to a valid one has to
1744 * pass the cpu exclusivity test.
1745 */
1746 rcu_read_lock();
1747 cpuset_for_each_child(child, css, parent) {
1748 if (child == cs)
1749 continue;
1750 if (!cpusets_are_exclusive(cs, child)) {
1751 exclusive = false;
1752 break;
1753 }
1754 }
1755 rcu_read_unlock();
1756 if (exclusive)
1757 deleting = cpumask_and(tmp->delmask,
1758 xcpus, parent->effective_cpus);
1759 else
1760 part_error = PERR_NOTEXCL;
1761 }
1762 }
1763
1764 write_error:
1765 if (part_error)
1766 WRITE_ONCE(cs->prs_err, part_error);
1767
1768 if (cmd == partcmd_update) {
1769 /*
1770 * Check for possible transition between valid and invalid
1771 * partition root.
1772 */
1773 switch (cs->partition_root_state) {
1774 case PRS_ROOT:
1775 case PRS_ISOLATED:
1776 if (part_error) {
1777 new_prs = -old_prs;
1778 subparts_delta--;
1779 }
1780 break;
1781 case PRS_INVALID_ROOT:
1782 case PRS_INVALID_ISOLATED:
1783 if (!part_error) {
1784 new_prs = -old_prs;
1785 subparts_delta++;
1786 }
1787 break;
1788 }
1789 }
1790
1791 if (!adding && !deleting && (new_prs == old_prs))
1792 return 0;
1793
1794 /*
1795 * Transitioning from invalid to valid or vice versa may require
1796 * changing CS_CPU_EXCLUSIVE. In the case of partcmd_update,
1797 * validate_change() has already been successfully called and
1798 * CPU lists in cs haven't been updated yet. So defer it to later.
1799 */
1800 if ((old_prs != new_prs) && (cmd != partcmd_update)) {
1801 int err = update_partition_exclusive(cs, new_prs);
1802
1803 if (err)
1804 return err;
1805 }
1806
1807 /*
1808 * Change the parent's effective_cpus & effective_xcpus (top cpuset
1809 * only).
1810 *
1811 * Newly added CPUs will be removed from effective_cpus and
1812 * newly deleted ones will be added back to effective_cpus.
1813 */
1814 spin_lock_irq(&callback_lock);
1815 if (old_prs != new_prs) {
1816 cs->partition_root_state = new_prs;
1817 if (new_prs <= 0)
1818 cs->nr_subparts = 0;
1819 }
1820 /*
1821 * Adding CPUs to the parent's effective_cpus means deleting them from cs,
1822 * and vice versa.
1823 */
1824 if (adding)
1825 isolcpus_updated += partition_xcpus_del(old_prs, parent,
1826 tmp->addmask);
1827 if (deleting)
1828 isolcpus_updated += partition_xcpus_add(new_prs, parent,
1829 tmp->delmask);
1830
1831 if (is_partition_valid(parent)) {
1832 parent->nr_subparts += subparts_delta;
1833 WARN_ON_ONCE(parent->nr_subparts < 0);
1834 }
1835 spin_unlock_irq(&callback_lock);
1836 update_unbound_workqueue_cpumask(isolcpus_updated);
1837
1838 if ((old_prs != new_prs) && (cmd == partcmd_update))
1839 update_partition_exclusive(cs, new_prs);
1840
1841 if (adding || deleting) {
1842 cpuset_update_tasks_cpumask(parent, tmp->addmask);
1843 update_sibling_cpumasks(parent, cs, tmp);
1844 }
1845
1846 /*
1847 * For partcmd_update without newmask, it is being called from
1848 * cpuset_handle_hotplug(). Update the load balance flag and
1849 * scheduling domain accordingly.
1850 */
1851 if ((cmd == partcmd_update) && !newmask)
1852 update_partition_sd_lb(cs, old_prs);
1853
1854 notify_partition_change(cs, old_prs);
1855 return 0;
1856 }
1857
1858 /**
1859 * compute_partition_effective_cpumask - compute effective_cpus for partition
1860 * @cs: partition root cpuset
1861 * @new_ecpus: previously computed effective_cpus to be updated
1862 *
1863 * Compute the effective_cpus of a partition root by scanning effective_xcpus
1864 * of child partition roots and excluding their effective_xcpus.
1865 *
1866 * This has the side effect of invalidating valid child partition roots,
1867 * if necessary. Since it is called from either cpuset_hotplug_update_tasks()
1868 * or update_cpumasks_hier() where parent and children are modified
1869 * successively, we don't need to call update_parent_effective_cpumask()
1870 * and the child's effective_cpus will be updated in later iterations.
1871 *
1872 * Note that rcu_read_lock() is assumed to be held.
1873 */
1874 static void compute_partition_effective_cpumask(struct cpuset *cs,
1875 struct cpumask *new_ecpus)
1876 {
1877 struct cgroup_subsys_state *css;
1878 struct cpuset *child;
1879 bool populated = partition_is_populated(cs, NULL);
1880
1881 /*
1882 * Check child partition roots to see if they should be
1883 * invalidated when:
1884 * 1) the child's effective_xcpus is not a subset of the new
1885 * exclusive_cpus, or
1886 * 2) all the effective_cpus will be used up and cs
1887 * has tasks.
1888 */
1889 compute_effective_exclusive_cpumask(cs, new_ecpus);
1890 cpumask_and(new_ecpus, new_ecpus, cpu_active_mask);
1891
1892 rcu_read_lock();
1893 cpuset_for_each_child(child, css, cs) {
1894 if (!is_partition_valid(child))
1895 continue;
1896
1897 child->prs_err = 0;
1898 if (!cpumask_subset(child->effective_xcpus,
1899 cs->effective_xcpus))
1900 child->prs_err = PERR_INVCPUS;
1901 else if (populated &&
1902 cpumask_subset(new_ecpus, child->effective_xcpus))
1903 child->prs_err = PERR_NOCPUS;
1904
1905 if (child->prs_err) {
1906 int old_prs = child->partition_root_state;
1907
1908 /*
1909 * Invalidate child partition
1910 */
1911 spin_lock_irq(&callback_lock);
1912 make_partition_invalid(child);
1913 cs->nr_subparts--;
1914 child->nr_subparts = 0;
1915 spin_unlock_irq(&callback_lock);
1916 notify_partition_change(child, old_prs);
1917 continue;
1918 }
1919 cpumask_andnot(new_ecpus, new_ecpus,
1920 child->effective_xcpus);
1921 }
1922 rcu_read_unlock();
1923 }
1924
1925 /*
1926 * update_cpumasks_hier() flags
1927 */
1928 #define HIER_CHECKALL 0x01 /* Check all cpusets with no skipping */
1929 #define HIER_NO_SD_REBUILD 0x02 /* Don't rebuild sched domains */
1930
1931 /*
1932 * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
1933 * @cs: the cpuset to consider
1934 * @tmp: temp variables for calculating effective_cpus & partition setup
1935 * @flags: HIER_* flags, e.g. HIER_CHECKALL to not skip any descendant cpusets
1936 *
1937 * When configured cpumask is changed, the effective cpumasks of this cpuset
1938 * and all its descendants need to be updated.
1939 *
1940 * On legacy hierarchy, effective_cpus will be the same as cpus_allowed.
1941 *
1942 * Called with cpuset_mutex held
1943 */
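/*
 * For illustration: a write that changes cpuset.cpus of cgroup A walks
 * A and its descendants in pre-order via cpuset_for_each_descendant_pre().
 * Subtrees whose effective cpumask would not change (and that meet the
 * other skip conditions below) are skipped wholesale with
 * css_rightmost_descendant().
 */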
1944 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
1945 int flags)
1946 {
1947 struct cpuset *cp;
1948 struct cgroup_subsys_state *pos_css;
1949 bool need_rebuild_sched_domains = false;
1950 int old_prs, new_prs;
1951
1952 rcu_read_lock();
1953 cpuset_for_each_descendant_pre(cp, pos_css, cs) {
1954 struct cpuset *parent = parent_cs(cp);
1955 bool remote = is_remote_partition(cp);
1956 bool update_parent = false;
1957
1958 /*
1959 * Skip a descendant remote partition that acquires CPUs
1960 * directly from top cpuset unless it is cs.
1961 */
1962 if (remote && (cp != cs)) {
1963 pos_css = css_rightmost_descendant(pos_css);
1964 continue;
1965 }
1966
1967 /*
1968 * Update effective_xcpus if exclusive_cpus set.
1969 * The case when exclusive_cpus isn't set is handled later.
1970 */
1971 if (!cpumask_empty(cp->exclusive_cpus) && (cp != cs)) {
1972 spin_lock_irq(&callback_lock);
1973 compute_effective_exclusive_cpumask(cp, NULL);
1974 spin_unlock_irq(&callback_lock);
1975 }
1976
1977 old_prs = new_prs = cp->partition_root_state;
1978 if (remote || (is_partition_valid(parent) &&
1979 is_partition_valid(cp)))
1980 compute_partition_effective_cpumask(cp, tmp->new_cpus);
1981 else
1982 compute_effective_cpumask(tmp->new_cpus, cp, parent);
1983
1984 /*
1985 * A partition with no effective_cpus is allowed as long as
1986 * there is no task associated with it. Call
1987 * update_parent_effective_cpumask() to check it.
1988 */
1989 if (is_partition_valid(cp) && cpumask_empty(tmp->new_cpus)) {
1990 update_parent = true;
1991 goto update_parent_effective;
1992 }
1993
1994 /*
1995 * If it becomes empty, inherit the effective mask of the
1996 * parent, which is guaranteed to have some CPUs unless
1997 * it is a partition root that has explicitly distributed
1998 * out all its CPUs.
1999 */
2000 if (is_in_v2_mode() && !remote && cpumask_empty(tmp->new_cpus))
2001 cpumask_copy(tmp->new_cpus, parent->effective_cpus);
2002
2003 if (remote)
2004 goto get_css;
2005
2006 /*
2007 * Skip the whole subtree if
2008 * 1) the cpumask remains the same,
2009 * 2) it has no partition root state,
2010 * 3) the HIER_CHECKALL flag is not set, and
2011 * 4) on v2, its load balance state is the same as its parent's.
2012 */
2013 if (!cp->partition_root_state && !(flags & HIER_CHECKALL) &&
2014 cpumask_equal(tmp->new_cpus, cp->effective_cpus) &&
2015 (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
2016 (is_sched_load_balance(parent) == is_sched_load_balance(cp)))) {
2017 pos_css = css_rightmost_descendant(pos_css);
2018 continue;
2019 }
2020
2021 update_parent_effective:
2022 /*
2023 * update_parent_effective_cpumask() should have been called
2024 * for cs already in update_cpumask(). We should also call
2025 * cpuset_update_tasks_cpumask() again for tasks in the parent
2026 * cpuset if the parent's effective_cpus changes.
2027 */
2028 if ((cp != cs) && old_prs) {
2029 switch (parent->partition_root_state) {
2030 case PRS_ROOT:
2031 case PRS_ISOLATED:
2032 update_parent = true;
2033 break;
2034
2035 default:
2036 /*
2037 * When parent is not a partition root or is
2038 * invalid, child partition roots become
2039 * invalid too.
2040 */
2041 if (is_partition_valid(cp))
2042 new_prs = -cp->partition_root_state;
2043 WRITE_ONCE(cp->prs_err,
2044 is_partition_invalid(parent)
2045 ? PERR_INVPARENT : PERR_NOTPART);
2046 break;
2047 }
2048 }
2049 get_css:
2050 if (!css_tryget_online(&cp->css))
2051 continue;
2052 rcu_read_unlock();
2053
2054 if (update_parent) {
2055 update_parent_effective_cpumask(cp, partcmd_update, NULL, tmp);
2056 /*
2057 * The cpuset partition_root_state may become
2058 * invalid. Capture it.
2059 */
2060 new_prs = cp->partition_root_state;
2061 }
2062
2063 spin_lock_irq(&callback_lock);
2064 cpumask_copy(cp->effective_cpus, tmp->new_cpus);
2065 cp->partition_root_state = new_prs;
2066 /*
2067 * Make sure effective_xcpus is properly set for a valid
2068 * partition root.
2069 */
2070 if ((new_prs > 0) && cpumask_empty(cp->exclusive_cpus))
2071 cpumask_and(cp->effective_xcpus,
2072 cp->cpus_allowed, parent->effective_xcpus);
2073 else if (new_prs < 0)
2074 reset_partition_data(cp);
2075 spin_unlock_irq(&callback_lock);
2076
2077 notify_partition_change(cp, old_prs);
2078
2079 WARN_ON(!is_in_v2_mode() &&
2080 !cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
2081
2082 cpuset_update_tasks_cpumask(cp, cp->effective_cpus);
2083
2084 /*
2085 * On default hierarchy, inherit the CS_SCHED_LOAD_BALANCE
2086 * from parent if current cpuset isn't a valid partition root
2087 * and their load balance states differ.
2088 */
2089 if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
2090 !is_partition_valid(cp) &&
2091 (is_sched_load_balance(parent) != is_sched_load_balance(cp))) {
2092 if (is_sched_load_balance(parent))
2093 set_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
2094 else
2095 clear_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
2096 }
2097
2098 /*
2099 * On legacy hierarchy, if the effective cpumask of any non-
2100 * empty cpuset is changed, we need to rebuild sched domains.
2101 * On default hierarchy, the cpuset needs to be a partition
2102 * root as well.
2103 */
2104 if (!cpumask_empty(cp->cpus_allowed) &&
2105 is_sched_load_balance(cp) &&
2106 (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
2107 is_partition_valid(cp)))
2108 need_rebuild_sched_domains = true;
2109
2110 rcu_read_lock();
2111 css_put(&cp->css);
2112 }
2113 rcu_read_unlock();
2114
2115 if (need_rebuild_sched_domains && !(flags & HIER_NO_SD_REBUILD) &&
2116 !force_sd_rebuild)
2117 rebuild_sched_domains_locked();
2118 }
2119
2120 /**
2121 * update_sibling_cpumasks - Update siblings cpumasks
2122 * @parent: Parent cpuset
2123 * @cs: Current cpuset
2124 * @tmp: Temp variables
2125 */
2126 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
2127 struct tmpmasks *tmp)
2128 {
2129 struct cpuset *sibling;
2130 struct cgroup_subsys_state *pos_css;
2131
2132 lockdep_assert_held(&cpuset_mutex);
2133
2134 /*
2135 * Check all its siblings and call update_cpumasks_hier()
2136 * if their effective_cpus will need to be changed.
2137 *
2138 * It is possible a change in parent's effective_cpus
2139 * due to a change in a child partition's effective_xcpus will impact
2140 * its siblings even if they do not inherit parent's effective_cpus
2141 * directly.
2142 *
2143 * The update_cpumasks_hier() function may sleep. So we have to
2144 * release the RCU read lock before calling it. HIER_NO_SD_REBUILD
2145 * flag is used to suppress rebuild of sched domains as the callers
2146 * will take care of that.
2147 */
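/*
 * Example (illustrative): if sibling B is not a partition root and
 * inherits the parent's effective_cpus, turning cs into a partition
 * that claims CPUs from the parent shrinks the parent's effective_cpus,
 * so B's effective_cpus must be recomputed even though B itself was
 * never written to.
 */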
2148 rcu_read_lock();
2149 cpuset_for_each_child(sibling, pos_css, parent) {
2150 if (sibling == cs)
2151 continue;
2152 if (!is_partition_valid(sibling)) {
2153 compute_effective_cpumask(tmp->new_cpus, sibling,
2154 parent);
2155 if (cpumask_equal(tmp->new_cpus, sibling->effective_cpus))
2156 continue;
2157 }
2158 if (!css_tryget_online(&sibling->css))
2159 continue;
2160
2161 rcu_read_unlock();
2162 update_cpumasks_hier(sibling, tmp, HIER_NO_SD_REBUILD);
2163 rcu_read_lock();
2164 css_put(&sibling->css);
2165 }
2166 rcu_read_unlock();
2167 }
2168
2169 /**
2170 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
2171 * @cs: the cpuset to consider
2172 * @trialcs: trial cpuset
2173 * @buf: buffer of cpu numbers written to this cpuset
2174 */
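/*
 * Illustrative usage (example path) on the default hierarchy:
 *
 *   echo "0-3,8" > /sys/fs/cgroup/A/cpuset.cpus
 *
 * cpuset_write_resmask() strips the buffer and dispatches here with
 * buf = "0-3,8"; cpulist_parse() then fills trialcs->cpus_allowed with
 * CPUs 0-3 and 8 before validation.
 */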
2175 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
2176 const char *buf)
2177 {
2178 int retval;
2179 struct tmpmasks tmp;
2180 struct cpuset *parent = parent_cs(cs);
2181 bool invalidate = false;
2182 int hier_flags = 0;
2183 int old_prs = cs->partition_root_state;
2184
2185 /* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
2186 if (cs == &top_cpuset)
2187 return -EACCES;
2188
2189 /*
2190 * An empty cpus_allowed is ok only if the cpuset has no tasks.
2191 * Since cpulist_parse() fails on an empty mask, we special case
2192 * that parsing. The validate_change() call ensures that cpusets
2193 * with tasks have cpus.
2194 */
2195 if (!*buf) {
2196 cpumask_clear(trialcs->cpus_allowed);
2197 if (cpumask_empty(trialcs->exclusive_cpus))
2198 cpumask_clear(trialcs->effective_xcpus);
2199 } else {
2200 retval = cpulist_parse(buf, trialcs->cpus_allowed);
2201 if (retval < 0)
2202 return retval;
2203
2204 if (!cpumask_subset(trialcs->cpus_allowed,
2205 top_cpuset.cpus_allowed))
2206 return -EINVAL;
2207
2208 /*
2209 * When exclusive_cpus isn't explicitly set, it is constrained
2210 * by cpus_allowed and parent's effective_xcpus. Otherwise,
2211 * trialcs->effective_xcpus is used as a temporary cpumask
2212 * for checking validity of the partition root.
2213 */
2214 if (!cpumask_empty(trialcs->exclusive_cpus) || is_partition_valid(cs))
2215 compute_effective_exclusive_cpumask(trialcs, NULL);
2216 }
2217
2218 /* Nothing to do if the cpus didn't change */
2219 if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
2220 return 0;
2221
2222 if (alloc_cpumasks(NULL, &tmp))
2223 return -ENOMEM;
2224
2225 if (old_prs) {
2226 if (is_partition_valid(cs) &&
2227 cpumask_empty(trialcs->effective_xcpus)) {
2228 invalidate = true;
2229 cs->prs_err = PERR_INVCPUS;
2230 } else if (prstate_housekeeping_conflict(old_prs, trialcs->effective_xcpus)) {
2231 invalidate = true;
2232 cs->prs_err = PERR_HKEEPING;
2233 } else if (tasks_nocpu_error(parent, cs, trialcs->effective_xcpus)) {
2234 invalidate = true;
2235 cs->prs_err = PERR_NOCPUS;
2236 }
2237 }
2238
2239 /*
2240 * Check all the descendants in update_cpumasks_hier() if
2241 * effective_xcpus is to be changed.
2242 */
2243 if (!cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus))
2244 hier_flags = HIER_CHECKALL;
2245
2246 retval = validate_change(cs, trialcs);
2247
2248 if ((retval == -EINVAL) && cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
2249 struct cgroup_subsys_state *css;
2250 struct cpuset *cp;
2251
2252 /*
2253 * The -EINVAL error code indicates that the partition sibling
2254 * CPU exclusivity rule has been violated. We still allow
2255 * the cpumask change to proceed while invalidating the
2256 * partition. However, any conflicting sibling partitions
2257 * have to be marked as invalid too.
2258 */
2259 invalidate = true;
2260 rcu_read_lock();
2261 cpuset_for_each_child(cp, css, parent) {
2262 struct cpumask *xcpus = user_xcpus(trialcs);
2263
2264 if (is_partition_valid(cp) &&
2265 cpumask_intersects(xcpus, cp->effective_xcpus)) {
2266 rcu_read_unlock();
2267 update_parent_effective_cpumask(cp, partcmd_invalidate, NULL, &tmp);
2268 rcu_read_lock();
2269 }
2270 }
2271 rcu_read_unlock();
2272 retval = 0;
2273 }
2274
2275 if (retval < 0)
2276 goto out_free;
2277
2278 if (is_partition_valid(cs) ||
2279 (is_partition_invalid(cs) && !invalidate)) {
2280 struct cpumask *xcpus = trialcs->effective_xcpus;
2281
2282 if (cpumask_empty(xcpus) && is_partition_invalid(cs))
2283 xcpus = trialcs->cpus_allowed;
2284
2285 /*
2286 * Call remote_cpus_update() to handle valid remote partition
2287 */
2288 if (is_remote_partition(cs))
2289 remote_cpus_update(cs, xcpus, &tmp);
2290 else if (invalidate)
2291 update_parent_effective_cpumask(cs, partcmd_invalidate,
2292 NULL, &tmp);
2293 else
2294 update_parent_effective_cpumask(cs, partcmd_update,
2295 xcpus, &tmp);
2296 } else if (!cpumask_empty(cs->exclusive_cpus)) {
2297 /*
2298 * Use trialcs->effective_cpus as a temp cpumask
2299 */
2300 remote_partition_check(cs, trialcs->effective_xcpus,
2301 trialcs->effective_cpus, &tmp);
2302 }
2303
2304 spin_lock_irq(&callback_lock);
2305 cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
2306 cpumask_copy(cs->effective_xcpus, trialcs->effective_xcpus);
2307 if ((old_prs > 0) && !is_partition_valid(cs))
2308 reset_partition_data(cs);
2309 spin_unlock_irq(&callback_lock);
2310
2311 /* effective_cpus/effective_xcpus will be updated here */
2312 update_cpumasks_hier(cs, &tmp, hier_flags);
2313
2314 /* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */
2315 if (cs->partition_root_state)
2316 update_partition_sd_lb(cs, old_prs);
2317 out_free:
2318 free_cpumasks(NULL, &tmp);
2319 return retval;
2320 }
2321
2322 /**
2323 * update_exclusive_cpumask - update the exclusive_cpus mask of a cpuset
2324 * @cs: the cpuset to consider
2325 * @trialcs: trial cpuset
2326 * @buf: buffer of cpu numbers written to this cpuset
2327 *
2328 * The tasks' cpumask will be updated if cs is a valid partition root.
2329 */
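/*
 * Illustrative usage (example path):
 *
 *   echo "4-7" > /sys/fs/cgroup/A/cpuset.cpus.exclusive
 *
 * requests CPUs 4-7 for exclusive use; the resulting effective_xcpus is
 * this list constrained by the parent's effective_xcpus and is reported
 * in cpuset.cpus.exclusive.effective.
 */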
2330 static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs,
2331 const char *buf)
2332 {
2333 int retval;
2334 struct tmpmasks tmp;
2335 struct cpuset *parent = parent_cs(cs);
2336 bool invalidate = false;
2337 int hier_flags = 0;
2338 int old_prs = cs->partition_root_state;
2339
2340 if (!*buf) {
2341 cpumask_clear(trialcs->exclusive_cpus);
2342 cpumask_clear(trialcs->effective_xcpus);
2343 } else {
2344 retval = cpulist_parse(buf, trialcs->exclusive_cpus);
2345 if (retval < 0)
2346 return retval;
2347 }
2348
2349 /* Nothing to do if the CPUs didn't change */
2350 if (cpumask_equal(cs->exclusive_cpus, trialcs->exclusive_cpus))
2351 return 0;
2352
2353 if (*buf)
2354 compute_effective_exclusive_cpumask(trialcs, NULL);
2355
2356 /*
2357 * Check all the descendants in update_cpumasks_hier() if
2358 * effective_xcpus is to be changed.
2359 */
2360 if (!cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus))
2361 hier_flags = HIER_CHECKALL;
2362
2363 retval = validate_change(cs, trialcs);
2364 if (retval)
2365 return retval;
2366
2367 if (alloc_cpumasks(NULL, &tmp))
2368 return -ENOMEM;
2369
2370 if (old_prs) {
2371 if (cpumask_empty(trialcs->effective_xcpus)) {
2372 invalidate = true;
2373 cs->prs_err = PERR_INVCPUS;
2374 } else if (prstate_housekeeping_conflict(old_prs, trialcs->effective_xcpus)) {
2375 invalidate = true;
2376 cs->prs_err = PERR_HKEEPING;
2377 } else if (tasks_nocpu_error(parent, cs, trialcs->effective_xcpus)) {
2378 invalidate = true;
2379 cs->prs_err = PERR_NOCPUS;
2380 }
2381
2382 if (is_remote_partition(cs)) {
2383 if (invalidate)
2384 remote_partition_disable(cs, &tmp);
2385 else
2386 remote_cpus_update(cs, trialcs->effective_xcpus,
2387 &tmp);
2388 } else if (invalidate) {
2389 update_parent_effective_cpumask(cs, partcmd_invalidate,
2390 NULL, &tmp);
2391 } else {
2392 update_parent_effective_cpumask(cs, partcmd_update,
2393 trialcs->effective_xcpus, &tmp);
2394 }
2395 } else if (!cpumask_empty(trialcs->exclusive_cpus)) {
2396 /*
2397 * Use trialcs->effective_cpus as a temp cpumask
2398 */
2399 remote_partition_check(cs, trialcs->effective_xcpus,
2400 trialcs->effective_cpus, &tmp);
2401 }
2402 spin_lock_irq(&callback_lock);
2403 cpumask_copy(cs->exclusive_cpus, trialcs->exclusive_cpus);
2404 cpumask_copy(cs->effective_xcpus, trialcs->effective_xcpus);
2405 if ((old_prs > 0) && !is_partition_valid(cs))
2406 reset_partition_data(cs);
2407 spin_unlock_irq(&callback_lock);
2408
2409 /*
2410 * Call update_cpumasks_hier() to update effective_cpus/effective_xcpus
2411 * of the subtree when it is a valid partition root or effective_xcpus
2412 * is updated.
2413 */
2414 if (is_partition_valid(cs) || hier_flags)
2415 update_cpumasks_hier(cs, &tmp, hier_flags);
2416
2417 /* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */
2418 if (cs->partition_root_state)
2419 update_partition_sd_lb(cs, old_prs);
2420
2421 free_cpumasks(NULL, &tmp);
2422 return 0;
2423 }
2424
2425 /*
2426 * Migrate memory region from one set of nodes to another. This is
2427 * performed asynchronously as it can be called from the process migration
2428 * path while holding locks involved in process management. All mm migrations are
2429 * performed in the queued order and can be waited for by flushing
2430 * cpuset_migrate_mm_wq.
2431 */
2432
2433 struct cpuset_migrate_mm_work {
2434 struct work_struct work;
2435 struct mm_struct *mm;
2436 nodemask_t from;
2437 nodemask_t to;
2438 };
2439
2440 static void cpuset_migrate_mm_workfn(struct work_struct *work)
2441 {
2442 struct cpuset_migrate_mm_work *mwork =
2443 container_of(work, struct cpuset_migrate_mm_work, work);
2444
2445 /* on a wq worker, no need to worry about %current's mems_allowed */
2446 do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
2447 mmput(mwork->mm);
2448 kfree(mwork);
2449 }
2450
2451 static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
2452 const nodemask_t *to)
2453 {
2454 struct cpuset_migrate_mm_work *mwork;
2455
2456 if (nodes_equal(*from, *to)) {
2457 mmput(mm);
2458 return;
2459 }
2460
2461 mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
2462 if (mwork) {
2463 mwork->mm = mm;
2464 mwork->from = *from;
2465 mwork->to = *to;
2466 INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
2467 queue_work(cpuset_migrate_mm_wq, &mwork->work);
2468 } else {
2469 mmput(mm);
2470 }
2471 }
2472
2473 static void cpuset_post_attach(void)
2474 {
2475 flush_workqueue(cpuset_migrate_mm_wq);
2476 }
2477
2478 /*
2479 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
2480 * @tsk: the task to change
2481 * @newmems: new nodes that the task will be set
2482 *
2483 * We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed
2484 * and to rebind the task's mempolicy, if any. If the task is allocating in
2485 * parallel, it might temporarily see an empty intersection, which results in
2486 * a seqlock check and retry before OOM or allocation failure.
2487 */
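/*
 * Readers of mems_allowed, such as the page allocator, pair this writer
 * with read_mems_allowed_begin()/read_mems_allowed_retry() so that an
 * allocation racing with the update simply retries.
 */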
2488 static void cpuset_change_task_nodemask(struct task_struct *tsk,
2489 nodemask_t *newmems)
2490 {
2491 task_lock(tsk);
2492
2493 local_irq_disable();
2494 write_seqcount_begin(&tsk->mems_allowed_seq);
2495
2496 nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
2497 mpol_rebind_task(tsk, newmems);
2498 tsk->mems_allowed = *newmems;
2499
2500 write_seqcount_end(&tsk->mems_allowed_seq);
2501 local_irq_enable();
2502
2503 task_unlock(tsk);
2504 }
2505
2506 static void *cpuset_being_rebound;
2507
2508 /**
2509 * cpuset_update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
2510 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
2511 *
2512 * Iterate through each task of @cs updating its mems_allowed to the
2513 * effective cpuset's. As this function is called with cpuset_mutex held,
2514 * cpuset membership stays stable.
2515 */
2516 void cpuset_update_tasks_nodemask(struct cpuset *cs)
2517 {
2518 static nodemask_t newmems; /* protected by cpuset_mutex */
2519 struct css_task_iter it;
2520 struct task_struct *task;
2521
2522 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */
2523
2524 guarantee_online_mems(cs, &newmems);
2525
2526 /*
2527 * The mpol_rebind_mm() call takes mmap_lock, which we couldn't
2528 * take while holding tasklist_lock. Forks can happen - the
2529 * mpol_dup() cpuset_being_rebound check will catch such forks,
2530 * and rebind their vma mempolicies too. Because we still hold
2531 * the global cpuset_mutex, we know that no other rebind effort
2532 * will be contending for the global variable cpuset_being_rebound.
2533 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
2534 * is idempotent. Also migrate pages in each mm to new nodes.
2535 */
2536 css_task_iter_start(&cs->css, 0, &it);
2537 while ((task = css_task_iter_next(&it))) {
2538 struct mm_struct *mm;
2539 bool migrate;
2540
2541 cpuset_change_task_nodemask(task, &newmems);
2542
2543 mm = get_task_mm(task);
2544 if (!mm)
2545 continue;
2546
2547 migrate = is_memory_migrate(cs);
2548
2549 mpol_rebind_mm(mm, &cs->mems_allowed);
2550 if (migrate)
2551 cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
2552 else
2553 mmput(mm);
2554 }
2555 css_task_iter_end(&it);
2556
2557 /*
2558 * All the tasks' nodemasks have been updated, update
2559 * cs->old_mems_allowed.
2560 */
2561 cs->old_mems_allowed = newmems;
2562
2563 /* We're done rebinding vmas to this cpuset's new mems_allowed. */
2564 cpuset_being_rebound = NULL;
2565 }
2566
2567 /*
2568 * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree
2569 * @cs: the cpuset to consider
2570 * @new_mems: a temp variable for calculating new effective_mems
2571 *
2572 * When configured nodemask is changed, the effective nodemasks of this cpuset
2573 * and all its descendants need to be updated.
2574 *
2575 * On legacy hierarchy, effective_mems will be the same with mems_allowed.
2576 *
2577 * Called with cpuset_mutex held
2578 */
2579 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
2580 {
2581 struct cpuset *cp;
2582 struct cgroup_subsys_state *pos_css;
2583
2584 rcu_read_lock();
2585 cpuset_for_each_descendant_pre(cp, pos_css, cs) {
2586 struct cpuset *parent = parent_cs(cp);
2587
2588 nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);
2589
2590 /*
2591 * If it becomes empty, inherit the effective mask of the
2592 * parent, which is guaranteed to have some MEMs.
2593 */
2594 if (is_in_v2_mode() && nodes_empty(*new_mems))
2595 *new_mems = parent->effective_mems;
2596
2597 /* Skip the whole subtree if the nodemask remains the same. */
2598 if (nodes_equal(*new_mems, cp->effective_mems)) {
2599 pos_css = css_rightmost_descendant(pos_css);
2600 continue;
2601 }
2602
2603 if (!css_tryget_online(&cp->css))
2604 continue;
2605 rcu_read_unlock();
2606
2607 spin_lock_irq(&callback_lock);
2608 cp->effective_mems = *new_mems;
2609 spin_unlock_irq(&callback_lock);
2610
2611 WARN_ON(!is_in_v2_mode() &&
2612 !nodes_equal(cp->mems_allowed, cp->effective_mems));
2613
2614 cpuset_update_tasks_nodemask(cp);
2615
2616 rcu_read_lock();
2617 css_put(&cp->css);
2618 }
2619 rcu_read_unlock();
2620 }
2621
2622 /*
2623 * Handle a user request to change the 'mems' memory placement
2624 * of a cpuset. Needs to validate the request, update the
2625 * cpuset's mems_allowed, and for each task in the cpuset,
2626 * update mems_allowed, rebind the task's mempolicy and any vma
2627 * mempolicies, and, if the cpuset is marked 'memory_migrate',
2628 * migrate the task's pages to the new memory.
2629 *
2630 * Call with cpuset_mutex held. May take callback_lock during call.
2631 * Will take tasklist_lock, scan the tasklist for tasks in cpuset cs,
2632 * lock each such task's mm->mmap_lock, scan its VMAs and rebind
2633 * their mempolicies to the cpuset's new mems_allowed.
2634 */
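/*
 * Illustrative usage (example path):
 *
 *   echo "0-1" > /sys/fs/cgroup/A/cpuset.mems
 *
 * reaches this function with buf = "0-1"; nodelist_parse() fills
 * trialcs->mems_allowed with memory nodes 0 and 1 before validation.
 */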
2635 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
2636 const char *buf)
2637 {
2638 int retval;
2639
2640 /*
2641 * top_cpuset.mems_allowed tracks node_states[N_MEMORY];
2642 * it's read-only
2643 */
2644 if (cs == &top_cpuset) {
2645 retval = -EACCES;
2646 goto done;
2647 }
2648
2649 /*
2650 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
2651 * Since nodelist_parse() fails on an empty mask, we special case
2652 * that parsing. The validate_change() call ensures that cpusets
2653 * with tasks have memory.
2654 */
2655 if (!*buf) {
2656 nodes_clear(trialcs->mems_allowed);
2657 } else {
2658 retval = nodelist_parse(buf, trialcs->mems_allowed);
2659 if (retval < 0)
2660 goto done;
2661
2662 if (!nodes_subset(trialcs->mems_allowed,
2663 top_cpuset.mems_allowed)) {
2664 retval = -EINVAL;
2665 goto done;
2666 }
2667 }
2668
2669 if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) {
2670 retval = 0; /* Too easy - nothing to do */
2671 goto done;
2672 }
2673 retval = validate_change(cs, trialcs);
2674 if (retval < 0)
2675 goto done;
2676
2677 check_insane_mems_config(&trialcs->mems_allowed);
2678
2679 spin_lock_irq(&callback_lock);
2680 cs->mems_allowed = trialcs->mems_allowed;
2681 spin_unlock_irq(&callback_lock);
2682
2683 /* use trialcs->mems_allowed as a temp variable */
2684 update_nodemasks_hier(cs, &trialcs->mems_allowed);
2685 done:
2686 return retval;
2687 }
2688
2689 bool current_cpuset_is_being_rebound(void)
2690 {
2691 bool ret;
2692
2693 rcu_read_lock();
2694 ret = task_cs(current) == cpuset_being_rebound;
2695 rcu_read_unlock();
2696
2697 return ret;
2698 }
2699
2700 /*
2701 * cpuset_update_flag - read a 0 or a 1 in a file and update associated flag
2702 * bit: the bit to update (see cpuset_flagbits_t)
2703 * cs: the cpuset to update
2704 * turning_on: whether the flag is being set or cleared
2705 *
2706 * Call with cpuset_mutex held.
2707 */
2708
2709 int cpuset_update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
2710 int turning_on)
2711 {
2712 struct cpuset *trialcs;
2713 int balance_flag_changed;
2714 int spread_flag_changed;
2715 int err;
2716
2717 trialcs = alloc_trial_cpuset(cs);
2718 if (!trialcs)
2719 return -ENOMEM;
2720
2721 if (turning_on)
2722 set_bit(bit, &trialcs->flags);
2723 else
2724 clear_bit(bit, &trialcs->flags);
2725
2726 err = validate_change(cs, trialcs);
2727 if (err < 0)
2728 goto out;
2729
2730 balance_flag_changed = (is_sched_load_balance(cs) !=
2731 is_sched_load_balance(trialcs));
2732
2733 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
2734 || (is_spread_page(cs) != is_spread_page(trialcs)));
2735
2736 spin_lock_irq(&callback_lock);
2737 cs->flags = trialcs->flags;
2738 spin_unlock_irq(&callback_lock);
2739
2740 if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed &&
2741 !force_sd_rebuild)
2742 rebuild_sched_domains_locked();
2743
2744 if (spread_flag_changed)
2745 cpuset1_update_tasks_flags(cs);
2746 out:
2747 free_cpuset(trialcs);
2748 return err;
2749 }
2750
2751 /**
2752 * update_prstate - update partition_root_state
2753 * @cs: the cpuset to update
2754 * @new_prs: new partition root state
2755 * Return: 0 if successful, != 0 if error
2756 *
2757 * Call with cpuset_mutex held.
2758 */
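/*
 * Illustrative transitions driven by writes to cpuset.cpus.partition
 * (the string parsing is done in sched_partition_write() below):
 *
 *   "member"   -> new_prs = PRS_MEMBER
 *   "root"     -> new_prs = PRS_ROOT
 *   "isolated" -> new_prs = PRS_ISOLATED
 *
 * e.g. going from "member" to "root" takes the !old_prs branch below and
 * enables either a local partition (parent is a valid partition) or a
 * remote one.
 */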
2759 static int update_prstate(struct cpuset *cs, int new_prs)
2760 {
2761 int err = PERR_NONE, old_prs = cs->partition_root_state;
2762 struct cpuset *parent = parent_cs(cs);
2763 struct tmpmasks tmpmask;
2764 bool new_xcpus_state = false;
2765
2766 if (old_prs == new_prs)
2767 return 0;
2768
2769 /*
2770 * Treat a previously invalid partition root as if it is a "member".
2771 */
2772 if (new_prs && is_prs_invalid(old_prs))
2773 old_prs = PRS_MEMBER;
2774
2775 if (alloc_cpumasks(NULL, &tmpmask))
2776 return -ENOMEM;
2777
2778 /*
2779 * Set up effective_xcpus if not properly set yet; it will be cleared
2780 * later if the partition becomes invalid.
2781 */
2782 if ((new_prs > 0) && cpumask_empty(cs->exclusive_cpus)) {
2783 spin_lock_irq(&callback_lock);
2784 cpumask_and(cs->effective_xcpus,
2785 cs->cpus_allowed, parent->effective_xcpus);
2786 spin_unlock_irq(&callback_lock);
2787 }
2788
2789 err = update_partition_exclusive(cs, new_prs);
2790 if (err)
2791 goto out;
2792
2793 if (!old_prs) {
2794 /*
2795 * cpus_allowed and exclusive_cpus cannot be both empty.
2796 */
2797 if (xcpus_empty(cs)) {
2798 err = PERR_CPUSEMPTY;
2799 goto out;
2800 }
2801
2802 /*
2803 * If the parent is a valid partition, enable a local partition.
2804 * Otherwise, enable a remote partition.
2805 */
2806 if (is_partition_valid(parent)) {
2807 enum partition_cmd cmd = (new_prs == PRS_ROOT)
2808 ? partcmd_enable : partcmd_enablei;
2809
2810 err = update_parent_effective_cpumask(cs, cmd, NULL, &tmpmask);
2811 } else {
2812 err = remote_partition_enable(cs, new_prs, &tmpmask);
2813 }
2814 } else if (old_prs && new_prs) {
2815 /*
2816 * A change in load balance state only, no change in cpumasks.
2817 */
2818 new_xcpus_state = true;
2819 } else {
2820 /*
2821 * Switching back to member is always allowed even if it
2822 * disables child partitions.
2823 */
2824 if (is_remote_partition(cs))
2825 remote_partition_disable(cs, &tmpmask);
2826 else
2827 update_parent_effective_cpumask(cs, partcmd_disable,
2828 NULL, &tmpmask);
2829
2830 /*
2831 * Invalidation of child partitions will be done in
2832 * update_cpumasks_hier().
2833 */
2834 }
2835 out:
2836 /*
2837 * Make partition invalid & disable CS_CPU_EXCLUSIVE if an error
2838 * happens.
2839 */
2840 if (err) {
2841 new_prs = -new_prs;
2842 update_partition_exclusive(cs, new_prs);
2843 }
2844
2845 spin_lock_irq(&callback_lock);
2846 cs->partition_root_state = new_prs;
2847 WRITE_ONCE(cs->prs_err, err);
2848 if (!is_partition_valid(cs))
2849 reset_partition_data(cs);
2850 else if (new_xcpus_state)
2851 partition_xcpus_newstate(old_prs, new_prs, cs->effective_xcpus);
2852 spin_unlock_irq(&callback_lock);
2853 update_unbound_workqueue_cpumask(new_xcpus_state);
2854
2855 /* Force update if switching back to member */
2856 update_cpumasks_hier(cs, &tmpmask, !new_prs ? HIER_CHECKALL : 0);
2857
2858 /* Update sched domains and load balance flag */
2859 update_partition_sd_lb(cs, old_prs);
2860
2861 notify_partition_change(cs, old_prs);
2862 free_cpumasks(NULL, &tmpmask);
2863 return 0;
2864 }
2865
2866 static struct cpuset *cpuset_attach_old_cs;
2867
2868 /*
2869 * Check to see if a cpuset can accept a new task
2870 * For v1, cpus_allowed and mems_allowed can't be empty.
2871 * For v2, effective_cpus can't be empty.
2872 * Note that in v1, effective_cpus = cpus_allowed.
2873 */
2874 static int cpuset_can_attach_check(struct cpuset *cs)
2875 {
2876 if (cpumask_empty(cs->effective_cpus) ||
2877 (!is_in_v2_mode() && nodes_empty(cs->mems_allowed)))
2878 return -ENOSPC;
2879 return 0;
2880 }
2881
2882 static void reset_migrate_dl_data(struct cpuset *cs)
2883 {
2884 cs->nr_migrate_dl_tasks = 0;
2885 cs->sum_migrate_dl_bw = 0;
2886 }
2887
2888 /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
2889 static int cpuset_can_attach(struct cgroup_taskset *tset)
2890 {
2891 struct cgroup_subsys_state *css;
2892 struct cpuset *cs, *oldcs;
2893 struct task_struct *task;
2894 bool cpus_updated, mems_updated;
2895 int ret;
2896
2897 /* used later by cpuset_attach() */
2898 cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
2899 oldcs = cpuset_attach_old_cs;
2900 cs = css_cs(css);
2901
2902 mutex_lock(&cpuset_mutex);
2903
2904 /* Check to see if task is allowed in the cpuset */
2905 ret = cpuset_can_attach_check(cs);
2906 if (ret)
2907 goto out_unlock;
2908
2909 cpus_updated = !cpumask_equal(cs->effective_cpus, oldcs->effective_cpus);
2910 mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems);
2911
2912 cgroup_taskset_for_each(task, css, tset) {
2913 ret = task_can_attach(task);
2914 if (ret)
2915 goto out_unlock;
2916
2917 /*
2918 * Skip the rights-over-task check in v2 when nothing changes;
2919 * migration permission derives from hierarchy ownership in
2920 * cgroup_procs_write_permission().
2921 */
2922 if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
2923 (cpus_updated || mems_updated)) {
2924 ret = security_task_setscheduler(task);
2925 if (ret)
2926 goto out_unlock;
2927 }
2928
2929 if (dl_task(task)) {
2930 cs->nr_migrate_dl_tasks++;
2931 cs->sum_migrate_dl_bw += task->dl.dl_bw;
2932 }
2933 }
2934
2935 if (!cs->nr_migrate_dl_tasks)
2936 goto out_success;
2937
2938 if (!cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus)) {
2939 int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus);
2940
2941 if (unlikely(cpu >= nr_cpu_ids)) {
2942 reset_migrate_dl_data(cs);
2943 ret = -EINVAL;
2944 goto out_unlock;
2945 }
2946
2947 ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw);
2948 if (ret) {
2949 reset_migrate_dl_data(cs);
2950 goto out_unlock;
2951 }
2952 }
2953
2954 out_success:
2955 /*
2956 * Mark attach is in progress. This makes validate_change() fail
2957 * changes which zero cpus/mems_allowed.
2958 */
2959 cs->attach_in_progress++;
2960 out_unlock:
2961 mutex_unlock(&cpuset_mutex);
2962 return ret;
2963 }
2964
2965 static void cpuset_cancel_attach(struct cgroup_taskset *tset)
2966 {
2967 struct cgroup_subsys_state *css;
2968 struct cpuset *cs;
2969
2970 cgroup_taskset_first(tset, &css);
2971 cs = css_cs(css);
2972
2973 mutex_lock(&cpuset_mutex);
2974 dec_attach_in_progress_locked(cs);
2975
2976 if (cs->nr_migrate_dl_tasks) {
2977 int cpu = cpumask_any(cs->effective_cpus);
2978
2979 dl_bw_free(cpu, cs->sum_migrate_dl_bw);
2980 reset_migrate_dl_data(cs);
2981 }
2982
2983 mutex_unlock(&cpuset_mutex);
2984 }
2985
2986 /*
2987 * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach_task()
2988 * but we can't allocate it dynamically there. Define it as a global and
2989 * allocate it from cpuset_init().
2990 */
2991 static cpumask_var_t cpus_attach;
2992 static nodemask_t cpuset_attach_nodemask_to;
2993
2994 static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task)
2995 {
2996 lockdep_assert_held(&cpuset_mutex);
2997
2998 if (cs != &top_cpuset)
2999 guarantee_online_cpus(task, cpus_attach);
3000 else
3001 cpumask_andnot(cpus_attach, task_cpu_possible_mask(task),
3002 subpartitions_cpus);
3003 /*
3004 * can_attach beforehand should guarantee that this doesn't
3005 * fail. TODO: have a better way to handle failure here
3006 */
3007 WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
3008
3009 cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
3010 cpuset1_update_task_spread_flags(cs, task);
3011 }
3012
3013 static void cpuset_attach(struct cgroup_taskset *tset)
3014 {
3015 struct task_struct *task;
3016 struct task_struct *leader;
3017 struct cgroup_subsys_state *css;
3018 struct cpuset *cs;
3019 struct cpuset *oldcs = cpuset_attach_old_cs;
3020 bool cpus_updated, mems_updated;
3021
3022 cgroup_taskset_first(tset, &css);
3023 cs = css_cs(css);
3024
3025 lockdep_assert_cpus_held(); /* see cgroup_attach_lock() */
3026 mutex_lock(&cpuset_mutex);
3027 cpus_updated = !cpumask_equal(cs->effective_cpus,
3028 oldcs->effective_cpus);
3029 mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems);
3030
3031 /*
3032 * In the default hierarchy, enabling cpuset in the child cgroups
3033 * will trigger a number of cpuset_attach() calls with no change
3034 * in effective cpus and mems. In that case, we can optimize out
3035 * by skipping the task iteration and update.
3036 */
3037 if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
3038 !cpus_updated && !mems_updated) {
3039 cpuset_attach_nodemask_to = cs->effective_mems;
3040 goto out;
3041 }
3042
3043 guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
3044
3045 cgroup_taskset_for_each(task, css, tset)
3046 cpuset_attach_task(cs, task);
3047
3048 /*
3049 * Change mm for all threadgroup leaders. This is expensive and may
3050 * sleep and should be moved outside migration path proper. Skip it
3051 * if there is no change in effective_mems and CS_MEMORY_MIGRATE is
3052 * not set.
3053 */
3054 cpuset_attach_nodemask_to = cs->effective_mems;
3055 if (!is_memory_migrate(cs) && !mems_updated)
3056 goto out;
3057
3058 cgroup_taskset_for_each_leader(leader, css, tset) {
3059 struct mm_struct *mm = get_task_mm(leader);
3060
3061 if (mm) {
3062 mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
3063
3064 /*
3065 * old_mems_allowed is the same as mems_allowed
3066 * here, except if this task is being moved
3067 * automatically due to hotplug. In that case
3068 * @mems_allowed has been updated and is empty, so
3069 * @old_mems_allowed is the right nodemask to
3070 * migrate the mm from.
3071 */
3072 if (is_memory_migrate(cs))
3073 cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
3074 &cpuset_attach_nodemask_to);
3075 else
3076 mmput(mm);
3077 }
3078 }
3079
3080 out:
3081 cs->old_mems_allowed = cpuset_attach_nodemask_to;
3082
3083 if (cs->nr_migrate_dl_tasks) {
3084 cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks;
3085 oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks;
3086 reset_migrate_dl_data(cs);
3087 }
3088
3089 dec_attach_in_progress_locked(cs);
3090
3091 mutex_unlock(&cpuset_mutex);
3092 }
3093
3094 /*
3095 * Common handling for a write to a "cpus" or "mems" file.
3096 */
3097 ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
3098 char *buf, size_t nbytes, loff_t off)
3099 {
3100 struct cpuset *cs = css_cs(of_css(of));
3101 struct cpuset *trialcs;
3102 int retval = -ENODEV;
3103
3104 buf = strstrip(buf);
3105
3106 /*
3107 * CPU or memory hotunplug may leave @cs w/o any execution
3108 * resources, in which case the hotplug code asynchronously updates
3109 * configuration and transfers all tasks to the nearest ancestor
3110 * which can execute.
3111 *
3112 * As writes to "cpus" or "mems" may restore @cs's execution
3113 * resources, wait for the previously scheduled operations before
3114 * proceeding, so that we don't end up repeatedly removing tasks added
3115 * after execution capability is restored.
3116 *
3117 * cpuset_handle_hotplug may call back into cgroup core asynchronously
3118 * via cgroup_transfer_tasks() and waiting for it from a cgroupfs
3119 * operation like this one can lead to a deadlock through kernfs
3120 * active_ref protection. Let's break the protection. Losing the
3121 * protection is okay as we check whether @cs is online after
3122 * grabbing cpuset_mutex anyway. This only happens on the legacy
3123 * hierarchies.
3124 */
3125 css_get(&cs->css);
3126 kernfs_break_active_protection(of->kn);
3127
3128 cpus_read_lock();
3129 mutex_lock(&cpuset_mutex);
3130 if (!is_cpuset_online(cs))
3131 goto out_unlock;
3132
3133 trialcs = alloc_trial_cpuset(cs);
3134 if (!trialcs) {
3135 retval = -ENOMEM;
3136 goto out_unlock;
3137 }
3138
3139 switch (of_cft(of)->private) {
3140 case FILE_CPULIST:
3141 retval = update_cpumask(cs, trialcs, buf);
3142 break;
3143 case FILE_EXCLUSIVE_CPULIST:
3144 retval = update_exclusive_cpumask(cs, trialcs, buf);
3145 break;
3146 case FILE_MEMLIST:
3147 retval = update_nodemask(cs, trialcs, buf);
3148 break;
3149 default:
3150 retval = -EINVAL;
3151 break;
3152 }
3153
3154 free_cpuset(trialcs);
3155 out_unlock:
3156 mutex_unlock(&cpuset_mutex);
3157 cpus_read_unlock();
3158 kernfs_unbreak_active_protection(of->kn);
3159 css_put(&cs->css);
3160 flush_workqueue(cpuset_migrate_mm_wq);
3161 return retval ?: nbytes;
3162 }
3163
3164 /*
3165 * These ascii lists should be read in a single call, by using a user
3166 * buffer large enough to hold the entire map. If read in smaller
3167 * chunks, there is no guarantee of atomicity. Since the display format
3168 * used, list of ranges of sequential numbers, is variable length,
3169 * and since these maps can change value dynamically, one could read
3170 * gibberish by doing partial reads while a list was changing.
3171 */
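/*
 * Example read (illustrative path):
 *
 *   $ cat /sys/fs/cgroup/A/cpuset.cpus.effective
 *   0-3,8-11
 *
 * The whole list is emitted by a single seq_printf() call below while
 * holding callback_lock, so a read with a large enough buffer sees a
 * consistent snapshot.
 */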
3172 int cpuset_common_seq_show(struct seq_file *sf, void *v)
3173 {
3174 struct cpuset *cs = css_cs(seq_css(sf));
3175 cpuset_filetype_t type = seq_cft(sf)->private;
3176 int ret = 0;
3177
3178 spin_lock_irq(&callback_lock);
3179
3180 switch (type) {
3181 case FILE_CPULIST:
3182 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
3183 break;
3184 case FILE_MEMLIST:
3185 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
3186 break;
3187 case FILE_EFFECTIVE_CPULIST:
3188 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus));
3189 break;
3190 case FILE_EFFECTIVE_MEMLIST:
3191 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems));
3192 break;
3193 case FILE_EXCLUSIVE_CPULIST:
3194 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->exclusive_cpus));
3195 break;
3196 case FILE_EFFECTIVE_XCPULIST:
3197 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_xcpus));
3198 break;
3199 case FILE_SUBPARTS_CPULIST:
3200 seq_printf(sf, "%*pbl\n", cpumask_pr_args(subpartitions_cpus));
3201 break;
3202 case FILE_ISOLATED_CPULIST:
3203 seq_printf(sf, "%*pbl\n", cpumask_pr_args(isolated_cpus));
3204 break;
3205 default:
3206 ret = -EINVAL;
3207 }
3208
3209 spin_unlock_irq(&callback_lock);
3210 return ret;
3211 }
3212
3213 static int sched_partition_show(struct seq_file *seq, void *v)
3214 {
3215 struct cpuset *cs = css_cs(seq_css(seq));
3216 const char *err, *type = NULL;
3217
3218 switch (cs->partition_root_state) {
3219 case PRS_ROOT:
3220 seq_puts(seq, "root\n");
3221 break;
3222 case PRS_ISOLATED:
3223 seq_puts(seq, "isolated\n");
3224 break;
3225 case PRS_MEMBER:
3226 seq_puts(seq, "member\n");
3227 break;
3228 case PRS_INVALID_ROOT:
3229 type = "root";
3230 fallthrough;
3231 case PRS_INVALID_ISOLATED:
3232 if (!type)
3233 type = "isolated";
3234 err = perr_strings[READ_ONCE(cs->prs_err)];
3235 if (err)
3236 seq_printf(seq, "%s invalid (%s)\n", type, err);
3237 else
3238 seq_printf(seq, "%s invalid\n", type);
3239 break;
3240 }
3241 return 0;
3242 }
3243
3244 static ssize_t sched_partition_write(struct kernfs_open_file *of, char *buf,
3245 size_t nbytes, loff_t off)
3246 {
3247 struct cpuset *cs = css_cs(of_css(of));
3248 int val;
3249 int retval = -ENODEV;
3250
3251 buf = strstrip(buf);
3252
3253 if (!strcmp(buf, "root"))
3254 val = PRS_ROOT;
3255 else if (!strcmp(buf, "member"))
3256 val = PRS_MEMBER;
3257 else if (!strcmp(buf, "isolated"))
3258 val = PRS_ISOLATED;
3259 else
3260 return -EINVAL;
3261
3262 css_get(&cs->css);
3263 cpus_read_lock();
3264 mutex_lock(&cpuset_mutex);
3265 if (!is_cpuset_online(cs))
3266 goto out_unlock;
3267
3268 retval = update_prstate(cs, val);
3269 out_unlock:
3270 mutex_unlock(&cpuset_mutex);
3271 cpus_read_unlock();
3272 css_put(&cs->css);
3273 return retval ?: nbytes;
3274 }
3275
3276 /*
3277 * This is currently a minimal set for the default hierarchy. It can be
3278 * expanded later on by migrating more features and control files from v1.
3279 */
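/*
 * Under a cgroup v2 mount these files appear with the controller prefix,
 * e.g. cpuset.cpus, cpuset.mems, cpuset.cpus.partition and
 * cpuset.cpus.exclusive in each cgroup directory.
 */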
3280 static struct cftype dfl_files[] = {
3281 {
3282 .name = "cpus",
3283 .seq_show = cpuset_common_seq_show,
3284 .write = cpuset_write_resmask,
3285 .max_write_len = (100U + 6 * NR_CPUS),
3286 .private = FILE_CPULIST,
3287 .flags = CFTYPE_NOT_ON_ROOT,
3288 },
3289
3290 {
3291 .name = "mems",
3292 .seq_show = cpuset_common_seq_show,
3293 .write = cpuset_write_resmask,
3294 .max_write_len = (100U + 6 * MAX_NUMNODES),
3295 .private = FILE_MEMLIST,
3296 .flags = CFTYPE_NOT_ON_ROOT,
3297 },
3298
3299 {
3300 .name = "cpus.effective",
3301 .seq_show = cpuset_common_seq_show,
3302 .private = FILE_EFFECTIVE_CPULIST,
3303 },
3304
3305 {
3306 .name = "mems.effective",
3307 .seq_show = cpuset_common_seq_show,
3308 .private = FILE_EFFECTIVE_MEMLIST,
3309 },
3310
3311 {
3312 .name = "cpus.partition",
3313 .seq_show = sched_partition_show,
3314 .write = sched_partition_write,
3315 .private = FILE_PARTITION_ROOT,
3316 .flags = CFTYPE_NOT_ON_ROOT,
3317 .file_offset = offsetof(struct cpuset, partition_file),
3318 },
3319
3320 {
3321 .name = "cpus.exclusive",
3322 .seq_show = cpuset_common_seq_show,
3323 .write = cpuset_write_resmask,
3324 .max_write_len = (100U + 6 * NR_CPUS),
3325 .private = FILE_EXCLUSIVE_CPULIST,
3326 .flags = CFTYPE_NOT_ON_ROOT,
3327 },
3328
3329 {
3330 .name = "cpus.exclusive.effective",
3331 .seq_show = cpuset_common_seq_show,
3332 .private = FILE_EFFECTIVE_XCPULIST,
3333 .flags = CFTYPE_NOT_ON_ROOT,
3334 },
3335
3336 {
3337 .name = "cpus.subpartitions",
3338 .seq_show = cpuset_common_seq_show,
3339 .private = FILE_SUBPARTS_CPULIST,
3340 .flags = CFTYPE_ONLY_ON_ROOT | CFTYPE_DEBUG,
3341 },
3342
3343 {
3344 .name = "cpus.isolated",
3345 .seq_show = cpuset_common_seq_show,
3346 .private = FILE_ISOLATED_CPULIST,
3347 .flags = CFTYPE_ONLY_ON_ROOT,
3348 },
3349
3350 { } /* terminate */
3351 };
3352
3353
3354 /**
3355 * cpuset_css_alloc - Allocate a cpuset css
3356 * @parent_css: Parent css of the control group that the new cpuset will be
3357 * part of
3358 * Return: cpuset css on success, -ENOMEM on failure.
3359 *
3360 * Allocate and initialize a new cpuset css for a non-NULL @parent_css; return
3361 * the top cpuset css otherwise.
3362 */
3363 static struct cgroup_subsys_state *
3364 cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
3365 {
3366 struct cpuset *cs;
3367
3368 if (!parent_css)
3369 return &top_cpuset.css;
3370
3371 cs = kzalloc(sizeof(*cs), GFP_KERNEL);
3372 if (!cs)
3373 return ERR_PTR(-ENOMEM);
3374
3375 if (alloc_cpumasks(cs, NULL)) {
3376 kfree(cs);
3377 return ERR_PTR(-ENOMEM);
3378 }
3379
3380 __set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
3381 fmeter_init(&cs->fmeter);
3382 cs->relax_domain_level = -1;
3383 INIT_LIST_HEAD(&cs->remote_sibling);
3384
3385 /* Set CS_MEMORY_MIGRATE for default hierarchy */
3386 if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
3387 __set_bit(CS_MEMORY_MIGRATE, &cs->flags);
3388
3389 return &cs->css;
3390 }
3391
3392 static int cpuset_css_online(struct cgroup_subsys_state *css)
3393 {
3394 struct cpuset *cs = css_cs(css);
3395 struct cpuset *parent = parent_cs(cs);
3396 struct cpuset *tmp_cs;
3397 struct cgroup_subsys_state *pos_css;
3398
3399 if (!parent)
3400 return 0;
3401
3402 cpus_read_lock();
3403 mutex_lock(&cpuset_mutex);
3404
3405 set_bit(CS_ONLINE, &cs->flags);
3406 if (is_spread_page(parent))
3407 set_bit(CS_SPREAD_PAGE, &cs->flags);
3408 if (is_spread_slab(parent))
3409 set_bit(CS_SPREAD_SLAB, &cs->flags);
3410 /*
3411 * For v2, clear CS_SCHED_LOAD_BALANCE if parent is isolated
3412 */
3413 if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
3414 !is_sched_load_balance(parent))
3415 clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
3416
3417 cpuset_inc();
3418
3419 spin_lock_irq(&callback_lock);
3420 if (is_in_v2_mode()) {
3421 cpumask_copy(cs->effective_cpus, parent->effective_cpus);
3422 cs->effective_mems = parent->effective_mems;
3423 }
3424 spin_unlock_irq(&callback_lock);
3425
3426 if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
3427 goto out_unlock;
3428
3429 /*
3430 * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is
3431 * set. This flag handling is implemented in cgroup core for
3432 * historical reasons - the flag may be specified during mount.
3433 *
3434 * Currently, if any sibling cpusets have exclusive cpus or mem, we
3435 * refuse to clone the configuration - thereby refusing entry to the
3436 * task, and as a result failing the sys_unshare() or
3437 * clone() which initiated it. If this becomes a problem for some
3438 * users who wish to allow that scenario, then this could be
3439 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
3440 * (and likewise for mems) to the new cgroup.
3441 */
3442 rcu_read_lock();
3443 cpuset_for_each_child(tmp_cs, pos_css, parent) {
3444 if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
3445 rcu_read_unlock();
3446 goto out_unlock;
3447 }
3448 }
3449 rcu_read_unlock();
3450
3451 spin_lock_irq(&callback_lock);
3452 cs->mems_allowed = parent->mems_allowed;
3453 cs->effective_mems = parent->mems_allowed;
3454 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
3455 cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
3456 spin_unlock_irq(&callback_lock);
3457 out_unlock:
3458 mutex_unlock(&cpuset_mutex);
3459 cpus_read_unlock();
3460 return 0;
3461 }
3462
3463 /*
3464 * If the cpuset being removed has its flag 'sched_load_balance'
3465 * enabled, then simulate turning sched_load_balance off, which
3466 * will call rebuild_sched_domains_locked(). That is not needed
3467 * in the default hierarchy where only changes in partition
3468 * will cause repartitioning.
3469 *
3470 * If the cpuset has the 'sched.partition' flag enabled, simulate
3471 * turning 'sched.partition' off.
3472 */
3473
3474 static void cpuset_css_offline(struct cgroup_subsys_state *css)
3475 {
3476 struct cpuset *cs = css_cs(css);
3477
3478 cpus_read_lock();
3479 mutex_lock(&cpuset_mutex);
3480
3481 if (is_partition_valid(cs))
3482 update_prstate(cs, 0);
3483
3484 if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
3485 is_sched_load_balance(cs))
3486 cpuset_update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
3487
3488 cpuset_dec();
3489 clear_bit(CS_ONLINE, &cs->flags);
3490
3491 mutex_unlock(&cpuset_mutex);
3492 cpus_read_unlock();
3493 }
3494
3495 static void cpuset_css_free(struct cgroup_subsys_state *css)
3496 {
3497 struct cpuset *cs = css_cs(css);
3498
3499 free_cpuset(cs);
3500 }
3501
3502 static void cpuset_bind(struct cgroup_subsys_state *root_css)
3503 {
3504 mutex_lock(&cpuset_mutex);
3505 spin_lock_irq(&callback_lock);
3506
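/* In v2 mode the root cpuset covers all possible CPUs and memory nodes; otherwise it follows the current effective masks. */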
3507 if (is_in_v2_mode()) {
3508 cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
3509 cpumask_copy(top_cpuset.effective_xcpus, cpu_possible_mask);
3510 top_cpuset.mems_allowed = node_possible_map;
3511 } else {
3512 cpumask_copy(top_cpuset.cpus_allowed,
3513 top_cpuset.effective_cpus);
3514 top_cpuset.mems_allowed = top_cpuset.effective_mems;
3515 }
3516
3517 spin_unlock_irq(&callback_lock);
3518 mutex_unlock(&cpuset_mutex);
3519 }
3520
3521 /*
3522 * In case the child is cloned into a cpuset different from its parent,
3523 * additional checks are done to see if the move is allowed.
3524 */
3525 static int cpuset_can_fork(struct task_struct *task, struct css_set *cset)
3526 {
3527 struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
3528 bool same_cs;
3529 int ret;
3530
3531 rcu_read_lock();
3532 same_cs = (cs == task_cs(current));
3533 rcu_read_unlock();
3534
3535 if (same_cs)
3536 return 0;
3537
3538 lockdep_assert_held(&cgroup_mutex);
3539 mutex_lock(&cpuset_mutex);
3540
3541 /* Check to see if task is allowed in the cpuset */
3542 ret = cpuset_can_attach_check(cs);
3543 if (ret)
3544 goto out_unlock;
3545
3546 ret = task_can_attach(task);
3547 if (ret)
3548 goto out_unlock;
3549
3550 ret = security_task_setscheduler(task);
3551 if (ret)
3552 goto out_unlock;
3553
3554 /*
3555 * Mark attach is in progress. This makes validate_change() fail
3556 * changes which zero cpus/mems_allowed.
3557 */
3558 cs->attach_in_progress++;
3559 out_unlock:
3560 mutex_unlock(&cpuset_mutex);
3561 return ret;
3562 }
3563
3564 static void cpuset_cancel_fork(struct task_struct *task, struct css_set *cset)
3565 {
3566 struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
3567 bool same_cs;
3568
3569 rcu_read_lock();
3570 same_cs = (cs == task_cs(current));
3571 rcu_read_unlock();
3572
3573 if (same_cs)
3574 return;
3575
3576 dec_attach_in_progress(cs);
3577 }
3578
3579 /*
3580 * Make sure the new task conforms to the current state of its parent,
3581 * which could have been changed by cpuset just after it inherits the
3582 * state from the parent and before it sits on the cgroup's task list.
3583 */
3584 static void cpuset_fork(struct task_struct *task)
3585 {
3586 struct cpuset *cs;
3587 bool same_cs;
3588
3589 rcu_read_lock();
3590 cs = task_cs(task);
3591 same_cs = (cs == task_cs(current));
3592 rcu_read_unlock();
3593
3594 if (same_cs) {
3595 if (cs == &top_cpuset)
3596 return;
3597
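/* Plain fork within the same cpuset: the child simply inherits the parent's cpumask and mems_allowed. */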
3598 set_cpus_allowed_ptr(task, current->cpus_ptr);
3599 task->mems_allowed = current->mems_allowed;
3600 return;
3601 }
3602
3603 /* CLONE_INTO_CGROUP */
3604 mutex_lock(&cpuset_mutex);
3605 guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
3606 cpuset_attach_task(cs, task);
3607
3608 dec_attach_in_progress_locked(cs);
3609 mutex_unlock(&cpuset_mutex);
3610 }
3611
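/* Callback operations registering the cpuset controller with the cgroup core. */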
3612 struct cgroup_subsys cpuset_cgrp_subsys = {
3613 .css_alloc = cpuset_css_alloc,
3614 .css_online = cpuset_css_online,
3615 .css_offline = cpuset_css_offline,
3616 .css_free = cpuset_css_free,
3617 .can_attach = cpuset_can_attach,
3618 .cancel_attach = cpuset_cancel_attach,
3619 .attach = cpuset_attach,
3620 .post_attach = cpuset_post_attach,
3621 .bind = cpuset_bind,
3622 .can_fork = cpuset_can_fork,
3623 .cancel_fork = cpuset_cancel_fork,
3624 .fork = cpuset_fork,
3625 #ifdef CONFIG_CPUSETS_V1
3626 .legacy_cftypes = cpuset1_files,
3627 #endif
3628 .dfl_cftypes = dfl_files,
3629 .early_init = true,
3630 .threaded = true,
3631 };
3632
3633 /**
3634 * cpuset_init - initialize cpusets at system boot
3635 *
3636 * Description: Initialize top_cpuset
3637 **/
3638
3639 int __init cpuset_init(void)
3640 {
3641 BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL));
3642 BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL));
3643 BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_xcpus, GFP_KERNEL));
3644 BUG_ON(!alloc_cpumask_var(&top_cpuset.exclusive_cpus, GFP_KERNEL));
3645 BUG_ON(!zalloc_cpumask_var(&subpartitions_cpus, GFP_KERNEL));
3646 BUG_ON(!zalloc_cpumask_var(&isolated_cpus, GFP_KERNEL));
3647
3648 cpumask_setall(top_cpuset.cpus_allowed);
3649 nodes_setall(top_cpuset.mems_allowed);
3650 cpumask_setall(top_cpuset.effective_cpus);
3651 cpumask_setall(top_cpuset.effective_xcpus);
3652 cpumask_setall(top_cpuset.exclusive_cpus);
3653 nodes_setall(top_cpuset.effective_mems);
3654
3655 fmeter_init(&top_cpuset.fmeter);
3656 INIT_LIST_HEAD(&remote_children);
3657
3658 BUG_ON(!alloc_cpumask_var(&cpus_attach, GFP_KERNEL));
3659
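/*
 * If domain isolation was set up at boot (e.g. via the "isolcpus=" option),
 * record the housekeeping CPUs and treat the remaining CPUs as isolated.
 */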
3660 have_boot_isolcpus = housekeeping_enabled(HK_TYPE_DOMAIN);
3661 if (have_boot_isolcpus) {
3662 BUG_ON(!alloc_cpumask_var(&boot_hk_cpus, GFP_KERNEL));
3663 cpumask_copy(boot_hk_cpus, housekeeping_cpumask(HK_TYPE_DOMAIN));
3664 cpumask_andnot(isolated_cpus, cpu_possible_mask, boot_hk_cpus);
3665 }
3666
3667 return 0;
3668 }
3669
3670 static void
3671 hotplug_update_tasks(struct cpuset *cs,
3672 struct cpumask *new_cpus, nodemask_t *new_mems,
3673 bool cpus_updated, bool mems_updated)
3674 {
3675 /* A partition root is allowed to have empty effective cpus */
3676 if (cpumask_empty(new_cpus) && !is_partition_valid(cs))
3677 cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus);
3678 if (nodes_empty(*new_mems))
3679 *new_mems = parent_cs(cs)->effective_mems;
3680
3681 spin_lock_irq(&callback_lock);
3682 cpumask_copy(cs->effective_cpus, new_cpus);
3683 cs->effective_mems = *new_mems;
3684 spin_unlock_irq(&callback_lock);
3685
3686 if (cpus_updated)
3687 cpuset_update_tasks_cpumask(cs, new_cpus);
3688 if (mems_updated)
3689 cpuset_update_tasks_nodemask(cs);
3690 }
3691
3692 void cpuset_force_rebuild(void)
3693 {
3694 force_sd_rebuild = true;
3695 }
3696
3697 /**
3698 * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
3699 * @cs: cpuset in interest
3700 * @tmp: the tmpmasks structure pointer
3701 *
3702 * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
3703 * offline, update @cs accordingly. If @cs ends up with no CPU or memory,
3704 * all its tasks are moved to the nearest ancestor with both resources.
3705 */
3706 static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
3707 {
3708 static cpumask_t new_cpus;
3709 static nodemask_t new_mems;
3710 bool cpus_updated;
3711 bool mems_updated;
3712 bool remote;
3713 int partcmd = -1;
3714 struct cpuset *parent;
3715 retry:
3716 wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
3717
3718 mutex_lock(&cpuset_mutex);
3719
3720 /*
3721 * We have raced with task attaching. We wait until attaching
3722 * is finished, so we won't attach a task to an empty cpuset.
3723 */
3724 if (cs->attach_in_progress) {
3725 mutex_unlock(&cpuset_mutex);
3726 goto retry;
3727 }
3728
3729 parent = parent_cs(cs);
3730 compute_effective_cpumask(&new_cpus, cs, parent);
3731 nodes_and(new_mems, cs->mems_allowed, parent->effective_mems);
3732
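/*
 * The partition handling below is only needed when tmpmasks were supplied
 * and @cs has a non-zero partition_root_state.
 */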
3733 if (!tmp || !cs->partition_root_state)
3734 goto update_tasks;
3735
3736 /*
3737 * Compute effective_cpus for valid partition root, may invalidate
3738 * child partition roots if necessary.
3739 */
3740 remote = is_remote_partition(cs);
3741 if (remote || (is_partition_valid(cs) && is_partition_valid(parent)))
3742 compute_partition_effective_cpumask(cs, &new_cpus);
3743
3744 if (remote && cpumask_empty(&new_cpus) &&
3745 partition_is_populated(cs, NULL)) {
3746 remote_partition_disable(cs, tmp);
3747 compute_effective_cpumask(&new_cpus, cs, parent);
3748 remote = false;
3749 cpuset_force_rebuild();
3750 }
3751
3752 /*
3753 * Force the partition to become invalid if either one of
3754 * the following conditions hold:
3755 * 1) empty effective cpus but not valid empty partition.
3756 * 2) parent is invalid or doesn't grant any cpus to child
3757 * partitions.
3758 */
3759 if (is_local_partition(cs) && (!is_partition_valid(parent) ||
3760 tasks_nocpu_error(parent, cs, &new_cpus)))
3761 partcmd = partcmd_invalidate;
3762 /*
3763 * On the other hand, an invalid partition root may be transitioned
3764 * back to a regular one.
3765 */
3766 else if (is_partition_valid(parent) && is_partition_invalid(cs))
3767 partcmd = partcmd_update;
3768
3769 if (partcmd >= 0) {
3770 update_parent_effective_cpumask(cs, partcmd, NULL, tmp);
3771 if ((partcmd == partcmd_invalidate) || is_partition_valid(cs)) {
3772 compute_partition_effective_cpumask(cs, &new_cpus);
3773 cpuset_force_rebuild();
3774 }
3775 }
3776
3777 update_tasks:
3778 cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
3779 mems_updated = !nodes_equal(new_mems, cs->effective_mems);
3780 if (!cpus_updated && !mems_updated)
3781 goto unlock; /* Hotplug doesn't affect this cpuset */
3782
3783 if (mems_updated)
3784 check_insane_mems_config(&new_mems);
3785
3786 if (is_in_v2_mode())
3787 hotplug_update_tasks(cs, &new_cpus, &new_mems,
3788 cpus_updated, mems_updated);
3789 else
3790 cpuset1_hotplug_update_tasks(cs, &new_cpus, &new_mems,
3791 cpus_updated, mems_updated);
3792
3793 unlock:
3794 mutex_unlock(&cpuset_mutex);
3795 }
3796
3797 /**
3798 * cpuset_handle_hotplug - handle CPU/memory hot{,un}plug for a cpuset
3799 *
3800 * This function is called after either CPU or memory configuration has
3801 * changed and updates cpuset accordingly. The top_cpuset is always
3802 * synchronized to cpu_active_mask and N_MEMORY, which is necessary in
3803 * order to make cpusets transparent (of no effect) on systems that are
3804 * actively using CPU hotplug but making no active use of cpusets.
3805 *
3806 * Non-root cpusets are only affected by offlining. If any CPUs or memory
3807 * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on
3808 * all descendants.
3809 *
3810 * Note that CPU offlining during suspend is ignored. We don't modify
3811 * cpusets across suspend/resume cycles at all.
3812 *
3813 * CPU / memory hotplug is handled synchronously.
3814 */
3815 static void cpuset_handle_hotplug(void)
3816 {
3817 static cpumask_t new_cpus;
3818 static nodemask_t new_mems;
3819 bool cpus_updated, mems_updated;
3820 bool on_dfl = is_in_v2_mode();
3821 struct tmpmasks tmp, *ptmp = NULL;
3822
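/* Temporary cpumasks are only needed for partition handling, which only exists in v2 mode. */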
3823 if (on_dfl && !alloc_cpumasks(NULL, &tmp))
3824 ptmp = &tmp;
3825
3826 lockdep_assert_cpus_held();
3827 mutex_lock(&cpuset_mutex);
3828
3829 /* fetch the available cpus/mems and find out which changed how */
3830 cpumask_copy(&new_cpus, cpu_active_mask);
3831 new_mems = node_states[N_MEMORY];
3832
3833 /*
3834 * If subpartitions_cpus is populated, it is likely that the check
3835 * below will produce a false positive on cpus_updated when the cpu
3836 * list isn't changed. It is extra work, but it is better to be safe.
3837 */
3838 cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus) ||
3839 !cpumask_empty(subpartitions_cpus);
3840 mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);
3841
3842 /* For v1, synchronize cpus_allowed to cpu_active_mask */
3843 if (cpus_updated) {
3844 cpuset_force_rebuild();
3845 spin_lock_irq(&callback_lock);
3846 if (!on_dfl)
3847 cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
3848 /*
3849 * Make sure that CPUs allocated to child partitions
3850 * do not show up in effective_cpus. If no CPU is left,
3851 * we clear the subpartitions_cpus & let the child partitions
3852 * fight for the CPUs again.
3853 */
3854 if (!cpumask_empty(subpartitions_cpus)) {
3855 if (cpumask_subset(&new_cpus, subpartitions_cpus)) {
3856 top_cpuset.nr_subparts = 0;
3857 cpumask_clear(subpartitions_cpus);
3858 } else {
3859 cpumask_andnot(&new_cpus, &new_cpus,
3860 subpartitions_cpus);
3861 }
3862 }
3863 cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
3864 spin_unlock_irq(&callback_lock);
3865 /* we don't mess with cpumasks of tasks in top_cpuset */
3866 }
3867
3868 /* synchronize mems_allowed to N_MEMORY */
3869 if (mems_updated) {
3870 spin_lock_irq(&callback_lock);
3871 if (!on_dfl)
3872 top_cpuset.mems_allowed = new_mems;
3873 top_cpuset.effective_mems = new_mems;
3874 spin_unlock_irq(&callback_lock);
3875 cpuset_update_tasks_nodemask(&top_cpuset);
3876 }
3877
3878 mutex_unlock(&cpuset_mutex);
3879
3880 /* if cpus or mems changed, we need to propagate to descendants */
3881 if (cpus_updated || mems_updated) {
3882 struct cpuset *cs;
3883 struct cgroup_subsys_state *pos_css;
3884
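/*
 * Pin each descendant with css_tryget_online() so the RCU read lock can
 * be dropped across cpuset_hotplug_update_tasks(), which may sleep.
 */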
3885 rcu_read_lock();
3886 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
3887 if (cs == &top_cpuset || !css_tryget_online(&cs->css))
3888 continue;
3889 rcu_read_unlock();
3890
3891 cpuset_hotplug_update_tasks(cs, ptmp);
3892
3893 rcu_read_lock();
3894 css_put(&cs->css);
3895 }
3896 rcu_read_unlock();
3897 }
3898
3899 /* rebuild sched domains if cpus_allowed has changed */
3900 if (force_sd_rebuild) {
3901 force_sd_rebuild = false;
3902 rebuild_sched_domains_cpuslocked();
3903 }
3904
3905 free_cpumasks(NULL, ptmp);
3906 }
3907
3908 void cpuset_update_active_cpus(void)
3909 {
3910 /*
3911 * We're inside cpu hotplug critical region which usually nests
3912 * inside cgroup synchronization. Hotplug processing is handled
3913 * synchronously here by cpuset_handle_hotplug().
3914 */
3915 cpuset_handle_hotplug();
3916 }
3917
3918 /*
3919 * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
3920 * Call this routine anytime after node_states[N_MEMORY] changes.
3921 * See cpuset_update_active_cpus() for CPU hotplug handling.
3922 */
3923 static int cpuset_track_online_nodes(struct notifier_block *self,
3924 unsigned long action, void *arg)
3925 {
3926 cpuset_handle_hotplug();
3927 return NOTIFY_OK;
3928 }
3929
3930 /**
3931 * cpuset_init_smp - initialize cpus_allowed
3932 *
3933 * Description: Finish top cpuset after cpu, node maps are initialized
3934 */
3935 void __init cpuset_init_smp(void)
3936 {
3937 /*
3938 * cpus_allowed/mems_allowed set to v2 values in the initial
3939 * cpuset_bind() call will be reset to v1 values in another
3940 * cpuset_bind() call when v1 cpuset is mounted.
3941 */
3942 top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;
3943
3944 cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask);
3945 top_cpuset.effective_mems = node_states[N_MEMORY];
3946
3947 hotplug_memory_notifier(cpuset_track_online_nodes, CPUSET_CALLBACK_PRI);
3948
3949 cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
3950 BUG_ON(!cpuset_migrate_mm_wq);
3951 }
3952
3953 /**
3954 * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
3955 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
3956 * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
3957 *
3958 * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
3959 * attached to the specified @tsk. Guaranteed to return some non-empty
3960 * subset of cpu_online_mask, even if this means going outside the
3961 * task's cpuset, except when the task is in the top cpuset.
3962 **/
3963
3964 void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
3965 {
3966 unsigned long flags;
3967 struct cpuset *cs;
3968
3969 spin_lock_irqsave(&callback_lock, flags);
3970 rcu_read_lock();
3971
3972 cs = task_cs(tsk);
3973 if (cs != &top_cpuset)
3974 guarantee_online_cpus(tsk, pmask);
3975 /*
3976 * Tasks in the top cpuset won't have their cpumasks updated
3977 * when a hotplug online/offline event happens. So we include all
3978 * offline cpus in the allowed cpu list.
3979 */
3980 if ((cs == &top_cpuset) || cpumask_empty(pmask)) {
3981 const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
3982
3983 /*
3984 * We first exclude cpus allocated to partitions. If there is no
3985 * allowable online cpu left, we fall back to all possible cpus.
3986 */
3987 cpumask_andnot(pmask, possible_mask, subpartitions_cpus);
3988 if (!cpumask_intersects(pmask, cpu_online_mask))
3989 cpumask_copy(pmask, possible_mask);
3990 }
3991
3992 rcu_read_unlock();
3993 spin_unlock_irqrestore(&callback_lock, flags);
3994 }
3995
3996 /**
3997 * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
3998 * @tsk: pointer to task_struct with which the scheduler is struggling
3999 *
4000 * Description: In the case that the scheduler cannot find an allowed cpu in
4001 * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy
4002 * mode however, this value is the same as task_cs(tsk)->effective_cpus,
4003 * which will not contain a sane cpumask during cases such as cpu hotplugging.
4004 * This is the absolute last resort for the scheduler and it is only used if
4005 * _every_ other avenue has been traveled.
4006 *
4007 * Returns true if the affinity of @tsk was changed, false otherwise.
4008 **/
4009
4010 bool cpuset_cpus_allowed_fallback(struct task_struct *tsk)
4011 {
4012 const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
4013 const struct cpumask *cs_mask;
4014 bool changed = false;
4015
4016 rcu_read_lock();
4017 cs_mask = task_cs(tsk)->cpus_allowed;
4018 if (is_in_v2_mode() && cpumask_subset(cs_mask, possible_mask)) {
4019 do_set_cpus_allowed(tsk, cs_mask);
4020 changed = true;
4021 }
4022 rcu_read_unlock();
4023
4024 /*
4025 * We own tsk->cpus_allowed, nobody can change it under us.
4026 *
4027 * But we used cs && cs->cpus_allowed lockless and thus can
4028 * race with cgroup_attach_task() or update_cpumask() and get
4029 * the wrong tsk->cpus_allowed. However, both cases imply the
4030 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
4031 * which takes task_rq_lock().
4032 *
4033 * If we are called after it dropped the lock we must see all
4034 * changes in task_cs(tsk)->cpus_allowed. Otherwise we can temporarily
4035 * set any mask even if it is not right from task_cs() pov,
4036 * the pending set_cpus_allowed_ptr() will fix things.
4037 *
4038 * select_fallback_rq() will fix things up and set cpu_possible_mask
4039 * if required.
4040 */
4041 return changed;
4042 }
4043
4044 void __init cpuset_init_current_mems_allowed(void)
4045 {
4046 nodes_setall(current->mems_allowed);
4047 }
4048
4049 /**
4050 * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset.
4051 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
4052 *
4053 * Description: Returns the nodemask_t mems_allowed of the cpuset
4054 * attached to the specified @tsk. Guaranteed to return some non-empty
4055 * subset of node_states[N_MEMORY], even if this means going outside the
4056 * task's cpuset.
4057 **/
4058
4059 nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
4060 {
4061 nodemask_t mask;
4062 unsigned long flags;
4063
4064 spin_lock_irqsave(&callback_lock, flags);
4065 rcu_read_lock();
4066 guarantee_online_mems(task_cs(tsk), &mask);
4067 rcu_read_unlock();
4068 spin_unlock_irqrestore(&callback_lock, flags);
4069
4070 return mask;
4071 }
4072
4073 /**
4074 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
4075 * @nodemask: the nodemask to be checked
4076 *
4077 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
4078 */
4079 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
4080 {
4081 return nodes_intersects(*nodemask, current->mems_allowed);
4082 }
4083
4084 /*
4085 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
4086 * mem_hardwall ancestor to the specified cpuset. Call holding
4087 * callback_lock. If no ancestor is mem_exclusive or mem_hardwall
4088 * (an unusual configuration), then returns the root cpuset.
4089 */
4090 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
4091 {
4092 while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
4093 cs = parent_cs(cs);
4094 return cs;
4095 }
4096
4097 /*
4098 * cpuset_node_allowed - Can we allocate on a memory node?
4099 * @node: is this an allowed node?
4100 * @gfp_mask: memory allocation flags
4101 *
4102 * If we're in interrupt, yes, we can always allocate. If @node is set in
4103 * current's mems_allowed, yes. If it's not a __GFP_HARDWALL request and this
4104 * node is set in the nearest hardwalled cpuset ancestor to current's cpuset,
4105 * yes. If current has access to memory reserves as an oom victim, yes.
4106 * Otherwise, no.
4107 *
4108 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
4109 * and do not allow allocations outside the current task's cpuset
4110 * unless the task has been OOM killed.
4111 * GFP_KERNEL allocations are not so marked, so can escape to the
4112 * nearest enclosing hardwalled ancestor cpuset.
4113 *
4114 * Scanning up parent cpusets requires callback_lock. The
4115 * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
4116 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
4117 * current task's mems_allowed came up empty on the first pass over
4118 * the zonelist. So only GFP_KERNEL allocations, if all nodes in the
4119 * cpuset are short of memory, might require taking the callback_lock.
4120 *
4121 * The first call here from mm/page_alloc:get_page_from_freelist()
4122 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
4123 * so no allocation on a node outside the cpuset is allowed (unless
4124 * in interrupt, of course).
4125 *
4126 * The second pass through get_page_from_freelist() doesn't even call
4127 * here for GFP_ATOMIC calls. For those calls, the __alloc_pages()
4128 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
4129 * in alloc_flags. That logic and the checks below have the combined
4130 * effect that:
4131 * in_interrupt - any node ok (current task context irrelevant)
4132 * GFP_ATOMIC - any node ok
4133 * tsk_is_oom_victim - any node ok
4134 * GFP_KERNEL - any node in enclosing hardwalled cpuset ok
4135 * GFP_USER - only nodes in current task's mems_allowed ok.
4136 */
4137 bool cpuset_node_allowed(int node, gfp_t gfp_mask)
4138 {
4139 struct cpuset *cs; /* current cpuset ancestors */
4140 bool allowed; /* is allocation in zone z allowed? */
4141 unsigned long flags;
4142
4143 if (in_interrupt())
4144 return true;
4145 if (node_isset(node, current->mems_allowed))
4146 return true;
4147 /*
4148 * Allow tasks that have access to memory reserves because they have
4149 * been OOM killed to get memory anywhere.
4150 */
4151 if (unlikely(tsk_is_oom_victim(current)))
4152 return true;
4153 if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */
4154 return false;
4155
4156 if (current->flags & PF_EXITING) /* Let dying task have memory */
4157 return true;
4158
4159 /* Not hardwall and node outside mems_allowed: scan up cpusets */
4160 spin_lock_irqsave(&callback_lock, flags);
4161
4162 rcu_read_lock();
4163 cs = nearest_hardwall_ancestor(task_cs(current));
4164 allowed = node_isset(node, cs->mems_allowed);
4165 rcu_read_unlock();
4166
4167 spin_unlock_irqrestore(&callback_lock, flags);
4168 return allowed;
4169 }
4170
4171 /**
4172 * cpuset_spread_node() - On which node to begin search for a page
4173 * @rotor: round robin rotor
4174 *
4175 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
4176 * tasks in a cpuset with is_spread_page or is_spread_slab set),
4177 * and if the memory allocation used cpuset_mem_spread_node()
4178 * to determine on which node to start looking, as it will for
4179 * certain page cache or slab cache pages such as used for file
4180 * system buffers and inode caches, then instead of starting on the
4181 * local node to look for a free page, rather spread the starting
4182 * node around the task's mems_allowed nodes.
4183 *
4184 * We don't have to worry about the returned node being offline
4185 * because "it can't happen", and even if it did, it would be ok.
4186 *
4187 * The routines calling guarantee_online_mems() are careful to
4188 * only set nodes in task->mems_allowed that are online. So it
4189 * should not be possible for the following code to return an
4190 * offline node. But if it did, that would be ok, as this routine
4191 * is not returning the node where the allocation must be, only
4192 * the node where the search should start. The zonelist passed to
4193 * __alloc_pages() will include all nodes. If the slab allocator
4194 * is passed an offline node, it will fall back to the local node.
4195 * See kmem_cache_alloc_node().
4196 */
4197 static int cpuset_spread_node(int *rotor)
4198 {
4199 return *rotor = next_node_in(*rotor, current->mems_allowed);
4200 }
4201
4202 /**
4203 * cpuset_mem_spread_node() - On which node to begin search for a file page
4204 */
4205 int cpuset_mem_spread_node(void)
4206 {
4207 if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
4208 current->cpuset_mem_spread_rotor =
4209 node_random(&current->mems_allowed);
4210
4211 return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
4212 }
4213
4214 /**
4215 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
4216 * @tsk1: pointer to task_struct of some task.
4217 * @tsk2: pointer to task_struct of some other task.
4218 *
4219 * Description: Return true if @tsk1's mems_allowed intersects the
4220 * mems_allowed of @tsk2. Used by the OOM killer to determine if
4221 * one of the task's memory usage might impact the memory available
4222 * to the other.
4223 **/
4224
4225 int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
4226 const struct task_struct *tsk2)
4227 {
4228 return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
4229 }
4230
4231 /**
4232 * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed
4233 *
4234 * Description: Prints current's name, cpuset name, and cached copy of its
4235 * mems_allowed to the kernel log.
4236 */
4237 void cpuset_print_current_mems_allowed(void)
4238 {
4239 struct cgroup *cgrp;
4240
4241 rcu_read_lock();
4242
4243 cgrp = task_cs(current)->css.cgroup;
4244 pr_cont(",cpuset=");
4245 pr_cont_cgroup_name(cgrp);
4246 pr_cont(",mems_allowed=%*pbl",
4247 nodemask_pr_args(&current->mems_allowed));
4248
4249 rcu_read_unlock();
4250 }
4251
4252 #ifdef CONFIG_PROC_PID_CPUSET
4253 /*
4254 * proc_cpuset_show()
4255 * - Print the task's cpuset path into seq_file.
4256 * - Used for /proc/<pid>/cpuset.
4257 * - No need to task_lock(tsk) on this tsk->cpuset reference, as it
4258 * doesn't really matter if tsk->cpuset changes after we read it,
4259 * and we take cpuset_mutex, keeping cpuset_attach() from changing it
4260 * anyway.
4261 */
4262 int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
4263 struct pid *pid, struct task_struct *tsk)
4264 {
4265 char *buf;
4266 struct cgroup_subsys_state *css;
4267 int retval;
4268
4269 retval = -ENOMEM;
4270 buf = kmalloc(PATH_MAX, GFP_KERNEL);
4271 if (!buf)
4272 goto out;
4273
4274 rcu_read_lock();
4275 spin_lock_irq(&css_set_lock);
4276 css = task_css(tsk, cpuset_cgrp_id);
4277 retval = cgroup_path_ns_locked(css->cgroup, buf, PATH_MAX,
4278 current->nsproxy->cgroup_ns);
4279 spin_unlock_irq(&css_set_lock);
4280 rcu_read_unlock();
4281
4282 if (retval == -E2BIG)
4283 retval = -ENAMETOOLONG;
4284 if (retval < 0)
4285 goto out_free;
4286 seq_puts(m, buf);
4287 seq_putc(m, '\n');
4288 retval = 0;
4289 out_free:
4290 kfree(buf);
4291 out:
4292 return retval;
4293 }
4294 #endif /* CONFIG_PROC_PID_CPUSET */
4295
4296 /* Display task mems_allowed in /proc/<pid>/status file. */
4297 void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
4298 {
4299 seq_printf(m, "Mems_allowed:\t%*pb\n",
4300 nodemask_pr_args(&task->mems_allowed));
4301 seq_printf(m, "Mems_allowed_list:\t%*pbl\n",
4302 nodemask_pr_args(&task->mems_allowed));
4303 }
4304