1 /*
2 * kernel/cpuset.c
3 *
4 * Processor and Memory placement constraints for sets of tasks.
5 *
6 * Copyright (C) 2003 BULL SA.
7 * Copyright (C) 2004-2007 Silicon Graphics, Inc.
8 * Copyright (C) 2006 Google, Inc
9 *
10 * Portions derived from Patrick Mochel's sysfs code.
11 * sysfs is Copyright (c) 2001-3 Patrick Mochel
12 *
13 * 2003-10-10 Written by Simon Derr.
14 * 2003-10-22 Updates by Stephen Hemminger.
15 * 2004 May-July Rework by Paul Jackson.
16 * 2006 Rework by Paul Menage to use generic cgroups
17 * 2008 Rework of the scheduler domains and CPU hotplug handling
18 * by Max Krasnyansky
19 *
20 * This file is subject to the terms and conditions of the GNU General Public
21 * License. See the file COPYING in the main directory of the Linux
22 * distribution for more details.
23 */
24 #include "cgroup-internal.h"
25 #include "cpuset-internal.h"
26
27 #include <linux/init.h>
28 #include <linux/interrupt.h>
29 #include <linux/kernel.h>
30 #include <linux/mempolicy.h>
31 #include <linux/mm.h>
32 #include <linux/memory.h>
33 #include <linux/export.h>
34 #include <linux/rcupdate.h>
35 #include <linux/sched.h>
36 #include <linux/sched/deadline.h>
37 #include <linux/sched/mm.h>
38 #include <linux/sched/task.h>
39 #include <linux/security.h>
40 #include <linux/oom.h>
41 #include <linux/sched/isolation.h>
42 #include <linux/wait.h>
43 #include <linux/workqueue.h>
44
45 DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
46 DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
47
48 /*
49 * There could be abnormal cpuset configurations for cpu or memory
50 * node binding; this key provides a quick, low-cost check for such
51 * a situation.
52 */
53 DEFINE_STATIC_KEY_FALSE(cpusets_insane_config_key);
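/*
 * Illustrative sketch only: with the key enabled, hot paths elsewhere can
 * make a cheap static-branch test before reporting problems. The caller
 * below is hypothetical; cpusets_insane_config() is the helper that tests
 * this key.
 *
 *	if (unlikely(cpusets_insane_config()))
 *		pr_warn("allocation may fail due to the cpuset configuration\n");
 */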
54
55 static const char * const perr_strings[] = {
56 [PERR_INVCPUS] = "Invalid cpu list in cpuset.cpus.exclusive",
57 [PERR_INVPARENT] = "Parent is an invalid partition root",
58 [PERR_NOTPART] = "Parent is not a partition root",
59 [PERR_NOTEXCL] = "Cpu list in cpuset.cpus not exclusive",
60 [PERR_NOCPUS] = "Parent unable to distribute cpu downstream",
61 [PERR_HOTPLUG] = "No cpu available due to hotplug",
62 [PERR_CPUSEMPTY] = "cpuset.cpus and cpuset.cpus.exclusive are empty",
63 [PERR_HKEEPING] = "partition config conflicts with housekeeping setup",
64 [PERR_ACCESS] = "Enable partition not permitted",
65 };
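/*
 * When a partition becomes invalid, the matching string above is reported
 * to userspace on reads of "cpuset.cpus.partition", for example:
 * "root invalid (Cpu list in cpuset.cpus not exclusive)".
 */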
66
67 /*
68 * Exclusive CPUs distributed out to sub-partitions of top_cpuset
69 */
70 static cpumask_var_t subpartitions_cpus;
71
72 /*
73 * Exclusive CPUs in isolated partitions
74 */
75 static cpumask_var_t isolated_cpus;
76
77 /*
78 * Housekeeping (HK_TYPE_DOMAIN) CPUs at boot
79 */
80 static cpumask_var_t boot_hk_cpus;
81 static bool have_boot_isolcpus;
82
83 /* List of remote partition root children */
84 static struct list_head remote_children;
85
86 /*
87 * A flag to force sched domain rebuild at the end of an operation.
88 * It can be set in
89 * - update_partition_sd_lb()
90 * - remote_partition_check()
91 * - update_cpumasks_hier()
92 * - cpuset_update_flag()
93 * - cpuset_hotplug_update_tasks()
94 * - cpuset_handle_hotplug()
95 *
96 * Protected by cpuset_mutex (with cpus_read_lock held) or cpus_write_lock.
97 *
98 * Note that update_relax_domain_level() in cpuset-v1.c can still call
99 * rebuild_sched_domains_locked() directly without using this flag.
100 */
101 static bool force_sd_rebuild;
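/*
 * Simplified sketch of the intended pattern (see the callers of
 * cpuset_force_rebuild() and the checks of force_sd_rebuild later in this
 * file): inner update helpers mark a rebuild as pending, and the enclosing
 * operation performs it once at the end.
 *
 *	cpuset_force_rebuild();			// somewhere in an update path
 *	...
 *	if (force_sd_rebuild)			// end of the operation
 *		rebuild_sched_domains_locked();
 */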
102
103 /*
104 * Partition root states:
105 *
106 * 0 - member (not a partition root)
107 * 1 - partition root
108 * 2 - partition root without load balancing (isolated)
109 * -1 - invalid partition root
110 * -2 - invalid isolated partition root
111 *
112 * There are 2 types of partitions - local or remote. Local partitions are
113 * those whose parents are partition roots themselves. Setting
114 * cpuset.cpus.exclusive is optional when setting up local partitions.
115 * Remote partitions are those whose parents are not partition roots. Passing
116 * exclusive CPUs down by setting cpuset.cpus.exclusive along the ancestor
117 * nodes is mandatory when creating a remote partition.
118 *
119 * For simplicity, a local partition can be created under a local or remote
120 * partition but a remote partition cannot have any partition root in its
121 * ancestor chain except the cgroup root.
122 */
123 #define PRS_MEMBER 0
124 #define PRS_ROOT 1
125 #define PRS_ISOLATED 2
126 #define PRS_INVALID_ROOT -1
127 #define PRS_INVALID_ISOLATED -2
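/*
 * Illustrative summary of how these states map onto the
 * "cpuset.cpus.partition" interface (the invalid forms are read-only):
 *
 *	PRS_MEMBER		"member"
 *	PRS_ROOT		"root"
 *	PRS_ISOLATED		"isolated"
 *	PRS_INVALID_ROOT	"root invalid (<reason>)"
 *	PRS_INVALID_ISOLATED	"isolated invalid (<reason>)"
 */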
128
129 static inline bool is_prs_invalid(int prs_state)
130 {
131 return prs_state < 0;
132 }
133
134 /*
135 * Temporary cpumasks for working with partitions that are passed among
136 * functions to avoid memory allocation in inner functions.
137 */
138 struct tmpmasks {
139 cpumask_var_t addmask, delmask; /* For partition root */
140 cpumask_var_t new_cpus; /* For update_cpumasks_hier() */
141 };
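/*
 * Illustrative usage sketch (not a guarantee of the internal API): callers
 * typically allocate a struct tmpmasks on the stack once per operation and
 * pass it down to the update helpers, e.g.:
 *
 *	struct tmpmasks tmp;
 *
 *	if (alloc_cpumasks(NULL, &tmp))		// see alloc_cpumasks() below
 *		return -ENOMEM;
 *	// ... pass &tmp to the cpumask/partition update helpers ...
 *	free_cpumasks(NULL, &tmp);
 */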
142
143 void inc_dl_tasks_cs(struct task_struct *p)
144 {
145 struct cpuset *cs = task_cs(p);
146
147 cs->nr_deadline_tasks++;
148 }
149
150 void dec_dl_tasks_cs(struct task_struct *p)
151 {
152 struct cpuset *cs = task_cs(p);
153
154 cs->nr_deadline_tasks--;
155 }
156
157 static inline int is_partition_valid(const struct cpuset *cs)
158 {
159 return cs->partition_root_state > 0;
160 }
161
162 static inline int is_partition_invalid(const struct cpuset *cs)
163 {
164 return cs->partition_root_state < 0;
165 }
166
167 /*
168 * Callers should hold callback_lock to modify partition_root_state.
169 */
170 static inline void make_partition_invalid(struct cpuset *cs)
171 {
172 if (cs->partition_root_state > 0)
173 cs->partition_root_state = -cs->partition_root_state;
174 }
175
176 /*
177 * Send a notification event whenever partition_root_state changes.
178 */
179 static inline void notify_partition_change(struct cpuset *cs, int old_prs)
180 {
181 if (old_prs == cs->partition_root_state)
182 return;
183 cgroup_file_notify(&cs->partition_file);
184
185 /* Reset prs_err if not invalid */
186 if (is_partition_valid(cs))
187 WRITE_ONCE(cs->prs_err, PERR_NONE);
188 }
189
190 static struct cpuset top_cpuset = {
191 .flags = BIT(CS_ONLINE) | BIT(CS_CPU_EXCLUSIVE) |
192 BIT(CS_MEM_EXCLUSIVE) | BIT(CS_SCHED_LOAD_BALANCE),
193 .partition_root_state = PRS_ROOT,
194 .relax_domain_level = -1,
195 .remote_sibling = LIST_HEAD_INIT(top_cpuset.remote_sibling),
196 };
197
198 /*
199 * There are two global locks guarding cpuset structures - cpuset_mutex and
200 * callback_lock. We also require taking task_lock() when dereferencing a
201 * task's cpuset pointer. See "The task_lock() exception", at the end of this
202 * comment. The cpuset code uses only cpuset_mutex. Other kernel subsystems
203 * can use cpuset_lock()/cpuset_unlock() to prevent change to cpuset
204 * structures. Note that cpuset_mutex needs to be a mutex as it is used in
205 * paths that rely on priority inheritance (e.g. scheduler - on RT) for
206 * correctness.
207 *
208 * A task must hold both locks to modify cpusets. If a task holds
209 * cpuset_mutex, it blocks others, ensuring that it is the only task able to
210 * also acquire callback_lock and be able to modify cpusets. It can perform
211 * various checks on the cpuset structure first, knowing nothing will change.
212 * It can also allocate memory while just holding cpuset_mutex. While it is
213 * performing these checks, various callback routines can briefly acquire
214 * callback_lock to query cpusets. Once it is ready to make the changes, it
215 * takes callback_lock, blocking everyone else.
216 *
217 * Calls to the kernel memory allocator can not be made while holding
218 * callback_lock, as that would risk double tripping on callback_lock
219 * from one of the callbacks into the cpuset code from within
220 * __alloc_pages().
221 *
222 * If a task is only holding callback_lock, then it has read-only
223 * access to cpusets.
224 *
225 * The task_struct fields mems_allowed and mempolicy may be changed by
226 * another task, so we use alloc_lock in the task_struct to protect
227 * them.
228 *
229 * The cpuset_common_seq_show() handlers only hold callback_lock across
230 * small pieces of code, such as when reading out possibly multi-word
231 * cpumasks and nodemasks.
232 *
233 * Accessing a task's cpuset should be done in accordance with the
234 * guidelines for accessing subsystem state in kernel/cgroup.c
235 */
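/*
 * Illustrative sketch (not actual kernel code) of the write-side pattern
 * described above, using a hypothetical update helper:
 *
 *	static void example_update_cpuset(struct cpuset *cs,
 *					  const struct cpumask *new_cpus)
 *	{
 *		mutex_lock(&cpuset_mutex);	// serialize writers, may allocate
 *		// ... validate the change, allocate temporary masks ...
 *		spin_lock_irq(&callback_lock);	// block readers while publishing
 *		cpumask_copy(cs->cpus_allowed, new_cpus);
 *		spin_unlock_irq(&callback_lock);
 *		mutex_unlock(&cpuset_mutex);
 *	}
 */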
236
237 static DEFINE_MUTEX(cpuset_mutex);
238
239 void cpuset_lock(void)
240 {
241 mutex_lock(&cpuset_mutex);
242 }
243
244 void cpuset_unlock(void)
245 {
246 mutex_unlock(&cpuset_mutex);
247 }
248
249 static DEFINE_SPINLOCK(callback_lock);
250
251 void cpuset_callback_lock_irq(void)
252 {
253 spin_lock_irq(&callback_lock);
254 }
255
256 void cpuset_callback_unlock_irq(void)
257 {
258 spin_unlock_irq(&callback_lock);
259 }
260
261 static struct workqueue_struct *cpuset_migrate_mm_wq;
262
263 static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);
264
265 static inline void check_insane_mems_config(nodemask_t *nodes)
266 {
267 if (!cpusets_insane_config() &&
268 movable_only_nodes(nodes)) {
269 static_branch_enable(&cpusets_insane_config_key);
270 pr_info("Unsupported (movable nodes only) cpuset configuration detected (nmask=%*pbl)!\n"
271 "Cpuset allocations might fail even with a lot of memory available.\n",
272 nodemask_pr_args(nodes));
273 }
274 }
275
276 /*
277 * decrease cs->attach_in_progress.
278 * wake_up cpuset_attach_wq if cs->attach_in_progress==0.
279 */
280 static inline void dec_attach_in_progress_locked(struct cpuset *cs)
281 {
282 lockdep_assert_held(&cpuset_mutex);
283
284 cs->attach_in_progress--;
285 if (!cs->attach_in_progress)
286 wake_up(&cpuset_attach_wq);
287 }
288
289 static inline void dec_attach_in_progress(struct cpuset *cs)
290 {
291 mutex_lock(&cpuset_mutex);
292 dec_attach_in_progress_locked(cs);
293 mutex_unlock(&cpuset_mutex);
294 }
295
296 static inline bool cpuset_v2(void)
297 {
298 return !IS_ENABLED(CONFIG_CPUSETS_V1) ||
299 cgroup_subsys_on_dfl(cpuset_cgrp_subsys);
300 }
301
302 /*
303 * Cgroup v2 behavior is used on the "cpus" and "mems" control files when
304 * on default hierarchy or when the cpuset_v2_mode flag is set by mounting
305 * the v1 cpuset cgroup filesystem with the "cpuset_v2_mode" mount option.
306 * With v2 behavior, "cpus" and "mems" are always what the users have
307 * requested and won't be changed by hotplug events. Only the effective
308 * cpus or mems will be affected.
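 * For example (illustrative), v2 behavior can be requested on a v1 mount
 * with something like:
 *
 *	mount -t cgroup -o cpuset,cpuset_v2_mode none /sys/fs/cgroup/cpuset
 *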
309 */
310 static inline bool is_in_v2_mode(void)
311 {
312 return cpuset_v2() ||
313 (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE);
314 }
315
316 /**
317 * partition_is_populated - check if partition has tasks
318 * @cs: partition root to be checked
319 * @excluded_child: a child cpuset to be excluded in task checking
320 * Return: true if there are tasks, false otherwise
321 *
322 * It is assumed that @cs is a valid partition root. @excluded_child should
323 * be non-NULL when this cpuset is going to become a partition itself.
324 */
325 static inline bool partition_is_populated(struct cpuset *cs,
326 struct cpuset *excluded_child)
327 {
328 struct cgroup_subsys_state *css;
329 struct cpuset *child;
330
331 if (cs->css.cgroup->nr_populated_csets)
332 return true;
333 if (!excluded_child && !cs->nr_subparts)
334 return cgroup_is_populated(cs->css.cgroup);
335
336 rcu_read_lock();
337 cpuset_for_each_child(child, css, cs) {
338 if (child == excluded_child)
339 continue;
340 if (is_partition_valid(child))
341 continue;
342 if (cgroup_is_populated(child->css.cgroup)) {
343 rcu_read_unlock();
344 return true;
345 }
346 }
347 rcu_read_unlock();
348 return false;
349 }
350
351 /*
352 * Return in pmask the portion of a task's cpuset's cpus_allowed that
353 * are online and are capable of running the task. If none are found,
354 * walk up the cpuset hierarchy until we find one that does have some
355 * appropriate cpus.
356 *
357 * One way or another, we guarantee to return some non-empty subset
358 * of cpu_online_mask.
359 *
360 * Call with callback_lock or cpuset_mutex held.
361 */
362 static void guarantee_online_cpus(struct task_struct *tsk,
363 struct cpumask *pmask)
364 {
365 const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
366 struct cpuset *cs;
367
368 if (WARN_ON(!cpumask_and(pmask, possible_mask, cpu_online_mask)))
369 cpumask_copy(pmask, cpu_online_mask);
370
371 rcu_read_lock();
372 cs = task_cs(tsk);
373
374 while (!cpumask_intersects(cs->effective_cpus, pmask))
375 cs = parent_cs(cs);
376
377 cpumask_and(pmask, pmask, cs->effective_cpus);
378 rcu_read_unlock();
379 }
380
381 /*
382 * Return in *pmask the portion of a cpuset's mems_allowed that
383 * are online, with memory. If none are online with memory, walk
384 * up the cpuset hierarchy until we find one that does have some
385 * online mems. The top cpuset always has some mems online.
386 *
387 * One way or another, we guarantee to return some non-empty subset
388 * of node_states[N_MEMORY].
389 *
390 * Call with callback_lock or cpuset_mutex held.
391 */
392 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
393 {
394 while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY]))
395 cs = parent_cs(cs);
396 nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);
397 }
398
399 /**
400 * alloc_cpumasks - allocate cpumasks for a cpuset or a tmpmasks structure
401 * @cs: the cpuset that has cpumasks to be allocated.
402 * @tmp: the tmpmasks structure pointer
403 * Return: 0 if successful, -ENOMEM otherwise.
404 *
405 * Only one of the two input arguments should be non-NULL.
406 */
407 static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
408 {
409 cpumask_var_t *pmask1, *pmask2, *pmask3, *pmask4;
410
411 if (cs) {
412 pmask1 = &cs->cpus_allowed;
413 pmask2 = &cs->effective_cpus;
414 pmask3 = &cs->effective_xcpus;
415 pmask4 = &cs->exclusive_cpus;
416 } else {
417 pmask1 = &tmp->new_cpus;
418 pmask2 = &tmp->addmask;
419 pmask3 = &tmp->delmask;
420 pmask4 = NULL;
421 }
422
423 if (!zalloc_cpumask_var(pmask1, GFP_KERNEL))
424 return -ENOMEM;
425
426 if (!zalloc_cpumask_var(pmask2, GFP_KERNEL))
427 goto free_one;
428
429 if (!zalloc_cpumask_var(pmask3, GFP_KERNEL))
430 goto free_two;
431
432 if (pmask4 && !zalloc_cpumask_var(pmask4, GFP_KERNEL))
433 goto free_three;
434
435
436 return 0;
437
438 free_three:
439 free_cpumask_var(*pmask3);
440 free_two:
441 free_cpumask_var(*pmask2);
442 free_one:
443 free_cpumask_var(*pmask1);
444 return -ENOMEM;
445 }
446
447 /**
448 * free_cpumasks - free cpumasks of a cpuset and/or a tmpmasks structure
449 * @cs: the cpuset whose cpumasks are to be freed.
450 * @tmp: the tmpmasks structure pointer
451 */
452 static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
453 {
454 if (cs) {
455 free_cpumask_var(cs->cpus_allowed);
456 free_cpumask_var(cs->effective_cpus);
457 free_cpumask_var(cs->effective_xcpus);
458 free_cpumask_var(cs->exclusive_cpus);
459 }
460 if (tmp) {
461 free_cpumask_var(tmp->new_cpus);
462 free_cpumask_var(tmp->addmask);
463 free_cpumask_var(tmp->delmask);
464 }
465 }
466
467 /**
468 * alloc_trial_cpuset - allocate a trial cpuset
469 * @cs: the cpuset that the trial cpuset duplicates
470 */
471 static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
472 {
473 struct cpuset *trial;
474
475 trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
476 if (!trial)
477 return NULL;
478
479 if (alloc_cpumasks(trial, NULL)) {
480 kfree(trial);
481 return NULL;
482 }
483
484 cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
485 cpumask_copy(trial->effective_cpus, cs->effective_cpus);
486 cpumask_copy(trial->effective_xcpus, cs->effective_xcpus);
487 cpumask_copy(trial->exclusive_cpus, cs->exclusive_cpus);
488 return trial;
489 }
490
491 /**
492 * free_cpuset - free the cpuset
493 * @cs: the cpuset to be freed
494 */
495 static inline void free_cpuset(struct cpuset *cs)
496 {
497 free_cpumasks(cs, NULL);
498 kfree(cs);
499 }
500
501 /* Return user specified exclusive CPUs */
502 static inline struct cpumask *user_xcpus(struct cpuset *cs)
503 {
504 return cpumask_empty(cs->exclusive_cpus) ? cs->cpus_allowed
505 : cs->exclusive_cpus;
506 }
507
508 static inline bool xcpus_empty(struct cpuset *cs)
509 {
510 return cpumask_empty(cs->cpus_allowed) &&
511 cpumask_empty(cs->exclusive_cpus);
512 }
513
514 /*
515 * cpusets_are_exclusive() - check if two cpusets are exclusive
516 *
517 * Return true if exclusive, false if not
518 */
519 static inline bool cpusets_are_exclusive(struct cpuset *cs1, struct cpuset *cs2)
520 {
521 struct cpumask *xcpus1 = user_xcpus(cs1);
522 struct cpumask *xcpus2 = user_xcpus(cs2);
523
524 if (cpumask_intersects(xcpus1, xcpus2))
525 return false;
526 return true;
527 }
528
529 /*
530 * validate_change() - Used to validate that any proposed cpuset change
531 * follows the structural rules for cpusets.
532 *
533 * If we replaced the flag and mask values of the current cpuset
534 * (cur) with those values in the trial cpuset (trial), would
535 * our various subset and exclusive rules still be valid? Presumes
536 * cpuset_mutex held.
537 *
538 * 'cur' is the address of an actual, in-use cpuset. Operations
539 * such as list traversal that depend on the actual address of the
540 * cpuset in the list must use cur below, not trial.
541 *
542 * 'trial' is the address of bulk structure copy of cur, with
543 * perhaps one or more of the fields cpus_allowed, mems_allowed,
544 * or flags changed to new, trial values.
545 *
546 * Return 0 if valid, -errno if not.
547 */
548
549 static int validate_change(struct cpuset *cur, struct cpuset *trial)
550 {
551 struct cgroup_subsys_state *css;
552 struct cpuset *c, *par;
553 int ret = 0;
554
555 rcu_read_lock();
556
557 if (!is_in_v2_mode())
558 ret = cpuset1_validate_change(cur, trial);
559 if (ret)
560 goto out;
561
562 /* Remaining checks don't apply to root cpuset */
563 if (cur == &top_cpuset)
564 goto out;
565
566 par = parent_cs(cur);
567
568 /*
569 * Cpusets with tasks - existing or newly being attached - can't
570 * be changed to have empty cpus_allowed or mems_allowed.
571 */
572 ret = -ENOSPC;
573 if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) {
574 if (!cpumask_empty(cur->cpus_allowed) &&
575 cpumask_empty(trial->cpus_allowed))
576 goto out;
577 if (!nodes_empty(cur->mems_allowed) &&
578 nodes_empty(trial->mems_allowed))
579 goto out;
580 }
581
582 /*
583 * We can't shrink if we won't have enough room for SCHED_DEADLINE
584 * tasks. This check is not done when scheduling is disabled as the
585 * users should know what they are doing.
586 *
587 * For v1, effective_cpus == cpus_allowed & user_xcpus() returns
588 * cpus_allowed.
589 *
590 * For v2, is_cpu_exclusive() & is_sched_load_balance() are true only
591 * for non-isolated partition root. At this point, the target
592 * effective_cpus isn't computed yet. user_xcpus() is the best
593 * approximation.
594 *
595 * TBD: May need to precompute the real effective_cpus here in case
596 * incorrect scheduling of SCHED_DEADLINE tasks in a partition
597 * becomes an issue.
598 */
599 ret = -EBUSY;
600 if (is_cpu_exclusive(cur) && is_sched_load_balance(cur) &&
601 !cpuset_cpumask_can_shrink(cur->effective_cpus, user_xcpus(trial)))
602 goto out;
603
604 /*
605 * If either I or some sibling (!= me) is exclusive, we can't
606 * overlap. exclusive_cpus cannot overlap with each other if set.
607 */
608 ret = -EINVAL;
609 cpuset_for_each_child(c, css, par) {
610 bool txset, cxset; /* Are exclusive_cpus set? */
611
612 if (c == cur)
613 continue;
614
615 txset = !cpumask_empty(trial->exclusive_cpus);
616 cxset = !cpumask_empty(c->exclusive_cpus);
617 if (is_cpu_exclusive(trial) || is_cpu_exclusive(c) ||
618 (txset && cxset)) {
619 if (!cpusets_are_exclusive(trial, c))
620 goto out;
621 } else if (txset || cxset) {
622 struct cpumask *xcpus, *acpus;
623
624 /*
625 * When just one of the two exclusive_cpus masks is set,
626 * the cpus_allowed of the other cpuset, if set, cannot be
627 * a subset of it, or none of those CPUs will be
628 * available once these exclusive CPUs are activated.
629 */
630 if (txset) {
631 xcpus = trial->exclusive_cpus;
632 acpus = c->cpus_allowed;
633 } else {
634 xcpus = c->exclusive_cpus;
635 acpus = trial->cpus_allowed;
636 }
637 if (!cpumask_empty(acpus) && cpumask_subset(acpus, xcpus))
638 goto out;
639 }
640 if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
641 nodes_intersects(trial->mems_allowed, c->mems_allowed))
642 goto out;
643 }
644
645 ret = 0;
646 out:
647 rcu_read_unlock();
648 return ret;
649 }
650
651 #ifdef CONFIG_SMP
652 /*
653 * Helper routine for generate_sched_domains().
654 * Do cpusets a, b have overlapping effective cpus_allowed masks?
655 */
656 static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
657 {
658 return cpumask_intersects(a->effective_cpus, b->effective_cpus);
659 }
660
661 static void
662 update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
663 {
664 if (dattr->relax_domain_level < c->relax_domain_level)
665 dattr->relax_domain_level = c->relax_domain_level;
666 return;
667 }
668
669 static void update_domain_attr_tree(struct sched_domain_attr *dattr,
670 struct cpuset *root_cs)
671 {
672 struct cpuset *cp;
673 struct cgroup_subsys_state *pos_css;
674
675 rcu_read_lock();
676 cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
677 /* skip the whole subtree if @cp doesn't have any CPU */
678 if (cpumask_empty(cp->cpus_allowed)) {
679 pos_css = css_rightmost_descendant(pos_css);
680 continue;
681 }
682
683 if (is_sched_load_balance(cp))
684 update_domain_attr(dattr, cp);
685 }
686 rcu_read_unlock();
687 }
688
689 /* Must be called with cpuset_mutex held. */
690 static inline int nr_cpusets(void)
691 {
692 /* jump label reference count + the top-level cpuset */
693 return static_key_count(&cpusets_enabled_key.key) + 1;
694 }
695
696 /*
697 * generate_sched_domains()
698 *
699 * This function builds a partial partition of the system's CPUs.
700 * A 'partial partition' is a set of non-overlapping subsets of CPUs
701 * whose union is a subset of the system's CPUs.
702 * The output of this function needs to be passed to kernel/sched/core.c
703 * partition_sched_domains() routine, which will rebuild the scheduler's
704 * load balancing domains (sched domains) as specified by that partial
705 * partition.
706 *
707 * See "What is sched_load_balance" in Documentation/admin-guide/cgroup-v1/cpusets.rst
708 * for a background explanation of this.
709 *
710 * Does not return errors, on the theory that the callers of this
711 * routine would rather not worry about failures to rebuild sched
712 * domains when operating in the severe memory shortage situations
713 * that could cause allocation failures below.
714 *
715 * Must be called with cpuset_mutex held.
716 *
717 * The three key local variables below are:
718 * cp - cpuset pointer, used (together with pos_css) to perform a
719 * top-down scan of all cpusets. For our purposes, rebuilding
720 * the schedulers sched domains, we can ignore !is_sched_load_
721 * balance cpusets.
722 * csa - (for CpuSet Array) Array of pointers to all the cpusets
723 * that need to be load balanced, for convenient iterative
724 * access by the subsequent code that finds the best partition,
725 * i.e the set of domains (subsets) of CPUs such that the
726 * cpus_allowed of every cpuset marked is_sched_load_balance
727 * is a subset of one of these domains, while there are as
728 * many such domains as possible, each as small as possible.
729 * doms - Conversion of 'csa' to an array of cpumasks, for passing to
730 * the kernel/sched/core.c routine partition_sched_domains() in a
731 * convenient format, that can be easily compared to the prior
732 * value to determine what partition elements (sched domains)
733 * were changed (added or removed.)
734 *
735 * Finding the best partition (set of domains):
736 * The double nested loops below over i, j scan over the load
737 * balanced cpusets (using the array of cpuset pointers in csa[])
738 * looking for pairs of cpusets that have overlapping cpus_allowed
739 * and merging them using a union-find algorithm.
740 *
741 * The union of the cpus_allowed masks from the set of all cpusets
742 * having the same root then form the one element of the partition
743 * (one sched domain) to be passed to partition_sched_domains().
744 *
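 * A small worked example (illustrative): with load-balanced cpusets A
 * (cpus 0-3), B (cpus 2-5) and C (cpus 6-7), A and B overlap and are
 * merged into one sched domain {0-5}, while C forms its own domain
 * {6-7}, giving ndoms == 2.
 *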
745 */
746 static int generate_sched_domains(cpumask_var_t **domains,
747 struct sched_domain_attr **attributes)
748 {
749 struct cpuset *cp; /* top-down scan of cpusets */
750 struct cpuset **csa; /* array of all cpuset ptrs */
751 int csn; /* how many cpuset ptrs in csa so far */
752 int i, j; /* indices for partition finding loops */
753 cpumask_var_t *doms; /* resulting partition; i.e. sched domains */
754 struct sched_domain_attr *dattr; /* attributes for custom domains */
755 int ndoms = 0; /* number of sched domains in result */
756 int nslot; /* next empty doms[] struct cpumask slot */
757 struct cgroup_subsys_state *pos_css;
758 bool root_load_balance = is_sched_load_balance(&top_cpuset);
759 bool cgrpv2 = cpuset_v2();
760 int nslot_update;
761
762 doms = NULL;
763 dattr = NULL;
764 csa = NULL;
765
766 /* Special case for the 99% of systems with one, full, sched domain */
767 if (root_load_balance && cpumask_empty(subpartitions_cpus)) {
768 single_root_domain:
769 ndoms = 1;
770 doms = alloc_sched_domains(ndoms);
771 if (!doms)
772 goto done;
773
774 dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
775 if (dattr) {
776 *dattr = SD_ATTR_INIT;
777 update_domain_attr_tree(dattr, &top_cpuset);
778 }
779 cpumask_and(doms[0], top_cpuset.effective_cpus,
780 housekeeping_cpumask(HK_TYPE_DOMAIN));
781
782 goto done;
783 }
784
785 csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL);
786 if (!csa)
787 goto done;
788 csn = 0;
789
790 rcu_read_lock();
791 if (root_load_balance)
792 csa[csn++] = &top_cpuset;
793 cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
794 if (cp == &top_cpuset)
795 continue;
796
797 if (cgrpv2)
798 goto v2;
799
800 /*
801 * v1:
802 * Continue traversing beyond @cp iff @cp has some CPUs and
803 * isn't load balancing. The former is obvious. The
804 * latter: All child cpusets contain a subset of the
805 * parent's cpus, so just skip them, and then we call
806 * update_domain_attr_tree() to calc relax_domain_level of
807 * the corresponding sched domain.
808 */
809 if (!cpumask_empty(cp->cpus_allowed) &&
810 !(is_sched_load_balance(cp) &&
811 cpumask_intersects(cp->cpus_allowed,
812 housekeeping_cpumask(HK_TYPE_DOMAIN))))
813 continue;
814
815 if (is_sched_load_balance(cp) &&
816 !cpumask_empty(cp->effective_cpus))
817 csa[csn++] = cp;
818
819 /* skip @cp's subtree */
820 pos_css = css_rightmost_descendant(pos_css);
821 continue;
822
823 v2:
824 /*
825 * Only valid partition roots that are not isolated and have a
826 * non-empty effective_cpus will be saved into csa[].
827 */
828 if ((cp->partition_root_state == PRS_ROOT) &&
829 !cpumask_empty(cp->effective_cpus))
830 csa[csn++] = cp;
831
832 /*
833 * Skip @cp's subtree if it is not a partition root and has no
834 * exclusive CPUs to be granted to child cpusets.
835 */
836 if (!is_partition_valid(cp) && cpumask_empty(cp->exclusive_cpus))
837 pos_css = css_rightmost_descendant(pos_css);
838 }
839 rcu_read_unlock();
840
841 /*
842 * If there are only isolated partitions underneath the cgroup root,
843 * we can optimize out unneeded sched domains scanning.
844 */
845 if (root_load_balance && (csn == 1))
846 goto single_root_domain;
847
848 for (i = 0; i < csn; i++)
849 uf_node_init(&csa[i]->node);
850
851 /* Merge overlapping cpusets */
852 for (i = 0; i < csn; i++) {
853 for (j = i + 1; j < csn; j++) {
854 if (cpusets_overlap(csa[i], csa[j])) {
855 /*
856 * Cgroup v2 shouldn't pass down overlapping
857 * partition root cpusets.
858 */
859 WARN_ON_ONCE(cgrpv2);
860 uf_union(&csa[i]->node, &csa[j]->node);
861 }
862 }
863 }
864
865 /* Count the total number of domains */
866 for (i = 0; i < csn; i++) {
867 if (uf_find(&csa[i]->node) == &csa[i]->node)
868 ndoms++;
869 }
870
871 /*
872 * Now we know how many domains to create.
873 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
874 */
875 doms = alloc_sched_domains(ndoms);
876 if (!doms)
877 goto done;
878
879 /*
880 * The rest of the code, including the scheduler, can deal with
881 * dattr==NULL case. No need to abort if alloc fails.
882 */
883 dattr = kmalloc_array(ndoms, sizeof(struct sched_domain_attr),
884 GFP_KERNEL);
885
886 /*
887 * Cgroup v2 doesn't support domain attributes, just set all of them
888 * to SD_ATTR_INIT. Also non-isolating partition root CPUs are a
889 * subset of HK_TYPE_DOMAIN housekeeping CPUs.
890 */
891 if (cgrpv2) {
892 for (i = 0; i < ndoms; i++) {
893 cpumask_copy(doms[i], csa[i]->effective_cpus);
894 if (dattr)
895 dattr[i] = SD_ATTR_INIT;
896 }
897 goto done;
898 }
899
900 for (nslot = 0, i = 0; i < csn; i++) {
901 nslot_update = 0;
902 for (j = i; j < csn; j++) {
903 if (uf_find(&csa[j]->node) == &csa[i]->node) {
904 struct cpumask *dp = doms[nslot];
905
906 if (i == j) {
907 nslot_update = 1;
908 cpumask_clear(dp);
909 if (dattr)
910 *(dattr + nslot) = SD_ATTR_INIT;
911 }
912 cpumask_or(dp, dp, csa[j]->effective_cpus);
913 cpumask_and(dp, dp, housekeeping_cpumask(HK_TYPE_DOMAIN));
914 if (dattr)
915 update_domain_attr_tree(dattr + nslot, csa[j]);
916 }
917 }
918 if (nslot_update)
919 nslot++;
920 }
921 BUG_ON(nslot != ndoms);
922
923 done:
924 kfree(csa);
925
926 /*
927 * Fallback to the default domain if kmalloc() failed.
928 * See comments in partition_sched_domains().
929 */
930 if (doms == NULL)
931 ndoms = 1;
932
933 *domains = doms;
934 *attributes = dattr;
935 return ndoms;
936 }
937
938 static void dl_update_tasks_root_domain(struct cpuset *cs)
939 {
940 struct css_task_iter it;
941 struct task_struct *task;
942
943 if (cs->nr_deadline_tasks == 0)
944 return;
945
946 css_task_iter_start(&cs->css, 0, &it);
947
948 while ((task = css_task_iter_next(&it)))
949 dl_add_task_root_domain(task);
950
951 css_task_iter_end(&it);
952 }
953
954 static void dl_rebuild_rd_accounting(void)
955 {
956 struct cpuset *cs = NULL;
957 struct cgroup_subsys_state *pos_css;
958
959 lockdep_assert_held(&cpuset_mutex);
960 lockdep_assert_cpus_held();
961 lockdep_assert_held(&sched_domains_mutex);
962
963 rcu_read_lock();
964
965 /*
966 * Clear default root domain DL accounting, it will be computed again
967 * if a task belongs to it.
968 */
969 dl_clear_root_domain(&def_root_domain);
970
971 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
972
973 if (cpumask_empty(cs->effective_cpus)) {
974 pos_css = css_rightmost_descendant(pos_css);
975 continue;
976 }
977
978 css_get(&cs->css);
979
980 rcu_read_unlock();
981
982 dl_update_tasks_root_domain(cs);
983
984 rcu_read_lock();
985 css_put(&cs->css);
986 }
987 rcu_read_unlock();
988 }
989
990 static void
991 partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
992 struct sched_domain_attr *dattr_new)
993 {
994 mutex_lock(&sched_domains_mutex);
995 partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
996 dl_rebuild_rd_accounting();
997 mutex_unlock(&sched_domains_mutex);
998 }
999
1000 /*
1001 * Rebuild scheduler domains.
1002 *
1003 * If the flag 'sched_load_balance' of any cpuset with non-empty
1004 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
1005 * which has that flag enabled, or if any cpuset with a non-empty
1006 * 'cpus' is removed, then call this routine to rebuild the
1007 * scheduler's dynamic sched domains.
1008 *
1009 * Call with cpuset_mutex held and cpus_read_lock() already held.
1010 */
1011 void rebuild_sched_domains_locked(void)
1012 {
1013 struct cgroup_subsys_state *pos_css;
1014 struct sched_domain_attr *attr;
1015 cpumask_var_t *doms;
1016 struct cpuset *cs;
1017 int ndoms;
1018
1019 lockdep_assert_cpus_held();
1020 lockdep_assert_held(&cpuset_mutex);
1021 force_sd_rebuild = false;
1022
1023 /*
1024 * If we have raced with CPU hotplug, return early to avoid
1025 * passing doms with offlined cpu to partition_sched_domains().
1026 * Anyways, cpuset_handle_hotplug() will rebuild sched domains.
1027 *
1028 * With no CPUs in any subpartitions, top_cpuset's effective CPUs
1029 * should be the same as the active CPUs, so checking only top_cpuset
1030 * is enough to detect racing CPU offlines.
1031 */
1032 if (cpumask_empty(subpartitions_cpus) &&
1033 !cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
1034 return;
1035
1036 /*
1037 * With subpartition CPUs, however, the effective CPUs of a partition
1038 * root should be only a subset of the active CPUs. Since a CPU in any
1039 * partition root could be offlined, all must be checked.
1040 */
1041 if (!cpumask_empty(subpartitions_cpus)) {
1042 rcu_read_lock();
1043 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
1044 if (!is_partition_valid(cs)) {
1045 pos_css = css_rightmost_descendant(pos_css);
1046 continue;
1047 }
1048 if (!cpumask_subset(cs->effective_cpus,
1049 cpu_active_mask)) {
1050 rcu_read_unlock();
1051 return;
1052 }
1053 }
1054 rcu_read_unlock();
1055 }
1056
1057 /* Generate domain masks and attrs */
1058 ndoms = generate_sched_domains(&doms, &attr);
1059
1060 /* Have scheduler rebuild the domains */
1061 partition_and_rebuild_sched_domains(ndoms, doms, attr);
1062 }
1063 #else /* !CONFIG_SMP */
1064 void rebuild_sched_domains_locked(void)
1065 {
1066 }
1067 #endif /* CONFIG_SMP */
1068
1069 static void rebuild_sched_domains_cpuslocked(void)
1070 {
1071 mutex_lock(&cpuset_mutex);
1072 rebuild_sched_domains_locked();
1073 mutex_unlock(&cpuset_mutex);
1074 }
1075
1076 void rebuild_sched_domains(void)
1077 {
1078 cpus_read_lock();
1079 rebuild_sched_domains_cpuslocked();
1080 cpus_read_unlock();
1081 }
1082
1083 /**
1084 * cpuset_update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
1085 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
1086 * @new_cpus: the temp variable for the new effective_cpus mask
1087 *
1088 * Iterate through each task of @cs updating its cpus_allowed to the
1089 * effective cpuset's. As this function is called with cpuset_mutex held,
1090 * cpuset membership stays stable. For top_cpuset, task_cpu_possible_mask()
1091 * is used instead of effective_cpus to make sure all offline CPUs are also
1092 * included as hotplug code won't update cpumasks for tasks in top_cpuset.
1093 */
1094 void cpuset_update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
1095 {
1096 struct css_task_iter it;
1097 struct task_struct *task;
1098 bool top_cs = cs == &top_cpuset;
1099
1100 css_task_iter_start(&cs->css, 0, &it);
1101 while ((task = css_task_iter_next(&it))) {
1102 const struct cpumask *possible_mask = task_cpu_possible_mask(task);
1103
1104 if (top_cs) {
1105 /*
1106 * Percpu kthreads in top_cpuset are ignored
1107 */
1108 if (kthread_is_per_cpu(task))
1109 continue;
1110 cpumask_andnot(new_cpus, possible_mask, subpartitions_cpus);
1111 } else {
1112 cpumask_and(new_cpus, possible_mask, cs->effective_cpus);
1113 }
1114 set_cpus_allowed_ptr(task, new_cpus);
1115 }
1116 css_task_iter_end(&it);
1117 }
1118
1119 /**
1120 * compute_effective_cpumask - Compute the effective cpumask of the cpuset
1121 * @new_cpus: the temp variable for the new effective_cpus mask
1122 * @cs: the cpuset that needs its effective_cpus mask recomputed
1123 * @parent: the parent cpuset
1124 *
1125 * The result is valid only if the given cpuset isn't a partition root.
1126 */
1127 static void compute_effective_cpumask(struct cpumask *new_cpus,
1128 struct cpuset *cs, struct cpuset *parent)
1129 {
1130 cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus);
1131 }
1132
1133 /*
1134 * Commands for update_parent_effective_cpumask
1135 */
1136 enum partition_cmd {
1137 partcmd_enable, /* Enable partition root */
1138 partcmd_enablei, /* Enable isolated partition root */
1139 partcmd_disable, /* Disable partition root */
1140 partcmd_update, /* Update parent's effective_cpus */
1141 partcmd_invalidate, /* Make partition invalid */
1142 };
1143
1144 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
1145 struct tmpmasks *tmp);
1146
1147 /*
1148 * Update partition exclusive flag
1149 *
1150 * Return: 0 if successful, an error code otherwise
1151 */
1152 static int update_partition_exclusive(struct cpuset *cs, int new_prs)
1153 {
1154 bool exclusive = (new_prs > PRS_MEMBER);
1155
1156 if (exclusive && !is_cpu_exclusive(cs)) {
1157 if (cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, 1))
1158 return PERR_NOTEXCL;
1159 } else if (!exclusive && is_cpu_exclusive(cs)) {
1160 /* Turning off CS_CPU_EXCLUSIVE will not return error */
1161 cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, 0);
1162 }
1163 return 0;
1164 }
1165
1166 /*
1167 * Update partition load balance flag and/or rebuild sched domain
1168 *
1169 * Changing load balance flag will automatically call
1170 * rebuild_sched_domains_locked().
1171 * This function is for cgroup v2 only.
1172 */
1173 static void update_partition_sd_lb(struct cpuset *cs, int old_prs)
1174 {
1175 int new_prs = cs->partition_root_state;
1176 bool rebuild_domains = (new_prs > 0) || (old_prs > 0);
1177 bool new_lb;
1178
1179 /*
1180 * If cs is not a valid partition root, the load balance state
1181 * will follow its parent.
1182 */
1183 if (new_prs > 0) {
1184 new_lb = (new_prs != PRS_ISOLATED);
1185 } else {
1186 new_lb = is_sched_load_balance(parent_cs(cs));
1187 }
1188 if (new_lb != !!is_sched_load_balance(cs)) {
1189 rebuild_domains = true;
1190 if (new_lb)
1191 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1192 else
1193 clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1194 }
1195
1196 if (rebuild_domains)
1197 cpuset_force_rebuild();
1198 }
1199
1200 /*
1201 * tasks_nocpu_error - Return true if tasks will have no effective_cpus
1202 */
1203 static bool tasks_nocpu_error(struct cpuset *parent, struct cpuset *cs,
1204 struct cpumask *xcpus)
1205 {
1206 /*
1207 * A populated partition (cs or parent) can't have empty effective_cpus
1208 */
1209 return (cpumask_subset(parent->effective_cpus, xcpus) &&
1210 partition_is_populated(parent, cs)) ||
1211 (!cpumask_intersects(xcpus, cpu_active_mask) &&
1212 partition_is_populated(cs, NULL));
1213 }
1214
1215 static void reset_partition_data(struct cpuset *cs)
1216 {
1217 struct cpuset *parent = parent_cs(cs);
1218
1219 if (!cpuset_v2())
1220 return;
1221
1222 lockdep_assert_held(&callback_lock);
1223
1224 cs->nr_subparts = 0;
1225 if (cpumask_empty(cs->exclusive_cpus)) {
1226 cpumask_clear(cs->effective_xcpus);
1227 if (is_cpu_exclusive(cs))
1228 clear_bit(CS_CPU_EXCLUSIVE, &cs->flags);
1229 }
1230 if (!cpumask_and(cs->effective_cpus, parent->effective_cpus, cs->cpus_allowed))
1231 cpumask_copy(cs->effective_cpus, parent->effective_cpus);
1232 }
1233
1234 /*
1235 * partition_xcpus_newstate - Exclusive CPUs state change
1236 * @old_prs: old partition_root_state
1237 * @new_prs: new partition_root_state
1238 * @xcpus: exclusive CPUs with state change
1239 */
1240 static void partition_xcpus_newstate(int old_prs, int new_prs, struct cpumask *xcpus)
1241 {
1242 WARN_ON_ONCE(old_prs == new_prs);
1243 if (new_prs == PRS_ISOLATED)
1244 cpumask_or(isolated_cpus, isolated_cpus, xcpus);
1245 else
1246 cpumask_andnot(isolated_cpus, isolated_cpus, xcpus);
1247 }
1248
1249 /*
1250 * partition_xcpus_add - Add new exclusive CPUs to partition
1251 * @new_prs: new partition_root_state
1252 * @parent: parent cpuset
1253 * @xcpus: exclusive CPUs to be added
1254 * Return: true if isolated_cpus modified, false otherwise
1255 *
1256 * Remote partition if parent == NULL
1257 */
1258 static bool partition_xcpus_add(int new_prs, struct cpuset *parent,
1259 struct cpumask *xcpus)
1260 {
1261 bool isolcpus_updated;
1262
1263 WARN_ON_ONCE(new_prs < 0);
1264 lockdep_assert_held(&callback_lock);
1265 if (!parent)
1266 parent = &top_cpuset;
1267
1268
1269 if (parent == &top_cpuset)
1270 cpumask_or(subpartitions_cpus, subpartitions_cpus, xcpus);
1271
1272 isolcpus_updated = (new_prs != parent->partition_root_state);
1273 if (isolcpus_updated)
1274 partition_xcpus_newstate(parent->partition_root_state, new_prs,
1275 xcpus);
1276
1277 cpumask_andnot(parent->effective_cpus, parent->effective_cpus, xcpus);
1278 return isolcpus_updated;
1279 }
1280
1281 /*
1282 * partition_xcpus_del - Remove exclusive CPUs from partition
1283 * @old_prs: old partition_root_state
1284 * @parent: parent cpuset
1285 * @xcpus: exclusive CPUs to be removed
1286 * Return: true if isolated_cpus modified, false otherwise
1287 *
1288 * Remote partition if parent == NULL
1289 */
1290 static bool partition_xcpus_del(int old_prs, struct cpuset *parent,
1291 struct cpumask *xcpus)
1292 {
1293 bool isolcpus_updated;
1294
1295 WARN_ON_ONCE(old_prs < 0);
1296 lockdep_assert_held(&callback_lock);
1297 if (!parent)
1298 parent = &top_cpuset;
1299
1300 if (parent == &top_cpuset)
1301 cpumask_andnot(subpartitions_cpus, subpartitions_cpus, xcpus);
1302
1303 isolcpus_updated = (old_prs != parent->partition_root_state);
1304 if (isolcpus_updated)
1305 partition_xcpus_newstate(old_prs, parent->partition_root_state,
1306 xcpus);
1307
1308 cpumask_and(xcpus, xcpus, cpu_active_mask);
1309 cpumask_or(parent->effective_cpus, parent->effective_cpus, xcpus);
1310 return isolcpus_updated;
1311 }
1312
1313 static void update_unbound_workqueue_cpumask(bool isolcpus_updated)
1314 {
1315 int ret;
1316
1317 lockdep_assert_cpus_held();
1318
1319 if (!isolcpus_updated)
1320 return;
1321
1322 ret = workqueue_unbound_exclude_cpumask(isolated_cpus);
1323 WARN_ON_ONCE(ret < 0);
1324 }
1325
1326 /**
1327 * cpuset_cpu_is_isolated - Check if the given CPU is isolated
1328 * @cpu: the CPU number to be checked
1329 * Return: true if CPU is used in an isolated partition, false otherwise
1330 */
1331 bool cpuset_cpu_is_isolated(int cpu)
1332 {
1333 return cpumask_test_cpu(cpu, isolated_cpus);
1334 }
1335 EXPORT_SYMBOL_GPL(cpuset_cpu_is_isolated);
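/*
 * Illustrative (hypothetical) use by another subsystem that wants to keep
 * background work off isolated CPUs; the helper name is made up for this
 * example:
 *
 *	static int example_pick_housekeeping_cpu(void)
 *	{
 *		int cpu;
 *
 *		for_each_online_cpu(cpu)
 *			if (!cpuset_cpu_is_isolated(cpu))
 *				return cpu;
 *		return -ENODEV;
 *	}
 */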
1336
1337 /*
1338 * compute_effective_exclusive_cpumask - compute effective exclusive CPUs
1339 * @cs: cpuset
1340 * @xcpus: effective exclusive CPUs value to be set
1341 * Return: true if xcpus is not empty, false otherwise.
1342 *
1343 * Starting with exclusive_cpus (cpus_allowed if exclusive_cpus is not set),
1344 * it must be a subset of parent's effective_xcpus.
1345 */
1346 static bool compute_effective_exclusive_cpumask(struct cpuset *cs,
1347 struct cpumask *xcpus)
1348 {
1349 struct cpuset *parent = parent_cs(cs);
1350
1351 if (!xcpus)
1352 xcpus = cs->effective_xcpus;
1353
1354 return cpumask_and(xcpus, user_xcpus(cs), parent->effective_xcpus);
1355 }
1356
1357 static inline bool is_remote_partition(struct cpuset *cs)
1358 {
1359 return !list_empty(&cs->remote_sibling);
1360 }
1361
1362 static inline bool is_local_partition(struct cpuset *cs)
1363 {
1364 return is_partition_valid(cs) && !is_remote_partition(cs);
1365 }
1366
1367 /*
1368 * remote_partition_enable - Enable current cpuset as a remote partition root
1369 * @cs: the cpuset to update
1370 * @new_prs: new partition_root_state
1371 * @tmp: temporary masks
1372 * Return: 0 if successful, errcode if error
1373 *
1374 * Enable the current cpuset to become a remote partition root taking CPUs
1375 * directly from the top cpuset. cpuset_mutex must be held by the caller.
1376 */
1377 static int remote_partition_enable(struct cpuset *cs, int new_prs,
1378 struct tmpmasks *tmp)
1379 {
1380 bool isolcpus_updated;
1381
1382 /*
1383 * The user must have sysadmin privilege.
1384 */
1385 if (!capable(CAP_SYS_ADMIN))
1386 return PERR_ACCESS;
1387
1388 /*
1389 * The requested exclusive_cpus must not be allocated to other
1390 * partitions and it can't use up all the root's effective_cpus.
1391 *
1392 * Note that if there is any local partition root above it or
1393 * remote partition root underneath it, its exclusive_cpus must
1394 * have overlapped with subpartitions_cpus.
1395 */
1396 compute_effective_exclusive_cpumask(cs, tmp->new_cpus);
1397 if (cpumask_empty(tmp->new_cpus) ||
1398 cpumask_intersects(tmp->new_cpus, subpartitions_cpus) ||
1399 cpumask_subset(top_cpuset.effective_cpus, tmp->new_cpus))
1400 return PERR_INVCPUS;
1401
1402 spin_lock_irq(&callback_lock);
1403 isolcpus_updated = partition_xcpus_add(new_prs, NULL, tmp->new_cpus);
1404 list_add(&cs->remote_sibling, &remote_children);
1405 spin_unlock_irq(&callback_lock);
1406 update_unbound_workqueue_cpumask(isolcpus_updated);
1407
1408 /*
1409 * Propagate changes in top_cpuset's effective_cpus down the hierarchy.
1410 */
1411 cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
1412 update_sibling_cpumasks(&top_cpuset, NULL, tmp);
1413 return 0;
1414 }
1415
1416 /*
1417 * remote_partition_disable - Remove current cpuset from remote partition list
1418 * @cs: the cpuset to update
1419 * @tmp: temporary masks
1420 *
1421 * The effective_cpus is also updated.
1422 *
1423 * cpuset_mutex must be held by the caller.
1424 */
1425 static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp)
1426 {
1427 bool isolcpus_updated;
1428
1429 compute_effective_exclusive_cpumask(cs, tmp->new_cpus);
1430 WARN_ON_ONCE(!is_remote_partition(cs));
1431 WARN_ON_ONCE(!cpumask_subset(tmp->new_cpus, subpartitions_cpus));
1432
1433 spin_lock_irq(&callback_lock);
1434 list_del_init(&cs->remote_sibling);
1435 isolcpus_updated = partition_xcpus_del(cs->partition_root_state,
1436 NULL, tmp->new_cpus);
1437 cs->partition_root_state = -cs->partition_root_state;
1438 if (!cs->prs_err)
1439 cs->prs_err = PERR_INVCPUS;
1440 reset_partition_data(cs);
1441 spin_unlock_irq(&callback_lock);
1442 update_unbound_workqueue_cpumask(isolcpus_updated);
1443
1444 /*
1445 * Propagate changes in top_cpuset's effective_cpus down the hierarchy.
1446 */
1447 cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
1448 update_sibling_cpumasks(&top_cpuset, NULL, tmp);
1449 }
1450
1451 /*
1452 * remote_cpus_update - cpus_exclusive change of remote partition
1453 * @cs: the cpuset to be updated
1454 * @newmask: the new effective_xcpus mask
1455 * @tmp: temporary masks
1456 *
1457 * top_cpuset and subpartitions_cpus will be updated or partition can be
1458 * invalidated.
1459 */
1460 static void remote_cpus_update(struct cpuset *cs, struct cpumask *newmask,
1461 struct tmpmasks *tmp)
1462 {
1463 bool adding, deleting;
1464 int prs = cs->partition_root_state;
1465 int isolcpus_updated = 0;
1466
1467 if (WARN_ON_ONCE(!is_remote_partition(cs)))
1468 return;
1469
1470 WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus));
1471
1472 if (cpumask_empty(newmask))
1473 goto invalidate;
1474
1475 adding = cpumask_andnot(tmp->addmask, newmask, cs->effective_xcpus);
1476 deleting = cpumask_andnot(tmp->delmask, cs->effective_xcpus, newmask);
1477
1478 /*
1479 * Addition of remote CPUs is only allowed if those CPUs are
1480 * not allocated to other partitions and there are effective_cpus
1481 * left in the top cpuset.
1482 */
1483 if (adding && (!capable(CAP_SYS_ADMIN) ||
1484 cpumask_intersects(tmp->addmask, subpartitions_cpus) ||
1485 cpumask_subset(top_cpuset.effective_cpus, tmp->addmask)))
1486 goto invalidate;
1487
1488 spin_lock_irq(&callback_lock);
1489 if (adding)
1490 isolcpus_updated += partition_xcpus_add(prs, NULL, tmp->addmask);
1491 if (deleting)
1492 isolcpus_updated += partition_xcpus_del(prs, NULL, tmp->delmask);
1493 spin_unlock_irq(&callback_lock);
1494 update_unbound_workqueue_cpumask(isolcpus_updated);
1495
1496 /*
1497 * Propagate changes in top_cpuset's effective_cpus down the hierarchy.
1498 */
1499 cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
1500 update_sibling_cpumasks(&top_cpuset, NULL, tmp);
1501 return;
1502
1503 invalidate:
1504 remote_partition_disable(cs, tmp);
1505 }
1506
1507 /*
1508 * remote_partition_check - check if a child remote partition needs update
1509 * @cs: the cpuset to be updated
1510 * @newmask: the new effective_xcpus mask
1511 * @delmask: temporary mask for deletion (not in tmp)
1512 * @tmp: temporary masks
1513 *
1514 * This should be called before the given cs has updated its cpus_allowed
1515 * and/or effective_xcpus.
1516 */
1517 static void remote_partition_check(struct cpuset *cs, struct cpumask *newmask,
1518 struct cpumask *delmask, struct tmpmasks *tmp)
1519 {
1520 struct cpuset *child, *next;
1521 int disable_cnt = 0;
1522
1523 /*
1524 * Compute the effective exclusive CPUs that will be deleted.
1525 */
1526 if (!cpumask_andnot(delmask, cs->effective_xcpus, newmask) ||
1527 !cpumask_intersects(delmask, subpartitions_cpus))
1528 return; /* No deletion of exclusive CPUs in partitions */
1529
1530 /*
1531 * Search the remote children list for those that will be impacted
1532 * by the deletion of exclusive CPUs.
1533 *
1534 * Since a cpuset must be removed from the remote children list
1535 * before it can go offline, and holding cpuset_mutex prevents any
1536 * change in cpuset status, the RCU read lock isn't needed.
1537 */
1538 lockdep_assert_held(&cpuset_mutex);
1539 list_for_each_entry_safe(child, next, &remote_children, remote_sibling)
1540 if (cpumask_intersects(child->effective_cpus, delmask)) {
1541 remote_partition_disable(child, tmp);
1542 disable_cnt++;
1543 }
1544 if (disable_cnt)
1545 cpuset_force_rebuild();
1546 }
1547
1548 /*
1549 * prstate_housekeeping_conflict - check for partition & housekeeping conflicts
1550 * @prstate: partition root state to be checked
1551 * @new_cpus: cpu mask
1552 * Return: true if there is conflict, false otherwise
1553 *
1554 * CPUs outside of boot_hk_cpus, if defined, can only be used in an
1555 * isolated partition.
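 * For example (illustrative), booting with "isolcpus=domain,8-15" leaves
 * CPUs 8-15 outside boot_hk_cpus, so those CPUs can then only be placed
 * in an isolated partition.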
1556 */
1557 static bool prstate_housekeeping_conflict(int prstate, struct cpumask *new_cpus)
1558 {
1559 if (!have_boot_isolcpus)
1560 return false;
1561
1562 if ((prstate != PRS_ISOLATED) && !cpumask_subset(new_cpus, boot_hk_cpus))
1563 return true;
1564
1565 return false;
1566 }
1567
1568 /**
1569 * update_parent_effective_cpumask - update effective_cpus mask of parent cpuset
1570 * @cs: The cpuset that requests change in partition root state
1571 * @cmd: Partition root state change command
1572 * @newmask: Optional new cpumask for partcmd_update
1573 * @tmp: Temporary addmask and delmask
1574 * Return: 0 or a partition root state error code
1575 *
1576 * For partcmd_enable*, the cpuset is being transformed from a non-partition
1577 * root to a partition root. The effective_xcpus (cpus_allowed if
1578 * effective_xcpus not set) mask of the given cpuset will be taken away from
1579 * parent's effective_cpus. The function will return 0 if all the CPUs listed
1580 * in effective_xcpus can be granted or an error code will be returned.
1581 *
1582 * For partcmd_disable, the cpuset is being transformed from a partition
1583 * root back to a non-partition root. Any CPUs in effective_xcpus will be
1584 * given back to parent's effective_cpus. 0 will always be returned.
1585 *
1586 * For partcmd_update, if the optional newmask is specified, the cpu list is
1587 * to be changed from effective_xcpus to newmask. Otherwise, effective_xcpus is
1588 * assumed to remain the same. The cpuset should either be a valid or invalid
1589 * partition root. The partition root state may change from valid to invalid
1590 * or vice versa. An error code will be returned if transitioning from
1591 * invalid to valid violates the exclusivity rule.
1592 *
1593 * For partcmd_invalidate, the current partition will be made invalid.
1594 *
1595 * The partcmd_enable* and partcmd_disable commands are used by
1596 * update_prstate(). An error code may be returned and the caller will check
1597 * for error.
1598 *
1599 * The partcmd_update command is used by update_cpumasks_hier() with newmask
1600 * NULL and update_cpumask() with newmask set. The partcmd_invalidate is used
1601 * by update_cpumask() with NULL newmask. In both cases, the callers won't
1602 * check for error and so partition_root_state and prs_error will be updated
1603 * directly.
1604 */
1605 static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
1606 struct cpumask *newmask,
1607 struct tmpmasks *tmp)
1608 {
1609 struct cpuset *parent = parent_cs(cs);
1610 int adding; /* Adding cpus to parent's effective_cpus */
1611 int deleting; /* Deleting cpus from parent's effective_cpus */
1612 int old_prs, new_prs;
1613 int part_error = PERR_NONE; /* Partition error? */
1614 int subparts_delta = 0;
1615 struct cpumask *xcpus; /* cs effective_xcpus */
1616 int isolcpus_updated = 0;
1617 bool nocpu;
1618
1619 lockdep_assert_held(&cpuset_mutex);
1620
1621 /*
1622 * new_prs will only be changed for the partcmd_update and
1623 * partcmd_invalidate commands.
1624 */
1625 adding = deleting = false;
1626 old_prs = new_prs = cs->partition_root_state;
1627 xcpus = user_xcpus(cs);
1628
1629 if (cmd == partcmd_invalidate) {
1630 if (is_prs_invalid(old_prs))
1631 return 0;
1632
1633 /*
1634 * Make the current partition invalid.
1635 */
1636 if (is_partition_valid(parent))
1637 adding = cpumask_and(tmp->addmask,
1638 xcpus, parent->effective_xcpus);
1639 if (old_prs > 0) {
1640 new_prs = -old_prs;
1641 subparts_delta--;
1642 }
1643 goto write_error;
1644 }
1645
1646 /*
1647 * The parent must be a partition root.
1648 * The new cpumask, if present, or the current cpus_allowed must
1649 * not be empty.
1650 */
1651 if (!is_partition_valid(parent)) {
1652 return is_partition_invalid(parent)
1653 ? PERR_INVPARENT : PERR_NOTPART;
1654 }
1655 if (!newmask && xcpus_empty(cs))
1656 return PERR_CPUSEMPTY;
1657
1658 nocpu = tasks_nocpu_error(parent, cs, xcpus);
1659
1660 if ((cmd == partcmd_enable) || (cmd == partcmd_enablei)) {
1661 /*
1662 * Enabling partition root is not allowed if its
1663 * effective_xcpus is empty or doesn't overlap with
1664 * parent's effective_xcpus.
1665 */
1666 if (cpumask_empty(xcpus) ||
1667 !cpumask_intersects(xcpus, parent->effective_xcpus))
1668 return PERR_INVCPUS;
1669
1670 if (prstate_housekeeping_conflict(new_prs, xcpus))
1671 return PERR_HKEEPING;
1672
1673 /*
1674 * A parent can be left with no CPU as long as there is no
1675 * task directly associated with the parent partition.
1676 */
1677 if (nocpu)
1678 return PERR_NOCPUS;
1679
1680 cpumask_copy(tmp->delmask, xcpus);
1681 deleting = true;
1682 subparts_delta++;
1683 new_prs = (cmd == partcmd_enable) ? PRS_ROOT : PRS_ISOLATED;
1684 } else if (cmd == partcmd_disable) {
1685 /*
1686 * May need to add cpus to parent's effective_cpus for
1687 * valid partition root.
1688 */
1689 adding = !is_prs_invalid(old_prs) &&
1690 cpumask_and(tmp->addmask, xcpus, parent->effective_xcpus);
1691 if (adding)
1692 subparts_delta--;
1693 new_prs = PRS_MEMBER;
1694 } else if (newmask) {
1695 /*
1696 * Empty cpumask is not allowed
1697 */
1698 if (cpumask_empty(newmask)) {
1699 part_error = PERR_CPUSEMPTY;
1700 goto write_error;
1701 }
1702 /* Check newmask again, whether cpus are available for parent/cs */
1703 nocpu |= tasks_nocpu_error(parent, cs, newmask);
1704
1705 /*
1706 * partcmd_update with newmask:
1707 *
1708 * Compute add/delete mask to/from effective_cpus
1709 *
1710 * For valid partition:
1711 * addmask = exclusive_cpus & ~newmask
1712 * & parent->effective_xcpus
1713 * delmask = newmask & ~exclusive_cpus
1714 * & parent->effective_xcpus
1715 *
1716 * For invalid partition:
1717 * delmask = newmask & parent->effective_xcpus
1718 */
1719 if (is_prs_invalid(old_prs)) {
1720 adding = false;
1721 deleting = cpumask_and(tmp->delmask,
1722 newmask, parent->effective_xcpus);
1723 } else {
1724 cpumask_andnot(tmp->addmask, xcpus, newmask);
1725 adding = cpumask_and(tmp->addmask, tmp->addmask,
1726 parent->effective_xcpus);
1727
1728 cpumask_andnot(tmp->delmask, newmask, xcpus);
1729 deleting = cpumask_and(tmp->delmask, tmp->delmask,
1730 parent->effective_xcpus);
1731 }
1732 /*
1733 * Make partition invalid if parent's effective_cpus could
1734 * become empty and there are tasks in the parent.
1735 */
1736 if (nocpu && (!adding ||
1737 !cpumask_intersects(tmp->addmask, cpu_active_mask))) {
1738 part_error = PERR_NOCPUS;
1739 deleting = false;
1740 adding = cpumask_and(tmp->addmask,
1741 xcpus, parent->effective_xcpus);
1742 }
1743 } else {
1744 /*
1745 * partcmd_update w/o newmask
1746 *
1747 * delmask = effective_xcpus & parent->effective_cpus
1748 *
1749 * This can be called from:
1750 * 1) update_cpumasks_hier()
1751 * 2) cpuset_hotplug_update_tasks()
1752 *
1753 * Check to see if it can be transitioned from valid to
1754 * invalid partition or vice versa.
1755 *
1756 * A partition error happens when parent has tasks and all
1757 * its effective CPUs will have to be distributed out.
1758 */
1759 WARN_ON_ONCE(!is_partition_valid(parent));
1760 if (nocpu) {
1761 part_error = PERR_NOCPUS;
1762 if (is_partition_valid(cs))
1763 adding = cpumask_and(tmp->addmask,
1764 xcpus, parent->effective_xcpus);
1765 } else if (is_partition_invalid(cs) &&
1766 cpumask_subset(xcpus, parent->effective_xcpus)) {
1767 struct cgroup_subsys_state *css;
1768 struct cpuset *child;
1769 bool exclusive = true;
1770
1771 /*
1772 * Converting an invalid partition to a valid one has
1773 * to pass the cpu exclusivity test.
1774 */
1775 rcu_read_lock();
1776 cpuset_for_each_child(child, css, parent) {
1777 if (child == cs)
1778 continue;
1779 if (!cpusets_are_exclusive(cs, child)) {
1780 exclusive = false;
1781 break;
1782 }
1783 }
1784 rcu_read_unlock();
1785 if (exclusive)
1786 deleting = cpumask_and(tmp->delmask,
1787 xcpus, parent->effective_cpus);
1788 else
1789 part_error = PERR_NOTEXCL;
1790 }
1791 }
1792
1793 write_error:
1794 if (part_error)
1795 WRITE_ONCE(cs->prs_err, part_error);
1796
1797 if (cmd == partcmd_update) {
1798 /*
1799 * Check for possible transition between valid and invalid
1800 * partition root.
1801 */
1802 switch (cs->partition_root_state) {
1803 case PRS_ROOT:
1804 case PRS_ISOLATED:
1805 if (part_error) {
1806 new_prs = -old_prs;
1807 subparts_delta--;
1808 }
1809 break;
1810 case PRS_INVALID_ROOT:
1811 case PRS_INVALID_ISOLATED:
1812 if (!part_error) {
1813 new_prs = -old_prs;
1814 subparts_delta++;
1815 }
1816 break;
1817 }
1818 }
1819
1820 if (!adding && !deleting && (new_prs == old_prs))
1821 return 0;
1822
1823 /*
1824 * Transitioning from invalid to valid or vice versa may require
1825 * changing CS_CPU_EXCLUSIVE. In the case of partcmd_update,
1826 * validate_change() has already been successfully called and
1827 * CPU lists in cs haven't been updated yet. So defer it to later.
1828 */
1829 if ((old_prs != new_prs) && (cmd != partcmd_update)) {
1830 int err = update_partition_exclusive(cs, new_prs);
1831
1832 if (err)
1833 return err;
1834 }
1835
1836 /*
1837 * Change the parent's effective_cpus & effective_xcpus (top cpuset
1838 * only).
1839 *
1840 * Newly added CPUs will be removed from effective_cpus and
1841 * newly deleted ones will be added back to effective_cpus.
1842 */
1843 spin_lock_irq(&callback_lock);
1844 if (old_prs != new_prs) {
1845 cs->partition_root_state = new_prs;
1846 if (new_prs <= 0)
1847 cs->nr_subparts = 0;
1848 }
1849 /*
1850 * Adding CPUs to parent's effective_cpus means deleting them from cs
1851 * and vice versa.
1852 */
1853 if (adding)
1854 isolcpus_updated += partition_xcpus_del(old_prs, parent,
1855 tmp->addmask);
1856 if (deleting)
1857 isolcpus_updated += partition_xcpus_add(new_prs, parent,
1858 tmp->delmask);
1859
1860 if (is_partition_valid(parent)) {
1861 parent->nr_subparts += subparts_delta;
1862 WARN_ON_ONCE(parent->nr_subparts < 0);
1863 }
1864 spin_unlock_irq(&callback_lock);
1865 update_unbound_workqueue_cpumask(isolcpus_updated);
1866
1867 if ((old_prs != new_prs) && (cmd == partcmd_update))
1868 update_partition_exclusive(cs, new_prs);
1869
1870 if (adding || deleting) {
1871 cpuset_update_tasks_cpumask(parent, tmp->addmask);
1872 update_sibling_cpumasks(parent, cs, tmp);
1873 }
1874
1875 /*
1876 * For partcmd_update without newmask, it is being called from
1877 * cpuset_handle_hotplug(). Update the load balance flag and
1878 * scheduling domain accordingly.
1879 */
1880 if ((cmd == partcmd_update) && !newmask)
1881 update_partition_sd_lb(cs, old_prs);
1882
1883 notify_partition_change(cs, old_prs);
1884 return 0;
1885 }
1886
1887 /**
1888 * compute_partition_effective_cpumask - compute effective_cpus for partition
1889 * @cs: partition root cpuset
1890 * @new_ecpus: previously computed effective_cpus to be updated
1891 *
1892 * Compute the effective_cpus of a partition root by scanning effective_xcpus
1893 * of child partition roots and excluding their effective_xcpus.
1894 *
1895 * This has the side effect of invalidating valid child partition roots,
1896 * if necessary. Since it is called from either cpuset_hotplug_update_tasks()
1897 * or update_cpumasks_hier() where parent and children are modified
1898 * successively, we don't need to call update_parent_effective_cpumask()
1899 * and the child's effective_cpus will be updated in later iterations.
1900 *
1901 * Note that rcu_read_lock() is assumed to be held.
1902 */
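/*
 * A worked example (illustrative values only): if cs->effective_xcpus is
 * 0-7, all of them are active, and one valid child partition owns
 * effective_xcpus 2-3, then new_ecpus ends up as 0-1,4-7. If a child's
 * exclusive CPUs would instead cover all of new_ecpus while cs has tasks,
 * that child is invalidated below rather than leaving cs without CPUs.
 */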
1903 static void compute_partition_effective_cpumask(struct cpuset *cs,
1904 struct cpumask *new_ecpus)
1905 {
1906 struct cgroup_subsys_state *css;
1907 struct cpuset *child;
1908 bool populated = partition_is_populated(cs, NULL);
1909
1910 /*
1911 * Check child partition roots to see if they should be
1912 * invalidated when
1913 * 1) child effective_xcpus is not a subset of the new
1914 * exclusive_cpus
1915 * 2) all the effective_cpus will be used up and cp
1916 * has tasks
1917 */
1918 compute_effective_exclusive_cpumask(cs, new_ecpus);
1919 cpumask_and(new_ecpus, new_ecpus, cpu_active_mask);
1920
1921 rcu_read_lock();
1922 cpuset_for_each_child(child, css, cs) {
1923 if (!is_partition_valid(child))
1924 continue;
1925
1926 child->prs_err = 0;
1927 if (!cpumask_subset(child->effective_xcpus,
1928 cs->effective_xcpus))
1929 child->prs_err = PERR_INVCPUS;
1930 else if (populated &&
1931 cpumask_subset(new_ecpus, child->effective_xcpus))
1932 child->prs_err = PERR_NOCPUS;
1933
1934 if (child->prs_err) {
1935 int old_prs = child->partition_root_state;
1936
1937 /*
1938 * Invalidate child partition
1939 */
1940 spin_lock_irq(&callback_lock);
1941 make_partition_invalid(child);
1942 cs->nr_subparts--;
1943 child->nr_subparts = 0;
1944 spin_unlock_irq(&callback_lock);
1945 notify_partition_change(child, old_prs);
1946 continue;
1947 }
1948 cpumask_andnot(new_ecpus, new_ecpus,
1949 child->effective_xcpus);
1950 }
1951 rcu_read_unlock();
1952 }
1953
1954 /*
1955 * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
1956 * @cs: the cpuset to consider
1957 * @tmp: temp variables for calculating effective_cpus & partition setup
1958 * @force: don't skip any descendant cpusets if set
1959 *
1960 * When configured cpumask is changed, the effective cpumasks of this cpuset
1961 * and all its descendants need to be updated.
1962 *
1963 * On legacy hierarchy, effective_cpus will be the same as cpus_allowed.
1964 *
1965 * Called with cpuset_mutex held
1966 */
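/*
 * Rough sketch of the per-cpuset computation done below (corner cases
 * omitted): for a non-partition cpuset cp,
 *
 *   effective_cpus(cp) ~= cpus_allowed(cp) & effective_cpus(parent)
 *
 * with an empty result falling back to the parent's effective_cpus on the
 * default hierarchy. Valid partition roots instead derive effective_cpus
 * from their exclusive CPUs via compute_partition_effective_cpumask().
 */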
1967 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
1968 bool force)
1969 {
1970 struct cpuset *cp;
1971 struct cgroup_subsys_state *pos_css;
1972 bool need_rebuild_sched_domains = false;
1973 int old_prs, new_prs;
1974
1975 rcu_read_lock();
1976 cpuset_for_each_descendant_pre(cp, pos_css, cs) {
1977 struct cpuset *parent = parent_cs(cp);
1978 bool remote = is_remote_partition(cp);
1979 bool update_parent = false;
1980
1981 /*
1982 * Skip a descendant remote partition that acquires CPUs
1983 * directly from the top cpuset unless it is cs.
1984 */
1985 if (remote && (cp != cs)) {
1986 pos_css = css_rightmost_descendant(pos_css);
1987 continue;
1988 }
1989
1990 /*
1991 * Update effective_xcpus if exclusive_cpus set.
1992 * The case when exclusive_cpus isn't set is handled later.
1993 */
1994 if (!cpumask_empty(cp->exclusive_cpus) && (cp != cs)) {
1995 spin_lock_irq(&callback_lock);
1996 compute_effective_exclusive_cpumask(cp, NULL);
1997 spin_unlock_irq(&callback_lock);
1998 }
1999
2000 old_prs = new_prs = cp->partition_root_state;
2001 if (remote || (is_partition_valid(parent) &&
2002 is_partition_valid(cp)))
2003 compute_partition_effective_cpumask(cp, tmp->new_cpus);
2004 else
2005 compute_effective_cpumask(tmp->new_cpus, cp, parent);
2006
2007 /*
2008 * A partition with no effective_cpus is allowed as long as
2009 * there is no task associated with it. Call
2010 * update_parent_effective_cpumask() to check it.
2011 */
2012 if (is_partition_valid(cp) && cpumask_empty(tmp->new_cpus)) {
2013 update_parent = true;
2014 goto update_parent_effective;
2015 }
2016
2017 /*
2018 * If it becomes empty, inherit the effective mask of the
2019 * parent, which is guaranteed to have some CPUs unless
2020 * it is a partition root that has explicitly distributed
2021 * out all its CPUs.
2022 */
2023 if (is_in_v2_mode() && !remote && cpumask_empty(tmp->new_cpus))
2024 cpumask_copy(tmp->new_cpus, parent->effective_cpus);
2025
2026 if (remote)
2027 goto get_css;
2028
2029 /*
2030 * Skip the whole subtree if
2031 * 1) the cpumask remains the same,
2032 * 2) has no partition root state,
2033 * 3) force flag not set, and
2034 * 4) for v2 load balance state same as its parent.
2035 */
2036 if (!cp->partition_root_state && !force &&
2037 cpumask_equal(tmp->new_cpus, cp->effective_cpus) &&
2038 (!cpuset_v2() ||
2039 (is_sched_load_balance(parent) == is_sched_load_balance(cp)))) {
2040 pos_css = css_rightmost_descendant(pos_css);
2041 continue;
2042 }
2043
2044 update_parent_effective:
2045 /*
2046 * update_parent_effective_cpumask() should have been called
2047 * for cs already in update_cpumask(). We should also call
2048 * cpuset_update_tasks_cpumask() again for tasks in the parent
2049 * cpuset if the parent's effective_cpus changes.
2050 */
2051 if ((cp != cs) && old_prs) {
2052 switch (parent->partition_root_state) {
2053 case PRS_ROOT:
2054 case PRS_ISOLATED:
2055 update_parent = true;
2056 break;
2057
2058 default:
2059 /*
2060 * When parent is not a partition root or is
2061 * invalid, child partition roots become
2062 * invalid too.
2063 */
2064 if (is_partition_valid(cp))
2065 new_prs = -cp->partition_root_state;
2066 WRITE_ONCE(cp->prs_err,
2067 is_partition_invalid(parent)
2068 ? PERR_INVPARENT : PERR_NOTPART);
2069 break;
2070 }
2071 }
2072 get_css:
2073 if (!css_tryget_online(&cp->css))
2074 continue;
2075 rcu_read_unlock();
2076
2077 if (update_parent) {
2078 update_parent_effective_cpumask(cp, partcmd_update, NULL, tmp);
2079 /*
2080 * The cpuset partition_root_state may become
2081 * invalid. Capture it.
2082 */
2083 new_prs = cp->partition_root_state;
2084 }
2085
2086 spin_lock_irq(&callback_lock);
2087 cpumask_copy(cp->effective_cpus, tmp->new_cpus);
2088 cp->partition_root_state = new_prs;
2089 /*
2090 * Make sure effective_xcpus is properly set for a valid
2091 * partition root.
2092 */
2093 if ((new_prs > 0) && cpumask_empty(cp->exclusive_cpus))
2094 cpumask_and(cp->effective_xcpus,
2095 cp->cpus_allowed, parent->effective_xcpus);
2096 else if (new_prs < 0)
2097 reset_partition_data(cp);
2098 spin_unlock_irq(&callback_lock);
2099
2100 notify_partition_change(cp, old_prs);
2101
2102 WARN_ON(!is_in_v2_mode() &&
2103 !cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
2104
2105 cpuset_update_tasks_cpumask(cp, cp->effective_cpus);
2106
2107 /*
2108 * On default hierarchy, inherit the CS_SCHED_LOAD_BALANCE
2109 * from parent if current cpuset isn't a valid partition root
2110 * and their load balance states differ.
2111 */
2112 if (cpuset_v2() && !is_partition_valid(cp) &&
2113 (is_sched_load_balance(parent) != is_sched_load_balance(cp))) {
2114 if (is_sched_load_balance(parent))
2115 set_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
2116 else
2117 clear_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
2118 }
2119
2120 /*
2121 * On legacy hierarchy, if the effective cpumask of any non-
2122 * empty cpuset is changed, we need to rebuild sched domains.
2123 * On default hierarchy, the cpuset needs to be a partition
2124 * root as well.
2125 */
2126 if (!cpumask_empty(cp->cpus_allowed) &&
2127 is_sched_load_balance(cp) &&
2128 (!cpuset_v2() || is_partition_valid(cp)))
2129 need_rebuild_sched_domains = true;
2130
2131 rcu_read_lock();
2132 css_put(&cp->css);
2133 }
2134 rcu_read_unlock();
2135
2136 if (need_rebuild_sched_domains)
2137 cpuset_force_rebuild();
2138 }
2139
2140 /**
2141 * update_sibling_cpumasks - Update siblings cpumasks
2142 * @parent: Parent cpuset
2143 * @cs: Current cpuset
2144 * @tmp: Temp variables
2145 */
2146 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
2147 struct tmpmasks *tmp)
2148 {
2149 struct cpuset *sibling;
2150 struct cgroup_subsys_state *pos_css;
2151
2152 lockdep_assert_held(&cpuset_mutex);
2153
2154 /*
2155 * Check all its siblings and call update_cpumasks_hier()
2156 * if their effective_cpus will need to be changed.
2157 *
2158 * It is possible that a change in parent's effective_cpus
2159 * due to a change in a child partition's effective_xcpus will impact
2160 * its siblings even if they do not inherit parent's effective_cpus
2161 * directly.
2162 *
2163 * The update_cpumasks_hier() function may sleep. So we have to
2164 * release the RCU read lock before calling it.
2165 */
2166 rcu_read_lock();
2167 cpuset_for_each_child(sibling, pos_css, parent) {
2168 if (sibling == cs)
2169 continue;
2170 if (!is_partition_valid(sibling)) {
2171 compute_effective_cpumask(tmp->new_cpus, sibling,
2172 parent);
2173 if (cpumask_equal(tmp->new_cpus, sibling->effective_cpus))
2174 continue;
2175 }
2176 if (!css_tryget_online(&sibling->css))
2177 continue;
2178
2179 rcu_read_unlock();
2180 update_cpumasks_hier(sibling, tmp, false);
2181 rcu_read_lock();
2182 css_put(&sibling->css);
2183 }
2184 rcu_read_unlock();
2185 }
2186
2187 /**
2188 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
2189 * @cs: the cpuset to consider
2190 * @trialcs: trial cpuset
2191 * @buf: buffer of cpu numbers written to this cpuset
2192 */
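/*
 * For illustration (example values only), this handler is reached from a
 * write such as:
 *
 *   echo "0-3,8" > /sys/fs/cgroup/A/cpuset.cpus
 *
 * An empty write clears cpus_allowed, which validate_change() only accepts
 * when the cpuset has no tasks.
 */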
2193 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
2194 const char *buf)
2195 {
2196 int retval;
2197 struct tmpmasks tmp;
2198 struct cpuset *parent = parent_cs(cs);
2199 bool invalidate = false;
2200 bool force = false;
2201 int old_prs = cs->partition_root_state;
2202
2203 /* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
2204 if (cs == &top_cpuset)
2205 return -EACCES;
2206
2207 /*
2208 * An empty cpus_allowed is ok only if the cpuset has no tasks.
2209 * Since cpulist_parse() fails on an empty mask, we special case
2210 * that parsing. The validate_change() call ensures that cpusets
2211 * with tasks have cpus.
2212 */
2213 if (!*buf) {
2214 cpumask_clear(trialcs->cpus_allowed);
2215 if (cpumask_empty(trialcs->exclusive_cpus))
2216 cpumask_clear(trialcs->effective_xcpus);
2217 } else {
2218 retval = cpulist_parse(buf, trialcs->cpus_allowed);
2219 if (retval < 0)
2220 return retval;
2221
2222 if (!cpumask_subset(trialcs->cpus_allowed,
2223 top_cpuset.cpus_allowed))
2224 return -EINVAL;
2225
2226 /*
2227 * When exclusive_cpus isn't explicitly set, it is constrained
2228 * by cpus_allowed and parent's effective_xcpus. Otherwise,
2229 * trialcs->effective_xcpus is used as a temporary cpumask
2230 * for checking validity of the partition root.
2231 */
2232 if (!cpumask_empty(trialcs->exclusive_cpus) || is_partition_valid(cs))
2233 compute_effective_exclusive_cpumask(trialcs, NULL);
2234 }
2235
2236 /* Nothing to do if the cpus didn't change */
2237 if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
2238 return 0;
2239
2240 if (alloc_cpumasks(NULL, &tmp))
2241 return -ENOMEM;
2242
2243 if (old_prs) {
2244 if (is_partition_valid(cs) &&
2245 cpumask_empty(trialcs->effective_xcpus)) {
2246 invalidate = true;
2247 cs->prs_err = PERR_INVCPUS;
2248 } else if (prstate_housekeeping_conflict(old_prs, trialcs->effective_xcpus)) {
2249 invalidate = true;
2250 cs->prs_err = PERR_HKEEPING;
2251 } else if (tasks_nocpu_error(parent, cs, trialcs->effective_xcpus)) {
2252 invalidate = true;
2253 cs->prs_err = PERR_NOCPUS;
2254 }
2255 }
2256
2257 /*
2258 * Check all the descendants in update_cpumasks_hier() if
2259 * effective_xcpus is to be changed.
2260 */
2261 force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus);
2262
2263 retval = validate_change(cs, trialcs);
2264
2265 if ((retval == -EINVAL) && cpuset_v2()) {
2266 struct cgroup_subsys_state *css;
2267 struct cpuset *cp;
2268
2269 /*
2270 * The -EINVAL error code indicates that partition sibling
2271 * CPU exclusivity rule has been violated. We still allow
2272 * the cpumask change to proceed while invalidating the
2273 * partition. However, any conflicting sibling partitions
2274 * have to be marked as invalid too.
2275 */
2276 invalidate = true;
2277 rcu_read_lock();
2278 cpuset_for_each_child(cp, css, parent) {
2279 struct cpumask *xcpus = user_xcpus(trialcs);
2280
2281 if (is_partition_valid(cp) &&
2282 cpumask_intersects(xcpus, cp->effective_xcpus)) {
2283 rcu_read_unlock();
2284 update_parent_effective_cpumask(cp, partcmd_invalidate, NULL, &tmp);
2285 rcu_read_lock();
2286 }
2287 }
2288 rcu_read_unlock();
2289 retval = 0;
2290 }
2291
2292 if (retval < 0)
2293 goto out_free;
2294
2295 if (is_partition_valid(cs) ||
2296 (is_partition_invalid(cs) && !invalidate)) {
2297 struct cpumask *xcpus = trialcs->effective_xcpus;
2298
2299 if (cpumask_empty(xcpus) && is_partition_invalid(cs))
2300 xcpus = trialcs->cpus_allowed;
2301
2302 /*
2303 * Call remote_cpus_update() to handle valid remote partition
2304 */
2305 if (is_remote_partition(cs))
2306 remote_cpus_update(cs, xcpus, &tmp);
2307 else if (invalidate)
2308 update_parent_effective_cpumask(cs, partcmd_invalidate,
2309 NULL, &tmp);
2310 else
2311 update_parent_effective_cpumask(cs, partcmd_update,
2312 xcpus, &tmp);
2313 } else if (!cpumask_empty(cs->exclusive_cpus)) {
2314 /*
2315 * Use trialcs->effective_cpus as a temp cpumask
2316 */
2317 remote_partition_check(cs, trialcs->effective_xcpus,
2318 trialcs->effective_cpus, &tmp);
2319 }
2320
2321 spin_lock_irq(&callback_lock);
2322 cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
2323 cpumask_copy(cs->effective_xcpus, trialcs->effective_xcpus);
2324 if ((old_prs > 0) && !is_partition_valid(cs))
2325 reset_partition_data(cs);
2326 spin_unlock_irq(&callback_lock);
2327
2328 /* effective_cpus/effective_xcpus will be updated here */
2329 update_cpumasks_hier(cs, &tmp, force);
2330
2331 /* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */
2332 if (cs->partition_root_state)
2333 update_partition_sd_lb(cs, old_prs);
2334 out_free:
2335 free_cpumasks(NULL, &tmp);
2336 return retval;
2337 }
2338
2339 /**
2340 * update_exclusive_cpumask - update the exclusive_cpus mask of a cpuset
2341 * @cs: the cpuset to consider
2342 * @trialcs: trial cpuset
2343 * @buf: buffer of cpu numbers written to this cpuset
2344 *
2345 * The tasks' cpumask will be updated if cs is a valid partition root.
2346 */
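/*
 * For illustration (example values only), a write such as:
 *
 *   echo "4-7" > /sys/fs/cgroup/A/cpuset.cpus.exclusive
 *
 * feeds this function; the resulting effective_xcpus can then be claimed
 * by turning A into a local or remote partition root.
 */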
2347 static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs,
2348 const char *buf)
2349 {
2350 int retval;
2351 struct tmpmasks tmp;
2352 struct cpuset *parent = parent_cs(cs);
2353 bool invalidate = false;
2354 bool force = false;
2355 int old_prs = cs->partition_root_state;
2356
2357 if (!*buf) {
2358 cpumask_clear(trialcs->exclusive_cpus);
2359 cpumask_clear(trialcs->effective_xcpus);
2360 } else {
2361 retval = cpulist_parse(buf, trialcs->exclusive_cpus);
2362 if (retval < 0)
2363 return retval;
2364 }
2365
2366 /* Nothing to do if the CPUs didn't change */
2367 if (cpumask_equal(cs->exclusive_cpus, trialcs->exclusive_cpus))
2368 return 0;
2369
2370 if (*buf)
2371 compute_effective_exclusive_cpumask(trialcs, NULL);
2372
2373 /*
2374 * Check all the descendants in update_cpumasks_hier() if
2375 * effective_xcpus is to be changed.
2376 */
2377 force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus);
2378
2379 retval = validate_change(cs, trialcs);
2380 if (retval)
2381 return retval;
2382
2383 if (alloc_cpumasks(NULL, &tmp))
2384 return -ENOMEM;
2385
2386 if (old_prs) {
2387 if (cpumask_empty(trialcs->effective_xcpus)) {
2388 invalidate = true;
2389 cs->prs_err = PERR_INVCPUS;
2390 } else if (prstate_housekeeping_conflict(old_prs, trialcs->effective_xcpus)) {
2391 invalidate = true;
2392 cs->prs_err = PERR_HKEEPING;
2393 } else if (tasks_nocpu_error(parent, cs, trialcs->effective_xcpus)) {
2394 invalidate = true;
2395 cs->prs_err = PERR_NOCPUS;
2396 }
2397
2398 if (is_remote_partition(cs)) {
2399 if (invalidate)
2400 remote_partition_disable(cs, &tmp);
2401 else
2402 remote_cpus_update(cs, trialcs->effective_xcpus,
2403 &tmp);
2404 } else if (invalidate) {
2405 update_parent_effective_cpumask(cs, partcmd_invalidate,
2406 NULL, &tmp);
2407 } else {
2408 update_parent_effective_cpumask(cs, partcmd_update,
2409 trialcs->effective_xcpus, &tmp);
2410 }
2411 } else if (!cpumask_empty(trialcs->exclusive_cpus)) {
2412 /*
2413 * Use trialcs->effective_cpus as a temp cpumask
2414 */
2415 remote_partition_check(cs, trialcs->effective_xcpus,
2416 trialcs->effective_cpus, &tmp);
2417 }
2418 spin_lock_irq(&callback_lock);
2419 cpumask_copy(cs->exclusive_cpus, trialcs->exclusive_cpus);
2420 cpumask_copy(cs->effective_xcpus, trialcs->effective_xcpus);
2421 if ((old_prs > 0) && !is_partition_valid(cs))
2422 reset_partition_data(cs);
2423 spin_unlock_irq(&callback_lock);
2424
2425 /*
2426 * Call update_cpumasks_hier() to update effective_cpus/effective_xcpus
2427 * of the subtree when it is a valid partition root or effective_xcpus
2428 * is updated.
2429 */
2430 if (is_partition_valid(cs) || force)
2431 update_cpumasks_hier(cs, &tmp, force);
2432
2433 /* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */
2434 if (cs->partition_root_state)
2435 update_partition_sd_lb(cs, old_prs);
2436
2437 free_cpumasks(NULL, &tmp);
2438 return 0;
2439 }
2440
2441 /*
2442 * Migrate memory region from one set of nodes to another. This is
2443 * performed asynchronously as it can be called from process migration path
2444 * holding locks involved in process management. All mm migrations are
2445 * performed in the queued order and can be waited for by flushing
2446 * cpuset_migrate_mm_wq.
2447 */
2448
2449 struct cpuset_migrate_mm_work {
2450 struct work_struct work;
2451 struct mm_struct *mm;
2452 nodemask_t from;
2453 nodemask_t to;
2454 };
2455
2456 static void cpuset_migrate_mm_workfn(struct work_struct *work)
2457 {
2458 struct cpuset_migrate_mm_work *mwork =
2459 container_of(work, struct cpuset_migrate_mm_work, work);
2460
2461 /* on a wq worker, no need to worry about %current's mems_allowed */
2462 do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
2463 mmput(mwork->mm);
2464 kfree(mwork);
2465 }
2466
2467 static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
2468 const nodemask_t *to)
2469 {
2470 struct cpuset_migrate_mm_work *mwork;
2471
2472 if (nodes_equal(*from, *to)) {
2473 mmput(mm);
2474 return;
2475 }
2476
2477 mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
2478 if (mwork) {
2479 mwork->mm = mm;
2480 mwork->from = *from;
2481 mwork->to = *to;
2482 INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
2483 queue_work(cpuset_migrate_mm_wq, &mwork->work);
2484 } else {
2485 mmput(mm);
2486 }
2487 }
2488
2489 static void cpuset_post_attach(void)
2490 {
2491 flush_workqueue(cpuset_migrate_mm_wq);
2492 }
2493
2494 /*
2495 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
2496 * @tsk: the task to change
2497 * @newmems: new nodes that the task will be set
2498 *
2499 * We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed
2500 * and rebind the task's mempolicy, if any. If the task is allocating in
2501 * parallel, it might temporarily see an empty intersection, which results in
2502 * a seqlock check and retry before OOM or allocation failure.
2503 */
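/*
 * Sketch of the reader side of mems_allowed_seq (see
 * read_mems_allowed_begin()/read_mems_allowed_retry() in
 * include/linux/cpuset.h); illustrative only:
 *
 *   unsigned int seq;
 *   do {
 *       seq = read_mems_allowed_begin();
 *       // ... allocate using current->mems_allowed ...
 *   } while (read_mems_allowed_retry(seq));
 */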
2504 static void cpuset_change_task_nodemask(struct task_struct *tsk,
2505 nodemask_t *newmems)
2506 {
2507 task_lock(tsk);
2508
2509 local_irq_disable();
2510 write_seqcount_begin(&tsk->mems_allowed_seq);
2511
2512 nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
2513 mpol_rebind_task(tsk, newmems);
2514 tsk->mems_allowed = *newmems;
2515
2516 write_seqcount_end(&tsk->mems_allowed_seq);
2517 local_irq_enable();
2518
2519 task_unlock(tsk);
2520 }
2521
2522 static void *cpuset_being_rebound;
2523
2524 /**
2525 * cpuset_update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
2526 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
2527 *
2528 * Iterate through each task of @cs updating its mems_allowed to the
2529 * effective cpuset's. As this function is called with cpuset_mutex held,
2530 * cpuset membership stays stable.
2531 */
2532 void cpuset_update_tasks_nodemask(struct cpuset *cs)
2533 {
2534 static nodemask_t newmems; /* protected by cpuset_mutex */
2535 struct css_task_iter it;
2536 struct task_struct *task;
2537
2538 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */
2539
2540 guarantee_online_mems(cs, &newmems);
2541
2542 /*
2543 * The mpol_rebind_mm() call takes mmap_lock, which we couldn't
2544 * take while holding tasklist_lock. Forks can happen - the
2545 * mpol_dup() cpuset_being_rebound check will catch such forks,
2546 * and rebind their vma mempolicies too. Because we still hold
2547 * the global cpuset_mutex, we know that no other rebind effort
2548 * will be contending for the global variable cpuset_being_rebound.
2549 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
2550 * is idempotent. Also migrate pages in each mm to new nodes.
2551 */
2552 css_task_iter_start(&cs->css, 0, &it);
2553 while ((task = css_task_iter_next(&it))) {
2554 struct mm_struct *mm;
2555 bool migrate;
2556
2557 cpuset_change_task_nodemask(task, &newmems);
2558
2559 mm = get_task_mm(task);
2560 if (!mm)
2561 continue;
2562
2563 migrate = is_memory_migrate(cs);
2564
2565 mpol_rebind_mm(mm, &cs->mems_allowed);
2566 if (migrate)
2567 cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
2568 else
2569 mmput(mm);
2570 }
2571 css_task_iter_end(&it);
2572
2573 /*
2574 * All the tasks' nodemasks have been updated, update
2575 * cs->old_mems_allowed.
2576 */
2577 cs->old_mems_allowed = newmems;
2578
2579 /* We're done rebinding vmas to this cpuset's new mems_allowed. */
2580 cpuset_being_rebound = NULL;
2581 }
2582
2583 /*
2584 * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree
2585 * @cs: the cpuset to consider
2586 * @new_mems: a temp variable for calculating new effective_mems
2587 *
2588 * When configured nodemask is changed, the effective nodemasks of this cpuset
2589 * and all its descendants need to be updated.
2590 *
2591 * On legacy hierarchy, effective_mems will be the same as mems_allowed.
2592 *
2593 * Called with cpuset_mutex held
2594 */
2595 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
2596 {
2597 struct cpuset *cp;
2598 struct cgroup_subsys_state *pos_css;
2599
2600 rcu_read_lock();
2601 cpuset_for_each_descendant_pre(cp, pos_css, cs) {
2602 struct cpuset *parent = parent_cs(cp);
2603
2604 nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);
2605
2606 /*
2607 * If it becomes empty, inherit the effective mask of the
2608 * parent, which is guaranteed to have some MEMs.
2609 */
2610 if (is_in_v2_mode() && nodes_empty(*new_mems))
2611 *new_mems = parent->effective_mems;
2612
2613 /* Skip the whole subtree if the nodemask remains the same. */
2614 if (nodes_equal(*new_mems, cp->effective_mems)) {
2615 pos_css = css_rightmost_descendant(pos_css);
2616 continue;
2617 }
2618
2619 if (!css_tryget_online(&cp->css))
2620 continue;
2621 rcu_read_unlock();
2622
2623 spin_lock_irq(&callback_lock);
2624 cp->effective_mems = *new_mems;
2625 spin_unlock_irq(&callback_lock);
2626
2627 WARN_ON(!is_in_v2_mode() &&
2628 !nodes_equal(cp->mems_allowed, cp->effective_mems));
2629
2630 cpuset_update_tasks_nodemask(cp);
2631
2632 rcu_read_lock();
2633 css_put(&cp->css);
2634 }
2635 rcu_read_unlock();
2636 }
2637
2638 /*
2639 * Handle user request to change the 'mems' memory placement
2640 * of a cpuset. Needs to validate the request, update the
2641 * cpuset's mems_allowed, and for each task in the cpuset,
2642 * update mems_allowed and rebind the task's mempolicy and any vma
2643 * mempolicies; and, if the cpuset is marked 'memory_migrate',
2644 * migrate the task's pages to the new memory.
2645 *
2646 * Call with cpuset_mutex held. May take callback_lock during call.
2647 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
2648 * lock each such task's mm->mmap_lock, scan its vmas and rebind
2649 * their mempolicies to the cpusets new mems_allowed.
2650 */
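/*
 * For illustration (example values only), this is the handler behind:
 *
 *   echo "0-1" > /sys/fs/cgroup/A/cpuset.mems
 *
 * If the cpuset has memory_migrate enabled, existing pages of its tasks
 * are queued for migration to the new nodes via cpuset_migrate_mm_wq.
 */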
2651 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
2652 const char *buf)
2653 {
2654 int retval;
2655
2656 /*
2657 * top_cpuset.mems_allowed tracks node_states[N_MEMORY];
2658 * it's read-only
2659 */
2660 if (cs == &top_cpuset) {
2661 retval = -EACCES;
2662 goto done;
2663 }
2664
2665 /*
2666 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
2667 * Since nodelist_parse() fails on an empty mask, we special case
2668 * that parsing. The validate_change() call ensures that cpusets
2669 * with tasks have memory.
2670 */
2671 if (!*buf) {
2672 nodes_clear(trialcs->mems_allowed);
2673 } else {
2674 retval = nodelist_parse(buf, trialcs->mems_allowed);
2675 if (retval < 0)
2676 goto done;
2677
2678 if (!nodes_subset(trialcs->mems_allowed,
2679 top_cpuset.mems_allowed)) {
2680 retval = -EINVAL;
2681 goto done;
2682 }
2683 }
2684
2685 if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) {
2686 retval = 0; /* Too easy - nothing to do */
2687 goto done;
2688 }
2689 retval = validate_change(cs, trialcs);
2690 if (retval < 0)
2691 goto done;
2692
2693 check_insane_mems_config(&trialcs->mems_allowed);
2694
2695 spin_lock_irq(&callback_lock);
2696 cs->mems_allowed = trialcs->mems_allowed;
2697 spin_unlock_irq(&callback_lock);
2698
2699 /* use trialcs->mems_allowed as a temp variable */
2700 update_nodemasks_hier(cs, &trialcs->mems_allowed);
2701 done:
2702 return retval;
2703 }
2704
2705 bool current_cpuset_is_being_rebound(void)
2706 {
2707 bool ret;
2708
2709 rcu_read_lock();
2710 ret = task_cs(current) == cpuset_being_rebound;
2711 rcu_read_unlock();
2712
2713 return ret;
2714 }
2715
2716 /*
2717 * cpuset_update_flag - read a 0 or a 1 in a file and update associated flag
2718 * bit: the bit to update (see cpuset_flagbits_t)
2719 * cs: the cpuset to update
2720 * turning_on: whether the flag is being set or cleared
2721 *
2722 * Call with cpuset_mutex held.
2723 */
2724
2725 int cpuset_update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
2726 int turning_on)
2727 {
2728 struct cpuset *trialcs;
2729 int balance_flag_changed;
2730 int spread_flag_changed;
2731 int err;
2732
2733 trialcs = alloc_trial_cpuset(cs);
2734 if (!trialcs)
2735 return -ENOMEM;
2736
2737 if (turning_on)
2738 set_bit(bit, &trialcs->flags);
2739 else
2740 clear_bit(bit, &trialcs->flags);
2741
2742 err = validate_change(cs, trialcs);
2743 if (err < 0)
2744 goto out;
2745
2746 balance_flag_changed = (is_sched_load_balance(cs) !=
2747 is_sched_load_balance(trialcs));
2748
2749 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
2750 || (is_spread_page(cs) != is_spread_page(trialcs)));
2751
2752 spin_lock_irq(&callback_lock);
2753 cs->flags = trialcs->flags;
2754 spin_unlock_irq(&callback_lock);
2755
2756 if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) {
2757 if (cpuset_v2())
2758 cpuset_force_rebuild();
2759 else
2760 rebuild_sched_domains_locked();
2761 }
2762
2763 if (spread_flag_changed)
2764 cpuset1_update_tasks_flags(cs);
2765 out:
2766 free_cpuset(trialcs);
2767 return err;
2768 }
2769
2770 /**
2771 * update_prstate - update partition_root_state
2772 * @cs: the cpuset to update
2773 * @new_prs: new partition root state
2774 * Return: 0 if successful, != 0 if error
2775 *
2776 * Call with cpuset_mutex held.
2777 */
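/*
 * For illustration only (assuming a v2 cgroup mount at /sys/fs/cgroup):
 *
 *   echo "2-3" > /sys/fs/cgroup/A/cpuset.cpus
 *   echo root  > /sys/fs/cgroup/A/cpuset.cpus.partition
 *   cat /sys/fs/cgroup/A/cpuset.cpus.partition
 *
 * The last command reports "root", or e.g. "root invalid (...)" with one of
 * the perr_strings reasons if the transition failed.
 */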
2778 static int update_prstate(struct cpuset *cs, int new_prs)
2779 {
2780 int err = PERR_NONE, old_prs = cs->partition_root_state;
2781 struct cpuset *parent = parent_cs(cs);
2782 struct tmpmasks tmpmask;
2783 bool new_xcpus_state = false;
2784
2785 if (old_prs == new_prs)
2786 return 0;
2787
2788 /*
2789 * Treat a previously invalid partition root as if it is a "member".
2790 */
2791 if (new_prs && is_prs_invalid(old_prs))
2792 old_prs = PRS_MEMBER;
2793
2794 if (alloc_cpumasks(NULL, &tmpmask))
2795 return -ENOMEM;
2796
2797 /*
2798 * Set up effective_xcpus if not properly set yet; it will be cleared
2799 * later if the partition becomes invalid.
2800 */
2801 if ((new_prs > 0) && cpumask_empty(cs->exclusive_cpus)) {
2802 spin_lock_irq(&callback_lock);
2803 cpumask_and(cs->effective_xcpus,
2804 cs->cpus_allowed, parent->effective_xcpus);
2805 spin_unlock_irq(&callback_lock);
2806 }
2807
2808 err = update_partition_exclusive(cs, new_prs);
2809 if (err)
2810 goto out;
2811
2812 if (!old_prs) {
2813 /*
2814 * cpus_allowed and exclusive_cpus cannot be both empty.
2815 */
2816 if (xcpus_empty(cs)) {
2817 err = PERR_CPUSEMPTY;
2818 goto out;
2819 }
2820
2821 /*
2822 * If the parent is a valid partition, enable a local partition.
2823 * Otherwise, enable a remote partition.
2824 */
2825 if (is_partition_valid(parent)) {
2826 enum partition_cmd cmd = (new_prs == PRS_ROOT)
2827 ? partcmd_enable : partcmd_enablei;
2828
2829 err = update_parent_effective_cpumask(cs, cmd, NULL, &tmpmask);
2830 } else {
2831 err = remote_partition_enable(cs, new_prs, &tmpmask);
2832 }
2833 } else if (old_prs && new_prs) {
2834 /*
2835 * A change in load balance state only, no change in cpumasks.
2836 */
2837 new_xcpus_state = true;
2838 } else {
2839 /*
2840 * Switching back to member is always allowed even if it
2841 * disables child partitions.
2842 */
2843 if (is_remote_partition(cs))
2844 remote_partition_disable(cs, &tmpmask);
2845 else
2846 update_parent_effective_cpumask(cs, partcmd_disable,
2847 NULL, &tmpmask);
2848
2849 /*
2850 * Invalidation of child partitions will be done in
2851 * update_cpumasks_hier().
2852 */
2853 }
2854 out:
2855 /*
2856 * Make partition invalid & disable CS_CPU_EXCLUSIVE if an error
2857 * happens.
2858 */
2859 if (err) {
2860 new_prs = -new_prs;
2861 update_partition_exclusive(cs, new_prs);
2862 }
2863
2864 spin_lock_irq(&callback_lock);
2865 cs->partition_root_state = new_prs;
2866 WRITE_ONCE(cs->prs_err, err);
2867 if (!is_partition_valid(cs))
2868 reset_partition_data(cs);
2869 else if (new_xcpus_state)
2870 partition_xcpus_newstate(old_prs, new_prs, cs->effective_xcpus);
2871 spin_unlock_irq(&callback_lock);
2872 update_unbound_workqueue_cpumask(new_xcpus_state);
2873
2874 /* Force update if switching back to member */
2875 update_cpumasks_hier(cs, &tmpmask, !new_prs);
2876
2877 /* Update sched domains and load balance flag */
2878 update_partition_sd_lb(cs, old_prs);
2879
2880 notify_partition_change(cs, old_prs);
2881 if (force_sd_rebuild)
2882 rebuild_sched_domains_locked();
2883 free_cpumasks(NULL, &tmpmask);
2884 return 0;
2885 }
2886
2887 static struct cpuset *cpuset_attach_old_cs;
2888
2889 /*
2890 * Check to see if a cpuset can accept a new task.
2891 * For v1, cpus_allowed and mems_allowed can't be empty.
2892 * For v2, effective_cpus can't be empty.
2893 * Note that in v1, effective_cpus = cpus_allowed.
2894 */
2895 static int cpuset_can_attach_check(struct cpuset *cs)
2896 {
2897 if (cpumask_empty(cs->effective_cpus) ||
2898 (!is_in_v2_mode() && nodes_empty(cs->mems_allowed)))
2899 return -ENOSPC;
2900 return 0;
2901 }
2902
2903 static void reset_migrate_dl_data(struct cpuset *cs)
2904 {
2905 cs->nr_migrate_dl_tasks = 0;
2906 cs->sum_migrate_dl_bw = 0;
2907 }
2908
2909 /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
2910 static int cpuset_can_attach(struct cgroup_taskset *tset)
2911 {
2912 struct cgroup_subsys_state *css;
2913 struct cpuset *cs, *oldcs;
2914 struct task_struct *task;
2915 bool cpus_updated, mems_updated;
2916 int ret;
2917
2918 /* used later by cpuset_attach() */
2919 cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
2920 oldcs = cpuset_attach_old_cs;
2921 cs = css_cs(css);
2922
2923 mutex_lock(&cpuset_mutex);
2924
2925 /* Check to see if task is allowed in the cpuset */
2926 ret = cpuset_can_attach_check(cs);
2927 if (ret)
2928 goto out_unlock;
2929
2930 cpus_updated = !cpumask_equal(cs->effective_cpus, oldcs->effective_cpus);
2931 mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems);
2932
2933 cgroup_taskset_for_each(task, css, tset) {
2934 ret = task_can_attach(task);
2935 if (ret)
2936 goto out_unlock;
2937
2938 /*
2939 * Skip the rights-over-task check in v2 when nothing changes;
2940 * migration permission derives from hierarchy ownership in
2941 * cgroup_procs_write_permission().
2942 */
2943 if (!cpuset_v2() || (cpus_updated || mems_updated)) {
2944 ret = security_task_setscheduler(task);
2945 if (ret)
2946 goto out_unlock;
2947 }
2948
2949 if (dl_task(task)) {
2950 cs->nr_migrate_dl_tasks++;
2951 cs->sum_migrate_dl_bw += task->dl.dl_bw;
2952 }
2953 }
2954
2955 if (!cs->nr_migrate_dl_tasks)
2956 goto out_success;
2957
2958 if (!cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus)) {
2959 int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus);
2960
2961 if (unlikely(cpu >= nr_cpu_ids)) {
2962 reset_migrate_dl_data(cs);
2963 ret = -EINVAL;
2964 goto out_unlock;
2965 }
2966
2967 ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw);
2968 if (ret) {
2969 reset_migrate_dl_data(cs);
2970 goto out_unlock;
2971 }
2972 }
2973
2974 out_success:
2975 /*
2976 * Mark attach is in progress. This makes validate_change() fail
2977 * changes which zero cpus/mems_allowed.
2978 */
2979 cs->attach_in_progress++;
2980 out_unlock:
2981 mutex_unlock(&cpuset_mutex);
2982 return ret;
2983 }
2984
2985 static void cpuset_cancel_attach(struct cgroup_taskset *tset)
2986 {
2987 struct cgroup_subsys_state *css;
2988 struct cpuset *cs;
2989
2990 cgroup_taskset_first(tset, &css);
2991 cs = css_cs(css);
2992
2993 mutex_lock(&cpuset_mutex);
2994 dec_attach_in_progress_locked(cs);
2995
2996 if (cs->nr_migrate_dl_tasks) {
2997 int cpu = cpumask_any(cs->effective_cpus);
2998
2999 dl_bw_free(cpu, cs->sum_migrate_dl_bw);
3000 reset_migrate_dl_data(cs);
3001 }
3002
3003 mutex_unlock(&cpuset_mutex);
3004 }
3005
3006 /*
3007 * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach_task()
3008 * but we can't allocate it dynamically there. Define it globally and
3009 * allocate it in cpuset_init().
3010 */
3011 static cpumask_var_t cpus_attach;
3012 static nodemask_t cpuset_attach_nodemask_to;
3013
3014 static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task)
3015 {
3016 lockdep_assert_held(&cpuset_mutex);
3017
3018 if (cs != &top_cpuset)
3019 guarantee_online_cpus(task, cpus_attach);
3020 else
3021 cpumask_andnot(cpus_attach, task_cpu_possible_mask(task),
3022 subpartitions_cpus);
3023 /*
3024 * can_attach beforehand should guarantee that this doesn't
3025 * fail. TODO: have a better way to handle failure here
3026 */
3027 WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
3028
3029 cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
3030 cpuset1_update_task_spread_flags(cs, task);
3031 }
3032
3033 static void cpuset_attach(struct cgroup_taskset *tset)
3034 {
3035 struct task_struct *task;
3036 struct task_struct *leader;
3037 struct cgroup_subsys_state *css;
3038 struct cpuset *cs;
3039 struct cpuset *oldcs = cpuset_attach_old_cs;
3040 bool cpus_updated, mems_updated;
3041
3042 cgroup_taskset_first(tset, &css);
3043 cs = css_cs(css);
3044
3045 lockdep_assert_cpus_held(); /* see cgroup_attach_lock() */
3046 mutex_lock(&cpuset_mutex);
3047 cpus_updated = !cpumask_equal(cs->effective_cpus,
3048 oldcs->effective_cpus);
3049 mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems);
3050
3051 /*
3052 * In the default hierarchy, enabling cpuset in the child cgroups
3053 * will trigger a number of cpuset_attach() calls with no change
3054 * in effective cpus and mems. In that case, we can optimize out
3055 * by skipping the task iteration and update.
3056 */
3057 if (cpuset_v2() && !cpus_updated && !mems_updated) {
3058 cpuset_attach_nodemask_to = cs->effective_mems;
3059 goto out;
3060 }
3061
3062 guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
3063
3064 cgroup_taskset_for_each(task, css, tset)
3065 cpuset_attach_task(cs, task);
3066
3067 /*
3068 * Change mm for all threadgroup leaders. This is expensive and may
3069 * sleep and should be moved outside migration path proper. Skip it
3070 * if there is no change in effective_mems and CS_MEMORY_MIGRATE is
3071 * not set.
3072 */
3073 cpuset_attach_nodemask_to = cs->effective_mems;
3074 if (!is_memory_migrate(cs) && !mems_updated)
3075 goto out;
3076
3077 cgroup_taskset_for_each_leader(leader, css, tset) {
3078 struct mm_struct *mm = get_task_mm(leader);
3079
3080 if (mm) {
3081 mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
3082
3083 /*
3084 * old_mems_allowed is the same with mems_allowed
3085 * here, except if this task is being moved
3086 * automatically due to hotplug. In that case
3087 * @mems_allowed has been updated and is empty, so
3088 * @old_mems_allowed is the right nodesets that we
3089 * migrate mm from.
3090 */
3091 if (is_memory_migrate(cs))
3092 cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
3093 &cpuset_attach_nodemask_to);
3094 else
3095 mmput(mm);
3096 }
3097 }
3098
3099 out:
3100 cs->old_mems_allowed = cpuset_attach_nodemask_to;
3101
3102 if (cs->nr_migrate_dl_tasks) {
3103 cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks;
3104 oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks;
3105 reset_migrate_dl_data(cs);
3106 }
3107
3108 dec_attach_in_progress_locked(cs);
3109
3110 mutex_unlock(&cpuset_mutex);
3111 }
3112
3113 /*
3114 * Common handling for a write to a "cpus" or "mems" file.
3115 */
3116 ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
3117 char *buf, size_t nbytes, loff_t off)
3118 {
3119 struct cpuset *cs = css_cs(of_css(of));
3120 struct cpuset *trialcs;
3121 int retval = -ENODEV;
3122
3123 buf = strstrip(buf);
3124
3125 /*
3126 * CPU or memory hotunplug may leave @cs w/o any execution
3127 * resources, in which case the hotplug code asynchronously updates
3128 * configuration and transfers all tasks to the nearest ancestor
3129 * which can execute.
3130 *
3131 * As writes to "cpus" or "mems" may restore @cs's execution
3132 * resources, wait for the previously scheduled operations before
3133 * proceeding, so that we don't end up repeatedly removing tasks added
3134 * after execution capability is restored.
3135 *
3136 * cpuset_handle_hotplug may call back into cgroup core asynchronously
3137 * via cgroup_transfer_tasks() and waiting for it from a cgroupfs
3138 * operation like this one can lead to a deadlock through kernfs
3139 * active_ref protection. Let's break the protection. Losing the
3140 * protection is okay as we check whether @cs is online after
3141 * grabbing cpuset_mutex anyway. This only happens on the legacy
3142 * hierarchies.
3143 */
3144 css_get(&cs->css);
3145 kernfs_break_active_protection(of->kn);
3146
3147 cpus_read_lock();
3148 mutex_lock(&cpuset_mutex);
3149 if (!is_cpuset_online(cs))
3150 goto out_unlock;
3151
3152 trialcs = alloc_trial_cpuset(cs);
3153 if (!trialcs) {
3154 retval = -ENOMEM;
3155 goto out_unlock;
3156 }
3157
3158 switch (of_cft(of)->private) {
3159 case FILE_CPULIST:
3160 retval = update_cpumask(cs, trialcs, buf);
3161 break;
3162 case FILE_EXCLUSIVE_CPULIST:
3163 retval = update_exclusive_cpumask(cs, trialcs, buf);
3164 break;
3165 case FILE_MEMLIST:
3166 retval = update_nodemask(cs, trialcs, buf);
3167 break;
3168 default:
3169 retval = -EINVAL;
3170 break;
3171 }
3172
3173 free_cpuset(trialcs);
3174 if (force_sd_rebuild)
3175 rebuild_sched_domains_locked();
3176 out_unlock:
3177 mutex_unlock(&cpuset_mutex);
3178 cpus_read_unlock();
3179 kernfs_unbreak_active_protection(of->kn);
3180 css_put(&cs->css);
3181 flush_workqueue(cpuset_migrate_mm_wq);
3182 return retval ?: nbytes;
3183 }
3184
3185 /*
3186 * These ascii lists should be read in a single call, by using a user
3187 * buffer large enough to hold the entire map. If read in smaller
3188 * chunks, there is no guarantee of atomicity. Since the display format
3189 * used, list of ranges of sequential numbers, is variable length,
3190 * and since these maps can change value dynamically, one could read
3191 * gibberish by doing partial reads while a list was changing.
3192 */
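/*
 * For example (values illustrative only), a single read such as:
 *
 *   cat /sys/fs/cgroup/A/cpuset.cpus.effective
 *
 * returns one consistent snapshot like "0-3,8", whereas piecemeal reads
 * could interleave with a concurrent update.
 */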
3193 int cpuset_common_seq_show(struct seq_file *sf, void *v)
3194 {
3195 struct cpuset *cs = css_cs(seq_css(sf));
3196 cpuset_filetype_t type = seq_cft(sf)->private;
3197 int ret = 0;
3198
3199 spin_lock_irq(&callback_lock);
3200
3201 switch (type) {
3202 case FILE_CPULIST:
3203 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
3204 break;
3205 case FILE_MEMLIST:
3206 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
3207 break;
3208 case FILE_EFFECTIVE_CPULIST:
3209 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus));
3210 break;
3211 case FILE_EFFECTIVE_MEMLIST:
3212 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems));
3213 break;
3214 case FILE_EXCLUSIVE_CPULIST:
3215 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->exclusive_cpus));
3216 break;
3217 case FILE_EFFECTIVE_XCPULIST:
3218 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_xcpus));
3219 break;
3220 case FILE_SUBPARTS_CPULIST:
3221 seq_printf(sf, "%*pbl\n", cpumask_pr_args(subpartitions_cpus));
3222 break;
3223 case FILE_ISOLATED_CPULIST:
3224 seq_printf(sf, "%*pbl\n", cpumask_pr_args(isolated_cpus));
3225 break;
3226 default:
3227 ret = -EINVAL;
3228 }
3229
3230 spin_unlock_irq(&callback_lock);
3231 return ret;
3232 }
3233
3234 static int sched_partition_show(struct seq_file *seq, void *v)
3235 {
3236 struct cpuset *cs = css_cs(seq_css(seq));
3237 const char *err, *type = NULL;
3238
3239 switch (cs->partition_root_state) {
3240 case PRS_ROOT:
3241 seq_puts(seq, "root\n");
3242 break;
3243 case PRS_ISOLATED:
3244 seq_puts(seq, "isolated\n");
3245 break;
3246 case PRS_MEMBER:
3247 seq_puts(seq, "member\n");
3248 break;
3249 case PRS_INVALID_ROOT:
3250 type = "root";
3251 fallthrough;
3252 case PRS_INVALID_ISOLATED:
3253 if (!type)
3254 type = "isolated";
3255 err = perr_strings[READ_ONCE(cs->prs_err)];
3256 if (err)
3257 seq_printf(seq, "%s invalid (%s)\n", type, err);
3258 else
3259 seq_printf(seq, "%s invalid\n", type);
3260 break;
3261 }
3262 return 0;
3263 }
3264
3265 static ssize_t sched_partition_write(struct kernfs_open_file *of, char *buf,
3266 size_t nbytes, loff_t off)
3267 {
3268 struct cpuset *cs = css_cs(of_css(of));
3269 int val;
3270 int retval = -ENODEV;
3271
3272 buf = strstrip(buf);
3273
3274 if (!strcmp(buf, "root"))
3275 val = PRS_ROOT;
3276 else if (!strcmp(buf, "member"))
3277 val = PRS_MEMBER;
3278 else if (!strcmp(buf, "isolated"))
3279 val = PRS_ISOLATED;
3280 else
3281 return -EINVAL;
3282
3283 css_get(&cs->css);
3284 cpus_read_lock();
3285 mutex_lock(&cpuset_mutex);
3286 if (!is_cpuset_online(cs))
3287 goto out_unlock;
3288
3289 retval = update_prstate(cs, val);
3290 out_unlock:
3291 mutex_unlock(&cpuset_mutex);
3292 cpus_read_unlock();
3293 css_put(&cs->css);
3294 return retval ?: nbytes;
3295 }
3296
3297 /*
3298 * This is currently a minimal set for the default hierarchy. It can be
3299 * expanded later on by migrating more features and control files from v1.
3300 */
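/*
 * Illustrative only: these files show up in child cgroups once the cpuset
 * controller is enabled on the default hierarchy, e.g.
 *
 *   echo +cpuset > /sys/fs/cgroup/cgroup.subtree_control
 *   ls /sys/fs/cgroup/A/cpuset.*
 */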
3301 static struct cftype dfl_files[] = {
3302 {
3303 .name = "cpus",
3304 .seq_show = cpuset_common_seq_show,
3305 .write = cpuset_write_resmask,
3306 .max_write_len = (100U + 6 * NR_CPUS),
3307 .private = FILE_CPULIST,
3308 .flags = CFTYPE_NOT_ON_ROOT,
3309 },
3310
3311 {
3312 .name = "mems",
3313 .seq_show = cpuset_common_seq_show,
3314 .write = cpuset_write_resmask,
3315 .max_write_len = (100U + 6 * MAX_NUMNODES),
3316 .private = FILE_MEMLIST,
3317 .flags = CFTYPE_NOT_ON_ROOT,
3318 },
3319
3320 {
3321 .name = "cpus.effective",
3322 .seq_show = cpuset_common_seq_show,
3323 .private = FILE_EFFECTIVE_CPULIST,
3324 },
3325
3326 {
3327 .name = "mems.effective",
3328 .seq_show = cpuset_common_seq_show,
3329 .private = FILE_EFFECTIVE_MEMLIST,
3330 },
3331
3332 {
3333 .name = "cpus.partition",
3334 .seq_show = sched_partition_show,
3335 .write = sched_partition_write,
3336 .private = FILE_PARTITION_ROOT,
3337 .flags = CFTYPE_NOT_ON_ROOT,
3338 .file_offset = offsetof(struct cpuset, partition_file),
3339 },
3340
3341 {
3342 .name = "cpus.exclusive",
3343 .seq_show = cpuset_common_seq_show,
3344 .write = cpuset_write_resmask,
3345 .max_write_len = (100U + 6 * NR_CPUS),
3346 .private = FILE_EXCLUSIVE_CPULIST,
3347 .flags = CFTYPE_NOT_ON_ROOT,
3348 },
3349
3350 {
3351 .name = "cpus.exclusive.effective",
3352 .seq_show = cpuset_common_seq_show,
3353 .private = FILE_EFFECTIVE_XCPULIST,
3354 .flags = CFTYPE_NOT_ON_ROOT,
3355 },
3356
3357 {
3358 .name = "cpus.subpartitions",
3359 .seq_show = cpuset_common_seq_show,
3360 .private = FILE_SUBPARTS_CPULIST,
3361 .flags = CFTYPE_ONLY_ON_ROOT | CFTYPE_DEBUG,
3362 },
3363
3364 {
3365 .name = "cpus.isolated",
3366 .seq_show = cpuset_common_seq_show,
3367 .private = FILE_ISOLATED_CPULIST,
3368 .flags = CFTYPE_ONLY_ON_ROOT,
3369 },
3370
3371 { } /* terminate */
3372 };
3373
3374
3375 /**
3376 * cpuset_css_alloc - Allocate a cpuset css
3377 * @parent_css: Parent css of the control group that the new cpuset will be
3378 * part of
3379 * Return: cpuset css on success, -ENOMEM on failure.
3380 *
3381 * Allocate and initialize a new cpuset css for a non-NULL @parent_css; return
3382 * the top cpuset css otherwise.
3383 */
3384 static struct cgroup_subsys_state *
3385 cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
3386 {
3387 struct cpuset *cs;
3388
3389 if (!parent_css)
3390 return &top_cpuset.css;
3391
3392 cs = kzalloc(sizeof(*cs), GFP_KERNEL);
3393 if (!cs)
3394 return ERR_PTR(-ENOMEM);
3395
3396 if (alloc_cpumasks(cs, NULL)) {
3397 kfree(cs);
3398 return ERR_PTR(-ENOMEM);
3399 }
3400
3401 __set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
3402 fmeter_init(&cs->fmeter);
3403 cs->relax_domain_level = -1;
3404 INIT_LIST_HEAD(&cs->remote_sibling);
3405
3406 /* Set CS_MEMORY_MIGRATE for default hierarchy */
3407 if (cpuset_v2())
3408 __set_bit(CS_MEMORY_MIGRATE, &cs->flags);
3409
3410 return &cs->css;
3411 }
3412
3413 static int cpuset_css_online(struct cgroup_subsys_state *css)
3414 {
3415 struct cpuset *cs = css_cs(css);
3416 struct cpuset *parent = parent_cs(cs);
3417 struct cpuset *tmp_cs;
3418 struct cgroup_subsys_state *pos_css;
3419
3420 if (!parent)
3421 return 0;
3422
3423 cpus_read_lock();
3424 mutex_lock(&cpuset_mutex);
3425
3426 set_bit(CS_ONLINE, &cs->flags);
3427 if (is_spread_page(parent))
3428 set_bit(CS_SPREAD_PAGE, &cs->flags);
3429 if (is_spread_slab(parent))
3430 set_bit(CS_SPREAD_SLAB, &cs->flags);
3431 /*
3432 * For v2, clear CS_SCHED_LOAD_BALANCE if parent is isolated
3433 */
3434 if (cpuset_v2() && !is_sched_load_balance(parent))
3435 clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
3436
3437 cpuset_inc();
3438
3439 spin_lock_irq(&callback_lock);
3440 if (is_in_v2_mode()) {
3441 cpumask_copy(cs->effective_cpus, parent->effective_cpus);
3442 cs->effective_mems = parent->effective_mems;
3443 }
3444 spin_unlock_irq(&callback_lock);
3445
3446 if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
3447 goto out_unlock;
3448
3449 /*
3450 * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is
3451 * set. This flag handling is implemented in cgroup core for
3452 * historical reasons - the flag may be specified during mount.
3453 *
3454 * Currently, if any sibling cpusets have exclusive cpus or mem, we
3455 * refuse to clone the configuration - thereby refusing the task to
3456 * be entered, and as a result refusing the sys_unshare() or
3457 * clone() which initiated it. If this becomes a problem for some
3458 * users who wish to allow that scenario, then this could be
3459 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
3460 * (and likewise for mems) to the new cgroup.
3461 */
3462 rcu_read_lock();
3463 cpuset_for_each_child(tmp_cs, pos_css, parent) {
3464 if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
3465 rcu_read_unlock();
3466 goto out_unlock;
3467 }
3468 }
3469 rcu_read_unlock();
3470
3471 spin_lock_irq(&callback_lock);
3472 cs->mems_allowed = parent->mems_allowed;
3473 cs->effective_mems = parent->mems_allowed;
3474 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
3475 cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
3476 spin_unlock_irq(&callback_lock);
3477 out_unlock:
3478 mutex_unlock(&cpuset_mutex);
3479 cpus_read_unlock();
3480 return 0;
3481 }
3482
3483 /*
3484 * If the cpuset being removed has its flag 'sched_load_balance'
3485 * enabled, then simulate turning sched_load_balance off, which
3486 * will call rebuild_sched_domains_locked(). That is not needed
3487 * in the default hierarchy, where only changes in partition state
3488 * cause repartitioning.
3489 *
3490 * If the cpuset has the 'sched.partition' flag enabled, simulate
3491 * turning 'sched.partition' off.
3492 */
3493
3494 static void cpuset_css_offline(struct cgroup_subsys_state *css)
3495 {
3496 struct cpuset *cs = css_cs(css);
3497
3498 cpus_read_lock();
3499 mutex_lock(&cpuset_mutex);
3500
3501 if (is_partition_valid(cs))
3502 update_prstate(cs, 0);
3503
3504 if (!cpuset_v2() && is_sched_load_balance(cs))
3505 cpuset_update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
3506
3507 cpuset_dec();
3508 clear_bit(CS_ONLINE, &cs->flags);
3509
3510 mutex_unlock(&cpuset_mutex);
3511 cpus_read_unlock();
3512 }
3513
3514 static void cpuset_css_free(struct cgroup_subsys_state *css)
3515 {
3516 struct cpuset *cs = css_cs(css);
3517
3518 free_cpuset(cs);
3519 }
3520
3521 static void cpuset_bind(struct cgroup_subsys_state *root_css)
3522 {
3523 mutex_lock(&cpuset_mutex);
3524 spin_lock_irq(&callback_lock);
3525
3526 if (is_in_v2_mode()) {
3527 cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
3528 cpumask_copy(top_cpuset.effective_xcpus, cpu_possible_mask);
3529 top_cpuset.mems_allowed = node_possible_map;
3530 } else {
3531 cpumask_copy(top_cpuset.cpus_allowed,
3532 top_cpuset.effective_cpus);
3533 top_cpuset.mems_allowed = top_cpuset.effective_mems;
3534 }
3535
3536 spin_unlock_irq(&callback_lock);
3537 mutex_unlock(&cpuset_mutex);
3538 }
3539
3540 /*
3541 * In case the child is cloned into a cpuset different from its parent,
3542 * additional checks are done to see if the move is allowed.
3543 */
3544 static int cpuset_can_fork(struct task_struct *task, struct css_set *cset)
3545 {
3546 struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
3547 bool same_cs;
3548 int ret;
3549
3550 rcu_read_lock();
3551 same_cs = (cs == task_cs(current));
3552 rcu_read_unlock();
3553
3554 if (same_cs)
3555 return 0;
3556
3557 lockdep_assert_held(&cgroup_mutex);
3558 mutex_lock(&cpuset_mutex);
3559
3560 /* Check to see if task is allowed in the cpuset */
3561 ret = cpuset_can_attach_check(cs);
3562 if (ret)
3563 goto out_unlock;
3564
3565 ret = task_can_attach(task);
3566 if (ret)
3567 goto out_unlock;
3568
3569 ret = security_task_setscheduler(task);
3570 if (ret)
3571 goto out_unlock;
3572
3573 /*
3574 * Mark attach is in progress. This makes validate_change() fail
3575 * changes which zero cpus/mems_allowed.
3576 */
3577 cs->attach_in_progress++;
3578 out_unlock:
3579 mutex_unlock(&cpuset_mutex);
3580 return ret;
3581 }
3582
3583 static void cpuset_cancel_fork(struct task_struct *task, struct css_set *cset)
3584 {
3585 struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
3586 bool same_cs;
3587
3588 rcu_read_lock();
3589 same_cs = (cs == task_cs(current));
3590 rcu_read_unlock();
3591
3592 if (same_cs)
3593 return;
3594
3595 dec_attach_in_progress(cs);
3596 }
3597
3598 /*
3599 * Make sure the new task conforms to the current state of its parent,
3600 * which could have been changed by cpuset just after the task inherited
3601 * the state from its parent and before it sits on the cgroup's task list.
3602 */
3603 static void cpuset_fork(struct task_struct *task)
3604 {
3605 struct cpuset *cs;
3606 bool same_cs;
3607
3608 rcu_read_lock();
3609 cs = task_cs(task);
3610 same_cs = (cs == task_cs(current));
3611 rcu_read_unlock();
3612
3613 if (same_cs) {
3614 if (cs == &top_cpuset)
3615 return;
3616
3617 set_cpus_allowed_ptr(task, current->cpus_ptr);
3618 task->mems_allowed = current->mems_allowed;
3619 return;
3620 }
3621
3622 /* CLONE_INTO_CGROUP */
3623 mutex_lock(&cpuset_mutex);
3624 guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
3625 cpuset_attach_task(cs, task);
3626
3627 dec_attach_in_progress_locked(cs);
3628 mutex_unlock(&cpuset_mutex);
3629 }
3630
3631 struct cgroup_subsys cpuset_cgrp_subsys = {
3632 .css_alloc = cpuset_css_alloc,
3633 .css_online = cpuset_css_online,
3634 .css_offline = cpuset_css_offline,
3635 .css_free = cpuset_css_free,
3636 .can_attach = cpuset_can_attach,
3637 .cancel_attach = cpuset_cancel_attach,
3638 .attach = cpuset_attach,
3639 .post_attach = cpuset_post_attach,
3640 .bind = cpuset_bind,
3641 .can_fork = cpuset_can_fork,
3642 .cancel_fork = cpuset_cancel_fork,
3643 .fork = cpuset_fork,
3644 #ifdef CONFIG_CPUSETS_V1
3645 .legacy_cftypes = cpuset1_files,
3646 #endif
3647 .dfl_cftypes = dfl_files,
3648 .early_init = true,
3649 .threaded = true,
3650 };
3651
3652 /**
3653 * cpuset_init - initialize cpusets at system boot
3654 *
3655 * Description: Initialize top_cpuset
3656 **/
3657
3658 int __init cpuset_init(void)
3659 {
3660 BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL));
3661 BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL));
3662 BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_xcpus, GFP_KERNEL));
3663 BUG_ON(!alloc_cpumask_var(&top_cpuset.exclusive_cpus, GFP_KERNEL));
3664 BUG_ON(!zalloc_cpumask_var(&subpartitions_cpus, GFP_KERNEL));
3665 BUG_ON(!zalloc_cpumask_var(&isolated_cpus, GFP_KERNEL));
3666
3667 cpumask_setall(top_cpuset.cpus_allowed);
3668 nodes_setall(top_cpuset.mems_allowed);
3669 cpumask_setall(top_cpuset.effective_cpus);
3670 cpumask_setall(top_cpuset.effective_xcpus);
3671 cpumask_setall(top_cpuset.exclusive_cpus);
3672 nodes_setall(top_cpuset.effective_mems);
3673
3674 fmeter_init(&top_cpuset.fmeter);
3675 INIT_LIST_HEAD(&remote_children);
3676
3677 BUG_ON(!alloc_cpumask_var(&cpus_attach, GFP_KERNEL));
3678
3679 have_boot_isolcpus = housekeeping_enabled(HK_TYPE_DOMAIN);
3680 if (have_boot_isolcpus) {
3681 BUG_ON(!alloc_cpumask_var(&boot_hk_cpus, GFP_KERNEL));
3682 cpumask_copy(boot_hk_cpus, housekeeping_cpumask(HK_TYPE_DOMAIN));
3683 cpumask_andnot(isolated_cpus, cpu_possible_mask, boot_hk_cpus);
3684 }
3685
3686 return 0;
3687 }
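
/*
 * Worked example (editorial note, not part of the kernel source),
 * assuming an 8-CPU machine booted with "isolcpus=domain,2-3":
 *
 *	housekeeping_cpumask(HK_TYPE_DOMAIN)	= 0-1,4-7
 *	boot_hk_cpus				= 0-1,4-7
 *	isolated_cpus (seeded above)		= 2-3
 *
 * so CPUs 2-3 start out isolated before any cpuset partition is set up.
 */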
3688
3689 static void
3690 hotplug_update_tasks(struct cpuset *cs,
3691 struct cpumask *new_cpus, nodemask_t *new_mems,
3692 bool cpus_updated, bool mems_updated)
3693 {
3694 /* A partition root is allowed to have empty effective cpus */
3695 if (cpumask_empty(new_cpus) && !is_partition_valid(cs))
3696 cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus);
3697 if (nodes_empty(*new_mems))
3698 *new_mems = parent_cs(cs)->effective_mems;
3699
3700 spin_lock_irq(&callback_lock);
3701 cpumask_copy(cs->effective_cpus, new_cpus);
3702 cs->effective_mems = *new_mems;
3703 spin_unlock_irq(&callback_lock);
3704
3705 if (cpus_updated)
3706 cpuset_update_tasks_cpumask(cs, new_cpus);
3707 if (mems_updated)
3708 cpuset_update_tasks_nodemask(cs);
3709 }
3710
3711 void cpuset_force_rebuild(void)
3712 {
3713 force_sd_rebuild = true;
3714 }
3715
3716 /**
3717 * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
3718 * @cs: cpuset in interest
3719 * @tmp: the tmpmasks structure pointer
3720 *
3721 * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
3722 * offline, update @cs accordingly. If @cs ends up with no CPU or memory,
3723 * all its tasks are moved to the nearest ancestor with both resources.
3724 */
3725 static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
3726 {
3727 static cpumask_t new_cpus;
3728 static nodemask_t new_mems;
3729 bool cpus_updated;
3730 bool mems_updated;
3731 bool remote;
3732 int partcmd = -1;
3733 struct cpuset *parent;
3734 retry:
3735 wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
3736
3737 mutex_lock(&cpuset_mutex);
3738
3739 /*
3740 * We have raced with task attaching. We wait until attaching
3741 * is finished, so we won't attach a task to an empty cpuset.
3742 */
3743 if (cs->attach_in_progress) {
3744 mutex_unlock(&cpuset_mutex);
3745 goto retry;
3746 }
3747
3748 parent = parent_cs(cs);
3749 compute_effective_cpumask(&new_cpus, cs, parent);
3750 nodes_and(new_mems, cs->mems_allowed, parent->effective_mems);
3751
3752 if (!tmp || !cs->partition_root_state)
3753 goto update_tasks;
3754
3755 /*
3756 * Compute effective_cpus for valid partition root, may invalidate
3757 * child partition roots if necessary.
3758 */
3759 remote = is_remote_partition(cs);
3760 if (remote || (is_partition_valid(cs) && is_partition_valid(parent)))
3761 compute_partition_effective_cpumask(cs, &new_cpus);
3762
3763 if (remote && cpumask_empty(&new_cpus) &&
3764 partition_is_populated(cs, NULL)) {
3765 remote_partition_disable(cs, tmp);
3766 compute_effective_cpumask(&new_cpus, cs, parent);
3767 remote = false;
3768 cpuset_force_rebuild();
3769 }
3770
3771 /*
3772 * Force the partition to become invalid if either one of
3773 * the following conditions hold:
3774 * 1) empty effective cpus but not valid empty partition.
3775 * 2) parent is invalid or doesn't grant any cpus to child
3776 * partitions.
3777 */
3778 if (is_local_partition(cs) && (!is_partition_valid(parent) ||
3779 tasks_nocpu_error(parent, cs, &new_cpus)))
3780 partcmd = partcmd_invalidate;
3781 /*
3782 * On the other hand, an invalid partition root may be transitioned
3783 * back to a regular one.
3784 */
3785 else if (is_partition_valid(parent) && is_partition_invalid(cs))
3786 partcmd = partcmd_update;
3787
3788 if (partcmd >= 0) {
3789 update_parent_effective_cpumask(cs, partcmd, NULL, tmp);
3790 if ((partcmd == partcmd_invalidate) || is_partition_valid(cs)) {
3791 compute_partition_effective_cpumask(cs, &new_cpus);
3792 cpuset_force_rebuild();
3793 }
3794 }
3795
3796 update_tasks:
3797 cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
3798 mems_updated = !nodes_equal(new_mems, cs->effective_mems);
3799 if (!cpus_updated && !mems_updated)
3800 goto unlock; /* Hotplug doesn't affect this cpuset */
3801
3802 if (mems_updated)
3803 check_insane_mems_config(&new_mems);
3804
3805 if (is_in_v2_mode())
3806 hotplug_update_tasks(cs, &new_cpus, &new_mems,
3807 cpus_updated, mems_updated);
3808 else
3809 cpuset1_hotplug_update_tasks(cs, &new_cpus, &new_mems,
3810 cpus_updated, mems_updated);
3811
3812 unlock:
3813 mutex_unlock(&cpuset_mutex);
3814 }
3815
3816 /**
3817 * cpuset_handle_hotplug - handle CPU/memory hot{,un}plug for a cpuset
3818 *
3819 * This function is called after either CPU or memory configuration has
3820 * changed and updates cpuset accordingly. The top_cpuset is always
3821 * synchronized to cpu_active_mask and N_MEMORY, which is necessary in
3822 * order to make cpusets transparent (having no effect) on systems that are
3823 * actively using CPU hotplug but making no active use of cpusets.
3824 *
3825 * Non-root cpusets are only affected by offlining. If any CPUs or memory
3826 * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on
3827 * all descendants.
3828 *
3829 * Note that CPU offlining during suspend is ignored. We don't modify
3830 * cpusets across suspend/resume cycles at all.
3831 *
3832 * CPU / memory hotplug is handled synchronously.
3833 */
3834 static void cpuset_handle_hotplug(void)
3835 {
3836 static cpumask_t new_cpus;
3837 static nodemask_t new_mems;
3838 bool cpus_updated, mems_updated;
3839 bool on_dfl = is_in_v2_mode();
3840 struct tmpmasks tmp, *ptmp = NULL;
3841
3842 if (on_dfl && !alloc_cpumasks(NULL, &tmp))
3843 ptmp = &tmp;
3844
3845 lockdep_assert_cpus_held();
3846 mutex_lock(&cpuset_mutex);
3847
3848 /* fetch the available cpus/mems and find out which changed how */
3849 cpumask_copy(&new_cpus, cpu_active_mask);
3850 new_mems = node_states[N_MEMORY];
3851
3852 /*
3853 * If subpartitions_cpus is populated, it is likely that the check
3854 * below will produce a false positive on cpus_updated when the cpu
3855 * list isn't changed. It is extra work, but it is better to be safe.
3856 */
3857 cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus) ||
3858 !cpumask_empty(subpartitions_cpus);
3859 mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);
3860
3861 /* For v1, synchronize cpus_allowed to cpu_active_mask */
3862 if (cpus_updated) {
3863 cpuset_force_rebuild();
3864 spin_lock_irq(&callback_lock);
3865 if (!on_dfl)
3866 cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
3867 /*
3868 * Make sure that CPUs allocated to child partitions
3869 * do not show up in effective_cpus. If no CPU is left,
3870 * we clear the subpartitions_cpus & let the child partitions
3871 * fight for the CPUs again.
3872 */
3873 if (!cpumask_empty(subpartitions_cpus)) {
3874 if (cpumask_subset(&new_cpus, subpartitions_cpus)) {
3875 top_cpuset.nr_subparts = 0;
3876 cpumask_clear(subpartitions_cpus);
3877 } else {
3878 cpumask_andnot(&new_cpus, &new_cpus,
3879 subpartitions_cpus);
3880 }
3881 }
3882 cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
3883 spin_unlock_irq(&callback_lock);
3884 /* we don't mess with cpumasks of tasks in top_cpuset */
3885 }
3886
3887 /* synchronize mems_allowed to N_MEMORY */
3888 if (mems_updated) {
3889 spin_lock_irq(&callback_lock);
3890 if (!on_dfl)
3891 top_cpuset.mems_allowed = new_mems;
3892 top_cpuset.effective_mems = new_mems;
3893 spin_unlock_irq(&callback_lock);
3894 cpuset_update_tasks_nodemask(&top_cpuset);
3895 }
3896
3897 mutex_unlock(&cpuset_mutex);
3898
3899 /* if cpus or mems changed, we need to propagate to descendants */
3900 if (cpus_updated || mems_updated) {
3901 struct cpuset *cs;
3902 struct cgroup_subsys_state *pos_css;
3903
3904 rcu_read_lock();
3905 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
3906 if (cs == &top_cpuset || !css_tryget_online(&cs->css))
3907 continue;
3908 rcu_read_unlock();
3909
3910 cpuset_hotplug_update_tasks(cs, ptmp);
3911
3912 rcu_read_lock();
3913 css_put(&cs->css);
3914 }
3915 rcu_read_unlock();
3916 }
3917
3918 /* rebuild sched domains if necessary */
3919 if (force_sd_rebuild)
3920 rebuild_sched_domains_cpuslocked();
3921
3922 free_cpumasks(NULL, ptmp);
3923 }
3924
3925 void cpuset_update_active_cpus(void)
3926 {
3927 /*
3928 * We're inside the cpu hotplug critical region which usually nests
3929 * inside cgroup synchronization. Hotplug processing is handled
3930 * synchronously here via cpuset_handle_hotplug().
3931 */
3932 cpuset_handle_hotplug();
3933 }
3934
3935 /*
3936 * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
3937 * Call this routine anytime after node_states[N_MEMORY] changes.
3938 * See cpuset_update_active_cpus() for CPU hotplug handling.
3939 */
3940 static int cpuset_track_online_nodes(struct notifier_block *self,
3941 unsigned long action, void *arg)
3942 {
3943 cpuset_handle_hotplug();
3944 return NOTIFY_OK;
3945 }
3946
3947 /**
3948 * cpuset_init_smp - initialize cpus_allowed
3949 *
3950 * Description: Finish top cpuset after cpu, node maps are initialized
3951 */
3952 void __init cpuset_init_smp(void)
3953 {
3954 /*
3955 * cpus_allowed/mems_allowed set to v2 values in the initial
3956 * cpuset_bind() call will be reset to v1 values in another
3957 * cpuset_bind() call when v1 cpuset is mounted.
3958 */
3959 top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;
3960
3961 cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask);
3962 top_cpuset.effective_mems = node_states[N_MEMORY];
3963
3964 hotplug_memory_notifier(cpuset_track_online_nodes, CPUSET_CALLBACK_PRI);
3965
3966 cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
3967 BUG_ON(!cpuset_migrate_mm_wq);
3968 }
3969
3970 /**
3971 * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset.
3972 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
3973 * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
3974 *
3975 * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
3976 * attached to the specified @tsk. Guaranteed to return some non-empty
3977 * subset of cpu_online_mask, even if this means going outside the
3978 * tasks cpuset, except when the task is in the top cpuset.
3979 **/
3980
3981 void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
3982 {
3983 unsigned long flags;
3984 struct cpuset *cs;
3985
3986 spin_lock_irqsave(&callback_lock, flags);
3987 rcu_read_lock();
3988
3989 cs = task_cs(tsk);
3990 if (cs != &top_cpuset)
3991 guarantee_online_cpus(tsk, pmask);
3992 /*
3993 * Tasks in the top cpuset won't have their cpumasks updated
3994 * when a hotplug online/offline event happens. So we include all
3995 * offline cpus in the allowed cpu list.
3996 */
3997 if ((cs == &top_cpuset) || cpumask_empty(pmask)) {
3998 const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
3999
4000 /*
4001 * We first exclude cpus allocated to partitions. If there is no
4002 * allowable online cpu left, we fall back to all possible cpus.
4003 */
4004 cpumask_andnot(pmask, possible_mask, subpartitions_cpus);
4005 if (!cpumask_intersects(pmask, cpu_online_mask))
4006 cpumask_copy(pmask, possible_mask);
4007 }
4008
4009 rcu_read_unlock();
4010 spin_unlock_irqrestore(&callback_lock, flags);
4011 }
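
/*
 * Illustrative sketch, not part of the kernel source: how a caller
 * (e.g. affinity-setting code) might use cpuset_cpus_allowed() above to
 * obtain a guaranteed non-empty mask for a task.  The function name
 * below is hypothetical.
 */
#if 0	/* example only, never compiled */
static int example_apply_cpuset_affinity(struct task_struct *tsk)
{
	cpumask_var_t mask;
	int ret;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	/* Always yields a non-empty mask, per the comment above */
	cpuset_cpus_allowed(tsk, mask);
	ret = set_cpus_allowed_ptr(tsk, mask);

	free_cpumask_var(mask);
	return ret;
}
#endif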
4012
4013 /**
4014 * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
4015 * @tsk: pointer to task_struct with which the scheduler is struggling
4016 *
4017 * Description: In the case that the scheduler cannot find an allowed cpu in
4018 * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy
4019 * mode however, this value is the same as task_cs(tsk)->effective_cpus,
4020 * which will not contain a sane cpumask during cases such as cpu hotplugging.
4021 * This is the absolute last resort for the scheduler and it is only used if
4022 * _every_ other avenue has been traveled.
4023 *
4024 * Returns true if the affinity of @tsk was changed, false otherwise.
4025 **/
4026
4027 bool cpuset_cpus_allowed_fallback(struct task_struct *tsk)
4028 {
4029 const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
4030 const struct cpumask *cs_mask;
4031 bool changed = false;
4032
4033 rcu_read_lock();
4034 cs_mask = task_cs(tsk)->cpus_allowed;
4035 if (is_in_v2_mode() && cpumask_subset(cs_mask, possible_mask)) {
4036 do_set_cpus_allowed(tsk, cs_mask);
4037 changed = true;
4038 }
4039 rcu_read_unlock();
4040
4041 /*
4042 * We own tsk->cpus_allowed, nobody can change it under us.
4043 *
4044 * But we used cs && cs->cpus_allowed lockless and thus can
4045 * race with cgroup_attach_task() or update_cpumask() and get
4046 * the wrong tsk->cpus_allowed. However, both cases imply the
4047 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
4048 * which takes task_rq_lock().
4049 *
4050 * If we are called after it dropped the lock we must see all
4051 * changes in task_cs()->cpus_allowed. Otherwise we can temporarily
4052 * set any mask even if it is not right from task_cs() pov,
4053 * the pending set_cpus_allowed_ptr() will fix things.
4054 *
4055 * select_fallback_rq() will fix things up and set cpu_possible_mask
4056 * if required.
4057 */
4058 return changed;
4059 }
4060
4061 void __init cpuset_init_current_mems_allowed(void)
4062 {
4063 nodes_setall(current->mems_allowed);
4064 }
4065
4066 /**
4067 * cpuset_mems_allowed - return mems_allowed mask from a tasks cpuset.
4068 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
4069 *
4070 * Description: Returns the nodemask_t mems_allowed of the cpuset
4071 * attached to the specified @tsk. Guaranteed to return some non-empty
4072 * subset of node_states[N_MEMORY], even if this means going outside the
4073 * tasks cpuset.
4074 **/
4075
4076 nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
4077 {
4078 nodemask_t mask;
4079 unsigned long flags;
4080
4081 spin_lock_irqsave(&callback_lock, flags);
4082 rcu_read_lock();
4083 guarantee_online_mems(task_cs(tsk), &mask);
4084 rcu_read_unlock();
4085 spin_unlock_irqrestore(&callback_lock, flags);
4086
4087 return mask;
4088 }
4089
4090 /**
4091 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
4092 * @nodemask: the nodemask to be checked
4093 *
4094 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
4095 */
4096 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
4097 {
4098 return nodes_intersects(*nodemask, current->mems_allowed);
4099 }
4100
4101 /*
4102 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
4103 * mem_hardwall ancestor to the specified cpuset. Call holding
4104 * callback_lock. If no ancestor is mem_exclusive or mem_hardwall
4105 * (an unusual configuration), then returns the root cpuset.
4106 */
4107 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
4108 {
4109 while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
4110 cs = parent_cs(cs);
4111 return cs;
4112 }
4113
4114 /*
4115 * cpuset_node_allowed - Can we allocate on a memory node?
4116 * @node: is this an allowed node?
4117 * @gfp_mask: memory allocation flags
4118 *
4119 * If we're in interrupt, yes, we can always allocate. If @node is set in
4120 * current's mems_allowed, yes. If it's not a __GFP_HARDWALL request and this
4121 * node is set in the nearest hardwalled cpuset ancestor to current's cpuset,
4122 * yes. If current has access to memory reserves as an oom victim, yes.
4123 * Otherwise, no.
4124 *
4125 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
4126 * and do not allow allocations outside the current tasks cpuset
4127 * unless the task has been OOM killed.
4128 * GFP_KERNEL allocations are not so marked, so can escape to the
4129 * nearest enclosing hardwalled ancestor cpuset.
4130 *
4131 * Scanning up parent cpusets requires callback_lock. The
4132 * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
4133 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
4134 * current tasks mems_allowed came up empty on the first pass over
4135 * the zonelist. So only GFP_KERNEL allocations, if all nodes in the
4136 * cpuset are short of memory, might require taking the callback_lock.
4137 *
4138 * The first call here from mm/page_alloc:get_page_from_freelist()
4139 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
4140 * so no allocation on a node outside the cpuset is allowed (unless
4141 * in interrupt, of course).
4142 *
4143 * The second pass through get_page_from_freelist() doesn't even call
4144 * here for GFP_ATOMIC calls. For those calls, the __alloc_pages()
4145 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
4146 * in alloc_flags. That logic and the checks below have the combined
4147 * effect that:
4148 * in_interrupt - any node ok (current task context irrelevant)
4149 * GFP_ATOMIC - any node ok
4150 * tsk_is_oom_victim - any node ok
4151 * GFP_KERNEL - any node in enclosing hardwalled cpuset ok
4152 * GFP_USER - only nodes in current tasks mems allowed ok.
4153 */
4154 bool cpuset_node_allowed(int node, gfp_t gfp_mask)
4155 {
4156 struct cpuset *cs; /* current cpuset ancestors */
4157 bool allowed; /* is allocation in zone z allowed? */
4158 unsigned long flags;
4159
4160 if (in_interrupt())
4161 return true;
4162 if (node_isset(node, current->mems_allowed))
4163 return true;
4164 /*
4165 * Allow tasks that have access to memory reserves because they have
4166 * been OOM killed to get memory anywhere.
4167 */
4168 if (unlikely(tsk_is_oom_victim(current)))
4169 return true;
4170 if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */
4171 return false;
4172
4173 if (current->flags & PF_EXITING) /* Let dying task have memory */
4174 return true;
4175
4176 /* Not hardwall and node outside mems_allowed: scan up cpusets */
4177 spin_lock_irqsave(&callback_lock, flags);
4178
4179 rcu_read_lock();
4180 cs = nearest_hardwall_ancestor(task_cs(current));
4181 allowed = node_isset(node, cs->mems_allowed);
4182 rcu_read_unlock();
4183
4184 spin_unlock_irqrestore(&callback_lock, flags);
4185 return allowed;
4186 }
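
/*
 * Illustrative sketch, not part of the kernel source: picking the first
 * node of a candidate nodemask on which the current task may allocate
 * with a given gfp_mask, following the policy table in the comment
 * above cpuset_node_allowed().  The helper name is hypothetical.
 */
#if 0	/* example only, never compiled */
static int example_first_allowed_node(const nodemask_t *candidates,
				      gfp_t gfp_mask)
{
	int nid;

	for_each_node_mask(nid, *candidates)
		if (cpuset_node_allowed(nid, gfp_mask))
			return nid;

	/* e.g. a GFP_USER request where no candidate is in mems_allowed */
	return NUMA_NO_NODE;
}
#endif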
4187
4188 /**
4189 * cpuset_spread_node() - On which node to begin search for a page
4190 * @rotor: round robin rotor
4191 *
4192 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
4193 * tasks in a cpuset with is_spread_page or is_spread_slab set),
4194 * and if the memory allocation used cpuset_mem_spread_node()
4195 * to determine on which node to start looking, as it will for
4196 * certain page cache or slab cache pages such as used for file
4197 * system buffers and inode caches, then instead of starting on the
4198 * local node to look for a free page, rather spread the starting
4199 * node around the tasks mems_allowed nodes.
4200 *
4201 * We don't have to worry about the returned node being offline
4202 * because "it can't happen", and even if it did, it would be ok.
4203 *
4204 * The routines calling guarantee_online_mems() are careful to
4205 * only set nodes in task->mems_allowed that are online. So it
4206 * should not be possible for the following code to return an
4207 * offline node. But if it did, that would be ok, as this routine
4208 * is not returning the node where the allocation must be, only
4209 * the node where the search should start. The zonelist passed to
4210 * __alloc_pages() will include all nodes. If the slab allocator
4211 * is passed an offline node, it will fall back to the local node.
4212 * See kmem_cache_alloc_node().
4213 */
4214 static int cpuset_spread_node(int *rotor)
4215 {
4216 return *rotor = next_node_in(*rotor, current->mems_allowed);
4217 }
4218
4219 /**
4220 * cpuset_mem_spread_node() - On which node to begin search for a file page
4221 */
4222 int cpuset_mem_spread_node(void)
4223 {
4224 if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
4225 current->cpuset_mem_spread_rotor =
4226 node_random(&current->mems_allowed);
4227
4228 return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
4229 }
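
/*
 * Illustrative sketch, not part of the kernel source: the kind of
 * allocation path that spreads page placement round-robin over
 * mems_allowed for PF_SPREAD_PAGE tasks, in the spirit of the page
 * cache allocation helpers.  The function name is hypothetical.
 */
#if 0	/* example only, never compiled */
static struct page *example_spread_alloc(gfp_t gfp, unsigned int order)
{
	int nid = numa_node_id();

	/* Spread tasks start from the rotor node instead of the local one */
	if (cpuset_do_page_mem_spread())
		nid = cpuset_mem_spread_node();

	return alloc_pages_node(nid, gfp, order);
}
#endif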
4230
4231 /**
4232 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
4233 * @tsk1: pointer to task_struct of some task.
4234 * @tsk2: pointer to task_struct of some other task.
4235 *
4236 * Description: Return true if @tsk1's mems_allowed intersects the
4237 * mems_allowed of @tsk2. Used by the OOM killer to determine if
4238 * one of the task's memory usage might impact the memory available
4239 * to the other.
4240 **/
4241
4242 int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
4243 const struct task_struct *tsk2)
4244 {
4245 return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
4246 }
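
/*
 * Illustrative sketch, not part of the kernel source: the OOM-killer
 * style check described above - skip a candidate task whose memory
 * cannot relieve the pressure seen by the allocating task.  The helper
 * name is hypothetical; compare the cpuset check in mm/oom_kill.c.
 */
#if 0	/* example only, never compiled */
static bool example_oom_may_help(const struct task_struct *candidate,
				 const struct task_struct *allocator)
{
	/* No node overlap => killing @candidate frees nothing useful */
	return cpuset_mems_allowed_intersects(candidate, allocator);
}
#endif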
4247
4248 /**
4249 * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed
4250 *
4251 * Description: Prints current's name, cpuset name, and cached copy of its
4252 * mems_allowed to the kernel log.
4253 */
4254 void cpuset_print_current_mems_allowed(void)
4255 {
4256 struct cgroup *cgrp;
4257
4258 rcu_read_lock();
4259
4260 cgrp = task_cs(current)->css.cgroup;
4261 pr_cont(",cpuset=");
4262 pr_cont_cgroup_name(cgrp);
4263 pr_cont(",mems_allowed=%*pbl",
4264 nodemask_pr_args(&current->mems_allowed));
4265
4266 rcu_read_unlock();
4267 }
4268
4269 #ifdef CONFIG_PROC_PID_CPUSET
4270 /*
4271 * proc_cpuset_show()
4272 * - Print tasks cpuset path into seq_file.
4273 * - Used for /proc/<pid>/cpuset.
4274 * - No need to task_lock(tsk) on this tsk->cpuset reference, as it
4275 * doesn't really matter if tsk->cpuset changes after we read it,
4276 * and we take css_set_lock, keeping cpuset_attach() from changing it
4277 * anyway.
4278 */
4279 int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
4280 struct pid *pid, struct task_struct *tsk)
4281 {
4282 char *buf;
4283 struct cgroup_subsys_state *css;
4284 int retval;
4285
4286 retval = -ENOMEM;
4287 buf = kmalloc(PATH_MAX, GFP_KERNEL);
4288 if (!buf)
4289 goto out;
4290
4291 rcu_read_lock();
4292 spin_lock_irq(&css_set_lock);
4293 css = task_css(tsk, cpuset_cgrp_id);
4294 retval = cgroup_path_ns_locked(css->cgroup, buf, PATH_MAX,
4295 current->nsproxy->cgroup_ns);
4296 spin_unlock_irq(&css_set_lock);
4297 rcu_read_unlock();
4298
4299 if (retval == -E2BIG)
4300 retval = -ENAMETOOLONG;
4301 if (retval < 0)
4302 goto out_free;
4303 seq_puts(m, buf);
4304 seq_putc(m, '\n');
4305 retval = 0;
4306 out_free:
4307 kfree(buf);
4308 out:
4309 return retval;
4310 }
4311 #endif /* CONFIG_PROC_PID_CPUSET */
4312
4313 /* Display task mems_allowed in /proc/<pid>/status file. */
4314 void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
4315 {
4316 seq_printf(m, "Mems_allowed:\t%*pb\n",
4317 nodemask_pr_args(&task->mems_allowed));
4318 seq_printf(m, "Mems_allowed_list:\t%*pbl\n",
4319 nodemask_pr_args(&task->mems_allowed));
4320 }
4321